repo_name
string
path
string
copies
string
size
string
content
string
license
string
PyroDD/Pyro
drivers/net/irda/nsc-ircc.c
2528
60879
/********************************************************************* * * Filename: nsc-ircc.c * Version: 1.0 * Description: Driver for the NSC PC'108 and PC'338 IrDA chipsets * Status: Stable. * Author: Dag Brattli <dagb@cs.uit.no> * Created at: Sat Nov 7 21:43:15 1998 * Modified at: Wed Mar 1 11:29:34 2000 * Modified by: Dag Brattli <dagb@cs.uit.no> * * Copyright (c) 1998-2000 Dag Brattli <dagb@cs.uit.no> * Copyright (c) 1998 Lichen Wang, <lwang@actisys.com> * Copyright (c) 1998 Actisys Corp., www.actisys.com * Copyright (c) 2000-2004 Jean Tourrilhes <jt@hpl.hp.com> * All Rights Reserved * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * * Neither Dag Brattli nor University of Tromsø admit liability nor * provide warranty for any of this software. This material is * provided "AS-IS" and at no charge. * * Notice that all functions that needs to access the chip in _any_ * way, must save BSR register on entry, and restore it on exit. * It is _very_ important to follow this policy! * * __u8 bank; * * bank = inb(iobase+BSR); * * do_your_stuff_here(); * * outb(bank, iobase+BSR); * * If you find bugs in this file, its very likely that the same bug * will also be in w83977af_ir.c since the implementations are quite * similar. 
 *
 ********************************************************************/

#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/rtnetlink.h>
#include <linux/dma-mapping.h>
#include <linux/pnp.h>
#include <linux/platform_device.h>

#include <asm/io.h>
#include <asm/dma.h>
#include <asm/byteorder.h>

#include <net/irda/wrapper.h>
#include <net/irda/irda.h>
#include <net/irda/irda_device.h>

#include "nsc-ircc.h"

/* Number of consecutive I/O ports claimed per chip instance. */
#define CHIP_IO_EXTENT 8
/* Work around chips that report dongle id 0x0a instead of 0x09. */
#define BROKEN_DONGLE_ID

static char *driver_name = "nsc-ircc";

/* Power Management */
#define NSC_IRCC_DRIVER_NAME "nsc-ircc"
static int nsc_ircc_suspend(struct platform_device *dev, pm_message_t state);
static int nsc_ircc_resume(struct platform_device *dev);

static struct platform_driver nsc_ircc_driver = {
	.suspend	= nsc_ircc_suspend,
	.resume		= nsc_ircc_resume,
	.driver		= {
		.name	= NSC_IRCC_DRIVER_NAME,
	},
};

/* Module parameters */
static int qos_mtt_bits = 0x07;	/* 1 ms or more */
static int dongle_id;

/* Use BIOS settings by default, but user may supply module parameters */
static unsigned int io[] = { ~0, ~0, ~0, ~0, ~0 };
static unsigned int irq[] = { 0, 0, 0, 0, 0 };
static unsigned int dma[] = { 0, 0, 0, 0, 0 };

static int nsc_ircc_probe_108(nsc_chip_t *chip, chipio_t *info);
static int nsc_ircc_probe_338(nsc_chip_t *chip, chipio_t *info);
static int nsc_ircc_probe_39x(nsc_chip_t *chip, chipio_t *info);
static int nsc_ircc_init_108(nsc_chip_t *chip, chipio_t *info);
static int nsc_ircc_init_338(nsc_chip_t *chip, chipio_t *info);
static int nsc_ircc_init_39x(nsc_chip_t *chip, chipio_t *info);
#ifdef CONFIG_PNP
static int nsc_ircc_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *id);
#endif

/* These are the known NSC chips */
static nsc_chip_t chips[] = {
/*  Name, {cfg registers}, chip id index reg, chip id expected value, revision mask */
	{ "PC87108", { 0x150, 0x398, 0xea }, 0x05, 0x10, 0xf0,
	  nsc_ircc_probe_108, nsc_ircc_init_108 },
	{ "PC87338", { 0x398, 0x15c, 0x2e }, 0x08, 0xb0, 0xf8,
	  nsc_ircc_probe_338, nsc_ircc_init_338 },
	/* Contributed by Steffen Pingel - IBM X40 */
	{ "PC8738x", { 0x164e, 0x4e, 0x2e }, 0x20, 0xf4, 0xff,
	  nsc_ircc_probe_39x, nsc_ircc_init_39x },
	/* Contributed by Jan Frey - IBM A30/A31 */
	{ "PC8739x", { 0x2e, 0x4e, 0x0 }, 0x20, 0xea, 0xff,
	  nsc_ircc_probe_39x, nsc_ircc_init_39x },
	/* IBM ThinkPads using PC8738x (T60/X60/Z60) */
	{ "IBM-PC8738x", { 0x2e, 0x4e, 0x0 }, 0x20, 0xf4, 0xff,
	  nsc_ircc_probe_39x, nsc_ircc_init_39x },
	/* IBM ThinkPads using PC8394T (T43/R52/?) */
	{ "IBM-PC8394T", { 0x2e, 0x4e, 0x0 }, 0x20, 0xf9, 0xff,
	  nsc_ircc_probe_39x, nsc_ircc_init_39x },
	{ NULL }
};

/* One control block per discovered chip, up to five chips. */
static struct nsc_ircc_cb *dev_self[] = { NULL, NULL, NULL, NULL, NULL };

/* Human-readable names indexed by the 4-bit dongle id read from the chip. */
static char *dongle_types[] = {
	"Differential serial interface",
	"Differential serial interface",
	"Reserved",
	"Reserved",
	"Sharp RY5HD01",
	"Reserved",
	"Single-ended serial interface",
	"Consumer-IR only",
	"HP HSDL-2300, HP HSDL-3600/HSDL-3610",
	"IBM31T1100 or Temic TFDS6000/TFDS6500",
	"Reserved",
	"Reserved",
	"HP HSDL-1100/HSDL-2100",
	"HP HSDL-1100/HSDL-2100",
	"Supports SIR Mode only",
	"No dongle connected",
};

/* PNP probing */
static chipio_t pnp_info;
static const struct pnp_device_id nsc_ircc_pnp_table[] = {
	{ .id = "NSC6001", .driver_data = 0 },
	{ .id = "HWPC224", .driver_data = 0 },
	{ .id = "IBM0071", .driver_data = NSC_FORCE_DONGLE_TYPE9 },
	{ }
};

MODULE_DEVICE_TABLE(pnp, nsc_ircc_pnp_table);

static struct pnp_driver nsc_ircc_pnp_driver = {
#ifdef CONFIG_PNP
	.name = "nsc-ircc",
	.id_table = nsc_ircc_pnp_table,
	.probe = nsc_ircc_pnp_probe,
#endif
};

/* Some prototypes */
static int  nsc_ircc_open(chipio_t *info);
static int  nsc_ircc_close(struct nsc_ircc_cb *self);
static int  nsc_ircc_setup(chipio_t *info);
static void nsc_ircc_pio_receive(struct nsc_ircc_cb *self);
static int  nsc_ircc_dma_receive(struct nsc_ircc_cb *self);
static int  nsc_ircc_dma_receive_complete(struct nsc_ircc_cb *self, int iobase);
static netdev_tx_t  nsc_ircc_hard_xmit_sir(struct sk_buff *skb,
					   struct net_device *dev);
static netdev_tx_t  nsc_ircc_hard_xmit_fir(struct sk_buff *skb,
					   struct net_device *dev);
static int  nsc_ircc_pio_write(int iobase, __u8 *buf, int len, int fifo_size);
static void nsc_ircc_dma_xmit(struct nsc_ircc_cb *self, int iobase);
static __u8 nsc_ircc_change_speed(struct nsc_ircc_cb *self, __u32 baud);
static int  nsc_ircc_is_receiving(struct nsc_ircc_cb *self);
static int  nsc_ircc_read_dongle_id (int iobase);
static void nsc_ircc_init_dongle_interface (int iobase, int dongle_id);
static int  nsc_ircc_net_open(struct net_device *dev);
static int  nsc_ircc_net_close(struct net_device *dev);
static int  nsc_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);

/* Globals */
static int pnp_registered;	/* set once pnp_register_driver() succeeded */
static int pnp_succeeded;	/* set when PnP probing produced usable values */

/*
 * Function nsc_ircc_init ()
 *
 *    Initialize chip. Just try to find out how many chips we are dealing with
 *    and where they are
 */
static int __init nsc_ircc_init(void)
{
	chipio_t info;
	nsc_chip_t *chip;
	int ret;
	int cfg_base;
	int cfg, id;
	int reg;
	int i = 0;

	ret = platform_driver_register(&nsc_ircc_driver);
	if (ret) {
		IRDA_ERROR("%s, Can't register driver!\n", driver_name);
		return ret;
	}

	/* Register with PnP subsystem to detect disable ports */
	ret = pnp_register_driver(&nsc_ircc_pnp_driver);
	if (!ret)
		pnp_registered = 1;

	/* Stays -ENODEV unless at least one chip is successfully opened. */
	ret = -ENODEV;

	/* Probe for all the NSC chipsets we know about */
	for (chip = chips; chip->name ; chip++) {
		IRDA_DEBUG(2, "%s(), Probing for %s ...\n", __func__,
			   chip->name);

		/* Try all config registers for this chip */
		for (cfg = 0; cfg < ARRAY_SIZE(chip->cfg); cfg++) {
			cfg_base = chip->cfg[cfg];
			if (!cfg_base)
				continue;

			/* Read index register; 0xff means nothing there */
			reg = inb(cfg_base);
			if (reg == 0xff) {
				IRDA_DEBUG(2, "%s() no chip at 0x%03x\n",
					   __func__, cfg_base);
				continue;
			}

			/* Read chip identification register */
			outb(chip->cid_index, cfg_base);
			id = inb(cfg_base+1);
			if ((id & chip->cid_mask) == chip->cid_value) {
				IRDA_DEBUG(2,
					   "%s() Found %s chip, revision=%d\n",
					   __func__, chip->name,
					   id & ~chip->cid_mask);

				/*
				 * If we found a correct PnP setting,
				 * we first try it.
				 */
				if (pnp_succeeded) {
					memset(&info, 0, sizeof(chipio_t));
					info.cfg_base = cfg_base;
					info.fir_base = pnp_info.fir_base;
					info.dma = pnp_info.dma;
					info.irq = pnp_info.irq;

					if (info.fir_base < 0x2000) {
						IRDA_MESSAGE("%s, chip->init\n",
							     driver_name);
						chip->init(chip, &info);
					} else
						chip->probe(chip, &info);

					if (nsc_ircc_open(&info) >= 0)
						ret = 0;
				}

				/*
				 * Opening based on PnP values failed.
				 * Let's fallback to user values, or probe
				 * the chip.
				 */
				if (ret) {
					IRDA_DEBUG(2, "%s, PnP init failed\n",
						   driver_name);
					memset(&info, 0, sizeof(chipio_t));
					info.cfg_base = cfg_base;
					info.fir_base = io[i];
					info.dma = dma[i];
					info.irq = irq[i];

					/*
					 * If the user supplies the base
					 * address, then we init the chip, if
					 * not we probe the values set by the
					 * BIOS
					 */
					if (io[i] < 0x2000) {
						chip->init(chip, &info);
					} else
						chip->probe(chip, &info);

					if (nsc_ircc_open(&info) >= 0)
						ret = 0;
				}
				i++;
			} else {
				IRDA_DEBUG(2, "%s(), Wrong chip id=0x%02x\n",
					   __func__, id);
			}
		}
	}

	/* Nothing found: undo both registrations before failing the load. */
	if (ret) {
		platform_driver_unregister(&nsc_ircc_driver);
		pnp_unregister_driver(&nsc_ircc_pnp_driver);
		pnp_registered = 0;
	}

	return ret;
}

/*
 * Function nsc_ircc_cleanup ()
 *
 *    Close all configured chips
 *
 */
static void __exit nsc_ircc_cleanup(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(dev_self); i++) {
		if (dev_self[i])
			nsc_ircc_close(dev_self[i]);
	}

	platform_driver_unregister(&nsc_ircc_driver);

	if (pnp_registered)
		pnp_unregister_driver(&nsc_ircc_pnp_driver);

	pnp_registered = 0;
}

/* SIR (slow) and FIR (fast) modes use different xmit handlers;
 * nsc_ircc_change_speed() swaps between these two ops tables. */
static const struct net_device_ops nsc_ircc_sir_ops = {
	.ndo_open       = nsc_ircc_net_open,
	.ndo_stop       = nsc_ircc_net_close,
	.ndo_start_xmit = nsc_ircc_hard_xmit_sir,
	.ndo_do_ioctl   = nsc_ircc_net_ioctl,
};

static const struct net_device_ops nsc_ircc_fir_ops = {
	.ndo_open       = nsc_ircc_net_open,
	.ndo_stop       = nsc_ircc_net_close,
	.ndo_start_xmit = nsc_ircc_hard_xmit_fir,
	.ndo_do_ioctl   = nsc_ircc_net_ioctl,
};

/*
 * Function nsc_ircc_open (iobase, irq)
 *
 *    Open driver instance
 *
 */
static int __init nsc_ircc_open(chipio_t *info)
{
	struct net_device *dev;
	struct nsc_ircc_cb *self;
	void *ret;
	int err, chip_index;

	IRDA_DEBUG(2, "%s()\n", __func__);

	/* Find the first free slot in dev_self[] */
	for (chip_index = 0; chip_index < ARRAY_SIZE(dev_self); chip_index++) {
		if (!dev_self[chip_index])
			break;
	}

	if (chip_index == ARRAY_SIZE(dev_self)) {
		IRDA_ERROR("%s(), maximum number of supported chips reached!\n", __func__);
		return -ENOMEM;
	}

	IRDA_MESSAGE("%s, Found chip at base=0x%03x\n", driver_name,
		     info->cfg_base);

	if ((nsc_ircc_setup(info)) == -1)
		return -1;

	IRDA_MESSAGE("%s, driver loaded (Dag Brattli)\n", driver_name);

	dev = alloc_irdadev(sizeof(struct nsc_ircc_cb));
	if (dev == NULL) {
		IRDA_ERROR("%s(), can't allocate memory for "
			   "control block!\n", __func__);
		return -ENOMEM;
	}

	self = netdev_priv(dev);
	self->netdev = dev;
	spin_lock_init(&self->lock);

	/* Need to store self somewhere */
	dev_self[chip_index] = self;
	self->index = chip_index;

	/* Initialize IO */
	self->io.cfg_base  = info->cfg_base;
	self->io.fir_base  = info->fir_base;
	self->io.irq       = info->irq;
	self->io.fir_ext   = CHIP_IO_EXTENT;
	self->io.dma       = info->dma;
	self->io.fifo_size = 32;

	/* Reserve the ioports that we need */
	ret = request_region(self->io.fir_base, self->io.fir_ext, driver_name);
	if (!ret) {
		IRDA_WARNING("%s(), can't get iobase of 0x%03x\n",
			     __func__, self->io.fir_base);
		err = -ENODEV;
		goto out1;
	}

	/* Initialize QoS for this device */
	irda_init_max_qos_capabilies(&self->qos);

	/* The only value we must override is the baudrate */
	self->qos.baud_rate.bits = IR_9600|IR_19200|IR_38400|IR_57600|
		IR_115200|IR_576000|IR_1152000 |(IR_4000000 << 8);

	self->qos.min_turn_time.bits = qos_mtt_bits;
	irda_qos_bits_to_value(&self->qos);

	/* Max DMA buffer size needed = (data_size + 6) * (window_size) + 6; */
	self->rx_buff.truesize = 14384;
	self->tx_buff.truesize = 14384;

	/* Allocate memory if needed.
	 * NOTE(review): legacy NULL-device dma_alloc_coherent() — modern
	 * kernels require a real struct device here; verify target kernel. */
	self->rx_buff.head =
		dma_alloc_coherent(NULL, self->rx_buff.truesize,
				   &self->rx_buff_dma, GFP_KERNEL);
	if (self->rx_buff.head == NULL) {
		err = -ENOMEM;
		goto out2;
	}
	memset(self->rx_buff.head, 0, self->rx_buff.truesize);

	self->tx_buff.head =
		dma_alloc_coherent(NULL, self->tx_buff.truesize,
				   &self->tx_buff_dma, GFP_KERNEL);
	if (self->tx_buff.head == NULL) {
		err = -ENOMEM;
		goto out3;
	}
	memset(self->tx_buff.head, 0, self->tx_buff.truesize);

	self->rx_buff.in_frame = FALSE;
	self->rx_buff.state = OUTSIDE_FRAME;
	self->tx_buff.data = self->tx_buff.head;
	self->rx_buff.data = self->rx_buff.head;

	/* Reset Tx queue info */
	self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
	self->tx_fifo.tail = self->tx_buff.head;

	/* Override the network functions we need to use */
	dev->netdev_ops = &nsc_ircc_sir_ops;

	err = register_netdev(dev);
	if (err) {
		IRDA_ERROR("%s(), register_netdev() failed!\n", __func__);
		goto out4;
	}
	IRDA_MESSAGE("IrDA: Registered device %s\n", dev->name);

	/* Check if user has supplied a valid dongle id or not */
	if ((dongle_id <= 0) ||
	    (dongle_id >= ARRAY_SIZE(dongle_types))) {
		dongle_id = nsc_ircc_read_dongle_id(self->io.fir_base);

		IRDA_MESSAGE("%s, Found dongle: %s\n", driver_name,
			     dongle_types[dongle_id]);
	} else {
		IRDA_MESSAGE("%s, Using dongle: %s\n", driver_name,
			     dongle_types[dongle_id]);
	}

	self->io.dongle_id = dongle_id;
	nsc_ircc_init_dongle_interface(self->io.fir_base, dongle_id);

	self->pldev = platform_device_register_simple(NSC_IRCC_DRIVER_NAME,
						      self->index, NULL, 0);
	if (IS_ERR(self->pldev)) {
		err = PTR_ERR(self->pldev);
		goto out5;
	}
	platform_set_drvdata(self->pldev, self);

	return chip_index;

	/* Error paths unwind in strict reverse order of acquisition. */
 out5:
	unregister_netdev(dev);
 out4:
	dma_free_coherent(NULL, self->tx_buff.truesize,
			  self->tx_buff.head, self->tx_buff_dma);
 out3:
	dma_free_coherent(NULL, self->rx_buff.truesize,
			  self->rx_buff.head, self->rx_buff_dma);
 out2:
	release_region(self->io.fir_base, self->io.fir_ext);
 out1:
	free_netdev(dev);
	dev_self[chip_index] = NULL;
	return err;
}

/*
 * Function
nsc_ircc_close (self) * * Close driver instance * */ static int __exit nsc_ircc_close(struct nsc_ircc_cb *self) { int iobase; IRDA_DEBUG(4, "%s()\n", __func__); IRDA_ASSERT(self != NULL, return -1;); iobase = self->io.fir_base; platform_device_unregister(self->pldev); /* Remove netdevice */ unregister_netdev(self->netdev); /* Release the PORT that this driver is using */ IRDA_DEBUG(4, "%s(), Releasing Region %03x\n", __func__, self->io.fir_base); release_region(self->io.fir_base, self->io.fir_ext); if (self->tx_buff.head) dma_free_coherent(NULL, self->tx_buff.truesize, self->tx_buff.head, self->tx_buff_dma); if (self->rx_buff.head) dma_free_coherent(NULL, self->rx_buff.truesize, self->rx_buff.head, self->rx_buff_dma); dev_self[self->index] = NULL; free_netdev(self->netdev); return 0; } /* * Function nsc_ircc_init_108 (iobase, cfg_base, irq, dma) * * Initialize the NSC '108 chip * */ static int nsc_ircc_init_108(nsc_chip_t *chip, chipio_t *info) { int cfg_base = info->cfg_base; __u8 temp=0; outb(2, cfg_base); /* Mode Control Register (MCTL) */ outb(0x00, cfg_base+1); /* Disable device */ /* Base Address and Interrupt Control Register (BAIC) */ outb(CFG_108_BAIC, cfg_base); switch (info->fir_base) { case 0x3e8: outb(0x14, cfg_base+1); break; case 0x2e8: outb(0x15, cfg_base+1); break; case 0x3f8: outb(0x16, cfg_base+1); break; case 0x2f8: outb(0x17, cfg_base+1); break; default: IRDA_ERROR("%s(), invalid base_address", __func__); } /* Control Signal Routing Register (CSRT) */ switch (info->irq) { case 3: temp = 0x01; break; case 4: temp = 0x02; break; case 5: temp = 0x03; break; case 7: temp = 0x04; break; case 9: temp = 0x05; break; case 11: temp = 0x06; break; case 15: temp = 0x07; break; default: IRDA_ERROR("%s(), invalid irq", __func__); } outb(CFG_108_CSRT, cfg_base); switch (info->dma) { case 0: outb(0x08+temp, cfg_base+1); break; case 1: outb(0x10+temp, cfg_base+1); break; case 3: outb(0x18+temp, cfg_base+1); break; default: IRDA_ERROR("%s(), invalid dma", 
__func__); } outb(CFG_108_MCTL, cfg_base); /* Mode Control Register (MCTL) */ outb(0x03, cfg_base+1); /* Enable device */ return 0; } /* * Function nsc_ircc_probe_108 (chip, info) * * * */ static int nsc_ircc_probe_108(nsc_chip_t *chip, chipio_t *info) { int cfg_base = info->cfg_base; int reg; /* Read address and interrupt control register (BAIC) */ outb(CFG_108_BAIC, cfg_base); reg = inb(cfg_base+1); switch (reg & 0x03) { case 0: info->fir_base = 0x3e8; break; case 1: info->fir_base = 0x2e8; break; case 2: info->fir_base = 0x3f8; break; case 3: info->fir_base = 0x2f8; break; } info->sir_base = info->fir_base; IRDA_DEBUG(2, "%s(), probing fir_base=0x%03x\n", __func__, info->fir_base); /* Read control signals routing register (CSRT) */ outb(CFG_108_CSRT, cfg_base); reg = inb(cfg_base+1); switch (reg & 0x07) { case 0: info->irq = -1; break; case 1: info->irq = 3; break; case 2: info->irq = 4; break; case 3: info->irq = 5; break; case 4: info->irq = 7; break; case 5: info->irq = 9; break; case 6: info->irq = 11; break; case 7: info->irq = 15; break; } IRDA_DEBUG(2, "%s(), probing irq=%d\n", __func__, info->irq); /* Currently we only read Rx DMA but it will also be used for Tx */ switch ((reg >> 3) & 0x03) { case 0: info->dma = -1; break; case 1: info->dma = 0; break; case 2: info->dma = 1; break; case 3: info->dma = 3; break; } IRDA_DEBUG(2, "%s(), probing dma=%d\n", __func__, info->dma); /* Read mode control register (MCTL) */ outb(CFG_108_MCTL, cfg_base); reg = inb(cfg_base+1); info->enabled = reg & 0x01; info->suspended = !((reg >> 1) & 0x01); return 0; } /* * Function nsc_ircc_init_338 (chip, info) * * Initialize the NSC '338 chip. Remember that the 87338 needs two * consecutive writes to the data registers while CPU interrupts are * disabled. The 97338 does not require this, but shouldn't be any * harm if we do it anyway. 
*/ static int nsc_ircc_init_338(nsc_chip_t *chip, chipio_t *info) { /* No init yet */ return 0; } /* * Function nsc_ircc_probe_338 (chip, info) * * * */ static int nsc_ircc_probe_338(nsc_chip_t *chip, chipio_t *info) { int cfg_base = info->cfg_base; int reg, com = 0; int pnp; /* Read function enable register (FER) */ outb(CFG_338_FER, cfg_base); reg = inb(cfg_base+1); info->enabled = (reg >> 2) & 0x01; /* Check if we are in Legacy or PnP mode */ outb(CFG_338_PNP0, cfg_base); reg = inb(cfg_base+1); pnp = (reg >> 3) & 0x01; if (pnp) { IRDA_DEBUG(2, "(), Chip is in PnP mode\n"); outb(0x46, cfg_base); reg = (inb(cfg_base+1) & 0xfe) << 2; outb(0x47, cfg_base); reg |= ((inb(cfg_base+1) & 0xfc) << 8); info->fir_base = reg; } else { /* Read function address register (FAR) */ outb(CFG_338_FAR, cfg_base); reg = inb(cfg_base+1); switch ((reg >> 4) & 0x03) { case 0: info->fir_base = 0x3f8; break; case 1: info->fir_base = 0x2f8; break; case 2: com = 3; break; case 3: com = 4; break; } if (com) { switch ((reg >> 6) & 0x03) { case 0: if (com == 3) info->fir_base = 0x3e8; else info->fir_base = 0x2e8; break; case 1: if (com == 3) info->fir_base = 0x338; else info->fir_base = 0x238; break; case 2: if (com == 3) info->fir_base = 0x2e8; else info->fir_base = 0x2e0; break; case 3: if (com == 3) info->fir_base = 0x220; else info->fir_base = 0x228; break; } } } info->sir_base = info->fir_base; /* Read PnP register 1 (PNP1) */ outb(CFG_338_PNP1, cfg_base); reg = inb(cfg_base+1); info->irq = reg >> 4; /* Read PnP register 3 (PNP3) */ outb(CFG_338_PNP3, cfg_base); reg = inb(cfg_base+1); info->dma = (reg & 0x07) - 1; /* Read power and test register (PTR) */ outb(CFG_338_PTR, cfg_base); reg = inb(cfg_base+1); info->suspended = reg & 0x01; return 0; } /* * Function nsc_ircc_init_39x (chip, info) * * Now that we know it's a '39x (see probe below), we need to * configure it so we can use it. 
 *
 * The NSC '338 chip is a Super I/O chip with a "bank" architecture,
 * the configuration of the different functionality (serial, parallel,
 * floppy...) are each in a different bank (Logical Device Number).
 * The base address, irq and dma configuration registers are common
 * to all functionalities (index 0x30 to 0x7F).
 * There is only one configuration register specific to the
 * serial port, CFG_39X_SPC.
 * JeanII
 *
 * Note : this code was written by Jan Frey <janfrey@web.de>
 */
static int nsc_ircc_init_39x(nsc_chip_t *chip, chipio_t *info)
{
	int cfg_base = info->cfg_base;
	int enabled;

	/* User is sure about his config... accept it. */
	IRDA_DEBUG(2, "%s(): nsc_ircc_init_39x (user settings): "
		   "io=0x%04x, irq=%d, dma=%d\n",
		   __func__, info->fir_base, info->irq, info->dma);

	/* Access bank for SP2 */
	outb(CFG_39X_LDN, cfg_base);
	outb(0x02, cfg_base+1);

	/* Configure SP2 */

	/* We want to enable the device if not enabled */
	outb(CFG_39X_ACT, cfg_base);
	enabled = inb(cfg_base+1) & 0x01;

	if (!enabled) {
		/* Enable the device */
		outb(CFG_39X_SIOCF1, cfg_base);
		outb(0x01, cfg_base+1);
		/* May want to update info->enabled. Jean II */
	}

	/* Enable UART bank switching (bit 7) ; Sets the chip to normal
	 * power mode (wake up from sleep mode) (bit 1) */
	outb(CFG_39X_SPC, cfg_base);
	outb(0x82, cfg_base+1);

	return 0;
}

/*
 * Function nsc_ircc_probe_39x (chip, info)
 *
 *    Test if we really have a '39x chip at the given address
 *
 * Note : this code was written by Jan Frey <janfrey@web.de>
 */
static int nsc_ircc_probe_39x(nsc_chip_t *chip, chipio_t *info)
{
	int cfg_base = info->cfg_base;
	int reg1, reg2, irq, irqt, dma1, dma2;
	int enabled, susp;

	IRDA_DEBUG(2, "%s(), nsc_ircc_probe_39x, base=%d\n",
		   __func__, cfg_base);

	/* This function should be executed with irq off to avoid
	 * another driver messing with the Super I/O bank - Jean II */

	/* Access bank for SP2 */
	outb(CFG_39X_LDN, cfg_base);
	outb(0x02, cfg_base+1);

	/* Read infos about SP2 ; store in info struct */
	outb(CFG_39X_BASEH, cfg_base);
	reg1 = inb(cfg_base+1);
	outb(CFG_39X_BASEL, cfg_base);
	reg2 = inb(cfg_base+1);
	info->fir_base = (reg1 << 8) | reg2;

	outb(CFG_39X_IRQNUM, cfg_base);
	irq = inb(cfg_base+1);
	outb(CFG_39X_IRQSEL, cfg_base);
	irqt = inb(cfg_base+1);
	info->irq = irq;

	outb(CFG_39X_DMA0, cfg_base);
	dma1 = inb(cfg_base+1);
	outb(CFG_39X_DMA1, cfg_base);
	dma2 = inb(cfg_base+1);
	info->dma = dma1 -1;

	outb(CFG_39X_ACT, cfg_base);
	info->enabled = enabled = inb(cfg_base+1) & 0x01;
	outb(CFG_39X_SPC, cfg_base);
	susp = 1 - ((inb(cfg_base+1) & 0x02) >> 1);

	IRDA_DEBUG(2, "%s(): io=0x%02x%02x, irq=%d (type %d), rxdma=%d, txdma=%d, enabled=%d (suspended=%d)\n", __func__, reg1,reg2,irq,irqt,dma1,dma2,enabled,susp);

	/* Configure SP2 */

	/* We want to enable the device if not enabled */
	outb(CFG_39X_ACT, cfg_base);
	enabled = inb(cfg_base+1) & 0x01;

	if (!enabled) {
		/* Enable the device */
		outb(CFG_39X_SIOCF1, cfg_base);
		outb(0x01, cfg_base+1);
		/* May want to update info->enabled. Jean II */
	}

	/* Enable UART bank switching (bit 7) ; Sets the chip to normal
	 * power mode (wake up from sleep mode) (bit 1) */
	outb(CFG_39X_SPC, cfg_base);
	outb(0x82, cfg_base+1);

	return 0;
}

#ifdef CONFIG_PNP
/* PNP probing: record the PnP-assigned resources in pnp_info for the
 * probe loop in nsc_ircc_init(); never fails the PnP device itself. */
static int nsc_ircc_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *id)
{
	memset(&pnp_info, 0, sizeof(chipio_t));
	pnp_info.irq = -1;
	pnp_info.dma = -1;
	pnp_succeeded = 1;

	if (id->driver_data & NSC_FORCE_DONGLE_TYPE9)
		dongle_id = 0x9;

	/* There doesn't seem to be any way of getting the cfg_base.
	 * On my box, cfg_base is in the PnP descriptor of the
	 * motherboard. Oh well... Jean II */

	if (pnp_port_valid(dev, 0) &&
		!(pnp_port_flags(dev, 0) & IORESOURCE_DISABLED))
		pnp_info.fir_base = pnp_port_start(dev, 0);

	if (pnp_irq_valid(dev, 0) &&
		!(pnp_irq_flags(dev, 0) & IORESOURCE_DISABLED))
		pnp_info.irq = pnp_irq(dev, 0);

	if (pnp_dma_valid(dev, 0) &&
		!(pnp_dma_flags(dev, 0) & IORESOURCE_DISABLED))
		pnp_info.dma = pnp_dma(dev, 0);

	IRDA_DEBUG(0, "%s() : From PnP, found firbase 0x%03X ; irq %d ; dma %d.\n",
		   __func__, pnp_info.fir_base, pnp_info.irq, pnp_info.dma);

	if((pnp_info.fir_base == 0) ||
	   (pnp_info.irq == -1) || (pnp_info.dma == -1)) {
		/* Returning an error will disable the device. Yuck ! */
		//return -EINVAL;
		pnp_succeeded = 0;
	}

	return 0;
}
#endif

/*
 * Function nsc_ircc_setup (info)
 *
 *    Returns non-negative on success.
 *
 */
static int nsc_ircc_setup(chipio_t *info)
{
	int version;
	int iobase = info->fir_base;

	/* Read the Module ID */
	switch_bank(iobase, BANK3);
	version = inb(iobase+MID);

	IRDA_DEBUG(2, "%s() Driver %s Found chip version %02x\n",
		   __func__, driver_name, version);

	/* Should be 0x2? */
	if (0x20 != (version & 0xf0)) {
		IRDA_ERROR("%s, Wrong chip version %02x\n",
			   driver_name, version);
		return -1;
	}

	/* Switch to advanced mode */
	switch_bank(iobase, BANK2);
	outb(ECR1_EXT_SL, iobase+ECR1);
	switch_bank(iobase, BANK0);

	/* Set FIFO threshold to TX17, RX16, reset and enable FIFO's */
	switch_bank(iobase, BANK0);
	outb(FCR_RXTH|FCR_TXTH|FCR_TXSR|FCR_RXSR|FCR_FIFO_EN, iobase+FCR);
	outb(0x03, iobase+LCR); 	/* 8 bit word length */
	outb(MCR_SIR, iobase+MCR); 	/* Start at SIR-mode, also clears LSR*/

	/* Set FIFO size to 32 */
	switch_bank(iobase, BANK2);
	outb(EXCR2_RFSIZ|EXCR2_TFSIZ, iobase+EXCR2);

	/* IRCR2: FEND_MD is not set */
	switch_bank(iobase, BANK5);
	outb(0x02, iobase+4);

	/* Make sure that some defaults are OK */
	switch_bank(iobase, BANK6);
	outb(0x20, iobase+0); /* Set 32 bits FIR CRC */
	outb(0x0a, iobase+1); /* Set MIR pulse width */
	outb(0x0d, iobase+2); /* Set SIR pulse width to 1.6us */
	outb(0x2a, iobase+4); /* Set beginning frag, and preamble length */

	/* Enable receive interrupts */
	switch_bank(iobase, BANK0);
	outb(IER_RXHDL_IE, iobase+IER);

	return 0;
}

/*
 * Function nsc_ircc_read_dongle_id (void)
 *
 *    Try to read dongle identification. This procedure needs to be executed
 *    once after power-on/reset. It also needs to be used whenever you suspect
 *    that the user may have plugged/unplugged the IrDA Dongle.
 */
static int nsc_ircc_read_dongle_id (int iobase)
{
	int dongle_id;
	__u8 bank;

	/* Save current bank (BSR must be restored on exit, see file header) */
	bank = inb(iobase+BSR);

	/* Select Bank 7 */
	switch_bank(iobase, BANK7);

	/* IRCFG4: IRSL0_DS and IRSL21_DS are cleared */
	outb(0x00, iobase+7);

	/* ID0, 1, and 2 are pulled up/down very slowly */
	udelay(50);

	/* IRCFG1: read the ID bits */
	dongle_id = inb(iobase+4) & 0x0f;

#ifdef BROKEN_DONGLE_ID
	if (dongle_id == 0x0a)
		dongle_id = 0x09;
#endif
	/* Go back to bank 0 before returning */
	switch_bank(iobase, BANK0);

	/* Restore bank register */
	outb(bank, iobase+BSR);

	return dongle_id;
}

/*
 * Function nsc_ircc_init_dongle_interface (iobase, dongle_id)
 *
 *     This function initializes the dongle for the transceiver that is
 *     used. This procedure needs to be executed once after
 *     power-on/reset. It also needs to be used whenever you suspect that
 *     the dongle is changed.
 */
static void nsc_ircc_init_dongle_interface (int iobase, int dongle_id)
{
	int bank;

	/* Save current bank */
	bank = inb(iobase+BSR);

	/* Select Bank 7 */
	switch_bank(iobase, BANK7);

	/* IRCFG4: set according to dongle_id */
	switch (dongle_id) {
	case 0x00: /* same as */
	case 0x01: /* Differential serial interface */
		IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
			   __func__, dongle_types[dongle_id]);
		break;
	case 0x02: /* same as */
	case 0x03: /* Reserved */
		IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
			   __func__, dongle_types[dongle_id]);
		break;
	case 0x04: /* Sharp RY5HD01 */
		break;
	case 0x05: /* Reserved, but this is what the Thinkpad reports */
		IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
			   __func__, dongle_types[dongle_id]);
		break;
	case 0x06: /* Single-ended serial interface */
		IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
			   __func__, dongle_types[dongle_id]);
		break;
	case 0x07: /* Consumer-IR only */
		IRDA_DEBUG(0, "%s(), %s is not for IrDA mode\n",
			   __func__, dongle_types[dongle_id]);
		break;
	case 0x08: /* HP HSDL-2300, HP HSDL-3600/HSDL-3610 */
		IRDA_DEBUG(0, "%s(), %s\n",
			   __func__, dongle_types[dongle_id]);
		break;
	case 0x09: /* IBM31T1100 or Temic TFDS6000/TFDS6500 */
		outb(0x28, iobase+7); /* Set irsl[0-2] as output */
		break;
	case 0x0A: /* same as */
	case 0x0B: /* Reserved */
		IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
			   __func__, dongle_types[dongle_id]);
		break;
	case 0x0C: /* same as */
	case 0x0D: /* HP HSDL-1100/HSDL-2100 */
		/*
		 * Set irsl0 as input, irsl[1-2] as output, and separate
		 * inputs are used for SIR and MIR/FIR
		 */
		outb(0x48, iobase+7);
		break;
	case 0x0E: /* Supports SIR Mode only */
		outb(0x28, iobase+7); /* Set irsl[0-2] as output */
		break;
	case 0x0F: /* No dongle connected */
		IRDA_DEBUG(0, "%s(), %s\n",
			   __func__, dongle_types[dongle_id]);

		switch_bank(iobase, BANK0);
		outb(0x62, iobase+MCR);
		break;
	default:
		IRDA_DEBUG(0, "%s(), invalid dongle_id %#x", __func__, dongle_id);
	}

	/* IRCFG1: IRSL1 and 2 are set to IrDA mode */
	outb(0x00, iobase+4);

	/* Restore bank register */
	outb(bank, iobase+BSR);

} /* set_up_dongle_interface */

/*
 * Function nsc_ircc_change_dongle_speed (iobase, speed, dongle_id)
 *
 *    Change speed of the attach dongle
 *
 */
static void nsc_ircc_change_dongle_speed(int iobase, int speed, int dongle_id)
{
	__u8 bank;

	/* Save current bank */
	bank = inb(iobase+BSR);

	/* Select Bank 7 */
	switch_bank(iobase, BANK7);

	/* IRCFG1: set according to dongle_id */
	switch (dongle_id) {
	case 0x00: /* same as */
	case 0x01: /* Differential serial interface */
		IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
			   __func__, dongle_types[dongle_id]);
		break;
	case 0x02: /* same as */
	case 0x03: /* Reserved */
		IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
			   __func__, dongle_types[dongle_id]);
		break;
	case 0x04: /* Sharp RY5HD01 */
		break;
	case 0x05: /* Reserved */
		IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
			   __func__, dongle_types[dongle_id]);
		break;
	case 0x06: /* Single-ended serial interface */
		IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
			   __func__, dongle_types[dongle_id]);
		break;
	case 0x07: /* Consumer-IR only */
		IRDA_DEBUG(0, "%s(), %s is not for IrDA mode\n",
			   __func__, dongle_types[dongle_id]);
		break;
	case 0x08: /* HP HSDL-2300, HP HSDL-3600/HSDL-3610 */
		IRDA_DEBUG(0, "%s(), %s\n",
			   __func__, dongle_types[dongle_id]);
		outb(0x00, iobase+4);
		if (speed > 115200)
			outb(0x01, iobase+4);
		break;
	case 0x09: /* IBM31T1100 or Temic TFDS6000/TFDS6500 */
		outb(0x01, iobase+4);

		if (speed == 4000000) {
			/* There was a cli() there, but we now are already
			 * under spin_lock_irqsave() - JeanII */
			outb(0x81, iobase+4);
			outb(0x80, iobase+4);
		} else
			outb(0x00, iobase+4);
		break;
	case 0x0A: /* same as */
	case 0x0B: /* Reserved */
		IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
			   __func__, dongle_types[dongle_id]);
		break;
	case 0x0C: /* same as */
	case 0x0D: /* HP HSDL-1100/HSDL-2100 */
		break;
	case 0x0E: /* Supports SIR Mode only */
		break;
	case 0x0F: /* No dongle connected */
		IRDA_DEBUG(0, "%s(), %s is not for IrDA mode\n",
			   __func__, dongle_types[dongle_id]);

		switch_bank(iobase, BANK0);
		outb(0x62, iobase+MCR);
		break;
	default:
		IRDA_DEBUG(0, "%s(), invalid data_rate\n", __func__);
	}
	/* Restore bank register */
	outb(bank, iobase+BSR);
}

/*
 * Function nsc_ircc_change_speed (self, baud)
 *
 *    Change the speed of the device
 *
 * This function *must* be called with irq off and spin-lock.
 */
static __u8 nsc_ircc_change_speed(struct nsc_ircc_cb *self, __u32 speed)
{
	/* NOTE(review): self->netdev is read here, before the NULL assert
	 * below — the assert can never trigger usefully. */
	struct net_device *dev = self->netdev;
	__u8 mcr = MCR_SIR;
	int iobase;
	__u8 bank;
	__u8 ier;  /* Interrupt enable register */

	IRDA_DEBUG(2, "%s(), speed=%d\n", __func__, speed);

	IRDA_ASSERT(self != NULL, return 0;);

	iobase = self->io.fir_base;

	/* Update accounting for new speed */
	self->io.speed = speed;

	/* Save current bank */
	bank = inb(iobase+BSR);

	/* Disable interrupts */
	switch_bank(iobase, BANK0);
	outb(0, iobase+IER);

	/* Select Bank 2 */
	switch_bank(iobase, BANK2);
	outb(0x00, iobase+BGDH);

	switch (speed) {
	case 9600:   outb(0x0c, iobase+BGDL); break;
	case 19200:  outb(0x06, iobase+BGDL); break;
	case 38400:  outb(0x03, iobase+BGDL); break;
	case 57600:  outb(0x02, iobase+BGDL); break;
	case 115200: outb(0x01, iobase+BGDL); break;
	case 576000:
		switch_bank(iobase, BANK5);

		/* IRCR2: MDRS is set */
		outb(inb(iobase+4) | 0x04, iobase+4);

		mcr = MCR_MIR;
		IRDA_DEBUG(0, "%s(), handling baud of 576000\n", __func__);
		break;
	case 1152000:
		mcr = MCR_MIR;
		IRDA_DEBUG(0, "%s(), handling baud of 1152000\n", __func__);
		break;
	case 4000000:
		mcr = MCR_FIR;
		IRDA_DEBUG(0, "%s(), handling baud of 4000000\n", __func__);
		break;
	default:
		mcr = MCR_FIR;
		IRDA_DEBUG(0, "%s(), unknown baud rate of %d\n",
			   __func__, speed);
		break;
	}

	/* Set appropriate speed mode */
	switch_bank(iobase, BANK0);
	outb(mcr | MCR_TX_DFR, iobase+MCR);

	/* Give some hits to the transceiver */
	nsc_ircc_change_dongle_speed(iobase, speed, self->io.dongle_id);

	/* Set FIFO threshold to TX17, RX16 */
	switch_bank(iobase, BANK0);
	outb(0x00, iobase+FCR);
	outb(FCR_FIFO_EN, iobase+FCR);
	outb(FCR_RXTH|     /* Set Rx FIFO threshold */
	     FCR_TXTH|     /* Set Tx FIFO threshold */
	     FCR_TXSR|     /* Reset Tx FIFO */
	     FCR_RXSR|     /* Reset Rx FIFO */
	     FCR_FIFO_EN,  /* Enable FIFOs */
	     iobase+FCR);

	/* Set FIFO size to 32 */
	switch_bank(iobase, BANK2);
	outb(EXCR2_RFSIZ|EXCR2_TFSIZ, iobase+EXCR2);

	/* Enable some interrupts so we can receive frames */
	switch_bank(iobase, BANK0);
	if (speed > 115200) {
		/* Install FIR xmit handler */
		dev->netdev_ops = &nsc_ircc_fir_ops;
		ier = IER_SFIF_IE;
		nsc_ircc_dma_receive(self);
	} else {
		/* Install SIR xmit handler */
		dev->netdev_ops = &nsc_ircc_sir_ops;
		ier = IER_RXHDL_IE;
	}
	/* Set our current interrupt mask */
	outb(ier, iobase+IER);

	/* Restore BSR */
	outb(bank, iobase+BSR);

	/* Make sure interrupt handlers keep the proper interrupt mask */
	return ier;
}

/*
 * Function nsc_ircc_hard_xmit (skb, dev)
 *
 *    Transmit the frame!
 *
 */
static netdev_tx_t nsc_ircc_hard_xmit_sir(struct sk_buff *skb,
					  struct net_device *dev)
{
	struct nsc_ircc_cb *self;
	unsigned long flags;
	int iobase;
	__s32 speed;
	__u8 bank;

	self = netdev_priv(dev);

	IRDA_ASSERT(self != NULL, return NETDEV_TX_OK;);

	iobase = self->io.fir_base;

	netif_stop_queue(dev);

	/* Make sure tests *& speed change are atomic */
	spin_lock_irqsave(&self->lock, flags);

	/* Check if we need to change the speed */
	speed = irda_get_next_speed(skb);
	if ((speed != self->io.speed) && (speed != -1)) {
		/* Check for empty frame. */
		if (!skb->len) {
			/* If we just sent a frame, we get called before
			 * the last bytes get out (because of the SIR FIFO).
			 * If this is the case, let interrupt handler change
			 * the speed itself... Jean II */
			if (self->io.direction == IO_RECV) {
				nsc_ircc_change_speed(self, speed);
				/* TODO : For SIR->SIR, the next packet
				 * may get corrupted - Jean II */
				netif_wake_queue(dev);
			} else {
				self->new_speed = speed;
				/* Queue will be restarted after speed change
				 * to make sure packets gets through the
				 * proper xmit handler - Jean II */
			}
			dev->trans_start = jiffies;
			spin_unlock_irqrestore(&self->lock, flags);
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		} else
			self->new_speed = speed;
	}

	/* Save current bank */
	bank = inb(iobase+BSR);

	self->tx_buff.data = self->tx_buff.head;

	self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data,
					   self->tx_buff.truesize);

	dev->stats.tx_bytes += self->tx_buff.len;

	/* Add interrupt on tx low level (will fire immediately) */
	switch_bank(iobase, BANK0);
	outb(IER_TXLDL_IE, iobase+IER);

	/* Restore bank register */
	outb(bank, iobase+BSR);

	dev->trans_start = jiffies;
	spin_unlock_irqrestore(&self->lock, flags);

	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}

static netdev_tx_t nsc_ircc_hard_xmit_fir(struct sk_buff *skb,
					  struct net_device *dev)
{
	struct nsc_ircc_cb *self;
	unsigned long flags;
	int iobase;
	__s32 speed;
	__u8 bank;
	int mtt, diff;

	self = netdev_priv(dev);
	iobase = self->io.fir_base;

	netif_stop_queue(dev);

	/* Make sure tests *& speed change are atomic */
	spin_lock_irqsave(&self->lock, flags);

	/* Check if we need to change the speed */
	speed = irda_get_next_speed(skb);
	if ((speed != self->io.speed) && (speed != -1)) {
		/* Check for empty frame. */
		if (!skb->len) {
			/* If we are currently transmitting, defer to
			 * interrupt handler. - Jean II */
			if(self->tx_fifo.len == 0) {
				nsc_ircc_change_speed(self, speed);
				netif_wake_queue(dev);
			} else {
				self->new_speed = speed;
				/* Keep queue stopped :
				 * the speed change operation may change the
				 * xmit handler, and we want to make sure
				 * the next packet get through the proper
				 * Tx path, so block the Tx queue until
				 * the speed change has been done.
* Jean II */ } dev->trans_start = jiffies; spin_unlock_irqrestore(&self->lock, flags); dev_kfree_skb(skb); return NETDEV_TX_OK; } else { /* Change speed after current frame */ self->new_speed = speed; } } /* Save current bank */ bank = inb(iobase+BSR); /* Register and copy this frame to DMA memory */ self->tx_fifo.queue[self->tx_fifo.free].start = self->tx_fifo.tail; self->tx_fifo.queue[self->tx_fifo.free].len = skb->len; self->tx_fifo.tail += skb->len; dev->stats.tx_bytes += skb->len; skb_copy_from_linear_data(skb, self->tx_fifo.queue[self->tx_fifo.free].start, skb->len); self->tx_fifo.len++; self->tx_fifo.free++; /* Start transmit only if there is currently no transmit going on */ if (self->tx_fifo.len == 1) { /* Check if we must wait the min turn time or not */ mtt = irda_get_mtt(skb); if (mtt) { /* Check how much time we have used already */ do_gettimeofday(&self->now); diff = self->now.tv_usec - self->stamp.tv_usec; if (diff < 0) diff += 1000000; /* Check if the mtt is larger than the time we have * already used by all the protocol processing */ if (mtt > diff) { mtt -= diff; /* * Use timer if delay larger than 125 us, and * use udelay for smaller values which should * be acceptable */ if (mtt > 125) { /* Adjust for timer resolution */ mtt = mtt / 125; /* Setup timer */ switch_bank(iobase, BANK4); outb(mtt & 0xff, iobase+TMRL); outb((mtt >> 8) & 0x0f, iobase+TMRH); /* Start timer */ outb(IRCR1_TMR_EN, iobase+IRCR1); self->io.direction = IO_XMIT; /* Enable timer interrupt */ switch_bank(iobase, BANK0); outb(IER_TMR_IE, iobase+IER); /* Timer will take care of the rest */ goto out; } else udelay(mtt); } } /* Enable DMA interrupt */ switch_bank(iobase, BANK0); outb(IER_DMA_IE, iobase+IER); /* Transmit frame */ nsc_ircc_dma_xmit(self, iobase); } out: /* Not busy transmitting anymore if window is not full, * and if we don't need to change speed */ if ((self->tx_fifo.free < MAX_TX_WINDOW) && (self->new_speed == 0)) netif_wake_queue(self->netdev); /* Restore bank 
register */ outb(bank, iobase+BSR); dev->trans_start = jiffies; spin_unlock_irqrestore(&self->lock, flags); dev_kfree_skb(skb); return NETDEV_TX_OK; } /* * Function nsc_ircc_dma_xmit (self, iobase) * * Transmit data using DMA * */ static void nsc_ircc_dma_xmit(struct nsc_ircc_cb *self, int iobase) { int bsr; /* Save current bank */ bsr = inb(iobase+BSR); /* Disable DMA */ switch_bank(iobase, BANK0); outb(inb(iobase+MCR) & ~MCR_DMA_EN, iobase+MCR); self->io.direction = IO_XMIT; /* Choose transmit DMA channel */ switch_bank(iobase, BANK2); outb(ECR1_DMASWP|ECR1_DMANF|ECR1_EXT_SL, iobase+ECR1); irda_setup_dma(self->io.dma, ((u8 *)self->tx_fifo.queue[self->tx_fifo.ptr].start - self->tx_buff.head) + self->tx_buff_dma, self->tx_fifo.queue[self->tx_fifo.ptr].len, DMA_TX_MODE); /* Enable DMA and SIR interaction pulse */ switch_bank(iobase, BANK0); outb(inb(iobase+MCR)|MCR_TX_DFR|MCR_DMA_EN|MCR_IR_PLS, iobase+MCR); /* Restore bank register */ outb(bsr, iobase+BSR); } /* * Function nsc_ircc_pio_xmit (self, iobase) * * Transmit data using PIO. Returns the number of bytes that actually * got transferred * */ static int nsc_ircc_pio_write(int iobase, __u8 *buf, int len, int fifo_size) { int actual = 0; __u8 bank; IRDA_DEBUG(4, "%s()\n", __func__); /* Save current bank */ bank = inb(iobase+BSR); switch_bank(iobase, BANK0); if (!(inb_p(iobase+LSR) & LSR_TXEMP)) { IRDA_DEBUG(4, "%s(), warning, FIFO not empty yet!\n", __func__); /* FIFO may still be filled to the Tx interrupt threshold */ fifo_size -= 17; } /* Fill FIFO with current frame */ while ((fifo_size-- > 0) && (actual < len)) { /* Transmit next byte */ outb(buf[actual++], iobase+TXD); } IRDA_DEBUG(4, "%s(), fifo_size %d ; %d sent of %d\n", __func__, fifo_size, actual, len); /* Restore bank */ outb(bank, iobase+BSR); return actual; } /* * Function nsc_ircc_dma_xmit_complete (self) * * The transfer of a frame in finished. 
This function will only be called * by the interrupt handler * */ static int nsc_ircc_dma_xmit_complete(struct nsc_ircc_cb *self) { int iobase; __u8 bank; int ret = TRUE; IRDA_DEBUG(2, "%s()\n", __func__); iobase = self->io.fir_base; /* Save current bank */ bank = inb(iobase+BSR); /* Disable DMA */ switch_bank(iobase, BANK0); outb(inb(iobase+MCR) & ~MCR_DMA_EN, iobase+MCR); /* Check for underrrun! */ if (inb(iobase+ASCR) & ASCR_TXUR) { self->netdev->stats.tx_errors++; self->netdev->stats.tx_fifo_errors++; /* Clear bit, by writing 1 into it */ outb(ASCR_TXUR, iobase+ASCR); } else { self->netdev->stats.tx_packets++; } /* Finished with this frame, so prepare for next */ self->tx_fifo.ptr++; self->tx_fifo.len--; /* Any frames to be sent back-to-back? */ if (self->tx_fifo.len) { nsc_ircc_dma_xmit(self, iobase); /* Not finished yet! */ ret = FALSE; } else { /* Reset Tx FIFO info */ self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0; self->tx_fifo.tail = self->tx_buff.head; } /* Make sure we have room for more frames and * that we don't need to change speed */ if ((self->tx_fifo.free < MAX_TX_WINDOW) && (self->new_speed == 0)) { /* Not busy transmitting anymore */ /* Tell the network layer, that we can accept more frames */ netif_wake_queue(self->netdev); } /* Restore bank */ outb(bank, iobase+BSR); return ret; } /* * Function nsc_ircc_dma_receive (self) * * Get ready for receiving a frame. The device will initiate a DMA * if it starts to receive a frame. 
* */ static int nsc_ircc_dma_receive(struct nsc_ircc_cb *self) { int iobase; __u8 bsr; iobase = self->io.fir_base; /* Reset Tx FIFO info */ self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0; self->tx_fifo.tail = self->tx_buff.head; /* Save current bank */ bsr = inb(iobase+BSR); /* Disable DMA */ switch_bank(iobase, BANK0); outb(inb(iobase+MCR) & ~MCR_DMA_EN, iobase+MCR); /* Choose DMA Rx, DMA Fairness, and Advanced mode */ switch_bank(iobase, BANK2); outb(ECR1_DMANF|ECR1_EXT_SL, iobase+ECR1); self->io.direction = IO_RECV; self->rx_buff.data = self->rx_buff.head; /* Reset Rx FIFO. This will also flush the ST_FIFO */ switch_bank(iobase, BANK0); outb(FCR_RXSR|FCR_FIFO_EN, iobase+FCR); self->st_fifo.len = self->st_fifo.pending_bytes = 0; self->st_fifo.tail = self->st_fifo.head = 0; irda_setup_dma(self->io.dma, self->rx_buff_dma, self->rx_buff.truesize, DMA_RX_MODE); /* Enable DMA */ switch_bank(iobase, BANK0); outb(inb(iobase+MCR)|MCR_DMA_EN, iobase+MCR); /* Restore bank register */ outb(bsr, iobase+BSR); return 0; } /* * Function nsc_ircc_dma_receive_complete (self) * * Finished with receiving frames * * */ static int nsc_ircc_dma_receive_complete(struct nsc_ircc_cb *self, int iobase) { struct st_fifo *st_fifo; struct sk_buff *skb; __u8 status; __u8 bank; int len; st_fifo = &self->st_fifo; /* Save current bank */ bank = inb(iobase+BSR); /* Read all entries in status FIFO */ switch_bank(iobase, BANK5); while ((status = inb(iobase+FRM_ST)) & FRM_ST_VLD) { /* We must empty the status FIFO no matter what */ len = inb(iobase+RFLFL) | ((inb(iobase+RFLFH) & 0x1f) << 8); if (st_fifo->tail >= MAX_RX_WINDOW) { IRDA_DEBUG(0, "%s(), window is full!\n", __func__); continue; } st_fifo->entries[st_fifo->tail].status = status; st_fifo->entries[st_fifo->tail].len = len; st_fifo->pending_bytes += len; st_fifo->tail++; st_fifo->len++; } /* Try to process all entries in status FIFO */ while (st_fifo->len > 0) { /* Get first entry */ status = 
st_fifo->entries[st_fifo->head].status; len = st_fifo->entries[st_fifo->head].len; st_fifo->pending_bytes -= len; st_fifo->head++; st_fifo->len--; /* Check for errors */ if (status & FRM_ST_ERR_MSK) { if (status & FRM_ST_LOST_FR) { /* Add number of lost frames to stats */ self->netdev->stats.rx_errors += len; } else { /* Skip frame */ self->netdev->stats.rx_errors++; self->rx_buff.data += len; if (status & FRM_ST_MAX_LEN) self->netdev->stats.rx_length_errors++; if (status & FRM_ST_PHY_ERR) self->netdev->stats.rx_frame_errors++; if (status & FRM_ST_BAD_CRC) self->netdev->stats.rx_crc_errors++; } /* The errors below can be reported in both cases */ if (status & FRM_ST_OVR1) self->netdev->stats.rx_fifo_errors++; if (status & FRM_ST_OVR2) self->netdev->stats.rx_fifo_errors++; } else { /* * First we must make sure that the frame we * want to deliver is all in main memory. If we * cannot tell, then we check if the Rx FIFO is * empty. If not then we will have to take a nap * and try again later. */ if (st_fifo->pending_bytes < self->io.fifo_size) { switch_bank(iobase, BANK0); if (inb(iobase+LSR) & LSR_RXDA) { /* Put this entry back in fifo */ st_fifo->head--; st_fifo->len++; st_fifo->pending_bytes += len; st_fifo->entries[st_fifo->head].status = status; st_fifo->entries[st_fifo->head].len = len; /* * DMA not finished yet, so try again * later, set timer value, resolution * 125 us */ switch_bank(iobase, BANK4); outb(0x02, iobase+TMRL); /* x 125 us */ outb(0x00, iobase+TMRH); /* Start timer */ outb(IRCR1_TMR_EN, iobase+IRCR1); /* Restore bank register */ outb(bank, iobase+BSR); return FALSE; /* I'll be back! 
*/ } } /* * Remember the time we received this frame, so we can * reduce the min turn time a bit since we will know * how much time we have used for protocol processing */ do_gettimeofday(&self->stamp); skb = dev_alloc_skb(len+1); if (skb == NULL) { IRDA_WARNING("%s(), memory squeeze, " "dropping frame.\n", __func__); self->netdev->stats.rx_dropped++; /* Restore bank register */ outb(bank, iobase+BSR); return FALSE; } /* Make sure IP header gets aligned */ skb_reserve(skb, 1); /* Copy frame without CRC */ if (self->io.speed < 4000000) { skb_put(skb, len-2); skb_copy_to_linear_data(skb, self->rx_buff.data, len - 2); } else { skb_put(skb, len-4); skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4); } /* Move to next frame */ self->rx_buff.data += len; self->netdev->stats.rx_bytes += len; self->netdev->stats.rx_packets++; skb->dev = self->netdev; skb_reset_mac_header(skb); skb->protocol = htons(ETH_P_IRDA); netif_rx(skb); } } /* Restore bank register */ outb(bank, iobase+BSR); return TRUE; } /* * Function nsc_ircc_pio_receive (self) * * Receive all data in receiver FIFO * */ static void nsc_ircc_pio_receive(struct nsc_ircc_cb *self) { __u8 byte; int iobase; iobase = self->io.fir_base; /* Receive all characters in Rx FIFO */ do { byte = inb(iobase+RXD); async_unwrap_char(self->netdev, &self->netdev->stats, &self->rx_buff, byte); } while (inb(iobase+LSR) & LSR_RXDA); /* Data available */ } /* * Function nsc_ircc_sir_interrupt (self, eir) * * Handle SIR interrupt * */ static void nsc_ircc_sir_interrupt(struct nsc_ircc_cb *self, int eir) { int actual; /* Check if transmit FIFO is low on data */ if (eir & EIR_TXLDL_EV) { /* Write data left in transmit buffer */ actual = nsc_ircc_pio_write(self->io.fir_base, self->tx_buff.data, self->tx_buff.len, self->io.fifo_size); self->tx_buff.data += actual; self->tx_buff.len -= actual; self->io.direction = IO_XMIT; /* Check if finished */ if (self->tx_buff.len > 0) self->ier = IER_TXLDL_IE; else { 
self->netdev->stats.tx_packets++; netif_wake_queue(self->netdev); self->ier = IER_TXEMP_IE; } } /* Check if transmission has completed */ if (eir & EIR_TXEMP_EV) { /* Turn around and get ready to receive some data */ self->io.direction = IO_RECV; self->ier = IER_RXHDL_IE; /* Check if we need to change the speed? * Need to be after self->io.direction to avoid race with * nsc_ircc_hard_xmit_sir() - Jean II */ if (self->new_speed) { IRDA_DEBUG(2, "%s(), Changing speed!\n", __func__); self->ier = nsc_ircc_change_speed(self, self->new_speed); self->new_speed = 0; netif_wake_queue(self->netdev); /* Check if we are going to FIR */ if (self->io.speed > 115200) { /* No need to do anymore SIR stuff */ return; } } } /* Rx FIFO threshold or timeout */ if (eir & EIR_RXHDL_EV) { nsc_ircc_pio_receive(self); /* Keep receiving */ self->ier = IER_RXHDL_IE; } } /* * Function nsc_ircc_fir_interrupt (self, eir) * * Handle MIR/FIR interrupt * */ static void nsc_ircc_fir_interrupt(struct nsc_ircc_cb *self, int iobase, int eir) { __u8 bank; bank = inb(iobase+BSR); /* Status FIFO event*/ if (eir & EIR_SFIF_EV) { /* Check if DMA has finished */ if (nsc_ircc_dma_receive_complete(self, iobase)) { /* Wait for next status FIFO interrupt */ self->ier = IER_SFIF_IE; } else { self->ier = IER_SFIF_IE | IER_TMR_IE; } } else if (eir & EIR_TMR_EV) { /* Timer finished */ /* Disable timer */ switch_bank(iobase, BANK4); outb(0, iobase+IRCR1); /* Clear timer event */ switch_bank(iobase, BANK0); outb(ASCR_CTE, iobase+ASCR); /* Check if this is a Tx timer interrupt */ if (self->io.direction == IO_XMIT) { nsc_ircc_dma_xmit(self, iobase); /* Interrupt on DMA */ self->ier = IER_DMA_IE; } else { /* Check (again) if DMA has finished */ if (nsc_ircc_dma_receive_complete(self, iobase)) { self->ier = IER_SFIF_IE; } else { self->ier = IER_SFIF_IE | IER_TMR_IE; } } } else if (eir & EIR_DMA_EV) { /* Finished with all transmissions? 
*/ if (nsc_ircc_dma_xmit_complete(self)) { if(self->new_speed != 0) { /* As we stop the Tx queue, the speed change * need to be done when the Tx fifo is * empty. Ask for a Tx done interrupt */ self->ier = IER_TXEMP_IE; } else { /* Check if there are more frames to be * transmitted */ if (irda_device_txqueue_empty(self->netdev)) { /* Prepare for receive */ nsc_ircc_dma_receive(self); self->ier = IER_SFIF_IE; } else IRDA_WARNING("%s(), potential " "Tx queue lockup !\n", __func__); } } else { /* Not finished yet, so interrupt on DMA again */ self->ier = IER_DMA_IE; } } else if (eir & EIR_TXEMP_EV) { /* The Tx FIFO has totally drained out, so now we can change * the speed... - Jean II */ self->ier = nsc_ircc_change_speed(self, self->new_speed); self->new_speed = 0; netif_wake_queue(self->netdev); /* Note : nsc_ircc_change_speed() restarted Rx fifo */ } outb(bank, iobase+BSR); } /* * Function nsc_ircc_interrupt (irq, dev_id, regs) * * An interrupt from the chip has arrived. Time to do some work * */ static irqreturn_t nsc_ircc_interrupt(int irq, void *dev_id) { struct net_device *dev = dev_id; struct nsc_ircc_cb *self; __u8 bsr, eir; int iobase; self = netdev_priv(dev); spin_lock(&self->lock); iobase = self->io.fir_base; bsr = inb(iobase+BSR); /* Save current bank */ switch_bank(iobase, BANK0); self->ier = inb(iobase+IER); eir = inb(iobase+EIR) & self->ier; /* Mask out the interesting ones */ outb(0, iobase+IER); /* Disable interrupts */ if (eir) { /* Dispatch interrupt handler for the current speed */ if (self->io.speed > 115200) nsc_ircc_fir_interrupt(self, iobase, eir); else nsc_ircc_sir_interrupt(self, eir); } outb(self->ier, iobase+IER); /* Restore interrupts */ outb(bsr, iobase+BSR); /* Restore bank register */ spin_unlock(&self->lock); return IRQ_RETVAL(eir); } /* * Function nsc_ircc_is_receiving (self) * * Return TRUE is we are currently receiving a frame * */ static int nsc_ircc_is_receiving(struct nsc_ircc_cb *self) { unsigned long flags; int status = FALSE; 
int iobase; __u8 bank; IRDA_ASSERT(self != NULL, return FALSE;); spin_lock_irqsave(&self->lock, flags); if (self->io.speed > 115200) { iobase = self->io.fir_base; /* Check if rx FIFO is not empty */ bank = inb(iobase+BSR); switch_bank(iobase, BANK2); if ((inb(iobase+RXFLV) & 0x3f) != 0) { /* We are receiving something */ status = TRUE; } outb(bank, iobase+BSR); } else status = (self->rx_buff.state != OUTSIDE_FRAME); spin_unlock_irqrestore(&self->lock, flags); return status; } /* * Function nsc_ircc_net_open (dev) * * Start the device * */ static int nsc_ircc_net_open(struct net_device *dev) { struct nsc_ircc_cb *self; int iobase; char hwname[32]; __u8 bank; IRDA_DEBUG(4, "%s()\n", __func__); IRDA_ASSERT(dev != NULL, return -1;); self = netdev_priv(dev); IRDA_ASSERT(self != NULL, return 0;); iobase = self->io.fir_base; if (request_irq(self->io.irq, nsc_ircc_interrupt, 0, dev->name, dev)) { IRDA_WARNING("%s, unable to allocate irq=%d\n", driver_name, self->io.irq); return -EAGAIN; } /* * Always allocate the DMA channel after the IRQ, and clean up on * failure. */ if (request_dma(self->io.dma, dev->name)) { IRDA_WARNING("%s, unable to allocate dma=%d\n", driver_name, self->io.dma); free_irq(self->io.irq, dev); return -EAGAIN; } /* Save current bank */ bank = inb(iobase+BSR); /* turn on interrupts */ switch_bank(iobase, BANK0); outb(IER_LS_IE | IER_RXHDL_IE, iobase+IER); /* Restore bank register */ outb(bank, iobase+BSR); /* Ready to play! 
*/ netif_start_queue(dev); /* Give self a hardware name */ sprintf(hwname, "NSC-FIR @ 0x%03x", self->io.fir_base); /* * Open new IrLAP layer instance, now that everything should be * initialized properly */ self->irlap = irlap_open(dev, &self->qos, hwname); return 0; } /* * Function nsc_ircc_net_close (dev) * * Stop the device * */ static int nsc_ircc_net_close(struct net_device *dev) { struct nsc_ircc_cb *self; int iobase; __u8 bank; IRDA_DEBUG(4, "%s()\n", __func__); IRDA_ASSERT(dev != NULL, return -1;); self = netdev_priv(dev); IRDA_ASSERT(self != NULL, return 0;); /* Stop device */ netif_stop_queue(dev); /* Stop and remove instance of IrLAP */ if (self->irlap) irlap_close(self->irlap); self->irlap = NULL; iobase = self->io.fir_base; disable_dma(self->io.dma); /* Save current bank */ bank = inb(iobase+BSR); /* Disable interrupts */ switch_bank(iobase, BANK0); outb(0, iobase+IER); free_irq(self->io.irq, dev); free_dma(self->io.dma); /* Restore bank register */ outb(bank, iobase+BSR); return 0; } /* * Function nsc_ircc_net_ioctl (dev, rq, cmd) * * Process IOCTL commands for this device * */ static int nsc_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { struct if_irda_req *irq = (struct if_irda_req *) rq; struct nsc_ircc_cb *self; unsigned long flags; int ret = 0; IRDA_ASSERT(dev != NULL, return -1;); self = netdev_priv(dev); IRDA_ASSERT(self != NULL, return -1;); IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __func__, dev->name, cmd); switch (cmd) { case SIOCSBANDWIDTH: /* Set bandwidth */ if (!capable(CAP_NET_ADMIN)) { ret = -EPERM; break; } spin_lock_irqsave(&self->lock, flags); nsc_ircc_change_speed(self, irq->ifr_baudrate); spin_unlock_irqrestore(&self->lock, flags); break; case SIOCSMEDIABUSY: /* Set media busy */ if (!capable(CAP_NET_ADMIN)) { ret = -EPERM; break; } irda_device_set_media_busy(self->netdev, TRUE); break; case SIOCGRECEIVING: /* Check if we are receiving right now */ /* This is already protected */ irq->ifr_receiving = 
nsc_ircc_is_receiving(self); break; default: ret = -EOPNOTSUPP; } return ret; } static int nsc_ircc_suspend(struct platform_device *dev, pm_message_t state) { struct nsc_ircc_cb *self = platform_get_drvdata(dev); int bank; unsigned long flags; int iobase = self->io.fir_base; if (self->io.suspended) return 0; IRDA_DEBUG(1, "%s, Suspending\n", driver_name); rtnl_lock(); if (netif_running(self->netdev)) { netif_device_detach(self->netdev); spin_lock_irqsave(&self->lock, flags); /* Save current bank */ bank = inb(iobase+BSR); /* Disable interrupts */ switch_bank(iobase, BANK0); outb(0, iobase+IER); /* Restore bank register */ outb(bank, iobase+BSR); spin_unlock_irqrestore(&self->lock, flags); free_irq(self->io.irq, self->netdev); disable_dma(self->io.dma); } self->io.suspended = 1; rtnl_unlock(); return 0; } static int nsc_ircc_resume(struct platform_device *dev) { struct nsc_ircc_cb *self = platform_get_drvdata(dev); unsigned long flags; if (!self->io.suspended) return 0; IRDA_DEBUG(1, "%s, Waking up\n", driver_name); rtnl_lock(); nsc_ircc_setup(&self->io); nsc_ircc_init_dongle_interface(self->io.fir_base, self->io.dongle_id); if (netif_running(self->netdev)) { if (request_irq(self->io.irq, nsc_ircc_interrupt, 0, self->netdev->name, self->netdev)) { IRDA_WARNING("%s, unable to allocate irq=%d\n", driver_name, self->io.irq); /* * Don't fail resume process, just kill this * network interface */ unregister_netdevice(self->netdev); } else { spin_lock_irqsave(&self->lock, flags); nsc_ircc_change_speed(self, self->io.speed); spin_unlock_irqrestore(&self->lock, flags); netif_device_attach(self->netdev); } } else { spin_lock_irqsave(&self->lock, flags); nsc_ircc_change_speed(self, 9600); spin_unlock_irqrestore(&self->lock, flags); } self->io.suspended = 0; rtnl_unlock(); return 0; } MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>"); MODULE_DESCRIPTION("NSC IrDA Device Driver"); MODULE_LICENSE("GPL"); module_param(qos_mtt_bits, int, 0); MODULE_PARM_DESC(qos_mtt_bits, "Minimum Turn 
Time"); module_param_array(io, int, NULL, 0); MODULE_PARM_DESC(io, "Base I/O addresses"); module_param_array(irq, int, NULL, 0); MODULE_PARM_DESC(irq, "IRQ lines"); module_param_array(dma, int, NULL, 0); MODULE_PARM_DESC(dma, "DMA channels"); module_param(dongle_id, int, 0); MODULE_PARM_DESC(dongle_id, "Type-id of used dongle"); module_init(nsc_ircc_init); module_exit(nsc_ircc_cleanup);
gpl-2.0
sgp-blackphone/Blackphone-BP2-Kernel
drivers/net/wireless/ti/wl1251/rx.c
2528
5927
/*
 * This file is part of wl1251
 *
 * Copyright (c) 1998-2007 Texas Instruments Incorporated
 * Copyright (C) 2008 Nokia Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 *
 */

#include <linux/skbuff.h>
#include <linux/gfp.h>
#include <net/mac80211.h>

#include "wl1251.h"
#include "reg.h"
#include "io.h"
#include "rx.h"
#include "cmd.h"
#include "acx.h"

/*
 * Read the descriptor of the currently active RX ring chunk from chip
 * memory into @desc.  The chip double-buffers RX; rx_current_buffer
 * selects which chunk's descriptor to read.
 */
static void wl1251_rx_header(struct wl1251 *wl,
			     struct wl1251_rx_descriptor *desc)
{
	u32 rx_packet_ring_addr;

	rx_packet_ring_addr = wl->data_path->rx_packet_ring_addr;
	if (wl->rx_current_buffer)
		rx_packet_ring_addr += wl->data_path->rx_packet_ring_chunk_size;

	wl1251_mem_read(wl, rx_packet_ring_addr, desc, sizeof(*desc));
}

/*
 * Translate the firmware RX descriptor @desc into a mac80211
 * ieee80211_rx_status in @status.  @beacon is non-zero when the frame
 * was identified as a beacon, in which case (for IBSS) the full 64-bit
 * TSF is fetched via ACX_TSF_INFO to replace the 32-bit timestamp.
 */
static void wl1251_rx_status(struct wl1251 *wl,
			     struct wl1251_rx_descriptor *desc,
			     struct ieee80211_rx_status *status,
			     u8 beacon)
{
	u64 mactime;
	int ret;

	memset(status, 0, sizeof(struct ieee80211_rx_status));

	status->band = IEEE80211_BAND_2GHZ;
	status->mactime = desc->timestamp;

	/*
	 * The rx status timestamp is a 32 bits value while the TSF is a
	 * 64 bits one.
	 * For IBSS merging, TSF is mandatory, so we have to get it
	 * somehow, so we ask for ACX_TSF_INFO.
	 * That could be moved to the get_tsf() hook, but unfortunately,
	 * this one must be atomic, while our SPI routines can sleep.
	 */
	if ((wl->bss_type == BSS_TYPE_IBSS) && beacon) {
		ret = wl1251_acx_tsf_info(wl, &mactime);
		if (ret == 0)
			status->mactime = mactime;
	}

	status->signal = desc->rssi;

	/*
	 * FIXME: guessing that snr needs to be divided by two, otherwise
	 * the values don't make any sense
	 */
	wl->noise = desc->rssi - desc->snr / 2;

	status->freq = ieee80211_channel_to_frequency(desc->channel,
						      status->band);

	status->flag |= RX_FLAG_MACTIME_START;

	if (desc->flags & RX_DESC_ENCRYPTION_MASK) {
		status->flag |= RX_FLAG_IV_STRIPPED | RX_FLAG_MMIC_STRIPPED;

		if (likely(!(desc->flags & RX_DESC_DECRYPT_FAIL)))
			status->flag |= RX_FLAG_DECRYPTED;

		if (unlikely(desc->flags & RX_DESC_MIC_FAIL))
			status->flag |= RX_FLAG_MMIC_ERROR;
	}

	if (unlikely(!(desc->flags & RX_DESC_VALID_FCS)))
		status->flag |= RX_FLAG_FAILED_FCS_CRC;

	switch (desc->rate) {
		/* skip 1 and 12 Mbps because they have same value 0x0a */
	case RATE_2MBPS:
		status->rate_idx = 1;
		break;
	case RATE_5_5MBPS:
		status->rate_idx = 2;
		break;
	case RATE_11MBPS:
		status->rate_idx = 3;
		break;
	case RATE_6MBPS:
		status->rate_idx = 4;
		break;
	case RATE_9MBPS:
		status->rate_idx = 5;
		break;
	case RATE_18MBPS:
		status->rate_idx = 7;
		break;
	case RATE_24MBPS:
		status->rate_idx = 8;
		break;
	case RATE_36MBPS:
		status->rate_idx = 9;
		break;
	case RATE_48MBPS:
		status->rate_idx = 10;
		break;
	case RATE_54MBPS:
		status->rate_idx = 11;
		break;
	}

	/* for 1 and 12 Mbps we have to check the modulation */
	if (desc->rate == RATE_1MBPS) {
		if (!(desc->mod_pre & OFDM_RATE_BIT))
			/* CCK -> RATE_1MBPS */
			status->rate_idx = 0;
		else
			/* OFDM -> RATE_12MBPS */
			status->rate_idx = 6;
	}

	if (desc->mod_pre & SHORT_PREAMBLE_BIT)
		status->flag |= RX_FLAG_SHORTPRE;
}

/*
 * Copy the frame body described by @desc out of the chip's RX packet
 * ring, build the mac80211 RX status for it, and hand the skb to
 * mac80211 via ieee80211_rx_ni().  Also tracks the firmware packet-id
 * sequence and warns when an id was skipped (indicates lost frames).
 */
static void wl1251_rx_body(struct wl1251 *wl,
			   struct wl1251_rx_descriptor *desc)
{
	struct sk_buff *skb;
	struct ieee80211_rx_status status;
	u8 *rx_buffer, beacon = 0;
	u16 length, *fc;
	u32 curr_id, last_id_inc, rx_packet_ring_addr;

	/* Read length is rounded up to the target's alignment */
	length = WL1251_RX_ALIGN(desc->length - PLCP_HEADER_LENGTH);
	curr_id = (desc->flags & RX_DESC_SEQNUM_MASK) >> RX_DESC_PACKETID_SHIFT;
	last_id_inc = (wl->rx_last_id + 1) % (RX_MAX_PACKET_ID + 1);

	if (last_id_inc != curr_id) {
		wl1251_warning("curr ID:%d, last ID inc:%d",
			       curr_id, last_id_inc);
		wl->rx_last_id = curr_id;
	} else {
		wl->rx_last_id = last_id_inc;
	}

	/* The +20 skips the firmware's per-packet header in the ring -
	 * presumably a fixed control header; confirm against firmware docs */
	rx_packet_ring_addr = wl->data_path->rx_packet_ring_addr +
		sizeof(struct wl1251_rx_descriptor) + 20;
	if (wl->rx_current_buffer)
		rx_packet_ring_addr += wl->data_path->rx_packet_ring_chunk_size;

	skb = __dev_alloc_skb(length, GFP_KERNEL);
	if (!skb) {
		wl1251_error("Couldn't allocate RX frame");
		return;
	}

	rx_buffer = skb_put(skb, length);
	wl1251_mem_read(wl, rx_packet_ring_addr, rx_buffer, length);

	/*
	 * The actual length doesn't include the target's alignment.
	 * Use skb_trim() rather than assigning skb->len directly, so
	 * that skb->tail (and tailroom accounting) stay consistent with
	 * the new length.
	 */
	skb_trim(skb, desc->length - PLCP_HEADER_LENGTH);

	fc = (u16 *)skb->data;

	if ((*fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_BEACON)
		beacon = 1;

	wl1251_rx_status(wl, desc, &status, beacon);

	wl1251_debug(DEBUG_RX, "rx skb 0x%p: %d B %s", skb, skb->len,
		     beacon ? "beacon" : "");

	memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
	ieee80211_rx_ni(wl->hw, skb);
}

/*
 * Acknowledge the chip for the chunk we just consumed (PROC0/PROC1
 * depending on which ring buffer was active) and flip to the other
 * ring buffer.
 */
static void wl1251_rx_ack(struct wl1251 *wl)
{
	u32 data, addr;

	if (wl->rx_current_buffer) {
		addr = ACX_REG_INTERRUPT_TRIG_H;
		data = INTR_TRIG_RX_PROC1;
	} else {
		addr = ACX_REG_INTERRUPT_TRIG;
		data = INTR_TRIG_RX_PROC0;
	}

	wl1251_reg_write32(wl, addr, data);

	/* Toggle buffer ring */
	wl->rx_current_buffer = !wl->rx_current_buffer;
}

/*
 * Top-level RX entry point: read the descriptor, deliver the frame
 * body to mac80211, then ACK the chip.  No-op unless the device is ON.
 */
void wl1251_rx(struct wl1251 *wl)
{
	struct wl1251_rx_descriptor *rx_desc;

	if (wl->state != WL1251_STATE_ON)
		return;

	rx_desc = wl->rx_descriptor;

	/* We first read the frame's header */
	wl1251_rx_header(wl, rx_desc);

	/* Now we can read the body */
	wl1251_rx_body(wl, rx_desc);

	/* Finally, we need to ACK the RX */
	wl1251_rx_ack(wl);
}
gpl-2.0
dastin1015/android_kernel_htc_villec2
lib/devres.c
2784
7927
#include <linux/pci.h> #include <linux/io.h> #include <linux/gfp.h> #include <linux/module.h> void devm_ioremap_release(struct device *dev, void *res) { iounmap(*(void __iomem **)res); } static int devm_ioremap_match(struct device *dev, void *res, void *match_data) { return *(void **)res == match_data; } /** * devm_ioremap - Managed ioremap() * @dev: Generic device to remap IO address for * @offset: BUS offset to map * @size: Size of map * * Managed ioremap(). Map is automatically unmapped on driver detach. */ void __iomem *devm_ioremap(struct device *dev, resource_size_t offset, unsigned long size) { void __iomem **ptr, *addr; ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL); if (!ptr) return NULL; addr = ioremap(offset, size); if (addr) { *ptr = addr; devres_add(dev, ptr); } else devres_free(ptr); return addr; } EXPORT_SYMBOL(devm_ioremap); /** * devm_ioremap_nocache - Managed ioremap_nocache() * @dev: Generic device to remap IO address for * @offset: BUS offset to map * @size: Size of map * * Managed ioremap_nocache(). Map is automatically unmapped on driver * detach. */ void __iomem *devm_ioremap_nocache(struct device *dev, resource_size_t offset, unsigned long size) { void __iomem **ptr, *addr; ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL); if (!ptr) return NULL; addr = ioremap_nocache(offset, size); if (addr) { *ptr = addr; devres_add(dev, ptr); } else devres_free(ptr); return addr; } EXPORT_SYMBOL(devm_ioremap_nocache); /** * devm_iounmap - Managed iounmap() * @dev: Generic device to unmap for * @addr: Address to unmap * * Managed iounmap(). @addr must have been mapped using devm_ioremap*(). 
*/ void devm_iounmap(struct device *dev, void __iomem *addr) { iounmap(addr); WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match, (void *)addr)); } EXPORT_SYMBOL(devm_iounmap); #ifdef CONFIG_HAS_IOPORT /* * Generic iomap devres */ static void devm_ioport_map_release(struct device *dev, void *res) { ioport_unmap(*(void __iomem **)res); } static int devm_ioport_map_match(struct device *dev, void *res, void *match_data) { return *(void **)res == match_data; } /** * devm_ioport_map - Managed ioport_map() * @dev: Generic device to map ioport for * @port: Port to map * @nr: Number of ports to map * * Managed ioport_map(). Map is automatically unmapped on driver * detach. */ void __iomem * devm_ioport_map(struct device *dev, unsigned long port, unsigned int nr) { void __iomem **ptr, *addr; ptr = devres_alloc(devm_ioport_map_release, sizeof(*ptr), GFP_KERNEL); if (!ptr) return NULL; addr = ioport_map(port, nr); if (addr) { *ptr = addr; devres_add(dev, ptr); } else devres_free(ptr); return addr; } EXPORT_SYMBOL(devm_ioport_map); /** * devm_ioport_unmap - Managed ioport_unmap() * @dev: Generic device to unmap for * @addr: Address to unmap * * Managed ioport_unmap(). @addr must have been mapped using * devm_ioport_map(). 
*/ void devm_ioport_unmap(struct device *dev, void __iomem *addr) { ioport_unmap(addr); WARN_ON(devres_destroy(dev, devm_ioport_map_release, devm_ioport_map_match, (void *)addr)); } EXPORT_SYMBOL(devm_ioport_unmap); #ifdef CONFIG_PCI /* * PCI iomap devres */ #define PCIM_IOMAP_MAX PCI_ROM_RESOURCE struct pcim_iomap_devres { void __iomem *table[PCIM_IOMAP_MAX]; }; static void pcim_iomap_release(struct device *gendev, void *res) { struct pci_dev *dev = container_of(gendev, struct pci_dev, dev); struct pcim_iomap_devres *this = res; int i; for (i = 0; i < PCIM_IOMAP_MAX; i++) if (this->table[i]) pci_iounmap(dev, this->table[i]); } /** * pcim_iomap_table - access iomap allocation table * @pdev: PCI device to access iomap table for * * Access iomap allocation table for @dev. If iomap table doesn't * exist and @pdev is managed, it will be allocated. All iomaps * recorded in the iomap table are automatically unmapped on driver * detach. * * This function might sleep when the table is first allocated but can * be safely called without context and guaranteed to succed once * allocated. */ void __iomem * const * pcim_iomap_table(struct pci_dev *pdev) { struct pcim_iomap_devres *dr, *new_dr; dr = devres_find(&pdev->dev, pcim_iomap_release, NULL, NULL); if (dr) return dr->table; new_dr = devres_alloc(pcim_iomap_release, sizeof(*new_dr), GFP_KERNEL); if (!new_dr) return NULL; dr = devres_get(&pdev->dev, new_dr, NULL, NULL); return dr->table; } EXPORT_SYMBOL(pcim_iomap_table); /** * pcim_iomap - Managed pcim_iomap() * @pdev: PCI device to iomap for * @bar: BAR to iomap * @maxlen: Maximum length of iomap * * Managed pci_iomap(). Map is automatically unmapped on driver * detach. 
*/ void __iomem * pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen) { void __iomem **tbl; BUG_ON(bar >= PCIM_IOMAP_MAX); tbl = (void __iomem **)pcim_iomap_table(pdev); if (!tbl || tbl[bar]) /* duplicate mappings not allowed */ return NULL; tbl[bar] = pci_iomap(pdev, bar, maxlen); return tbl[bar]; } EXPORT_SYMBOL(pcim_iomap); /** * pcim_iounmap - Managed pci_iounmap() * @pdev: PCI device to iounmap for * @addr: Address to unmap * * Managed pci_iounmap(). @addr must have been mapped using pcim_iomap(). */ void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr) { void __iomem **tbl; int i; pci_iounmap(pdev, addr); tbl = (void __iomem **)pcim_iomap_table(pdev); BUG_ON(!tbl); for (i = 0; i < PCIM_IOMAP_MAX; i++) if (tbl[i] == addr) { tbl[i] = NULL; return; } WARN_ON(1); } EXPORT_SYMBOL(pcim_iounmap); /** * pcim_iomap_regions - Request and iomap PCI BARs * @pdev: PCI device to map IO resources for * @mask: Mask of BARs to request and iomap * @name: Name used when requesting regions * * Request and iomap regions specified by @mask. 
*/ int pcim_iomap_regions(struct pci_dev *pdev, u16 mask, const char *name) { void __iomem * const *iomap; int i, rc; iomap = pcim_iomap_table(pdev); if (!iomap) return -ENOMEM; for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { unsigned long len; if (!(mask & (1 << i))) continue; rc = -EINVAL; len = pci_resource_len(pdev, i); if (!len) goto err_inval; rc = pci_request_region(pdev, i, name); if (rc) goto err_inval; rc = -ENOMEM; if (!pcim_iomap(pdev, i, 0)) goto err_region; } return 0; err_region: pci_release_region(pdev, i); err_inval: while (--i >= 0) { if (!(mask & (1 << i))) continue; pcim_iounmap(pdev, iomap[i]); pci_release_region(pdev, i); } return rc; } EXPORT_SYMBOL(pcim_iomap_regions); /** * pcim_iomap_regions_request_all - Request all BARs and iomap specified ones * @pdev: PCI device to map IO resources for * @mask: Mask of BARs to iomap * @name: Name used when requesting regions * * Request all PCI BARs and iomap regions specified by @mask. */ int pcim_iomap_regions_request_all(struct pci_dev *pdev, u16 mask, const char *name) { int request_mask = ((1 << 6) - 1) & ~mask; int rc; rc = pci_request_selected_regions(pdev, request_mask, name); if (rc) return rc; rc = pcim_iomap_regions(pdev, mask, name); if (rc) pci_release_selected_regions(pdev, request_mask); return rc; } EXPORT_SYMBOL(pcim_iomap_regions_request_all); /** * pcim_iounmap_regions - Unmap and release PCI BARs * @pdev: PCI device to map IO resources for * @mask: Mask of BARs to unmap and release * * Unmap and release regions specified by @mask. */ void pcim_iounmap_regions(struct pci_dev *pdev, u16 mask) { void __iomem * const *iomap; int i; iomap = pcim_iomap_table(pdev); if (!iomap) return; for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { if (!(mask & (1 << i))) continue; pcim_iounmap(pdev, iomap[i]); pci_release_region(pdev, i); } } EXPORT_SYMBOL(pcim_iounmap_regions); #endif #endif
gpl-2.0
maxfierke/axstrom-kernel
arch/arm/plat-samsung/pwm-clock.c
3552
10189
/* linux/arch/arm/plat-s3c24xx/pwm-clock.c * * Copyright (c) 2007 Simtec Electronics * Copyright (c) 2007, 2008 Ben Dooks * Ben Dooks <ben-linux@fluff.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License. */ #include <linux/init.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/errno.h> #include <linux/log2.h> #include <linux/clk.h> #include <linux/err.h> #include <linux/io.h> #include <mach/hardware.h> #include <mach/map.h> #include <asm/irq.h> #include <plat/clock.h> #include <plat/cpu.h> #include <plat/regs-timer.h> #include <mach/pwm-clock.h> /* Each of the timers 0 through 5 go through the following * clock tree, with the inputs depending on the timers. * * pclk ---- [ prescaler 0 ] -+---> timer 0 * +---> timer 1 * * pclk ---- [ prescaler 1 ] -+---> timer 2 * +---> timer 3 * \---> timer 4 * * Which are fed into the timers as so: * * prescaled 0 ---- [ div 2,4,8,16 ] ---\ * [mux] -> timer 0 * tclk 0 ------------------------------/ * * prescaled 0 ---- [ div 2,4,8,16 ] ---\ * [mux] -> timer 1 * tclk 0 ------------------------------/ * * * prescaled 1 ---- [ div 2,4,8,16 ] ---\ * [mux] -> timer 2 * tclk 1 ------------------------------/ * * prescaled 1 ---- [ div 2,4,8,16 ] ---\ * [mux] -> timer 3 * tclk 1 ------------------------------/ * * prescaled 1 ---- [ div 2,4,8, 16 ] --\ * [mux] -> timer 4 * tclk 1 ------------------------------/ * * Since the mux and the divider are tied together in the * same register space, it is impossible to set the parent * and the rate at the same time. To avoid this, we add an * intermediate 'prescaled-and-divided' clock to select * as the parent for the timer input clock called tdiv. 
* * prescaled clk --> pwm-tdiv ---\ * [ mux ] --> timer X * tclk -------------------------/ */ static struct clk clk_timer_scaler[]; static unsigned long clk_pwm_scaler_get_rate(struct clk *clk) { unsigned long tcfg0 = __raw_readl(S3C2410_TCFG0); if (clk == &clk_timer_scaler[1]) { tcfg0 &= S3C2410_TCFG_PRESCALER1_MASK; tcfg0 >>= S3C2410_TCFG_PRESCALER1_SHIFT; } else { tcfg0 &= S3C2410_TCFG_PRESCALER0_MASK; } return clk_get_rate(clk->parent) / (tcfg0 + 1); } static unsigned long clk_pwm_scaler_round_rate(struct clk *clk, unsigned long rate) { unsigned long parent_rate = clk_get_rate(clk->parent); unsigned long divisor = parent_rate / rate; if (divisor > 256) divisor = 256; else if (divisor < 2) divisor = 2; return parent_rate / divisor; } static int clk_pwm_scaler_set_rate(struct clk *clk, unsigned long rate) { unsigned long round = clk_pwm_scaler_round_rate(clk, rate); unsigned long tcfg0; unsigned long divisor; unsigned long flags; divisor = clk_get_rate(clk->parent) / round; divisor--; local_irq_save(flags); tcfg0 = __raw_readl(S3C2410_TCFG0); if (clk == &clk_timer_scaler[1]) { tcfg0 &= ~S3C2410_TCFG_PRESCALER1_MASK; tcfg0 |= divisor << S3C2410_TCFG_PRESCALER1_SHIFT; } else { tcfg0 &= ~S3C2410_TCFG_PRESCALER0_MASK; tcfg0 |= divisor; } __raw_writel(tcfg0, S3C2410_TCFG0); local_irq_restore(flags); return 0; } static struct clk_ops clk_pwm_scaler_ops = { .get_rate = clk_pwm_scaler_get_rate, .set_rate = clk_pwm_scaler_set_rate, .round_rate = clk_pwm_scaler_round_rate, }; static struct clk clk_timer_scaler[] = { [0] = { .name = "pwm-scaler0", .id = -1, .ops = &clk_pwm_scaler_ops, }, [1] = { .name = "pwm-scaler1", .id = -1, .ops = &clk_pwm_scaler_ops, }, }; static struct clk clk_timer_tclk[] = { [0] = { .name = "pwm-tclk0", .id = -1, }, [1] = { .name = "pwm-tclk1", .id = -1, }, }; struct pwm_tdiv_clk { struct clk clk; unsigned int divisor; }; static inline struct pwm_tdiv_clk *to_tdiv(struct clk *clk) { return container_of(clk, struct pwm_tdiv_clk, clk); } static 
unsigned long clk_pwm_tdiv_get_rate(struct clk *clk) { unsigned long tcfg1 = __raw_readl(S3C2410_TCFG1); unsigned int divisor; tcfg1 >>= S3C2410_TCFG1_SHIFT(clk->id); tcfg1 &= S3C2410_TCFG1_MUX_MASK; if (pwm_cfg_src_is_tclk(tcfg1)) divisor = to_tdiv(clk)->divisor; else divisor = tcfg_to_divisor(tcfg1); return clk_get_rate(clk->parent) / divisor; } static unsigned long clk_pwm_tdiv_round_rate(struct clk *clk, unsigned long rate) { unsigned long parent_rate; unsigned long divisor; parent_rate = clk_get_rate(clk->parent); divisor = parent_rate / rate; if (divisor <= 1 && pwm_tdiv_has_div1()) divisor = 1; else if (divisor <= 2) divisor = 2; else if (divisor <= 4) divisor = 4; else if (divisor <= 8) divisor = 8; else divisor = 16; return parent_rate / divisor; } static unsigned long clk_pwm_tdiv_bits(struct pwm_tdiv_clk *divclk) { return pwm_tdiv_div_bits(divclk->divisor); } static void clk_pwm_tdiv_update(struct pwm_tdiv_clk *divclk) { unsigned long tcfg1 = __raw_readl(S3C2410_TCFG1); unsigned long bits = clk_pwm_tdiv_bits(divclk); unsigned long flags; unsigned long shift = S3C2410_TCFG1_SHIFT(divclk->clk.id); local_irq_save(flags); tcfg1 = __raw_readl(S3C2410_TCFG1); tcfg1 &= ~(S3C2410_TCFG1_MUX_MASK << shift); tcfg1 |= bits << shift; __raw_writel(tcfg1, S3C2410_TCFG1); local_irq_restore(flags); } static int clk_pwm_tdiv_set_rate(struct clk *clk, unsigned long rate) { struct pwm_tdiv_clk *divclk = to_tdiv(clk); unsigned long tcfg1 = __raw_readl(S3C2410_TCFG1); unsigned long parent_rate = clk_get_rate(clk->parent); unsigned long divisor; tcfg1 >>= S3C2410_TCFG1_SHIFT(clk->id); tcfg1 &= S3C2410_TCFG1_MUX_MASK; rate = clk_round_rate(clk, rate); divisor = parent_rate / rate; if (divisor > 16) return -EINVAL; divclk->divisor = divisor; /* Update the current MUX settings if we are currently * selected as the clock source for this clock. 
*/ if (!pwm_cfg_src_is_tclk(tcfg1)) clk_pwm_tdiv_update(divclk); return 0; } static struct clk_ops clk_tdiv_ops = { .get_rate = clk_pwm_tdiv_get_rate, .set_rate = clk_pwm_tdiv_set_rate, .round_rate = clk_pwm_tdiv_round_rate, }; static struct pwm_tdiv_clk clk_timer_tdiv[] = { [0] = { .clk = { .name = "pwm-tdiv", .ops = &clk_tdiv_ops, .parent = &clk_timer_scaler[0], }, }, [1] = { .clk = { .name = "pwm-tdiv", .ops = &clk_tdiv_ops, .parent = &clk_timer_scaler[0], } }, [2] = { .clk = { .name = "pwm-tdiv", .ops = &clk_tdiv_ops, .parent = &clk_timer_scaler[1], }, }, [3] = { .clk = { .name = "pwm-tdiv", .ops = &clk_tdiv_ops, .parent = &clk_timer_scaler[1], }, }, [4] = { .clk = { .name = "pwm-tdiv", .ops = &clk_tdiv_ops, .parent = &clk_timer_scaler[1], }, }, }; static int __init clk_pwm_tdiv_register(unsigned int id) { struct pwm_tdiv_clk *divclk = &clk_timer_tdiv[id]; unsigned long tcfg1 = __raw_readl(S3C2410_TCFG1); tcfg1 >>= S3C2410_TCFG1_SHIFT(id); tcfg1 &= S3C2410_TCFG1_MUX_MASK; divclk->clk.id = id; divclk->divisor = tcfg_to_divisor(tcfg1); return s3c24xx_register_clock(&divclk->clk); } static inline struct clk *s3c24xx_pwmclk_tclk(unsigned int id) { return (id >= 2) ? 
&clk_timer_tclk[1] : &clk_timer_tclk[0]; } static inline struct clk *s3c24xx_pwmclk_tdiv(unsigned int id) { return &clk_timer_tdiv[id].clk; } static int clk_pwm_tin_set_parent(struct clk *clk, struct clk *parent) { unsigned int id = clk->id; unsigned long tcfg1; unsigned long flags; unsigned long bits; unsigned long shift = S3C2410_TCFG1_SHIFT(id); if (parent == s3c24xx_pwmclk_tclk(id)) bits = S3C_TCFG1_MUX_TCLK << shift; else if (parent == s3c24xx_pwmclk_tdiv(id)) bits = clk_pwm_tdiv_bits(to_tdiv(parent)) << shift; else return -EINVAL; clk->parent = parent; local_irq_save(flags); tcfg1 = __raw_readl(S3C2410_TCFG1); tcfg1 &= ~(S3C2410_TCFG1_MUX_MASK << shift); __raw_writel(tcfg1 | bits, S3C2410_TCFG1); local_irq_restore(flags); return 0; } static struct clk_ops clk_tin_ops = { .set_parent = clk_pwm_tin_set_parent, }; static struct clk clk_tin[] = { [0] = { .name = "pwm-tin", .id = 0, .ops = &clk_tin_ops, }, [1] = { .name = "pwm-tin", .id = 1, .ops = &clk_tin_ops, }, [2] = { .name = "pwm-tin", .id = 2, .ops = &clk_tin_ops, }, [3] = { .name = "pwm-tin", .id = 3, .ops = &clk_tin_ops, }, [4] = { .name = "pwm-tin", .id = 4, .ops = &clk_tin_ops, }, }; static __init int clk_pwm_tin_register(struct clk *pwm) { unsigned long tcfg1 = __raw_readl(S3C2410_TCFG1); unsigned int id = pwm->id; struct clk *parent; int ret; ret = s3c24xx_register_clock(pwm); if (ret < 0) return ret; tcfg1 >>= S3C2410_TCFG1_SHIFT(id); tcfg1 &= S3C2410_TCFG1_MUX_MASK; if (pwm_cfg_src_is_tclk(tcfg1)) parent = s3c24xx_pwmclk_tclk(id); else parent = s3c24xx_pwmclk_tdiv(id); return clk_set_parent(pwm, parent); } /** * s3c_pwmclk_init() - initialise pwm clocks * * Initialise and register the clocks which provide the inputs for the * pwm timer blocks. * * Note, this call is required by the time core, so must be called after * the base clocks are added and before any of the initcalls are run. 
*/ __init void s3c_pwmclk_init(void) { struct clk *clk_timers; unsigned int clk; int ret; clk_timers = clk_get(NULL, "timers"); if (IS_ERR(clk_timers)) { printk(KERN_ERR "%s: no parent clock\n", __func__); return; } for (clk = 0; clk < ARRAY_SIZE(clk_timer_scaler); clk++) clk_timer_scaler[clk].parent = clk_timers; s3c_register_clocks(clk_timer_scaler, ARRAY_SIZE(clk_timer_scaler)); s3c_register_clocks(clk_timer_tclk, ARRAY_SIZE(clk_timer_tclk)); for (clk = 0; clk < ARRAY_SIZE(clk_timer_tdiv); clk++) { ret = clk_pwm_tdiv_register(clk); if (ret < 0) { printk(KERN_ERR "error adding pwm%d tdiv clock\n", clk); return; } } for (clk = 0; clk < ARRAY_SIZE(clk_tin); clk++) { ret = clk_pwm_tin_register(&clk_tin[clk]); if (ret < 0) { printk(KERN_ERR "error adding pwm%d tin clock\n", clk); return; } } }
gpl-2.0
HelpMeRuth/Ruthless_Kernel
arch/arm/mach-s3c24xx/pm-s3c2412.c
4320
3036
/* linux/arch/arm/mach-s3c2412/pm.c * * Copyright (c) 2006 Simtec Electronics * Ben Dooks <ben@simtec.co.uk> * * http://armlinux.simtec.co.uk/. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/interrupt.h> #include <linux/list.h> #include <linux/timer.h> #include <linux/init.h> #include <linux/device.h> #include <linux/syscore_ops.h> #include <linux/platform_device.h> #include <linux/io.h> #include <asm/cacheflush.h> #include <asm/irq.h> #include <mach/hardware.h> #include <mach/regs-gpio.h> #include <plat/cpu.h> #include <plat/pm.h> #include <plat/wakeup-mask.h> #include "regs-dsc.h" #include "s3c2412-power.h" extern void s3c2412_sleep_enter(void); static int s3c2412_cpu_suspend(unsigned long arg) { unsigned long tmp; /* set our standby method to sleep */ tmp = __raw_readl(S3C2412_PWRCFG); tmp |= S3C2412_PWRCFG_STANDBYWFI_SLEEP; __raw_writel(tmp, S3C2412_PWRCFG); s3c2412_sleep_enter(); pr_info("Failed to suspend the system\n"); return 1; /* Aborting suspend */ } /* mapping of interrupts to parts of the wakeup mask */ static struct samsung_wakeup_mask wake_irqs[] = { { .irq = IRQ_RTC, .bit = S3C2412_PWRCFG_RTC_MASKIRQ, }, }; static void s3c2412_pm_prepare(void) { samsung_sync_wakemask(S3C2412_PWRCFG, wake_irqs, ARRAY_SIZE(wake_irqs)); } static int s3c2412_pm_add(struct device *dev, struct subsys_interface *sif) { pm_cpu_prep = s3c2412_pm_prepare; pm_cpu_sleep = s3c2412_cpu_suspend; return 0; } static struct sleep_save s3c2412_sleep[] = { SAVE_ITEM(S3C2412_DSC0), SAVE_ITEM(S3C2412_DSC1), SAVE_ITEM(S3C2413_GPJDAT), SAVE_ITEM(S3C2413_GPJCON), SAVE_ITEM(S3C2413_GPJUP), /* save the PWRCFG to get back to original sleep method */ SAVE_ITEM(S3C2412_PWRCFG), /* save the sleep configuration anyway, just in case these * get damaged during wakeup */ 
SAVE_ITEM(S3C2412_GPBSLPCON), SAVE_ITEM(S3C2412_GPCSLPCON), SAVE_ITEM(S3C2412_GPDSLPCON), SAVE_ITEM(S3C2412_GPFSLPCON), SAVE_ITEM(S3C2412_GPGSLPCON), SAVE_ITEM(S3C2412_GPHSLPCON), SAVE_ITEM(S3C2413_GPJSLPCON), }; static struct subsys_interface s3c2412_pm_interface = { .name = "s3c2412_pm", .subsys = &s3c2412_subsys, .add_dev = s3c2412_pm_add, }; static __init int s3c2412_pm_init(void) { return subsys_interface_register(&s3c2412_pm_interface); } arch_initcall(s3c2412_pm_init); static int s3c2412_pm_suspend(void) { s3c_pm_do_save(s3c2412_sleep, ARRAY_SIZE(s3c2412_sleep)); return 0; } static void s3c2412_pm_resume(void) { unsigned long tmp; tmp = __raw_readl(S3C2412_PWRCFG); tmp &= ~S3C2412_PWRCFG_STANDBYWFI_MASK; tmp |= S3C2412_PWRCFG_STANDBYWFI_IDLE; __raw_writel(tmp, S3C2412_PWRCFG); s3c_pm_do_restore(s3c2412_sleep, ARRAY_SIZE(s3c2412_sleep)); } struct syscore_ops s3c2412_pm_syscore_ops = { .suspend = s3c2412_pm_suspend, .resume = s3c2412_pm_resume, };
gpl-2.0
VegaDevTeam/android_kernel_pantech_ef60s
drivers/coresight/coresight-etm-cp14.c
4576
9853
/* Copyright (c) 2012, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/kernel.h> #include <linux/types.h> #include <asm/hardware/cp14.h> static unsigned int etm_read_reg(uint32_t reg) { switch (reg) { case 0x0: return etm_read(ETMCR); case 0x1: return etm_read(ETMCCR); case 0x2: return etm_read(ETMTRIGGER); case 0x4: return etm_read(ETMSR); case 0x5: return etm_read(ETMSCR); case 0x6: return etm_read(ETMTSSCR); case 0x8: return etm_read(ETMTEEVR); case 0x9: return etm_read(ETMTECR1); case 0xB: return etm_read(ETMFFLR); case 0x10: return etm_read(ETMACVR0); case 0x11: return etm_read(ETMACVR1); case 0x12: return etm_read(ETMACVR2); case 0x13: return etm_read(ETMACVR3); case 0x14: return etm_read(ETMACVR4); case 0x15: return etm_read(ETMACVR5); case 0x16: return etm_read(ETMACVR6); case 0x17: return etm_read(ETMACVR7); case 0x18: return etm_read(ETMACVR8); case 0x19: return etm_read(ETMACVR9); case 0x1A: return etm_read(ETMACVR10); case 0x1B: return etm_read(ETMACVR11); case 0x1C: return etm_read(ETMACVR12); case 0x1D: return etm_read(ETMACVR13); case 0x1E: return etm_read(ETMACVR14); case 0x1F: return etm_read(ETMACVR15); case 0x20: return etm_read(ETMACTR0); case 0x21: return etm_read(ETMACTR1); case 0x22: return etm_read(ETMACTR2); case 0x23: return etm_read(ETMACTR3); case 0x24: return etm_read(ETMACTR4); case 0x25: return etm_read(ETMACTR5); case 0x26: return etm_read(ETMACTR6); case 0x27: return etm_read(ETMACTR7); case 0x28: return etm_read(ETMACTR8); case 0x29: return etm_read(ETMACTR9); case 0x2A: return 
etm_read(ETMACTR10); case 0x2B: return etm_read(ETMACTR11); case 0x2C: return etm_read(ETMACTR12); case 0x2D: return etm_read(ETMACTR13); case 0x2E: return etm_read(ETMACTR14); case 0x2F: return etm_read(ETMACTR15); case 0x50: return etm_read(ETMCNTRLDVR0); case 0x51: return etm_read(ETMCNTRLDVR1); case 0x52: return etm_read(ETMCNTRLDVR2); case 0x53: return etm_read(ETMCNTRLDVR3); case 0x54: return etm_read(ETMCNTENR0); case 0x55: return etm_read(ETMCNTENR1); case 0x56: return etm_read(ETMCNTENR2); case 0x57: return etm_read(ETMCNTENR3); case 0x58: return etm_read(ETMCNTRLDEVR0); case 0x59: return etm_read(ETMCNTRLDEVR1); case 0x5A: return etm_read(ETMCNTRLDEVR2); case 0x5B: return etm_read(ETMCNTRLDEVR3); case 0x5C: return etm_read(ETMCNTVR0); case 0x5D: return etm_read(ETMCNTVR1); case 0x5E: return etm_read(ETMCNTVR2); case 0x5F: return etm_read(ETMCNTVR3); case 0x60: return etm_read(ETMSQ12EVR); case 0x61: return etm_read(ETMSQ21EVR); case 0x62: return etm_read(ETMSQ23EVR); case 0x63: return etm_read(ETMSQ31EVR); case 0x64: return etm_read(ETMSQ32EVR); case 0x65: return etm_read(ETMSQ13EVR); case 0x67: return etm_read(ETMSQR); case 0x68: return etm_read(ETMEXTOUTEVR0); case 0x69: return etm_read(ETMEXTOUTEVR1); case 0x6A: return etm_read(ETMEXTOUTEVR2); case 0x6B: return etm_read(ETMEXTOUTEVR3); case 0x6C: return etm_read(ETMCIDCVR0); case 0x6D: return etm_read(ETMCIDCVR1); case 0x6E: return etm_read(ETMCIDCVR2); case 0x6F: return etm_read(ETMCIDCMR); case 0x70: return etm_read(ETMIMPSPEC0); case 0x71: return etm_read(ETMIMPSPEC1); case 0x72: return etm_read(ETMIMPSPEC2); case 0x73: return etm_read(ETMIMPSPEC3); case 0x74: return etm_read(ETMIMPSPEC4); case 0x75: return etm_read(ETMIMPSPEC5); case 0x76: return etm_read(ETMIMPSPEC6); case 0x77: return etm_read(ETMIMPSPEC7); case 0x78: return etm_read(ETMSYNCFR); case 0x79: return etm_read(ETMIDR); case 0x7A: return etm_read(ETMCCER); case 0x7B: return etm_read(ETMEXTINSELR); case 0x7C: return 
etm_read(ETMTESSEICR); case 0x7D: return etm_read(ETMEIBCR); case 0x7E: return etm_read(ETMTSEVR); case 0x7F: return etm_read(ETMAUXCR); case 0x80: return etm_read(ETMTRACEIDR); case 0x90: return etm_read(ETMVMIDCVR); case 0xC1: return etm_read(ETMOSLSR); case 0xC2: return etm_read(ETMOSSRR); case 0xC4: return etm_read(ETMPDCR); case 0xC5: return etm_read(ETMPDSR); default: WARN(1, "invalid CP14 access to ETM reg: %lx", (unsigned long)reg); return 0; } } static void etm_write_reg(uint32_t val, uint32_t reg) { switch (reg) { case 0x0: etm_write(val, ETMCR); return; case 0x2: etm_write(val, ETMTRIGGER); return; case 0x4: etm_write(val, ETMSR); return; case 0x6: etm_write(val, ETMTSSCR); return; case 0x8: etm_write(val, ETMTEEVR); return; case 0x9: etm_write(val, ETMTECR1); return; case 0xB: etm_write(val, ETMFFLR); return; case 0x10: etm_write(val, ETMACVR0); return; case 0x11: etm_write(val, ETMACVR1); return; case 0x12: etm_write(val, ETMACVR2); return; case 0x13: etm_write(val, ETMACVR3); return; case 0x14: etm_write(val, ETMACVR4); return; case 0x15: etm_write(val, ETMACVR5); return; case 0x16: etm_write(val, ETMACVR6); return; case 0x17: etm_write(val, ETMACVR7); return; case 0x18: etm_write(val, ETMACVR8); return; case 0x19: etm_write(val, ETMACVR9); return; case 0x1A: etm_write(val, ETMACVR10); return; case 0x1B: etm_write(val, ETMACVR11); return; case 0x1C: etm_write(val, ETMACVR12); return; case 0x1D: etm_write(val, ETMACVR13); return; case 0x1E: etm_write(val, ETMACVR14); return; case 0x1F: etm_write(val, ETMACVR15); return; case 0x20: etm_write(val, ETMACTR0); return; case 0x21: etm_write(val, ETMACTR1); return; case 0x22: etm_write(val, ETMACTR2); return; case 0x23: etm_write(val, ETMACTR3); return; case 0x24: etm_write(val, ETMACTR4); return; case 0x25: etm_write(val, ETMACTR5); return; case 0x26: etm_write(val, ETMACTR6); return; case 0x27: etm_write(val, ETMACTR7); return; case 0x28: etm_write(val, ETMACTR8); return; case 0x29: etm_write(val, 
ETMACTR9); return; case 0x2A: etm_write(val, ETMACTR10); return; case 0x2B: etm_write(val, ETMACTR11); return; case 0x2C: etm_write(val, ETMACTR12); return; case 0x2D: etm_write(val, ETMACTR13); return; case 0x2E: etm_write(val, ETMACTR14); return; case 0x2F: etm_write(val, ETMACTR15); return; case 0x50: etm_write(val, ETMCNTRLDVR0); return; case 0x51: etm_write(val, ETMCNTRLDVR1); return; case 0x52: etm_write(val, ETMCNTRLDVR2); return; case 0x53: etm_write(val, ETMCNTRLDVR3); return; case 0x54: etm_write(val, ETMCNTENR0); return; case 0x55: etm_write(val, ETMCNTENR1); return; case 0x56: etm_write(val, ETMCNTENR2); return; case 0x57: etm_write(val, ETMCNTENR3); return; case 0x58: etm_write(val, ETMCNTRLDEVR0); return; case 0x59: etm_write(val, ETMCNTRLDEVR1); return; case 0x5A: etm_write(val, ETMCNTRLDEVR2); return; case 0x5B: etm_write(val, ETMCNTRLDEVR3); return; case 0x5C: etm_write(val, ETMCNTVR0); return; case 0x5D: etm_write(val, ETMCNTVR1); return; case 0x5E: etm_write(val, ETMCNTVR2); return; case 0x5F: etm_write(val, ETMCNTVR3); return; case 0x60: etm_write(val, ETMSQ12EVR); return; case 0x61: etm_write(val, ETMSQ21EVR); return; case 0x62: etm_write(val, ETMSQ23EVR); return; case 0x63: etm_write(val, ETMSQ31EVR); return; case 0x64: etm_write(val, ETMSQ32EVR); return; case 0x65: etm_write(val, ETMSQ13EVR); return; case 0x67: etm_write(val, ETMSQR); return; case 0x68: etm_write(val, ETMEXTOUTEVR0); return; case 0x69: etm_write(val, ETMEXTOUTEVR1); return; case 0x6A: etm_write(val, ETMEXTOUTEVR2); return; case 0x6B: etm_write(val, ETMEXTOUTEVR3); return; case 0x6C: etm_write(val, ETMCIDCVR0); return; case 0x6D: etm_write(val, ETMCIDCVR1); return; case 0x6E: etm_write(val, ETMCIDCVR2); return; case 0x6F: etm_write(val, ETMCIDCMR); return; case 0x70: etm_write(val, ETMIMPSPEC0); return; case 0x71: etm_write(val, ETMIMPSPEC1); return; case 0x72: etm_write(val, ETMIMPSPEC2); return; case 0x73: etm_write(val, ETMIMPSPEC3); return; case 0x74: etm_write(val, 
ETMIMPSPEC4); return; case 0x75: etm_write(val, ETMIMPSPEC5); return; case 0x76: etm_write(val, ETMIMPSPEC6); return; case 0x77: etm_write(val, ETMIMPSPEC7); return; case 0x78: etm_write(val, ETMSYNCFR); return; case 0x7B: etm_write(val, ETMEXTINSELR); return; case 0x7C: etm_write(val, ETMTESSEICR); return; case 0x7D: etm_write(val, ETMEIBCR); return; case 0x7E: etm_write(val, ETMTSEVR); return; case 0x7F: etm_write(val, ETMAUXCR); return; case 0x80: etm_write(val, ETMTRACEIDR); return; case 0x90: etm_write(val, ETMVMIDCVR); return; case 0xC0: etm_write(val, ETMOSLAR); return; case 0xC2: etm_write(val, ETMOSSRR); return; case 0xC4: etm_write(val, ETMPDCR); return; case 0xC5: etm_write(val, ETMPDSR); return; default: WARN(1, "invalid CP14 access to ETM reg: %lx", (unsigned long)reg); return; } } static inline uint32_t offset_to_reg_num(uint32_t off) { return off >> 2; } unsigned int etm_readl_cp14(uint32_t off) { uint32_t reg = offset_to_reg_num(off); return etm_read_reg(reg); } void etm_writel_cp14(uint32_t val, uint32_t off) { uint32_t reg = offset_to_reg_num(off); etm_write_reg(val, reg); }
gpl-2.0
hejiann/android_kernel_huawei_u8860
drivers/input/keyboard/hil_kbd.c
8160
15102
/* * Generic linux-input device driver for keyboard devices * * Copyright (c) 2001 Brian S. Julin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL"). * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * * References: * HP-HIL Technical Reference Manual. Hewlett Packard Product No. 45918A * */ #include <linux/hil.h> #include <linux/input.h> #include <linux/serio.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/completion.h> #include <linux/slab.h> #include <linux/pci_ids.h> #define PREFIX "HIL: " MODULE_AUTHOR("Brian S. 
Julin <bri@calyx.com>"); MODULE_DESCRIPTION("HIL keyboard/mouse driver"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_ALIAS("serio:ty03pr25id00ex*"); /* HIL keyboard */ MODULE_ALIAS("serio:ty03pr25id0Fex*"); /* HIL mouse */ #define HIL_PACKET_MAX_LENGTH 16 #define HIL_KBD_SET1_UPBIT 0x01 #define HIL_KBD_SET1_SHIFT 1 static unsigned int hil_kbd_set1[HIL_KEYCODES_SET1_TBLSIZE] __read_mostly = { HIL_KEYCODES_SET1 }; #define HIL_KBD_SET2_UPBIT 0x01 #define HIL_KBD_SET2_SHIFT 1 /* Set2 is user defined */ #define HIL_KBD_SET3_UPBIT 0x80 #define HIL_KBD_SET3_SHIFT 0 static unsigned int hil_kbd_set3[HIL_KEYCODES_SET3_TBLSIZE] __read_mostly = { HIL_KEYCODES_SET3 }; static const char hil_language[][16] = { HIL_LOCALE_MAP }; struct hil_dev { struct input_dev *dev; struct serio *serio; /* Input buffer and index for packets from HIL bus. */ hil_packet data[HIL_PACKET_MAX_LENGTH]; int idx4; /* four counts per packet */ /* Raw device info records from HIL bus, see hil.h for fields. */ char idd[HIL_PACKET_MAX_LENGTH]; /* DID byte and IDD record */ char rsc[HIL_PACKET_MAX_LENGTH]; /* RSC record */ char exd[HIL_PACKET_MAX_LENGTH]; /* EXD record */ char rnm[HIL_PACKET_MAX_LENGTH + 1]; /* RNM record + NULL term. */ struct completion cmd_done; bool is_pointer; /* Extra device details needed for pointing devices. 
*/ unsigned int nbtn, naxes; unsigned int btnmap[7]; }; static bool hil_dev_is_command_response(hil_packet p) { if ((p & ~HIL_CMDCT_POL) == (HIL_ERR_INT | HIL_PKT_CMD | HIL_CMD_POL)) return false; if ((p & ~HIL_CMDCT_RPL) == (HIL_ERR_INT | HIL_PKT_CMD | HIL_CMD_RPL)) return false; return true; } static void hil_dev_handle_command_response(struct hil_dev *dev) { hil_packet p; char *buf; int i, idx; idx = dev->idx4 / 4; p = dev->data[idx - 1]; switch (p & HIL_PKT_DATA_MASK) { case HIL_CMD_IDD: buf = dev->idd; break; case HIL_CMD_RSC: buf = dev->rsc; break; case HIL_CMD_EXD: buf = dev->exd; break; case HIL_CMD_RNM: dev->rnm[HIL_PACKET_MAX_LENGTH] = 0; buf = dev->rnm; break; default: /* These occur when device isn't present */ if (p != (HIL_ERR_INT | HIL_PKT_CMD)) { /* Anything else we'd like to know about. */ printk(KERN_WARNING PREFIX "Device sent unknown record %x\n", p); } goto out; } for (i = 0; i < idx; i++) buf[i] = dev->data[i] & HIL_PKT_DATA_MASK; for (; i < HIL_PACKET_MAX_LENGTH; i++) buf[i] = 0; out: complete(&dev->cmd_done); } static void hil_dev_handle_kbd_events(struct hil_dev *kbd) { struct input_dev *dev = kbd->dev; int idx = kbd->idx4 / 4; int i; switch (kbd->data[0] & HIL_POL_CHARTYPE_MASK) { case HIL_POL_CHARTYPE_NONE: return; case HIL_POL_CHARTYPE_ASCII: for (i = 1; i < idx - 1; i++) input_report_key(dev, kbd->data[i] & 0x7f, 1); break; case HIL_POL_CHARTYPE_RSVD1: case HIL_POL_CHARTYPE_RSVD2: case HIL_POL_CHARTYPE_BINARY: for (i = 1; i < idx - 1; i++) input_report_key(dev, kbd->data[i], 1); break; case HIL_POL_CHARTYPE_SET1: for (i = 1; i < idx - 1; i++) { unsigned int key = kbd->data[i]; int up = key & HIL_KBD_SET1_UPBIT; key &= (~HIL_KBD_SET1_UPBIT & 0xff); key = hil_kbd_set1[key >> HIL_KBD_SET1_SHIFT]; input_report_key(dev, key, !up); } break; case HIL_POL_CHARTYPE_SET2: for (i = 1; i < idx - 1; i++) { unsigned int key = kbd->data[i]; int up = key & HIL_KBD_SET2_UPBIT; key &= (~HIL_KBD_SET1_UPBIT & 0xff); key = key >> HIL_KBD_SET2_SHIFT; 
input_report_key(dev, key, !up); } break; case HIL_POL_CHARTYPE_SET3: for (i = 1; i < idx - 1; i++) { unsigned int key = kbd->data[i]; int up = key & HIL_KBD_SET3_UPBIT; key &= (~HIL_KBD_SET1_UPBIT & 0xff); key = hil_kbd_set3[key >> HIL_KBD_SET3_SHIFT]; input_report_key(dev, key, !up); } break; } input_sync(dev); } static void hil_dev_handle_ptr_events(struct hil_dev *ptr) { struct input_dev *dev = ptr->dev; int idx = ptr->idx4 / 4; hil_packet p = ptr->data[idx - 1]; int i, cnt, laxis; bool absdev, ax16; if ((p & HIL_CMDCT_POL) != idx - 1) { printk(KERN_WARNING PREFIX "Malformed poll packet %x (idx = %i)\n", p, idx); return; } i = (p & HIL_POL_AXIS_ALT) ? 3 : 0; laxis = (p & HIL_POL_NUM_AXES_MASK) + i; ax16 = ptr->idd[1] & HIL_IDD_HEADER_16BIT; /* 8 or 16bit resolution */ absdev = ptr->idd[1] & HIL_IDD_HEADER_ABS; for (cnt = 1; i < laxis; i++) { unsigned int lo, hi, val; lo = ptr->data[cnt++] & HIL_PKT_DATA_MASK; hi = ax16 ? (ptr->data[cnt++] & HIL_PKT_DATA_MASK) : 0; if (absdev) { val = lo + (hi << 8); #ifdef TABLET_AUTOADJUST if (val < input_abs_get_min(dev, ABS_X + i)) input_abs_set_min(dev, ABS_X + i, val); if (val > input_abs_get_max(dev, ABS_X + i)) input_abs_set_max(dev, ABS_X + i, val); #endif if (i % 3) val = input_abs_get_max(dev, ABS_X + i) - val; input_report_abs(dev, ABS_X + i, val); } else { val = (int) (((int8_t) lo) | ((int8_t) hi << 8)); if (i % 3) val *= -1; input_report_rel(dev, REL_X + i, val); } } while (cnt < idx - 1) { unsigned int btn = ptr->data[cnt++]; int up = btn & 1; btn &= 0xfe; if (btn == 0x8e) continue; /* TODO: proximity == touch? 
*/ if (btn > 0x8c || btn < 0x80) continue; btn = (btn - 0x80) >> 1; btn = ptr->btnmap[btn]; input_report_key(dev, btn, !up); } input_sync(dev); } static void hil_dev_process_err(struct hil_dev *dev) { printk(KERN_WARNING PREFIX "errored HIL packet\n"); dev->idx4 = 0; complete(&dev->cmd_done); /* just in case somebody is waiting */ } static irqreturn_t hil_dev_interrupt(struct serio *serio, unsigned char data, unsigned int flags) { struct hil_dev *dev; hil_packet packet; int idx; dev = serio_get_drvdata(serio); BUG_ON(dev == NULL); if (dev->idx4 >= HIL_PACKET_MAX_LENGTH * sizeof(hil_packet)) { hil_dev_process_err(dev); goto out; } idx = dev->idx4 / 4; if (!(dev->idx4 % 4)) dev->data[idx] = 0; packet = dev->data[idx]; packet |= ((hil_packet)data) << ((3 - (dev->idx4 % 4)) * 8); dev->data[idx] = packet; /* Records of N 4-byte hil_packets must terminate with a command. */ if ((++dev->idx4 % 4) == 0) { if ((packet & 0xffff0000) != HIL_ERR_INT) { hil_dev_process_err(dev); } else if (packet & HIL_PKT_CMD) { if (hil_dev_is_command_response(packet)) hil_dev_handle_command_response(dev); else if (dev->is_pointer) hil_dev_handle_ptr_events(dev); else hil_dev_handle_kbd_events(dev); dev->idx4 = 0; } } out: return IRQ_HANDLED; } static void hil_dev_disconnect(struct serio *serio) { struct hil_dev *dev = serio_get_drvdata(serio); BUG_ON(dev == NULL); serio_close(serio); input_unregister_device(dev->dev); serio_set_drvdata(serio, NULL); kfree(dev); } static void hil_dev_keyboard_setup(struct hil_dev *kbd) { struct input_dev *input_dev = kbd->dev; uint8_t did = kbd->idd[0]; int i; input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP); input_dev->ledbit[0] = BIT_MASK(LED_NUML) | BIT_MASK(LED_CAPSL) | BIT_MASK(LED_SCROLLL); for (i = 0; i < 128; i++) { __set_bit(hil_kbd_set1[i], input_dev->keybit); __set_bit(hil_kbd_set3[i], input_dev->keybit); } __clear_bit(KEY_RESERVED, input_dev->keybit); input_dev->keycodemax = HIL_KEYCODES_SET1_TBLSIZE; input_dev->keycodesize = 
sizeof(hil_kbd_set1[0]); input_dev->keycode = hil_kbd_set1; input_dev->name = strlen(kbd->rnm) ? kbd->rnm : "HIL keyboard"; input_dev->phys = "hpkbd/input0"; printk(KERN_INFO PREFIX "HIL keyboard found (did = 0x%02x, lang = %s)\n", did, hil_language[did & HIL_IDD_DID_TYPE_KB_LANG_MASK]); } static void hil_dev_pointer_setup(struct hil_dev *ptr) { struct input_dev *input_dev = ptr->dev; uint8_t did = ptr->idd[0]; uint8_t *idd = ptr->idd + 1; unsigned int naxsets = HIL_IDD_NUM_AXSETS(*idd); unsigned int i, btntype; const char *txt; ptr->naxes = HIL_IDD_NUM_AXES_PER_SET(*idd); switch (did & HIL_IDD_DID_TYPE_MASK) { case HIL_IDD_DID_TYPE_REL: input_dev->evbit[0] = BIT_MASK(EV_REL); for (i = 0; i < ptr->naxes; i++) __set_bit(REL_X + i, input_dev->relbit); for (i = 3; naxsets > 1 && i < ptr->naxes + 3; i++) __set_bit(REL_X + i, input_dev->relbit); txt = "relative"; break; case HIL_IDD_DID_TYPE_ABS: input_dev->evbit[0] = BIT_MASK(EV_ABS); for (i = 0; i < ptr->naxes; i++) input_set_abs_params(input_dev, ABS_X + i, 0, HIL_IDD_AXIS_MAX(idd, i), 0, 0); for (i = 3; naxsets > 1 && i < ptr->naxes + 3; i++) input_set_abs_params(input_dev, ABS_X + i, 0, HIL_IDD_AXIS_MAX(idd, i - 3), 0, 0); #ifdef TABLET_AUTOADJUST for (i = 0; i < ABS_MAX; i++) { int diff = input_abs_get_max(input_dev, ABS_X + i) / 10; input_abs_set_min(input_dev, ABS_X + i, input_abs_get_min(input_dev, ABS_X + i) + diff); input_abs_set_max(input_dev, ABS_X + i, input_abs_get_max(input_dev, ABS_X + i) - diff); } #endif txt = "absolute"; break; default: BUG(); } ptr->nbtn = HIL_IDD_NUM_BUTTONS(idd); if (ptr->nbtn) input_dev->evbit[0] |= BIT_MASK(EV_KEY); btntype = BTN_MISC; if ((did & HIL_IDD_DID_ABS_TABLET_MASK) == HIL_IDD_DID_ABS_TABLET) #ifdef TABLET_SIMULATES_MOUSE btntype = BTN_TOUCH; #else btntype = BTN_DIGI; #endif if ((did & HIL_IDD_DID_ABS_TSCREEN_MASK) == HIL_IDD_DID_ABS_TSCREEN) btntype = BTN_TOUCH; if ((did & HIL_IDD_DID_REL_MOUSE_MASK) == HIL_IDD_DID_REL_MOUSE) btntype = BTN_MOUSE; for (i = 0; i < 
ptr->nbtn; i++) { __set_bit(btntype | i, input_dev->keybit); ptr->btnmap[i] = btntype | i; } if (btntype == BTN_MOUSE) { /* Swap buttons 2 and 3 */ ptr->btnmap[1] = BTN_MIDDLE; ptr->btnmap[2] = BTN_RIGHT; } input_dev->name = strlen(ptr->rnm) ? ptr->rnm : "HIL pointer device"; printk(KERN_INFO PREFIX "HIL pointer device found (did: 0x%02x, axis: %s)\n", did, txt); printk(KERN_INFO PREFIX "HIL pointer has %i buttons and %i sets of %i axes\n", ptr->nbtn, naxsets, ptr->naxes); } static int hil_dev_connect(struct serio *serio, struct serio_driver *drv) { struct hil_dev *dev; struct input_dev *input_dev; uint8_t did, *idd; int error; dev = kzalloc(sizeof(*dev), GFP_KERNEL); input_dev = input_allocate_device(); if (!dev || !input_dev) { error = -ENOMEM; goto bail0; } dev->serio = serio; dev->dev = input_dev; error = serio_open(serio, drv); if (error) goto bail0; serio_set_drvdata(serio, dev); /* Get device info. MLC driver supplies devid/status/etc. */ init_completion(&dev->cmd_done); serio_write(serio, 0); serio_write(serio, 0); serio_write(serio, HIL_PKT_CMD >> 8); serio_write(serio, HIL_CMD_IDD); error = wait_for_completion_killable(&dev->cmd_done); if (error) goto bail1; init_completion(&dev->cmd_done); serio_write(serio, 0); serio_write(serio, 0); serio_write(serio, HIL_PKT_CMD >> 8); serio_write(serio, HIL_CMD_RSC); error = wait_for_completion_killable(&dev->cmd_done); if (error) goto bail1; init_completion(&dev->cmd_done); serio_write(serio, 0); serio_write(serio, 0); serio_write(serio, HIL_PKT_CMD >> 8); serio_write(serio, HIL_CMD_RNM); error = wait_for_completion_killable(&dev->cmd_done); if (error) goto bail1; init_completion(&dev->cmd_done); serio_write(serio, 0); serio_write(serio, 0); serio_write(serio, HIL_PKT_CMD >> 8); serio_write(serio, HIL_CMD_EXD); error = wait_for_completion_killable(&dev->cmd_done); if (error) goto bail1; did = dev->idd[0]; idd = dev->idd + 1; switch (did & HIL_IDD_DID_TYPE_MASK) { case HIL_IDD_DID_TYPE_KB_INTEGRAL: case 
HIL_IDD_DID_TYPE_KB_ITF: case HIL_IDD_DID_TYPE_KB_RSVD: case HIL_IDD_DID_TYPE_CHAR: if (HIL_IDD_NUM_BUTTONS(idd) || HIL_IDD_NUM_AXES_PER_SET(*idd)) { printk(KERN_INFO PREFIX "combo devices are not supported.\n"); goto bail1; } dev->is_pointer = false; hil_dev_keyboard_setup(dev); break; case HIL_IDD_DID_TYPE_REL: case HIL_IDD_DID_TYPE_ABS: dev->is_pointer = true; hil_dev_pointer_setup(dev); break; default: goto bail1; } input_dev->id.bustype = BUS_HIL; input_dev->id.vendor = PCI_VENDOR_ID_HP; input_dev->id.product = 0x0001; /* TODO: get from kbd->rsc */ input_dev->id.version = 0x0100; /* TODO: get from kbd->rsc */ input_dev->dev.parent = &serio->dev; if (!dev->is_pointer) { serio_write(serio, 0); serio_write(serio, 0); serio_write(serio, HIL_PKT_CMD >> 8); /* Enable Keyswitch Autorepeat 1 */ serio_write(serio, HIL_CMD_EK1); /* No need to wait for completion */ } error = input_register_device(input_dev); if (error) goto bail1; return 0; bail1: serio_close(serio); serio_set_drvdata(serio, NULL); bail0: input_free_device(input_dev); kfree(dev); return error; } static struct serio_device_id hil_dev_ids[] = { { .type = SERIO_HIL_MLC, .proto = SERIO_HIL, .id = SERIO_ANY, .extra = SERIO_ANY, }, { 0 } }; MODULE_DEVICE_TABLE(serio, hil_dev_ids); static struct serio_driver hil_serio_drv = { .driver = { .name = "hil_dev", }, .description = "HP HIL keyboard/mouse/tablet driver", .id_table = hil_dev_ids, .connect = hil_dev_connect, .disconnect = hil_dev_disconnect, .interrupt = hil_dev_interrupt }; static int __init hil_dev_init(void) { return serio_register_driver(&hil_serio_drv); } static void __exit hil_dev_exit(void) { serio_unregister_driver(&hil_serio_drv); } module_init(hil_dev_init); module_exit(hil_dev_exit);
gpl-2.0
TimofeyFox/S7270_kernel
drivers/net/arcnet/com20020.c
8160
10293
/* * Linux ARCnet driver - COM20020 chipset support * * Written 1997 by David Woodhouse. * Written 1994-1999 by Avery Pennarun. * Written 1999 by Martin Mares <mj@ucw.cz>. * Derived from skeleton.c by Donald Becker. * * Special thanks to Contemporary Controls, Inc. (www.ccontrols.com) * for sponsoring the further development of this driver. * * ********************** * * The original copyright of skeleton.c was as follows: * * skeleton.c Written 1993 by Donald Becker. * Copyright 1993 United States Government as represented by the * Director, National Security Agency. This software may only be used * and distributed according to the terms of the GNU General Public License as * modified by SRC, incorporated herein by reference. * * ********************** * * For more details, see drivers/net/arcnet.c * * ********************** */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/ioport.h> #include <linux/errno.h> #include <linux/delay.h> #include <linux/netdevice.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/arcdevice.h> #include <linux/com20020.h> #include <asm/io.h> #define VERSION "arcnet: COM20020 chipset support (by David Woodhouse et al.)\n" static char *clockrates[] = {"10 Mb/s", "Reserved", "5 Mb/s", "2.5 Mb/s", "1.25Mb/s", "625 Kb/s", "312.5 Kb/s", "156.25 Kb/s", "Reserved", "Reserved", "Reserved"}; static void com20020_command(struct net_device *dev, int command); static int com20020_status(struct net_device *dev); static void com20020_setmask(struct net_device *dev, int mask); static int com20020_reset(struct net_device *dev, int really_reset); static void com20020_copy_to_card(struct net_device *dev, int bufnum, int offset, void *buf, int count); static void com20020_copy_from_card(struct net_device *dev, int bufnum, int offset, void *buf, int count); static void com20020_set_mc_list(struct net_device *dev); static void com20020_close(struct net_device *); static void 
com20020_copy_from_card(struct net_device *dev, int bufnum, int offset, void *buf, int count) { int ioaddr = dev->base_addr, ofs = 512 * bufnum + offset; /* set up the address register */ outb((ofs >> 8) | RDDATAflag | AUTOINCflag, _ADDR_HI); outb(ofs & 0xff, _ADDR_LO); /* copy the data */ TIME("insb", count, insb(_MEMDATA, buf, count)); } static void com20020_copy_to_card(struct net_device *dev, int bufnum, int offset, void *buf, int count) { int ioaddr = dev->base_addr, ofs = 512 * bufnum + offset; /* set up the address register */ outb((ofs >> 8) | AUTOINCflag, _ADDR_HI); outb(ofs & 0xff, _ADDR_LO); /* copy the data */ TIME("outsb", count, outsb(_MEMDATA, buf, count)); } /* Reset the card and check some basic stuff during the detection stage. */ int com20020_check(struct net_device *dev) { int ioaddr = dev->base_addr, status; struct arcnet_local *lp = netdev_priv(dev); ARCRESET0; mdelay(RESETtime); lp->setup = lp->clockm ? 0 : (lp->clockp << 1); lp->setup2 = (lp->clockm << 4) | 8; /* CHECK: should we do this for SOHARD cards ? 
*/ /* Enable P1Mode for backplane mode */ lp->setup = lp->setup | P1MODE; SET_SUBADR(SUB_SETUP1); outb(lp->setup, _XREG); if (lp->clockm != 0) { SET_SUBADR(SUB_SETUP2); outb(lp->setup2, _XREG); /* must now write the magic "restart operation" command */ mdelay(1); outb(0x18, _COMMAND); } lp->config = 0x21 | (lp->timeout << 3) | (lp->backplane << 2); /* set node ID to 0x42 (but transmitter is disabled, so it's okay) */ SETCONF; outb(0x42, ioaddr + BUS_ALIGN*7); status = ASTATUS(); if ((status & 0x99) != (NORXflag | TXFREEflag | RESETflag)) { BUGMSG(D_NORMAL, "status invalid (%Xh).\n", status); return -ENODEV; } BUGMSG(D_INIT_REASONS, "status after reset: %X\n", status); /* Enable TX */ outb(0x39, _CONFIG); outb(inb(ioaddr + BUS_ALIGN*8), ioaddr + BUS_ALIGN*7); ACOMMAND(CFLAGScmd | RESETclear | CONFIGclear); status = ASTATUS(); BUGMSG(D_INIT_REASONS, "status after reset acknowledged: %X\n", status); /* Read first location of memory */ outb(0 | RDDATAflag | AUTOINCflag, _ADDR_HI); outb(0, _ADDR_LO); if ((status = inb(_MEMDATA)) != TESTvalue) { BUGMSG(D_NORMAL, "Signature byte not found (%02Xh != D1h).\n", status); return -ENODEV; } return 0; } const struct net_device_ops com20020_netdev_ops = { .ndo_open = arcnet_open, .ndo_stop = arcnet_close, .ndo_start_xmit = arcnet_send_packet, .ndo_tx_timeout = arcnet_timeout, .ndo_set_rx_mode = com20020_set_mc_list, }; /* Set up the struct net_device associated with this card. Called after * probing succeeds. */ int com20020_found(struct net_device *dev, int shared) { struct arcnet_local *lp; int ioaddr = dev->base_addr; /* Initialize the rest of the device structure. 
*/ lp = netdev_priv(dev); lp->hw.owner = THIS_MODULE; lp->hw.command = com20020_command; lp->hw.status = com20020_status; lp->hw.intmask = com20020_setmask; lp->hw.reset = com20020_reset; lp->hw.copy_to_card = com20020_copy_to_card; lp->hw.copy_from_card = com20020_copy_from_card; lp->hw.close = com20020_close; if (!dev->dev_addr[0]) dev->dev_addr[0] = inb(ioaddr + BUS_ALIGN*8); /* FIXME: do this some other way! */ SET_SUBADR(SUB_SETUP1); outb(lp->setup, _XREG); if (lp->card_flags & ARC_CAN_10MBIT) { SET_SUBADR(SUB_SETUP2); outb(lp->setup2, _XREG); /* must now write the magic "restart operation" command */ mdelay(1); outb(0x18, _COMMAND); } lp->config = 0x20 | (lp->timeout << 3) | (lp->backplane << 2) | 1; /* Default 0x38 + register: Node ID */ SETCONF; outb(dev->dev_addr[0], _XREG); /* reserve the irq */ if (request_irq(dev->irq, arcnet_interrupt, shared, "arcnet (COM20020)", dev)) { BUGMSG(D_NORMAL, "Can't get IRQ %d!\n", dev->irq); return -ENODEV; } dev->base_addr = ioaddr; BUGMSG(D_NORMAL, "%s: station %02Xh found at %03lXh, IRQ %d.\n", lp->card_name, dev->dev_addr[0], dev->base_addr, dev->irq); if (lp->backplane) BUGMSG(D_NORMAL, "Using backplane mode.\n"); if (lp->timeout != 3) BUGMSG(D_NORMAL, "Using extended timeout value of %d.\n", lp->timeout); BUGMSG(D_NORMAL, "Using CKP %d - data rate %s.\n", lp->setup >> 1, clockrates[3 - ((lp->setup2 & 0xF0) >> 4) + ((lp->setup & 0x0F) >> 1)]); if (register_netdev(dev)) { free_irq(dev->irq, dev); return -EIO; } return 0; } /* * Do a hardware reset on the card, and set up necessary registers. * * This should be called as little as possible, because it disrupts the * token on the network (causes a RECON) and requires a significant delay. * * However, it does make sure the card is in a defined state. 
*/ static int com20020_reset(struct net_device *dev, int really_reset) { struct arcnet_local *lp = netdev_priv(dev); u_int ioaddr = dev->base_addr; u_char inbyte; BUGMSG(D_DEBUG, "%s: %d: %s: dev: %p, lp: %p, dev->name: %s\n", __FILE__,__LINE__,__func__,dev,lp,dev->name); BUGMSG(D_INIT, "Resetting %s (status=%02Xh)\n", dev->name, ASTATUS()); BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__); lp->config = TXENcfg | (lp->timeout << 3) | (lp->backplane << 2); /* power-up defaults */ SETCONF; BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__); if (really_reset) { /* reset the card */ ARCRESET; mdelay(RESETtime * 2); /* COM20020 seems to be slower sometimes */ } /* clear flags & end reset */ BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__); ACOMMAND(CFLAGScmd | RESETclear | CONFIGclear); /* verify that the ARCnet signature byte is present */ BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__); com20020_copy_from_card(dev, 0, 0, &inbyte, 1); BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__); if (inbyte != TESTvalue) { BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__); BUGMSG(D_NORMAL, "reset failed: TESTvalue not present.\n"); return 1; } /* enable extended (512-byte) packets */ ACOMMAND(CONFIGcmd | EXTconf); BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__); /* done! return success. 
*/ return 0; } static void com20020_setmask(struct net_device *dev, int mask) { u_int ioaddr = dev->base_addr; BUGMSG(D_DURING, "Setting mask to %x at %x\n",mask,ioaddr); AINTMASK(mask); } static void com20020_command(struct net_device *dev, int cmd) { u_int ioaddr = dev->base_addr; ACOMMAND(cmd); } static int com20020_status(struct net_device *dev) { u_int ioaddr = dev->base_addr; return ASTATUS() + (ADIAGSTATUS()<<8); } static void com20020_close(struct net_device *dev) { struct arcnet_local *lp = netdev_priv(dev); int ioaddr = dev->base_addr; /* disable transmitter */ lp->config &= ~TXENcfg; SETCONF; } /* Set or clear the multicast filter for this adaptor. * num_addrs == -1 Promiscuous mode, receive all packets * num_addrs == 0 Normal mode, clear multicast list * num_addrs > 0 Multicast mode, receive normal and MC packets, and do * best-effort filtering. * FIXME - do multicast stuff, not just promiscuous. */ static void com20020_set_mc_list(struct net_device *dev) { struct arcnet_local *lp = netdev_priv(dev); int ioaddr = dev->base_addr; if ((dev->flags & IFF_PROMISC) && (dev->flags & IFF_UP)) { /* Enable promiscuous mode */ if (!(lp->setup & PROMISCset)) BUGMSG(D_NORMAL, "Setting promiscuous flag...\n"); SET_SUBADR(SUB_SETUP1); lp->setup |= PROMISCset; outb(lp->setup, _XREG); } else /* Disable promiscuous mode, use normal mode */ { if ((lp->setup & PROMISCset)) BUGMSG(D_NORMAL, "Resetting promiscuous flag...\n"); SET_SUBADR(SUB_SETUP1); lp->setup &= ~PROMISCset; outb(lp->setup, _XREG); } } #if defined(CONFIG_ARCNET_COM20020_PCI_MODULE) || \ defined(CONFIG_ARCNET_COM20020_ISA_MODULE) || \ defined(CONFIG_ARCNET_COM20020_CS_MODULE) EXPORT_SYMBOL(com20020_check); EXPORT_SYMBOL(com20020_found); EXPORT_SYMBOL(com20020_netdev_ops); #endif MODULE_LICENSE("GPL"); #ifdef MODULE static int __init com20020_module_init(void) { BUGLVL(D_NORMAL) printk(VERSION); return 0; } static void __exit com20020_module_exit(void) { } module_init(com20020_module_init); 
module_exit(com20020_module_exit); #endif /* MODULE */
gpl-2.0
inyourface09/android_kernel_lge_fx3
drivers/firmware/google/memconsole.c
10720
3916
/* * memconsole.c * * Infrastructure for importing the BIOS memory based console * into the kernel log ringbuffer. * * Copyright 2010 Google Inc. All rights reserved. */ #include <linux/ctype.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/sysfs.h> #include <linux/kobject.h> #include <linux/module.h> #include <linux/dmi.h> #include <asm/bios_ebda.h> #define BIOS_MEMCONSOLE_V1_MAGIC 0xDEADBABE #define BIOS_MEMCONSOLE_V2_MAGIC (('M')|('C'<<8)|('O'<<16)|('N'<<24)) struct biosmemcon_ebda { u32 signature; union { struct { u8 enabled; u32 buffer_addr; u16 start; u16 end; u16 num_chars; u8 wrapped; } __packed v1; struct { u32 buffer_addr; /* Misdocumented as number of pages! */ u16 num_bytes; u16 start; u16 end; } __packed v2; }; } __packed; static char *memconsole_baseaddr; static size_t memconsole_length; static ssize_t memconsole_read(struct file *filp, struct kobject *kobp, struct bin_attribute *bin_attr, char *buf, loff_t pos, size_t count) { return memory_read_from_buffer(buf, count, &pos, memconsole_baseaddr, memconsole_length); } static struct bin_attribute memconsole_bin_attr = { .attr = {.name = "log", .mode = 0444}, .read = memconsole_read, }; static void found_v1_header(struct biosmemcon_ebda *hdr) { printk(KERN_INFO "BIOS console v1 EBDA structure found at %p\n", hdr); printk(KERN_INFO "BIOS console buffer at 0x%.8x, " "start = %d, end = %d, num = %d\n", hdr->v1.buffer_addr, hdr->v1.start, hdr->v1.end, hdr->v1.num_chars); memconsole_length = hdr->v1.num_chars; memconsole_baseaddr = phys_to_virt(hdr->v1.buffer_addr); } static void found_v2_header(struct biosmemcon_ebda *hdr) { printk(KERN_INFO "BIOS console v2 EBDA structure found at %p\n", hdr); printk(KERN_INFO "BIOS console buffer at 0x%.8x, " "start = %d, end = %d, num_bytes = %d\n", hdr->v2.buffer_addr, hdr->v2.start, hdr->v2.end, hdr->v2.num_bytes); memconsole_length = hdr->v2.end - hdr->v2.start; memconsole_baseaddr = phys_to_virt(hdr->v2.buffer_addr + 
hdr->v2.start); } /* * Search through the EBDA for the BIOS Memory Console, and * set the global variables to point to it. Return true if found. */ static bool found_memconsole(void) { unsigned int address; size_t length, cur; address = get_bios_ebda(); if (!address) { printk(KERN_INFO "BIOS EBDA non-existent.\n"); return false; } /* EBDA length is byte 0 of EBDA (in KB) */ length = *(u8 *)phys_to_virt(address); length <<= 10; /* convert to bytes */ /* * Search through EBDA for BIOS memory console structure * note: signature is not necessarily dword-aligned */ for (cur = 0; cur < length; cur++) { struct biosmemcon_ebda *hdr = phys_to_virt(address + cur); /* memconsole v1 */ if (hdr->signature == BIOS_MEMCONSOLE_V1_MAGIC) { found_v1_header(hdr); return true; } /* memconsole v2 */ if (hdr->signature == BIOS_MEMCONSOLE_V2_MAGIC) { found_v2_header(hdr); return true; } } printk(KERN_INFO "BIOS console EBDA structure not found!\n"); return false; } static struct dmi_system_id memconsole_dmi_table[] __initdata = { { .ident = "Google Board", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "Google, Inc."), }, }, {} }; MODULE_DEVICE_TABLE(dmi, memconsole_dmi_table); static int __init memconsole_init(void) { int ret; if (!dmi_check_system(memconsole_dmi_table)) return -ENODEV; if (!found_memconsole()) return -ENODEV; memconsole_bin_attr.size = memconsole_length; ret = sysfs_create_bin_file(firmware_kobj, &memconsole_bin_attr); return ret; } static void __exit memconsole_exit(void) { sysfs_remove_bin_file(firmware_kobj, &memconsole_bin_attr); } module_init(memconsole_init); module_exit(memconsole_exit); MODULE_AUTHOR("Google, Inc."); MODULE_LICENSE("GPL");
gpl-2.0
netmodule/kernel-zx3
fs/cifs/cifssmb.c
225
195519
/* * fs/cifs/cifssmb.c * * Copyright (C) International Business Machines Corp., 2002,2010 * Author(s): Steve French (sfrench@us.ibm.com) * * Contains the routines for constructing the SMB PDUs themselves * * This library is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published * by the Free Software Foundation; either version 2.1 of the License, or * (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* SMB/CIFS PDU handling routines here - except for leftovers in connect.c */ /* These are mostly routines that operate on a pathname, or on a tree id */ /* (mounted volume), but there are eight handle based routines which must be */ /* treated slightly differently for reconnection purposes since we never */ /* want to reuse a stale file handle and only the caller knows the file info */ #include <linux/fs.h> #include <linux/kernel.h> #include <linux/vfs.h> #include <linux/slab.h> #include <linux/posix_acl_xattr.h> #include <linux/pagemap.h> #include <linux/swap.h> #include <linux/task_io_accounting_ops.h> #include <asm/uaccess.h> #include "cifspdu.h" #include "cifsglob.h" #include "cifsacl.h" #include "cifsproto.h" #include "cifs_unicode.h" #include "cifs_debug.h" #include "fscache.h" #ifdef CONFIG_CIFS_POSIX static struct { int index; char *name; } protocols[] = { #ifdef CONFIG_CIFS_WEAK_PW_HASH {LANMAN_PROT, "\2LM1.2X002"}, {LANMAN2_PROT, "\2LANMAN2.1"}, #endif /* weak password hashing for legacy clients */ {CIFS_PROT, "\2NT LM 0.12"}, {POSIX_PROT, 
"\2POSIX 2"}, {BAD_PROT, "\2"} }; #else static struct { int index; char *name; } protocols[] = { #ifdef CONFIG_CIFS_WEAK_PW_HASH {LANMAN_PROT, "\2LM1.2X002"}, {LANMAN2_PROT, "\2LANMAN2.1"}, #endif /* weak password hashing for legacy clients */ {CIFS_PROT, "\2NT LM 0.12"}, {BAD_PROT, "\2"} }; #endif /* define the number of elements in the cifs dialect array */ #ifdef CONFIG_CIFS_POSIX #ifdef CONFIG_CIFS_WEAK_PW_HASH #define CIFS_NUM_PROT 4 #else #define CIFS_NUM_PROT 2 #endif /* CIFS_WEAK_PW_HASH */ #else /* not posix */ #ifdef CONFIG_CIFS_WEAK_PW_HASH #define CIFS_NUM_PROT 3 #else #define CIFS_NUM_PROT 1 #endif /* CONFIG_CIFS_WEAK_PW_HASH */ #endif /* CIFS_POSIX */ /* * Mark as invalid, all open files on tree connections since they * were closed when session to server was lost. */ void cifs_mark_open_files_invalid(struct cifs_tcon *tcon) { struct cifsFileInfo *open_file = NULL; struct list_head *tmp; struct list_head *tmp1; /* list all files open on tree connection and mark them invalid */ spin_lock(&cifs_file_list_lock); list_for_each_safe(tmp, tmp1, &tcon->openFileList) { open_file = list_entry(tmp, struct cifsFileInfo, tlist); open_file->invalidHandle = true; open_file->oplock_break_cancelled = true; } spin_unlock(&cifs_file_list_lock); /* * BB Add call to invalidate_inodes(sb) for all superblocks mounted * to this tcon. 
*/ } /* reconnect the socket, tcon, and smb session if needed */ static int cifs_reconnect_tcon(struct cifs_tcon *tcon, int smb_command) { int rc; struct cifs_ses *ses; struct TCP_Server_Info *server; struct nls_table *nls_codepage; /* * SMBs NegProt, SessSetup, uLogoff do not have tcon yet so check for * tcp and smb session status done differently for those three - in the * calling routine */ if (!tcon) return 0; ses = tcon->ses; server = ses->server; /* * only tree disconnect, open, and write, (and ulogoff which does not * have tcon) are allowed as we start force umount */ if (tcon->tidStatus == CifsExiting) { if (smb_command != SMB_COM_WRITE_ANDX && smb_command != SMB_COM_OPEN_ANDX && smb_command != SMB_COM_TREE_DISCONNECT) { cifs_dbg(FYI, "can not send cmd %d while umounting\n", smb_command); return -ENODEV; } } /* * Give demultiplex thread up to 10 seconds to reconnect, should be * greater than cifs socket timeout which is 7 seconds */ while (server->tcpStatus == CifsNeedReconnect) { wait_event_interruptible_timeout(server->response_q, (server->tcpStatus != CifsNeedReconnect), 10 * HZ); /* are we still trying to reconnect? */ if (server->tcpStatus != CifsNeedReconnect) break; /* * on "soft" mounts we wait once. Hard mounts keep * retrying until process is killed or server comes * back on-line */ if (!tcon->retry) { cifs_dbg(FYI, "gave up waiting on reconnect in smb_init\n"); return -EHOSTDOWN; } } if (!ses->need_reconnect && !tcon->need_reconnect) return 0; nls_codepage = load_nls_default(); /* * need to prevent multiple threads trying to simultaneously * reconnect the same SMB session */ mutex_lock(&ses->session_mutex); rc = cifs_negotiate_protocol(0, ses); if (rc == 0 && ses->need_reconnect) rc = cifs_setup_session(0, ses, nls_codepage); /* do we need to reconnect tcon? 
	 */
	if (rc || !tcon->need_reconnect) {
		mutex_unlock(&ses->session_mutex);
		goto out;
	}

	cifs_mark_open_files_invalid(tcon);
	rc = CIFSTCon(0, ses, tcon->treeName, tcon, nls_codepage);
	mutex_unlock(&ses->session_mutex);
	cifs_dbg(FYI, "reconnect tcon rc = %d\n", rc);

	if (rc)
		goto out;

	/*
	 * FIXME: check if wsize needs updated due to negotiated smb buffer
	 * size shrinking
	 */
	atomic_inc(&tconInfoReconnectCount);

	/* tell server Unix caps we support */
	if (ses->capabilities & CAP_UNIX)
		reset_cifs_unix_caps(0, tcon, NULL, NULL);

	/*
	 * Removed call to reopen open files here. It is safer (and faster) to
	 * reopen files one at a time as needed in read and write.
	 *
	 * FIXME: what about file locks? don't we need to reclaim them ASAP?
	 */

out:
	/*
	 * Check if handle based operation so we know whether we can continue
	 * or not without returning to caller to reset file handle
	 */
	switch (smb_command) {
	case SMB_COM_READ_ANDX:
	case SMB_COM_WRITE_ANDX:
	case SMB_COM_CLOSE:
	case SMB_COM_FIND_CLOSE2:
	case SMB_COM_LOCKING_ANDX:
		/* handle-based commands cannot simply be retried here; the
		   caller sees -EAGAIN and must re-acquire a valid handle */
		rc = -EAGAIN;
	}

	unload_nls(nls_codepage);
	return rc;
}

/* Allocate and return pointer to an SMB request buffer, and set basic
   SMB information in the SMB header.  If the return code is zero, this
   function must have filled in request_buf pointer */
static int
small_smb_init(int smb_command, int wct, struct cifs_tcon *tcon,
		void **request_buf)
{
	int rc;

	/* re-establish the tree connection first if it was torn down */
	rc = cifs_reconnect_tcon(tcon, smb_command);
	if (rc)
		return rc;

	*request_buf = cifs_small_buf_get();
	if (*request_buf == NULL) {
		/* BB should we add a retry in here if not a writepage?
		 */
		return -ENOMEM;
	}

	header_assemble((struct smb_hdr *) *request_buf, smb_command,
			tcon, wct);

	if (tcon != NULL)
		cifs_stats_inc(&tcon->num_smbs_sent);

	return 0;
}

/* Like small_smb_init() but for session-level requests with no tcon;
   the ses is used to stamp the Mid and the Unicode/NT-status flags.
   On zero return, *request_buf has been filled in. */
int
small_smb_init_no_tc(const int smb_command, const int wct,
		     struct cifs_ses *ses, void **request_buf)
{
	int rc;
	struct smb_hdr *buffer;

	rc = small_smb_init(smb_command, wct, NULL, request_buf);
	if (rc)
		return rc;

	buffer = (struct smb_hdr *)*request_buf;
	buffer->Mid = get_next_mid(ses->server);
	if (ses->capabilities & CAP_UNICODE)
		buffer->Flags2 |= SMBFLG2_UNICODE;
	if (ses->capabilities & CAP_STATUS32)
		buffer->Flags2 |= SMBFLG2_ERR_STATUS;

	/* uid, tid can stay at zero as set in header assemble */

	/* BB add support for turning on the signing when
	this function is used after 1st of session setup requests */

	return rc;
}

/* If the return code is zero, this function must fill in request_buf pointer */
static int
__smb_init(int smb_command, int wct, struct cifs_tcon *tcon,
			void **request_buf, void **response_buf)
{
	*request_buf = cifs_buf_get();
	if (*request_buf == NULL) {
		/* BB should we add a retry in here if not a writepage?
		 */
		return -ENOMEM;
	}
	/* Although the original thought was we needed the response buf for  */
	/* potential retries of smb operations it turns out we can determine */
	/* from the mid flags when the request buffer can be resent without  */
	/* having to use a second distinct buffer for the response */
	if (response_buf)
		*response_buf = *request_buf;

	header_assemble((struct smb_hdr *) *request_buf, smb_command, tcon,
			wct);

	if (tcon != NULL)
		cifs_stats_inc(&tcon->num_smbs_sent);

	return 0;
}

/* If the return code is zero, this function must fill in request_buf pointer */
static int
smb_init(int smb_command, int wct, struct cifs_tcon *tcon,
	 void **request_buf, void **response_buf)
{
	int rc;

	rc = cifs_reconnect_tcon(tcon, smb_command);
	if (rc)
		return rc;

	return __smb_init(smb_command, wct, tcon, request_buf, response_buf);
}

/* As smb_init() but fails fast with -EHOSTDOWN instead of attempting
   a reconnect when the session or tree connection is marked down. */
static int
smb_init_no_reconnect(int smb_command, int wct, struct cifs_tcon *tcon,
			void **request_buf, void **response_buf)
{
	if (tcon->ses->need_reconnect || tcon->need_reconnect)
		return -EHOSTDOWN;

	return __smb_init(smb_command, wct, tcon, request_buf, response_buf);
}

/* Sanity-check a Transaction2 response: plausible word count, and
   parameter/data offsets and counts that stay inside the negotiated
   buffer.  Returns 0 when sane, -EINVAL (after dumping the SMB)
   otherwise. */
static int validate_t2(struct smb_t2_rsp *pSMB)
{
	unsigned int total_size;

	/* check for plausible wct */
	if (pSMB->hdr.WordCount < 10)
		goto vt2_err;

	/* check for parm and data offset going beyond end of smb */
	if (get_unaligned_le16(&pSMB->t2_rsp.ParameterOffset) > 1024 ||
	    get_unaligned_le16(&pSMB->t2_rsp.DataOffset) > 1024)
		goto vt2_err;

	total_size = get_unaligned_le16(&pSMB->t2_rsp.ParameterCount);
	if (total_size >= 512)
		goto vt2_err;

	/* check that bcc is at least as big as parms + data, and that it is
	 * less than negotiated smb buffer
	 */
	total_size += get_unaligned_le16(&pSMB->t2_rsp.DataCount);
	if (total_size > get_bcc(&pSMB->hdr) ||
	    total_size >= CIFSMaxBufSize + MAX_CIFS_HDR_SIZE)
		goto vt2_err;

	return 0;
vt2_err:
	cifs_dump_mem("Invalid transact2 SMB: ", (char *)pSMB,
		sizeof(struct smb_t2_rsp) + 16);
	return -EINVAL;
}

static int
decode_ext_sec_blob(struct cifs_ses *ses, NEGOTIATE_RSP
 *pSMBr)
{
	int	rc = 0;
	u16	count;
	char	*guid = pSMBr->u.extended_response.GUID;
	struct TCP_Server_Info *server = ses->server;

	/* the blob must at least hold the server GUID; anything beyond the
	   GUID is a SPNEGO negTokenInit to be decoded below */
	count = get_bcc(&pSMBr->hdr);
	if (count < SMB1_CLIENT_GUID_SIZE)
		return -EIO;

	spin_lock(&cifs_tcp_ses_lock);
	if (server->srv_count > 1) {
		/* shared server: only log/refresh the GUID if it changed */
		spin_unlock(&cifs_tcp_ses_lock);
		if (memcmp(server->server_GUID, guid, SMB1_CLIENT_GUID_SIZE) != 0) {
			cifs_dbg(FYI, "server UID changed\n");
			memcpy(server->server_GUID, guid, SMB1_CLIENT_GUID_SIZE);
		}
	} else {
		spin_unlock(&cifs_tcp_ses_lock);
		memcpy(server->server_GUID, guid, SMB1_CLIENT_GUID_SIZE);
	}

	if (count == SMB1_CLIENT_GUID_SIZE) {
		/* GUID only, no security blob: fall back to raw NTLMSSP */
		server->sec_ntlmssp = true;
	} else {
		count -= SMB1_CLIENT_GUID_SIZE;
		rc = decode_negTokenInit(
			pSMBr->u.extended_response.SecurityBlob, count, server);
		if (rc != 1)
			return -EINVAL;
	}

	return 0;
}

/* Reconcile the server's signing requirements with the mount options and
   global security flags; sets server->sign when signing will be used.
   Returns 0, or -ENOTSUPP when one side requires signing and the other
   side refuses it. */
int
cifs_enable_signing(struct TCP_Server_Info *server, bool mnt_sign_required)
{
	bool srv_sign_required = server->sec_mode & server->vals->signing_required;
	bool srv_sign_enabled = server->sec_mode & server->vals->signing_enabled;
	bool mnt_sign_enabled = global_secflags & CIFSSEC_MAY_SIGN;

	/*
	 * Is signing required by mnt options? If not then check
	 * global_secflags to see if it is there.
	 */
	if (!mnt_sign_required)
		mnt_sign_required = ((global_secflags & CIFSSEC_MUST_SIGN) ==
						CIFSSEC_MUST_SIGN);

	/*
	 * If signing is required then it's automatically enabled too,
	 * otherwise, check to see if the secflags allow it.
	 */
	mnt_sign_enabled = mnt_sign_required ? mnt_sign_required :
				(global_secflags & CIFSSEC_MAY_SIGN);

	/* If server requires signing, does client allow it? */
	if (srv_sign_required) {
		if (!mnt_sign_enabled) {
			cifs_dbg(VFS, "Server requires signing, but it's disabled in SecurityFlags!");
			return -ENOTSUPP;
		}
		server->sign = true;
	}

	/* If client requires signing, does server allow it?
	 */
	if (mnt_sign_required) {
		if (!srv_sign_enabled) {
			cifs_dbg(VFS, "Server does not support signing!");
			return -ENOTSUPP;
		}
		server->sign = true;
	}

	return 0;
}

#ifdef CONFIG_CIFS_WEAK_PW_HASH
/* Decode a LANMAN/LANMAN2 negotiate response into *server (security
   mode, credits, buffer sizes, timezone adjustment, crypt key). */
static int
decode_lanman_negprot_rsp(struct TCP_Server_Info *server, NEGOTIATE_RSP *pSMBr)
{
	__s16 tmp;
	struct lanman_neg_rsp *rsp = (struct lanman_neg_rsp *)pSMBr;

	if (server->dialect != LANMAN_PROT && server->dialect != LANMAN2_PROT)
		return -EOPNOTSUPP;

	server->sec_mode = le16_to_cpu(rsp->SecurityMode);
	server->maxReq = min_t(unsigned int,
			       le16_to_cpu(rsp->MaxMpxCount),
			       cifs_max_pending);
	set_credits(server, server->maxReq);
	server->maxBuf = le16_to_cpu(rsp->MaxBufSize);
	/* even though we do not use raw we might as well set this
	accurately, in case we ever find a need for it */
	if ((le16_to_cpu(rsp->RawMode) & RAW_ENABLE) == RAW_ENABLE) {
		server->max_rw = 0xFF00;
		server->capabilities = CAP_MPX_MODE | CAP_RAW_MODE;
	} else {
		server->max_rw = 0;/* do not need to use raw anyway */
		server->capabilities = CAP_MPX_MODE;
	}
	tmp = (__s16)le16_to_cpu(rsp->ServerTimeZone);
	if (tmp == -1) {
		/* OS/2 often does not set timezone therefore
		 * we must use server time to calc time zone.
		 * Could deviate slightly from the right zone.
		 * Smallest defined timezone difference is 15 minutes
		 * (i.e. Nepal).  Rounding up/down is done to match
		 * this requirement.
		 */
		int val, seconds, remain, result;
		struct timespec ts, utc;
		utc = CURRENT_TIME;
		ts = cnvrtDosUnixTm(rsp->SrvTime.Date,
				    rsp->SrvTime.Time, 0);
		cifs_dbg(FYI, "SrvTime %d sec since 1970 (utc: %d) diff: %d\n",
			 (int)ts.tv_sec, (int)utc.tv_sec,
			 (int)(utc.tv_sec - ts.tv_sec));
		val = (int)(utc.tv_sec - ts.tv_sec);
		seconds = abs(val);
		/* round the offset to the nearest MIN_TZ_ADJ multiple */
		result = (seconds / MIN_TZ_ADJ) * MIN_TZ_ADJ;
		remain = seconds % MIN_TZ_ADJ;
		if (remain >= (MIN_TZ_ADJ / 2))
			result += MIN_TZ_ADJ;
		if (val < 0)
			result = -result;
		server->timeAdj = result;
	} else {
		server->timeAdj = (int)tmp;
		server->timeAdj *= 60; /* also in seconds */
	}
	cifs_dbg(FYI, "server->timeAdj: %d seconds\n", server->timeAdj);

	/* BB get server time for time conversions and add
	code to use it and timezone since this is not UTC */

	if (rsp->EncryptionKeyLength ==
			cpu_to_le16(CIFS_CRYPTO_KEY_SIZE)) {
		memcpy(server->cryptkey, rsp->EncryptionKey,
		       CIFS_CRYPTO_KEY_SIZE);
	} else if (server->sec_mode & SECMODE_PW_ENCRYPT) {
		return -EIO; /* need cryptkey unless plain text */
	}

	cifs_dbg(FYI, "LANMAN negotiated\n");
	return 0;
}
#else
static inline int
decode_lanman_negprot_rsp(struct TCP_Server_Info *server, NEGOTIATE_RSP *pSMBr)
{
	cifs_dbg(VFS, "mount failed, cifs module not built with CIFS_WEAK_PW_HASH support\n");
	return -EOPNOTSUPP;
}
#endif

/* Should the extended-security flag be set in the negotiate request
   for this (possibly unspecified) authentication type? */
static bool
should_set_ext_sec_flag(enum securityEnum sectype)
{
	switch (sectype) {
	case RawNTLMSSP:
	case Kerberos:
		return true;
	case Unspecified:
		if (global_secflags &
		    (CIFSSEC_MAY_KRB5 | CIFSSEC_MAY_NTLMSSP))
			return true;
		/* Fallthrough */
	default:
		return false;
	}
}

/* Send SMB_COM_NEGOTIATE and decode the response (dialect, security
   mode, credits, buffer sizes, crypt key or extended security blob),
   then enable signing as negotiated. */
int
CIFSSMBNegotiate(const unsigned int xid, struct cifs_ses *ses)
{
	NEGOTIATE_REQ *pSMB;
	NEGOTIATE_RSP *pSMBr;
	int rc = 0;
	int bytes_returned;
	int i;
	struct TCP_Server_Info *server = ses->server;
	u16 count;

	if (!server) {
		WARN(1, "%s: server is NULL!\n", __func__);
		return -EIO;
	}

	rc = smb_init(SMB_COM_NEGOTIATE, 0, NULL /* no tcon yet */ ,
		      (void **) &pSMB, (void **) &pSMBr);
	if (rc)
		return rc;

	pSMB->hdr.Mid = get_next_mid(server);
	pSMB->hdr.Flags2 |= (SMBFLG2_UNICODE | SMBFLG2_ERR_STATUS);

	if (should_set_ext_sec_flag(ses->sectype)) {
		cifs_dbg(FYI, "Requesting extended security.");
		pSMB->hdr.Flags2 |= SMBFLG2_EXT_SEC;
	}

	/* append the NUL-separated dialect strings we offer */
	count = 0;
	for (i = 0; i < CIFS_NUM_PROT; i++) {
		strncpy(pSMB->DialectsArray+count, protocols[i].name, 16);
		count += strlen(protocols[i].name) + 1;
		/* null at end of source and target buffers anyway */
	}
	inc_rfc1001_len(pSMB, count);
	pSMB->ByteCount = cpu_to_le16(count);

	rc = SendReceive(xid, ses, (struct smb_hdr *) pSMB,
			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
	if (rc != 0)
		goto neg_err_exit;

	server->dialect = le16_to_cpu(pSMBr->DialectIndex);
	cifs_dbg(FYI, "Dialect: %d\n", server->dialect);
	/* Check wct = 1 error case */
	if ((pSMBr->hdr.WordCount < 13) || (server->dialect == BAD_PROT)) {
		/* core returns wct = 1, but we do not ask for core - otherwise
		small wct just comes when dialect index is -1 indicating we
		could not negotiate a common dialect */
		rc = -EOPNOTSUPP;
		goto neg_err_exit;
	} else if (pSMBr->hdr.WordCount == 13) {
		server->negflavor = CIFS_NEGFLAVOR_LANMAN;
		rc = decode_lanman_negprot_rsp(server, pSMBr);
		goto signing_check;
	} else if (pSMBr->hdr.WordCount != 17) {
		/* unknown wct */
		rc = -EOPNOTSUPP;
		goto neg_err_exit;
	}
	/* else wct == 17, NTLM or better */

	server->sec_mode = pSMBr->SecurityMode;
	if ((server->sec_mode & SECMODE_USER) == 0)
		cifs_dbg(FYI, "share mode security\n");

	/* one byte, so no need to convert this or EncryptionKeyLen from
	   little endian */
	server->maxReq = min_t(unsigned int, le16_to_cpu(pSMBr->MaxMpxCount),
			       cifs_max_pending);
	set_credits(server, server->maxReq);
	/* probably no need to store and check maxvcs */
	server->maxBuf = le32_to_cpu(pSMBr->MaxBufferSize);
	server->max_rw = le32_to_cpu(pSMBr->MaxRawSize);
	cifs_dbg(NOISY, "Max buf = %d\n", ses->server->maxBuf);
	server->capabilities = le32_to_cpu(pSMBr->Capabilities);
	server->timeAdj = (int)(__s16)le16_to_cpu(pSMBr->ServerTimeZone);
	server->timeAdj *= 60;

	if (pSMBr->EncryptionKeyLength == CIFS_CRYPTO_KEY_SIZE) {
		server->negflavor = CIFS_NEGFLAVOR_UNENCAP;
		memcpy(ses->server->cryptkey, pSMBr->u.EncryptionKey,
		       CIFS_CRYPTO_KEY_SIZE);
	} else if ((pSMBr->hdr.Flags2 & SMBFLG2_EXT_SEC ||
			server->capabilities & CAP_EXTENDED_SECURITY) &&
				(pSMBr->EncryptionKeyLength == 0)) {
		server->negflavor = CIFS_NEGFLAVOR_EXTENDED;
		rc = decode_ext_sec_blob(ses, pSMBr);
	} else if (server->sec_mode & SECMODE_PW_ENCRYPT) {
		rc = -EIO; /* no crypt key only if plain text pwd */
	} else {
		server->negflavor = CIFS_NEGFLAVOR_UNENCAP;
		server->capabilities &= ~CAP_EXTENDED_SECURITY;
	}

signing_check:
	if (!rc)
		rc = cifs_enable_signing(server, ses->sign);
neg_err_exit:
	cifs_buf_release(pSMB);

	cifs_dbg(FYI, "negprot rc %d\n", rc);
	return rc;
}

/* Send SMB_COM_TREE_DISCONNECT for this tcon; quietly succeeds when the
   tree or session is already being reconnected. */
int
CIFSSMBTDis(const unsigned int xid, struct cifs_tcon *tcon)
{
	struct smb_hdr *smb_buffer;
	int rc = 0;

	cifs_dbg(FYI, "In tree disconnect\n");

	/* BB: do we need to check this? These should never be NULL. */
	if ((tcon->ses == NULL) || (tcon->ses->server == NULL))
		return -EIO;

	/*
	 * No need to return error on this operation if tid invalidated and
	 * closed on server already e.g. due to tcp session crashing. Also,
	 * the tcon is no longer on the list, so no need to take lock before
	 * checking this.
	 */
	if ((tcon->need_reconnect) || (tcon->ses->need_reconnect))
		return 0;

	rc = small_smb_init(SMB_COM_TREE_DISCONNECT, 0, tcon,
			    (void **)&smb_buffer);
	if (rc)
		return rc;

	rc = SendReceiveNoRsp(xid, tcon->ses, (char *)smb_buffer, 0);
	if (rc)
		cifs_dbg(FYI, "Tree disconnect failed %d\n", rc);

	/* No need to return error on this operation if tid invalidated and
	   closed on server already e.g. due to tcp session crashing */
	if (rc == -EAGAIN)
		rc = 0;

	return rc;
}

/*
 * This is a no-op for now. We're not really interested in the reply, but
 * rather in the fact that the server sent one and that server->lstrp
 * gets updated.
 *
 * FIXME: maybe we should consider checking that the reply matches request?
 */
static void
cifs_echo_callback(struct mid_q_entry *mid)
{
	struct TCP_Server_Info *server = mid->callback_data;

	DeleteMidQEntry(mid);
	/* return the credit consumed by the echo request */
	add_credits(server, 1, CIFS_ECHO_OP);
}

/* Send an asynchronous SMB_COM_ECHO (keep-alive); the reply itself is
   discarded by cifs_echo_callback(). */
int
CIFSSMBEcho(struct TCP_Server_Info *server)
{
	ECHO_REQ *smb;
	int rc = 0;
	struct kvec iov;
	struct smb_rqst rqst = { .rq_iov = &iov,
				 .rq_nvec = 1 };

	cifs_dbg(FYI, "In echo request\n");

	rc = small_smb_init(SMB_COM_ECHO, 0, NULL, (void **)&smb);
	if (rc)
		return rc;

	/* set up echo request */
	smb->hdr.Tid = 0xffff;
	smb->hdr.WordCount = 1;
	put_unaligned_le16(1, &smb->EchoCount);
	put_bcc(1, &smb->hdr);
	smb->Data[0] = 'a';
	inc_rfc1001_len(smb, 3);
	iov.iov_base = smb;
	iov.iov_len = be32_to_cpu(smb->hdr.smb_buf_length) + 4;

	rc = cifs_call_async(server, &rqst, NULL, cifs_echo_callback,
			     server, CIFS_ASYNC_OP | CIFS_ECHO_OP);
	if (rc)
		cifs_dbg(FYI, "Echo request failed: %d\n", rc);

	cifs_small_buf_release(smb);

	return rc;
}

/* Send SMB_COM_LOGOFF_ANDX to tear down the SMB session; treated as
   success if the session is already dead on the server side. */
int
CIFSSMBLogoff(const unsigned int xid, struct cifs_ses *ses)
{
	LOGOFF_ANDX_REQ *pSMB;
	int rc = 0;

	cifs_dbg(FYI, "In SMBLogoff for session disconnect\n");

	/*
	 * BB: do we need to check validity of ses and server? They should
	 * always be valid since we have an active reference. If not, that
	 * should probably be a BUG()
	 */
	if (!ses || !ses->server)
		return -EIO;

	mutex_lock(&ses->session_mutex);
	if (ses->need_reconnect)
		goto session_already_dead; /* no need to send SMBlogoff if uid
					      already closed due to reconnect */
	rc = small_smb_init(SMB_COM_LOGOFF_ANDX, 2, NULL, (void **)&pSMB);
	if (rc) {
		mutex_unlock(&ses->session_mutex);
		return rc;
	}

	pSMB->hdr.Mid = get_next_mid(ses->server);

	if (ses->server->sign)
		pSMB->hdr.Flags2 |= SMBFLG2_SECURITY_SIGNATURE;

	pSMB->hdr.Uid = ses->Suid;

	pSMB->AndXCommand = 0xFF;
	rc = SendReceiveNoRsp(xid, ses, (char *) pSMB, 0);
session_already_dead:
	mutex_unlock(&ses->session_mutex);

	/* if session dead then we do not need to do ulogoff,
		since server closed smb session, no sense reporting
		error */
	if (rc == -EAGAIN)
		rc = 0;
	return rc;
}

/* Delete a file (or other inode type per @type) via the POSIX unlink
   Transaction2 info level; retries on -EAGAIN. */
int
CIFSPOSIXDelFile(const unsigned int xid, struct cifs_tcon *tcon,
		 const char *fileName, __u16 type,
		 const struct nls_table *nls_codepage, int remap)
{
	TRANSACTION2_SPI_REQ *pSMB = NULL;
	TRANSACTION2_SPI_RSP *pSMBr = NULL;
	struct unlink_psx_rq *pRqD;
	int name_len;
	int rc = 0;
	int bytes_returned = 0;
	__u16 params, param_offset, offset, byte_count;

	cifs_dbg(FYI, "In POSIX delete\n");
PsxDelete:
	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
		      (void **) &pSMBr);
	if (rc)
		return rc;

	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
		name_len =
		    cifsConvertToUTF16((__le16 *) pSMB->FileName, fileName,
				       PATH_MAX, nls_codepage, remap);
		name_len++;	/* trailing null */
		name_len *= 2;
	} else { /* BB add path length overrun check */
		name_len = strnlen(fileName, PATH_MAX);
		name_len++;	/* trailing null */
		strncpy(pSMB->FileName, fileName, name_len);
	}

	params = 6 + name_len;
	pSMB->MaxParameterCount = cpu_to_le16(2);
	pSMB->MaxDataCount = 0; /* BB double check this with jra */
	pSMB->MaxSetupCount = 0;
	pSMB->Reserved = 0;
	pSMB->Flags = 0;
	pSMB->Timeout = 0;
	pSMB->Reserved2 = 0;
	param_offset = offsetof(struct smb_com_transaction2_spi_req,
				InformationLevel) - 4;
	offset = param_offset + params;

	/*
	   Setup pointer to Request Data (inode type) */
	pRqD = (struct unlink_psx_rq *)(((char *)&pSMB->hdr.Protocol) + offset);
	pRqD->type = cpu_to_le16(type);
	pSMB->ParameterOffset = cpu_to_le16(param_offset);
	pSMB->DataOffset = cpu_to_le16(offset);
	pSMB->SetupCount = 1;
	pSMB->Reserved3 = 0;
	pSMB->SubCommand = cpu_to_le16(TRANS2_SET_PATH_INFORMATION);
	byte_count = 3 /* pad */  + params + sizeof(struct unlink_psx_rq);

	pSMB->DataCount = cpu_to_le16(sizeof(struct unlink_psx_rq));
	pSMB->TotalDataCount = cpu_to_le16(sizeof(struct unlink_psx_rq));
	pSMB->ParameterCount = cpu_to_le16(params);
	pSMB->TotalParameterCount = pSMB->ParameterCount;
	pSMB->InformationLevel = cpu_to_le16(SMB_POSIX_UNLINK);
	pSMB->Reserved4 = 0;
	inc_rfc1001_len(pSMB, byte_count);
	pSMB->ByteCount = cpu_to_le16(byte_count);
	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
	if (rc)
		cifs_dbg(FYI, "Posix delete returned %d\n", rc);
	cifs_buf_release(pSMB);

	cifs_stats_inc(&tcon->stats.cifs_stats.num_deletes);

	if (rc == -EAGAIN)
		goto PsxDelete;

	return rc;
}

/* Delete a file via legacy SMB_COM_DELETE; retries on -EAGAIN. */
int
CIFSSMBDelFile(const unsigned int xid, struct cifs_tcon *tcon,
	       const char *name, struct cifs_sb_info *cifs_sb)
{
	DELETE_FILE_REQ *pSMB = NULL;
	DELETE_FILE_RSP *pSMBr = NULL;
	int rc = 0;
	int bytes_returned;
	int name_len;
	int remap = cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR;

DelFileRetry:
	rc = smb_init(SMB_COM_DELETE, 1, tcon, (void **) &pSMB,
		      (void **) &pSMBr);
	if (rc)
		return rc;

	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
		name_len = cifsConvertToUTF16((__le16 *) pSMB->fileName, name,
					      PATH_MAX, cifs_sb->local_nls,
					      remap);
		name_len++;	/* trailing null */
		name_len *= 2;
	} else {	/* BB improve check for buffer overruns BB */
		name_len = strnlen(name, PATH_MAX);
		name_len++;	/* trailing null */
		strncpy(pSMB->fileName, name, name_len);
	}
	pSMB->SearchAttributes =
	    cpu_to_le16(ATTR_READONLY | ATTR_HIDDEN | ATTR_SYSTEM);
	pSMB->BufferFormat = 0x04;
	inc_rfc1001_len(pSMB, name_len + 1);
	pSMB->ByteCount = cpu_to_le16(name_len + 1);
	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
	cifs_stats_inc(&tcon->stats.cifs_stats.num_deletes);
	if (rc)
		cifs_dbg(FYI, "Error in RMFile = %d\n", rc);

	cifs_buf_release(pSMB);
	if (rc == -EAGAIN)
		goto DelFileRetry;

	return rc;
}

/* Remove a directory via SMB_COM_DELETE_DIRECTORY; retries on -EAGAIN. */
int
CIFSSMBRmDir(const unsigned int xid, struct cifs_tcon *tcon, const char *name,
	     struct cifs_sb_info *cifs_sb)
{
	DELETE_DIRECTORY_REQ *pSMB = NULL;
	DELETE_DIRECTORY_RSP *pSMBr = NULL;
	int rc = 0;
	int bytes_returned;
	int name_len;
	int remap = cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR;

	cifs_dbg(FYI, "In CIFSSMBRmDir\n");
RmDirRetry:
	rc = smb_init(SMB_COM_DELETE_DIRECTORY, 0, tcon, (void **) &pSMB,
		      (void **) &pSMBr);
	if (rc)
		return rc;

	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
		name_len = cifsConvertToUTF16((__le16 *) pSMB->DirName, name,
					      PATH_MAX, cifs_sb->local_nls,
					      remap);
		name_len++;	/* trailing null */
		name_len *= 2;
	} else {	/* BB improve check for buffer overruns BB */
		name_len = strnlen(name, PATH_MAX);
		name_len++;	/* trailing null */
		strncpy(pSMB->DirName, name, name_len);
	}

	pSMB->BufferFormat = 0x04;
	inc_rfc1001_len(pSMB, name_len + 1);
	pSMB->ByteCount = cpu_to_le16(name_len + 1);
	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
	cifs_stats_inc(&tcon->stats.cifs_stats.num_rmdirs);
	if (rc)
		cifs_dbg(FYI, "Error in RMDir = %d\n", rc);

	cifs_buf_release(pSMB);
	if (rc == -EAGAIN)
		goto RmDirRetry;
	return rc;
}

/* Create a directory via SMB_COM_CREATE_DIRECTORY; retries on -EAGAIN. */
int
CIFSSMBMkDir(const unsigned int xid, struct cifs_tcon *tcon, const char *name,
	     struct cifs_sb_info *cifs_sb)
{
	int rc = 0;
	CREATE_DIRECTORY_REQ *pSMB = NULL;
	CREATE_DIRECTORY_RSP *pSMBr = NULL;
	int bytes_returned;
	int name_len;
	int remap = cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR;

	cifs_dbg(FYI, "In CIFSSMBMkDir\n");
MkDirRetry:
	rc = smb_init(SMB_COM_CREATE_DIRECTORY, 0, tcon, (void **) &pSMB,
		      (void **) &pSMBr);
	if (rc)
		return rc;

	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
		name_len = cifsConvertToUTF16((__le16 *) pSMB->DirName, name,
					      PATH_MAX, cifs_sb->local_nls,
					      remap);
		name_len++;	/* trailing null */
		name_len *= 2;
	} else {	/* BB improve check for buffer overruns BB */
		name_len = strnlen(name, PATH_MAX);
		name_len++;	/* trailing null */
		strncpy(pSMB->DirName, name, name_len);
	}

	pSMB->BufferFormat = 0x04;
	inc_rfc1001_len(pSMB, name_len + 1);
	pSMB->ByteCount = cpu_to_le16(name_len + 1);
	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
	cifs_stats_inc(&tcon->stats.cifs_stats.num_mkdirs);
	if (rc)
		cifs_dbg(FYI, "Error in Mkdir = %d\n", rc);

	cifs_buf_release(pSMB);
	if (rc == -EAGAIN)
		goto MkDirRetry;
	return rc;
}

/* Open or create @name via the POSIX open Transaction2 info level.
   On success fills *netfid, *pOplock (including CIFS_CREATE_ACTION when
   the file was newly created) and, when the server returned it, the
   FILE_UNIX_BASIC_INFO in *pRetData.  Retries on -EAGAIN. */
int
CIFSPOSIXCreate(const unsigned int xid, struct cifs_tcon *tcon,
		__u32 posix_flags, __u64 mode, __u16 *netfid,
		FILE_UNIX_BASIC_INFO *pRetData, __u32 *pOplock,
		const char *name, const struct nls_table *nls_codepage,
		int remap)
{
	TRANSACTION2_SPI_REQ *pSMB = NULL;
	TRANSACTION2_SPI_RSP *pSMBr = NULL;
	int name_len;
	int rc = 0;
	int bytes_returned = 0;
	__u16 params, param_offset, offset, byte_count, count;
	OPEN_PSX_REQ *pdata;
	OPEN_PSX_RSP *psx_rsp;

	cifs_dbg(FYI, "In POSIX Create\n");
PsxCreat:
	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
		      (void **) &pSMBr);
	if (rc)
		return rc;

	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
		name_len =
		    cifsConvertToUTF16((__le16 *) pSMB->FileName, name,
				       PATH_MAX, nls_codepage, remap);
		name_len++;	/* trailing null */
		name_len *= 2;
	} else {	/* BB improve the check for buffer overruns BB */
		name_len = strnlen(name, PATH_MAX);
		name_len++;	/* trailing null */
		strncpy(pSMB->FileName, name, name_len);
	}

	params = 6 + name_len;
	count = sizeof(OPEN_PSX_REQ);
	pSMB->MaxParameterCount = cpu_to_le16(2);
	pSMB->MaxDataCount = cpu_to_le16(1000);	/* large enough */
	pSMB->MaxSetupCount = 0;
	pSMB->Reserved = 0;
	pSMB->Flags = 0;
	pSMB->Timeout = 0;
	pSMB->Reserved2 = 0;
	param_offset = offsetof(struct smb_com_transaction2_spi_req,
				InformationLevel) - 4;
	offset = param_offset + params;
	/* request data follows the parameters in the same buffer */
	pdata = (OPEN_PSX_REQ *)(((char *)&pSMB->hdr.Protocol) + offset);
	pdata->Level = cpu_to_le16(SMB_QUERY_FILE_UNIX_BASIC);
	pdata->Permissions = cpu_to_le64(mode);
	pdata->PosixOpenFlags = cpu_to_le32(posix_flags);
	pdata->OpenFlags =  cpu_to_le32(*pOplock);
	pSMB->ParameterOffset = cpu_to_le16(param_offset);
	pSMB->DataOffset = cpu_to_le16(offset);
	pSMB->SetupCount = 1;
	pSMB->Reserved3 = 0;
	pSMB->SubCommand = cpu_to_le16(TRANS2_SET_PATH_INFORMATION);
	byte_count = 3 /* pad */  + params + count;

	pSMB->DataCount = cpu_to_le16(count);
	pSMB->ParameterCount = cpu_to_le16(params);
	pSMB->TotalDataCount = pSMB->DataCount;
	pSMB->TotalParameterCount = pSMB->ParameterCount;
	pSMB->InformationLevel = cpu_to_le16(SMB_POSIX_OPEN);
	pSMB->Reserved4 = 0;
	inc_rfc1001_len(pSMB, byte_count);
	pSMB->ByteCount = cpu_to_le16(byte_count);
	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
	if (rc) {
		cifs_dbg(FYI, "Posix create returned %d\n", rc);
		goto psx_create_err;
	}

	cifs_dbg(FYI, "copying inode info\n");
	rc = validate_t2((struct smb_t2_rsp *)pSMBr);

	if (rc || get_bcc(&pSMBr->hdr) < sizeof(OPEN_PSX_RSP)) {
		rc = -EIO;	/* bad smb */
		goto psx_create_err;
	}

	/* copy return information to pRetData */
	psx_rsp = (OPEN_PSX_RSP *)((char *) &pSMBr->hdr.Protocol
			+ le16_to_cpu(pSMBr->t2.DataOffset));

	*pOplock = le16_to_cpu(psx_rsp->OplockFlags);
	if (netfid)
		*netfid = psx_rsp->Fid;   /* cifs fid stays in le */
	/* Let caller know file was created so we can set the mode. */
	/* Do we care about the CreateAction in any other cases?
	 */
	if (cpu_to_le32(FILE_CREATE) == psx_rsp->CreateAction)
		*pOplock |= CIFS_CREATE_ACTION;
	/* check to make sure response data is there */
	if (psx_rsp->ReturnedLevel != cpu_to_le16(SMB_QUERY_FILE_UNIX_BASIC)) {
		pRetData->Type = cpu_to_le32(-1); /* unknown */
		cifs_dbg(NOISY, "unknown type\n");
	} else {
		if (get_bcc(&pSMBr->hdr) < sizeof(OPEN_PSX_RSP)
					+ sizeof(FILE_UNIX_BASIC_INFO)) {
			cifs_dbg(VFS, "Open response data too small\n");
			pRetData->Type = cpu_to_le32(-1);
			goto psx_create_err;
		}
		memcpy((char *) pRetData,
			(char *)psx_rsp + sizeof(OPEN_PSX_RSP),
			sizeof(FILE_UNIX_BASIC_INFO));
	}

psx_create_err:
	cifs_buf_release(pSMB);

	if (posix_flags & SMB_O_DIRECTORY)
		cifs_stats_inc(&tcon->stats.cifs_stats.num_posixmkdirs);
	else
		cifs_stats_inc(&tcon->stats.cifs_stats.num_posixopens);

	if (rc == -EAGAIN)
		goto PsxCreat;

	return rc;
}

/* Map an NT create disposition to the legacy SMBOPEN "open function"
   bits used by SMB_COM_OPEN_ANDX. */
static __u16 convert_disposition(int disposition)
{
	__u16 ofun = 0;

	switch (disposition) {
	case FILE_SUPERSEDE:
		ofun = SMBOPEN_OCREATE | SMBOPEN_OTRUNC;
		break;
	case FILE_OPEN:
		ofun = SMBOPEN_OAPPEND;
		break;
	case FILE_CREATE:
		ofun = SMBOPEN_OCREATE;
		break;
	case FILE_OPEN_IF:
		ofun = SMBOPEN_OCREATE | SMBOPEN_OAPPEND;
		break;
	case FILE_OVERWRITE:
		ofun = SMBOPEN_OTRUNC;
		break;
	case FILE_OVERWRITE_IF:
		ofun = SMBOPEN_OCREATE | SMBOPEN_OTRUNC;
		break;
	default:
		cifs_dbg(FYI, "unknown disposition %d\n", disposition);
		ofun =  SMBOPEN_OAPPEND; /* regular open */
	}
	return ofun;
}

/* Map GENERIC_READ/GENERIC_WRITE access flags to the legacy SMBOPEN
   access mode; any mixed/other combination falls back to read/write. */
static int
access_flags_to_smbopen_mode(const int access_flags)
{
	int masked_flags = access_flags & (GENERIC_READ | GENERIC_WRITE);

	if (masked_flags == GENERIC_READ)
		return SMBOPEN_READ;
	else if (masked_flags == GENERIC_WRITE)
		return SMBOPEN_WRITE;

	/* just go for read/write */
	return SMBOPEN_READWRITE;
}

/* Open a file with the legacy SMB_COM_OPEN_ANDX (for pre-NT servers);
   fills *netfid and optionally a partially-synthesized *pfile_info.
   Retries on -EAGAIN. */
int
SMBLegacyOpen(const unsigned int xid, struct cifs_tcon *tcon,
	    const char *fileName, const int openDisposition,
	    const int access_flags, const int create_options, __u16 *netfid,
	    int *pOplock, FILE_ALL_INFO *pfile_info,
	    const struct nls_table *nls_codepage, int remap)
{
	int rc = -EACCES;
	OPENX_REQ *pSMB = NULL;
	OPENX_RSP *pSMBr = NULL;
	int bytes_returned;
	int name_len;
	__u16 count;

OldOpenRetry:
	rc = smb_init(SMB_COM_OPEN_ANDX, 15, tcon, (void **) &pSMB,
		      (void **) &pSMBr);
	if (rc)
		return rc;

	pSMB->AndXCommand = 0xFF;       /* none */

	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
		count = 1;      /* account for one byte pad to word boundary */
		name_len =
		   cifsConvertToUTF16((__le16 *) (pSMB->fileName + 1),
				      fileName, PATH_MAX, nls_codepage, remap);
		name_len++;     /* trailing null */
		name_len *= 2;
	} else {
		/* BB improve check for buffer overruns BB */
		count = 0;      /* no pad */
		name_len = strnlen(fileName, PATH_MAX);
		name_len++;     /* trailing null */
		strncpy(pSMB->fileName, fileName, name_len);
	}
	if (*pOplock & REQ_OPLOCK)
		pSMB->OpenFlags = cpu_to_le16(REQ_OPLOCK);
	else if (*pOplock & REQ_BATCHOPLOCK)
		pSMB->OpenFlags = cpu_to_le16(REQ_BATCHOPLOCK);

	pSMB->OpenFlags |= cpu_to_le16(REQ_MORE_INFO);
	pSMB->Mode = cpu_to_le16(access_flags_to_smbopen_mode(access_flags));
	pSMB->Mode |= cpu_to_le16(0x40); /* deny none */
	/* set file as system file if special file such
	   as fifo and server expecting SFU style and
	   no Unix extensions */

	if (create_options & CREATE_OPTION_SPECIAL)
		pSMB->FileAttributes = cpu_to_le16(ATTR_SYSTEM);
	else /* BB FIXME BB */
		pSMB->FileAttributes = cpu_to_le16(0/*ATTR_NORMAL*/);

	if (create_options & CREATE_OPTION_READONLY)
		pSMB->FileAttributes |= cpu_to_le16(ATTR_READONLY);

	/* BB FIXME BB */
/*	pSMB->CreateOptions = cpu_to_le32(create_options &
						 CREATE_OPTIONS_MASK); */
	/* BB FIXME END BB */

	pSMB->Sattr = cpu_to_le16(ATTR_HIDDEN | ATTR_SYSTEM | ATTR_DIRECTORY);
	pSMB->OpenFunction = cpu_to_le16(convert_disposition(openDisposition));
	count += name_len;
	inc_rfc1001_len(pSMB, count);

	pSMB->ByteCount = cpu_to_le16(count);
	/* long_op set to 1 to allow for oplock break timeouts */
	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
			(struct smb_hdr *)pSMBr, &bytes_returned, 0);
	cifs_stats_inc(&tcon->stats.cifs_stats.num_opens);
	if (rc) {
		cifs_dbg(FYI, "Error in Open = %d\n", rc);
	} else {
	/* BB verify if wct == 15 */

/*		*pOplock = pSMBr->OplockLevel; */ /* BB take from action field*/

		*netfid = pSMBr->Fid;   /* cifs fid stays in le */
		/* Let caller know file was created so we can set the mode. */
		/* Do we care about the CreateAction in any other cases? */
	/* BB FIXME BB */
/*		if (cpu_to_le32(FILE_CREATE) == pSMBr->CreateAction)
			*pOplock |= CIFS_CREATE_ACTION; */
	/* BB FIXME END */

		if (pfile_info) {
			/* OpenX does not return timestamps; leave them zero */
			pfile_info->CreationTime = 0; /* BB convert CreateTime*/
			pfile_info->LastAccessTime = 0; /* BB fixme */
			pfile_info->LastWriteTime = 0; /* BB fixme */
			pfile_info->ChangeTime = 0;  /* BB fixme */
			pfile_info->Attributes =
				cpu_to_le32(le16_to_cpu(pSMBr->FileAttributes));
			/* the file_info buf is endian converted by caller */
			pfile_info->AllocationSize =
				cpu_to_le64(le32_to_cpu(pSMBr->EndOfFile));
			pfile_info->EndOfFile = pfile_info->AllocationSize;
			pfile_info->NumberOfLinks = cpu_to_le32(1);
			pfile_info->DeletePending = 0;
		}
	}

	cifs_buf_release(pSMB);
	if (rc == -EAGAIN)
		goto OldOpenRetry;
	return rc;
}

/* Open a file with SMB_COM_NT_CREATE_ANDX using the options in @oparms;
   fills oparms->fid->netfid, *oplock, and (optionally) *buf with the
   returned file metadata.  Retries on -EAGAIN. */
int
CIFS_open(const unsigned int xid, struct cifs_open_parms *oparms, int *oplock,
	  FILE_ALL_INFO *buf)
{
	int rc = -EACCES;
	OPEN_REQ *req = NULL;
	OPEN_RSP *rsp = NULL;
	int bytes_returned;
	int name_len;
	__u16 count;
	struct cifs_sb_info *cifs_sb = oparms->cifs_sb;
	struct cifs_tcon *tcon = oparms->tcon;
	int remap = cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR;
	const struct nls_table *nls = cifs_sb->local_nls;
	int create_options = oparms->create_options;
	int desired_access = oparms->desired_access;
	int disposition = oparms->disposition;
	const char *path = oparms->path;

openRetry:
	rc = smb_init(SMB_COM_NT_CREATE_ANDX, 24, tcon, (void **)&req,
		      (void **)&rsp);
	if (rc)
		return rc;

	/* no commands go after this */
	req->AndXCommand = 0xFF;

	if (req->hdr.Flags2 & SMBFLG2_UNICODE) {
		/* account for one byte pad to word boundary */
		count = 1;
		name_len = cifsConvertToUTF16((__le16 *)(req->fileName + 1),
					      path, PATH_MAX, nls, remap);
		/* trailing null */
		name_len++;
		name_len *= 2;
		req->NameLength = cpu_to_le16(name_len);
	} else {
		/* BB improve check for buffer overruns BB */
		/* no pad */
		count = 0;
		name_len = strnlen(path, PATH_MAX);
		/* trailing null */
		name_len++;
		req->NameLength = cpu_to_le16(name_len);
		strncpy(req->fileName, path, name_len);
	}

	if (*oplock & REQ_OPLOCK)
		req->OpenFlags = cpu_to_le32(REQ_OPLOCK);
	else if (*oplock & REQ_BATCHOPLOCK)
		req->OpenFlags = cpu_to_le32(REQ_BATCHOPLOCK);

	req->DesiredAccess = cpu_to_le32(desired_access);
	req->AllocationSize = 0;

	/*
	 * Set file as system file if special file such as fifo and server
	 * expecting SFU style and no Unix extensions.
	 */
	if (create_options & CREATE_OPTION_SPECIAL)
		req->FileAttributes = cpu_to_le32(ATTR_SYSTEM);
	else
		req->FileAttributes = cpu_to_le32(ATTR_NORMAL);

	/*
	 * XP does not handle ATTR_POSIX_SEMANTICS but it helps speed up case
	 * sensitive checks for other servers such as Samba.
	 */
	if (tcon->ses->capabilities & CAP_UNIX)
		req->FileAttributes |= cpu_to_le32(ATTR_POSIX_SEMANTICS);

	if (create_options & CREATE_OPTION_READONLY)
		req->FileAttributes |= cpu_to_le32(ATTR_READONLY);

	req->ShareAccess = cpu_to_le32(FILE_SHARE_ALL);
	req->CreateDisposition = cpu_to_le32(disposition);
	req->CreateOptions = cpu_to_le32(create_options & CREATE_OPTIONS_MASK);

	/* BB Expirement with various impersonation levels and verify */
	req->ImpersonationLevel = cpu_to_le32(SECURITY_IMPERSONATION);
	req->SecurityFlags = SECURITY_CONTEXT_TRACKING|SECURITY_EFFECTIVE_ONLY;

	count += name_len;
	inc_rfc1001_len(req, count);

	req->ByteCount = cpu_to_le16(count);
	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *)req,
			 (struct smb_hdr *)rsp, &bytes_returned, 0);
	cifs_stats_inc(&tcon->stats.cifs_stats.num_opens);
	if (rc) {
		cifs_dbg(FYI, "Error in Open = %d\n", rc);
		cifs_buf_release(req);
		if (rc == -EAGAIN)
			goto openRetry;
		return rc;
	}

	/* 1 byte no need to le_to_cpu */
	*oplock = rsp->OplockLevel;
	/* cifs fid stays in le */
	oparms->fid->netfid = rsp->Fid;

	/* Let caller know file was created so
we can set the mode. */
	/* Do we care about the CreateAction in any other cases? */
	if (cpu_to_le32(FILE_CREATE) == rsp->CreateAction)
		*oplock |= CIFS_CREATE_ACTION;
	if (buf) {
		/* copy from CreationTime to Attributes */
		memcpy((char *)buf, (char *)&rsp->CreationTime, 36);
		/* the file_info buf is endian converted by caller */
		buf->AllocationSize = rsp->AllocationSize;
		buf->EndOfFile = rsp->EndOfFile;
		buf->NumberOfLinks = cpu_to_le32(1);
		buf->DeletePending = 0;
	}

	cifs_buf_release(req);
	return rc;
}

/*
 * Discard any remaining data in the current SMB. To do this, we borrow the
 * current bigbuf.  Reads in CIFSMaxBufSize-sized gulps until total_read
 * reaches the frame length (RFC1002 length + 4 bytes of length field),
 * then dequeues the mid with whatever result was already recorded.
 */
static int
cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
{
	unsigned int rfclen = get_rfc1002_length(server->smallbuf);
	/* +4 accounts for the RFC1002 length field itself */
	int remaining = rfclen + 4 - server->total_read;
	struct cifs_readdata *rdata = mid->callback_data;

	while (remaining > 0) {
		int length;

		length = cifs_read_from_socket(server, server->bigbuf,
				min_t(unsigned int, remaining,
				      CIFSMaxBufSize + MAX_HEADER_SIZE(server)));
		if (length < 0)
			return length;
		server->total_read += length;
		remaining -= length;
	}

	dequeue_mid(mid, rdata->result);
	return 0;
}

/*
 * Receive handler for an async read response: pull the READ_RSP header off
 * the socket, validate it, then hand the data payload to
 * rdata->read_into_pages().  On any protocol error the rest of the frame is
 * drained via cifs_readv_discard() so the transport stays in sync.
 */
int
cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
{
	int length, len;
	unsigned int data_offset, data_len;
	struct cifs_readdata *rdata = mid->callback_data;
	char *buf = server->smallbuf;
	/* +4 for the RFC1002 length field preceding the SMB header */
	unsigned int buflen = get_rfc1002_length(buf) + 4;

	cifs_dbg(FYI, "%s: mid=%llu offset=%llu bytes=%u\n",
		 __func__, mid->mid, rdata->offset, rdata->bytes);

	/*
	 * read the rest of READ_RSP header (sans Data array), or whatever we
	 * can if there's not enough data. At this point, we've read down to
	 * the Mid.
	 */
	len = min_t(unsigned int, buflen, server->vals->read_rsp_size) -
						HEADER_SIZE(server) + 1;

	rdata->iov.iov_base = buf + HEADER_SIZE(server) - 1;
	rdata->iov.iov_len = len;

	length = cifs_readv_from_socket(server, &rdata->iov, 1, len);
	if (length < 0)
		return length;
	server->total_read += length;

	/* Was the SMB read successful? */
	rdata->result = server->ops->map_error(buf, false);
	if (rdata->result != 0) {
		cifs_dbg(FYI, "%s: server returned error %d\n",
			 __func__, rdata->result);
		return cifs_readv_discard(server, mid);
	}

	/* Is there enough to get to the rest of the READ_RSP header? */
	if (server->total_read < server->vals->read_rsp_size) {
		cifs_dbg(FYI, "%s: server returned short header. got=%u expected=%zu\n",
			 __func__, server->total_read,
			 server->vals->read_rsp_size);
		rdata->result = -EIO;
		return cifs_readv_discard(server, mid);
	}

	data_offset = server->ops->read_data_offset(buf) + 4;
	if (data_offset < server->total_read) {
		/*
		 * win2k8 sometimes sends an offset of 0 when the read
		 * is beyond the EOF. Treat it as if the data starts just after
		 * the header.
		 */
		cifs_dbg(FYI, "%s: data offset (%u) inside read response header\n",
			 __func__, data_offset);
		data_offset = server->total_read;
	} else if (data_offset > MAX_CIFS_SMALL_BUFFER_SIZE) {
		/* data_offset is beyond the end of smallbuf */
		cifs_dbg(FYI, "%s: data offset (%u) beyond end of smallbuf\n",
			 __func__, data_offset);
		rdata->result = -EIO;
		return cifs_readv_discard(server, mid);
	}

	cifs_dbg(FYI, "%s: total_read=%u data_offset=%u\n",
		 __func__, server->total_read, data_offset);

	len = data_offset - server->total_read;
	if (len > 0) {
		/* read any junk before data into the rest of smallbuf */
		rdata->iov.iov_base = buf + server->total_read;
		rdata->iov.iov_len = len;
		length = cifs_readv_from_socket(server, &rdata->iov, 1, len);
		if (length < 0)
			return length;
		server->total_read += length;
	}

	/* set up first iov for signature check */
	rdata->iov.iov_base = buf;
	rdata->iov.iov_len = server->total_read;
	cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n",
		 rdata->iov.iov_base, rdata->iov.iov_len);

	/* how much data is in the response? */
	data_len = server->ops->read_data_length(buf);
	if (data_offset + data_len > buflen) {
		/* data_len is corrupt -- discard frame */
		rdata->result = -EIO;
		return cifs_readv_discard(server, mid);
	}

	length = rdata->read_into_pages(server, rdata, data_len);
	if (length < 0)
		return length;

	server->total_read += length;
	rdata->bytes = length;

	cifs_dbg(FYI, "total_read=%u buflen=%u remaining=%u\n",
		 server->total_read, buflen, data_len);

	/* discard anything left over */
	if (server->total_read < buflen)
		return cifs_readv_discard(server, mid);

	dequeue_mid(mid, false);
	return length;
}

/*
 * Completion callback for an async read: verify the signature (if signing
 * is active), account the bytes read, and queue rdata->work for the
 * cifsiod workqueue.  Also releases the mid and returns its credit.
 */
static void
cifs_readv_callback(struct mid_q_entry *mid)
{
	struct cifs_readdata *rdata = mid->callback_data;
	struct cifs_tcon *tcon = tlink_tcon(rdata->cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct smb_rqst rqst = { .rq_iov = &rdata->iov,
				 .rq_nvec = 1,
				 .rq_pages = rdata->pages,
				 .rq_npages = rdata->nr_pages,
				 .rq_pagesz = rdata->pagesz,
				 .rq_tailsz = rdata->tailsz };

	cifs_dbg(FYI, "%s: mid=%llu state=%d result=%d bytes=%u\n",
		 __func__, mid->mid, mid->mid_state, rdata->result,
		 rdata->bytes);

	switch (mid->mid_state) {
	case MID_RESPONSE_RECEIVED:
		/* result already set, check signature */
		if (server->sign) {
			int rc = 0;

			rc = cifs_verify_signature(&rqst, server,
						   mid->sequence_number);
			if (rc)
				cifs_dbg(VFS, "SMB signature verification returned error = %d\n",
					 rc);
		}
		/* FIXME: should this be counted toward the initiating task?
*/
		task_io_account_read(rdata->bytes);
		cifs_stats_bytes_read(tcon, rdata->bytes);
		break;
	case MID_REQUEST_SUBMITTED:
	case MID_RETRY_NEEDED:
		/* request never completed on this connection: retryable */
		rdata->result = -EAGAIN;
		break;
	default:
		rdata->result = -EIO;
	}

	queue_work(cifsiod_wq, &rdata->work);
	DeleteMidQEntry(mid);
	add_credits(server, 1, 0);
}

/* cifs_async_readv - send an async read, and set up mid to handle result */
int
cifs_async_readv(struct cifs_readdata *rdata)
{
	int rc;
	READ_REQ *smb = NULL;
	int wct;
	struct cifs_tcon *tcon = tlink_tcon(rdata->cfile->tlink);
	struct smb_rqst rqst = { .rq_iov = &rdata->iov,
				 .rq_nvec = 1 };

	cifs_dbg(FYI, "%s: offset=%llu bytes=%u\n",
		 __func__, rdata->offset, rdata->bytes);

	/* wct 12 = large-file (64-bit offset) READ_ANDX, wct 10 = legacy */
	if (tcon->ses->capabilities & CAP_LARGE_FILES)
		wct = 12;
	else {
		wct = 10; /* old style read */
		if ((rdata->offset >> 32) > 0) {
			/* can not handle this big offset for old */
			return -EIO;
		}
	}

	rc = small_smb_init(SMB_COM_READ_ANDX, wct, tcon, (void **)&smb);
	if (rc)
		return rc;

	smb->hdr.Pid = cpu_to_le16((__u16)rdata->pid);
	smb->hdr.PidHigh = cpu_to_le16((__u16)(rdata->pid >> 16));

	smb->AndXCommand = 0xFF;	/* none */
	smb->Fid = rdata->cfile->fid.netfid;
	smb->OffsetLow = cpu_to_le32(rdata->offset & 0xFFFFFFFF);
	if (wct == 12)
		smb->OffsetHigh = cpu_to_le32(rdata->offset >> 32);
	smb->Remaining = 0;
	smb->MaxCount = cpu_to_le16(rdata->bytes & 0xFFFF);
	smb->MaxCountHigh = cpu_to_le32(rdata->bytes >> 16);
	if (wct == 12)
		smb->ByteCount = 0;
	else {
		/* old style read: ByteCount sits 4 bytes earlier */
		struct smb_com_readx_req *smbr =
			(struct smb_com_readx_req *)smb;
		smbr->ByteCount = 0;
	}

	/* 4 for RFC1001 length + 1 for BCC */
	rdata->iov.iov_base = smb;
	rdata->iov.iov_len = be32_to_cpu(smb->hdr.smb_buf_length) + 4;

	/* extra ref for the mid; dropped on error or by the callback path */
	kref_get(&rdata->refcount);
	rc = cifs_call_async(tcon->ses->server, &rqst, cifs_readv_receive,
			     cifs_readv_callback, rdata, 0);

	if (rc == 0)
		cifs_stats_inc(&tcon->stats.cifs_stats.num_reads);
	else
		kref_put(&rdata->refcount, cifs_readdata_release);

	cifs_small_buf_release(smb);
	return rc;
}

/*
 * Synchronous READ_ANDX.  On success *nbytes holds the byte count read.
 * If *buf is non-NULL the data is copied into it and the response buffer
 * is freed here; otherwise the raw response buffer is handed back to the
 * caller through *buf / *pbuf_type and the caller must free it.
 */
int
CIFSSMBRead(const unsigned int xid, struct cifs_io_parms *io_parms,
	    unsigned int *nbytes, char **buf, int *pbuf_type)
{
	int rc = -EACCES;
	READ_REQ *pSMB = NULL;
	READ_RSP *pSMBr = NULL;
	char *pReadData = NULL;
	int wct;
	int resp_buf_type = 0;
	struct kvec iov[1];
	__u32 pid = io_parms->pid;
	__u16 netfid = io_parms->netfid;
	__u64 offset = io_parms->offset;
	struct cifs_tcon *tcon = io_parms->tcon;
	unsigned int count = io_parms->length;

	cifs_dbg(FYI, "Reading %d bytes on fid %d\n", count, netfid);
	if (tcon->ses->capabilities & CAP_LARGE_FILES)
		wct = 12;
	else {
		wct = 10; /* old style read */
		if ((offset >> 32) > 0) {
			/* can not handle this big offset for old */
			return -EIO;
		}
	}

	*nbytes = 0;
	rc = small_smb_init(SMB_COM_READ_ANDX, wct, tcon, (void **) &pSMB);
	if (rc)
		return rc;

	pSMB->hdr.Pid = cpu_to_le16((__u16)pid);
	pSMB->hdr.PidHigh = cpu_to_le16((__u16)(pid >> 16));

	/* tcon and ses pointer are checked in smb_init */
	if (tcon->ses->server == NULL)
		return -ECONNABORTED;

	pSMB->AndXCommand = 0xFF;	/* none */
	pSMB->Fid = netfid;
	pSMB->OffsetLow = cpu_to_le32(offset & 0xFFFFFFFF);
	if (wct == 12)
		pSMB->OffsetHigh = cpu_to_le32(offset >> 32);

	pSMB->Remaining = 0;
	pSMB->MaxCount = cpu_to_le16(count & 0xFFFF);
	pSMB->MaxCountHigh = cpu_to_le32(count >> 16);
	if (wct == 12)
		pSMB->ByteCount = 0; /* no need to do le conversion since 0 */
	else {
		/* old style read */
		struct smb_com_readx_req *pSMBW =
			(struct smb_com_readx_req *)pSMB;
		pSMBW->ByteCount = 0;
	}

	iov[0].iov_base = (char *)pSMB;
	iov[0].iov_len = be32_to_cpu(pSMB->hdr.smb_buf_length) + 4;
	rc = SendReceive2(xid, tcon->ses, iov, 1 /* num iovecs */,
			  &resp_buf_type, CIFS_LOG_ERROR);
	cifs_stats_inc(&tcon->stats.cifs_stats.num_reads);
	pSMBr = (READ_RSP *)iov[0].iov_base;
	if (rc) {
		cifs_dbg(VFS, "Send error in read = %d\n", rc);
	} else {
		/* DataLength is split low16/high16 in the READ_RSP */
		int data_length = le16_to_cpu(pSMBr->DataLengthHigh);
		data_length = data_length << 16;
		data_length += le16_to_cpu(pSMBr->DataLength);
		*nbytes = data_length;

		/*check that DataLength would not go beyond end of SMB */
		if ((data_length > CIFSMaxBufSize)
				|| (data_length > count)) {
			cifs_dbg(FYI, "bad length %d for count %d\n",
				 data_length, count);
			rc = -EIO;
			*nbytes = 0;
		} else {
			pReadData = (char *) (&pSMBr->hdr.Protocol) +
					le16_to_cpu(pSMBr->DataOffset);
/*	if (rc = copy_to_user(buf, pReadData, data_length)) {
		cifs_dbg(VFS, "Faulting on read rc = %d\n",rc);
		rc = -EFAULT;
	}*/ /* can not use copy_to_user when using page cache*/
			if (*buf)
				memcpy(*buf, pReadData, data_length);
		}
	}

/*	cifs_small_buf_release(pSMB); */ /* Freed earlier now in SendReceive2 */
	if (*buf) {
		if (resp_buf_type == CIFS_SMALL_BUFFER)
			cifs_small_buf_release(iov[0].iov_base);
		else if (resp_buf_type == CIFS_LARGE_BUFFER)
			cifs_buf_release(iov[0].iov_base);
	} else if (resp_buf_type != CIFS_NO_BUFFER) {
		/* return buffer to caller to free */
		*buf = iov[0].iov_base;
		if (resp_buf_type == CIFS_SMALL_BUFFER)
			*pbuf_type = CIFS_SMALL_BUFFER;
		else if (resp_buf_type == CIFS_LARGE_BUFFER)
			*pbuf_type = CIFS_LARGE_BUFFER;
	} /* else no valid buffer on return - leave as null */

	/* Note: On -EAGAIN error only caller can retry on handle based calls
		since file handle passed in no longer valid */
	return rc;
}

/*
 * Synchronous WRITE_ANDX.  Data comes either from a kernel buffer (buf) or
 * a userspace buffer (ubuf); exactly one may be non-NULL unless count == 0
 * (a zero-length write used to set file size).
 */
int
CIFSSMBWrite(const unsigned int xid, struct cifs_io_parms *io_parms,
	     unsigned int *nbytes, const char *buf,
	     const char __user *ubuf, const int long_op)
{
	int rc = -EACCES;
	WRITE_REQ *pSMB = NULL;
	WRITE_RSP *pSMBr = NULL;
	int bytes_returned, wct;
	__u32 bytes_sent;
	__u16 byte_count;
	__u32 pid = io_parms->pid;
	__u16 netfid = io_parms->netfid;
	__u64 offset = io_parms->offset;
	struct cifs_tcon *tcon = io_parms->tcon;
	unsigned int count = io_parms->length;

	*nbytes = 0;

	/* cifs_dbg(FYI, "write at %lld %d bytes\n", offset, count);*/
	if (tcon->ses == NULL)
		return -ECONNABORTED;

	/* wct 14 = large-file (64-bit offset) WRITE_ANDX, wct 12 = legacy */
	if (tcon->ses->capabilities & CAP_LARGE_FILES)
		wct = 14;
	else {
		wct = 12;
		if ((offset >> 32) > 0) {
			/* can not handle big offset for old srv */
			return -EIO;
		}
	}

	rc = smb_init(SMB_COM_WRITE_ANDX, wct, tcon, (void **) &pSMB,
		      (void **) &pSMBr);
	if (rc)
		return rc;

	pSMB->hdr.Pid =
cpu_to_le16((__u16)pid);
	pSMB->hdr.PidHigh = cpu_to_le16((__u16)(pid >> 16));
	/* tcon and ses pointer are checked in smb_init */
	if (tcon->ses->server == NULL)
		return -ECONNABORTED;

	pSMB->AndXCommand = 0xFF;	/* none */
	pSMB->Fid = netfid;
	pSMB->OffsetLow = cpu_to_le32(offset & 0xFFFFFFFF);
	if (wct == 14)
		pSMB->OffsetHigh = cpu_to_le32(offset >> 32);
	pSMB->Reserved = 0xFFFFFFFF;
	pSMB->WriteMode = 0;
	pSMB->Remaining = 0;

	/* Can increase buffer size if buffer is big enough in some cases ie we
	can send more if LARGE_WRITE_X capability returned by the server and if
	our buffer is big enough or if we convert to iovecs on socket writes
	and eliminate the copy to the CIFS buffer */
	if (tcon->ses->capabilities & CAP_LARGE_WRITE_X) {
		bytes_sent = min_t(const unsigned int, CIFSMaxBufSize, count);
	} else {
		/* round down to a multiple of 256 below the server max */
		bytes_sent = (tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE)
			& ~0xFF;
	}

	if (bytes_sent > count)
		bytes_sent = count;
	pSMB->DataOffset =
		cpu_to_le16(offsetof(struct smb_com_write_req, Data) - 4);
	if (buf)
		memcpy(pSMB->Data, buf, bytes_sent);
	else if (ubuf) {
		if (copy_from_user(pSMB->Data, ubuf, bytes_sent)) {
			cifs_buf_release(pSMB);
			return -EFAULT;
		}
	} else if (count != 0) {
		/* No buffer */
		cifs_buf_release(pSMB);
		return -EINVAL;
	} /* else setting file size with write of zero bytes */
	if (wct == 14)
		byte_count = bytes_sent + 1; /* pad */
	else /* wct == 12 */
		byte_count = bytes_sent + 5; /* bigger pad, smaller smb hdr */

	pSMB->DataLengthLow = cpu_to_le16(bytes_sent & 0xFFFF);
	pSMB->DataLengthHigh = cpu_to_le16(bytes_sent >> 16);
	inc_rfc1001_len(pSMB, byte_count);

	if (wct == 14)
		pSMB->ByteCount = cpu_to_le16(byte_count);
	else { /* old style write has byte count 4 bytes earlier
		  so 4 bytes pad */
		struct smb_com_writex_req *pSMBW =
			(struct smb_com_writex_req *)pSMB;
		pSMBW->ByteCount = cpu_to_le16(byte_count);
	}

	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
			 (struct smb_hdr *) pSMBr, &bytes_returned, long_op);
	cifs_stats_inc(&tcon->stats.cifs_stats.num_writes);
	if (rc) {
		cifs_dbg(FYI, "Send error in write = %d\n", rc);
	} else {
		/* Count is split low16/high16 in the WRITE_RSP */
		*nbytes = le16_to_cpu(pSMBr->CountHigh);
		*nbytes = (*nbytes) << 16;
		*nbytes += le16_to_cpu(pSMBr->Count);

		/*
		 * Mask off high 16 bits when bytes written as returned by the
		 * server is greater than bytes requested by the client. Some
		 * OS/2 servers are known to set incorrect CountHigh values.
		 */
		if (*nbytes > count)
			*nbytes &= 0xFFFF;
	}

	cifs_buf_release(pSMB);

	/* Note: On -EAGAIN error only caller can retry on handle based calls
		since file handle passed in no longer valid */

	return rc;
}

/* Final kref release for a cifs_writedata: drop the file ref and free it. */
void
cifs_writedata_release(struct kref *refcount)
{
	struct cifs_writedata *wdata = container_of(refcount,
					struct cifs_writedata, refcount);

	if (wdata->cfile)
		cifsFileInfo_put(wdata->cfile);

	kfree(wdata);
}

/*
 * Write failed with a retryable error. Resend the write request. It's also
 * possible that the page was redirtied so re-clean the page.
 */
static void
cifs_writev_requeue(struct cifs_writedata *wdata)
{
	int i, rc;
	struct inode *inode = wdata->cfile->dentry->d_inode;
	struct TCP_Server_Info *server;

	for (i = 0; i < wdata->nr_pages; i++) {
		lock_page(wdata->pages[i]);
		clear_page_dirty_for_io(wdata->pages[i]);
	}

	/* keep resending while the transport reports -EAGAIN */
	do {
		server = tlink_tcon(wdata->cfile->tlink)->ses->server;
		rc = server->ops->async_writev(wdata, cifs_writedata_release);
	} while (rc == -EAGAIN);

	for (i = 0; i < wdata->nr_pages; i++) {
		unlock_page(wdata->pages[i]);
		if (rc != 0) {
			SetPageError(wdata->pages[i]);
			end_page_writeback(wdata->pages[i]);
			page_cache_release(wdata->pages[i]);
		}
	}

	mapping_set_error(inode->i_mapping, rc);
	kref_put(&wdata->refcount, cifs_writedata_release);
}

/*
 * Workqueue completion for an async write: update EOF and stats on success,
 * requeue on retryable failure under WB_SYNC_ALL, otherwise finish page
 * writeback and record any error on the mapping.
 */
void
cifs_writev_complete(struct work_struct *work)
{
	struct cifs_writedata *wdata = container_of(work,
						struct cifs_writedata, work);
	struct inode *inode = wdata->cfile->dentry->d_inode;
	int i = 0;

	if (wdata->result == 0) {
		spin_lock(&inode->i_lock);
		cifs_update_eof(CIFS_I(inode), wdata->offset, wdata->bytes);
		spin_unlock(&inode->i_lock);
		cifs_stats_bytes_written(tlink_tcon(wdata->cfile->tlink),
					 wdata->bytes);
	} else if (wdata->sync_mode == WB_SYNC_ALL && wdata->result == -EAGAIN)
		return cifs_writev_requeue(wdata);

	for (i = 0; i < wdata->nr_pages; i++) {
		struct page *page = wdata->pages[i];
		if (wdata->result == -EAGAIN)
			__set_page_dirty_nobuffers(page);
		else if (wdata->result < 0)
			SetPageError(page);
		end_page_writeback(page);
		page_cache_release(page);
	}

	if (wdata->result != -EAGAIN)
		mapping_set_error(inode->i_mapping, wdata->result);
	kref_put(&wdata->refcount, cifs_writedata_release);
}

/*
 * Allocate a cifs_writedata with room for nr_pages page pointers (flexible
 * trailing array).  Returns NULL on allocation failure.
 */
struct cifs_writedata *
cifs_writedata_alloc(unsigned int nr_pages, work_func_t complete)
{
	struct cifs_writedata *wdata;

	/* writedata + number of page pointers */
	wdata = kzalloc(sizeof(*wdata) +
			sizeof(struct page *) * nr_pages, GFP_NOFS);
	if (wdata != NULL) {
		kref_init(&wdata->refcount);
		INIT_LIST_HEAD(&wdata->list);
		init_completion(&wdata->done);
		INIT_WORK(&wdata->work, complete);
	}
	return wdata;
}

/*
 * Check the mid_state and signature on received buffer (if any), and queue the
 * workqueue completion task.
 */
static void
cifs_writev_callback(struct mid_q_entry *mid)
{
	struct cifs_writedata *wdata = mid->callback_data;
	struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
	unsigned int written;
	WRITE_RSP *smb = (WRITE_RSP *)mid->resp_buf;

	switch (mid->mid_state) {
	case MID_RESPONSE_RECEIVED:
		wdata->result = cifs_check_receive(mid, tcon->ses->server, 0);
		if (wdata->result != 0)
			break;

		/* Count is split low16/high16 in the WRITE_RSP */
		written = le16_to_cpu(smb->CountHigh);
		written <<= 16;
		written += le16_to_cpu(smb->Count);
		/*
		 * Mask off high 16 bits when bytes written as returned
		 * by the server is greater than bytes requested by the
		 * client. OS/2 servers are known to set incorrect
		 * CountHigh values.
*/
		if (written > wdata->bytes)
			written &= 0xFFFF;

		/* a short write is treated as out of space on the server */
		if (written < wdata->bytes)
			wdata->result = -ENOSPC;
		else
			wdata->bytes = written;
		break;
	case MID_REQUEST_SUBMITTED:
	case MID_RETRY_NEEDED:
		wdata->result = -EAGAIN;
		break;
	default:
		wdata->result = -EIO;
		break;
	}

	queue_work(cifsiod_wq, &wdata->work);
	DeleteMidQEntry(mid);
	add_credits(tcon->ses->server, 1, 0);
}

/* cifs_async_writev - send an async write, and set up mid to handle result */
int
cifs_async_writev(struct cifs_writedata *wdata,
		  void (*release)(struct kref *kref))
{
	int rc = -EACCES;
	WRITE_REQ *smb = NULL;
	int wct;
	struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
	struct kvec iov;
	struct smb_rqst rqst = { };

	/* wct 14 = large-file (64-bit offset) WRITE_ANDX, wct 12 = legacy */
	if (tcon->ses->capabilities & CAP_LARGE_FILES) {
		wct = 14;
	} else {
		wct = 12;
		if (wdata->offset >> 32 > 0) {
			/* can not handle big offset for old srv */
			return -EIO;
		}
	}

	rc = small_smb_init(SMB_COM_WRITE_ANDX, wct, tcon, (void **)&smb);
	if (rc)
		goto async_writev_out;

	smb->hdr.Pid = cpu_to_le16((__u16)wdata->pid);
	smb->hdr.PidHigh = cpu_to_le16((__u16)(wdata->pid >> 16));

	smb->AndXCommand = 0xFF;	/* none */
	smb->Fid = wdata->cfile->fid.netfid;
	smb->OffsetLow = cpu_to_le32(wdata->offset & 0xFFFFFFFF);
	if (wct == 14)
		smb->OffsetHigh = cpu_to_le32(wdata->offset >> 32);
	smb->Reserved = 0xFFFFFFFF;
	smb->WriteMode = 0;
	smb->Remaining = 0;

	smb->DataOffset =
	    cpu_to_le16(offsetof(struct smb_com_write_req, Data) - 4);

	/* 4 for RFC1001 length + 1 for BCC */
	iov.iov_len = be32_to_cpu(smb->hdr.smb_buf_length) + 4 + 1;
	iov.iov_base = smb;

	/* payload rides in the page array; only the header is in the iov */
	rqst.rq_iov = &iov;
	rqst.rq_nvec = 1;
	rqst.rq_pages = wdata->pages;
	rqst.rq_npages = wdata->nr_pages;
	rqst.rq_pagesz = wdata->pagesz;
	rqst.rq_tailsz = wdata->tailsz;

	cifs_dbg(FYI, "async write at %llu %u bytes\n",
		 wdata->offset, wdata->bytes);

	smb->DataLengthLow = cpu_to_le16(wdata->bytes & 0xFFFF);
	smb->DataLengthHigh = cpu_to_le16(wdata->bytes >> 16);

	if (wct == 14) {
		inc_rfc1001_len(&smb->hdr, wdata->bytes + 1);
		put_bcc(wdata->bytes + 1, &smb->hdr);
	} else {
		/* wct == 12 */
		struct smb_com_writex_req *smbw =
				(struct smb_com_writex_req *)smb;
		inc_rfc1001_len(&smbw->hdr, wdata->bytes + 5);
		put_bcc(wdata->bytes + 5, &smbw->hdr);
		iov.iov_len += 4; /* pad bigger by four bytes */
	}

	/* extra ref for the mid; dropped via 'release' on submit failure */
	kref_get(&wdata->refcount);
	rc = cifs_call_async(tcon->ses->server, &rqst, NULL,
			     cifs_writev_callback, wdata, 0);

	if (rc == 0)
		cifs_stats_inc(&tcon->stats.cifs_stats.num_writes);
	else
		kref_put(&wdata->refcount, release);

async_writev_out:
	cifs_small_buf_release(smb);
	return rc;
}

/*
 * Synchronous vectored WRITE_ANDX: iov[0] is filled in here with the SMB
 * header, iov[1..n_vec] carry the caller's data.  On success *nbytes holds
 * the byte count the server reports written.
 */
int
CIFSSMBWrite2(const unsigned int xid, struct cifs_io_parms *io_parms,
	      unsigned int *nbytes, struct kvec *iov, int n_vec)
{
	int rc = -EACCES;
	WRITE_REQ *pSMB = NULL;
	int wct;
	int smb_hdr_len;
	int resp_buf_type = 0;
	__u32 pid = io_parms->pid;
	__u16 netfid = io_parms->netfid;
	__u64 offset = io_parms->offset;
	struct cifs_tcon *tcon = io_parms->tcon;
	unsigned int count = io_parms->length;

	*nbytes = 0;

	cifs_dbg(FYI, "write2 at %lld %d bytes\n", (long long)offset, count);

	if (tcon->ses->capabilities & CAP_LARGE_FILES) {
		wct = 14;
	} else {
		wct = 12;
		if ((offset >> 32) > 0) {
			/* can not handle big offset for old srv */
			return -EIO;
		}
	}
	rc = small_smb_init(SMB_COM_WRITE_ANDX, wct, tcon, (void **) &pSMB);
	if (rc)
		return rc;

	pSMB->hdr.Pid = cpu_to_le16((__u16)pid);
	pSMB->hdr.PidHigh = cpu_to_le16((__u16)(pid >> 16));

	/* tcon and ses pointer are checked in smb_init */
	if (tcon->ses->server == NULL)
		return -ECONNABORTED;

	pSMB->AndXCommand = 0xFF;	/* none */
	pSMB->Fid = netfid;
	pSMB->OffsetLow = cpu_to_le32(offset & 0xFFFFFFFF);
	if (wct == 14)
		pSMB->OffsetHigh = cpu_to_le32(offset >> 32);
	pSMB->Reserved = 0xFFFFFFFF;
	pSMB->WriteMode = 0;
	pSMB->Remaining = 0;

	pSMB->DataOffset =
	    cpu_to_le16(offsetof(struct smb_com_write_req, Data) - 4);

	pSMB->DataLengthLow = cpu_to_le16(count & 0xFFFF);
	pSMB->DataLengthHigh = cpu_to_le16(count >> 16);
	/* header + 1 byte pad */
	smb_hdr_len = be32_to_cpu(pSMB->hdr.smb_buf_length) + 1;
	if (wct == 14)
		inc_rfc1001_len(pSMB, count + 1);
	else /* wct == 12 */
		inc_rfc1001_len(pSMB, count + 5); /* smb data starts later */
	if (wct == 14)
		pSMB->ByteCount = cpu_to_le16(count + 1);
	else /* wct == 12 */ /* bigger pad, smaller smb hdr, keep offset ok */ {
		struct smb_com_writex_req *pSMBW =
				(struct smb_com_writex_req *)pSMB;
		pSMBW->ByteCount = cpu_to_le16(count + 5);
	}
	iov[0].iov_base = pSMB;
	if (wct == 14)
		iov[0].iov_len = smb_hdr_len + 4;
	else /* wct == 12 pad bigger by four bytes */
		iov[0].iov_len = smb_hdr_len + 8;

	rc = SendReceive2(xid, tcon->ses, iov, n_vec + 1, &resp_buf_type, 0);
	cifs_stats_inc(&tcon->stats.cifs_stats.num_writes);
	if (rc) {
		cifs_dbg(FYI, "Send error Write2 = %d\n", rc);
	} else if (resp_buf_type == 0) {
		/* presumably this can not happen, but best to be safe */
		rc = -EIO;
	} else {
		WRITE_RSP *pSMBr = (WRITE_RSP *)iov[0].iov_base;
		*nbytes = le16_to_cpu(pSMBr->CountHigh);
		*nbytes = (*nbytes) << 16;
		*nbytes += le16_to_cpu(pSMBr->Count);

		/*
		 * Mask off high 16 bits when bytes written as returned by the
		 * server is greater than bytes requested by the client. OS/2
		 * servers are known to set incorrect CountHigh values.
*/ if (*nbytes > count) *nbytes &= 0xFFFF; } /* cifs_small_buf_release(pSMB); */ /* Freed earlier now in SendReceive2 */ if (resp_buf_type == CIFS_SMALL_BUFFER) cifs_small_buf_release(iov[0].iov_base); else if (resp_buf_type == CIFS_LARGE_BUFFER) cifs_buf_release(iov[0].iov_base); /* Note: On -EAGAIN error only caller can retry on handle based calls since file handle passed in no longer valid */ return rc; } int cifs_lockv(const unsigned int xid, struct cifs_tcon *tcon, const __u16 netfid, const __u8 lock_type, const __u32 num_unlock, const __u32 num_lock, LOCKING_ANDX_RANGE *buf) { int rc = 0; LOCK_REQ *pSMB = NULL; struct kvec iov[2]; int resp_buf_type; __u16 count; cifs_dbg(FYI, "cifs_lockv num lock %d num unlock %d\n", num_lock, num_unlock); rc = small_smb_init(SMB_COM_LOCKING_ANDX, 8, tcon, (void **) &pSMB); if (rc) return rc; pSMB->Timeout = 0; pSMB->NumberOfLocks = cpu_to_le16(num_lock); pSMB->NumberOfUnlocks = cpu_to_le16(num_unlock); pSMB->LockType = lock_type; pSMB->AndXCommand = 0xFF; /* none */ pSMB->Fid = netfid; /* netfid stays le */ count = (num_unlock + num_lock) * sizeof(LOCKING_ANDX_RANGE); inc_rfc1001_len(pSMB, count); pSMB->ByteCount = cpu_to_le16(count); iov[0].iov_base = (char *)pSMB; iov[0].iov_len = be32_to_cpu(pSMB->hdr.smb_buf_length) + 4 - (num_unlock + num_lock) * sizeof(LOCKING_ANDX_RANGE); iov[1].iov_base = (char *)buf; iov[1].iov_len = (num_unlock + num_lock) * sizeof(LOCKING_ANDX_RANGE); cifs_stats_inc(&tcon->stats.cifs_stats.num_locks); rc = SendReceive2(xid, tcon->ses, iov, 2, &resp_buf_type, CIFS_NO_RESP); if (rc) cifs_dbg(FYI, "Send error in cifs_lockv = %d\n", rc); return rc; } int CIFSSMBLock(const unsigned int xid, struct cifs_tcon *tcon, const __u16 smb_file_id, const __u32 netpid, const __u64 len, const __u64 offset, const __u32 numUnlock, const __u32 numLock, const __u8 lockType, const bool waitFlag, const __u8 oplock_level) { int rc = 0; LOCK_REQ *pSMB = NULL; /* LOCK_RSP *pSMBr = NULL; */ /* No response data other than rc 
to parse */ int bytes_returned; int flags = 0; __u16 count; cifs_dbg(FYI, "CIFSSMBLock timeout %d numLock %d\n", (int)waitFlag, numLock); rc = small_smb_init(SMB_COM_LOCKING_ANDX, 8, tcon, (void **) &pSMB); if (rc) return rc; if (lockType == LOCKING_ANDX_OPLOCK_RELEASE) { /* no response expected */ flags = CIFS_ASYNC_OP | CIFS_OBREAK_OP; pSMB->Timeout = 0; } else if (waitFlag) { flags = CIFS_BLOCKING_OP; /* blocking operation, no timeout */ pSMB->Timeout = cpu_to_le32(-1);/* blocking - do not time out */ } else { pSMB->Timeout = 0; } pSMB->NumberOfLocks = cpu_to_le16(numLock); pSMB->NumberOfUnlocks = cpu_to_le16(numUnlock); pSMB->LockType = lockType; pSMB->OplockLevel = oplock_level; pSMB->AndXCommand = 0xFF; /* none */ pSMB->Fid = smb_file_id; /* netfid stays le */ if ((numLock != 0) || (numUnlock != 0)) { pSMB->Locks[0].Pid = cpu_to_le16(netpid); /* BB where to store pid high? */ pSMB->Locks[0].LengthLow = cpu_to_le32((u32)len); pSMB->Locks[0].LengthHigh = cpu_to_le32((u32)(len>>32)); pSMB->Locks[0].OffsetLow = cpu_to_le32((u32)offset); pSMB->Locks[0].OffsetHigh = cpu_to_le32((u32)(offset>>32)); count = sizeof(LOCKING_ANDX_RANGE); } else { /* oplock break */ count = 0; } inc_rfc1001_len(pSMB, count); pSMB->ByteCount = cpu_to_le16(count); if (waitFlag) { rc = SendReceiveBlockingLock(xid, tcon, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMB, &bytes_returned); cifs_small_buf_release(pSMB); } else { rc = SendReceiveNoRsp(xid, tcon->ses, (char *)pSMB, flags); /* SMB buffer freed by function above */ } cifs_stats_inc(&tcon->stats.cifs_stats.num_locks); if (rc) cifs_dbg(FYI, "Send error in Lock = %d\n", rc); /* Note: On -EAGAIN error only caller can retry on handle based calls since file handle passed in no longer valid */ return rc; } int CIFSSMBPosixLock(const unsigned int xid, struct cifs_tcon *tcon, const __u16 smb_file_id, const __u32 netpid, const loff_t start_offset, const __u64 len, struct file_lock *pLockData, const __u16 lock_type, const bool waitFlag) { 
/* body of CIFSSMBPosixLock (see header above): request and response
   share the same small buffer until SendReceive2 swaps it */
	struct smb_com_transaction2_sfi_req *pSMB = NULL;
	struct smb_com_transaction2_sfi_rsp *pSMBr = NULL;
	struct cifs_posix_lock *parm_data;
	int rc = 0;
	int timeout = 0;
	int bytes_returned = 0;
	int resp_buf_type = 0;
	__u16 params, param_offset, offset, byte_count, count;
	struct kvec iov[1];

	cifs_dbg(FYI, "Posix Lock\n");

	rc = small_smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB);

	if (rc)
		return rc;

	pSMBr = (struct smb_com_transaction2_sfi_rsp *)pSMB;

	params = 6;
	pSMB->MaxSetupCount = 0;
	pSMB->Reserved = 0;
	pSMB->Flags = 0;
	pSMB->Reserved2 = 0;
	/* parameters start at the Fid field; -4 skips the RFC1001 header */
	param_offset = offsetof(struct smb_com_transaction2_sfi_req, Fid) - 4;
	offset = param_offset + params;

	count = sizeof(struct cifs_posix_lock);
	pSMB->MaxParameterCount = cpu_to_le16(2);
	pSMB->MaxDataCount = cpu_to_le16(1000); /* BB find max SMB from sess */
	pSMB->SetupCount = 1;
	pSMB->Reserved3 = 0;
	if (pLockData)
		pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_FILE_INFORMATION);
	else
		pSMB->SubCommand = cpu_to_le16(TRANS2_SET_FILE_INFORMATION);
	byte_count = 3 /* pad */  + params + count;
	pSMB->DataCount = cpu_to_le16(count);
	pSMB->ParameterCount = cpu_to_le16(params);
	pSMB->TotalDataCount = pSMB->DataCount;
	pSMB->TotalParameterCount = pSMB->ParameterCount;
	pSMB->ParameterOffset = cpu_to_le16(param_offset);
	/* data area (the cifs_posix_lock struct) follows the parameters */
	parm_data = (struct cifs_posix_lock *)
			(((char *) &pSMB->hdr.Protocol) + offset);

	parm_data->lock_type = cpu_to_le16(lock_type);
	if (waitFlag) {
		timeout = CIFS_BLOCKING_OP; /* blocking operation, no timeout */
		parm_data->lock_flags = cpu_to_le16(1);
		pSMB->Timeout = cpu_to_le32(-1);
	} else
		pSMB->Timeout = 0;

	parm_data->pid = cpu_to_le32(netpid);
	parm_data->start = cpu_to_le64(start_offset);
	parm_data->length = cpu_to_le64(len);  /* normalize negative numbers */

	pSMB->DataOffset = cpu_to_le16(offset);
	pSMB->Fid = smb_file_id;
	pSMB->InformationLevel = cpu_to_le16(SMB_SET_POSIX_LOCK);
	pSMB->Reserved4 = 0;
	inc_rfc1001_len(pSMB, byte_count);
	pSMB->ByteCount = cpu_to_le16(byte_count);
	if (waitFlag) {
		rc = SendReceiveBlockingLock(xid, tcon, (struct smb_hdr *) pSMB,
			(struct smb_hdr *) pSMBr, &bytes_returned);
	} else {
		iov[0].iov_base = (char *)pSMB;
		iov[0].iov_len = be32_to_cpu(pSMB->hdr.smb_buf_length) + 4;
		rc = SendReceive2(xid, tcon->ses, iov, 1 /* num iovecs */,
				&resp_buf_type, timeout);
		pSMB = NULL; /* request buf already freed by SendReceive2. Do
				not try to free it twice below on exit */
		pSMBr = (struct smb_com_transaction2_sfi_rsp *)iov[0].iov_base;
	}

	if (rc) {
		cifs_dbg(FYI, "Send error in Posix Lock = %d\n", rc);
	} else if (pLockData) {
		/* lock structure can be returned on get */
		__u16 data_offset;
		__u16 data_count;
		rc = validate_t2((struct smb_t2_rsp *)pSMBr);

		if (rc || get_bcc(&pSMBr->hdr) < sizeof(*parm_data)) {
			rc = -EIO;      /* bad smb */
			goto plk_err_exit;
		}
		data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
		data_count  = le16_to_cpu(pSMBr->t2.DataCount);
		if (data_count < sizeof(struct cifs_posix_lock)) {
			rc = -EIO;
			goto plk_err_exit;
		}
		parm_data = (struct cifs_posix_lock *)
			((char *)&pSMBr->hdr.Protocol + data_offset);
		if (parm_data->lock_type == __constant_cpu_to_le16(CIFS_UNLCK))
			pLockData->fl_type = F_UNLCK;
		else {
			if (parm_data->lock_type ==
					__constant_cpu_to_le16(CIFS_RDLCK))
				pLockData->fl_type = F_RDLCK;
			else if (parm_data->lock_type ==
					__constant_cpu_to_le16(CIFS_WRLCK))
				pLockData->fl_type = F_WRLCK;

			pLockData->fl_start = le64_to_cpu(parm_data->start);
			pLockData->fl_end = pLockData->fl_start +
					le64_to_cpu(parm_data->length) - 1;
			pLockData->fl_pid = le32_to_cpu(parm_data->pid);
		}
	}

plk_err_exit:
	if (pSMB)
		cifs_small_buf_release(pSMB);

	if (resp_buf_type == CIFS_SMALL_BUFFER)
		cifs_small_buf_release(iov[0].iov_base);
	else if (resp_buf_type == CIFS_LARGE_BUFFER)
		cifs_buf_release(iov[0].iov_base);

	/* Note: On -EAGAIN error only caller can retry on handle based calls
	   since file handle passed in no longer valid */

	return rc;
}

/*
 * Close the server file handle @smb_file_id.  A dead session
 * (-EAGAIN) is treated as success: the server has already closed
 * the handle, so there is nothing left to do.
 */
int
CIFSSMBClose(const unsigned int xid, struct cifs_tcon *tcon, int smb_file_id)
{
	int rc = 0;
	CLOSE_REQ *pSMB = NULL;
	cifs_dbg(FYI,
"In CIFSSMBClose\n");

/* do not retry on dead session on close */
	rc = small_smb_init(SMB_COM_CLOSE, 3, tcon, (void **) &pSMB);
	if (rc == -EAGAIN)
		return 0;
	if (rc)
		return rc;

	pSMB->FileID = (__u16) smb_file_id;
	pSMB->LastWriteTime = 0xFFFFFFFF; /* -1: let server keep its mtime */
	pSMB->ByteCount = 0;
	rc = SendReceiveNoRsp(xid, tcon->ses, (char *) pSMB, 0);
	cifs_stats_inc(&tcon->stats.cifs_stats.num_closes);
	if (rc) {
		if (rc != -EINTR) {
			/* EINTR is expected when user ctl-c to kill app */
			cifs_dbg(VFS, "Send error in Close = %d\n", rc);
		}
	}

	/* Since session is dead, file will be closed on server already */
	if (rc == -EAGAIN)
		rc = 0;

	return rc;
}

/*
 * Ask the server to flush dirty data for handle @smb_file_id to
 * stable storage (SMB_COM_FLUSH).
 */
int
CIFSSMBFlush(const unsigned int xid, struct cifs_tcon *tcon, int smb_file_id)
{
	int rc = 0;
	FLUSH_REQ *pSMB = NULL;
	cifs_dbg(FYI, "In CIFSSMBFlush\n");

	rc = small_smb_init(SMB_COM_FLUSH, 1, tcon, (void **) &pSMB);
	if (rc)
		return rc;

	pSMB->FileID = (__u16) smb_file_id;
	pSMB->ByteCount = 0;
	rc = SendReceiveNoRsp(xid, tcon->ses, (char *) pSMB, 0);
	cifs_stats_inc(&tcon->stats.cifs_stats.num_flushes);
	if (rc)
		cifs_dbg(VFS, "Send error in Flush = %d\n", rc);

	return rc;
}

/*
 * Path-based rename (SMB_COM_RENAME) from @from_name to @to_name.
 * Both names are marshalled into OldFileName back to back, separated
 * by a 0x04 (ASCII string) buffer-format signature byte as the
 * protocol requires.  Retries the whole request on -EAGAIN.
 */
int
CIFSSMBRename(const unsigned int xid, struct cifs_tcon *tcon,
	      const char *from_name, const char *to_name,
	      struct cifs_sb_info *cifs_sb)
{
	int rc = 0;
	RENAME_REQ *pSMB = NULL;
	RENAME_RSP *pSMBr = NULL;
	int bytes_returned;
	int name_len, name_len2;
	__u16 count;
	int remap = cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR;

	cifs_dbg(FYI, "In CIFSSMBRename\n");
renameRetry:
	rc = smb_init(SMB_COM_RENAME, 1, tcon, (void **) &pSMB,
		      (void **) &pSMBr);
	if (rc)
		return rc;

	pSMB->BufferFormat = 0x04;
	pSMB->SearchAttributes =
	    cpu_to_le16(ATTR_READONLY | ATTR_HIDDEN | ATTR_SYSTEM |
			ATTR_DIRECTORY);

	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
		name_len = cifsConvertToUTF16((__le16 *) pSMB->OldFileName,
					      from_name, PATH_MAX,
					      cifs_sb->local_nls, remap);
		name_len++;	/* trailing null */
		name_len *= 2;
		pSMB->OldFileName[name_len] = 0x04;	/* pad */
	/* protocol requires ASCII signature byte on Unicode string */
		pSMB->OldFileName[name_len + 1] = 0x00;
		name_len2 =
		    cifsConvertToUTF16((__le16 *)&pSMB->OldFileName[name_len+2],
				       to_name, PATH_MAX, cifs_sb->local_nls,
				       remap);
		name_len2 += 1 /* trailing null */  + 1 /* Signature word */ ;
		name_len2 *= 2;	/* convert to bytes */
	} else {	/* BB improve the check for buffer overruns BB */
		name_len = strnlen(from_name, PATH_MAX);
		name_len++;	/* trailing null */
		strncpy(pSMB->OldFileName, from_name, name_len);
		name_len2 = strnlen(to_name, PATH_MAX);
		name_len2++;	/* trailing null */
		pSMB->OldFileName[name_len] = 0x04;  /* 2nd buffer format */
		strncpy(&pSMB->OldFileName[name_len + 1], to_name, name_len2);
		name_len2++;	/* trailing null */
		name_len2++;	/* signature byte */
	}

	count = 1 /* 1st signature byte */  + name_len + name_len2;
	inc_rfc1001_len(pSMB, count);
	pSMB->ByteCount = cpu_to_le16(count);

	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
	cifs_stats_inc(&tcon->stats.cifs_stats.num_renames);
	if (rc)
		cifs_dbg(FYI, "Send error in rename = %d\n", rc);

	cifs_buf_release(pSMB);

	if (rc == -EAGAIN)
		goto renameRetry;

	return rc;
}

/*
 * Rename an already-open file by handle via TRANS2 set-file-info
 * (SMB_SET_FILE_RENAME_INFORMATION).  When @target_name is NULL a
 * temporary name derived from the request Mid is used (silly-rename
 * support for unlink of an open file).
 */
int CIFSSMBRenameOpenFile(const unsigned int xid, struct cifs_tcon *pTcon,
		int netfid, const char *target_name,
		const struct nls_table *nls_codepage, int remap)
{
	struct smb_com_transaction2_sfi_req *pSMB = NULL;
	struct smb_com_transaction2_sfi_rsp *pSMBr = NULL;
	struct set_file_rename *rename_info;
	char *data_offset;
	char dummy_string[30];
	int rc = 0;
	int bytes_returned = 0;
	int len_of_str;
	__u16 params, param_offset, offset, count, byte_count;

	cifs_dbg(FYI, "Rename to File by handle\n");
	rc = smb_init(SMB_COM_TRANSACTION2, 15, pTcon, (void **) &pSMB,
			(void **) &pSMBr);
	if (rc)
		return rc;

	params = 6;
	pSMB->MaxSetupCount = 0;
	pSMB->Reserved = 0;
	pSMB->Flags = 0;
	pSMB->Timeout = 0;
	pSMB->Reserved2 = 0;
	param_offset = offsetof(struct smb_com_transaction2_sfi_req, Fid) - 4;
	offset = param_offset + params;

	data_offset = (char *) (&pSMB->hdr.Protocol) + offset;
/* data area of the TRANS2 request holds the set_file_rename payload */
	rename_info = (struct set_file_rename *) data_offset;
	pSMB->MaxParameterCount = cpu_to_le16(2);
	pSMB->MaxDataCount = cpu_to_le16(1000); /* BB find max SMB from sess */
	pSMB->SetupCount = 1;
	pSMB->Reserved3 = 0;
	pSMB->SubCommand = cpu_to_le16(TRANS2_SET_FILE_INFORMATION);
	byte_count = 3 /* pad */  + params;
	pSMB->ParameterCount = cpu_to_le16(params);
	pSMB->TotalParameterCount = pSMB->ParameterCount;
	pSMB->ParameterOffset = cpu_to_le16(param_offset);
	pSMB->DataOffset = cpu_to_le16(offset);
	/* construct random name ".cifs_tmp<inodenum><mid>" */
	rename_info->overwrite = cpu_to_le32(1);
	rename_info->root_fid  = 0;
	/* unicode only call */
	if (target_name == NULL) {
		sprintf(dummy_string, "cifs%x", pSMB->hdr.Mid);
		len_of_str =
			cifsConvertToUTF16((__le16 *)rename_info->target_name,
						dummy_string, 24, nls_codepage,
						remap);
	} else {
		len_of_str =
			cifsConvertToUTF16((__le16 *)rename_info->target_name,
						target_name, PATH_MAX,
						nls_codepage, remap);
	}
	rename_info->target_name_len = cpu_to_le32(2 * len_of_str);
	count = 12 /* sizeof(struct set_file_rename) */ + (2 * len_of_str);
	byte_count += count;
	pSMB->DataCount = cpu_to_le16(count);
	pSMB->TotalDataCount = pSMB->DataCount;
	pSMB->Fid = netfid;
	pSMB->InformationLevel =
		cpu_to_le16(SMB_SET_FILE_RENAME_INFORMATION);
	pSMB->Reserved4 = 0;
	inc_rfc1001_len(pSMB, byte_count);
	pSMB->ByteCount = cpu_to_le16(byte_count);
	rc = SendReceive(xid, pTcon->ses, (struct smb_hdr *) pSMB,
			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
	cifs_stats_inc(&pTcon->stats.cifs_stats.num_t2renames);
	if (rc)
		cifs_dbg(FYI, "Send error in Rename (by file handle) = %d\n",
			 rc);

	cifs_buf_release(pSMB);

	/* Note: On -EAGAIN error only caller can retry on handle based calls
		since file handle passed in no longer valid */

	return rc;
}

/*
 * Server-side copy (SMB_COM_COPY) of @fromName to @toName, optionally
 * into a different tree (@target_tid).  Name marshalling mirrors
 * CIFSSMBRename: two names separated by a 0x04 signature byte.
 */
int CIFSSMBCopy(const unsigned int xid, struct cifs_tcon *tcon,
		const char *fromName, const __u16 target_tid,
		const char *toName, const int flags,
		const struct nls_table *nls_codepage, int remap)
{
	int rc = 0;
	COPY_REQ *pSMB = NULL;
	COPY_RSP *pSMBr = NULL;
	int bytes_returned;
	int name_len, name_len2;
	__u16 count;

	cifs_dbg(FYI, "In CIFSSMBCopy\n");
copyRetry:
	rc = smb_init(SMB_COM_COPY, 1, tcon, (void **) &pSMB,
			(void **) &pSMBr);
	if (rc)
		return rc;

	pSMB->BufferFormat = 0x04;
	pSMB->Tid2 = target_tid;

	pSMB->Flags = cpu_to_le16(flags & COPY_TREE);

	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
		name_len = cifsConvertToUTF16((__le16 *) pSMB->OldFileName,
					      fromName, PATH_MAX, nls_codepage,
					      remap);
		name_len++;     /* trailing null */
		name_len *= 2;
		pSMB->OldFileName[name_len] = 0x04;     /* pad */
		/* protocol requires ASCII signature byte on Unicode string */
		pSMB->OldFileName[name_len + 1] = 0x00;
		name_len2 =
		    cifsConvertToUTF16((__le16 *)&pSMB->OldFileName[name_len+2],
				       toName, PATH_MAX, nls_codepage, remap);
		name_len2 += 1 /* trailing null */  + 1 /* Signature word */ ;
		name_len2 *= 2; /* convert to bytes */
	} else { 	/* BB improve the check for buffer overruns BB */
		name_len = strnlen(fromName, PATH_MAX);
		name_len++;     /* trailing null */
		strncpy(pSMB->OldFileName, fromName, name_len);
		name_len2 = strnlen(toName, PATH_MAX);
		name_len2++;    /* trailing null */
		pSMB->OldFileName[name_len] = 0x04;  /* 2nd buffer format */
		strncpy(&pSMB->OldFileName[name_len + 1], toName, name_len2);
		name_len2++;    /* trailing null */
		name_len2++;    /* signature byte */
	}

	count = 1 /* 1st signature byte */  + name_len + name_len2;
	inc_rfc1001_len(pSMB, count);
	pSMB->ByteCount = cpu_to_le16(count);

	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
		(struct smb_hdr *) pSMBr, &bytes_returned, 0);
	if (rc) {
		cifs_dbg(FYI, "Send error in copy = %d with %d files copied\n",
			 rc, le16_to_cpu(pSMBr->CopyCount));
	}
	cifs_buf_release(pSMB);

	if (rc == -EAGAIN)
		goto copyRetry;

	return rc;
}

/*
 * Create a symlink @fromName -> @toName via the CIFS Unix Extensions
 * (TRANS2 set-path-info, level SMB_SET_FILE_UNIX_LINK).  The link
 * target travels in the data area of the request.
 */
int
CIFSUnixCreateSymLink(const unsigned int xid, struct cifs_tcon *tcon,
		      const char *fromName, const char *toName,
		      const struct nls_table *nls_codepage)
{
	TRANSACTION2_SPI_REQ *pSMB = NULL;
	TRANSACTION2_SPI_RSP *pSMBr = NULL;
	char *data_offset;
	int name_len;
	int
/* remainder of CIFSUnixCreateSymLink locals (decl split by `int` above) */
	name_len_target;
	int rc = 0;
	int bytes_returned = 0;
	__u16 params, param_offset, offset, byte_count;

	cifs_dbg(FYI, "In Symlink Unix style\n");
createSymLinkRetry:
	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
		      (void **) &pSMBr);
	if (rc)
		return rc;

	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
		name_len =
		    cifs_strtoUTF16((__le16 *) pSMB->FileName, fromName,
				    /* find define for this maxpathcomponent */
				    PATH_MAX, nls_codepage);
		name_len++;	/* trailing null */
		name_len *= 2;
	} else {	/* BB improve the check for buffer overruns BB */
		name_len = strnlen(fromName, PATH_MAX);
		name_len++;	/* trailing null */
		strncpy(pSMB->FileName, fromName, name_len);
	}
	params = 6 + name_len;
	pSMB->MaxSetupCount = 0;
	pSMB->Reserved = 0;
	pSMB->Flags = 0;
	pSMB->Timeout = 0;
	pSMB->Reserved2 = 0;
	param_offset = offsetof(struct smb_com_transaction2_spi_req,
				InformationLevel) - 4;
	offset = param_offset + params;

	/* link target string goes into the data area after the parameters */
	data_offset = (char *) (&pSMB->hdr.Protocol) + offset;
	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
		name_len_target =
		    cifs_strtoUTF16((__le16 *) data_offset, toName, PATH_MAX
				    /* find define for this maxpathcomponent */
				    , nls_codepage);
		name_len_target++;	/* trailing null */
		name_len_target *= 2;
	} else {	/* BB improve the check for buffer overruns BB */
		name_len_target = strnlen(toName, PATH_MAX);
		name_len_target++;	/* trailing null */
		strncpy(data_offset, toName, name_len_target);
	}

	pSMB->MaxParameterCount = cpu_to_le16(2);
	/* BB find exact max on data count below from sess */
	pSMB->MaxDataCount = cpu_to_le16(1000);
	pSMB->SetupCount = 1;
	pSMB->Reserved3 = 0;
	pSMB->SubCommand = cpu_to_le16(TRANS2_SET_PATH_INFORMATION);
	byte_count = 3 /* pad */  + params + name_len_target;
	pSMB->DataCount = cpu_to_le16(name_len_target);
	pSMB->ParameterCount = cpu_to_le16(params);
	pSMB->TotalDataCount = pSMB->DataCount;
	pSMB->TotalParameterCount = pSMB->ParameterCount;
	pSMB->ParameterOffset = cpu_to_le16(param_offset);
	pSMB->DataOffset = cpu_to_le16(offset);
	pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_UNIX_LINK);
	pSMB->Reserved4 = 0;
	inc_rfc1001_len(pSMB, byte_count);
	pSMB->ByteCount = cpu_to_le16(byte_count);
	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
	cifs_stats_inc(&tcon->stats.cifs_stats.num_symlinks);
	if (rc)
		cifs_dbg(FYI, "Send error in SetPathInfo create symlink = %d\n",
			 rc);

	cifs_buf_release(pSMB);

	if (rc == -EAGAIN)
		goto createSymLinkRetry;

	return rc;
}

/*
 * Create a hard link via the CIFS Unix Extensions (TRANS2
 * set-path-info, level SMB_SET_FILE_UNIX_HLINK).  Note the argument
 * roles: the *path* marshalled into FileName is @toName (the new
 * link) and the data area carries @fromName (the existing file).
 */
int
CIFSUnixCreateHardLink(const unsigned int xid, struct cifs_tcon *tcon,
		       const char *fromName, const char *toName,
		       const struct nls_table *nls_codepage, int remap)
{
	TRANSACTION2_SPI_REQ *pSMB = NULL;
	TRANSACTION2_SPI_RSP *pSMBr = NULL;
	char *data_offset;
	int name_len;
	int name_len_target;
	int rc = 0;
	int bytes_returned = 0;
	__u16 params, param_offset, offset, byte_count;

	cifs_dbg(FYI, "In Create Hard link Unix style\n");
createHardLinkRetry:
	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
		      (void **) &pSMBr);
	if (rc)
		return rc;

	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
		name_len = cifsConvertToUTF16((__le16 *) pSMB->FileName, toName,
					      PATH_MAX, nls_codepage, remap);
		name_len++;	/* trailing null */
		name_len *= 2;
	} else {	/* BB improve the check for buffer overruns BB */
		name_len = strnlen(toName, PATH_MAX);
		name_len++;	/* trailing null */
		strncpy(pSMB->FileName, toName, name_len);
	}
	params = 6 + name_len;
	pSMB->MaxSetupCount = 0;
	pSMB->Reserved = 0;
	pSMB->Flags = 0;
	pSMB->Timeout = 0;
	pSMB->Reserved2 = 0;
	param_offset = offsetof(struct smb_com_transaction2_spi_req,
				InformationLevel) - 4;
	offset = param_offset + params;

	data_offset = (char *) (&pSMB->hdr.Protocol) + offset;
	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
		name_len_target =
		    cifsConvertToUTF16((__le16 *) data_offset, fromName,
				       PATH_MAX, nls_codepage, remap);
		name_len_target++;	/* trailing null */
		name_len_target *= 2;
	} else {	/* BB improve the check for buffer overruns BB */
		name_len_target = strnlen(fromName, PATH_MAX);
		name_len_target++;	/* trailing null */
		strncpy(data_offset, fromName, name_len_target);
	}

	pSMB->MaxParameterCount = cpu_to_le16(2);
	/* BB find exact max on data count below from sess*/
	pSMB->MaxDataCount = cpu_to_le16(1000);
	pSMB->SetupCount = 1;
	pSMB->Reserved3 = 0;
	pSMB->SubCommand = cpu_to_le16(TRANS2_SET_PATH_INFORMATION);
	byte_count = 3 /* pad */  + params + name_len_target;
	pSMB->ParameterCount = cpu_to_le16(params);
	pSMB->TotalParameterCount = pSMB->ParameterCount;
	pSMB->DataCount = cpu_to_le16(name_len_target);
	pSMB->TotalDataCount = pSMB->DataCount;
	pSMB->ParameterOffset = cpu_to_le16(param_offset);
	pSMB->DataOffset = cpu_to_le16(offset);
	pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_UNIX_HLINK);
	pSMB->Reserved4 = 0;
	inc_rfc1001_len(pSMB, byte_count);
	pSMB->ByteCount = cpu_to_le16(byte_count);
	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
	cifs_stats_inc(&tcon->stats.cifs_stats.num_hardlinks);
	if (rc)
		cifs_dbg(FYI, "Send error in SetPathInfo (hard link) = %d\n",
			 rc);

	cifs_buf_release(pSMB);
	if (rc == -EAGAIN)
		goto createHardLinkRetry;

	return rc;
}

/*
 * Create a hard link on servers without the Unix Extensions, using
 * SMB_COM_NT_RENAME with the CREATE_HARD_LINK flag.
 */
int CIFSCreateHardLink(const unsigned int xid, struct cifs_tcon *tcon,
		       const char *from_name, const char *to_name,
		       struct cifs_sb_info *cifs_sb)
{
	int rc = 0;
	NT_RENAME_REQ *pSMB = NULL;
	RENAME_RSP *pSMBr = NULL;
	int bytes_returned;
	int name_len, name_len2;
	__u16 count;
	int remap = cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR;

	cifs_dbg(FYI, "In CIFSCreateHardLink\n");
winCreateHardLinkRetry:

	rc = smb_init(SMB_COM_NT_RENAME, 4, tcon, (void **) &pSMB,
		      (void **) &pSMBr);
	if (rc)
		return rc;

	pSMB->SearchAttributes =
	    cpu_to_le16(ATTR_READONLY | ATTR_HIDDEN | ATTR_SYSTEM |
			ATTR_DIRECTORY);
	pSMB->Flags = cpu_to_le16(CREATE_HARD_LINK);
	pSMB->ClusterCount = 0;

	pSMB->BufferFormat = 0x04;

	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
		name_len =
		    cifsConvertToUTF16((__le16 *) pSMB->OldFileName, from_name,
				       PATH_MAX, cifs_sb->local_nls, remap);
		name_len++;	/* trailing null */
		name_len
*= 2;

		/* protocol specifies ASCII buffer format (0x04) for unicode */
		pSMB->OldFileName[name_len] = 0x04;
		pSMB->OldFileName[name_len + 1] = 0x00; /* pad */
		name_len2 =
		    cifsConvertToUTF16((__le16 *)&pSMB->OldFileName[name_len+2],
				       to_name, PATH_MAX, cifs_sb->local_nls,
				       remap);
		name_len2 += 1 /* trailing null */  + 1 /* Signature word */ ;
		name_len2 *= 2;	/* convert to bytes */
	} else {	/* BB improve the check for buffer overruns BB */
		name_len = strnlen(from_name, PATH_MAX);
		name_len++;	/* trailing null */
		strncpy(pSMB->OldFileName, from_name, name_len);
		name_len2 = strnlen(to_name, PATH_MAX);
		name_len2++;	/* trailing null */
		pSMB->OldFileName[name_len] = 0x04;	/* 2nd buffer format */
		strncpy(&pSMB->OldFileName[name_len + 1], to_name, name_len2);
		name_len2++;	/* trailing null */
		name_len2++;	/* signature byte */
	}

	count = 1 /* string type byte */  + name_len + name_len2;
	inc_rfc1001_len(pSMB, count);
	pSMB->ByteCount = cpu_to_le16(count);

	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
	cifs_stats_inc(&tcon->stats.cifs_stats.num_hardlinks);
	if (rc)
		cifs_dbg(FYI, "Send error in hard link (NT rename) = %d\n", rc);

	cifs_buf_release(pSMB);
	if (rc == -EAGAIN)
		goto winCreateHardLinkRetry;

	return rc;
}

/*
 * Read a symlink target by path via the CIFS Unix Extensions (TRANS2
 * query-path-info, level SMB_QUERY_FILE_UNIX_LINK).  On success
 * *@symlinkinfo is set to a freshly allocated target string (caller
 * frees); returns -ENOMEM if the allocation fails.
 */
int
CIFSSMBUnixQuerySymLink(const unsigned int xid, struct cifs_tcon *tcon,
			const unsigned char *searchName, char **symlinkinfo,
			const struct nls_table *nls_codepage)
{
/* SMB_QUERY_FILE_UNIX_LINK */
	TRANSACTION2_QPI_REQ *pSMB = NULL;
	TRANSACTION2_QPI_RSP *pSMBr = NULL;
	int rc = 0;
	int bytes_returned;
	int name_len;
	__u16 params, byte_count;
	char *data_start;

	cifs_dbg(FYI, "In QPathSymLinkInfo (Unix) for path %s\n", searchName);

querySymLinkRetry:
	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
		      (void **) &pSMBr);
	if (rc)
		return rc;

	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
		name_len =
			cifs_strtoUTF16((__le16 *) pSMB->FileName, searchName,
					PATH_MAX, nls_codepage);
		name_len++;	/* trailing null */
		name_len *= 2;
	} else {	/* BB improve the check for buffer overruns BB */
		name_len = strnlen(searchName, PATH_MAX);
		name_len++;	/* trailing null */
		strncpy(pSMB->FileName, searchName, name_len);
	}

	params = 2 /* level */  + 4 /* rsrvd */  + name_len /* incl null */ ;
	pSMB->TotalDataCount = 0;
	pSMB->MaxParameterCount = cpu_to_le16(2);
	pSMB->MaxDataCount = cpu_to_le16(CIFSMaxBufSize);
	pSMB->MaxSetupCount = 0;
	pSMB->Reserved = 0;
	pSMB->Flags = 0;
	pSMB->Timeout = 0;
	pSMB->Reserved2 = 0;
	pSMB->ParameterOffset = cpu_to_le16(offsetof(
	struct smb_com_transaction2_qpi_req, InformationLevel) - 4);
	pSMB->DataCount = 0;
	pSMB->DataOffset = 0;
	pSMB->SetupCount = 1;
	pSMB->Reserved3 = 0;
	pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_PATH_INFORMATION);
	byte_count = params + 1 /* pad */ ;
	pSMB->TotalParameterCount = cpu_to_le16(params);
	pSMB->ParameterCount = pSMB->TotalParameterCount;
	pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_FILE_UNIX_LINK);
	pSMB->Reserved4 = 0;
	inc_rfc1001_len(pSMB, byte_count);
	pSMB->ByteCount = cpu_to_le16(byte_count);

	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
	if (rc) {
		cifs_dbg(FYI, "Send error in QuerySymLinkInfo = %d\n", rc);
	} else {
		/* decode response */
		rc = validate_t2((struct smb_t2_rsp *)pSMBr);
		/* BB also check enough total bytes returned */
		if (rc || get_bcc(&pSMBr->hdr) < 2)
			rc = -EIO;
		else {
			bool is_unicode;
			u16 count = le16_to_cpu(pSMBr->t2.DataCount);

			data_start = ((char *) &pSMBr->hdr.Protocol) +
				le16_to_cpu(pSMBr->t2.DataOffset);

			if (pSMBr->hdr.Flags2 & SMBFLG2_UNICODE)
				is_unicode = true;
			else
				is_unicode = false;

			/* BB FIXME investigate remapping reserved chars here */
			*symlinkinfo = cifs_strndup_from_utf16(data_start,
					count, is_unicode, nls_codepage);
			if (!*symlinkinfo)
				rc = -ENOMEM;
		}
	}
	cifs_buf_release(pSMB);
	if (rc == -EAGAIN)
		goto querySymLinkRetry;
	return rc;
}

/*
 * Recent Windows versions now create symlinks more frequently
 * and they use the "reparse point" mechanism below.
 We can of course
 * do symlinks nicely to Samba and other servers which support the
 * CIFS Unix Extensions and we can also do SFU symlinks and "client only"
 * "MF" symlinks optionally, but for recent Windows we really need to
 * reenable the code below and fix the cifs_symlink callers to handle this.
 * In the interim this code has been moved to its own config option so
 * it is not compiled in by default until callers fixed up and more tested.
 */
/*
 * Read a symlink target by open handle using an NT_TRANSACT ioctl
 * (FSCTL_GET_REPARSE_POINT).  Handles both NFS-style reparse points
 * (IO_REPARSE_TAG_NFS, from servers such as Samba/NFS gateways) and
 * native NTFS symlink reparse data (IO_REPARSE_TAG_SYMLINK); any
 * other tag yields -EOPNOTSUPP.  Bounds-checks every offset/length
 * taken from the response against the end of the SMB buffer.
 */
int
CIFSSMBQuerySymLink(const unsigned int xid, struct cifs_tcon *tcon,
		    __u16 fid, char **symlinkinfo,
		    const struct nls_table *nls_codepage)
{
	int rc = 0;
	int bytes_returned;
	struct smb_com_transaction_ioctl_req *pSMB;
	struct smb_com_transaction_ioctl_rsp *pSMBr;
	bool is_unicode;
	unsigned int sub_len;
	char *sub_start;
	struct reparse_symlink_data *reparse_buf;
	struct reparse_posix_data *posix_buf;
	__u32 data_offset, data_count;
	char *end_of_smb;

	cifs_dbg(FYI, "In Windows reparse style QueryLink for fid %u\n", fid);
	rc = smb_init(SMB_COM_NT_TRANSACT, 23, tcon, (void **) &pSMB,
		      (void **) &pSMBr);
	if (rc)
		return rc;

	pSMB->TotalParameterCount = 0 ;
	pSMB->TotalDataCount = 0;
	pSMB->MaxParameterCount = cpu_to_le32(2);
	/* BB find exact data count max from sess structure BB */
	pSMB->MaxDataCount = cpu_to_le32(CIFSMaxBufSize & 0xFFFFFF00);
	pSMB->MaxSetupCount = 4;
	pSMB->Reserved = 0;
	pSMB->ParameterOffset = 0;
	pSMB->DataCount = 0;
	pSMB->DataOffset = 0;
	pSMB->SetupCount = 4;
	pSMB->SubCommand = cpu_to_le16(NT_TRANSACT_IOCTL);
	pSMB->ParameterCount = pSMB->TotalParameterCount;
	pSMB->FunctionCode = cpu_to_le32(FSCTL_GET_REPARSE_POINT);
	pSMB->IsFsctl = 1; /* FSCTL */
	pSMB->IsRootFlag = 0;
	pSMB->Fid = fid; /* file handle always le */
	pSMB->ByteCount = 0;

	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
	if (rc) {
		cifs_dbg(FYI, "Send error in QueryReparseLinkInfo = %d\n", rc);
		goto qreparse_out;
	}

	data_offset = le32_to_cpu(pSMBr->DataOffset);
	data_count = le32_to_cpu(pSMBr->DataCount);
	if (get_bcc(&pSMBr->hdr) < 2 || data_offset > 512) {
		/* BB also check enough total bytes returned */
		rc = -EIO;	/* bad smb */
		goto qreparse_out;
	}
	if (!data_count || (data_count > 2048)) {
		rc = -EIO;
		cifs_dbg(FYI, "Invalid return data count on get reparse info ioctl\n");
		goto qreparse_out;
	}
	/* first byte past the byte-count field + BCC bytes */
	end_of_smb = 2 + get_bcc(&pSMBr->hdr) + (char *)&pSMBr->ByteCount;
	reparse_buf = (struct reparse_symlink_data *)
				((char *)&pSMBr->hdr.Protocol + data_offset);
	if ((char *)reparse_buf >= end_of_smb) {
		rc = -EIO;
		goto qreparse_out;
	}
	if (reparse_buf->ReparseTag == cpu_to_le32(IO_REPARSE_TAG_NFS)) {
		cifs_dbg(FYI, "NFS style reparse tag\n");
		posix_buf =  (struct reparse_posix_data *)reparse_buf;
		if (posix_buf->InodeType != cpu_to_le64(NFS_SPECFILE_LNK)) {
			cifs_dbg(FYI, "unsupported file type 0x%llx\n",
				 le64_to_cpu(posix_buf->InodeType));
			rc = -EOPNOTSUPP;
			goto qreparse_out;
		}
		is_unicode = true;
		sub_len = le16_to_cpu(reparse_buf->ReparseDataLength);
		if (posix_buf->PathBuffer + sub_len > end_of_smb) {
			cifs_dbg(FYI, "reparse buf beyond SMB\n");
			rc = -EIO;
			goto qreparse_out;
		}
		*symlinkinfo = cifs_strndup_from_utf16(posix_buf->PathBuffer,
				sub_len, is_unicode, nls_codepage);
		goto qreparse_out;
	} else if (reparse_buf->ReparseTag !=
			cpu_to_le32(IO_REPARSE_TAG_SYMLINK)) {
		rc = -EOPNOTSUPP;
		goto qreparse_out;
	}

	/* Reparse tag is NTFS symlink */
	sub_start = le16_to_cpu(reparse_buf->SubstituteNameOffset) +
				reparse_buf->PathBuffer;
	sub_len = le16_to_cpu(reparse_buf->SubstituteNameLength);
	if (sub_start + sub_len > end_of_smb) {
		cifs_dbg(FYI, "reparse buf beyond SMB\n");
		rc = -EIO;
		goto qreparse_out;
	}
	if (pSMBr->hdr.Flags2 & SMBFLG2_UNICODE)
		is_unicode = true;
	else
		is_unicode = false;

	/* BB FIXME investigate remapping reserved chars here */
	*symlinkinfo = cifs_strndup_from_utf16(sub_start, sub_len, is_unicode,
					       nls_codepage);
	if (!*symlinkinfo)
		rc = -ENOMEM;
qreparse_out:
	cifs_buf_release(pSMB);

	/*
	 * Note: On -EAGAIN error only caller can retry on handle based calls
	 * since file handle passed in no longer valid.
	 */
	return rc;
}

/*
 * Enable default NTFS compression on the open handle @fid via an
 * NT_TRANSACT ioctl (FSCTL_SET_COMPRESSION) carrying a 2-byte
 * COMPRESSION_FORMAT_DEFAULT state after a 3-byte pad.
 */
int
CIFSSMB_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
		    __u16 fid)
{
	int rc = 0;
	int bytes_returned;
	struct smb_com_transaction_compr_ioctl_req *pSMB;
	struct smb_com_transaction_ioctl_rsp *pSMBr;

	cifs_dbg(FYI, "Set compression for %u\n", fid);
	rc = smb_init(SMB_COM_NT_TRANSACT, 23, tcon, (void **) &pSMB,
		      (void **) &pSMBr);
	if (rc)
		return rc;

	pSMB->compression_state = cpu_to_le16(COMPRESSION_FORMAT_DEFAULT);

	pSMB->TotalParameterCount = 0;
	pSMB->TotalDataCount = __constant_cpu_to_le32(2);
	pSMB->MaxParameterCount = 0;
	pSMB->MaxDataCount = 0;
	pSMB->MaxSetupCount = 4;
	pSMB->Reserved = 0;
	pSMB->ParameterOffset = 0;
	pSMB->DataCount = __constant_cpu_to_le32(2);
	pSMB->DataOffset =
		cpu_to_le32(offsetof(struct smb_com_transaction_compr_ioctl_req,
				compression_state) - 4);  /* 84 */
	pSMB->SetupCount = 4;
	pSMB->SubCommand = __constant_cpu_to_le16(NT_TRANSACT_IOCTL);
	pSMB->ParameterCount = 0;
	pSMB->FunctionCode = __constant_cpu_to_le32(FSCTL_SET_COMPRESSION);
	pSMB->IsFsctl = 1; /* FSCTL */
	pSMB->IsRootFlag = 0;
	pSMB->Fid = fid; /* file handle always le */
	/* 3 byte pad, followed by 2 byte compress state */
	pSMB->ByteCount = __constant_cpu_to_le16(5);
	inc_rfc1001_len(pSMB, 5);

	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
	if (rc)
		cifs_dbg(FYI, "Send error in SetCompression = %d\n", rc);

	cifs_buf_release(pSMB);

	/*
	 * Note: On -EAGAIN error only caller can retry on handle based calls
	 * since file handle passed in no longer valid.
 */
	return rc;
}

#ifdef CONFIG_CIFS_POSIX

/*Convert an Access Control Entry from wire format to local POSIX xattr format*/
static void cifs_convert_ace(posix_acl_xattr_entry *ace,
			     struct cifs_posix_ace *cifs_ace)
{
	/* u8 cifs fields do not need le conversion */
	ace->e_perm = cpu_to_le16(cifs_ace->cifs_e_perm);
	ace->e_tag  = cpu_to_le16(cifs_ace->cifs_e_tag);
	/* wire uid is 64-bit; xattr id is 32-bit little endian */
	ace->e_id   = cpu_to_le32(le64_to_cpu(cifs_ace->cifs_uid));
/*
	cifs_dbg(FYI, "perm %d tag %d id %d\n",
		 ace->e_perm, ace->e_tag, ace->e_id);
*/
	return;
}

/*
 * Convert ACL from CIFS POSIX wire format to local Linux POSIX ACL xattr.
 * With a NULL/zero-length target buffer this only computes and returns
 * the required xattr size; otherwise it copies @count ACEs of the
 * requested @acl_type (access or default) into @trgt.  Returns the
 * xattr size, or a negative errno on malformed/oversized input.
 */
static int cifs_copy_posix_acl(char *trgt, char *src, const int buflen,
			       const int acl_type, const int size_of_data_area)
{
	int size =  0;
	int i;
	__u16 count;
	struct cifs_posix_ace *pACE;
	struct cifs_posix_acl *cifs_acl = (struct cifs_posix_acl *)src;
	posix_acl_xattr_header *local_acl = (posix_acl_xattr_header *)trgt;

	if (le16_to_cpu(cifs_acl->version) != CIFS_ACL_VERSION)
		return -EOPNOTSUPP;

	if (acl_type & ACL_TYPE_ACCESS) {
		count = le16_to_cpu(cifs_acl->access_entry_count);
		pACE = &cifs_acl->ace_array[0];
		size = sizeof(struct cifs_posix_acl);
		size += sizeof(struct cifs_posix_ace) * count;
		/* check if we would go beyond end of SMB */
		if (size_of_data_area < size) {
			cifs_dbg(FYI, "bad CIFS POSIX ACL size %d vs. %d\n",
				 size_of_data_area, size);
			return -EINVAL;
		}
	} else if (acl_type & ACL_TYPE_DEFAULT) {
		count = le16_to_cpu(cifs_acl->access_entry_count);
		size = sizeof(struct cifs_posix_acl);
		size += sizeof(struct cifs_posix_ace) * count;
/* skip past access ACEs to get to default ACEs */
		pACE = &cifs_acl->ace_array[count];
		count = le16_to_cpu(cifs_acl->default_entry_count);
		size += sizeof(struct cifs_posix_ace) * count;
		/* check if we would go beyond end of SMB */
		if (size_of_data_area < size)
			return -EINVAL;
	} else {
		/* illegal type */
		return -EINVAL;
	}

	size = posix_acl_xattr_size(count);
	if ((buflen == 0) || (local_acl == NULL)) {
		/* used to query ACL EA size */
	} else if (size > buflen) {
		return -ERANGE;
	} else /* buffer big enough */ {
		local_acl->a_version = cpu_to_le32(POSIX_ACL_XATTR_VERSION);
		for (i = 0; i < count ; i++) {
			cifs_convert_ace(&local_acl->a_entries[i], pACE);
			pACE++;
		}
	}
	return size;
}

/*
 * Convert one local POSIX ACL xattr entry to CIFS wire format.
 * Returns 0 on success (non-zero would signal a conversion failure).
 */
static __u16 convert_ace_to_cifs_ace(struct cifs_posix_ace *cifs_ace,
				     const posix_acl_xattr_entry *local_ace)
{
	__u16 rc = 0; /* 0 = ACL converted ok */

	cifs_ace->cifs_e_perm = le16_to_cpu(local_ace->e_perm);
	cifs_ace->cifs_e_tag =  le16_to_cpu(local_ace->e_tag);
	/* BB is there a better way to handle the large uid? */
	if (local_ace->e_id == cpu_to_le32(-1)) {
	/* Probably no need to le convert -1 on any arch but can not hurt */
		cifs_ace->cifs_uid = cpu_to_le64(-1);
	} else
		cifs_ace->cifs_uid =
			cpu_to_le64(le32_to_cpu(local_ace->e_id));
/*
	cifs_dbg(FYI, "perm %d tag %d id %d\n",
		 ace->e_perm, ace->e_tag, ace->e_id);
*/
	return rc;
}

/*
 * Convert ACL from local Linux POSIX xattr to CIFS POSIX ACL wire format.
 * The entry count for the *other* ACL type is set to 0xFFFF ("not
 * present") per the CIFS POSIX extensions.  Returns the number of
 * bytes written to @parm_data, or 0 on unsupported input.
 */
static __u16 ACL_to_cifs_posix(char *parm_data, const char *pACL,
			       const int buflen, const int acl_type)
{
	__u16 rc = 0;
	struct cifs_posix_acl *cifs_acl = (struct cifs_posix_acl *)parm_data;
	posix_acl_xattr_header *local_acl = (posix_acl_xattr_header *)pACL;
	int count;
	int i;

	if ((buflen == 0) || (pACL == NULL) || (cifs_acl == NULL))
		return 0;

	count = posix_acl_xattr_count((size_t)buflen);
	cifs_dbg(FYI, "setting acl with %d entries from buf of length %d and version of %d\n",
		 count, buflen, le32_to_cpu(local_acl->a_version));
	if (le32_to_cpu(local_acl->a_version) != 2) {
		cifs_dbg(FYI, "unknown POSIX ACL version %d\n",
			 le32_to_cpu(local_acl->a_version));
		return 0;
	}
	cifs_acl->version = cpu_to_le16(1);
	if (acl_type == ACL_TYPE_ACCESS) {
		cifs_acl->access_entry_count = cpu_to_le16(count);
		cifs_acl->default_entry_count = __constant_cpu_to_le16(0xFFFF);
	} else if (acl_type == ACL_TYPE_DEFAULT) {
		cifs_acl->default_entry_count = cpu_to_le16(count);
		cifs_acl->access_entry_count = __constant_cpu_to_le16(0xFFFF);
	} else {
		cifs_dbg(FYI, "unknown ACL type %d\n", acl_type);
		return 0;
	}
	for (i = 0; i < count; i++) {
		rc = convert_ace_to_cifs_ace(&cifs_acl->ace_array[i],
					     &local_acl->a_entries[i]);
		if (rc != 0) {
			/* ACE not converted */
			break;
		}
	}
	if (rc == 0) {
		rc = (__u16)(count * sizeof(struct cifs_posix_ace));
		rc += sizeof(struct cifs_posix_acl);
		/* BB add check to make sure ACL does not overflow SMB */
	}
	return rc;
}

/*
 * Query a POSIX ACL by path (TRANS2 query-path-info, level
 * SMB_QUERY_POSIX_ACL) and convert the wire ACL into the local
 * xattr format in @acl_inf via cifs_copy_posix_acl().
 */
int CIFSSMBGetPosixACL(const unsigned int xid, struct cifs_tcon *tcon,
		   const unsigned char *searchName,
		   char *acl_inf, const int buflen, const int acl_type,
		   const struct
nls_table *nls_codepage, int remap)
{
/* SMB_QUERY_POSIX_ACL */
	TRANSACTION2_QPI_REQ *pSMB = NULL;
	TRANSACTION2_QPI_RSP *pSMBr = NULL;
	int rc = 0;
	int bytes_returned;
	int name_len;
	__u16 params, byte_count;

	cifs_dbg(FYI, "In GetPosixACL (Unix) for path %s\n", searchName);

/* Retried here after a session reconnect returns -EAGAIN below */
queryAclRetry:
	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
		(void **) &pSMBr);
	if (rc)
		return rc;

	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
		name_len =
			cifsConvertToUTF16((__le16 *) pSMB->FileName,
					   searchName, PATH_MAX, nls_codepage,
					   remap);
		name_len++;     /* trailing null */
		name_len *= 2;
		pSMB->FileName[name_len] = 0;
		pSMB->FileName[name_len+1] = 0;
	} else {	/* BB improve the check for buffer overruns BB */
		name_len = strnlen(searchName, PATH_MAX);
		name_len++;     /* trailing null */
		strncpy(pSMB->FileName, searchName, name_len);
	}

	/* Build the TRANSACTION2 parameter block for a path-info query */
	params = 2 /* level */  + 4 /* rsrvd */  + name_len /* incl null */ ;
	pSMB->TotalDataCount = 0;
	pSMB->MaxParameterCount = cpu_to_le16(2);
	/* BB find exact max data count below from sess structure BB */
	pSMB->MaxDataCount = cpu_to_le16(4000);
	pSMB->MaxSetupCount = 0;
	pSMB->Reserved = 0;
	pSMB->Flags = 0;
	pSMB->Timeout = 0;
	pSMB->Reserved2 = 0;
	/* offsets on the wire are relative to the SMB header, hence the -4
	   to skip the RFC1001 length field */
	pSMB->ParameterOffset = cpu_to_le16(
		offsetof(struct smb_com_transaction2_qpi_req,
			 InformationLevel) - 4);
	pSMB->DataCount = 0;
	pSMB->DataOffset = 0;
	pSMB->SetupCount = 1;
	pSMB->Reserved3 = 0;
	pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_PATH_INFORMATION);
	byte_count = params + 1 /* pad */ ;
	pSMB->TotalParameterCount = cpu_to_le16(params);
	pSMB->ParameterCount = pSMB->TotalParameterCount;
	pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_POSIX_ACL);
	pSMB->Reserved4 = 0;
	inc_rfc1001_len(pSMB, byte_count);
	pSMB->ByteCount = cpu_to_le16(byte_count);

	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
		(struct smb_hdr *) pSMBr, &bytes_returned, 0);
	cifs_stats_inc(&tcon->stats.cifs_stats.num_acl_get);
	if (rc) {
		cifs_dbg(FYI, "Send error in Query POSIX ACL = %d\n", rc);
	} else {
		/* decode response */
		rc = validate_t2((struct smb_t2_rsp *)pSMBr);
		/* BB also check enough total bytes returned */
		if (rc || get_bcc(&pSMBr->hdr) < 2)
			rc = -EIO;      /* bad smb */
		else {
			__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
			__u16 count = le16_to_cpu(pSMBr->t2.DataCount);
			rc = cifs_copy_posix_acl(acl_inf,
				(char *)&pSMBr->hdr.Protocol+data_offset,
				buflen, acl_type, count);
		}
	}
	cifs_buf_release(pSMB);
	if (rc == -EAGAIN)
		goto queryAclRetry;
	return rc;
}

/*
 * Set a POSIX ACL on @fileName via TRANS2_SET_PATH_INFORMATION /
 * SMB_SET_POSIX_ACL (CIFS Unix Extensions). @local_acl is the local
 * posix_acl_xattr blob; it is converted to wire format in place in the
 * request buffer by ACL_to_cifs_posix().
 */
int
CIFSSMBSetPosixACL(const unsigned int xid, struct cifs_tcon *tcon,
		   const unsigned char *fileName, const char *local_acl,
		   const int buflen, const int acl_type,
		   const struct nls_table *nls_codepage, int remap)
{
	struct smb_com_transaction2_spi_req *pSMB = NULL;
	struct smb_com_transaction2_spi_rsp *pSMBr = NULL;
	char *parm_data;
	int name_len;
	int rc = 0;
	int bytes_returned = 0;
	__u16 params, byte_count, data_count, param_offset, offset;

	cifs_dbg(FYI, "In SetPosixACL (Unix) for path %s\n", fileName);
setAclRetry:
	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
		      (void **) &pSMBr);
	if (rc)
		return rc;
	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
		name_len =
			cifsConvertToUTF16((__le16 *) pSMB->FileName, fileName,
					   PATH_MAX, nls_codepage, remap);
		name_len++;     /* trailing null */
		name_len *= 2;
	} else {	/* BB improve the check for buffer overruns BB */
		name_len = strnlen(fileName, PATH_MAX);
		name_len++;     /* trailing null */
		strncpy(pSMB->FileName, fileName, name_len);
	}
	params = 6 + name_len;
	pSMB->MaxParameterCount = cpu_to_le16(2);
	/* BB find max SMB size from sess */
	pSMB->MaxDataCount = cpu_to_le16(1000);
	pSMB->MaxSetupCount = 0;
	pSMB->Reserved = 0;
	pSMB->Flags = 0;
	pSMB->Timeout = 0;
	pSMB->Reserved2 = 0;
	param_offset = offsetof(struct smb_com_transaction2_spi_req,
				InformationLevel) - 4;
	offset = param_offset + params;
	/* data area (the wire-format ACL) follows the parameter area */
	parm_data = ((char *) &pSMB->hdr.Protocol) + offset;
	pSMB->ParameterOffset = cpu_to_le16(param_offset);

	/* convert to on the wire format for POSIX ACL */
	data_count = ACL_to_cifs_posix(parm_data, local_acl, buflen, acl_type);

	if
	    (data_count == 0) {
		/* ACL_to_cifs_posix returns 0 on any conversion failure */
		rc = -EOPNOTSUPP;
		goto setACLerrorExit;
	}
	pSMB->DataOffset = cpu_to_le16(offset);
	pSMB->SetupCount = 1;
	pSMB->Reserved3 = 0;
	pSMB->SubCommand = cpu_to_le16(TRANS2_SET_PATH_INFORMATION);
	pSMB->InformationLevel = cpu_to_le16(SMB_SET_POSIX_ACL);
	byte_count = 3 /* pad */ + params + data_count;
	pSMB->DataCount = cpu_to_le16(data_count);
	pSMB->TotalDataCount = pSMB->DataCount;
	pSMB->ParameterCount = cpu_to_le16(params);
	pSMB->TotalParameterCount = pSMB->ParameterCount;
	pSMB->Reserved4 = 0;
	inc_rfc1001_len(pSMB, byte_count);
	pSMB->ByteCount = cpu_to_le16(byte_count);
	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
	if (rc)
		cifs_dbg(FYI, "Set POSIX ACL returned %d\n", rc);

setACLerrorExit:
	cifs_buf_release(pSMB);
	if (rc == -EAGAIN)
		goto setAclRetry;
	return rc;
}

/*
 * Query the extended attribute flag bits (chattr-style flags) for the open
 * file @netfid via TRANS2_QUERY_FILE_INFORMATION / SMB_QUERY_ATTR_FLAGS.
 * On success *pExtAttrBits and *pMask receive the 16-byte
 * file_chattr_info mode/mask values from the server.
 */
/* BB fix tabs in this function FIXME BB */
int CIFSGetExtAttr(const unsigned int xid, struct cifs_tcon *tcon,
		   const int netfid, __u64 *pExtAttrBits, __u64 *pMask)
{
	int rc = 0;
	struct smb_t2_qfi_req *pSMB = NULL;
	struct smb_t2_qfi_rsp *pSMBr = NULL;
	int bytes_returned;
	__u16 params, byte_count;

	cifs_dbg(FYI, "In GetExtAttr\n");
	if (tcon == NULL)
		return -ENODEV;

GetExtAttrRetry:
	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
			(void **) &pSMBr);
	if (rc)
		return rc;

	params = 2 /* level */ + 2 /* fid */;
	pSMB->t2.TotalDataCount = 0;
	pSMB->t2.MaxParameterCount = cpu_to_le16(4);
	/* BB find exact max data count below from sess structure BB */
	pSMB->t2.MaxDataCount = cpu_to_le16(4000);
	pSMB->t2.MaxSetupCount = 0;
	pSMB->t2.Reserved = 0;
	pSMB->t2.Flags = 0;
	pSMB->t2.Timeout = 0;
	pSMB->t2.Reserved2 = 0;
	pSMB->t2.ParameterOffset = cpu_to_le16(offsetof(struct smb_t2_qfi_req,
					       Fid) - 4);
	pSMB->t2.DataCount = 0;
	pSMB->t2.DataOffset = 0;
	pSMB->t2.SetupCount = 1;
	pSMB->t2.Reserved3 = 0;
	pSMB->t2.SubCommand = cpu_to_le16(TRANS2_QUERY_FILE_INFORMATION);
	byte_count = params + 1 /* pad */ ;
	pSMB->t2.TotalParameterCount = cpu_to_le16(params);
	pSMB->t2.ParameterCount = pSMB->t2.TotalParameterCount;
	pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_ATTR_FLAGS);
	pSMB->Pad = 0;
	pSMB->Fid = netfid;
	inc_rfc1001_len(pSMB, byte_count);
	pSMB->t2.ByteCount = cpu_to_le16(byte_count);

	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
	if (rc) {
		cifs_dbg(FYI, "error %d in GetExtAttr\n", rc);
	} else {
		/* decode response */
		rc = validate_t2((struct smb_t2_rsp *)pSMBr);
		/* BB also check enough total bytes returned */
		if (rc || get_bcc(&pSMBr->hdr) < 2)
			/* If rc should we check for EOPNOSUPP and
			   disable the srvino flag? or in caller? */
			rc = -EIO;      /* bad smb */
		else {
			__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
			__u16 count = le16_to_cpu(pSMBr->t2.DataCount);
			struct file_chattr_info *pfinfo;
			/* BB Do we need a cast or hash here ? */
			if (count != 16) {
				/* struct file_chattr_info is two __le64s */
				cifs_dbg(FYI, "Illegal size ret in GetExtAttr\n");
				rc = -EIO;
				goto GetExtAttrOut;
			}
			pfinfo = (struct file_chattr_info *)
				 (data_offset + (char *) &pSMBr->hdr.Protocol);
			*pExtAttrBits = le64_to_cpu(pfinfo->mode);
			*pMask = le64_to_cpu(pfinfo->mask);
		}
	}
GetExtAttrOut:
	cifs_buf_release(pSMB);
	if (rc == -EAGAIN)
		goto GetExtAttrRetry;
	return rc;
}

#endif /* CONFIG_POSIX */

#ifdef CONFIG_CIFS_ACL
/*
 * Initialize NT TRANSACT SMB into small smb request buffer.  This assumes that
 * all NT TRANSACTS that we init here have total parm and data under about 400
 * bytes (to fit in small cifs buffer size), which is the case so far, it
 * easily fits.
 NB: Setup words themselves and ByteCount MaxSetupCount (size of
 * returned setup area) and MaxParameterCount (returned parms size) must be set
 * by caller
 */
static int
smb_init_nttransact(const __u16 sub_command, const int setup_count,
		   const int parm_len, struct cifs_tcon *tcon,
		   void **ret_buf)
{
	int rc;
	__u32 temp_offset;
	struct smb_com_ntransact_req *pSMB;

	rc = small_smb_init(SMB_COM_NT_TRANSACT, 19 + setup_count, tcon,
				(void **)&pSMB);
	if (rc)
		return rc;
	*ret_buf = (void *)pSMB;
	pSMB->Reserved = 0;
	pSMB->TotalParameterCount = cpu_to_le32(parm_len);
	pSMB->TotalDataCount  = 0;
	pSMB->MaxDataCount = cpu_to_le32(CIFSMaxBufSize & 0xFFFFFF00);
	pSMB->ParameterCount = pSMB->TotalParameterCount;
	pSMB->DataCount  = pSMB->TotalDataCount;
	/* parms sit right after the setup words; -4 skips RFC1001 length */
	temp_offset = offsetof(struct smb_com_ntransact_req, Parms) +
			(setup_count * 2) - 4 /* for rfc1001 length itself */;
	pSMB->ParameterOffset = cpu_to_le32(temp_offset);
	pSMB->DataOffset = cpu_to_le32(temp_offset + parm_len);
	pSMB->SetupCount = setup_count; /* no need to le convert byte fields */
	pSMB->SubCommand = cpu_to_le16(sub_command);
	return 0;
}

/*
 * Sanity-check an NT TRANSACT response: verify that the parameter and data
 * areas lie entirely within the received SMB before handing out pointers.
 * On success *ppparm/*ppdata point into @buf and *pparmlen/*pdatalen hold
 * the respective lengths; on failure returns -EINVAL with lengths zeroed.
 */
static int
validate_ntransact(char *buf, char **ppparm, char **ppdata,
		   __u32 *pparmlen, __u32 *pdatalen)
{
	char *end_of_smb;
	__u32 data_count, data_offset, parm_count, parm_offset;
	struct smb_com_ntransact_rsp *pSMBr;
	u16 bcc;

	*pdatalen = 0;
	*pparmlen = 0;
	if (buf == NULL)
		return -EINVAL;

	pSMBr = (struct smb_com_ntransact_rsp *)buf;

	bcc = get_bcc(&pSMBr->hdr);
	/* first byte past the byte-count field plus bcc data bytes */
	end_of_smb = 2 /* sizeof byte count */ + bcc +
			(char *)&pSMBr->ByteCount;

	data_offset = le32_to_cpu(pSMBr->DataOffset);
	data_count = le32_to_cpu(pSMBr->DataCount);
	parm_offset = le32_to_cpu(pSMBr->ParameterOffset);
	parm_count = le32_to_cpu(pSMBr->ParameterCount);

	/* offsets from the server are relative to the SMB header */
	*ppparm = (char *)&pSMBr->hdr.Protocol + parm_offset;
	*ppdata = (char *)&pSMBr->hdr.Protocol + data_offset;

	/* should we also check that parm and data areas do not overlap?
	   larger than 16K */
	pSMB->MaxSetupCount = 0;
	pSMB->Fid = fid; /* file handle always le */
	pSMB->AclFlags = cpu_to_le32(CIFS_ACL_OWNER | CIFS_ACL_GROUP |
				     CIFS_ACL_DACL);
	pSMB->ByteCount = cpu_to_le16(11); /* 3 bytes pad + 8 bytes parm */
	inc_rfc1001_len(pSMB, 11);
	iov[0].iov_base = (char *)pSMB;
	iov[0].iov_len = be32_to_cpu(pSMB->hdr.smb_buf_length) + 4;

	rc = SendReceive2(xid, tcon->ses, iov, 1 /* num iovec */, &buf_type, 0);
	cifs_stats_inc(&tcon->stats.cifs_stats.num_acl_get);
	if (rc) {
		cifs_dbg(FYI, "Send error in QuerySecDesc = %d\n", rc);
	} else {                /* decode response */
		__le32 *parm;
		__u32 parm_len;
		__u32 acl_len;
		struct smb_com_ntransact_rsp *pSMBr;
		char *pdata;

/* validate_nttransact */
		rc = validate_ntransact(iov[0].iov_base, (char **)&parm,
					&pdata, &parm_len, pbuflen);
		if (rc)
			goto qsec_out;
		pSMBr = (struct smb_com_ntransact_rsp *)iov[0].iov_base;

		cifs_dbg(FYI, "smb %p parm %p data %p\n",
			 pSMBr, parm, *acl_inf);

		if (le32_to_cpu(pSMBr->ParameterCount) != 4) {
			rc = -EIO;      /* bad smb */
			*pbuflen = 0;
			goto qsec_out;
		}

/* BB check that data area is minimum length and as big as acl_len */

		/* the single 4-byte parm is the length of the security
		   descriptor that follows in the data area */
		acl_len = le32_to_cpu(*parm);
		if (acl_len != *pbuflen) {
			cifs_dbg(VFS, "acl length %d does not match %d\n",
				 acl_len, *pbuflen);
			if (*pbuflen > acl_len)
				*pbuflen = acl_len;
		}

		/* check if buffer is big enough for the acl
		   header followed by the smallest SID */
		if ((*pbuflen < sizeof(struct cifs_ntsd) + 8) ||
		    (*pbuflen >= 64 * 1024)) {
			cifs_dbg(VFS, "bad acl length %d\n", *pbuflen);
			rc = -EINVAL;
			*pbuflen = 0;
		} else {
			/* caller owns and must free *acl_inf */
			*acl_inf = kmemdup(pdata, *pbuflen, GFP_KERNEL);
			if (*acl_inf == NULL) {
				*pbuflen = 0;
				rc = -ENOMEM;
			}
		}
	}
qsec_out:
	if (buf_type == CIFS_SMALL_BUFFER)
		cifs_small_buf_release(iov[0].iov_base);
	else if (buf_type == CIFS_LARGE_BUFFER)
		cifs_buf_release(iov[0].iov_base);
/*	cifs_small_buf_release(pSMB); */ /* Freed earlier now in SendReceive2 */
	return rc;
}

/*
 * Set the security descriptor @pntsd (@acllen bytes) on open file @fid via
 * NT_TRANSACT_SET_SECURITY_DESC. @aclflag selects which parts (owner,
 * group, DACL...) the server should apply.
 */
int
CIFSSMBSetCIFSACL(const unsigned int xid, struct cifs_tcon *tcon, __u16 fid,
			struct cifs_ntsd *pntsd, __u32 acllen,
			int aclflag)
{
	__u16 byte_count, param_count, data_count, param_offset, data_offset;
	int rc = 0;
	int bytes_returned = 0;
	SET_SEC_DESC_REQ *pSMB = NULL;
	void *pSMBr;

setCifsAclRetry:
	rc = smb_init(SMB_COM_NT_TRANSACT, 19, tcon, (void **) &pSMB, &pSMBr);
	if (rc)
		return rc;

	pSMB->MaxSetupCount = 0;
	pSMB->Reserved = 0;

	param_count = 8;
	param_offset = offsetof(struct smb_com_transaction_ssec_req, Fid) - 4;
	data_count = acllen;
	data_offset = param_offset + param_count;
	byte_count = 3 /* pad */  + param_count;

	pSMB->DataCount = cpu_to_le32(data_count);
	pSMB->TotalDataCount = pSMB->DataCount;
	pSMB->MaxParameterCount = cpu_to_le32(4);
	pSMB->MaxDataCount = cpu_to_le32(16384);
	pSMB->ParameterCount = cpu_to_le32(param_count);
	pSMB->ParameterOffset = cpu_to_le32(param_offset);
	pSMB->TotalParameterCount = pSMB->ParameterCount;
	pSMB->DataOffset = cpu_to_le32(data_offset);
	pSMB->SetupCount = 0;
	pSMB->SubCommand = cpu_to_le16(NT_TRANSACT_SET_SECURITY_DESC);
	pSMB->ByteCount = cpu_to_le16(byte_count+data_count);

	pSMB->Fid = fid; /* file handle always le */
	pSMB->Reserved2 = 0;
	pSMB->AclFlags = cpu_to_le32(aclflag);

	if (pntsd && acllen) {
		/* copy the descriptor into the data area of the request */
		memcpy((char *)pSMBr + offsetof(struct smb_hdr, Protocol) +
				data_offset, pntsd, acllen);
		inc_rfc1001_len(pSMB, byte_count + data_count);
	} else
		inc_rfc1001_len(pSMB, byte_count);

	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
		(struct smb_hdr *) pSMBr, &bytes_returned, 0);
	cifs_dbg(FYI, "SetCIFSACL bytes_returned: %d, rc: %d\n",
		 bytes_returned, rc);
	if (rc)
		cifs_dbg(FYI, "Set CIFS ACL returned %d\n", rc);
	cifs_buf_release(pSMB);

	if (rc == -EAGAIN)
		goto setCifsAclRetry;

	return (rc);
}

#endif /* CONFIG_CIFS_ACL */

/* Legacy Query Path Information call for lookup to old servers such
   as Win9x/WinME */
int
SMBQueryInformation(const unsigned int xid, struct cifs_tcon *tcon,
		    const char *search_name, FILE_ALL_INFO *data,
		    const struct nls_table *nls_codepage, int remap)
{
	QUERY_INFORMATION_REQ *pSMB;
	QUERY_INFORMATION_RSP *pSMBr;
	int rc = 0;
	int
	    bytes_returned;
	int name_len;

	cifs_dbg(FYI, "In SMBQPath path %s\n", search_name);
QInfRetry:
	rc = smb_init(SMB_COM_QUERY_INFORMATION, 0, tcon, (void **) &pSMB,
		      (void **) &pSMBr);
	if (rc)
		return rc;

	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
		name_len =
			cifsConvertToUTF16((__le16 *) pSMB->FileName,
					   search_name, PATH_MAX, nls_codepage,
					   remap);
		name_len++;     /* trailing null */
		name_len *= 2;
	} else {
		name_len = strnlen(search_name, PATH_MAX);
		name_len++;     /* trailing null */
		strncpy(pSMB->FileName, search_name, name_len);
	}
	pSMB->BufferFormat = 0x04;
	name_len++; /* account for buffer type byte */
	inc_rfc1001_len(pSMB, (__u16)name_len);
	pSMB->ByteCount = cpu_to_le16(name_len);

	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
	if (rc) {
		cifs_dbg(FYI, "Send error in QueryInfo = %d\n", rc);
	} else if (data) {
		struct timespec ts;
		__u32 time = le32_to_cpu(pSMBr->last_write_time);

		/* decode response */
		/* BB FIXME - add time zone adjustment BB */
		memset(data, 0, sizeof(FILE_ALL_INFO));
		ts.tv_nsec = 0;
		ts.tv_sec = time;
		/* decode time fields */
		/* legacy response only carries last-write time; reuse it for
		   ChangeTime and leave LastAccessTime zero */
		data->ChangeTime = cpu_to_le64(cifs_UnixTimeToNT(ts));
		data->LastWriteTime = data->ChangeTime;
		data->LastAccessTime = 0;
		data->AllocationSize =
			cpu_to_le64(le32_to_cpu(pSMBr->size));
		data->EndOfFile = data->AllocationSize;
		data->Attributes =
			cpu_to_le32(le16_to_cpu(pSMBr->attr));
	} else
		rc = -EIO; /* bad buffer passed in */

	cifs_buf_release(pSMB);

	if (rc == -EAGAIN)
		goto QInfRetry;

	return rc;
}

/*
 * Query SMB_QUERY_FILE_ALL_INFO for the open file handle @netfid; the
 * decoded FILE_ALL_INFO is copied to *pFindData. Retries the whole call
 * on -EAGAIN (session reconnect).
 */
int
CIFSSMBQFileInfo(const unsigned int xid, struct cifs_tcon *tcon,
		 u16 netfid, FILE_ALL_INFO *pFindData)
{
	struct smb_t2_qfi_req *pSMB = NULL;
	struct smb_t2_qfi_rsp *pSMBr = NULL;
	int rc = 0;
	int bytes_returned;
	__u16 params, byte_count;

QFileInfoRetry:
	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
		      (void **) &pSMBr);
	if (rc)
		return rc;

	params = 2 /* level */ + 2 /* fid */;
	pSMB->t2.TotalDataCount = 0;
	pSMB->t2.MaxParameterCount = cpu_to_le16(4);
	/* BB find exact max data
	   count below from sess structure BB */
	pSMB->t2.MaxDataCount = cpu_to_le16(CIFSMaxBufSize);
	pSMB->t2.MaxSetupCount = 0;
	pSMB->t2.Reserved = 0;
	pSMB->t2.Flags = 0;
	pSMB->t2.Timeout = 0;
	pSMB->t2.Reserved2 = 0;
	pSMB->t2.ParameterOffset = cpu_to_le16(offsetof(struct smb_t2_qfi_req,
					       Fid) - 4);
	pSMB->t2.DataCount = 0;
	pSMB->t2.DataOffset = 0;
	pSMB->t2.SetupCount = 1;
	pSMB->t2.Reserved3 = 0;
	pSMB->t2.SubCommand = cpu_to_le16(TRANS2_QUERY_FILE_INFORMATION);
	byte_count = params + 1 /* pad */ ;
	pSMB->t2.TotalParameterCount = cpu_to_le16(params);
	pSMB->t2.ParameterCount = pSMB->t2.TotalParameterCount;
	pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_FILE_ALL_INFO);
	pSMB->Pad = 0;
	pSMB->Fid = netfid;
	inc_rfc1001_len(pSMB, byte_count);
	pSMB->t2.ByteCount = cpu_to_le16(byte_count);

	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
	if (rc) {
		cifs_dbg(FYI, "Send error in QFileInfo = %d", rc);
	} else {                /* decode response */
		rc = validate_t2((struct smb_t2_rsp *)pSMBr);

		if (rc) /* BB add auto retry on EOPNOTSUPP?
			 */
			rc = -EIO;
		else if (get_bcc(&pSMBr->hdr) < 40)
			rc = -EIO;      /* bad smb */
		else if (pFindData) {
			__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
			memcpy((char *) pFindData,
			       (char *) &pSMBr->hdr.Protocol +
			       data_offset, sizeof(FILE_ALL_INFO));
		} else
			rc = -ENOMEM;
	}
	cifs_buf_release(pSMB);
	if (rc == -EAGAIN)
		goto QFileInfoRetry;

	return rc;
}

/*
 * Query path information for @search_name. Uses SMB_QUERY_FILE_ALL_INFO,
 * or SMB_INFO_STANDARD when @legacy is set (for old servers); the decoded
 * info is copied to *data (only the legacy-sized prefix in legacy mode).
 */
int
CIFSSMBQPathInfo(const unsigned int xid, struct cifs_tcon *tcon,
		 const char *search_name, FILE_ALL_INFO *data,
		 int legacy /* old style infolevel */,
		 const struct nls_table *nls_codepage, int remap)
{
	/* level 263 SMB_QUERY_FILE_ALL_INFO */
	TRANSACTION2_QPI_REQ *pSMB = NULL;
	TRANSACTION2_QPI_RSP *pSMBr = NULL;
	int rc = 0;
	int bytes_returned;
	int name_len;
	__u16 params, byte_count;

	/* cifs_dbg(FYI, "In QPathInfo path %s\n", search_name); */
QPathInfoRetry:
	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
		      (void **) &pSMBr);
	if (rc)
		return rc;

	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
		name_len =
		    cifsConvertToUTF16((__le16 *) pSMB->FileName, search_name,
				       PATH_MAX, nls_codepage, remap);
		name_len++;     /* trailing null */
		name_len *= 2;
	} else {	/* BB improve the check for buffer overruns BB */
		name_len = strnlen(search_name, PATH_MAX);
		name_len++;     /* trailing null */
		strncpy(pSMB->FileName, search_name, name_len);
	}

	params = 2 /* level */ + 4 /* reserved */ + name_len /* includes NUL */;
	pSMB->TotalDataCount = 0;
	pSMB->MaxParameterCount = cpu_to_le16(2);
	/* BB find exact max SMB PDU from sess structure BB */
	pSMB->MaxDataCount = cpu_to_le16(4000);
	pSMB->MaxSetupCount = 0;
	pSMB->Reserved = 0;
	pSMB->Flags = 0;
	pSMB->Timeout = 0;
	pSMB->Reserved2 = 0;
	pSMB->ParameterOffset = cpu_to_le16(offsetof(
		struct smb_com_transaction2_qpi_req, InformationLevel) - 4);
	pSMB->DataCount = 0;
	pSMB->DataOffset = 0;
	pSMB->SetupCount = 1;
	pSMB->Reserved3 = 0;
	pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_PATH_INFORMATION);
	byte_count = params + 1 /* pad */ ;
	pSMB->TotalParameterCount = cpu_to_le16(params);
	pSMB->ParameterCount = pSMB->TotalParameterCount;
	if (legacy)
		pSMB->InformationLevel = cpu_to_le16(SMB_INFO_STANDARD);
	else
		pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_FILE_ALL_INFO);
	pSMB->Reserved4 = 0;
	inc_rfc1001_len(pSMB, byte_count);
	pSMB->ByteCount = cpu_to_le16(byte_count);

	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
	if (rc) {
		cifs_dbg(FYI, "Send error in QPathInfo = %d\n", rc);
	} else {                /* decode response */
		rc = validate_t2((struct smb_t2_rsp *)pSMBr);

		if (rc) /* BB add auto retry on EOPNOTSUPP? */
			rc = -EIO;
		else if (!legacy && get_bcc(&pSMBr->hdr) < 40)
			rc = -EIO;      /* bad smb */
		else if (legacy && get_bcc(&pSMBr->hdr) < 24)
			rc = -EIO;  /* 24 or 26 expected but we do not read
					last field */
		else if (data) {
			int size;
			__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);

			/*
			 * On legacy responses we do not read the last field,
			 * EAsize, fortunately since it varies by subdialect and
			 * also note it differs on Set vs Get, ie two bytes or 4
			 * bytes depending but we don't care here.
			 */
			if (legacy)
				size = sizeof(FILE_INFO_STANDARD);
			else
				size = sizeof(FILE_ALL_INFO);
			memcpy((char *) data, (char *) &pSMBr->hdr.Protocol +
			       data_offset, size);
		} else
			rc = -ENOMEM;
	}
	cifs_buf_release(pSMB);
	if (rc == -EAGAIN)
		goto QPathInfoRetry;

	return rc;
}

/*
 * Query SMB_QUERY_FILE_UNIX_BASIC (CIFS Unix Extensions) for the open
 * file handle @netfid; result copied to *pFindData.
 */
int
CIFSSMBUnixQFileInfo(const unsigned int xid, struct cifs_tcon *tcon,
		 u16 netfid, FILE_UNIX_BASIC_INFO *pFindData)
{
	struct smb_t2_qfi_req *pSMB = NULL;
	struct smb_t2_qfi_rsp *pSMBr = NULL;
	int rc = 0;
	int bytes_returned;
	__u16 params, byte_count;

UnixQFileInfoRetry:
	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
		      (void **) &pSMBr);
	if (rc)
		return rc;

	params = 2 /* level */ + 2 /* fid */;
	pSMB->t2.TotalDataCount = 0;
	pSMB->t2.MaxParameterCount = cpu_to_le16(4);
	/* BB find exact max data count below from sess structure BB */
	pSMB->t2.MaxDataCount = cpu_to_le16(CIFSMaxBufSize);
	pSMB->t2.MaxSetupCount = 0;
	pSMB->t2.Reserved = 0;
	pSMB->t2.Flags = 0;
	pSMB->t2.Timeout = 0;
	pSMB->t2.Reserved2 = 0;
	pSMB->t2.ParameterOffset = cpu_to_le16(offsetof(struct smb_t2_qfi_req,
					       Fid) - 4);
	pSMB->t2.DataCount = 0;
	pSMB->t2.DataOffset = 0;
	pSMB->t2.SetupCount = 1;
	pSMB->t2.Reserved3 = 0;
	pSMB->t2.SubCommand = cpu_to_le16(TRANS2_QUERY_FILE_INFORMATION);
	byte_count = params + 1 /* pad */ ;
	pSMB->t2.TotalParameterCount = cpu_to_le16(params);
	pSMB->t2.ParameterCount = pSMB->t2.TotalParameterCount;
	pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_FILE_UNIX_BASIC);
	pSMB->Pad = 0;
	pSMB->Fid = netfid;
	inc_rfc1001_len(pSMB, byte_count);
	pSMB->t2.ByteCount = cpu_to_le16(byte_count);

	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
	if (rc) {
		cifs_dbg(FYI, "Send error in UnixQFileInfo = %d", rc);
	} else {                /* decode response */
		rc = validate_t2((struct smb_t2_rsp *)pSMBr);

		if (rc || get_bcc(&pSMBr->hdr) < sizeof(FILE_UNIX_BASIC_INFO)) {
			cifs_dbg(VFS, "Malformed FILE_UNIX_BASIC_INFO response. Unix Extensions can be disabled on mount by specifying the nosfu mount option.\n");
			rc = -EIO;	/* bad smb */
		} else {
			__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
			memcpy((char *) pFindData,
			       (char *) &pSMBr->hdr.Protocol +
			       data_offset,
			       sizeof(FILE_UNIX_BASIC_INFO));
		}
	}

	cifs_buf_release(pSMB);
	if (rc == -EAGAIN)
		goto UnixQFileInfoRetry;

	return rc;
}

/*
 * Query SMB_QUERY_FILE_UNIX_BASIC (CIFS Unix Extensions) by path for
 * @searchName; result copied to *pFindData.
 */
int
CIFSSMBUnixQPathInfo(const unsigned int xid, struct cifs_tcon *tcon,
		     const unsigned char *searchName,
		     FILE_UNIX_BASIC_INFO *pFindData,
		     const struct nls_table *nls_codepage, int remap)
{
/* SMB_QUERY_FILE_UNIX_BASIC */
	TRANSACTION2_QPI_REQ *pSMB = NULL;
	TRANSACTION2_QPI_RSP *pSMBr = NULL;
	int rc = 0;
	int bytes_returned = 0;
	int name_len;
	__u16 params, byte_count;

	cifs_dbg(FYI, "In QPathInfo (Unix) the path %s\n", searchName);
UnixQPathInfoRetry:
	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
		      (void **) &pSMBr);
	if (rc)
		return rc;

	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
		name_len =
		    cifsConvertToUTF16((__le16 *) pSMB->FileName, searchName,
				       PATH_MAX, nls_codepage, remap);
		name_len++;     /* trailing null */
		name_len *= 2;
	} else {	/* BB improve the check for buffer overruns BB */
		name_len = strnlen(searchName, PATH_MAX);
		name_len++;     /* trailing null */
		strncpy(pSMB->FileName, searchName, name_len);
	}

	params = 2 /* level */ + 4 /* reserved */ + name_len /* includes NUL */;
	pSMB->TotalDataCount = 0;
	pSMB->MaxParameterCount = cpu_to_le16(2);
	/* BB find exact max SMB PDU from sess structure BB */
	pSMB->MaxDataCount = cpu_to_le16(4000);
	pSMB->MaxSetupCount = 0;
	pSMB->Reserved = 0;
	pSMB->Flags = 0;
	pSMB->Timeout = 0;
	pSMB->Reserved2 = 0;
	pSMB->ParameterOffset = cpu_to_le16(offsetof(
		struct smb_com_transaction2_qpi_req, InformationLevel) - 4);
	pSMB->DataCount = 0;
	pSMB->DataOffset = 0;
	pSMB->SetupCount = 1;
	pSMB->Reserved3 = 0;
	pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_PATH_INFORMATION);
	byte_count = params + 1 /* pad */ ;
	pSMB->TotalParameterCount = cpu_to_le16(params);
	pSMB->ParameterCount =
	pSMB->TotalParameterCount;
	pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_FILE_UNIX_BASIC);
	pSMB->Reserved4 = 0;
	inc_rfc1001_len(pSMB, byte_count);
	pSMB->ByteCount = cpu_to_le16(byte_count);

	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
	if (rc) {
		cifs_dbg(FYI, "Send error in UnixQPathInfo = %d", rc);
	} else {                /* decode response */
		rc = validate_t2((struct smb_t2_rsp *)pSMBr);

		if (rc || get_bcc(&pSMBr->hdr) < sizeof(FILE_UNIX_BASIC_INFO)) {
			cifs_dbg(VFS, "Malformed FILE_UNIX_BASIC_INFO response. Unix Extensions can be disabled on mount by specifying the nosfu mount option.\n");
			rc = -EIO;	/* bad smb */
		} else {
			__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
			memcpy((char *) pFindData,
			       (char *) &pSMBr->hdr.Protocol +
			       data_offset,
			       sizeof(FILE_UNIX_BASIC_INFO));
		}
	}
	cifs_buf_release(pSMB);
	if (rc == -EAGAIN)
		goto UnixQPathInfoRetry;

	return rc;
}

/*
 * Start a directory search (TRANS2_FIND_FIRST). When @msearch is set a
 * "<dirsep>*" wildcard is appended to @searchName. On success the search
 * state (network buffer, entry pointers, counts, end-of-search flag) is
 * recorded in *psrch_inf and, if @pnetfid is non-NULL, the server search
 * handle is returned through it. On success psrch_inf->ntwrk_buf_start
 * keeps ownership of the response buffer; the caller's search machinery
 * must release it later.
 */
/* xid, tcon, searchName and codepage are input parms, rest are returned */
int
CIFSFindFirst(const unsigned int xid, struct cifs_tcon *tcon,
	      const char *searchName, struct cifs_sb_info *cifs_sb,
	      __u16 *pnetfid, __u16 search_flags,
	      struct cifs_search_info *psrch_inf, bool msearch)
{
/* level 257 SMB_ */
	TRANSACTION2_FFIRST_REQ *pSMB = NULL;
	TRANSACTION2_FFIRST_RSP *pSMBr = NULL;
	T2_FFIRST_RSP_PARMS *parms;
	int rc = 0;
	int bytes_returned = 0;
	int name_len, remap;
	__u16 params, byte_count;
	struct nls_table *nls_codepage;

	cifs_dbg(FYI, "In FindFirst for %s\n", searchName);

findFirstRetry:
	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
		      (void **) &pSMBr);
	if (rc)
		return rc;

	nls_codepage = cifs_sb->local_nls;
	remap = cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR;

	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
		name_len =
		    cifsConvertToUTF16((__le16 *) pSMB->FileName, searchName,
				       PATH_MAX, nls_codepage, remap);
		/* We can not add the asterisk earlier in case
		   it got remapped to 0xF03A as if it were part of the
		   directory name instead of a wildcard */
		name_len *= 2;
		if (msearch) {
			/* append UTF-16 "<dirsep>*" plus double-NUL */
			pSMB->FileName[name_len] = CIFS_DIR_SEP(cifs_sb);
			pSMB->FileName[name_len+1] = 0;
			pSMB->FileName[name_len+2] = '*';
			pSMB->FileName[name_len+3] = 0;
			name_len += 4; /* now the trailing null */
			/* null terminate just in case */
			pSMB->FileName[name_len] = 0;
			pSMB->FileName[name_len+1] = 0;
			name_len += 2;
		}
	} else {	/* BB add check for overrun of SMB buf BB */
		name_len = strnlen(searchName, PATH_MAX);
/* BB fix here and in unicode clause above ie
		if (name_len > buffersize-header)
			free buffer exit; BB */
		strncpy(pSMB->FileName, searchName, name_len);
		if (msearch) {
			pSMB->FileName[name_len] = CIFS_DIR_SEP(cifs_sb);
			pSMB->FileName[name_len+1] = '*';
			pSMB->FileName[name_len+2] = 0;
			name_len += 3;
		}
	}

	params = 12 + name_len /* includes null */ ;
	pSMB->TotalDataCount = 0;       /* no EAs */
	pSMB->MaxParameterCount = cpu_to_le16(10);
	pSMB->MaxDataCount = cpu_to_le16(CIFSMaxBufSize & 0xFFFFFF00);
	pSMB->MaxSetupCount = 0;
	pSMB->Reserved = 0;
	pSMB->Flags = 0;
	pSMB->Timeout = 0;
	pSMB->Reserved2 = 0;
	byte_count = params + 1 /* pad */ ;
	pSMB->TotalParameterCount = cpu_to_le16(params);
	pSMB->ParameterCount = pSMB->TotalParameterCount;
	pSMB->ParameterOffset = cpu_to_le16(
	      offsetof(struct smb_com_transaction2_ffirst_req, SearchAttributes)
		- 4);
	pSMB->DataCount = 0;
	pSMB->DataOffset = 0;
	pSMB->SetupCount = 1;	/* one byte, no need to make endian neutral */
	pSMB->Reserved3 = 0;
	pSMB->SubCommand = cpu_to_le16(TRANS2_FIND_FIRST);
	pSMB->SearchAttributes =
	    cpu_to_le16(ATTR_READONLY | ATTR_HIDDEN | ATTR_SYSTEM |
			ATTR_DIRECTORY);
	pSMB->SearchCount = cpu_to_le16(CIFSMaxBufSize/sizeof(FILE_UNIX_INFO));
	pSMB->SearchFlags = cpu_to_le16(search_flags);
	pSMB->InformationLevel = cpu_to_le16(psrch_inf->info_level);

	/* BB what should we set StorageType to? Does it matter?
	   BB */
	pSMB->SearchStorageType = 0;
	inc_rfc1001_len(pSMB, byte_count);
	pSMB->ByteCount = cpu_to_le16(byte_count);

	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
	cifs_stats_inc(&tcon->stats.cifs_stats.num_ffirst);

	if (rc) {/* BB add logic to retry regular search if Unix search
			rejected unexpectedly by server */
		/* BB Add code to handle unsupported level rc */
		cifs_dbg(FYI, "Error in FindFirst = %d\n", rc);

		cifs_buf_release(pSMB);

		/* BB eventually could optimize out free and realloc of buf */
		/*    for this case */
		if (rc == -EAGAIN)
			goto findFirstRetry;
	} else { /* decode response */
		/* BB remember to free buffer if error BB */
		rc = validate_t2((struct smb_t2_rsp *)pSMBr);
		if (rc == 0) {
			unsigned int lnoff;

			if (pSMBr->hdr.Flags2 & SMBFLG2_UNICODE)
				psrch_inf->unicode = true;
			else
				psrch_inf->unicode = false;

			/* hand the response buffer over to the search state;
			   it is released by the search machinery, not here */
			psrch_inf->ntwrk_buf_start = (char *)pSMBr;
			psrch_inf->smallBuf = 0;
			psrch_inf->srch_entries_start =
				(char *) &pSMBr->hdr.Protocol +
					le16_to_cpu(pSMBr->t2.DataOffset);
			parms = (T2_FFIRST_RSP_PARMS *)((char *) &pSMBr->hdr.Protocol +
			       le16_to_cpu(pSMBr->t2.ParameterOffset));

			if (parms->EndofSearch)
				psrch_inf->endOfSearch = true;
			else
				psrch_inf->endOfSearch = false;

			psrch_inf->entries_in_buffer =
					le16_to_cpu(parms->SearchCount);
			psrch_inf->index_of_last_entry = 2 /* skip . and ..
			  */ + psrch_inf->entries_in_buffer;
			/* reject a resume-name offset beyond the buffer */
			lnoff = le16_to_cpu(parms->LastNameOffset);
			if (CIFSMaxBufSize < lnoff) {
				cifs_dbg(VFS, "ignoring corrupt resume name\n");
				psrch_inf->last_entry = NULL;
				return rc;
			}

			psrch_inf->last_entry = psrch_inf->srch_entries_start +
							lnoff;

			if (pnetfid)
				*pnetfid = parms->SearchHandle;
		} else {
			cifs_buf_release(pSMB);
		}
	}

	return rc;
}

/*
 * Continue a directory search (TRANS2_FIND_NEXT) using @searchHandle and
 * the resume key/name stored in *psrch_inf. On success the previous
 * network buffer held by psrch_inf is released and replaced by this
 * response's buffer, and the entry pointers/counters are updated.
 * Returns -ENOENT if the search already ended; -EBADF from the server is
 * treated as end-of-search and mapped to 0.
 */
int CIFSFindNext(const unsigned int xid, struct cifs_tcon *tcon,
		 __u16 searchHandle, __u16 search_flags,
		 struct cifs_search_info *psrch_inf)
{
	TRANSACTION2_FNEXT_REQ *pSMB = NULL;
	TRANSACTION2_FNEXT_RSP *pSMBr = NULL;
	T2_FNEXT_RSP_PARMS *parms;
	char *response_data;
	int rc = 0;
	int bytes_returned;
	unsigned int name_len;
	__u16 params, byte_count;

	cifs_dbg(FYI, "In FindNext\n");

	if (psrch_inf->endOfSearch)
		return -ENOENT;

	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
		(void **) &pSMBr);
	if (rc)
		return rc;

	params = 14; /* includes 2 bytes of null string, converted to LE below*/
	byte_count = 0;
	pSMB->TotalDataCount = 0;       /* no EAs */
	pSMB->MaxParameterCount = cpu_to_le16(8);
	pSMB->MaxDataCount = cpu_to_le16(CIFSMaxBufSize & 0xFFFFFF00);
	pSMB->MaxSetupCount = 0;
	pSMB->Reserved = 0;
	pSMB->Flags = 0;
	pSMB->Timeout = 0;
	pSMB->Reserved2 = 0;
	pSMB->ParameterOffset =  cpu_to_le16(
	      offsetof(struct smb_com_transaction2_fnext_req,SearchHandle) - 4);
	pSMB->DataCount = 0;
	pSMB->DataOffset = 0;
	pSMB->SetupCount = 1;
	pSMB->Reserved3 = 0;
	pSMB->SubCommand = cpu_to_le16(TRANS2_FIND_NEXT);
	pSMB->SearchHandle = searchHandle;      /* always kept as le */
	pSMB->SearchCount =
		cpu_to_le16(CIFSMaxBufSize / sizeof(FILE_UNIX_INFO));
	pSMB->InformationLevel = cpu_to_le16(psrch_inf->info_level);
	pSMB->ResumeKey = psrch_inf->resume_key;
	pSMB->SearchFlags = cpu_to_le16(search_flags);

	name_len = psrch_inf->resume_name_len;
	params += name_len;
	if (name_len < PATH_MAX) {
		memcpy(pSMB->ResumeFileName, psrch_inf->presume_name, name_len);
		byte_count += name_len;
		/* 14 byte parm len above enough for 2 byte null terminator */
		pSMB->ResumeFileName[name_len] = 0;
		pSMB->ResumeFileName[name_len+1] = 0;
	} else {
		rc = -EINVAL;
		goto FNext2_err_exit;
	}
	byte_count = params + 1 /* pad */ ;
	pSMB->TotalParameterCount = cpu_to_le16(params);
	pSMB->ParameterCount = pSMB->TotalParameterCount;
	inc_rfc1001_len(pSMB, byte_count);
	pSMB->ByteCount = cpu_to_le16(byte_count);

	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
	cifs_stats_inc(&tcon->stats.cifs_stats.num_fnext);
	if (rc) {
		if (rc == -EBADF) {
			psrch_inf->endOfSearch = true;
			cifs_buf_release(pSMB);
			rc = 0; /* search probably was closed at end of search*/
		} else
			cifs_dbg(FYI, "FindNext returned = %d\n", rc);
	} else {                /* decode response */
		rc = validate_t2((struct smb_t2_rsp *)pSMBr);

		if (rc == 0) {
			unsigned int lnoff;
			/* BB fixme add lock for file (srch_info) struct here */
			if (pSMBr->hdr.Flags2 & SMBFLG2_UNICODE)
				psrch_inf->unicode = true;
			else
				psrch_inf->unicode = false;
			response_data = (char *) &pSMBr->hdr.Protocol +
				       le16_to_cpu(pSMBr->t2.ParameterOffset);
			parms = (T2_FNEXT_RSP_PARMS *)response_data;
			response_data = (char *)&pSMBr->hdr.Protocol +
				le16_to_cpu(pSMBr->t2.DataOffset);
			/* swap the previous search buffer for this one */
			if (psrch_inf->smallBuf)
				cifs_small_buf_release(
					psrch_inf->ntwrk_buf_start);
			else
				cifs_buf_release(psrch_inf->ntwrk_buf_start);
			psrch_inf->srch_entries_start = response_data;
			psrch_inf->ntwrk_buf_start = (char *)pSMB;
			psrch_inf->smallBuf = 0;
			if (parms->EndofSearch)
				psrch_inf->endOfSearch = true;
			else
				psrch_inf->endOfSearch = false;
			psrch_inf->entries_in_buffer =
						le16_to_cpu(parms->SearchCount);
			psrch_inf->index_of_last_entry +=
				psrch_inf->entries_in_buffer;
			lnoff = le16_to_cpu(parms->LastNameOffset);
			if (CIFSMaxBufSize < lnoff) {
				cifs_dbg(VFS, "ignoring corrupt resume name\n");
				psrch_inf->last_entry = NULL;
				return rc;
			} else
				psrch_inf->last_entry =
					psrch_inf->srch_entries_start + lnoff;

/* cifs_dbg(FYI, "fnxt2 entries in buf %d index_of_last %d\n",
	psrch_inf->entries_in_buffer, psrch_inf->index_of_last_entry); */
/* BB fixme add unlock here */ } } /* BB On error, should we leave previous search buf (and count and last entry fields) intact or free the previous one? */ /* Note: On -EAGAIN error only caller can retry on handle based calls since file handle passed in no longer valid */ FNext2_err_exit: if (rc != 0) cifs_buf_release(pSMB); return rc; } int CIFSFindClose(const unsigned int xid, struct cifs_tcon *tcon, const __u16 searchHandle) { int rc = 0; FINDCLOSE_REQ *pSMB = NULL; cifs_dbg(FYI, "In CIFSSMBFindClose\n"); rc = small_smb_init(SMB_COM_FIND_CLOSE2, 1, tcon, (void **)&pSMB); /* no sense returning error if session restarted as file handle has been closed */ if (rc == -EAGAIN) return 0; if (rc) return rc; pSMB->FileID = searchHandle; pSMB->ByteCount = 0; rc = SendReceiveNoRsp(xid, tcon->ses, (char *) pSMB, 0); if (rc) cifs_dbg(VFS, "Send error in FindClose = %d\n", rc); cifs_stats_inc(&tcon->stats.cifs_stats.num_fclose); /* Since session is dead, search handle closed on server already */ if (rc == -EAGAIN) rc = 0; return rc; } int CIFSGetSrvInodeNumber(const unsigned int xid, struct cifs_tcon *tcon, const char *search_name, __u64 *inode_number, const struct nls_table *nls_codepage, int remap) { int rc = 0; TRANSACTION2_QPI_REQ *pSMB = NULL; TRANSACTION2_QPI_RSP *pSMBr = NULL; int name_len, bytes_returned; __u16 params, byte_count; cifs_dbg(FYI, "In GetSrvInodeNum for %s\n", search_name); if (tcon == NULL) return -ENODEV; GetInodeNumberRetry: rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { name_len = cifsConvertToUTF16((__le16 *) pSMB->FileName, search_name, PATH_MAX, nls_codepage, remap); name_len++; /* trailing null */ name_len *= 2; } else { /* BB improve the check for buffer overruns BB */ name_len = strnlen(search_name, PATH_MAX); name_len++; /* trailing null */ strncpy(pSMB->FileName, search_name, name_len); } params = 2 /* level */ + 4 /* rsrvd */ + name_len /* incl 
null */ ; pSMB->TotalDataCount = 0; pSMB->MaxParameterCount = cpu_to_le16(2); /* BB find exact max data count below from sess structure BB */ pSMB->MaxDataCount = cpu_to_le16(4000); pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Timeout = 0; pSMB->Reserved2 = 0; pSMB->ParameterOffset = cpu_to_le16(offsetof( struct smb_com_transaction2_qpi_req, InformationLevel) - 4); pSMB->DataCount = 0; pSMB->DataOffset = 0; pSMB->SetupCount = 1; pSMB->Reserved3 = 0; pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_PATH_INFORMATION); byte_count = params + 1 /* pad */ ; pSMB->TotalParameterCount = cpu_to_le16(params); pSMB->ParameterCount = pSMB->TotalParameterCount; pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_FILE_INTERNAL_INFO); pSMB->Reserved4 = 0; inc_rfc1001_len(pSMB, byte_count); pSMB->ByteCount = cpu_to_le16(byte_count); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); if (rc) { cifs_dbg(FYI, "error %d in QueryInternalInfo\n", rc); } else { /* decode response */ rc = validate_t2((struct smb_t2_rsp *)pSMBr); /* BB also check enough total bytes returned */ if (rc || get_bcc(&pSMBr->hdr) < 2) /* If rc should we check for EOPNOSUPP and disable the srvino flag? or in caller? */ rc = -EIO; /* bad smb */ else { __u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset); __u16 count = le16_to_cpu(pSMBr->t2.DataCount); struct file_internal_info *pfinfo; /* BB Do we need a cast or hash here ? 
*/ if (count < 8) { cifs_dbg(FYI, "Illegal size ret in QryIntrnlInf\n"); rc = -EIO; goto GetInodeNumOut; } pfinfo = (struct file_internal_info *) (data_offset + (char *) &pSMBr->hdr.Protocol); *inode_number = le64_to_cpu(pfinfo->UniqueId); } } GetInodeNumOut: cifs_buf_release(pSMB); if (rc == -EAGAIN) goto GetInodeNumberRetry; return rc; } /* parses DFS refferal V3 structure * caller is responsible for freeing target_nodes * returns: * on success - 0 * on failure - errno */ static int parse_DFS_referrals(TRANSACTION2_GET_DFS_REFER_RSP *pSMBr, unsigned int *num_of_nodes, struct dfs_info3_param **target_nodes, const struct nls_table *nls_codepage, int remap, const char *searchName) { int i, rc = 0; char *data_end; bool is_unicode; struct dfs_referral_level_3 *ref; if (pSMBr->hdr.Flags2 & SMBFLG2_UNICODE) is_unicode = true; else is_unicode = false; *num_of_nodes = le16_to_cpu(pSMBr->NumberOfReferrals); if (*num_of_nodes < 1) { cifs_dbg(VFS, "num_referrals: must be at least > 0, but we get num_referrals = %d\n", *num_of_nodes); rc = -EINVAL; goto parse_DFS_referrals_exit; } ref = (struct dfs_referral_level_3 *) &(pSMBr->referrals); if (ref->VersionNumber != cpu_to_le16(3)) { cifs_dbg(VFS, "Referrals of V%d version are not supported, should be V3\n", le16_to_cpu(ref->VersionNumber)); rc = -EINVAL; goto parse_DFS_referrals_exit; } /* get the upper boundary of the resp buffer */ data_end = (char *)(&(pSMBr->PathConsumed)) + le16_to_cpu(pSMBr->t2.DataCount); cifs_dbg(FYI, "num_referrals: %d dfs flags: 0x%x ...\n", *num_of_nodes, le32_to_cpu(pSMBr->DFSFlags)); *target_nodes = kcalloc(*num_of_nodes, sizeof(struct dfs_info3_param), GFP_KERNEL); if (*target_nodes == NULL) { rc = -ENOMEM; goto parse_DFS_referrals_exit; } /* collect necessary data from referrals */ for (i = 0; i < *num_of_nodes; i++) { char *temp; int max_len; struct dfs_info3_param *node = (*target_nodes)+i; node->flags = le32_to_cpu(pSMBr->DFSFlags); if (is_unicode) { __le16 *tmp = kmalloc(strlen(searchName)*2 
+ 2, GFP_KERNEL); if (tmp == NULL) { rc = -ENOMEM; goto parse_DFS_referrals_exit; } cifsConvertToUTF16((__le16 *) tmp, searchName, PATH_MAX, nls_codepage, remap); node->path_consumed = cifs_utf16_bytes(tmp, le16_to_cpu(pSMBr->PathConsumed), nls_codepage); kfree(tmp); } else node->path_consumed = le16_to_cpu(pSMBr->PathConsumed); node->server_type = le16_to_cpu(ref->ServerType); node->ref_flag = le16_to_cpu(ref->ReferralEntryFlags); /* copy DfsPath */ temp = (char *)ref + le16_to_cpu(ref->DfsPathOffset); max_len = data_end - temp; node->path_name = cifs_strndup_from_utf16(temp, max_len, is_unicode, nls_codepage); if (!node->path_name) { rc = -ENOMEM; goto parse_DFS_referrals_exit; } /* copy link target UNC */ temp = (char *)ref + le16_to_cpu(ref->NetworkAddressOffset); max_len = data_end - temp; node->node_name = cifs_strndup_from_utf16(temp, max_len, is_unicode, nls_codepage); if (!node->node_name) { rc = -ENOMEM; goto parse_DFS_referrals_exit; } ref++; } parse_DFS_referrals_exit: if (rc) { free_dfs_info_array(*target_nodes, *num_of_nodes); *target_nodes = NULL; *num_of_nodes = 0; } return rc; } int CIFSGetDFSRefer(const unsigned int xid, struct cifs_ses *ses, const char *search_name, struct dfs_info3_param **target_nodes, unsigned int *num_of_nodes, const struct nls_table *nls_codepage, int remap) { /* TRANS2_GET_DFS_REFERRAL */ TRANSACTION2_GET_DFS_REFER_REQ *pSMB = NULL; TRANSACTION2_GET_DFS_REFER_RSP *pSMBr = NULL; int rc = 0; int bytes_returned; int name_len; __u16 params, byte_count; *num_of_nodes = 0; *target_nodes = NULL; cifs_dbg(FYI, "In GetDFSRefer the path %s\n", search_name); if (ses == NULL) return -ENODEV; getDFSRetry: rc = smb_init(SMB_COM_TRANSACTION2, 15, NULL, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; /* server pointer checked in called function, but should never be null here anyway */ pSMB->hdr.Mid = get_next_mid(ses->server); pSMB->hdr.Tid = ses->ipc_tid; pSMB->hdr.Uid = ses->Suid; if (ses->capabilities & CAP_STATUS32) 
pSMB->hdr.Flags2 |= SMBFLG2_ERR_STATUS; if (ses->capabilities & CAP_DFS) pSMB->hdr.Flags2 |= SMBFLG2_DFS; if (ses->capabilities & CAP_UNICODE) { pSMB->hdr.Flags2 |= SMBFLG2_UNICODE; name_len = cifsConvertToUTF16((__le16 *) pSMB->RequestFileName, search_name, PATH_MAX, nls_codepage, remap); name_len++; /* trailing null */ name_len *= 2; } else { /* BB improve the check for buffer overruns BB */ name_len = strnlen(search_name, PATH_MAX); name_len++; /* trailing null */ strncpy(pSMB->RequestFileName, search_name, name_len); } if (ses->server && ses->server->sign) pSMB->hdr.Flags2 |= SMBFLG2_SECURITY_SIGNATURE; pSMB->hdr.Uid = ses->Suid; params = 2 /* level */ + name_len /*includes null */ ; pSMB->TotalDataCount = 0; pSMB->DataCount = 0; pSMB->DataOffset = 0; pSMB->MaxParameterCount = 0; /* BB find exact max SMB PDU from sess structure BB */ pSMB->MaxDataCount = cpu_to_le16(4000); pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Timeout = 0; pSMB->Reserved2 = 0; pSMB->ParameterOffset = cpu_to_le16(offsetof( struct smb_com_transaction2_get_dfs_refer_req, MaxReferralLevel) - 4); pSMB->SetupCount = 1; pSMB->Reserved3 = 0; pSMB->SubCommand = cpu_to_le16(TRANS2_GET_DFS_REFERRAL); byte_count = params + 3 /* pad */ ; pSMB->ParameterCount = cpu_to_le16(params); pSMB->TotalParameterCount = pSMB->ParameterCount; pSMB->MaxReferralLevel = cpu_to_le16(3); inc_rfc1001_len(pSMB, byte_count); pSMB->ByteCount = cpu_to_le16(byte_count); rc = SendReceive(xid, ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); if (rc) { cifs_dbg(FYI, "Send error in GetDFSRefer = %d\n", rc); goto GetDFSRefExit; } rc = validate_t2((struct smb_t2_rsp *)pSMBr); /* BB Also check if enough total bytes returned? 
*/ if (rc || get_bcc(&pSMBr->hdr) < 17) { rc = -EIO; /* bad smb */ goto GetDFSRefExit; } cifs_dbg(FYI, "Decoding GetDFSRefer response BCC: %d Offset %d\n", get_bcc(&pSMBr->hdr), le16_to_cpu(pSMBr->t2.DataOffset)); /* parse returned result into more usable form */ rc = parse_DFS_referrals(pSMBr, num_of_nodes, target_nodes, nls_codepage, remap, search_name); GetDFSRefExit: cifs_buf_release(pSMB); if (rc == -EAGAIN) goto getDFSRetry; return rc; } /* Query File System Info such as free space to old servers such as Win 9x */ int SMBOldQFSInfo(const unsigned int xid, struct cifs_tcon *tcon, struct kstatfs *FSData) { /* level 0x01 SMB_QUERY_FILE_SYSTEM_INFO */ TRANSACTION2_QFSI_REQ *pSMB = NULL; TRANSACTION2_QFSI_RSP *pSMBr = NULL; FILE_SYSTEM_ALLOC_INFO *response_data; int rc = 0; int bytes_returned = 0; __u16 params, byte_count; cifs_dbg(FYI, "OldQFSInfo\n"); oldQFSInfoRetry: rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; params = 2; /* level */ pSMB->TotalDataCount = 0; pSMB->MaxParameterCount = cpu_to_le16(2); pSMB->MaxDataCount = cpu_to_le16(1000); pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Timeout = 0; pSMB->Reserved2 = 0; byte_count = params + 1 /* pad */ ; pSMB->TotalParameterCount = cpu_to_le16(params); pSMB->ParameterCount = pSMB->TotalParameterCount; pSMB->ParameterOffset = cpu_to_le16(offsetof( struct smb_com_transaction2_qfsi_req, InformationLevel) - 4); pSMB->DataCount = 0; pSMB->DataOffset = 0; pSMB->SetupCount = 1; pSMB->Reserved3 = 0; pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_FS_INFORMATION); pSMB->InformationLevel = cpu_to_le16(SMB_INFO_ALLOCATION); inc_rfc1001_len(pSMB, byte_count); pSMB->ByteCount = cpu_to_le16(byte_count); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); if (rc) { cifs_dbg(FYI, "Send error in QFSInfo = %d\n", rc); } else { /* decode response */ rc = validate_t2((struct smb_t2_rsp *)pSMBr); if (rc || 
get_bcc(&pSMBr->hdr) < 18) rc = -EIO; /* bad smb */ else { __u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset); cifs_dbg(FYI, "qfsinf resp BCC: %d Offset %d\n", get_bcc(&pSMBr->hdr), data_offset); response_data = (FILE_SYSTEM_ALLOC_INFO *) (((char *) &pSMBr->hdr.Protocol) + data_offset); FSData->f_bsize = le16_to_cpu(response_data->BytesPerSector) * le32_to_cpu(response_data-> SectorsPerAllocationUnit); FSData->f_blocks = le32_to_cpu(response_data->TotalAllocationUnits); FSData->f_bfree = FSData->f_bavail = le32_to_cpu(response_data->FreeAllocationUnits); cifs_dbg(FYI, "Blocks: %lld Free: %lld Block size %ld\n", (unsigned long long)FSData->f_blocks, (unsigned long long)FSData->f_bfree, FSData->f_bsize); } } cifs_buf_release(pSMB); if (rc == -EAGAIN) goto oldQFSInfoRetry; return rc; } int CIFSSMBQFSInfo(const unsigned int xid, struct cifs_tcon *tcon, struct kstatfs *FSData) { /* level 0x103 SMB_QUERY_FILE_SYSTEM_INFO */ TRANSACTION2_QFSI_REQ *pSMB = NULL; TRANSACTION2_QFSI_RSP *pSMBr = NULL; FILE_SYSTEM_INFO *response_data; int rc = 0; int bytes_returned = 0; __u16 params, byte_count; cifs_dbg(FYI, "In QFSInfo\n"); QFSInfoRetry: rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; params = 2; /* level */ pSMB->TotalDataCount = 0; pSMB->MaxParameterCount = cpu_to_le16(2); pSMB->MaxDataCount = cpu_to_le16(1000); pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Timeout = 0; pSMB->Reserved2 = 0; byte_count = params + 1 /* pad */ ; pSMB->TotalParameterCount = cpu_to_le16(params); pSMB->ParameterCount = pSMB->TotalParameterCount; pSMB->ParameterOffset = cpu_to_le16(offsetof( struct smb_com_transaction2_qfsi_req, InformationLevel) - 4); pSMB->DataCount = 0; pSMB->DataOffset = 0; pSMB->SetupCount = 1; pSMB->Reserved3 = 0; pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_FS_INFORMATION); pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_FS_SIZE_INFO); inc_rfc1001_len(pSMB, byte_count); pSMB->ByteCount = 
cpu_to_le16(byte_count); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); if (rc) { cifs_dbg(FYI, "Send error in QFSInfo = %d\n", rc); } else { /* decode response */ rc = validate_t2((struct smb_t2_rsp *)pSMBr); if (rc || get_bcc(&pSMBr->hdr) < 24) rc = -EIO; /* bad smb */ else { __u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset); response_data = (FILE_SYSTEM_INFO *) (((char *) &pSMBr->hdr.Protocol) + data_offset); FSData->f_bsize = le32_to_cpu(response_data->BytesPerSector) * le32_to_cpu(response_data-> SectorsPerAllocationUnit); FSData->f_blocks = le64_to_cpu(response_data->TotalAllocationUnits); FSData->f_bfree = FSData->f_bavail = le64_to_cpu(response_data->FreeAllocationUnits); cifs_dbg(FYI, "Blocks: %lld Free: %lld Block size %ld\n", (unsigned long long)FSData->f_blocks, (unsigned long long)FSData->f_bfree, FSData->f_bsize); } } cifs_buf_release(pSMB); if (rc == -EAGAIN) goto QFSInfoRetry; return rc; } int CIFSSMBQFSAttributeInfo(const unsigned int xid, struct cifs_tcon *tcon) { /* level 0x105 SMB_QUERY_FILE_SYSTEM_INFO */ TRANSACTION2_QFSI_REQ *pSMB = NULL; TRANSACTION2_QFSI_RSP *pSMBr = NULL; FILE_SYSTEM_ATTRIBUTE_INFO *response_data; int rc = 0; int bytes_returned = 0; __u16 params, byte_count; cifs_dbg(FYI, "In QFSAttributeInfo\n"); QFSAttributeRetry: rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; params = 2; /* level */ pSMB->TotalDataCount = 0; pSMB->MaxParameterCount = cpu_to_le16(2); /* BB find exact max SMB PDU from sess structure BB */ pSMB->MaxDataCount = cpu_to_le16(1000); pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Timeout = 0; pSMB->Reserved2 = 0; byte_count = params + 1 /* pad */ ; pSMB->TotalParameterCount = cpu_to_le16(params); pSMB->ParameterCount = pSMB->TotalParameterCount; pSMB->ParameterOffset = cpu_to_le16(offsetof( struct smb_com_transaction2_qfsi_req, InformationLevel) - 4); pSMB->DataCount = 0; 
	pSMB->DataOffset = 0;
	pSMB->SetupCount = 1;
	pSMB->Reserved3 = 0;
	pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_FS_INFORMATION);
	pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_FS_ATTRIBUTE_INFO);
	inc_rfc1001_len(pSMB, byte_count);
	pSMB->ByteCount = cpu_to_le16(byte_count);

	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
	if (rc) {
		cifs_dbg(VFS, "Send error in QFSAttributeInfo = %d\n", rc);
	} else {		/* decode response */
		rc = validate_t2((struct smb_t2_rsp *)pSMBr);

		if (rc || get_bcc(&pSMBr->hdr) < 13) {
			/* BB also check if enough bytes returned */
			rc = -EIO;	/* bad smb */
		} else {
			__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
			/* cache the attribute info on the tree connection */
			response_data =
			    (FILE_SYSTEM_ATTRIBUTE_INFO
			     *) (((char *) &pSMBr->hdr.Protocol) +
				 data_offset);
			memcpy(&tcon->fsAttrInfo, response_data,
			       sizeof(FILE_SYSTEM_ATTRIBUTE_INFO));
		}
	}
	cifs_buf_release(pSMB);

	if (rc == -EAGAIN)
		goto QFSAttributeRetry;

	return rc;
}

/*
 * Query filesystem device information (info level 0x104,
 * SMB_QUERY_FS_DEVICE_INFO) and cache the result in tcon->fsDevInfo.
 * Retries the exchange on -EAGAIN (session reconnect).
 */
int
CIFSSMBQFSDeviceInfo(const unsigned int xid, struct cifs_tcon *tcon)
{
/* level 0x104 SMB_QUERY_FILE_SYSTEM_INFO */
	TRANSACTION2_QFSI_REQ *pSMB = NULL;
	TRANSACTION2_QFSI_RSP *pSMBr = NULL;
	FILE_SYSTEM_DEVICE_INFO *response_data;
	int rc = 0;
	int bytes_returned = 0;
	__u16 params, byte_count;

	cifs_dbg(FYI, "In QFSDeviceInfo\n");
QFSDeviceRetry:
	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
		      (void **) &pSMBr);
	if (rc)
		return rc;

	/* build the TRANS2 request: 2 parameter bytes (the info level) */
	params = 2;	/* level */
	pSMB->TotalDataCount = 0;
	pSMB->MaxParameterCount = cpu_to_le16(2);
	/* BB find exact max SMB PDU from sess structure BB */
	pSMB->MaxDataCount = cpu_to_le16(1000);
	pSMB->MaxSetupCount = 0;
	pSMB->Reserved = 0;
	pSMB->Flags = 0;
	pSMB->Timeout = 0;
	pSMB->Reserved2 = 0;
	byte_count = params + 1 /* pad */ ;
	pSMB->TotalParameterCount = cpu_to_le16(params);
	pSMB->ParameterCount = pSMB->TotalParameterCount;
	pSMB->ParameterOffset = cpu_to_le16(offsetof(
		struct smb_com_transaction2_qfsi_req, InformationLevel) - 4);
	pSMB->DataCount = 0;
	pSMB->DataOffset = 0;
	pSMB->SetupCount = 1;
pSMB->Reserved3 = 0; pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_FS_INFORMATION); pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_FS_DEVICE_INFO); inc_rfc1001_len(pSMB, byte_count); pSMB->ByteCount = cpu_to_le16(byte_count); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); if (rc) { cifs_dbg(FYI, "Send error in QFSDeviceInfo = %d\n", rc); } else { /* decode response */ rc = validate_t2((struct smb_t2_rsp *)pSMBr); if (rc || get_bcc(&pSMBr->hdr) < sizeof(FILE_SYSTEM_DEVICE_INFO)) rc = -EIO; /* bad smb */ else { __u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset); response_data = (FILE_SYSTEM_DEVICE_INFO *) (((char *) &pSMBr->hdr.Protocol) + data_offset); memcpy(&tcon->fsDevInfo, response_data, sizeof(FILE_SYSTEM_DEVICE_INFO)); } } cifs_buf_release(pSMB); if (rc == -EAGAIN) goto QFSDeviceRetry; return rc; } int CIFSSMBQFSUnixInfo(const unsigned int xid, struct cifs_tcon *tcon) { /* level 0x200 SMB_QUERY_CIFS_UNIX_INFO */ TRANSACTION2_QFSI_REQ *pSMB = NULL; TRANSACTION2_QFSI_RSP *pSMBr = NULL; FILE_SYSTEM_UNIX_INFO *response_data; int rc = 0; int bytes_returned = 0; __u16 params, byte_count; cifs_dbg(FYI, "In QFSUnixInfo\n"); QFSUnixRetry: rc = smb_init_no_reconnect(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; params = 2; /* level */ pSMB->TotalDataCount = 0; pSMB->DataCount = 0; pSMB->DataOffset = 0; pSMB->MaxParameterCount = cpu_to_le16(2); /* BB find exact max SMB PDU from sess structure BB */ pSMB->MaxDataCount = cpu_to_le16(100); pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Timeout = 0; pSMB->Reserved2 = 0; byte_count = params + 1 /* pad */ ; pSMB->ParameterCount = cpu_to_le16(params); pSMB->TotalParameterCount = pSMB->ParameterCount; pSMB->ParameterOffset = cpu_to_le16(offsetof(struct smb_com_transaction2_qfsi_req, InformationLevel) - 4); pSMB->SetupCount = 1; pSMB->Reserved3 = 0; pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_FS_INFORMATION); 
pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_CIFS_UNIX_INFO); inc_rfc1001_len(pSMB, byte_count); pSMB->ByteCount = cpu_to_le16(byte_count); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); if (rc) { cifs_dbg(VFS, "Send error in QFSUnixInfo = %d\n", rc); } else { /* decode response */ rc = validate_t2((struct smb_t2_rsp *)pSMBr); if (rc || get_bcc(&pSMBr->hdr) < 13) { rc = -EIO; /* bad smb */ } else { __u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset); response_data = (FILE_SYSTEM_UNIX_INFO *) (((char *) &pSMBr->hdr.Protocol) + data_offset); memcpy(&tcon->fsUnixInfo, response_data, sizeof(FILE_SYSTEM_UNIX_INFO)); } } cifs_buf_release(pSMB); if (rc == -EAGAIN) goto QFSUnixRetry; return rc; } int CIFSSMBSetFSUnixInfo(const unsigned int xid, struct cifs_tcon *tcon, __u64 cap) { /* level 0x200 SMB_SET_CIFS_UNIX_INFO */ TRANSACTION2_SETFSI_REQ *pSMB = NULL; TRANSACTION2_SETFSI_RSP *pSMBr = NULL; int rc = 0; int bytes_returned = 0; __u16 params, param_offset, offset, byte_count; cifs_dbg(FYI, "In SETFSUnixInfo\n"); SETFSUnixRetry: /* BB switch to small buf init to save memory */ rc = smb_init_no_reconnect(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; params = 4; /* 2 bytes zero followed by info level. 
*/ pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Timeout = 0; pSMB->Reserved2 = 0; param_offset = offsetof(struct smb_com_transaction2_setfsi_req, FileNum) - 4; offset = param_offset + params; pSMB->MaxParameterCount = cpu_to_le16(4); /* BB find exact max SMB PDU from sess structure BB */ pSMB->MaxDataCount = cpu_to_le16(100); pSMB->SetupCount = 1; pSMB->Reserved3 = 0; pSMB->SubCommand = cpu_to_le16(TRANS2_SET_FS_INFORMATION); byte_count = 1 /* pad */ + params + 12; pSMB->DataCount = cpu_to_le16(12); pSMB->ParameterCount = cpu_to_le16(params); pSMB->TotalDataCount = pSMB->DataCount; pSMB->TotalParameterCount = pSMB->ParameterCount; pSMB->ParameterOffset = cpu_to_le16(param_offset); pSMB->DataOffset = cpu_to_le16(offset); /* Params. */ pSMB->FileNum = 0; pSMB->InformationLevel = cpu_to_le16(SMB_SET_CIFS_UNIX_INFO); /* Data. */ pSMB->ClientUnixMajor = cpu_to_le16(CIFS_UNIX_MAJOR_VERSION); pSMB->ClientUnixMinor = cpu_to_le16(CIFS_UNIX_MINOR_VERSION); pSMB->ClientUnixCap = cpu_to_le64(cap); inc_rfc1001_len(pSMB, byte_count); pSMB->ByteCount = cpu_to_le16(byte_count); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); if (rc) { cifs_dbg(VFS, "Send error in SETFSUnixInfo = %d\n", rc); } else { /* decode response */ rc = validate_t2((struct smb_t2_rsp *)pSMBr); if (rc) rc = -EIO; /* bad smb */ } cifs_buf_release(pSMB); if (rc == -EAGAIN) goto SETFSUnixRetry; return rc; } int CIFSSMBQFSPosixInfo(const unsigned int xid, struct cifs_tcon *tcon, struct kstatfs *FSData) { /* level 0x201 SMB_QUERY_CIFS_POSIX_INFO */ TRANSACTION2_QFSI_REQ *pSMB = NULL; TRANSACTION2_QFSI_RSP *pSMBr = NULL; FILE_SYSTEM_POSIX_INFO *response_data; int rc = 0; int bytes_returned = 0; __u16 params, byte_count; cifs_dbg(FYI, "In QFSPosixInfo\n"); QFSPosixRetry: rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; params = 2; /* level */ pSMB->TotalDataCount = 0; pSMB->DataCount = 
0; pSMB->DataOffset = 0; pSMB->MaxParameterCount = cpu_to_le16(2); /* BB find exact max SMB PDU from sess structure BB */ pSMB->MaxDataCount = cpu_to_le16(100); pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Timeout = 0; pSMB->Reserved2 = 0; byte_count = params + 1 /* pad */ ; pSMB->ParameterCount = cpu_to_le16(params); pSMB->TotalParameterCount = pSMB->ParameterCount; pSMB->ParameterOffset = cpu_to_le16(offsetof(struct smb_com_transaction2_qfsi_req, InformationLevel) - 4); pSMB->SetupCount = 1; pSMB->Reserved3 = 0; pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_FS_INFORMATION); pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_POSIX_FS_INFO); inc_rfc1001_len(pSMB, byte_count); pSMB->ByteCount = cpu_to_le16(byte_count); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); if (rc) { cifs_dbg(FYI, "Send error in QFSUnixInfo = %d\n", rc); } else { /* decode response */ rc = validate_t2((struct smb_t2_rsp *)pSMBr); if (rc || get_bcc(&pSMBr->hdr) < 13) { rc = -EIO; /* bad smb */ } else { __u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset); response_data = (FILE_SYSTEM_POSIX_INFO *) (((char *) &pSMBr->hdr.Protocol) + data_offset); FSData->f_bsize = le32_to_cpu(response_data->BlockSize); FSData->f_blocks = le64_to_cpu(response_data->TotalBlocks); FSData->f_bfree = le64_to_cpu(response_data->BlocksAvail); if (response_data->UserBlocksAvail == cpu_to_le64(-1)) { FSData->f_bavail = FSData->f_bfree; } else { FSData->f_bavail = le64_to_cpu(response_data->UserBlocksAvail); } if (response_data->TotalFileNodes != cpu_to_le64(-1)) FSData->f_files = le64_to_cpu(response_data->TotalFileNodes); if (response_data->FreeFileNodes != cpu_to_le64(-1)) FSData->f_ffree = le64_to_cpu(response_data->FreeFileNodes); } } cifs_buf_release(pSMB); if (rc == -EAGAIN) goto QFSPosixRetry; return rc; } /* * We can not use write of zero bytes trick to set file size due to need for * large file support. 
Also note that this SetPathInfo is preferred to * SetFileInfo based method in next routine which is only needed to work around * a sharing violation bugin Samba which this routine can run into. */ int CIFSSMBSetEOF(const unsigned int xid, struct cifs_tcon *tcon, const char *file_name, __u64 size, struct cifs_sb_info *cifs_sb, bool set_allocation) { struct smb_com_transaction2_spi_req *pSMB = NULL; struct smb_com_transaction2_spi_rsp *pSMBr = NULL; struct file_end_of_file_info *parm_data; int name_len; int rc = 0; int bytes_returned = 0; int remap = cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR; __u16 params, byte_count, data_count, param_offset, offset; cifs_dbg(FYI, "In SetEOF\n"); SetEOFRetry: rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { name_len = cifsConvertToUTF16((__le16 *) pSMB->FileName, file_name, PATH_MAX, cifs_sb->local_nls, remap); name_len++; /* trailing null */ name_len *= 2; } else { /* BB improve the check for buffer overruns BB */ name_len = strnlen(file_name, PATH_MAX); name_len++; /* trailing null */ strncpy(pSMB->FileName, file_name, name_len); } params = 6 + name_len; data_count = sizeof(struct file_end_of_file_info); pSMB->MaxParameterCount = cpu_to_le16(2); pSMB->MaxDataCount = cpu_to_le16(4100); pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Timeout = 0; pSMB->Reserved2 = 0; param_offset = offsetof(struct smb_com_transaction2_spi_req, InformationLevel) - 4; offset = param_offset + params; if (set_allocation) { if (tcon->ses->capabilities & CAP_INFOLEVEL_PASSTHRU) pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_ALLOCATION_INFO2); else pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_ALLOCATION_INFO); } else /* Set File Size */ { if (tcon->ses->capabilities & CAP_INFOLEVEL_PASSTHRU) pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_END_OF_FILE_INFO2); else pSMB->InformationLevel = 
cpu_to_le16(SMB_SET_FILE_END_OF_FILE_INFO); } parm_data = (struct file_end_of_file_info *) (((char *) &pSMB->hdr.Protocol) + offset); pSMB->ParameterOffset = cpu_to_le16(param_offset); pSMB->DataOffset = cpu_to_le16(offset); pSMB->SetupCount = 1; pSMB->Reserved3 = 0; pSMB->SubCommand = cpu_to_le16(TRANS2_SET_PATH_INFORMATION); byte_count = 3 /* pad */ + params + data_count; pSMB->DataCount = cpu_to_le16(data_count); pSMB->TotalDataCount = pSMB->DataCount; pSMB->ParameterCount = cpu_to_le16(params); pSMB->TotalParameterCount = pSMB->ParameterCount; pSMB->Reserved4 = 0; inc_rfc1001_len(pSMB, byte_count); parm_data->FileSize = cpu_to_le64(size); pSMB->ByteCount = cpu_to_le16(byte_count); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); if (rc) cifs_dbg(FYI, "SetPathInfo (file size) returned %d\n", rc); cifs_buf_release(pSMB); if (rc == -EAGAIN) goto SetEOFRetry; return rc; } int CIFSSMBSetFileSize(const unsigned int xid, struct cifs_tcon *tcon, struct cifsFileInfo *cfile, __u64 size, bool set_allocation) { struct smb_com_transaction2_sfi_req *pSMB = NULL; struct file_end_of_file_info *parm_data; int rc = 0; __u16 params, param_offset, offset, byte_count, count; cifs_dbg(FYI, "SetFileSize (via SetFileInfo) %lld\n", (long long)size); rc = small_smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB); if (rc) return rc; pSMB->hdr.Pid = cpu_to_le16((__u16)cfile->pid); pSMB->hdr.PidHigh = cpu_to_le16((__u16)(cfile->pid >> 16)); params = 6; pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Timeout = 0; pSMB->Reserved2 = 0; param_offset = offsetof(struct smb_com_transaction2_sfi_req, Fid) - 4; offset = param_offset + params; count = sizeof(struct file_end_of_file_info); pSMB->MaxParameterCount = cpu_to_le16(2); /* BB find exact max SMB PDU from sess structure BB */ pSMB->MaxDataCount = cpu_to_le16(1000); pSMB->SetupCount = 1; pSMB->Reserved3 = 0; pSMB->SubCommand = 
cpu_to_le16(TRANS2_SET_FILE_INFORMATION); byte_count = 3 /* pad */ + params + count; pSMB->DataCount = cpu_to_le16(count); pSMB->ParameterCount = cpu_to_le16(params); pSMB->TotalDataCount = pSMB->DataCount; pSMB->TotalParameterCount = pSMB->ParameterCount; pSMB->ParameterOffset = cpu_to_le16(param_offset); parm_data = (struct file_end_of_file_info *) (((char *) &pSMB->hdr.Protocol) + offset); pSMB->DataOffset = cpu_to_le16(offset); parm_data->FileSize = cpu_to_le64(size); pSMB->Fid = cfile->fid.netfid; if (set_allocation) { if (tcon->ses->capabilities & CAP_INFOLEVEL_PASSTHRU) pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_ALLOCATION_INFO2); else pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_ALLOCATION_INFO); } else /* Set File Size */ { if (tcon->ses->capabilities & CAP_INFOLEVEL_PASSTHRU) pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_END_OF_FILE_INFO2); else pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_END_OF_FILE_INFO); } pSMB->Reserved4 = 0; inc_rfc1001_len(pSMB, byte_count); pSMB->ByteCount = cpu_to_le16(byte_count); rc = SendReceiveNoRsp(xid, tcon->ses, (char *) pSMB, 0); if (rc) { cifs_dbg(FYI, "Send error in SetFileInfo (SetFileSize) = %d\n", rc); } /* Note: On -EAGAIN error only caller can retry on handle based calls since file handle passed in no longer valid */ return rc; } /* Some legacy servers such as NT4 require that the file times be set on an open handle, rather than by pathname - this is awkward due to potential access conflicts on the open, but it is unavoidable for these old servers since the only other choice is to go from 100 nanosecond DCE time and resort to the original setpathinfo level which takes the ancient DOS time format with 2 second granularity */ int CIFSSMBSetFileInfo(const unsigned int xid, struct cifs_tcon *tcon, const FILE_BASIC_INFO *data, __u16 fid, __u32 pid_of_opener) { struct smb_com_transaction2_sfi_req *pSMB = NULL; char *data_offset; int rc = 0; __u16 params, param_offset, offset, byte_count, count; 
cifs_dbg(FYI, "Set Times (via SetFileInfo)\n"); rc = small_smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB); if (rc) return rc; pSMB->hdr.Pid = cpu_to_le16((__u16)pid_of_opener); pSMB->hdr.PidHigh = cpu_to_le16((__u16)(pid_of_opener >> 16)); params = 6; pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Timeout = 0; pSMB->Reserved2 = 0; param_offset = offsetof(struct smb_com_transaction2_sfi_req, Fid) - 4; offset = param_offset + params; data_offset = (char *)pSMB + offsetof(struct smb_hdr, Protocol) + offset; count = sizeof(FILE_BASIC_INFO); pSMB->MaxParameterCount = cpu_to_le16(2); /* BB find max SMB PDU from sess */ pSMB->MaxDataCount = cpu_to_le16(1000); pSMB->SetupCount = 1; pSMB->Reserved3 = 0; pSMB->SubCommand = cpu_to_le16(TRANS2_SET_FILE_INFORMATION); byte_count = 3 /* pad */ + params + count; pSMB->DataCount = cpu_to_le16(count); pSMB->ParameterCount = cpu_to_le16(params); pSMB->TotalDataCount = pSMB->DataCount; pSMB->TotalParameterCount = pSMB->ParameterCount; pSMB->ParameterOffset = cpu_to_le16(param_offset); pSMB->DataOffset = cpu_to_le16(offset); pSMB->Fid = fid; if (tcon->ses->capabilities & CAP_INFOLEVEL_PASSTHRU) pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_BASIC_INFO2); else pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_BASIC_INFO); pSMB->Reserved4 = 0; inc_rfc1001_len(pSMB, byte_count); pSMB->ByteCount = cpu_to_le16(byte_count); memcpy(data_offset, data, sizeof(FILE_BASIC_INFO)); rc = SendReceiveNoRsp(xid, tcon->ses, (char *) pSMB, 0); if (rc) cifs_dbg(FYI, "Send error in Set Time (SetFileInfo) = %d\n", rc); /* Note: On -EAGAIN error only caller can retry on handle based calls since file handle passed in no longer valid */ return rc; } int CIFSSMBSetFileDisposition(const unsigned int xid, struct cifs_tcon *tcon, bool delete_file, __u16 fid, __u32 pid_of_opener) { struct smb_com_transaction2_sfi_req *pSMB = NULL; char *data_offset; int rc = 0; __u16 params, param_offset, offset, byte_count, count; cifs_dbg(FYI, 
		"Set File Disposition (via SetFileInfo)\n");
	rc = small_smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB);
	if (rc)
		return rc;

	/* send the request under the PID of the process that opened the
	   handle; split across the Pid/PidHigh header fields */
	pSMB->hdr.Pid = cpu_to_le16((__u16)pid_of_opener);
	pSMB->hdr.PidHigh = cpu_to_le16((__u16)(pid_of_opener >> 16));

	params = 6;
	pSMB->MaxSetupCount = 0;
	pSMB->Reserved = 0;
	pSMB->Flags = 0;
	pSMB->Timeout = 0;
	pSMB->Reserved2 = 0;
	param_offset = offsetof(struct smb_com_transaction2_sfi_req, Fid) - 4;
	offset = param_offset + params;

	/* data area holds a single byte: the delete-on-close boolean */
	data_offset = (char *) (&pSMB->hdr.Protocol) + offset;

	count = 1;
	pSMB->MaxParameterCount = cpu_to_le16(2);
	/* BB find max SMB PDU from sess */
	pSMB->MaxDataCount = cpu_to_le16(1000);
	pSMB->SetupCount = 1;
	pSMB->Reserved3 = 0;
	pSMB->SubCommand = cpu_to_le16(TRANS2_SET_FILE_INFORMATION);
	byte_count = 3 /* pad */  + params + count;
	pSMB->DataCount = cpu_to_le16(count);
	pSMB->ParameterCount = cpu_to_le16(params);
	pSMB->TotalDataCount = pSMB->DataCount;
	pSMB->TotalParameterCount = pSMB->ParameterCount;
	pSMB->ParameterOffset = cpu_to_le16(param_offset);
	pSMB->DataOffset = cpu_to_le16(offset);
	pSMB->Fid = fid;
	pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_DISPOSITION_INFO);
	pSMB->Reserved4 = 0;
	inc_rfc1001_len(pSMB, byte_count);
	pSMB->ByteCount = cpu_to_le16(byte_count);
	*data_offset = delete_file ?
1 : 0; rc = SendReceiveNoRsp(xid, tcon->ses, (char *) pSMB, 0); if (rc) cifs_dbg(FYI, "Send error in SetFileDisposition = %d\n", rc); return rc; } int CIFSSMBSetPathInfo(const unsigned int xid, struct cifs_tcon *tcon, const char *fileName, const FILE_BASIC_INFO *data, const struct nls_table *nls_codepage, int remap) { TRANSACTION2_SPI_REQ *pSMB = NULL; TRANSACTION2_SPI_RSP *pSMBr = NULL; int name_len; int rc = 0; int bytes_returned = 0; char *data_offset; __u16 params, param_offset, offset, byte_count, count; cifs_dbg(FYI, "In SetTimes\n"); SetTimesRetry: rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { name_len = cifsConvertToUTF16((__le16 *) pSMB->FileName, fileName, PATH_MAX, nls_codepage, remap); name_len++; /* trailing null */ name_len *= 2; } else { /* BB improve the check for buffer overruns BB */ name_len = strnlen(fileName, PATH_MAX); name_len++; /* trailing null */ strncpy(pSMB->FileName, fileName, name_len); } params = 6 + name_len; count = sizeof(FILE_BASIC_INFO); pSMB->MaxParameterCount = cpu_to_le16(2); /* BB find max SMB PDU from sess structure BB */ pSMB->MaxDataCount = cpu_to_le16(1000); pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Timeout = 0; pSMB->Reserved2 = 0; param_offset = offsetof(struct smb_com_transaction2_spi_req, InformationLevel) - 4; offset = param_offset + params; data_offset = (char *) (&pSMB->hdr.Protocol) + offset; pSMB->ParameterOffset = cpu_to_le16(param_offset); pSMB->DataOffset = cpu_to_le16(offset); pSMB->SetupCount = 1; pSMB->Reserved3 = 0; pSMB->SubCommand = cpu_to_le16(TRANS2_SET_PATH_INFORMATION); byte_count = 3 /* pad */ + params + count; pSMB->DataCount = cpu_to_le16(count); pSMB->ParameterCount = cpu_to_le16(params); pSMB->TotalDataCount = pSMB->DataCount; pSMB->TotalParameterCount = pSMB->ParameterCount; if (tcon->ses->capabilities & CAP_INFOLEVEL_PASSTHRU) pSMB->InformationLevel = 
cpu_to_le16(SMB_SET_FILE_BASIC_INFO2); else pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_BASIC_INFO); pSMB->Reserved4 = 0; inc_rfc1001_len(pSMB, byte_count); memcpy(data_offset, data, sizeof(FILE_BASIC_INFO)); pSMB->ByteCount = cpu_to_le16(byte_count); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); if (rc) cifs_dbg(FYI, "SetPathInfo (times) returned %d\n", rc); cifs_buf_release(pSMB); if (rc == -EAGAIN) goto SetTimesRetry; return rc; } /* Can not be used to set time stamps yet (due to old DOS time format) */ /* Can be used to set attributes */ #if 0 /* Possibly not needed - since it turns out that strangely NT4 has a bug handling it anyway and NT4 was what we thought it would be needed for Do not delete it until we prove whether needed for Win9x though */ int CIFSSMBSetAttrLegacy(unsigned int xid, struct cifs_tcon *tcon, char *fileName, __u16 dos_attrs, const struct nls_table *nls_codepage) { SETATTR_REQ *pSMB = NULL; SETATTR_RSP *pSMBr = NULL; int rc = 0; int bytes_returned; int name_len; cifs_dbg(FYI, "In SetAttrLegacy\n"); SetAttrLgcyRetry: rc = smb_init(SMB_COM_SETATTR, 8, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { name_len = ConvertToUTF16((__le16 *) pSMB->fileName, fileName, PATH_MAX, nls_codepage); name_len++; /* trailing null */ name_len *= 2; } else { /* BB improve the check for buffer overruns BB */ name_len = strnlen(fileName, PATH_MAX); name_len++; /* trailing null */ strncpy(pSMB->fileName, fileName, name_len); } pSMB->attr = cpu_to_le16(dos_attrs); pSMB->BufferFormat = 0x04; inc_rfc1001_len(pSMB, name_len + 1); pSMB->ByteCount = cpu_to_le16(name_len + 1); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); if (rc) cifs_dbg(FYI, "Error in LegacySetAttr = %d\n", rc); cifs_buf_release(pSMB); if (rc == -EAGAIN) goto SetAttrLgcyRetry; return rc; } #endif /* temporarily unneeded 
SetAttr legacy function */ static void cifs_fill_unix_set_info(FILE_UNIX_BASIC_INFO *data_offset, const struct cifs_unix_set_info_args *args) { u64 uid = NO_CHANGE_64, gid = NO_CHANGE_64; u64 mode = args->mode; if (uid_valid(args->uid)) uid = from_kuid(&init_user_ns, args->uid); if (gid_valid(args->gid)) gid = from_kgid(&init_user_ns, args->gid); /* * Samba server ignores set of file size to zero due to bugs in some * older clients, but we should be precise - we use SetFileSize to * set file size and do not want to truncate file size to zero * accidentally as happened on one Samba server beta by putting * zero instead of -1 here */ data_offset->EndOfFile = cpu_to_le64(NO_CHANGE_64); data_offset->NumOfBytes = cpu_to_le64(NO_CHANGE_64); data_offset->LastStatusChange = cpu_to_le64(args->ctime); data_offset->LastAccessTime = cpu_to_le64(args->atime); data_offset->LastModificationTime = cpu_to_le64(args->mtime); data_offset->Uid = cpu_to_le64(uid); data_offset->Gid = cpu_to_le64(gid); /* better to leave device as zero when it is */ data_offset->DevMajor = cpu_to_le64(MAJOR(args->device)); data_offset->DevMinor = cpu_to_le64(MINOR(args->device)); data_offset->Permissions = cpu_to_le64(mode); if (S_ISREG(mode)) data_offset->Type = cpu_to_le32(UNIX_FILE); else if (S_ISDIR(mode)) data_offset->Type = cpu_to_le32(UNIX_DIR); else if (S_ISLNK(mode)) data_offset->Type = cpu_to_le32(UNIX_SYMLINK); else if (S_ISCHR(mode)) data_offset->Type = cpu_to_le32(UNIX_CHARDEV); else if (S_ISBLK(mode)) data_offset->Type = cpu_to_le32(UNIX_BLOCKDEV); else if (S_ISFIFO(mode)) data_offset->Type = cpu_to_le32(UNIX_FIFO); else if (S_ISSOCK(mode)) data_offset->Type = cpu_to_le32(UNIX_SOCKET); } int CIFSSMBUnixSetFileInfo(const unsigned int xid, struct cifs_tcon *tcon, const struct cifs_unix_set_info_args *args, u16 fid, u32 pid_of_opener) { struct smb_com_transaction2_sfi_req *pSMB = NULL; char *data_offset; int rc = 0; u16 params, param_offset, offset, byte_count, count; cifs_dbg(FYI, "Set Unix 
Info (via SetFileInfo)\n"); rc = small_smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB); if (rc) return rc; pSMB->hdr.Pid = cpu_to_le16((__u16)pid_of_opener); pSMB->hdr.PidHigh = cpu_to_le16((__u16)(pid_of_opener >> 16)); params = 6; pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Timeout = 0; pSMB->Reserved2 = 0; param_offset = offsetof(struct smb_com_transaction2_sfi_req, Fid) - 4; offset = param_offset + params; data_offset = (char *)pSMB + offsetof(struct smb_hdr, Protocol) + offset; count = sizeof(FILE_UNIX_BASIC_INFO); pSMB->MaxParameterCount = cpu_to_le16(2); /* BB find max SMB PDU from sess */ pSMB->MaxDataCount = cpu_to_le16(1000); pSMB->SetupCount = 1; pSMB->Reserved3 = 0; pSMB->SubCommand = cpu_to_le16(TRANS2_SET_FILE_INFORMATION); byte_count = 3 /* pad */ + params + count; pSMB->DataCount = cpu_to_le16(count); pSMB->ParameterCount = cpu_to_le16(params); pSMB->TotalDataCount = pSMB->DataCount; pSMB->TotalParameterCount = pSMB->ParameterCount; pSMB->ParameterOffset = cpu_to_le16(param_offset); pSMB->DataOffset = cpu_to_le16(offset); pSMB->Fid = fid; pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_UNIX_BASIC); pSMB->Reserved4 = 0; inc_rfc1001_len(pSMB, byte_count); pSMB->ByteCount = cpu_to_le16(byte_count); cifs_fill_unix_set_info((FILE_UNIX_BASIC_INFO *)data_offset, args); rc = SendReceiveNoRsp(xid, tcon->ses, (char *) pSMB, 0); if (rc) cifs_dbg(FYI, "Send error in Set Time (SetFileInfo) = %d\n", rc); /* Note: On -EAGAIN error only caller can retry on handle based calls since file handle passed in no longer valid */ return rc; } int CIFSSMBUnixSetPathInfo(const unsigned int xid, struct cifs_tcon *tcon, const char *file_name, const struct cifs_unix_set_info_args *args, const struct nls_table *nls_codepage, int remap) { TRANSACTION2_SPI_REQ *pSMB = NULL; TRANSACTION2_SPI_RSP *pSMBr = NULL; int name_len; int rc = 0; int bytes_returned = 0; FILE_UNIX_BASIC_INFO *data_offset; __u16 params, param_offset, offset, count, byte_count; 
cifs_dbg(FYI, "In SetUID/GID/Mode\n"); setPermsRetry: rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { name_len = cifsConvertToUTF16((__le16 *) pSMB->FileName, file_name, PATH_MAX, nls_codepage, remap); name_len++; /* trailing null */ name_len *= 2; } else { /* BB improve the check for buffer overruns BB */ name_len = strnlen(file_name, PATH_MAX); name_len++; /* trailing null */ strncpy(pSMB->FileName, file_name, name_len); } params = 6 + name_len; count = sizeof(FILE_UNIX_BASIC_INFO); pSMB->MaxParameterCount = cpu_to_le16(2); /* BB find max SMB PDU from sess structure BB */ pSMB->MaxDataCount = cpu_to_le16(1000); pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Timeout = 0; pSMB->Reserved2 = 0; param_offset = offsetof(struct smb_com_transaction2_spi_req, InformationLevel) - 4; offset = param_offset + params; data_offset = (FILE_UNIX_BASIC_INFO *) ((char *) &pSMB->hdr.Protocol + offset); memset(data_offset, 0, count); pSMB->DataOffset = cpu_to_le16(offset); pSMB->ParameterOffset = cpu_to_le16(param_offset); pSMB->SetupCount = 1; pSMB->Reserved3 = 0; pSMB->SubCommand = cpu_to_le16(TRANS2_SET_PATH_INFORMATION); byte_count = 3 /* pad */ + params + count; pSMB->ParameterCount = cpu_to_le16(params); pSMB->DataCount = cpu_to_le16(count); pSMB->TotalParameterCount = pSMB->ParameterCount; pSMB->TotalDataCount = pSMB->DataCount; pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_UNIX_BASIC); pSMB->Reserved4 = 0; inc_rfc1001_len(pSMB, byte_count); cifs_fill_unix_set_info(data_offset, args); pSMB->ByteCount = cpu_to_le16(byte_count); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); if (rc) cifs_dbg(FYI, "SetPathInfo (perms) returned %d\n", rc); cifs_buf_release(pSMB); if (rc == -EAGAIN) goto setPermsRetry; return rc; } #ifdef CONFIG_CIFS_XATTR /* * Do a path-based QUERY_ALL_EAS call and parse the result. 
This is a common * function used by listxattr and getxattr type calls. When ea_name is set, * it looks for that attribute name and stuffs that value into the EAData * buffer. When ea_name is NULL, it stuffs a list of attribute names into the * buffer. In both cases, the return value is either the length of the * resulting data or a negative error code. If EAData is a NULL pointer then * the data isn't copied to it, but the length is returned. */ ssize_t CIFSSMBQAllEAs(const unsigned int xid, struct cifs_tcon *tcon, const unsigned char *searchName, const unsigned char *ea_name, char *EAData, size_t buf_size, const struct nls_table *nls_codepage, int remap) { /* BB assumes one setup word */ TRANSACTION2_QPI_REQ *pSMB = NULL; TRANSACTION2_QPI_RSP *pSMBr = NULL; int rc = 0; int bytes_returned; int list_len; struct fealist *ea_response_data; struct fea *temp_fea; char *temp_ptr; char *end_of_smb; __u16 params, byte_count, data_offset; unsigned int ea_name_len = ea_name ? strlen(ea_name) : 0; cifs_dbg(FYI, "In Query All EAs path %s\n", searchName); QAllEAsRetry: rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { list_len = cifsConvertToUTF16((__le16 *) pSMB->FileName, searchName, PATH_MAX, nls_codepage, remap); list_len++; /* trailing null */ list_len *= 2; } else { /* BB improve the check for buffer overruns BB */ list_len = strnlen(searchName, PATH_MAX); list_len++; /* trailing null */ strncpy(pSMB->FileName, searchName, list_len); } params = 2 /* level */ + 4 /* reserved */ + list_len /* includes NUL */; pSMB->TotalDataCount = 0; pSMB->MaxParameterCount = cpu_to_le16(2); /* BB find exact max SMB PDU from sess structure BB */ pSMB->MaxDataCount = cpu_to_le16(CIFSMaxBufSize); pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Timeout = 0; pSMB->Reserved2 = 0; pSMB->ParameterOffset = cpu_to_le16(offsetof( struct smb_com_transaction2_qpi_req, InformationLevel) - 
4); pSMB->DataCount = 0; pSMB->DataOffset = 0; pSMB->SetupCount = 1; pSMB->Reserved3 = 0; pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_PATH_INFORMATION); byte_count = params + 1 /* pad */ ; pSMB->TotalParameterCount = cpu_to_le16(params); pSMB->ParameterCount = pSMB->TotalParameterCount; pSMB->InformationLevel = cpu_to_le16(SMB_INFO_QUERY_ALL_EAS); pSMB->Reserved4 = 0; inc_rfc1001_len(pSMB, byte_count); pSMB->ByteCount = cpu_to_le16(byte_count); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); if (rc) { cifs_dbg(FYI, "Send error in QueryAllEAs = %d\n", rc); goto QAllEAsOut; } /* BB also check enough total bytes returned */ /* BB we need to improve the validity checking of these trans2 responses */ rc = validate_t2((struct smb_t2_rsp *)pSMBr); if (rc || get_bcc(&pSMBr->hdr) < 4) { rc = -EIO; /* bad smb */ goto QAllEAsOut; } /* check that length of list is not more than bcc */ /* check that each entry does not go beyond length of list */ /* check that each element of each entry does not go beyond end of list */ /* validate_trans2_offsets() */ /* BB check if start of smb + data_offset > &bcc+ bcc */ data_offset = le16_to_cpu(pSMBr->t2.DataOffset); ea_response_data = (struct fealist *) (((char *) &pSMBr->hdr.Protocol) + data_offset); list_len = le32_to_cpu(ea_response_data->list_len); cifs_dbg(FYI, "ea length %d\n", list_len); if (list_len <= 8) { cifs_dbg(FYI, "empty EA list returned from server\n"); goto QAllEAsOut; } /* make sure list_len doesn't go past end of SMB */ end_of_smb = (char *)pByteArea(&pSMBr->hdr) + get_bcc(&pSMBr->hdr); if ((char *)ea_response_data + list_len > end_of_smb) { cifs_dbg(FYI, "EA list appears to go beyond SMB\n"); rc = -EIO; goto QAllEAsOut; } /* account for ea list len */ list_len -= 4; temp_fea = ea_response_data->list; temp_ptr = (char *)temp_fea; while (list_len > 0) { unsigned int name_len; __u16 value_len; list_len -= 4; temp_ptr += 4; /* make sure we can read name_len and 
value_len */ if (list_len < 0) { cifs_dbg(FYI, "EA entry goes beyond length of list\n"); rc = -EIO; goto QAllEAsOut; } name_len = temp_fea->name_len; value_len = le16_to_cpu(temp_fea->value_len); list_len -= name_len + 1 + value_len; if (list_len < 0) { cifs_dbg(FYI, "EA entry goes beyond length of list\n"); rc = -EIO; goto QAllEAsOut; } if (ea_name) { if (ea_name_len == name_len && memcmp(ea_name, temp_ptr, name_len) == 0) { temp_ptr += name_len + 1; rc = value_len; if (buf_size == 0) goto QAllEAsOut; if ((size_t)value_len > buf_size) { rc = -ERANGE; goto QAllEAsOut; } memcpy(EAData, temp_ptr, value_len); goto QAllEAsOut; } } else { /* account for prefix user. and trailing null */ rc += (5 + 1 + name_len); if (rc < (int) buf_size) { memcpy(EAData, "user.", 5); EAData += 5; memcpy(EAData, temp_ptr, name_len); EAData += name_len; /* null terminate name */ *EAData = 0; ++EAData; } else if (buf_size == 0) { /* skip copy - calc size only */ } else { /* stop before overrun buffer */ rc = -ERANGE; break; } } temp_ptr += name_len + 1 + value_len; temp_fea = (struct fea *)temp_ptr; } /* didn't find the named attribute */ if (ea_name) rc = -ENODATA; QAllEAsOut: cifs_buf_release(pSMB); if (rc == -EAGAIN) goto QAllEAsRetry; return (ssize_t)rc; } int CIFSSMBSetEA(const unsigned int xid, struct cifs_tcon *tcon, const char *fileName, const char *ea_name, const void *ea_value, const __u16 ea_value_len, const struct nls_table *nls_codepage, int remap) { struct smb_com_transaction2_spi_req *pSMB = NULL; struct smb_com_transaction2_spi_rsp *pSMBr = NULL; struct fealist *parm_data; int name_len; int rc = 0; int bytes_returned = 0; __u16 params, param_offset, byte_count, offset, count; cifs_dbg(FYI, "In SetEA\n"); SetEARetry: rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { name_len = cifsConvertToUTF16((__le16 *) pSMB->FileName, fileName, PATH_MAX, nls_codepage, remap); name_len++; /* 
trailing null */ name_len *= 2; } else { /* BB improve the check for buffer overruns BB */ name_len = strnlen(fileName, PATH_MAX); name_len++; /* trailing null */ strncpy(pSMB->FileName, fileName, name_len); } params = 6 + name_len; /* done calculating parms using name_len of file name, now use name_len to calculate length of ea name we are going to create in the inode xattrs */ if (ea_name == NULL) name_len = 0; else name_len = strnlen(ea_name, 255); count = sizeof(*parm_data) + ea_value_len + name_len; pSMB->MaxParameterCount = cpu_to_le16(2); /* BB find max SMB PDU from sess */ pSMB->MaxDataCount = cpu_to_le16(1000); pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Timeout = 0; pSMB->Reserved2 = 0; param_offset = offsetof(struct smb_com_transaction2_spi_req, InformationLevel) - 4; offset = param_offset + params; pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_EA); parm_data = (struct fealist *) (((char *) &pSMB->hdr.Protocol) + offset); pSMB->ParameterOffset = cpu_to_le16(param_offset); pSMB->DataOffset = cpu_to_le16(offset); pSMB->SetupCount = 1; pSMB->Reserved3 = 0; pSMB->SubCommand = cpu_to_le16(TRANS2_SET_PATH_INFORMATION); byte_count = 3 /* pad */ + params + count; pSMB->DataCount = cpu_to_le16(count); parm_data->list_len = cpu_to_le32(count); parm_data->list[0].EA_flags = 0; /* we checked above that name len is less than 255 */ parm_data->list[0].name_len = (__u8)name_len; /* EA names are always ASCII */ if (ea_name) strncpy(parm_data->list[0].name, ea_name, name_len); parm_data->list[0].name[name_len] = 0; parm_data->list[0].value_len = cpu_to_le16(ea_value_len); /* caller ensures that ea_value_len is less than 64K but we need to ensure that it fits within the smb */ /*BB add length check to see if it would fit in negotiated SMB buffer size BB */ /* if (ea_value_len > buffer_size - 512 (enough for header)) */ if (ea_value_len) memcpy(parm_data->list[0].name+name_len+1, ea_value, ea_value_len); pSMB->TotalDataCount = pSMB->DataCount; 
pSMB->ParameterCount = cpu_to_le16(params); pSMB->TotalParameterCount = pSMB->ParameterCount; pSMB->Reserved4 = 0; inc_rfc1001_len(pSMB, byte_count); pSMB->ByteCount = cpu_to_le16(byte_count); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); if (rc) cifs_dbg(FYI, "SetPathInfo (EA) returned %d\n", rc); cifs_buf_release(pSMB); if (rc == -EAGAIN) goto SetEARetry; return rc; } #endif #ifdef CONFIG_CIFS_DNOTIFY_EXPERIMENTAL /* BB unused temporarily */ /* * Years ago the kernel added a "dnotify" function for Samba server, * to allow network clients (such as Windows) to display updated * lists of files in directory listings automatically when * files are added by one user when another user has the * same directory open on their desktop. The Linux cifs kernel * client hooked into the kernel side of this interface for * the same reason, but ironically when the VFS moved from * "dnotify" to "inotify" it became harder to plug in Linux * network file system clients (the most obvious use case * for notify interfaces is when multiple users can update * the contents of the same directory - exactly what network * file systems can do) although the server (Samba) could * still use it. For the short term we leave the worker * function ifdeffed out (below) until inotify is fixed * in the VFS to make it easier to plug in network file * system clients. If inotify turns out to be permanently * incompatible for network fs clients, we could instead simply * expose this config flag by adding a future cifs (and smb2) notify ioctl. 
*/ int CIFSSMBNotify(const unsigned int xid, struct cifs_tcon *tcon, const int notify_subdirs, const __u16 netfid, __u32 filter, struct file *pfile, int multishot, const struct nls_table *nls_codepage) { int rc = 0; struct smb_com_transaction_change_notify_req *pSMB = NULL; struct smb_com_ntransaction_change_notify_rsp *pSMBr = NULL; struct dir_notify_req *dnotify_req; int bytes_returned; cifs_dbg(FYI, "In CIFSSMBNotify for file handle %d\n", (int)netfid); rc = smb_init(SMB_COM_NT_TRANSACT, 23, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; pSMB->TotalParameterCount = 0 ; pSMB->TotalDataCount = 0; pSMB->MaxParameterCount = cpu_to_le32(2); pSMB->MaxDataCount = cpu_to_le32(CIFSMaxBufSize & 0xFFFFFF00); pSMB->MaxSetupCount = 4; pSMB->Reserved = 0; pSMB->ParameterOffset = 0; pSMB->DataCount = 0; pSMB->DataOffset = 0; pSMB->SetupCount = 4; /* single byte does not need le conversion */ pSMB->SubCommand = cpu_to_le16(NT_TRANSACT_NOTIFY_CHANGE); pSMB->ParameterCount = pSMB->TotalParameterCount; if (notify_subdirs) pSMB->WatchTree = 1; /* one byte - no le conversion needed */ pSMB->Reserved2 = 0; pSMB->CompletionFilter = cpu_to_le32(filter); pSMB->Fid = netfid; /* file handle always le */ pSMB->ByteCount = 0; rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *)pSMBr, &bytes_returned, CIFS_ASYNC_OP); if (rc) { cifs_dbg(FYI, "Error in Notify = %d\n", rc); } else { /* Add file to outstanding requests */ /* BB change to kmem cache alloc */ dnotify_req = kmalloc( sizeof(struct dir_notify_req), GFP_KERNEL); if (dnotify_req) { dnotify_req->Pid = pSMB->hdr.Pid; dnotify_req->PidHigh = pSMB->hdr.PidHigh; dnotify_req->Mid = pSMB->hdr.Mid; dnotify_req->Tid = pSMB->hdr.Tid; dnotify_req->Uid = pSMB->hdr.Uid; dnotify_req->netfid = netfid; dnotify_req->pfile = pfile; dnotify_req->filter = filter; dnotify_req->multishot = multishot; spin_lock(&GlobalMid_Lock); list_add_tail(&dnotify_req->lhead, &GlobalDnotifyReqList); spin_unlock(&GlobalMid_Lock); } 
else rc = -ENOMEM; } cifs_buf_release(pSMB); return rc; } #endif /* was needed for dnotify, and will be needed for inotify when VFS fix */
gpl-2.0
articu/linux
arch/mips/kernel/smp.c
225
11712
/* * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * * Copyright (C) 2000, 2001 Kanoj Sarcar * Copyright (C) 2000, 2001 Ralf Baechle * Copyright (C) 2000, 2001 Silicon Graphics, Inc. * Copyright (C) 2000, 2001, 2003 Broadcom Corporation */ #include <linux/cache.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/smp.h> #include <linux/spinlock.h> #include <linux/threads.h> #include <linux/module.h> #include <linux/time.h> #include <linux/timex.h> #include <linux/sched.h> #include <linux/cpumask.h> #include <linux/cpu.h> #include <linux/err.h> #include <linux/ftrace.h> #include <linux/atomic.h> #include <asm/cpu.h> #include <asm/processor.h> #include <asm/idle.h> #include <asm/r4k-timer.h> #include <asm/mmu_context.h> #include <asm/time.h> #include <asm/setup.h> cpumask_t cpu_callin_map; /* Bitmask of started secondaries */ int __cpu_number_map[NR_CPUS]; /* Map physical to logical */ EXPORT_SYMBOL(__cpu_number_map); int __cpu_logical_map[NR_CPUS]; /* Map logical to physical */ EXPORT_SYMBOL(__cpu_logical_map); /* Number of TCs (or siblings in Intel speak) per CPU core */ int smp_num_siblings = 1; EXPORT_SYMBOL(smp_num_siblings); /* representing the TCs (or siblings in Intel speak) of each logical CPU */ cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly; 
EXPORT_SYMBOL(cpu_sibling_map); /* representing the core map of multi-core chips of each logical CPU */ cpumask_t cpu_core_map[NR_CPUS] __read_mostly; EXPORT_SYMBOL(cpu_core_map); /* * A logcal cpu mask containing only one VPE per core to * reduce the number of IPIs on large MT systems. */ cpumask_t cpu_foreign_map __read_mostly; EXPORT_SYMBOL(cpu_foreign_map); /* representing cpus for which sibling maps can be computed */ static cpumask_t cpu_sibling_setup_map; /* representing cpus for which core maps can be computed */ static cpumask_t cpu_core_setup_map; cpumask_t cpu_coherent_mask; static inline void set_cpu_sibling_map(int cpu) { int i; cpumask_set_cpu(cpu, &cpu_sibling_setup_map); if (smp_num_siblings > 1) { for_each_cpu(i, &cpu_sibling_setup_map) { if (cpu_data[cpu].package == cpu_data[i].package && cpu_data[cpu].core == cpu_data[i].core) { cpumask_set_cpu(i, &cpu_sibling_map[cpu]); cpumask_set_cpu(cpu, &cpu_sibling_map[i]); } } } else cpumask_set_cpu(cpu, &cpu_sibling_map[cpu]); } static inline void set_cpu_core_map(int cpu) { int i; cpumask_set_cpu(cpu, &cpu_core_setup_map); for_each_cpu(i, &cpu_core_setup_map) { if (cpu_data[cpu].package == cpu_data[i].package) { cpumask_set_cpu(i, &cpu_core_map[cpu]); cpumask_set_cpu(cpu, &cpu_core_map[i]); } } } /* * Calculate a new cpu_foreign_map mask whenever a * new cpu appears or disappears. 
*/ static inline void calculate_cpu_foreign_map(void) { int i, k, core_present; cpumask_t temp_foreign_map; /* Re-calculate the mask */ for_each_online_cpu(i) { core_present = 0; for_each_cpu(k, &temp_foreign_map) if (cpu_data[i].package == cpu_data[k].package && cpu_data[i].core == cpu_data[k].core) core_present = 1; if (!core_present) cpumask_set_cpu(i, &temp_foreign_map); } cpumask_copy(&cpu_foreign_map, &temp_foreign_map); } struct plat_smp_ops *mp_ops; EXPORT_SYMBOL(mp_ops); void register_smp_ops(struct plat_smp_ops *ops) { if (mp_ops) printk(KERN_WARNING "Overriding previously set SMP ops\n"); mp_ops = ops; } /* * First C code run on the secondary CPUs after being started up by * the master. */ asmlinkage void start_secondary(void) { unsigned int cpu; cpu_probe(); per_cpu_trap_init(false); mips_clockevent_init(); mp_ops->init_secondary(); cpu_report(); /* * XXX parity protection should be folded in here when it's converted * to an option instead of something based on .cputype */ calibrate_delay(); preempt_disable(); cpu = smp_processor_id(); cpu_data[cpu].udelay_val = loops_per_jiffy; cpumask_set_cpu(cpu, &cpu_coherent_mask); notify_cpu_starting(cpu); set_cpu_online(cpu, true); set_cpu_sibling_map(cpu); set_cpu_core_map(cpu); calculate_cpu_foreign_map(); cpumask_set_cpu(cpu, &cpu_callin_map); synchronise_count_slave(cpu); /* * irq will be enabled in ->smp_finish(), enabling it too early * is dangerous. */ WARN_ON_ONCE(!irqs_disabled()); mp_ops->smp_finish(); cpu_startup_entry(CPUHP_ONLINE); } static void stop_this_cpu(void *dummy) { /* * Remove this CPU. Be a bit slow here and * set the bits for every online CPU so we don't miss * any IPI whilst taking this VPE down. 
*/ cpumask_copy(&cpu_foreign_map, cpu_online_mask); /* Make it visible to every other CPU */ smp_mb(); set_cpu_online(smp_processor_id(), false); calculate_cpu_foreign_map(); local_irq_disable(); while (1); } void smp_send_stop(void) { smp_call_function(stop_this_cpu, NULL, 0); } void __init smp_cpus_done(unsigned int max_cpus) { } /* called from main before smp_init() */ void __init smp_prepare_cpus(unsigned int max_cpus) { init_new_context(current, &init_mm); current_thread_info()->cpu = 0; mp_ops->prepare_cpus(max_cpus); set_cpu_sibling_map(0); set_cpu_core_map(0); calculate_cpu_foreign_map(); #ifndef CONFIG_HOTPLUG_CPU init_cpu_present(cpu_possible_mask); #endif cpumask_copy(&cpu_coherent_mask, cpu_possible_mask); } /* preload SMP state for boot cpu */ void smp_prepare_boot_cpu(void) { set_cpu_possible(0, true); set_cpu_online(0, true); cpumask_set_cpu(0, &cpu_callin_map); } int __cpu_up(unsigned int cpu, struct task_struct *tidle) { mp_ops->boot_secondary(cpu, tidle); /* * Trust is futile. We should really have timeouts ... */ while (!cpumask_test_cpu(cpu, &cpu_callin_map)) { udelay(100); schedule(); } synchronise_count_master(cpu); return 0; } /* Not really SMP stuff ... */ int setup_profiling_timer(unsigned int multiplier) { return 0; } static void flush_tlb_all_ipi(void *info) { local_flush_tlb_all(); } void flush_tlb_all(void) { on_each_cpu(flush_tlb_all_ipi, NULL, 1); } static void flush_tlb_mm_ipi(void *mm) { local_flush_tlb_mm((struct mm_struct *)mm); } /* * Special Variant of smp_call_function for use by TLB functions: * * o No return value * o collapses to normal function call on UP kernels * o collapses to normal function call on systems with a single shared * primary cache. 
*/ static inline void smp_on_other_tlbs(void (*func) (void *info), void *info) { smp_call_function(func, info, 1); } static inline void smp_on_each_tlb(void (*func) (void *info), void *info) { preempt_disable(); smp_on_other_tlbs(func, info); func(info); preempt_enable(); } /* * The following tlb flush calls are invoked when old translations are * being torn down, or pte attributes are changing. For single threaded * address spaces, a new context is obtained on the current cpu, and tlb * context on other cpus are invalidated to force a new context allocation * at switch_mm time, should the mm ever be used on other cpus. For * multithreaded address spaces, intercpu interrupts have to be sent. * Another case where intercpu interrupts are required is when the target * mm might be active on another cpu (eg debuggers doing the flushes on * behalf of debugees, kswapd stealing pages from another process etc). * Kanoj 07/00. */ void flush_tlb_mm(struct mm_struct *mm) { preempt_disable(); if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) { smp_on_other_tlbs(flush_tlb_mm_ipi, mm); } else { unsigned int cpu; for_each_online_cpu(cpu) { if (cpu != smp_processor_id() && cpu_context(cpu, mm)) cpu_context(cpu, mm) = 0; } } local_flush_tlb_mm(mm); preempt_enable(); } struct flush_tlb_data { struct vm_area_struct *vma; unsigned long addr1; unsigned long addr2; }; static void flush_tlb_range_ipi(void *info) { struct flush_tlb_data *fd = info; local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2); } void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { struct mm_struct *mm = vma->vm_mm; preempt_disable(); if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) { struct flush_tlb_data fd = { .vma = vma, .addr1 = start, .addr2 = end, }; smp_on_other_tlbs(flush_tlb_range_ipi, &fd); } else { unsigned int cpu; for_each_online_cpu(cpu) { if (cpu != smp_processor_id() && cpu_context(cpu, mm)) cpu_context(cpu, mm) = 0; } } 
local_flush_tlb_range(vma, start, end); preempt_enable(); } static void flush_tlb_kernel_range_ipi(void *info) { struct flush_tlb_data *fd = info; local_flush_tlb_kernel_range(fd->addr1, fd->addr2); } void flush_tlb_kernel_range(unsigned long start, unsigned long end) { struct flush_tlb_data fd = { .addr1 = start, .addr2 = end, }; on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1); } static void flush_tlb_page_ipi(void *info) { struct flush_tlb_data *fd = info; local_flush_tlb_page(fd->vma, fd->addr1); } void flush_tlb_page(struct vm_area_struct *vma, unsigned long page) { preempt_disable(); if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) { struct flush_tlb_data fd = { .vma = vma, .addr1 = page, }; smp_on_other_tlbs(flush_tlb_page_ipi, &fd); } else { unsigned int cpu; for_each_online_cpu(cpu) { if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm)) cpu_context(cpu, vma->vm_mm) = 0; } } local_flush_tlb_page(vma, page); preempt_enable(); } static void flush_tlb_one_ipi(void *info) { unsigned long vaddr = (unsigned long) info; local_flush_tlb_one(vaddr); } void flush_tlb_one(unsigned long vaddr) { smp_on_each_tlb(flush_tlb_one_ipi, (void *) vaddr); } EXPORT_SYMBOL(flush_tlb_page); EXPORT_SYMBOL(flush_tlb_one); #if defined(CONFIG_KEXEC) void (*dump_ipi_function_ptr)(void *) = NULL; void dump_send_ipi(void (*dump_ipi_callback)(void *)) { int i; int cpu = smp_processor_id(); dump_ipi_function_ptr = dump_ipi_callback; smp_mb(); for_each_online_cpu(i) if (i != cpu) mp_ops->send_ipi_single(i, SMP_DUMP); } EXPORT_SYMBOL(dump_send_ipi); #endif #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST static DEFINE_PER_CPU(atomic_t, tick_broadcast_count); static DEFINE_PER_CPU(struct call_single_data, tick_broadcast_csd); void tick_broadcast(const struct cpumask *mask) { atomic_t *count; struct call_single_data *csd; int cpu; for_each_cpu(cpu, mask) { count = &per_cpu(tick_broadcast_count, cpu); csd = &per_cpu(tick_broadcast_csd, cpu); if 
(atomic_inc_return(count) == 1) smp_call_function_single_async(cpu, csd); } } static void tick_broadcast_callee(void *info) { int cpu = smp_processor_id(); tick_receive_broadcast(); atomic_set(&per_cpu(tick_broadcast_count, cpu), 0); } static int __init tick_broadcast_init(void) { struct call_single_data *csd; int cpu; for (cpu = 0; cpu < NR_CPUS; cpu++) { csd = &per_cpu(tick_broadcast_csd, cpu); csd->func = tick_broadcast_callee; } return 0; } early_initcall(tick_broadcast_init); #endif /* CONFIG_GENERIC_CLOCKEVENTS_BROADCAST */
gpl-2.0
telf/TDR_patch_series_1
drivers/gpu/drm/radeon/r600_cs.c
993
79181
/* * Copyright 2008 Advanced Micro Devices, Inc. * Copyright 2008 Red Hat Inc. * Copyright 2009 Jerome Glisse. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
 *
 * Authors:
 *    Dave Airlie
 *    Alex Deucher
 *    Jerome Glisse
 */
#include <linux/kernel.h>
#include <drm/drmP.h>
#include "radeon.h"
#include "r600d.h"
#include "r600_reg_safe.h"

/* Non-zero when running without memory management (legacy path). */
static int r600_nomm;

extern void r600_cs_legacy_get_tiling_conf(struct drm_device *dev, u32 *npipes, u32 *nbanks, u32 *group_size);

/*
 * Per-parser command-stream tracking state.  The checker records the
 * register values it sees while walking the IB so it can cross-validate
 * colorbuffer, depth-buffer and streamout setup before submission.
 */
struct r600_cs_track {
	/* configuration we mirror so that we use same code between kms/ums */
	u32			group_size;
	u32			nbanks;
	u32			npipes;
	/* value we track */
	u32			sq_config;
	u32			log_nsamples;
	u32			nsamples;
	u32			cb_color_base_last[8];
	struct radeon_bo	*cb_color_bo[8];
	u64			cb_color_bo_mc[8];
	u64			cb_color_bo_offset[8];
	struct radeon_bo	*cb_color_frag_bo[8];
	u64			cb_color_frag_offset[8];
	struct radeon_bo	*cb_color_tile_bo[8];
	u64			cb_color_tile_offset[8];
	u32			cb_color_mask[8];
	u32			cb_color_info[8];
	u32			cb_color_view[8];
	u32			cb_color_size_idx[8]; /* unused */
	u32			cb_target_mask;
	u32			cb_shader_mask;  /* unused */
	bool			is_resolve;
	u32			cb_color_size[8];
	u32			vgt_strmout_en;
	u32			vgt_strmout_buffer_en;
	struct radeon_bo	*vgt_strmout_bo[4];
	u64			vgt_strmout_bo_mc[4]; /* unused */
	u32			vgt_strmout_bo_offset[4];
	u32			vgt_strmout_size[4];
	u32			db_depth_control;
	u32			db_depth_info;
	u32			db_depth_size_idx;
	u32			db_depth_view;
	u32			db_depth_size;
	u32			db_offset;
	struct radeon_bo	*db_bo;
	u64			db_bo_mc;
	bool			sx_misc_kill_all_prims;
	bool			cb_dirty;
	bool			db_dirty;
	bool			streamout_dirty;
	struct radeon_bo	*htile_bo;
	u64			htile_offset;
	u32			htile_surface;
};

/*
 * Table-entry helpers.  Initializer order matches struct gpu_formats:
 * { blockwidth, blockheight, blocksize, valid_color, min_family }.
 */
#define FMT_8_BIT(fmt, vc)   [fmt] = { 1, 1, 1, vc, CHIP_R600 }
#define FMT_16_BIT(fmt, vc)  [fmt] = { 1, 1, 2, vc, CHIP_R600 }
#define FMT_24_BIT(fmt)      [fmt] = { 1, 1, 4, 0, CHIP_R600 }
#define FMT_32_BIT(fmt, vc)  [fmt] = { 1, 1, 4, vc, CHIP_R600 }
#define FMT_48_BIT(fmt)      [fmt] = { 1, 1, 8, 0, CHIP_R600 }
#define FMT_64_BIT(fmt, vc)  [fmt] = { 1, 1, 8, vc, CHIP_R600 }
#define FMT_96_BIT(fmt)      [fmt] = { 1, 1, 12, 0, CHIP_R600 }
#define FMT_128_BIT(fmt, vc) [fmt] = { 1, 1, 16, vc, CHIP_R600 }

struct gpu_formats {
	unsigned blockwidth;		/* pixels per block, horizontally */
	unsigned blockheight;		/* pixels per block, vertically */
	unsigned blocksize;		/* bytes per block */
	unsigned valid_color;		/* usable as a CB (render target) format */
	enum radeon_family min_family;	/* first family supporting the format */
};

static const struct gpu_formats color_formats_table[] = {
	/* 8 bit */
	FMT_8_BIT(V_038004_COLOR_8, 1),
	FMT_8_BIT(V_038004_COLOR_4_4, 1),
	FMT_8_BIT(V_038004_COLOR_3_3_2, 1),
	FMT_8_BIT(V_038004_FMT_1, 0),

	/* 16-bit */
	FMT_16_BIT(V_038004_COLOR_16, 1),
	FMT_16_BIT(V_038004_COLOR_16_FLOAT, 1),
	FMT_16_BIT(V_038004_COLOR_8_8, 1),
	FMT_16_BIT(V_038004_COLOR_5_6_5, 1),
	FMT_16_BIT(V_038004_COLOR_6_5_5, 1),
	FMT_16_BIT(V_038004_COLOR_1_5_5_5, 1),
	FMT_16_BIT(V_038004_COLOR_4_4_4_4, 1),
	FMT_16_BIT(V_038004_COLOR_5_5_5_1, 1),

	/* 24-bit */
	FMT_24_BIT(V_038004_FMT_8_8_8),

	/* 32-bit */
	FMT_32_BIT(V_038004_COLOR_32, 1),
	FMT_32_BIT(V_038004_COLOR_32_FLOAT, 1),
	FMT_32_BIT(V_038004_COLOR_16_16, 1),
	FMT_32_BIT(V_038004_COLOR_16_16_FLOAT, 1),
	FMT_32_BIT(V_038004_COLOR_8_24, 1),
	FMT_32_BIT(V_038004_COLOR_8_24_FLOAT, 1),
	FMT_32_BIT(V_038004_COLOR_24_8, 1),
	FMT_32_BIT(V_038004_COLOR_24_8_FLOAT, 1),
	FMT_32_BIT(V_038004_COLOR_10_11_11, 1),
	FMT_32_BIT(V_038004_COLOR_10_11_11_FLOAT, 1),
	FMT_32_BIT(V_038004_COLOR_11_11_10, 1),
	FMT_32_BIT(V_038004_COLOR_11_11_10_FLOAT, 1),
	FMT_32_BIT(V_038004_COLOR_2_10_10_10, 1),
	FMT_32_BIT(V_038004_COLOR_8_8_8_8, 1),
	FMT_32_BIT(V_038004_COLOR_10_10_10_2, 1),
	FMT_32_BIT(V_038004_FMT_5_9_9_9_SHAREDEXP, 0),
	FMT_32_BIT(V_038004_FMT_32_AS_8, 0),
	FMT_32_BIT(V_038004_FMT_32_AS_8_8, 0),

	/* 48-bit */
	FMT_48_BIT(V_038004_FMT_16_16_16),
	FMT_48_BIT(V_038004_FMT_16_16_16_FLOAT),

	/* 64-bit */
	FMT_64_BIT(V_038004_COLOR_X24_8_32_FLOAT, 1),
	FMT_64_BIT(V_038004_COLOR_32_32, 1),
	FMT_64_BIT(V_038004_COLOR_32_32_FLOAT, 1),
	FMT_64_BIT(V_038004_COLOR_16_16_16_16, 1),
	FMT_64_BIT(V_038004_COLOR_16_16_16_16_FLOAT, 1),

	/* 96-bit */
	FMT_96_BIT(V_038004_FMT_32_32_32),
	FMT_96_BIT(V_038004_FMT_32_32_32_FLOAT),

	/* 128-bit */
	FMT_128_BIT(V_038004_COLOR_32_32_32_32, 1),
	FMT_128_BIT(V_038004_COLOR_32_32_32_32_FLOAT, 1),

	[V_038004_FMT_GB_GR] = { 2, 1, 4, 0 },
	[V_038004_FMT_BG_RG] = { 2, 1, 4, 0 },

	/* block compressed formats */
	[V_038004_FMT_BC1]
	= { 4, 4, 8, 0 },
	[V_038004_FMT_BC2] = { 4, 4, 16, 0 },
	[V_038004_FMT_BC3] = { 4, 4, 16, 0 },
	[V_038004_FMT_BC4] = { 4, 4, 8, 0 },
	[V_038004_FMT_BC5] = { 4, 4, 16, 0},
	[V_038004_FMT_BC6] = { 4, 4, 16, 0, CHIP_CEDAR}, /* Evergreen-only */
	[V_038004_FMT_BC7] = { 4, 4, 16, 0, CHIP_CEDAR}, /* Evergreen-only */

	/* The other Evergreen formats */
	[V_038004_FMT_32_AS_32_32_32_32] = { 1, 1, 4, 0, CHIP_CEDAR},
};

/* Return true when @format is a renderable (CB) color format. */
bool r600_fmt_is_valid_color(u32 format)
{
	if (format >= ARRAY_SIZE(color_formats_table))
		return false;

	if (color_formats_table[format].valid_color)
		return true;

	return false;
}

/* Return true when @format is a known texture format supported by @family. */
bool r600_fmt_is_valid_texture(u32 format, enum radeon_family family)
{
	if (format >= ARRAY_SIZE(color_formats_table))
		return false;

	if (family < color_formats_table[format].min_family)
		return false;

	/* blockwidth == 0 marks an unused table slot */
	if (color_formats_table[format].blockwidth > 0)
		return true;

	return false;
}

/* Bytes per block for @format, or 0 for an unknown format. */
int r600_fmt_get_blocksize(u32 format)
{
	if (format >= ARRAY_SIZE(color_formats_table))
		return 0;

	return color_formats_table[format].blocksize;
}

/* Number of blocks covering @w pixels horizontally (rounded up); 0 if unknown. */
int r600_fmt_get_nblocksx(u32 format, u32 w)
{
	unsigned bw;

	if (format >= ARRAY_SIZE(color_formats_table))
		return 0;

	bw = color_formats_table[format].blockwidth;
	if (bw == 0)
		return 0;

	return (w + bw - 1) / bw;
}

/* Number of blocks covering @h pixels vertically (rounded up); 0 if unknown. */
int r600_fmt_get_nblocksy(u32 format, u32 h)
{
	unsigned bh;

	if (format >= ARRAY_SIZE(color_formats_table))
		return 0;

	bh = color_formats_table[format].blockheight;
	if (bh == 0)
		return 0;

	return (h + bh - 1) / bh;
}

/* Inputs to the per-array-mode alignment computation below. */
struct array_mode_checker {
	int array_mode;
	u32 group_size;
	u32 nbanks;
	u32 npipes;
	u32 nsamples;
	u32 blocksize;
};

/* returns alignment in pixels for pitch/height/depth and bytes for base */
static int r600_get_array_mode_alignment(struct array_mode_checker *values,
						u32 *pitch_align,
						u32 *height_align,
						u32 *depth_align,
						u64 *base_align)
{
	u32 tile_width = 8;
	u32 tile_height = 8;
	u32 macro_tile_width = values->nbanks;
	u32 macro_tile_height = values->npipes;
	u32 tile_bytes = tile_width * tile_height * values->blocksize * values->nsamples;
	u32 macro_tile_bytes = macro_tile_width * macro_tile_height * tile_bytes;

	switch (values->array_mode) {
	case ARRAY_LINEAR_GENERAL:
		/* technically tile_width/_height for pitch/height */
		*pitch_align = 1; /* tile_width */
		*height_align = 1; /* tile_height */
		*depth_align = 1;
		*base_align = 1;
		break;
	case ARRAY_LINEAR_ALIGNED:
		*pitch_align = max((u32)64, (u32)(values->group_size / values->blocksize));
		*height_align = 1;
		*depth_align = 1;
		*base_align = values->group_size;
		break;
	case ARRAY_1D_TILED_THIN1:
		*pitch_align = max((u32)tile_width,
				   (u32)(values->group_size /
					 (tile_height * values->blocksize * values->nsamples)));
		*height_align = tile_height;
		*depth_align = 1;
		*base_align = values->group_size;
		break;
	case ARRAY_2D_TILED_THIN1:
		*pitch_align = max((u32)macro_tile_width * tile_width,
				   (u32)((values->group_size * values->nbanks) /
					 (values->blocksize * values->nsamples * tile_width)));
		*height_align = macro_tile_height * tile_height;
		*depth_align = 1;
		*base_align = max(macro_tile_bytes,
				  (*pitch_align) * values->blocksize * (*height_align) * values->nsamples);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * Reset @track to the most conservative (worst-case) state before
 * parsing an IB, so anything userspace fails to program explicitly is
 * still checked against pessimistic assumptions.
 */
static void r600_cs_track_init(struct r600_cs_track *track)
{
	int i;

	/* assume DX9 mode */
	track->sq_config = DX9_CONSTS;
	for (i = 0; i < 8; i++) {
		track->cb_color_base_last[i] = 0;
		track->cb_color_size[i] = 0;
		track->cb_color_size_idx[i] = 0;
		track->cb_color_info[i] = 0;
		track->cb_color_view[i] = 0xFFFFFFFF;
		track->cb_color_bo[i] = NULL;
		track->cb_color_bo_offset[i] = 0xFFFFFFFF;
		track->cb_color_bo_mc[i] = 0xFFFFFFFF;
		track->cb_color_frag_bo[i] = NULL;
		track->cb_color_frag_offset[i] = 0xFFFFFFFF;
		track->cb_color_tile_bo[i] = NULL;
		track->cb_color_tile_offset[i] = 0xFFFFFFFF;
		track->cb_color_mask[i] = 0xFFFFFFFF;
	}
	track->is_resolve = false;
	track->nsamples = 16;
	track->log_nsamples = 4;
	track->cb_target_mask = 0xFFFFFFFF;
	track->cb_shader_mask = 0xFFFFFFFF;
	track->cb_dirty = true;
	track->db_bo = NULL;
	track->db_bo_mc = 0xFFFFFFFF;
	/* assume the biggest format and that htile is enabled
	 */
	track->db_depth_info = 7 | (1 << 25);
	track->db_depth_view = 0xFFFFC000;
	track->db_depth_size = 0xFFFFFFFF;
	track->db_depth_size_idx = 0;
	track->db_depth_control = 0xFFFFFFFF;
	track->db_dirty = true;
	track->htile_bo = NULL;
	track->htile_offset = 0xFFFFFFFF;
	track->htile_surface = 0;

	for (i = 0; i < 4; i++) {
		track->vgt_strmout_size[i] = 0;
		track->vgt_strmout_bo[i] = NULL;
		track->vgt_strmout_bo_offset[i] = 0xFFFFFFFF;
		track->vgt_strmout_bo_mc[i] = 0xFFFFFFFF;
	}
	track->streamout_dirty = true;
	track->sx_misc_kill_all_prims = false;
}

/*
 * Validate colorbuffer @i against the tracked register state: format,
 * pitch/height/base alignment for the programmed array mode, and that
 * the buffer (plus FMASK/CMASK when tiling is enabled) fits inside its
 * backing BO.  Also rewrites CB_COLOR[i]_SIZE in the IB to clamp
 * slice_tile_max.  Returns 0 on success, -EINVAL on a bad CS.
 */
static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
{
	struct r600_cs_track *track = p->track;
	u32 slice_tile_max, size, tmp;
	u32 height, height_align, pitch, pitch_align, depth_align;
	u64 base_offset, base_align;
	struct array_mode_checker array_check;
	volatile u32 *ib = p->ib.ptr;
	unsigned array_mode;
	u32 format;
	/* When resolve is used, the second colorbuffer has always 1 sample. */
	unsigned nsamples = track->is_resolve && i == 1 ? 1 : track->nsamples;

	size = radeon_bo_size(track->cb_color_bo[i]) - track->cb_color_bo_offset[i];
	format = G_0280A0_FORMAT(track->cb_color_info[i]);
	if (!r600_fmt_is_valid_color(format)) {
		dev_warn(p->dev, "%s:%d cb invalid format %d for %d (0x%08X)\n",
			 __func__, __LINE__, format,
			 i, track->cb_color_info[i]);
		return -EINVAL;
	}
	/* pitch in pixels */
	pitch = (G_028060_PITCH_TILE_MAX(track->cb_color_size[i]) + 1) * 8;
	slice_tile_max = G_028060_SLICE_TILE_MAX(track->cb_color_size[i]) + 1;
	slice_tile_max *= 64;
	height = slice_tile_max / pitch;
	if (height > 8192)
		height = 8192;
	array_mode = G_0280A0_ARRAY_MODE(track->cb_color_info[i]);

	base_offset = track->cb_color_bo_mc[i] + track->cb_color_bo_offset[i];
	array_check.array_mode = array_mode;
	array_check.group_size = track->group_size;
	array_check.nbanks = track->nbanks;
	array_check.npipes = track->npipes;
	array_check.nsamples = nsamples;
	array_check.blocksize = r600_fmt_get_blocksize(format);
	if (r600_get_array_mode_alignment(&array_check,
					  &pitch_align, &height_align,
					  &depth_align, &base_align)) {
		dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
			 G_0280A0_ARRAY_MODE(track->cb_color_info[i]),
			 i, track->cb_color_info[i]);
		return -EINVAL;
	}
	switch (array_mode) {
	case V_0280A0_ARRAY_LINEAR_GENERAL:
		break;
	case V_0280A0_ARRAY_LINEAR_ALIGNED:
		break;
	case V_0280A0_ARRAY_1D_TILED_THIN1:
		/* avoid breaking userspace */
		if (height > 7)
			height &= ~0x7;
		break;
	case V_0280A0_ARRAY_2D_TILED_THIN1:
		break;
	default:
		dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
			 G_0280A0_ARRAY_MODE(track->cb_color_info[i]),
			 i, track->cb_color_info[i]);
		return -EINVAL;
	}

	if (!IS_ALIGNED(pitch, pitch_align)) {
		dev_warn(p->dev, "%s:%d cb pitch (%d, 0x%x, %d) invalid\n",
			 __func__, __LINE__, pitch, pitch_align, array_mode);
		return -EINVAL;
	}
	if (!IS_ALIGNED(height, height_align)) {
		dev_warn(p->dev, "%s:%d cb height (%d, 0x%x, %d) invalid\n",
			 __func__, __LINE__, height, height_align, array_mode);
		return -EINVAL;
	}
	if (!IS_ALIGNED(base_offset, base_align)) {
		dev_warn(p->dev, "%s offset[%d] 0x%llx 0x%llx, %d not aligned\n", __func__, i,
			 base_offset, base_align, array_mode);
		return -EINVAL;
	}

	/* check offset */
	tmp = r600_fmt_get_nblocksy(format, height) * r600_fmt_get_nblocksx(format, pitch) *
	      r600_fmt_get_blocksize(format) * nsamples;
	switch (array_mode) {
	default:
	case V_0280A0_ARRAY_LINEAR_GENERAL:
	case V_0280A0_ARRAY_LINEAR_ALIGNED:
		tmp += track->cb_color_view[i] & 0xFF;
		break;
	case V_0280A0_ARRAY_1D_TILED_THIN1:
	case V_0280A0_ARRAY_2D_TILED_THIN1:
		tmp += G_028080_SLICE_MAX(track->cb_color_view[i]) * tmp;
		break;
	}
	if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) {
		if (array_mode == V_0280A0_ARRAY_LINEAR_GENERAL) {
			/* the initial DDX does bad things with the CB size occasionally */
			/* it rounds up height too far for slice tile max but the BO is smaller */
			/* r600c,g also seem to flush at bad times in some apps resulting in
			 * bogus values here. So for linear just allow anything to avoid breaking
			 * broken userspace.
			 */
		} else {
			dev_warn(p->dev, "%s offset[%d] %d %llu %d %lu too big (%d %d) (%d %d %d)\n",
				 __func__, i, array_mode,
				 track->cb_color_bo_offset[i], tmp,
				 radeon_bo_size(track->cb_color_bo[i]),
				 pitch, height, r600_fmt_get_nblocksx(format, pitch),
				 r600_fmt_get_nblocksy(format, height),
				 r600_fmt_get_blocksize(format));
			return -EINVAL;
		}
	}
	/* limit max tile */
	tmp = (height * pitch) >> 6;
	if (tmp < slice_tile_max)
		slice_tile_max = tmp;
	tmp = S_028060_PITCH_TILE_MAX((pitch / 8) - 1) |
		S_028060_SLICE_TILE_MAX(slice_tile_max - 1);
	ib[track->cb_color_size_idx[i]] = tmp;

	/* FMASK/CMASK */
	switch (G_0280A0_TILE_MODE(track->cb_color_info[i])) {
	case V_0280A0_TILE_DISABLE:
		break;
	case V_0280A0_FRAG_ENABLE:
		if (track->nsamples > 1) {
			uint32_t tile_max = G_028100_FMASK_TILE_MAX(track->cb_color_mask[i]);
			/* the tile size is 8x8, but the size is in units of bits.
			 * for bytes, do just * 8. */
			uint32_t bytes = track->nsamples * track->log_nsamples * 8 * (tile_max + 1);

			if (bytes + track->cb_color_frag_offset[i] >
			    radeon_bo_size(track->cb_color_frag_bo[i])) {
				dev_warn(p->dev, "%s FMASK_TILE_MAX too large "
					 "(tile_max=%u, bytes=%u, offset=%llu, bo_size=%lu)\n",
					 __func__, tile_max, bytes,
					 track->cb_color_frag_offset[i],
					 radeon_bo_size(track->cb_color_frag_bo[i]));
				return -EINVAL;
			}
		}
		/* fall through */
	case V_0280A0_CLEAR_ENABLE:
	{
		uint32_t block_max = G_028100_CMASK_BLOCK_MAX(track->cb_color_mask[i]);
		/* One block = 128x128 pixels, one 8x8 tile has 4 bits..
		 * (128*128) / (8*8) / 2 = 128 bytes per block.
		 */
		uint32_t bytes = (block_max + 1) * 128;

		if (bytes + track->cb_color_tile_offset[i] >
		    radeon_bo_size(track->cb_color_tile_bo[i])) {
			dev_warn(p->dev, "%s CMASK_BLOCK_MAX too large "
				 "(block_max=%u, bytes=%u, offset=%llu, bo_size=%lu)\n",
				 __func__, block_max, bytes,
				 track->cb_color_tile_offset[i],
				 radeon_bo_size(track->cb_color_tile_bo[i]));
			return -EINVAL;
		}
		break;
	}
	default:
		dev_warn(p->dev, "%s invalid tile mode\n", __func__);
		return -EINVAL;
	}
	return 0;
}

/*
 * Validate the depth/stencil buffer against the tracked register state:
 * format, size (either rewriting DB_DEPTH_SIZE from the BO size, or
 * checking pitch/height/base alignment for the programmed array mode),
 * and the HTILE (hyperz) surface when enabled.  Clears db_dirty on
 * success; returns -EINVAL on a bad CS.
 */
static int r600_cs_track_validate_db(struct radeon_cs_parser *p)
{
	struct r600_cs_track *track = p->track;
	u32 nviews, bpe, ntiles, size, slice_tile_max, tmp;
	u32 height_align, pitch_align, depth_align;
	u32 pitch = 8192;
	u32 height = 8192;
	u64 base_offset, base_align;
	struct array_mode_checker array_check;
	int array_mode;
	volatile u32 *ib = p->ib.ptr;

	if (track->db_bo == NULL) {
		dev_warn(p->dev, "z/stencil with no depth buffer\n");
		return -EINVAL;
	}
	switch (G_028010_FORMAT(track->db_depth_info)) {
	case V_028010_DEPTH_16:
		bpe = 2;
		break;
	case V_028010_DEPTH_X8_24:
	case V_028010_DEPTH_8_24:
	case V_028010_DEPTH_X8_24_FLOAT:
	case V_028010_DEPTH_8_24_FLOAT:
	case V_028010_DEPTH_32_FLOAT:
		bpe = 4;
		break;
	case V_028010_DEPTH_X24_8_32_FLOAT:
		bpe = 8;
		break;
	default:
		dev_warn(p->dev, "z/stencil with invalid format %d\n",
			 G_028010_FORMAT(track->db_depth_info));
		return -EINVAL;
	}
	if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) {
		/* size never programmed: derive SLICE_TILE_MAX from the BO
		 * and patch the IB in place */
		if (!track->db_depth_size_idx) {
			dev_warn(p->dev, "z/stencil buffer size not set\n");
			return -EINVAL;
		}
		tmp = radeon_bo_size(track->db_bo) - track->db_offset;
		tmp = (tmp / bpe) >> 6;
		if (!tmp) {
			dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %ld)\n",
				 track->db_depth_size, bpe, track->db_offset,
				 radeon_bo_size(track->db_bo));
			return -EINVAL;
		}
		ib[track->db_depth_size_idx] = S_028000_SLICE_TILE_MAX(tmp - 1) | (track->db_depth_size & 0x3FF);
	} else {
		size = radeon_bo_size(track->db_bo);
		/* pitch in pixels */
		pitch = (G_028000_PITCH_TILE_MAX(track->db_depth_size) + 1) * 8;
		slice_tile_max = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
		slice_tile_max *= 64;
		height = slice_tile_max / pitch;
		if (height > 8192)
			height = 8192;
		base_offset = track->db_bo_mc + track->db_offset;
		array_mode = G_028010_ARRAY_MODE(track->db_depth_info);
		array_check.array_mode = array_mode;
		array_check.group_size = track->group_size;
		array_check.nbanks = track->nbanks;
		array_check.npipes = track->npipes;
		array_check.nsamples = track->nsamples;
		array_check.blocksize = bpe;
		if (r600_get_array_mode_alignment(&array_check,
						  &pitch_align, &height_align,
						  &depth_align, &base_align)) {
			dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
				 G_028010_ARRAY_MODE(track->db_depth_info),
				 track->db_depth_info);
			return -EINVAL;
		}
		switch (array_mode) {
		case V_028010_ARRAY_1D_TILED_THIN1:
			/* don't break userspace */
			height &= ~0x7;
			break;
		case V_028010_ARRAY_2D_TILED_THIN1:
			break;
		default:
			dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
				 G_028010_ARRAY_MODE(track->db_depth_info),
				 track->db_depth_info);
			return -EINVAL;
		}

		if (!IS_ALIGNED(pitch, pitch_align)) {
			dev_warn(p->dev, "%s:%d db pitch (%d, 0x%x, %d) invalid\n",
				 __func__, __LINE__, pitch, pitch_align, array_mode);
			return -EINVAL;
		}
		if (!IS_ALIGNED(height, height_align)) {
			dev_warn(p->dev, "%s:%d db height (%d, 0x%x, %d) invalid\n",
				 __func__, __LINE__, height, height_align, array_mode);
			return -EINVAL;
		}
		if (!IS_ALIGNED(base_offset, base_align)) {
			dev_warn(p->dev, "%s offset 0x%llx, 0x%llx, %d not aligned\n", __func__,
				 base_offset, base_align, array_mode);
			return -EINVAL;
		}

		ntiles = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
		nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1;
		tmp = ntiles * bpe * 64 * nviews * track->nsamples;
		if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) {
			dev_warn(p->dev, "z/stencil buffer (%d) too small (0x%08X %d %d %d -> %u have %lu)\n",
				 array_mode, track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset,
				 radeon_bo_size(track->db_bo));
			return -EINVAL;
		}
	}

	/* hyperz */
	if (G_028010_TILE_SURFACE_ENABLE(track->db_depth_info)) {
		unsigned long size;
		unsigned nbx, nby;

		if (track->htile_bo == NULL) {
			dev_warn(p->dev, "%s:%d htile enabled without htile surface 0x%08x\n",
				 __func__, __LINE__, track->db_depth_info);
			return -EINVAL;
		}
		if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) {
			dev_warn(p->dev, "%s:%d htile can't be enabled with bogus db_depth_size 0x%08x\n",
				 __func__, __LINE__, track->db_depth_size);
			return -EINVAL;
		}

		nbx = pitch;
		nby = height;
		if (G_028D24_LINEAR(track->htile_surface)) {
			/* nbx must be 16 htiles aligned == 16 * 8 pixel aligned */
			nbx = round_up(nbx, 16 * 8);
			/* nby is npipes htiles aligned == npipes * 8 pixel aligned */
			nby = round_up(nby, track->npipes * 8);
		} else {
			/* always assume 8x8 htile */
			/* align is htile align * 8, htile align vary according to
			 * number of pipe and tile width and nby
			 */
			switch (track->npipes) {
			case 8:
				/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
				nbx = round_up(nbx, 64 * 8);
				nby = round_up(nby, 64 * 8);
				break;
			case 4:
				/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
				nbx = round_up(nbx, 64 * 8);
				nby = round_up(nby, 32 * 8);
				break;
			case 2:
				/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
				nbx = round_up(nbx, 32 * 8);
				nby = round_up(nby, 32 * 8);
				break;
			case 1:
				/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
				nbx = round_up(nbx, 32 * 8);
				nby = round_up(nby, 16 * 8);
				break;
			default:
				dev_warn(p->dev, "%s:%d invalid num pipes %d\n",
					 __func__, __LINE__, track->npipes);
				return -EINVAL;
			}
		}
		/* compute number of htile */
		nbx = nbx >> 3;
		nby = nby >> 3;
		/* size must be aligned on npipes * 2K boundary */
		size = roundup(nbx * nby * 4, track->npipes * (2 << 10));
		size += track->htile_offset;

		if (size > radeon_bo_size(track->htile_bo)) {
			dev_warn(p->dev, "%s:%d htile surface too small %ld for %ld (%d %d)\n",
				 __func__, __LINE__, radeon_bo_size(track->htile_bo),
				 size, nbx, nby);
			return -EINVAL;
		}
	}

	track->db_dirty = false;
	return 0;
}

static int r600_cs_track_check(struct radeon_cs_parser *p)
{
	struct
	r600_cs_track *track = p->track;
	u32 tmp;
	int r, i;

	/* on legacy kernel we don't perform advanced check */
	if (p->rdev == NULL)
		return 0;

	/* check streamout: each enabled buffer must exist and fit in its BO */
	if (track->streamout_dirty && track->vgt_strmout_en) {
		for (i = 0; i < 4; i++) {
			if (track->vgt_strmout_buffer_en & (1 << i)) {
				if (track->vgt_strmout_bo[i]) {
					u64 offset = (u64)track->vgt_strmout_bo_offset[i] +
							(u64)track->vgt_strmout_size[i];
					if (offset > radeon_bo_size(track->vgt_strmout_bo[i])) {
						DRM_ERROR("streamout %d bo too small: 0x%llx, 0x%lx\n",
							  i, offset,
							  radeon_bo_size(track->vgt_strmout_bo[i]));
						return -EINVAL;
					}
				} else {
					dev_warn(p->dev, "No buffer for streamout %d\n", i);
					return -EINVAL;
				}
			}
		}
		track->streamout_dirty = false;
	}

	if (track->sx_misc_kill_all_prims)
		return 0;

	/* check that we have a cb for each enabled target, we don't check
	 * shader_mask because it seems mesa isn't always setting it :(
	 */
	if (track->cb_dirty) {
		tmp = track->cb_target_mask;

		/* We must check both colorbuffers for RESOLVE. */
		if (track->is_resolve) {
			tmp |= 0xff;
		}

		for (i = 0; i < 8; i++) {
			u32 format = G_0280A0_FORMAT(track->cb_color_info[i]);

			if (format != V_0280A0_COLOR_INVALID &&
			    (tmp >> (i * 4)) & 0xF) {
				/* at least one component is enabled */
				if (track->cb_color_bo[i] == NULL) {
					dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
						 __func__, __LINE__, track->cb_target_mask, track->cb_shader_mask, i);
					return -EINVAL;
				}
				/* perform rewrite of CB_COLOR[0-7]_SIZE */
				r = r600_cs_track_validate_cb(p, i);
				if (r)
					return r;
			}
		}
		track->cb_dirty = false;
	}

	/* Check depth buffer */
	if (track->db_dirty &&
	    G_028010_FORMAT(track->db_depth_info) != V_028010_DEPTH_INVALID &&
	    (G_028800_STENCIL_ENABLE(track->db_depth_control) ||
	     G_028800_Z_ENABLE(track->db_depth_control))) {
		r = r600_cs_track_validate_db(p);
		if (r)
			return r;
	}

	return 0;
}

/**
 * r600_cs_packet_parse_vline() - parse userspace VLINE packet
 * @parser: parser structure holding parsing context.
 *
 * This is an R600-specific function for parsing VLINE packets.
 * Real work is done by r600_cs_common_vline_parse function.
 * Here we just set up ASIC-specific register table and call
 * the common implementation function.
 */
static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
{
	static uint32_t vline_start_end[2] = {AVIVO_D1MODE_VLINE_START_END,
					      AVIVO_D2MODE_VLINE_START_END};
	static uint32_t vline_status[2] = {AVIVO_D1MODE_VLINE_STATUS,
					   AVIVO_D2MODE_VLINE_STATUS};

	return r600_cs_common_vline_parse(p, vline_start_end, vline_status);
}

/**
 * r600_cs_common_vline_parse() - common vline parser
 * @parser: parser structure holding parsing context.
 * @vline_start_end: table of vline_start_end registers
 * @vline_status: table of vline_status registers
 *
 * Userspace sends a special sequence for VLINE waits.
 * PACKET0 - VLINE_START_END + value
 * PACKET3 - WAIT_REG_MEM poll vline status reg
 * RELOC (P3) - crtc_id in reloc.
 *
 * This function parses this and relocates the VLINE START END
 * and WAIT_REG_MEM packets to the correct crtc.
 * It also detects a switched off crtc and nulls out the
 * wait in that case. This function is common for all ASICs that
 * are R600 and newer. The parsing algorithm is the same, and only
 * differs in which registers are used.
 *
 * Caller is the ASIC-specific function which passes the parser
 * context and ASIC-specific register table
 */
int r600_cs_common_vline_parse(struct radeon_cs_parser *p,
			       uint32_t *vline_start_end,
			       uint32_t *vline_status)
{
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	struct radeon_cs_packet p3reloc, wait_reg_mem;
	int crtc_id;
	int r;
	uint32_t header, h_idx, reg, wait_reg_mem_info;
	volatile uint32_t *ib;

	ib = p->ib.ptr;

	/* parse the WAIT_REG_MEM */
	r = radeon_cs_packet_parse(p, &wait_reg_mem, p->idx);
	if (r)
		return r;

	/* check its a WAIT_REG_MEM */
	if (wait_reg_mem.type != RADEON_PACKET_TYPE3 ||
	    wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) {
		DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n");
		return -EINVAL;
	}

	wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1);
	/* bit 4 is reg (0) or mem (1) */
	if (wait_reg_mem_info & 0x10) {
		DRM_ERROR("vline WAIT_REG_MEM waiting on MEM instead of REG\n");
		return -EINVAL;
	}
	/* bit 8 is me (0) or pfp (1) */
	if (wait_reg_mem_info & 0x100) {
		DRM_ERROR("vline WAIT_REG_MEM waiting on PFP instead of ME\n");
		return -EINVAL;
	}
	/* waiting for value to be equal */
	if ((wait_reg_mem_info & 0x7) != 0x3) {
		DRM_ERROR("vline WAIT_REG_MEM function not equal\n");
		return -EINVAL;
	}
	if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != vline_status[0]) {
		DRM_ERROR("vline WAIT_REG_MEM bad reg\n");
		return -EINVAL;
	}
	if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != RADEON_VLINE_STAT) {
		DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n");
		return -EINVAL;
	}

	/* jump over the NOP */
	r = radeon_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2);
	if (r)
		return r;

	/* h_idx points back at the PACKET0 VLINE_START_END header */
	h_idx = p->idx - 2;
	p->idx += wait_reg_mem.count + 2;
	p->idx += p3reloc.count + 2;

	header = radeon_get_ib_value(p, h_idx);
	crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
	reg = R600_CP_PACKET0_GET_REG(header);
	crtc = drm_crtc_find(p->rdev->ddev, crtc_id);
	if (!crtc) {
		DRM_ERROR("cannot find crtc %d\n", crtc_id);
		return -ENOENT;
	}
	radeon_crtc = to_radeon_crtc(crtc);
	crtc_id = radeon_crtc->crtc_id;

	if (!crtc->enabled) {
		/* CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */
		ib[h_idx + 2] = PACKET2(0);
		ib[h_idx + 3] = PACKET2(0);
		ib[h_idx + 4] = PACKET2(0);
		ib[h_idx + 5] = PACKET2(0);
		ib[h_idx + 6] = PACKET2(0);
		ib[h_idx + 7] = PACKET2(0);
		ib[h_idx + 8] = PACKET2(0);
	} else if (reg == vline_start_end[0]) {
		/* retarget the packets at the crtc userspace asked for */
		header &= ~R600_CP_PACKET0_REG_MASK;
		header |= vline_start_end[crtc_id] >> 2;
		ib[h_idx] = header;
		ib[h_idx + 4] = vline_status[crtc_id] >> 2;
	} else {
		DRM_ERROR("unknown crtc reloc\n");
		return -EINVAL;
	}
	return 0;
}

/*
 * Validate a single PACKET0 register write; only the VLINE wait
 * sequence is permitted, everything else is rejected.
 */
static int r600_packet0_check(struct radeon_cs_parser *p,
				struct radeon_cs_packet *pkt,
				unsigned idx, unsigned reg)
{
	int r;

	switch (reg) {
	case AVIVO_D1MODE_VLINE_START_END:
		r = r600_cs_packet_parse_vline(p);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
					idx, reg);
			return r;
		}
		break;
	default:
		printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
		       reg, idx);
		return -EINVAL;
	}
	return 0;
}

/* Check every register touched by a PACKET0 (consecutive registers). */
static int r600_cs_parse_packet0(struct radeon_cs_parser *p,
				struct radeon_cs_packet *pkt)
{
	unsigned reg, i;
	unsigned idx;
	int r;

	idx = pkt->idx + 1;
	reg = pkt->reg;
	for (i = 0; i <= pkt->count; i++, idx++, reg += 4) {
		r = r600_packet0_check(p, pkt, idx, reg);
		if (r) {
			return r;
		}
	}
	return 0;
}

/**
 * r600_cs_check_reg() - check if register is authorized or not
 * @parser: parser structure holding parsing context
 * @reg: register we are testing
 * @idx: index into the cs buffer
 *
 * This function will test against r600_reg_safe_bm and return 0
 * if register is safe. If register is not flagged as safe this function
 * will test it against a list of registers needing special handling.
*/ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) { struct r600_cs_track *track = (struct r600_cs_track *)p->track; struct radeon_bo_list *reloc; u32 m, i, tmp, *ib; int r; i = (reg >> 7); if (i >= ARRAY_SIZE(r600_reg_safe_bm)) { dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); return -EINVAL; } m = 1 << ((reg >> 2) & 31); if (!(r600_reg_safe_bm[i] & m)) return 0; ib = p->ib.ptr; switch (reg) { /* force following reg to 0 in an attempt to disable out buffer * which will need us to better understand how it works to perform * security check on it (Jerome) */ case R_0288A8_SQ_ESGS_RING_ITEMSIZE: case R_008C44_SQ_ESGS_RING_SIZE: case R_0288B0_SQ_ESTMP_RING_ITEMSIZE: case R_008C54_SQ_ESTMP_RING_SIZE: case R_0288C0_SQ_FBUF_RING_ITEMSIZE: case R_008C74_SQ_FBUF_RING_SIZE: case R_0288B4_SQ_GSTMP_RING_ITEMSIZE: case R_008C5C_SQ_GSTMP_RING_SIZE: case R_0288AC_SQ_GSVS_RING_ITEMSIZE: case R_008C4C_SQ_GSVS_RING_SIZE: case R_0288BC_SQ_PSTMP_RING_ITEMSIZE: case R_008C6C_SQ_PSTMP_RING_SIZE: case R_0288C4_SQ_REDUC_RING_ITEMSIZE: case R_008C7C_SQ_REDUC_RING_SIZE: case R_0288B8_SQ_VSTMP_RING_ITEMSIZE: case R_008C64_SQ_VSTMP_RING_SIZE: case R_0288C8_SQ_GS_VERT_ITEMSIZE: /* get value to populate the IB don't remove */ /*tmp =radeon_get_ib_value(p, idx); ib[idx] = 0;*/ break; case SQ_ESGS_RING_BASE: case SQ_GSVS_RING_BASE: case SQ_ESTMP_RING_BASE: case SQ_GSTMP_RING_BASE: case SQ_PSTMP_RING_BASE: case SQ_VSTMP_RING_BASE: r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { dev_warn(p->dev, "bad SET_CONTEXT_REG " "0x%04X\n", reg); return -EINVAL; } ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); break; case SQ_CONFIG: track->sq_config = radeon_get_ib_value(p, idx); break; case R_028800_DB_DEPTH_CONTROL: track->db_depth_control = radeon_get_ib_value(p, idx); track->db_dirty = true; break; case R_028010_DB_DEPTH_INFO: if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) && radeon_cs_packet_next_is_pkt3_nop(p)) { r = 
radeon_cs_packet_next_reloc(p, &reloc, r600_nomm); if (r) { dev_warn(p->dev, "bad SET_CONTEXT_REG " "0x%04X\n", reg); return -EINVAL; } track->db_depth_info = radeon_get_ib_value(p, idx); ib[idx] &= C_028010_ARRAY_MODE; track->db_depth_info &= C_028010_ARRAY_MODE; if (reloc->tiling_flags & RADEON_TILING_MACRO) { ib[idx] |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1); track->db_depth_info |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1); } else { ib[idx] |= S_028010_ARRAY_MODE(V_028010_ARRAY_1D_TILED_THIN1); track->db_depth_info |= S_028010_ARRAY_MODE(V_028010_ARRAY_1D_TILED_THIN1); } } else { track->db_depth_info = radeon_get_ib_value(p, idx); } track->db_dirty = true; break; case R_028004_DB_DEPTH_VIEW: track->db_depth_view = radeon_get_ib_value(p, idx); track->db_dirty = true; break; case R_028000_DB_DEPTH_SIZE: track->db_depth_size = radeon_get_ib_value(p, idx); track->db_depth_size_idx = idx; track->db_dirty = true; break; case R_028AB0_VGT_STRMOUT_EN: track->vgt_strmout_en = radeon_get_ib_value(p, idx); track->streamout_dirty = true; break; case R_028B20_VGT_STRMOUT_BUFFER_EN: track->vgt_strmout_buffer_en = radeon_get_ib_value(p, idx); track->streamout_dirty = true; break; case VGT_STRMOUT_BUFFER_BASE_0: case VGT_STRMOUT_BUFFER_BASE_1: case VGT_STRMOUT_BUFFER_BASE_2: case VGT_STRMOUT_BUFFER_BASE_3: r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm); if (r) { dev_warn(p->dev, "bad SET_CONTEXT_REG " "0x%04X\n", reg); return -EINVAL; } tmp = (reg - VGT_STRMOUT_BUFFER_BASE_0) / 16; track->vgt_strmout_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8; ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); track->vgt_strmout_bo[tmp] = reloc->robj; track->vgt_strmout_bo_mc[tmp] = reloc->gpu_offset; track->streamout_dirty = true; break; case VGT_STRMOUT_BUFFER_SIZE_0: case VGT_STRMOUT_BUFFER_SIZE_1: case VGT_STRMOUT_BUFFER_SIZE_2: case VGT_STRMOUT_BUFFER_SIZE_3: tmp = (reg - VGT_STRMOUT_BUFFER_SIZE_0) / 16; /* size in register is DWs, convert to bytes 
*/ track->vgt_strmout_size[tmp] = radeon_get_ib_value(p, idx) * 4; track->streamout_dirty = true; break; case CP_COHER_BASE: r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm); if (r) { dev_warn(p->dev, "missing reloc for CP_COHER_BASE " "0x%04X\n", reg); return -EINVAL; } ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); break; case R_028238_CB_TARGET_MASK: track->cb_target_mask = radeon_get_ib_value(p, idx); track->cb_dirty = true; break; case R_02823C_CB_SHADER_MASK: track->cb_shader_mask = radeon_get_ib_value(p, idx); break; case R_028C04_PA_SC_AA_CONFIG: tmp = G_028C04_MSAA_NUM_SAMPLES(radeon_get_ib_value(p, idx)); track->log_nsamples = tmp; track->nsamples = 1 << tmp; track->cb_dirty = true; break; case R_028808_CB_COLOR_CONTROL: tmp = G_028808_SPECIAL_OP(radeon_get_ib_value(p, idx)); track->is_resolve = tmp == V_028808_SPECIAL_RESOLVE_BOX; track->cb_dirty = true; break; case R_0280A0_CB_COLOR0_INFO: case R_0280A4_CB_COLOR1_INFO: case R_0280A8_CB_COLOR2_INFO: case R_0280AC_CB_COLOR3_INFO: case R_0280B0_CB_COLOR4_INFO: case R_0280B4_CB_COLOR5_INFO: case R_0280B8_CB_COLOR6_INFO: case R_0280BC_CB_COLOR7_INFO: if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) && radeon_cs_packet_next_is_pkt3_nop(p)) { r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm); if (r) { dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg); return -EINVAL; } tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4; track->cb_color_info[tmp] = radeon_get_ib_value(p, idx); if (reloc->tiling_flags & RADEON_TILING_MACRO) { ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1); track->cb_color_info[tmp] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1); } else if (reloc->tiling_flags & RADEON_TILING_MICRO) { ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1); track->cb_color_info[tmp] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1); } } else { tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4; track->cb_color_info[tmp] = radeon_get_ib_value(p, idx); } track->cb_dirty = true; 
break; case R_028080_CB_COLOR0_VIEW: case R_028084_CB_COLOR1_VIEW: case R_028088_CB_COLOR2_VIEW: case R_02808C_CB_COLOR3_VIEW: case R_028090_CB_COLOR4_VIEW: case R_028094_CB_COLOR5_VIEW: case R_028098_CB_COLOR6_VIEW: case R_02809C_CB_COLOR7_VIEW: tmp = (reg - R_028080_CB_COLOR0_VIEW) / 4; track->cb_color_view[tmp] = radeon_get_ib_value(p, idx); track->cb_dirty = true; break; case R_028060_CB_COLOR0_SIZE: case R_028064_CB_COLOR1_SIZE: case R_028068_CB_COLOR2_SIZE: case R_02806C_CB_COLOR3_SIZE: case R_028070_CB_COLOR4_SIZE: case R_028074_CB_COLOR5_SIZE: case R_028078_CB_COLOR6_SIZE: case R_02807C_CB_COLOR7_SIZE: tmp = (reg - R_028060_CB_COLOR0_SIZE) / 4; track->cb_color_size[tmp] = radeon_get_ib_value(p, idx); track->cb_color_size_idx[tmp] = idx; track->cb_dirty = true; break; /* This register were added late, there is userspace * which does provide relocation for those but set * 0 offset. In order to avoid breaking old userspace * we detect this and set address to point to last * CB_COLOR0_BASE, note that if userspace doesn't set * CB_COLOR0_BASE before this register we will report * error. Old userspace always set CB_COLOR0_BASE * before any of this. */ case R_0280E0_CB_COLOR0_FRAG: case R_0280E4_CB_COLOR1_FRAG: case R_0280E8_CB_COLOR2_FRAG: case R_0280EC_CB_COLOR3_FRAG: case R_0280F0_CB_COLOR4_FRAG: case R_0280F4_CB_COLOR5_FRAG: case R_0280F8_CB_COLOR6_FRAG: case R_0280FC_CB_COLOR7_FRAG: tmp = (reg - R_0280E0_CB_COLOR0_FRAG) / 4; if (!radeon_cs_packet_next_is_pkt3_nop(p)) { if (!track->cb_color_base_last[tmp]) { dev_err(p->dev, "Broken old userspace ? 
no cb_color0_base supplied before trying to write 0x%08X\n", reg); return -EINVAL; } track->cb_color_frag_bo[tmp] = track->cb_color_bo[tmp]; track->cb_color_frag_offset[tmp] = track->cb_color_bo_offset[tmp]; ib[idx] = track->cb_color_base_last[tmp]; } else { r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm); if (r) { dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg); return -EINVAL; } track->cb_color_frag_bo[tmp] = reloc->robj; track->cb_color_frag_offset[tmp] = (u64)ib[idx] << 8; ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); } if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) { track->cb_dirty = true; } break; case R_0280C0_CB_COLOR0_TILE: case R_0280C4_CB_COLOR1_TILE: case R_0280C8_CB_COLOR2_TILE: case R_0280CC_CB_COLOR3_TILE: case R_0280D0_CB_COLOR4_TILE: case R_0280D4_CB_COLOR5_TILE: case R_0280D8_CB_COLOR6_TILE: case R_0280DC_CB_COLOR7_TILE: tmp = (reg - R_0280C0_CB_COLOR0_TILE) / 4; if (!radeon_cs_packet_next_is_pkt3_nop(p)) { if (!track->cb_color_base_last[tmp]) { dev_err(p->dev, "Broken old userspace ? 
no cb_color0_base supplied before trying to write 0x%08X\n", reg); return -EINVAL; } track->cb_color_tile_bo[tmp] = track->cb_color_bo[tmp]; track->cb_color_tile_offset[tmp] = track->cb_color_bo_offset[tmp]; ib[idx] = track->cb_color_base_last[tmp]; } else { r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm); if (r) { dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg); return -EINVAL; } track->cb_color_tile_bo[tmp] = reloc->robj; track->cb_color_tile_offset[tmp] = (u64)ib[idx] << 8; ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); } if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) { track->cb_dirty = true; } break; case R_028100_CB_COLOR0_MASK: case R_028104_CB_COLOR1_MASK: case R_028108_CB_COLOR2_MASK: case R_02810C_CB_COLOR3_MASK: case R_028110_CB_COLOR4_MASK: case R_028114_CB_COLOR5_MASK: case R_028118_CB_COLOR6_MASK: case R_02811C_CB_COLOR7_MASK: tmp = (reg - R_028100_CB_COLOR0_MASK) / 4; track->cb_color_mask[tmp] = radeon_get_ib_value(p, idx); if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) { track->cb_dirty = true; } break; case CB_COLOR0_BASE: case CB_COLOR1_BASE: case CB_COLOR2_BASE: case CB_COLOR3_BASE: case CB_COLOR4_BASE: case CB_COLOR5_BASE: case CB_COLOR6_BASE: case CB_COLOR7_BASE: r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm); if (r) { dev_warn(p->dev, "bad SET_CONTEXT_REG " "0x%04X\n", reg); return -EINVAL; } tmp = (reg - CB_COLOR0_BASE) / 4; track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8; ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); track->cb_color_base_last[tmp] = ib[idx]; track->cb_color_bo[tmp] = reloc->robj; track->cb_color_bo_mc[tmp] = reloc->gpu_offset; track->cb_dirty = true; break; case DB_DEPTH_BASE: r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm); if (r) { dev_warn(p->dev, "bad SET_CONTEXT_REG " "0x%04X\n", reg); return -EINVAL; } track->db_offset = radeon_get_ib_value(p, idx) << 8; ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); track->db_bo = reloc->robj; 
track->db_bo_mc = reloc->gpu_offset; track->db_dirty = true; break; case DB_HTILE_DATA_BASE: r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm); if (r) { dev_warn(p->dev, "bad SET_CONTEXT_REG " "0x%04X\n", reg); return -EINVAL; } track->htile_offset = radeon_get_ib_value(p, idx) << 8; ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); track->htile_bo = reloc->robj; track->db_dirty = true; break; case DB_HTILE_SURFACE: track->htile_surface = radeon_get_ib_value(p, idx); /* force 8x8 htile width and height */ ib[idx] |= 3; track->db_dirty = true; break; case SQ_PGM_START_FS: case SQ_PGM_START_ES: case SQ_PGM_START_VS: case SQ_PGM_START_GS: case SQ_PGM_START_PS: case SQ_ALU_CONST_CACHE_GS_0: case SQ_ALU_CONST_CACHE_GS_1: case SQ_ALU_CONST_CACHE_GS_2: case SQ_ALU_CONST_CACHE_GS_3: case SQ_ALU_CONST_CACHE_GS_4: case SQ_ALU_CONST_CACHE_GS_5: case SQ_ALU_CONST_CACHE_GS_6: case SQ_ALU_CONST_CACHE_GS_7: case SQ_ALU_CONST_CACHE_GS_8: case SQ_ALU_CONST_CACHE_GS_9: case SQ_ALU_CONST_CACHE_GS_10: case SQ_ALU_CONST_CACHE_GS_11: case SQ_ALU_CONST_CACHE_GS_12: case SQ_ALU_CONST_CACHE_GS_13: case SQ_ALU_CONST_CACHE_GS_14: case SQ_ALU_CONST_CACHE_GS_15: case SQ_ALU_CONST_CACHE_PS_0: case SQ_ALU_CONST_CACHE_PS_1: case SQ_ALU_CONST_CACHE_PS_2: case SQ_ALU_CONST_CACHE_PS_3: case SQ_ALU_CONST_CACHE_PS_4: case SQ_ALU_CONST_CACHE_PS_5: case SQ_ALU_CONST_CACHE_PS_6: case SQ_ALU_CONST_CACHE_PS_7: case SQ_ALU_CONST_CACHE_PS_8: case SQ_ALU_CONST_CACHE_PS_9: case SQ_ALU_CONST_CACHE_PS_10: case SQ_ALU_CONST_CACHE_PS_11: case SQ_ALU_CONST_CACHE_PS_12: case SQ_ALU_CONST_CACHE_PS_13: case SQ_ALU_CONST_CACHE_PS_14: case SQ_ALU_CONST_CACHE_PS_15: case SQ_ALU_CONST_CACHE_VS_0: case SQ_ALU_CONST_CACHE_VS_1: case SQ_ALU_CONST_CACHE_VS_2: case SQ_ALU_CONST_CACHE_VS_3: case SQ_ALU_CONST_CACHE_VS_4: case SQ_ALU_CONST_CACHE_VS_5: case SQ_ALU_CONST_CACHE_VS_6: case SQ_ALU_CONST_CACHE_VS_7: case SQ_ALU_CONST_CACHE_VS_8: case SQ_ALU_CONST_CACHE_VS_9: case SQ_ALU_CONST_CACHE_VS_10: case 
SQ_ALU_CONST_CACHE_VS_11:
	case SQ_ALU_CONST_CACHE_VS_12:
	case SQ_ALU_CONST_CACHE_VS_13:
	case SQ_ALU_CONST_CACHE_VS_14:
	case SQ_ALU_CONST_CACHE_VS_15:
		/* shader program / ALU constant cache base addresses:
		 * patch the bo GPU offset (in 256-byte units) into the IB */
		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
		break;
	case SX_MEMORY_EXPORT_BASE:
		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
		if (r) {
			dev_warn(p->dev, "bad SET_CONFIG_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
		break;
	case SX_MISC:
		track->sx_misc_kill_all_prims = (radeon_get_ib_value(p, idx) & 0x1) != 0;
		break;
	default:
		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
		return -EINVAL;
	}
	return 0;
}

/*
 * Return the minified dimension of mip level @level for base size @size.
 * Levels above the base are rounded up to a power of two.
 */
unsigned r600_mip_minify(unsigned size, unsigned level)
{
	unsigned val;

	val = max(1U, size >> level);
	if (level > 0)
		val = roundup_pow_of_two(val);
	return val;
}

/*
 * Compute the byte size of the base level (*l0_size) and of the whole
 * mip chain (*mipmap_size) for the given texture parameters, honouring
 * the block/height/base alignment of the selected array mode.
 */
static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned llevel,
			      unsigned w0, unsigned h0, unsigned d0, unsigned nsamples, unsigned format,
			      unsigned block_align, unsigned height_align, unsigned base_align,
			      unsigned *l0_size, unsigned *mipmap_size)
{
	unsigned offset, i, level;
	unsigned width, height, depth, size;
	unsigned blocksize;
	unsigned nbx, nby;
	unsigned nlevels = llevel - blevel + 1;

	*l0_size = -1;
	blocksize = r600_fmt_get_blocksize(format);

	w0 = r600_mip_minify(w0, 0);
	h0 = r600_mip_minify(h0, 0);
	d0 = r600_mip_minify(d0, 0);
	for(i = 0, offset = 0, level = blevel; i < nlevels; i++, level++) {
		/* dimensions of this level, in format blocks, padded to alignment */
		width = r600_mip_minify(w0, i);
		nbx = r600_fmt_get_nblocksx(format, width);

		nbx = round_up(nbx, block_align);

		height = r600_mip_minify(h0, i);
		nby = r600_fmt_get_nblocksy(format, height);
		nby = round_up(nby, height_align);

		depth = r600_mip_minify(d0, i);

		size = nbx * nby * blocksize * nsamples;
		if (nfaces)
			size *= nfaces;
		else
			size *= depth;

		if (i == 0)
			*l0_size = size;

		if (i == 0 || i == 1)
			offset = round_up(offset,
base_align); offset += size; } *mipmap_size = offset; if (llevel == 0) *mipmap_size = *l0_size; if (!blevel) *mipmap_size -= *l0_size; } /** * r600_check_texture_resource() - check if register is authorized or not * @p: parser structure holding parsing context * @idx: index into the cs buffer * @texture: texture's bo structure * @mipmap: mipmap's bo structure * * This function will check that the resource has valid field and that * the texture and mipmap bo object are big enough to cover this resource. */ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx, struct radeon_bo *texture, struct radeon_bo *mipmap, u64 base_offset, u64 mip_offset, u32 tiling_flags) { struct r600_cs_track *track = p->track; u32 dim, nfaces, llevel, blevel, w0, h0, d0; u32 word0, word1, l0_size, mipmap_size, word2, word3, word4, word5; u32 height_align, pitch, pitch_align, depth_align; u32 barray, larray; u64 base_align; struct array_mode_checker array_check; u32 format; bool is_array; /* on legacy kernel we don't perform advanced check */ if (p->rdev == NULL) return 0; /* convert to bytes */ base_offset <<= 8; mip_offset <<= 8; word0 = radeon_get_ib_value(p, idx + 0); if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { if (tiling_flags & RADEON_TILING_MACRO) word0 |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1); else if (tiling_flags & RADEON_TILING_MICRO) word0 |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1); } word1 = radeon_get_ib_value(p, idx + 1); word2 = radeon_get_ib_value(p, idx + 2) << 8; word3 = radeon_get_ib_value(p, idx + 3) << 8; word4 = radeon_get_ib_value(p, idx + 4); word5 = radeon_get_ib_value(p, idx + 5); dim = G_038000_DIM(word0); w0 = G_038000_TEX_WIDTH(word0) + 1; pitch = (G_038000_PITCH(word0) + 1) * 8; h0 = G_038004_TEX_HEIGHT(word1) + 1; d0 = G_038004_TEX_DEPTH(word1); format = G_038004_DATA_FORMAT(word1); blevel = G_038010_BASE_LEVEL(word4); llevel = G_038014_LAST_LEVEL(word5); /* pitch in texels */ array_check.array_mode = 
G_038000_TILE_MODE(word0); array_check.group_size = track->group_size; array_check.nbanks = track->nbanks; array_check.npipes = track->npipes; array_check.nsamples = 1; array_check.blocksize = r600_fmt_get_blocksize(format); nfaces = 1; is_array = false; switch (dim) { case V_038000_SQ_TEX_DIM_1D: case V_038000_SQ_TEX_DIM_2D: case V_038000_SQ_TEX_DIM_3D: break; case V_038000_SQ_TEX_DIM_CUBEMAP: if (p->family >= CHIP_RV770) nfaces = 8; else nfaces = 6; break; case V_038000_SQ_TEX_DIM_1D_ARRAY: case V_038000_SQ_TEX_DIM_2D_ARRAY: is_array = true; break; case V_038000_SQ_TEX_DIM_2D_ARRAY_MSAA: is_array = true; /* fall through */ case V_038000_SQ_TEX_DIM_2D_MSAA: array_check.nsamples = 1 << llevel; llevel = 0; break; default: dev_warn(p->dev, "this kernel doesn't support %d texture dim\n", G_038000_DIM(word0)); return -EINVAL; } if (!r600_fmt_is_valid_texture(format, p->family)) { dev_warn(p->dev, "%s:%d texture invalid format %d\n", __func__, __LINE__, format); return -EINVAL; } if (r600_get_array_mode_alignment(&array_check, &pitch_align, &height_align, &depth_align, &base_align)) { dev_warn(p->dev, "%s:%d tex array mode (%d) invalid\n", __func__, __LINE__, G_038000_TILE_MODE(word0)); return -EINVAL; } /* XXX check height as well... 
*/ if (!IS_ALIGNED(pitch, pitch_align)) { dev_warn(p->dev, "%s:%d tex pitch (%d, 0x%x, %d) invalid\n", __func__, __LINE__, pitch, pitch_align, G_038000_TILE_MODE(word0)); return -EINVAL; } if (!IS_ALIGNED(base_offset, base_align)) { dev_warn(p->dev, "%s:%d tex base offset (0x%llx, 0x%llx, %d) invalid\n", __func__, __LINE__, base_offset, base_align, G_038000_TILE_MODE(word0)); return -EINVAL; } if (!IS_ALIGNED(mip_offset, base_align)) { dev_warn(p->dev, "%s:%d tex mip offset (0x%llx, 0x%llx, %d) invalid\n", __func__, __LINE__, mip_offset, base_align, G_038000_TILE_MODE(word0)); return -EINVAL; } if (blevel > llevel) { dev_warn(p->dev, "texture blevel %d > llevel %d\n", blevel, llevel); } if (is_array) { barray = G_038014_BASE_ARRAY(word5); larray = G_038014_LAST_ARRAY(word5); nfaces = larray - barray + 1; } r600_texture_size(nfaces, blevel, llevel, w0, h0, d0, array_check.nsamples, format, pitch_align, height_align, base_align, &l0_size, &mipmap_size); /* using get ib will give us the offset into the texture bo */ if ((l0_size + word2) > radeon_bo_size(texture)) { dev_warn(p->dev, "texture bo too small ((%d %d) (%d %d) %d %d %d -> %d have %ld)\n", w0, h0, pitch_align, height_align, array_check.array_mode, format, word2, l0_size, radeon_bo_size(texture)); dev_warn(p->dev, "alignments %d %d %d %lld\n", pitch, pitch_align, height_align, base_align); return -EINVAL; } /* using get ib will give us the offset into the mipmap bo */ if ((mipmap_size + word3) > radeon_bo_size(mipmap)) { /*dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n", w0, h0, format, blevel, nlevels, word3, mipmap_size, radeon_bo_size(texture));*/ } return 0; } static bool r600_is_safe_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) { u32 m, i; i = (reg >> 7); if (i >= ARRAY_SIZE(r600_reg_safe_bm)) { dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); return false; } m = 1 << ((reg >> 2) & 31); if (!(r600_reg_safe_bm[i] & m)) return true; dev_warn(p->dev, 
"forbidden register 0x%08x at %d\n", reg, idx); return false; } static int r600_packet3_check(struct radeon_cs_parser *p, struct radeon_cs_packet *pkt) { struct radeon_bo_list *reloc; struct r600_cs_track *track; volatile u32 *ib; unsigned idx; unsigned i; unsigned start_reg, end_reg, reg; int r; u32 idx_value; track = (struct r600_cs_track *)p->track; ib = p->ib.ptr; idx = pkt->idx + 1; idx_value = radeon_get_ib_value(p, idx); switch (pkt->opcode) { case PACKET3_SET_PREDICATION: { int pred_op; int tmp; uint64_t offset; if (pkt->count != 1) { DRM_ERROR("bad SET PREDICATION\n"); return -EINVAL; } tmp = radeon_get_ib_value(p, idx + 1); pred_op = (tmp >> 16) & 0x7; /* for the clear predicate operation */ if (pred_op == 0) return 0; if (pred_op > 2) { DRM_ERROR("bad SET PREDICATION operation %d\n", pred_op); return -EINVAL; } r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm); if (r) { DRM_ERROR("bad SET PREDICATION\n"); return -EINVAL; } offset = reloc->gpu_offset + (idx_value & 0xfffffff0) + ((u64)(tmp & 0xff) << 32); ib[idx + 0] = offset; ib[idx + 1] = (tmp & 0xffffff00) | (upper_32_bits(offset) & 0xff); } break; case PACKET3_START_3D_CMDBUF: if (p->family >= CHIP_RV770 || pkt->count) { DRM_ERROR("bad START_3D\n"); return -EINVAL; } break; case PACKET3_CONTEXT_CONTROL: if (pkt->count != 1) { DRM_ERROR("bad CONTEXT_CONTROL\n"); return -EINVAL; } break; case PACKET3_INDEX_TYPE: case PACKET3_NUM_INSTANCES: if (pkt->count) { DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES\n"); return -EINVAL; } break; case PACKET3_DRAW_INDEX: { uint64_t offset; if (pkt->count != 3) { DRM_ERROR("bad DRAW_INDEX\n"); return -EINVAL; } r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm); if (r) { DRM_ERROR("bad DRAW_INDEX\n"); return -EINVAL; } offset = reloc->gpu_offset + idx_value + ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32); ib[idx+0] = offset; ib[idx+1] = upper_32_bits(offset) & 0xff; r = r600_cs_track_check(p); if (r) { dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, 
__LINE__); return r; } break; } case PACKET3_DRAW_INDEX_AUTO: if (pkt->count != 1) { DRM_ERROR("bad DRAW_INDEX_AUTO\n"); return -EINVAL; } r = r600_cs_track_check(p); if (r) { dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx); return r; } break; case PACKET3_DRAW_INDEX_IMMD_BE: case PACKET3_DRAW_INDEX_IMMD: if (pkt->count < 2) { DRM_ERROR("bad DRAW_INDEX_IMMD\n"); return -EINVAL; } r = r600_cs_track_check(p); if (r) { dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); return r; } break; case PACKET3_WAIT_REG_MEM: if (pkt->count != 5) { DRM_ERROR("bad WAIT_REG_MEM\n"); return -EINVAL; } /* bit 4 is reg (0) or mem (1) */ if (idx_value & 0x10) { uint64_t offset; r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm); if (r) { DRM_ERROR("bad WAIT_REG_MEM\n"); return -EINVAL; } offset = reloc->gpu_offset + (radeon_get_ib_value(p, idx+1) & 0xfffffff0) + ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32); ib[idx+1] = (ib[idx+1] & 0x3) | (offset & 0xfffffff0); ib[idx+2] = upper_32_bits(offset) & 0xff; } else if (idx_value & 0x100) { DRM_ERROR("cannot use PFP on REG wait\n"); return -EINVAL; } break; case PACKET3_CP_DMA: { u32 command, size; u64 offset, tmp; if (pkt->count != 4) { DRM_ERROR("bad CP DMA\n"); return -EINVAL; } command = radeon_get_ib_value(p, idx+4); size = command & 0x1fffff; if (command & PACKET3_CP_DMA_CMD_SAS) { /* src address space is register */ DRM_ERROR("CP DMA SAS not supported\n"); return -EINVAL; } else { if (command & PACKET3_CP_DMA_CMD_SAIC) { DRM_ERROR("CP DMA SAIC only supported for registers\n"); return -EINVAL; } /* src address space is memory */ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm); if (r) { DRM_ERROR("bad CP DMA SRC\n"); return -EINVAL; } tmp = radeon_get_ib_value(p, idx) + ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32); offset = reloc->gpu_offset + tmp; if ((tmp + size) > radeon_bo_size(reloc->robj)) { dev_warn(p->dev, "CP DMA src buffer too small (%llu %lu)\n", tmp + size, 
radeon_bo_size(reloc->robj)); return -EINVAL; } ib[idx] = offset; ib[idx+1] = (ib[idx+1] & 0xffffff00) | (upper_32_bits(offset) & 0xff); } if (command & PACKET3_CP_DMA_CMD_DAS) { /* dst address space is register */ DRM_ERROR("CP DMA DAS not supported\n"); return -EINVAL; } else { /* dst address space is memory */ if (command & PACKET3_CP_DMA_CMD_DAIC) { DRM_ERROR("CP DMA DAIC only supported for registers\n"); return -EINVAL; } r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm); if (r) { DRM_ERROR("bad CP DMA DST\n"); return -EINVAL; } tmp = radeon_get_ib_value(p, idx+2) + ((u64)(radeon_get_ib_value(p, idx+3) & 0xff) << 32); offset = reloc->gpu_offset + tmp; if ((tmp + size) > radeon_bo_size(reloc->robj)) { dev_warn(p->dev, "CP DMA dst buffer too small (%llu %lu)\n", tmp + size, radeon_bo_size(reloc->robj)); return -EINVAL; } ib[idx+2] = offset; ib[idx+3] = upper_32_bits(offset) & 0xff; } break; } case PACKET3_SURFACE_SYNC: if (pkt->count != 3) { DRM_ERROR("bad SURFACE_SYNC\n"); return -EINVAL; } /* 0xffffffff/0x0 is flush all cache flag */ if (radeon_get_ib_value(p, idx + 1) != 0xffffffff || radeon_get_ib_value(p, idx + 2) != 0) { r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm); if (r) { DRM_ERROR("bad SURFACE_SYNC\n"); return -EINVAL; } ib[idx+2] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); } break; case PACKET3_EVENT_WRITE: if (pkt->count != 2 && pkt->count != 0) { DRM_ERROR("bad EVENT_WRITE\n"); return -EINVAL; } if (pkt->count) { uint64_t offset; r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm); if (r) { DRM_ERROR("bad EVENT_WRITE\n"); return -EINVAL; } offset = reloc->gpu_offset + (radeon_get_ib_value(p, idx+1) & 0xfffffff8) + ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32); ib[idx+1] = offset & 0xfffffff8; ib[idx+2] = upper_32_bits(offset) & 0xff; } break; case PACKET3_EVENT_WRITE_EOP: { uint64_t offset; if (pkt->count != 4) { DRM_ERROR("bad EVENT_WRITE_EOP\n"); return -EINVAL; } r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm); if 
(r) { DRM_ERROR("bad EVENT_WRITE\n"); return -EINVAL; } offset = reloc->gpu_offset + (radeon_get_ib_value(p, idx+1) & 0xfffffffc) + ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32); ib[idx+1] = offset & 0xfffffffc; ib[idx+2] = (ib[idx+2] & 0xffffff00) | (upper_32_bits(offset) & 0xff); break; } case PACKET3_SET_CONFIG_REG: start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_OFFSET; end_reg = 4 * pkt->count + start_reg - 4; if ((start_reg < PACKET3_SET_CONFIG_REG_OFFSET) || (start_reg >= PACKET3_SET_CONFIG_REG_END) || (end_reg >= PACKET3_SET_CONFIG_REG_END)) { DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n"); return -EINVAL; } for (i = 0; i < pkt->count; i++) { reg = start_reg + (4 * i); r = r600_cs_check_reg(p, reg, idx+1+i); if (r) return r; } break; case PACKET3_SET_CONTEXT_REG: start_reg = (idx_value << 2) + PACKET3_SET_CONTEXT_REG_OFFSET; end_reg = 4 * pkt->count + start_reg - 4; if ((start_reg < PACKET3_SET_CONTEXT_REG_OFFSET) || (start_reg >= PACKET3_SET_CONTEXT_REG_END) || (end_reg >= PACKET3_SET_CONTEXT_REG_END)) { DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n"); return -EINVAL; } for (i = 0; i < pkt->count; i++) { reg = start_reg + (4 * i); r = r600_cs_check_reg(p, reg, idx+1+i); if (r) return r; } break; case PACKET3_SET_RESOURCE: if (pkt->count % 7) { DRM_ERROR("bad SET_RESOURCE\n"); return -EINVAL; } start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_OFFSET; end_reg = 4 * pkt->count + start_reg - 4; if ((start_reg < PACKET3_SET_RESOURCE_OFFSET) || (start_reg >= PACKET3_SET_RESOURCE_END) || (end_reg >= PACKET3_SET_RESOURCE_END)) { DRM_ERROR("bad SET_RESOURCE\n"); return -EINVAL; } for (i = 0; i < (pkt->count / 7); i++) { struct radeon_bo *texture, *mipmap; u32 size, offset, base_offset, mip_offset; switch (G__SQ_VTX_CONSTANT_TYPE(radeon_get_ib_value(p, idx+(i*7)+6+1))) { case SQ_TEX_VTX_VALID_TEXTURE: /* tex base */ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm); if (r) { DRM_ERROR("bad SET_RESOURCE\n"); return -EINVAL; } base_offset = 
(u32)((reloc->gpu_offset >> 8) & 0xffffffff);
				if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
					/* fold the bo's tiling mode into the resource word */
					if (reloc->tiling_flags & RADEON_TILING_MACRO)
						ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
					else if (reloc->tiling_flags & RADEON_TILING_MICRO)
						ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
				}
				texture = reloc->robj;
				/* tex mip base */
				r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
				if (r) {
					DRM_ERROR("bad SET_RESOURCE\n");
					return -EINVAL;
				}
				mip_offset = (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
				mipmap = reloc->robj;
				/* validate bo sizes against the decoded resource words */
				r = r600_check_texture_resource(p, idx+(i*7)+1,
								texture, mipmap,
								base_offset + radeon_get_ib_value(p, idx+1+(i*7)+2),
								mip_offset + radeon_get_ib_value(p, idx+1+(i*7)+3),
								reloc->tiling_flags);
				if (r)
					return r;
				ib[idx+1+(i*7)+2] += base_offset;
				ib[idx+1+(i*7)+3] += mip_offset;
				break;
			case SQ_TEX_VTX_VALID_BUFFER:
			{
				uint64_t offset64;
				/* vtx base */
				r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
				if (r) {
					DRM_ERROR("bad SET_RESOURCE\n");
					return -EINVAL;
				}
				offset = radeon_get_ib_value(p, idx+1+(i*7)+0);
				size = radeon_get_ib_value(p, idx+1+(i*7)+1) + 1;
				if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) {
					/* force size to size of the buffer */
					dev_warn(p->dev, "vbo resource seems too big (%d) for the bo (%ld)\n",
						 size + offset, radeon_bo_size(reloc->robj));
					ib[idx+1+(i*7)+1] = radeon_bo_size(reloc->robj) - offset;
				}

				offset64 = reloc->gpu_offset + offset;
				/* NOTE(review): every other SET_RESOURCE access in this
				 * function uses an (i*7) stride (r6xx resource slots are
				 * 7 dwords); the (i*8) stride below matches the 8-dword
				 * evergreen layout instead and looks suspicious -- it has
				 * shipped like this for a long time, so confirm against
				 * the hw docs before changing. */
				ib[idx+1+(i*8)+0] = offset64;
				ib[idx+1+(i*8)+2] = (ib[idx+1+(i*8)+2] & 0xffffff00) |
						    (upper_32_bits(offset64) & 0xff);
				break;
			}
			case SQ_TEX_VTX_INVALID_TEXTURE:
			case SQ_TEX_VTX_INVALID_BUFFER:
			default:
				DRM_ERROR("bad SET_RESOURCE\n");
				return -EINVAL;
			}
		}
		break;
	case PACKET3_SET_ALU_CONST:
		if (track->sq_config & DX9_CONSTS) {
			/* range-check the register window touched by this packet */
			start_reg = (idx_value << 2) + PACKET3_SET_ALU_CONST_OFFSET;
			end_reg = 4 * pkt->count + start_reg - 4;
			if ((start_reg < PACKET3_SET_ALU_CONST_OFFSET) ||
			    (start_reg >= PACKET3_SET_ALU_CONST_END) ||
			    (end_reg >=
PACKET3_SET_ALU_CONST_END)) { DRM_ERROR("bad SET_ALU_CONST\n"); return -EINVAL; } } break; case PACKET3_SET_BOOL_CONST: start_reg = (idx_value << 2) + PACKET3_SET_BOOL_CONST_OFFSET; end_reg = 4 * pkt->count + start_reg - 4; if ((start_reg < PACKET3_SET_BOOL_CONST_OFFSET) || (start_reg >= PACKET3_SET_BOOL_CONST_END) || (end_reg >= PACKET3_SET_BOOL_CONST_END)) { DRM_ERROR("bad SET_BOOL_CONST\n"); return -EINVAL; } break; case PACKET3_SET_LOOP_CONST: start_reg = (idx_value << 2) + PACKET3_SET_LOOP_CONST_OFFSET; end_reg = 4 * pkt->count + start_reg - 4; if ((start_reg < PACKET3_SET_LOOP_CONST_OFFSET) || (start_reg >= PACKET3_SET_LOOP_CONST_END) || (end_reg >= PACKET3_SET_LOOP_CONST_END)) { DRM_ERROR("bad SET_LOOP_CONST\n"); return -EINVAL; } break; case PACKET3_SET_CTL_CONST: start_reg = (idx_value << 2) + PACKET3_SET_CTL_CONST_OFFSET; end_reg = 4 * pkt->count + start_reg - 4; if ((start_reg < PACKET3_SET_CTL_CONST_OFFSET) || (start_reg >= PACKET3_SET_CTL_CONST_END) || (end_reg >= PACKET3_SET_CTL_CONST_END)) { DRM_ERROR("bad SET_CTL_CONST\n"); return -EINVAL; } break; case PACKET3_SET_SAMPLER: if (pkt->count % 3) { DRM_ERROR("bad SET_SAMPLER\n"); return -EINVAL; } start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_OFFSET; end_reg = 4 * pkt->count + start_reg - 4; if ((start_reg < PACKET3_SET_SAMPLER_OFFSET) || (start_reg >= PACKET3_SET_SAMPLER_END) || (end_reg >= PACKET3_SET_SAMPLER_END)) { DRM_ERROR("bad SET_SAMPLER\n"); return -EINVAL; } break; case PACKET3_STRMOUT_BASE_UPDATE: /* RS780 and RS880 also need this */ if (p->family < CHIP_RS780) { DRM_ERROR("STRMOUT_BASE_UPDATE only supported on 7xx\n"); return -EINVAL; } if (pkt->count != 1) { DRM_ERROR("bad STRMOUT_BASE_UPDATE packet count\n"); return -EINVAL; } if (idx_value > 3) { DRM_ERROR("bad STRMOUT_BASE_UPDATE index\n"); return -EINVAL; } { u64 offset; r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm); if (r) { DRM_ERROR("bad STRMOUT_BASE_UPDATE reloc\n"); return -EINVAL; } if (reloc->robj != 
track->vgt_strmout_bo[idx_value]) { DRM_ERROR("bad STRMOUT_BASE_UPDATE, bo does not match\n"); return -EINVAL; } offset = radeon_get_ib_value(p, idx+1) << 8; if (offset != track->vgt_strmout_bo_offset[idx_value]) { DRM_ERROR("bad STRMOUT_BASE_UPDATE, bo offset does not match: 0x%llx, 0x%x\n", offset, track->vgt_strmout_bo_offset[idx_value]); return -EINVAL; } if ((offset + 4) > radeon_bo_size(reloc->robj)) { DRM_ERROR("bad STRMOUT_BASE_UPDATE bo too small: 0x%llx, 0x%lx\n", offset + 4, radeon_bo_size(reloc->robj)); return -EINVAL; } ib[idx+1] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); } break; case PACKET3_SURFACE_BASE_UPDATE: if (p->family >= CHIP_RV770 || p->family == CHIP_R600) { DRM_ERROR("bad SURFACE_BASE_UPDATE\n"); return -EINVAL; } if (pkt->count) { DRM_ERROR("bad SURFACE_BASE_UPDATE\n"); return -EINVAL; } break; case PACKET3_STRMOUT_BUFFER_UPDATE: if (pkt->count != 4) { DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (invalid count)\n"); return -EINVAL; } /* Updating memory at DST_ADDRESS. */ if (idx_value & 0x1) { u64 offset; r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm); if (r) { DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing dst reloc)\n"); return -EINVAL; } offset = radeon_get_ib_value(p, idx+1); offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32; if ((offset + 4) > radeon_bo_size(reloc->robj)) { DRM_ERROR("bad STRMOUT_BUFFER_UPDATE dst bo too small: 0x%llx, 0x%lx\n", offset + 4, radeon_bo_size(reloc->robj)); return -EINVAL; } offset += reloc->gpu_offset; ib[idx+1] = offset; ib[idx+2] = upper_32_bits(offset) & 0xff; } /* Reading data from SRC_ADDRESS. 
*/ if (((idx_value >> 1) & 0x3) == 2) { u64 offset; r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm); if (r) { DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing src reloc)\n"); return -EINVAL; } offset = radeon_get_ib_value(p, idx+3); offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32; if ((offset + 4) > radeon_bo_size(reloc->robj)) { DRM_ERROR("bad STRMOUT_BUFFER_UPDATE src bo too small: 0x%llx, 0x%lx\n", offset + 4, radeon_bo_size(reloc->robj)); return -EINVAL; } offset += reloc->gpu_offset; ib[idx+3] = offset; ib[idx+4] = upper_32_bits(offset) & 0xff; } break; case PACKET3_MEM_WRITE: { u64 offset; if (pkt->count != 3) { DRM_ERROR("bad MEM_WRITE (invalid count)\n"); return -EINVAL; } r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm); if (r) { DRM_ERROR("bad MEM_WRITE (missing reloc)\n"); return -EINVAL; } offset = radeon_get_ib_value(p, idx+0); offset += ((u64)(radeon_get_ib_value(p, idx+1) & 0xff)) << 32UL; if (offset & 0x7) { DRM_ERROR("bad MEM_WRITE (address not qwords aligned)\n"); return -EINVAL; } if ((offset + 8) > radeon_bo_size(reloc->robj)) { DRM_ERROR("bad MEM_WRITE bo too small: 0x%llx, 0x%lx\n", offset + 8, radeon_bo_size(reloc->robj)); return -EINVAL; } offset += reloc->gpu_offset; ib[idx+0] = offset; ib[idx+1] = upper_32_bits(offset) & 0xff; break; } case PACKET3_COPY_DW: if (pkt->count != 4) { DRM_ERROR("bad COPY_DW (invalid count)\n"); return -EINVAL; } if (idx_value & 0x1) { u64 offset; /* SRC is memory. */ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm); if (r) { DRM_ERROR("bad COPY_DW (missing src reloc)\n"); return -EINVAL; } offset = radeon_get_ib_value(p, idx+1); offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32; if ((offset + 4) > radeon_bo_size(reloc->robj)) { DRM_ERROR("bad COPY_DW src bo too small: 0x%llx, 0x%lx\n", offset + 4, radeon_bo_size(reloc->robj)); return -EINVAL; } offset += reloc->gpu_offset; ib[idx+1] = offset; ib[idx+2] = upper_32_bits(offset) & 0xff; } else { /* SRC is a reg. 
*/ reg = radeon_get_ib_value(p, idx+1) << 2; if (!r600_is_safe_reg(p, reg, idx+1)) return -EINVAL; } if (idx_value & 0x2) { u64 offset; /* DST is memory. */ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm); if (r) { DRM_ERROR("bad COPY_DW (missing dst reloc)\n"); return -EINVAL; } offset = radeon_get_ib_value(p, idx+3); offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32; if ((offset + 4) > radeon_bo_size(reloc->robj)) { DRM_ERROR("bad COPY_DW dst bo too small: 0x%llx, 0x%lx\n", offset + 4, radeon_bo_size(reloc->robj)); return -EINVAL; } offset += reloc->gpu_offset; ib[idx+3] = offset; ib[idx+4] = upper_32_bits(offset) & 0xff; } else { /* DST is a reg. */ reg = radeon_get_ib_value(p, idx+3) << 2; if (!r600_is_safe_reg(p, reg, idx+3)) return -EINVAL; } break; case PACKET3_NOP: break; default: DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode); return -EINVAL; } return 0; } int r600_cs_parse(struct radeon_cs_parser *p) { struct radeon_cs_packet pkt; struct r600_cs_track *track; int r; if (p->track == NULL) { /* initialize tracker, we are in kms */ track = kzalloc(sizeof(*track), GFP_KERNEL); if (track == NULL) return -ENOMEM; r600_cs_track_init(track); if (p->rdev->family < CHIP_RV770) { track->npipes = p->rdev->config.r600.tiling_npipes; track->nbanks = p->rdev->config.r600.tiling_nbanks; track->group_size = p->rdev->config.r600.tiling_group_size; } else if (p->rdev->family <= CHIP_RV740) { track->npipes = p->rdev->config.rv770.tiling_npipes; track->nbanks = p->rdev->config.rv770.tiling_nbanks; track->group_size = p->rdev->config.rv770.tiling_group_size; } p->track = track; } do { r = radeon_cs_packet_parse(p, &pkt, p->idx); if (r) { kfree(p->track); p->track = NULL; return r; } p->idx += pkt.count + 2; switch (pkt.type) { case RADEON_PACKET_TYPE0: r = r600_cs_parse_packet0(p, &pkt); break; case RADEON_PACKET_TYPE2: break; case RADEON_PACKET_TYPE3: r = r600_packet3_check(p, &pkt); break; default: DRM_ERROR("Unknown packet type %d !\n", 
pkt.type); kfree(p->track); p->track = NULL; return -EINVAL; } if (r) { kfree(p->track); p->track = NULL; return r; } } while (p->idx < p->chunk_ib->length_dw); #if 0 for (r = 0; r < p->ib.length_dw; r++) { printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]); mdelay(1); } #endif kfree(p->track); p->track = NULL; return 0; } #ifdef CONFIG_DRM_RADEON_UMS /** * cs_parser_fini() - clean parser states * @parser: parser structure holding parsing context. * @error: error number * * If error is set than unvalidate buffer, otherwise just free memory * used by parsing context. **/ static void r600_cs_parser_fini(struct radeon_cs_parser *parser, int error) { unsigned i; kfree(parser->relocs); for (i = 0; i < parser->nchunks; i++) drm_free_large(parser->chunks[i].kdata); kfree(parser->chunks); kfree(parser->chunks_array); } static int r600_cs_parser_relocs_legacy(struct radeon_cs_parser *p) { if (p->chunk_relocs == NULL) { return 0; } p->relocs = kzalloc(sizeof(struct radeon_bo_list), GFP_KERNEL); if (p->relocs == NULL) { return -ENOMEM; } return 0; } int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp, unsigned family, u32 *ib, int *l) { struct radeon_cs_parser parser; struct radeon_cs_chunk *ib_chunk; struct r600_cs_track *track; int r; /* initialize tracker */ track = kzalloc(sizeof(*track), GFP_KERNEL); if (track == NULL) return -ENOMEM; r600_cs_track_init(track); r600_cs_legacy_get_tiling_conf(dev, &track->npipes, &track->nbanks, &track->group_size); /* initialize parser */ memset(&parser, 0, sizeof(struct radeon_cs_parser)); parser.filp = filp; parser.dev = &dev->pdev->dev; parser.rdev = NULL; parser.family = family; parser.track = track; parser.ib.ptr = ib; r = radeon_cs_parser_init(&parser, data); if (r) { DRM_ERROR("Failed to initialize parser !\n"); r600_cs_parser_fini(&parser, r); return r; } r = r600_cs_parser_relocs_legacy(&parser); if (r) { DRM_ERROR("Failed to parse relocation !\n"); r600_cs_parser_fini(&parser, r); return r; } /* Copy 
the packet into the IB, the parser will read from the * input memory (cached) and write to the IB (which can be * uncached). */ ib_chunk = parser.chunk_ib; parser.ib.length_dw = ib_chunk->length_dw; *l = parser.ib.length_dw; if (copy_from_user(ib, ib_chunk->user_ptr, ib_chunk->length_dw * 4)) { r = -EFAULT; r600_cs_parser_fini(&parser, r); return r; } r = r600_cs_parse(&parser); if (r) { DRM_ERROR("Invalid command stream !\n"); r600_cs_parser_fini(&parser, r); return r; } r600_cs_parser_fini(&parser, r); return r; } void r600_cs_legacy_init(void) { r600_nomm = 1; } #endif /* * DMA */ /** * r600_dma_cs_next_reloc() - parse next reloc * @p: parser structure holding parsing context. * @cs_reloc: reloc informations * * Return the next reloc, do bo validation and compute * GPU offset using the provided start. **/ int r600_dma_cs_next_reloc(struct radeon_cs_parser *p, struct radeon_bo_list **cs_reloc) { struct radeon_cs_chunk *relocs_chunk; unsigned idx; *cs_reloc = NULL; if (p->chunk_relocs == NULL) { DRM_ERROR("No relocation chunk !\n"); return -EINVAL; } relocs_chunk = p->chunk_relocs; idx = p->dma_reloc_idx; if (idx >= p->nrelocs) { DRM_ERROR("Relocs at %d after relocations chunk end %d !\n", idx, p->nrelocs); return -EINVAL; } *cs_reloc = &p->relocs[idx]; p->dma_reloc_idx++; return 0; } #define GET_DMA_CMD(h) (((h) & 0xf0000000) >> 28) #define GET_DMA_COUNT(h) ((h) & 0x0000ffff) #define GET_DMA_T(h) (((h) & 0x00800000) >> 23) /** * r600_dma_cs_parse() - parse the DMA IB * @p: parser structure holding parsing context. * * Parses the DMA IB from the CS ioctl and updates * the GPU addresses based on the reloc information and * checks for errors. (R6xx-R7xx) * Returns 0 for success and an error on failure. 
**/ int r600_dma_cs_parse(struct radeon_cs_parser *p) { struct radeon_cs_chunk *ib_chunk = p->chunk_ib; struct radeon_bo_list *src_reloc, *dst_reloc; u32 header, cmd, count, tiled; volatile u32 *ib = p->ib.ptr; u32 idx, idx_value; u64 src_offset, dst_offset; int r; do { if (p->idx >= ib_chunk->length_dw) { DRM_ERROR("Can not parse packet at %d after CS end %d !\n", p->idx, ib_chunk->length_dw); return -EINVAL; } idx = p->idx; header = radeon_get_ib_value(p, idx); cmd = GET_DMA_CMD(header); count = GET_DMA_COUNT(header); tiled = GET_DMA_T(header); switch (cmd) { case DMA_PACKET_WRITE: r = r600_dma_cs_next_reloc(p, &dst_reloc); if (r) { DRM_ERROR("bad DMA_PACKET_WRITE\n"); return -EINVAL; } if (tiled) { dst_offset = radeon_get_ib_value(p, idx+1); dst_offset <<= 8; ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8); p->idx += count + 5; } else { dst_offset = radeon_get_ib_value(p, idx+1); dst_offset |= ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32; ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc); ib[idx+2] += upper_32_bits(dst_reloc->gpu_offset) & 0xff; p->idx += count + 3; } if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) { dev_warn(p->dev, "DMA write buffer too small (%llu %lu)\n", dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); return -EINVAL; } break; case DMA_PACKET_COPY: r = r600_dma_cs_next_reloc(p, &src_reloc); if (r) { DRM_ERROR("bad DMA_PACKET_COPY\n"); return -EINVAL; } r = r600_dma_cs_next_reloc(p, &dst_reloc); if (r) { DRM_ERROR("bad DMA_PACKET_COPY\n"); return -EINVAL; } if (tiled) { idx_value = radeon_get_ib_value(p, idx + 2); /* detile bit */ if (idx_value & (1 << 31)) { /* tiled src, linear dst */ src_offset = radeon_get_ib_value(p, idx+1); src_offset <<= 8; ib[idx+1] += (u32)(src_reloc->gpu_offset >> 8); dst_offset = radeon_get_ib_value(p, idx+5); dst_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32; ib[idx+5] += (u32)(dst_reloc->gpu_offset & 0xfffffffc); ib[idx+6] += 
upper_32_bits(dst_reloc->gpu_offset) & 0xff; } else { /* linear src, tiled dst */ src_offset = radeon_get_ib_value(p, idx+5); src_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32; ib[idx+5] += (u32)(src_reloc->gpu_offset & 0xfffffffc); ib[idx+6] += upper_32_bits(src_reloc->gpu_offset) & 0xff; dst_offset = radeon_get_ib_value(p, idx+1); dst_offset <<= 8; ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8); } p->idx += 7; } else { if (p->family >= CHIP_RV770) { src_offset = radeon_get_ib_value(p, idx+2); src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32; dst_offset = radeon_get_ib_value(p, idx+1); dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32; ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc); ib[idx+2] += (u32)(src_reloc->gpu_offset & 0xfffffffc); ib[idx+3] += upper_32_bits(dst_reloc->gpu_offset) & 0xff; ib[idx+4] += upper_32_bits(src_reloc->gpu_offset) & 0xff; p->idx += 5; } else { src_offset = radeon_get_ib_value(p, idx+2); src_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32; dst_offset = radeon_get_ib_value(p, idx+1); dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff0000)) << 16; ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc); ib[idx+2] += (u32)(src_reloc->gpu_offset & 0xfffffffc); ib[idx+3] += upper_32_bits(src_reloc->gpu_offset) & 0xff; ib[idx+3] += (upper_32_bits(dst_reloc->gpu_offset) & 0xff) << 16; p->idx += 4; } } if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) { dev_warn(p->dev, "DMA copy src buffer too small (%llu %lu)\n", src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); return -EINVAL; } if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) { dev_warn(p->dev, "DMA write dst buffer too small (%llu %lu)\n", dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); return -EINVAL; } break; case DMA_PACKET_CONSTANT_FILL: if (p->family < CHIP_RV770) { DRM_ERROR("Constant Fill is 7xx only !\n"); return -EINVAL; } r = 
r600_dma_cs_next_reloc(p, &dst_reloc); if (r) { DRM_ERROR("bad DMA_PACKET_WRITE\n"); return -EINVAL; } dst_offset = radeon_get_ib_value(p, idx+1); dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0x00ff0000)) << 16; if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) { dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n", dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); return -EINVAL; } ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc); ib[idx+3] += (upper_32_bits(dst_reloc->gpu_offset) << 16) & 0x00ff0000; p->idx += 4; break; case DMA_PACKET_NOP: p->idx += 1; break; default: DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx); return -EINVAL; } } while (p->idx < p->chunk_ib->length_dw); #if 0 for (r = 0; r < p->ib->length_dw; r++) { printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]); mdelay(1); } #endif return 0; }
gpl-2.0
czechop/kernel_milestone2
arch/powerpc/oprofile/common.c
1505
6411
/* * PPC 64 oprofile support: * Copyright (C) 2004 Anton Blanchard <anton@au.ibm.com>, IBM * PPC 32 oprofile support: (based on PPC 64 support) * Copyright (C) Freescale Semiconductor, Inc 2004 * Author: Andy Fleming * * Based on alpha version. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/oprofile.h> #include <linux/init.h> #include <linux/smp.h> #include <linux/errno.h> #include <asm/ptrace.h> #include <asm/system.h> #include <asm/pmc.h> #include <asm/cputable.h> #include <asm/oprofile_impl.h> #include <asm/firmware.h> static struct op_powerpc_model *model; static struct op_counter_config ctr[OP_MAX_COUNTER]; static struct op_system_config sys; static int op_per_cpu_rc; static void op_handle_interrupt(struct pt_regs *regs) { model->handle_interrupt(regs, ctr); } static void op_powerpc_cpu_setup(void *dummy) { int ret; ret = model->cpu_setup(ctr); if (ret != 0) op_per_cpu_rc = ret; } static int op_powerpc_setup(void) { int err; op_per_cpu_rc = 0; /* Grab the hardware */ err = reserve_pmc_hardware(op_handle_interrupt); if (err) return err; /* Pre-compute the values to stuff in the hardware registers. */ op_per_cpu_rc = model->reg_setup(ctr, &sys, model->num_counters); if (op_per_cpu_rc) goto out; /* Configure the registers on all cpus. If an error occurs on one * of the cpus, op_per_cpu_rc will be set to the error */ on_each_cpu(op_powerpc_cpu_setup, NULL, 1); out: if (op_per_cpu_rc) { /* error on setup release the performance counter hardware */ release_pmc_hardware(); } return op_per_cpu_rc; } static void op_powerpc_shutdown(void) { release_pmc_hardware(); } static void op_powerpc_cpu_start(void *dummy) { /* If any of the cpus have return an error, set the * global flag to the error so it can be returned * to the generic OProfile caller. 
*/ int ret; ret = model->start(ctr); if (ret != 0) op_per_cpu_rc = ret; } static int op_powerpc_start(void) { op_per_cpu_rc = 0; if (model->global_start) return model->global_start(ctr); if (model->start) { on_each_cpu(op_powerpc_cpu_start, NULL, 1); return op_per_cpu_rc; } return -EIO; /* No start function is defined for this power architecture */ } static inline void op_powerpc_cpu_stop(void *dummy) { model->stop(); } static void op_powerpc_stop(void) { if (model->stop) on_each_cpu(op_powerpc_cpu_stop, NULL, 1); if (model->global_stop) model->global_stop(); } static int op_powerpc_create_files(struct super_block *sb, struct dentry *root) { int i; #ifdef CONFIG_PPC64 /* * There is one mmcr0, mmcr1 and mmcra for setting the events for * all of the counters. */ oprofilefs_create_ulong(sb, root, "mmcr0", &sys.mmcr0); oprofilefs_create_ulong(sb, root, "mmcr1", &sys.mmcr1); oprofilefs_create_ulong(sb, root, "mmcra", &sys.mmcra); #ifdef CONFIG_OPROFILE_CELL /* create a file the user tool can check to see what level of profiling * support exits with this kernel. Initialize bit mask to indicate * what support the kernel has: * bit 0 - Supports SPU event profiling in addition to PPU * event and cycles; and SPU cycle profiling * bits 1-31 - Currently unused. * * If the file does not exist, then the kernel only supports SPU * cycle profiling, PPU event and cycle profiling. */ oprofilefs_create_ulong(sb, root, "cell_support", &sys.cell_support); sys.cell_support = 0x1; /* Note, the user OProfile tool must check * that this bit is set before attempting to * user SPU event profiling. Older kernels * will not have this file, hence the user * tool is not allowed to do SPU event * profiling on older kernels. Older kernels * will accept SPU events but collected data * is garbage. 
*/ #endif #endif for (i = 0; i < model->num_counters; ++i) { struct dentry *dir; char buf[4]; snprintf(buf, sizeof buf, "%d", i); dir = oprofilefs_mkdir(sb, root, buf); oprofilefs_create_ulong(sb, dir, "enabled", &ctr[i].enabled); oprofilefs_create_ulong(sb, dir, "event", &ctr[i].event); oprofilefs_create_ulong(sb, dir, "count", &ctr[i].count); /* * Classic PowerPC doesn't support per-counter * control like this, but the options are * expected, so they remain. For Freescale * Book-E style performance monitors, we do * support them. */ oprofilefs_create_ulong(sb, dir, "kernel", &ctr[i].kernel); oprofilefs_create_ulong(sb, dir, "user", &ctr[i].user); oprofilefs_create_ulong(sb, dir, "unit_mask", &ctr[i].unit_mask); } oprofilefs_create_ulong(sb, root, "enable_kernel", &sys.enable_kernel); oprofilefs_create_ulong(sb, root, "enable_user", &sys.enable_user); /* Default to tracing both kernel and user */ sys.enable_kernel = 1; sys.enable_user = 1; return 0; } int __init oprofile_arch_init(struct oprofile_operations *ops) { if (!cur_cpu_spec->oprofile_cpu_type) return -ENODEV; if (firmware_has_feature(FW_FEATURE_ISERIES)) return -ENODEV; switch (cur_cpu_spec->oprofile_type) { #ifdef CONFIG_PPC64 #ifdef CONFIG_OPROFILE_CELL case PPC_OPROFILE_CELL: if (firmware_has_feature(FW_FEATURE_LPAR)) return -ENODEV; model = &op_model_cell; ops->sync_start = model->sync_start; ops->sync_stop = model->sync_stop; break; #endif case PPC_OPROFILE_RS64: model = &op_model_rs64; break; case PPC_OPROFILE_POWER4: model = &op_model_power4; break; case PPC_OPROFILE_PA6T: model = &op_model_pa6t; break; #endif #ifdef CONFIG_6xx case PPC_OPROFILE_G4: model = &op_model_7450; break; #endif #if defined(CONFIG_FSL_EMB_PERFMON) case PPC_OPROFILE_FSL_EMB: model = &op_model_fsl_emb; break; #endif default: return -ENODEV; } model->num_counters = cur_cpu_spec->num_pmcs; ops->cpu_type = cur_cpu_spec->oprofile_cpu_type; ops->create_files = op_powerpc_create_files; ops->setup = op_powerpc_setup; ops->shutdown = 
op_powerpc_shutdown; ops->start = op_powerpc_start; ops->stop = op_powerpc_stop; ops->backtrace = op_powerpc_backtrace; printk(KERN_DEBUG "oprofile: using %s performance monitoring.\n", ops->cpu_type); return 0; } void oprofile_arch_exit(void) { }
gpl-2.0
attn1/cm-kernel-vivo-2.6.35
fs/proc/devices.c
1761
1434
#include <linux/fs.h> #include <linux/init.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> static int devinfo_show(struct seq_file *f, void *v) { int i = *(loff_t *) v; if (i < CHRDEV_MAJOR_HASH_SIZE) { if (i == 0) seq_printf(f, "Character devices:\n"); chrdev_show(f, i); } #ifdef CONFIG_BLOCK else { i -= CHRDEV_MAJOR_HASH_SIZE; if (i == 0) seq_printf(f, "\nBlock devices:\n"); blkdev_show(f, i); } #endif return 0; } static void *devinfo_start(struct seq_file *f, loff_t *pos) { if (*pos < (BLKDEV_MAJOR_HASH_SIZE + CHRDEV_MAJOR_HASH_SIZE)) return pos; return NULL; } static void *devinfo_next(struct seq_file *f, void *v, loff_t *pos) { (*pos)++; if (*pos >= (BLKDEV_MAJOR_HASH_SIZE + CHRDEV_MAJOR_HASH_SIZE)) return NULL; return pos; } static void devinfo_stop(struct seq_file *f, void *v) { /* Nothing to do */ } static const struct seq_operations devinfo_ops = { .start = devinfo_start, .next = devinfo_next, .stop = devinfo_stop, .show = devinfo_show }; static int devinfo_open(struct inode *inode, struct file *filp) { return seq_open(filp, &devinfo_ops); } static const struct file_operations proc_devinfo_operations = { .open = devinfo_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; static int __init proc_devices_init(void) { proc_create("devices", 0, NULL, &proc_devinfo_operations); return 0; } module_init(proc_devices_init);
gpl-2.0
turtlepa/android_kernel_samsung_aries-galaxys4gmtd
arch/arm/mach-omap2/board-igep0020.c
1761
18637
/* * Copyright (C) 2009 Integration Software and Electronic Engineering. * * Modified from mach-omap2/board-generic.c * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/clk.h> #include <linux/io.h> #include <linux/gpio.h> #include <linux/interrupt.h> #include <linux/input.h> #include <linux/regulator/machine.h> #include <linux/regulator/fixed.h> #include <linux/i2c/twl.h> #include <linux/mmc/host.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <plat/board.h> #include <plat/common.h> #include <plat/gpmc.h> #include <plat/usb.h> #include <video/omapdss.h> #include <video/omap-panel-generic-dpi.h> #include <plat/onenand.h> #include "mux.h" #include "hsmmc.h" #include "sdram-numonyx-m65kxxxxam.h" #include "common-board-devices.h" #define IGEP2_SMSC911X_CS 5 #define IGEP2_SMSC911X_GPIO 176 #define IGEP2_GPIO_USBH_NRESET 24 #define IGEP2_GPIO_LED0_GREEN 26 #define IGEP2_GPIO_LED0_RED 27 #define IGEP2_GPIO_LED1_RED 28 #define IGEP2_GPIO_DVI_PUP 170 #define IGEP2_RB_GPIO_WIFI_NPD 94 #define IGEP2_RB_GPIO_WIFI_NRESET 95 #define IGEP2_RB_GPIO_BT_NRESET 137 #define IGEP2_RC_GPIO_WIFI_NPD 138 #define IGEP2_RC_GPIO_WIFI_NRESET 139 #define IGEP2_RC_GPIO_BT_NRESET 137 #define IGEP3_GPIO_LED0_GREEN 54 #define IGEP3_GPIO_LED0_RED 53 #define IGEP3_GPIO_LED1_RED 16 #define IGEP3_GPIO_USBH_NRESET 183 /* * IGEP2 Hardware Revision Table * * -------------------------------------------------------------------------- * | Id. | Hw Rev. 
| HW0 (28) | WIFI_NPD | WIFI_NRESET | BT_NRESET | * -------------------------------------------------------------------------- * | 0 | B | high | gpio94 | gpio95 | - | * | 0 | B/C (B-compatible) | high | gpio94 | gpio95 | gpio137 | * | 1 | C | low | gpio138 | gpio139 | gpio137 | * -------------------------------------------------------------------------- */ #define IGEP2_BOARD_HWREV_B 0 #define IGEP2_BOARD_HWREV_C 1 #define IGEP3_BOARD_HWREV 2 static u8 hwrev; static void __init igep2_get_revision(void) { u8 ret; if (machine_is_igep0030()) { hwrev = IGEP3_BOARD_HWREV; return; } omap_mux_init_gpio(IGEP2_GPIO_LED1_RED, OMAP_PIN_INPUT); if (gpio_request_one(IGEP2_GPIO_LED1_RED, GPIOF_IN, "GPIO_HW0_REV")) { pr_warning("IGEP2: Could not obtain gpio GPIO_HW0_REV\n"); pr_err("IGEP2: Unknown Hardware Revision\n"); return; } ret = gpio_get_value(IGEP2_GPIO_LED1_RED); if (ret == 0) { pr_info("IGEP2: Hardware Revision C (B-NON compatible)\n"); hwrev = IGEP2_BOARD_HWREV_C; } else if (ret == 1) { pr_info("IGEP2: Hardware Revision B/C (B compatible)\n"); hwrev = IGEP2_BOARD_HWREV_B; } else { pr_err("IGEP2: Unknown Hardware Revision\n"); hwrev = -1; } gpio_free(IGEP2_GPIO_LED1_RED); } #if defined(CONFIG_MTD_ONENAND_OMAP2) || \ defined(CONFIG_MTD_ONENAND_OMAP2_MODULE) #define ONENAND_MAP 0x20000000 /* NAND04GR4E1A ( x2 Flash built-in COMBO POP MEMORY ) * Since the device is equipped with two DataRAMs, and two-plane NAND * Flash memory array, these two component enables simultaneous program * of 4KiB. Plane1 has only even blocks such as block0, block2, block4 * while Plane2 has only odd blocks such as block1, block3, block5. 
* So MTD regards it as 4KiB page size and 256KiB block size 64*(2*2048) */ static struct mtd_partition igep_onenand_partitions[] = { { .name = "X-Loader", .offset = 0, .size = 2 * (64*(2*2048)) }, { .name = "U-Boot", .offset = MTDPART_OFS_APPEND, .size = 6 * (64*(2*2048)), }, { .name = "Environment", .offset = MTDPART_OFS_APPEND, .size = 2 * (64*(2*2048)), }, { .name = "Kernel", .offset = MTDPART_OFS_APPEND, .size = 12 * (64*(2*2048)), }, { .name = "File System", .offset = MTDPART_OFS_APPEND, .size = MTDPART_SIZ_FULL, }, }; static struct omap_onenand_platform_data igep_onenand_data = { .parts = igep_onenand_partitions, .nr_parts = ARRAY_SIZE(igep_onenand_partitions), .dma_channel = -1, /* disable DMA in OMAP OneNAND driver */ }; static struct platform_device igep_onenand_device = { .name = "omap2-onenand", .id = -1, .dev = { .platform_data = &igep_onenand_data, }, }; static void __init igep_flash_init(void) { u8 cs = 0; u8 onenandcs = GPMC_CS_NUM + 1; for (cs = 0; cs < GPMC_CS_NUM; cs++) { u32 ret; ret = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG1); /* Check if NAND/oneNAND is configured */ if ((ret & 0xC00) == 0x800) /* NAND found */ pr_err("IGEP: Unsupported NAND found\n"); else { ret = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7); if ((ret & 0x3F) == (ONENAND_MAP >> 24)) /* ONENAND found */ onenandcs = cs; } } if (onenandcs > GPMC_CS_NUM) { pr_err("IGEP: Unable to find configuration in GPMC\n"); return; } igep_onenand_data.cs = onenandcs; if (platform_device_register(&igep_onenand_device) < 0) pr_err("IGEP: Unable to register OneNAND device\n"); } #else static void __init igep_flash_init(void) {} #endif #if defined(CONFIG_SMSC911X) || defined(CONFIG_SMSC911X_MODULE) #include <linux/smsc911x.h> #include <plat/gpmc-smsc911x.h> static struct omap_smsc911x_platform_data smsc911x_cfg = { .cs = IGEP2_SMSC911X_CS, .gpio_irq = IGEP2_SMSC911X_GPIO, .gpio_reset = -EINVAL, .flags = SMSC911X_USE_32BIT | SMSC911X_SAVE_MAC_ADDRESS, }; static inline void __init igep2_init_smsc911x(void) { 
gpmc_smsc911x_init(&smsc911x_cfg); } #else static inline void __init igep2_init_smsc911x(void) { } #endif static struct regulator_consumer_supply igep_vmmc1_supply = REGULATOR_SUPPLY("vmmc", "omap_hsmmc.0"); /* VMMC1 for OMAP VDD_MMC1 (i/o) and MMC1 card */ static struct regulator_init_data igep_vmmc1 = { .constraints = { .min_uV = 1850000, .max_uV = 3150000, .valid_modes_mask = REGULATOR_MODE_NORMAL | REGULATOR_MODE_STANDBY, .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE | REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_STATUS, }, .num_consumer_supplies = 1, .consumer_supplies = &igep_vmmc1_supply, }; static struct regulator_consumer_supply igep_vio_supply = REGULATOR_SUPPLY("vmmc_aux", "omap_hsmmc.1"); static struct regulator_init_data igep_vio = { .constraints = { .min_uV = 1800000, .max_uV = 1800000, .apply_uV = 1, .valid_modes_mask = REGULATOR_MODE_NORMAL | REGULATOR_MODE_STANDBY, .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE | REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_STATUS, }, .num_consumer_supplies = 1, .consumer_supplies = &igep_vio_supply, }; static struct regulator_consumer_supply igep_vmmc2_supply = REGULATOR_SUPPLY("vmmc", "omap_hsmmc.1"); static struct regulator_init_data igep_vmmc2 = { .constraints = { .valid_modes_mask = REGULATOR_MODE_NORMAL, .always_on = 1, }, .num_consumer_supplies = 1, .consumer_supplies = &igep_vmmc2_supply, }; static struct fixed_voltage_config igep_vwlan = { .supply_name = "vwlan", .microvolts = 3300000, .gpio = -EINVAL, .enabled_at_boot = 1, .init_data = &igep_vmmc2, }; static struct platform_device igep_vwlan_device = { .name = "reg-fixed-voltage", .id = 0, .dev = { .platform_data = &igep_vwlan, }, }; static struct omap2_hsmmc_info mmc[] = { { .mmc = 1, .caps = MMC_CAP_4_BIT_DATA, .gpio_cd = -EINVAL, .gpio_wp = -EINVAL, }, #if defined(CONFIG_LIBERTAS_SDIO) || defined(CONFIG_LIBERTAS_SDIO_MODULE) { .mmc = 2, .caps = MMC_CAP_4_BIT_DATA, .gpio_cd = -EINVAL, .gpio_wp = -EINVAL, }, #endif {} /* Terminator */ }; #if defined(CONFIG_LEDS_GPIO) 
|| defined(CONFIG_LEDS_GPIO_MODULE) #include <linux/leds.h> static struct gpio_led igep_gpio_leds[] = { [0] = { .name = "gpio-led:red:d0", .default_trigger = "default-off" }, [1] = { .name = "gpio-led:green:d0", .default_trigger = "default-off", }, [2] = { .name = "gpio-led:red:d1", .default_trigger = "default-off", }, [3] = { .name = "gpio-led:green:d1", .default_trigger = "heartbeat", .gpio = -EINVAL, /* gets replaced */ .active_low = 1, }, }; static struct gpio_led_platform_data igep_led_pdata = { .leds = igep_gpio_leds, .num_leds = ARRAY_SIZE(igep_gpio_leds), }; static struct platform_device igep_led_device = { .name = "leds-gpio", .id = -1, .dev = { .platform_data = &igep_led_pdata, }, }; static void __init igep_leds_init(void) { if (machine_is_igep0020()) { igep_gpio_leds[0].gpio = IGEP2_GPIO_LED0_RED; igep_gpio_leds[1].gpio = IGEP2_GPIO_LED0_GREEN; igep_gpio_leds[2].gpio = IGEP2_GPIO_LED1_RED; } else { igep_gpio_leds[0].gpio = IGEP3_GPIO_LED0_RED; igep_gpio_leds[1].gpio = IGEP3_GPIO_LED0_GREEN; igep_gpio_leds[2].gpio = IGEP3_GPIO_LED1_RED; } platform_device_register(&igep_led_device); } #else static struct gpio igep_gpio_leds[] __initdata = { { -EINVAL, GPIOF_OUT_INIT_LOW, "gpio-led:red:d0" }, { -EINVAL, GPIOF_OUT_INIT_LOW, "gpio-led:green:d0" }, { -EINVAL, GPIOF_OUT_INIT_LOW, "gpio-led:red:d1" }, }; static inline void igep_leds_init(void) { int i; if (machine_is_igep0020()) { igep_gpio_leds[0].gpio = IGEP2_GPIO_LED0_RED; igep_gpio_leds[1].gpio = IGEP2_GPIO_LED0_GREEN; igep_gpio_leds[2].gpio = IGEP2_GPIO_LED1_RED; } else { igep_gpio_leds[0].gpio = IGEP3_GPIO_LED0_RED; igep_gpio_leds[1].gpio = IGEP3_GPIO_LED0_GREEN; igep_gpio_leds[2].gpio = IGEP3_GPIO_LED1_RED; } if (gpio_request_array(igep_gpio_leds, ARRAY_SIZE(igep_gpio_leds))) { pr_warning("IGEP v2: Could not obtain leds gpios\n"); return; } for (i = 0; i < ARRAY_SIZE(igep_gpio_leds); i++) gpio_export(igep_gpio_leds[i].gpio, 0); } #endif static struct gpio igep2_twl_gpios[] = { { -EINVAL, GPIOF_IN, 
"GPIO_EHCI_NOC" }, { -EINVAL, GPIOF_OUT_INIT_LOW, "GPIO_USBH_CPEN" }, }; static int igep_twl_gpio_setup(struct device *dev, unsigned gpio, unsigned ngpio) { int ret; /* gpio + 0 is "mmc0_cd" (input/IRQ) */ mmc[0].gpio_cd = gpio + 0; omap2_hsmmc_init(mmc); /* TWL4030_GPIO_MAX + 1 == ledB (out, active low LED) */ #if !defined(CONFIG_LEDS_GPIO) && !defined(CONFIG_LEDS_GPIO_MODULE) ret = gpio_request_one(gpio + TWL4030_GPIO_MAX + 1, GPIOF_OUT_INIT_HIGH, "gpio-led:green:d1"); if (ret == 0) gpio_export(gpio + TWL4030_GPIO_MAX + 1, 0); else pr_warning("IGEP: Could not obtain gpio GPIO_LED1_GREEN\n"); #else igep_gpio_leds[3].gpio = gpio + TWL4030_GPIO_MAX + 1; #endif if (machine_is_igep0030()) return 0; /* * REVISIT: need ehci-omap hooks for external VBUS * power switch and overcurrent detect */ igep2_twl_gpios[0].gpio = gpio + 1; /* TWL4030_GPIO_MAX + 0 == ledA, GPIO_USBH_CPEN (out, active low) */ igep2_twl_gpios[1].gpio = gpio + TWL4030_GPIO_MAX; ret = gpio_request_array(igep2_twl_gpios, ARRAY_SIZE(igep2_twl_gpios)); if (ret < 0) pr_err("IGEP2: Could not obtain gpio for USBH_CPEN"); return 0; }; static struct twl4030_gpio_platform_data igep_twl4030_gpio_pdata = { .gpio_base = OMAP_MAX_GPIO_LINES, .irq_base = TWL4030_GPIO_IRQ_BASE, .irq_end = TWL4030_GPIO_IRQ_END, .use_leds = true, .setup = igep_twl_gpio_setup, }; static struct twl4030_usb_data igep_usb_data = { .usb_mode = T2_USB_MODE_ULPI, }; static int igep2_enable_dvi(struct omap_dss_device *dssdev) { gpio_direction_output(IGEP2_GPIO_DVI_PUP, 1); return 0; } static void igep2_disable_dvi(struct omap_dss_device *dssdev) { gpio_direction_output(IGEP2_GPIO_DVI_PUP, 0); } static struct panel_generic_dpi_data dvi_panel = { .name = "generic", .platform_enable = igep2_enable_dvi, .platform_disable = igep2_disable_dvi, }; static struct omap_dss_device igep2_dvi_device = { .type = OMAP_DISPLAY_TYPE_DPI, .name = "dvi", .driver_name = "generic_dpi_panel", .data = &dvi_panel, .phy.dpi.data_lines = 24, }; static struct 
omap_dss_device *igep2_dss_devices[] = { &igep2_dvi_device }; static struct omap_dss_board_info igep2_dss_data = { .num_devices = ARRAY_SIZE(igep2_dss_devices), .devices = igep2_dss_devices, .default_device = &igep2_dvi_device, }; static struct regulator_consumer_supply igep2_vpll2_supplies[] = { REGULATOR_SUPPLY("vdds_dsi", "omapdss"), REGULATOR_SUPPLY("vdds_dsi", "omapdss_dsi1"), }; static struct regulator_init_data igep2_vpll2 = { .constraints = { .name = "VDVI", .min_uV = 1800000, .max_uV = 1800000, .apply_uV = true, .valid_modes_mask = REGULATOR_MODE_NORMAL | REGULATOR_MODE_STANDBY, .valid_ops_mask = REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_STATUS, }, .num_consumer_supplies = ARRAY_SIZE(igep2_vpll2_supplies), .consumer_supplies = igep2_vpll2_supplies, }; static void __init igep2_display_init(void) { int err = gpio_request_one(IGEP2_GPIO_DVI_PUP, GPIOF_OUT_INIT_HIGH, "GPIO_DVI_PUP"); if (err) pr_err("IGEP v2: Could not obtain gpio GPIO_DVI_PUP\n"); } static struct platform_device *igep_devices[] __initdata = { &igep_vwlan_device, }; static void __init igep_init_early(void) { omap2_init_common_infrastructure(); omap2_init_common_devices(m65kxxxxam_sdrc_params, m65kxxxxam_sdrc_params); } static struct twl4030_codec_audio_data igep2_audio_data; static struct twl4030_codec_data igep2_codec_data = { .audio_mclk = 26000000, .audio = &igep2_audio_data, }; static int igep2_keymap[] = { KEY(0, 0, KEY_LEFT), KEY(0, 1, KEY_RIGHT), KEY(0, 2, KEY_A), KEY(0, 3, KEY_B), KEY(1, 0, KEY_DOWN), KEY(1, 1, KEY_UP), KEY(1, 2, KEY_E), KEY(1, 3, KEY_F), KEY(2, 0, KEY_ENTER), KEY(2, 1, KEY_I), KEY(2, 2, KEY_J), KEY(2, 3, KEY_K), KEY(3, 0, KEY_M), KEY(3, 1, KEY_N), KEY(3, 2, KEY_O), KEY(3, 3, KEY_P) }; static struct matrix_keymap_data igep2_keymap_data = { .keymap = igep2_keymap, .keymap_size = ARRAY_SIZE(igep2_keymap), }; static struct twl4030_keypad_data igep2_keypad_pdata = { .keymap_data = &igep2_keymap_data, .rows = 4, .cols = 4, .rep = 1, }; static struct twl4030_platform_data 
igep_twldata = { .irq_base = TWL4030_IRQ_BASE, .irq_end = TWL4030_IRQ_END, /* platform_data for children goes here */ .usb = &igep_usb_data, .gpio = &igep_twl4030_gpio_pdata, .vmmc1 = &igep_vmmc1, .vio = &igep_vio, }; static struct i2c_board_info __initdata igep2_i2c3_boardinfo[] = { { I2C_BOARD_INFO("eeprom", 0x50), }, }; static void __init igep_i2c_init(void) { int ret; if (machine_is_igep0020()) { /* * Bus 3 is attached to the DVI port where devices like the * pico DLP projector don't work reliably with 400kHz */ ret = omap_register_i2c_bus(3, 100, igep2_i2c3_boardinfo, ARRAY_SIZE(igep2_i2c3_boardinfo)); if (ret) pr_warning("IGEP2: Could not register I2C3 bus (%d)\n", ret); igep_twldata.codec = &igep2_codec_data; igep_twldata.keypad = &igep2_keypad_pdata; igep_twldata.vpll2 = &igep2_vpll2; } omap3_pmic_init("twl4030", &igep_twldata); } static const struct usbhs_omap_board_data igep2_usbhs_bdata __initconst = { .port_mode[0] = OMAP_EHCI_PORT_MODE_PHY, .port_mode[1] = OMAP_USBHS_PORT_MODE_UNUSED, .port_mode[2] = OMAP_USBHS_PORT_MODE_UNUSED, .phy_reset = true, .reset_gpio_port[0] = IGEP2_GPIO_USBH_NRESET, .reset_gpio_port[1] = -EINVAL, .reset_gpio_port[2] = -EINVAL, }; static const struct usbhs_omap_board_data igep3_usbhs_bdata __initconst = { .port_mode[0] = OMAP_USBHS_PORT_MODE_UNUSED, .port_mode[1] = OMAP_EHCI_PORT_MODE_PHY, .port_mode[2] = OMAP_USBHS_PORT_MODE_UNUSED, .phy_reset = true, .reset_gpio_port[0] = -EINVAL, .reset_gpio_port[1] = IGEP3_GPIO_USBH_NRESET, .reset_gpio_port[2] = -EINVAL, }; #ifdef CONFIG_OMAP_MUX static struct omap_board_mux board_mux[] __initdata = { { .reg_offset = OMAP_MUX_TERMINATOR }, }; #endif #if defined(CONFIG_LIBERTAS_SDIO) || defined(CONFIG_LIBERTAS_SDIO_MODULE) static struct gpio igep_wlan_bt_gpios[] __initdata = { { -EINVAL, GPIOF_OUT_INIT_HIGH, "GPIO_WIFI_NPD" }, { -EINVAL, GPIOF_OUT_INIT_HIGH, "GPIO_WIFI_NRESET" }, { -EINVAL, GPIOF_OUT_INIT_HIGH, "GPIO_BT_NRESET" }, }; static void __init igep_wlan_bt_init(void) { int err; /* 
GPIO's for WLAN-BT combo depends on hardware revision */ if (hwrev == IGEP2_BOARD_HWREV_B) { igep_wlan_bt_gpios[0].gpio = IGEP2_RB_GPIO_WIFI_NPD; igep_wlan_bt_gpios[1].gpio = IGEP2_RB_GPIO_WIFI_NRESET; igep_wlan_bt_gpios[2].gpio = IGEP2_RB_GPIO_BT_NRESET; } else if (hwrev == IGEP2_BOARD_HWREV_C || machine_is_igep0030()) { igep_wlan_bt_gpios[0].gpio = IGEP2_RC_GPIO_WIFI_NPD; igep_wlan_bt_gpios[1].gpio = IGEP2_RC_GPIO_WIFI_NRESET; igep_wlan_bt_gpios[2].gpio = IGEP2_RC_GPIO_BT_NRESET; } else return; err = gpio_request_array(igep_wlan_bt_gpios, ARRAY_SIZE(igep_wlan_bt_gpios)); if (err) { pr_warning("IGEP2: Could not obtain WIFI/BT gpios\n"); return; } gpio_export(igep_wlan_bt_gpios[0].gpio, 0); gpio_export(igep_wlan_bt_gpios[1].gpio, 0); gpio_export(igep_wlan_bt_gpios[2].gpio, 0); gpio_set_value(igep_wlan_bt_gpios[1].gpio, 0); udelay(10); gpio_set_value(igep_wlan_bt_gpios[1].gpio, 1); } #else static inline void __init igep_wlan_bt_init(void) { } #endif static void __init igep_init(void) { omap3_mux_init(board_mux, OMAP_PACKAGE_CBB); /* Get IGEP2 hardware revision */ igep2_get_revision(); /* Register I2C busses and drivers */ igep_i2c_init(); platform_add_devices(igep_devices, ARRAY_SIZE(igep_devices)); omap_serial_init(); usb_musb_init(NULL); igep_flash_init(); igep_leds_init(); /* * WLAN-BT combo module from MuRata which has a Marvell WLAN * (88W8686) + CSR Bluetooth chipset. Uses SDIO interface. 
*/ igep_wlan_bt_init(); if (machine_is_igep0020()) { omap_display_init(&igep2_dss_data); igep2_display_init(); igep2_init_smsc911x(); usbhs_init(&igep2_usbhs_bdata); } else { usbhs_init(&igep3_usbhs_bdata); } } MACHINE_START(IGEP0020, "IGEP v2 board") .boot_params = 0x80000100, .reserve = omap_reserve, .map_io = omap3_map_io, .init_early = igep_init_early, .init_irq = omap_init_irq, .init_machine = igep_init, .timer = &omap_timer, MACHINE_END MACHINE_START(IGEP0030, "IGEP OMAP3 module") .boot_params = 0x80000100, .reserve = omap_reserve, .map_io = omap3_map_io, .init_early = igep_init_early, .init_irq = omap_init_irq, .init_machine = igep_init, .timer = &omap_timer, MACHINE_END
gpl-2.0
humberos/android_kernel_samsung_smdk4412
arch/arm/mach-msm/clock.c
2017
4490
/* arch/arm/mach-msm/clock.c * * Copyright (C) 2007 Google, Inc. * Copyright (c) 2007-2010, Code Aurora Forum. All rights reserved. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/kernel.h> #include <linux/list.h> #include <linux/err.h> #include <linux/spinlock.h> #include <linux/pm_qos_params.h> #include <linux/mutex.h> #include <linux/clk.h> #include <linux/string.h> #include <linux/module.h> #include <linux/clkdev.h> #include "clock.h" static DEFINE_MUTEX(clocks_mutex); static DEFINE_SPINLOCK(clocks_lock); static LIST_HEAD(clocks); /* * Standard clock functions defined in include/linux/clk.h */ int clk_enable(struct clk *clk) { unsigned long flags; spin_lock_irqsave(&clocks_lock, flags); clk->count++; if (clk->count == 1) clk->ops->enable(clk->id); spin_unlock_irqrestore(&clocks_lock, flags); return 0; } EXPORT_SYMBOL(clk_enable); void clk_disable(struct clk *clk) { unsigned long flags; spin_lock_irqsave(&clocks_lock, flags); BUG_ON(clk->count == 0); clk->count--; if (clk->count == 0) clk->ops->disable(clk->id); spin_unlock_irqrestore(&clocks_lock, flags); } EXPORT_SYMBOL(clk_disable); int clk_reset(struct clk *clk, enum clk_reset_action action) { return clk->ops->reset(clk->remote_id, action); } EXPORT_SYMBOL(clk_reset); unsigned long clk_get_rate(struct clk *clk) { return clk->ops->get_rate(clk->id); } EXPORT_SYMBOL(clk_get_rate); int clk_set_rate(struct clk *clk, unsigned long rate) { int ret; if (clk->flags & CLKFLAG_MAX) { ret = clk->ops->set_max_rate(clk->id, rate); if (ret) return ret; } if (clk->flags & CLKFLAG_MIN) { ret = 
clk->ops->set_min_rate(clk->id, rate); if (ret) return ret; } if (clk->flags & CLKFLAG_MAX || clk->flags & CLKFLAG_MIN) return ret; return clk->ops->set_rate(clk->id, rate); } EXPORT_SYMBOL(clk_set_rate); long clk_round_rate(struct clk *clk, unsigned long rate) { return clk->ops->round_rate(clk->id, rate); } EXPORT_SYMBOL(clk_round_rate); int clk_set_min_rate(struct clk *clk, unsigned long rate) { return clk->ops->set_min_rate(clk->id, rate); } EXPORT_SYMBOL(clk_set_min_rate); int clk_set_max_rate(struct clk *clk, unsigned long rate) { return clk->ops->set_max_rate(clk->id, rate); } EXPORT_SYMBOL(clk_set_max_rate); int clk_set_parent(struct clk *clk, struct clk *parent) { return -ENOSYS; } EXPORT_SYMBOL(clk_set_parent); struct clk *clk_get_parent(struct clk *clk) { return ERR_PTR(-ENOSYS); } EXPORT_SYMBOL(clk_get_parent); int clk_set_flags(struct clk *clk, unsigned long flags) { if (clk == NULL || IS_ERR(clk)) return -EINVAL; return clk->ops->set_flags(clk->id, flags); } EXPORT_SYMBOL(clk_set_flags); /* EBI1 is the only shared clock that several clients want to vote on as of * this commit. If this changes in the future, then it might be better to * make clk_min_rate handle the voting or make ebi1_clk_set_min_rate more * generic to support different clocks. */ static struct clk *ebi1_clk; void __init msm_clock_init(struct clk_lookup *clock_tbl, unsigned num_clocks) { unsigned n; mutex_lock(&clocks_mutex); for (n = 0; n < num_clocks; n++) { clkdev_add(&clock_tbl[n]); list_add_tail(&clock_tbl[n].clk->list, &clocks); } mutex_unlock(&clocks_mutex); ebi1_clk = clk_get(NULL, "ebi1_clk"); BUG_ON(ebi1_clk == NULL); } /* The bootloader and/or AMSS may have left various clocks enabled. * Disable any clocks that belong to us (CLKFLAG_AUTO_OFF) but have * not been explicitly enabled by a clk_enable() call. 
*/ static int __init clock_late_init(void) { unsigned long flags; struct clk *clk; unsigned count = 0; clock_debug_init(); mutex_lock(&clocks_mutex); list_for_each_entry(clk, &clocks, list) { clock_debug_add(clk); if (clk->flags & CLKFLAG_AUTO_OFF) { spin_lock_irqsave(&clocks_lock, flags); if (!clk->count) { count++; clk->ops->auto_off(clk->id); } spin_unlock_irqrestore(&clocks_lock, flags); } } mutex_unlock(&clocks_mutex); pr_info("clock_late_init() disabled %d unused clocks\n", count); return 0; } late_initcall(clock_late_init);
gpl-2.0
sgs3/GT-I9300_Kernel
arch/alpha/kernel/sys_sable.c
2273
17306
/* * linux/arch/alpha/kernel/sys_sable.c * * Copyright (C) 1995 David A Rusling * Copyright (C) 1996 Jay A Estabrook * Copyright (C) 1998, 1999 Richard Henderson * * Code supporting the Sable, Sable-Gamma, and Lynx systems. */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/mm.h> #include <linux/sched.h> #include <linux/pci.h> #include <linux/init.h> #include <asm/ptrace.h> #include <asm/system.h> #include <asm/dma.h> #include <asm/irq.h> #include <asm/mmu_context.h> #include <asm/io.h> #include <asm/pgtable.h> #include <asm/core_t2.h> #include <asm/tlbflush.h> #include "proto.h" #include "irq_impl.h" #include "pci_impl.h" #include "machvec_impl.h" DEFINE_SPINLOCK(sable_lynx_irq_lock); typedef struct irq_swizzle_struct { char irq_to_mask[64]; char mask_to_irq[64]; /* Note mask bit is true for DISABLED irqs. */ unsigned long shadow_mask; void (*update_irq_hw)(unsigned long bit, unsigned long mask); void (*ack_irq_hw)(unsigned long bit); } irq_swizzle_t; /* Chosen at init time: points at either the Sable or the Lynx swizzle table. */ static irq_swizzle_t *sable_lynx_irq_swizzle; static void sable_lynx_init_irq(int nr_of_irqs); #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_SABLE) /***********************************************************************/ /* * For SABLE, which is really baroque, we manage 40 IRQ's, but the * hardware really only supports 24, not via normal ISA PIC, * but cascaded custom 8259's, etc. 
* 0-7 (char at 536) * 8-15 (char at 53a) * 16-23 (char at 53c) * * Summary Registers (536/53a/53c): * * Bit Meaning Kernel IRQ *------------------------------------------ * 0 PCI slot 0 34 * 1 NCR810 (builtin) 33 * 2 TULIP (builtin) 32 * 3 mouse 12 * 4 PCI slot 1 35 * 5 PCI slot 2 36 * 6 keyboard 1 * 7 floppy 6 * 8 COM2 3 * 9 parallel port 7 *10 EISA irq 3 - *11 EISA irq 4 - *12 EISA irq 5 5 *13 EISA irq 6 - *14 EISA irq 7 - *15 COM1 4 *16 EISA irq 9 9 *17 EISA irq 10 10 *18 EISA irq 11 11 *19 EISA irq 12 - *20 EISA irq 13 - *21 EISA irq 14 14 *22 NC 15 *23 IIC - */ /* Write the 8-bit mask (1 = disabled) for the summary-register group containing @bit to that group's hardware port. */ static void sable_update_irq_hw(unsigned long bit, unsigned long mask) { int port = 0x537; if (bit >= 16) { port = 0x53d; mask >>= 16; } else if (bit >= 8) { port = 0x53b; mask >>= 8; } outb(mask, port); } /* Ack @bit at its slave controller, then ack the corresponding line at the master (port 0x534). */ static void sable_ack_irq_hw(unsigned long bit) { int port, val1, val2; if (bit >= 16) { port = 0x53c; val1 = 0xE0 | (bit - 16); val2 = 0xE0 | 4; } else if (bit >= 8) { port = 0x53a; val1 = 0xE0 | (bit - 8); val2 = 0xE0 | 3; } else { port = 0x536; val1 = 0xE0 | (bit - 0); val2 = 0xE0 | 1; } outb(val1, port); /* ack the slave */ outb(val2, 0x534); /* ack the master */ } static irq_swizzle_t sable_irq_swizzle = { { -1, 6, -1, 8, 15, 12, 7, 9, /* pseudo PIC 0-7 */ -1, 16, 17, 18, 3, -1, 21, 22, /* pseudo PIC 8-15 */ -1, -1, -1, -1, -1, -1, -1, -1, /* pseudo EISA 0-7 */ -1, -1, -1, -1, -1, -1, -1, -1, /* pseudo EISA 8-15 */ 2, 1, 0, 4, 5, -1, -1, -1, /* pseudo PCI */ -1, -1, -1, -1, -1, -1, -1, -1, /* */ -1, -1, -1, -1, -1, -1, -1, -1, /* */ -1, -1, -1, -1, -1, -1, -1, -1 /* */ }, { 34, 33, 32, 12, 35, 36, 1, 6, /* mask 0-7 */ 3, 7, -1, -1, 5, -1, -1, 4, /* mask 8-15 */ 9, 10, 11, -1, -1, 14, 15, -1, /* mask 16-23 */ -1, -1, -1, -1, -1, -1, -1, -1, /* */ -1, -1, -1, -1, -1, -1, -1, -1, /* */ -1, -1, -1, -1, -1, -1, -1, -1, /* */ -1, -1, -1, -1, -1, -1, -1, -1, /* */ -1, -1, -1, -1, -1, -1, -1, -1 /* */ }, -1, sable_update_irq_hw, sable_ack_irq_hw }; /* Mask all slave inputs, enable the cascades in the master, then select the Sable table and register 40 IRQs. */ static void __init sable_init_irq(void) { outb(-1, 
0x537); /* slave 0 */ outb(-1, 0x53b); /* slave 1 */ outb(-1, 0x53d); /* slave 2 */ outb(0x44, 0x535); /* enable cascades in master */ sable_lynx_irq_swizzle = &sable_irq_swizzle; sable_lynx_init_irq(40); } /* * PCI Fixup configuration for ALPHA SABLE (2100). * * The device to slot mapping looks like: * * Slot Device * 0 TULIP * 1 SCSI * 2 PCI-EISA bridge * 3 none * 4 none * 5 none * 6 PCI on board slot 0 * 7 PCI on board slot 1 * 8 PCI on board slot 2 * * * This two layered interrupt approach means that we allocate IRQ 16 and * above for PCI interrupts. The IRQ relates to which bit the interrupt * comes in on. This makes interrupt processing much easier. */ /* * NOTE: the IRQ assignments below are arbitrary, but need to be consistent * with the values in the irq swizzling tables above. */ static int __init sable_map_irq(struct pci_dev *dev, u8 slot, u8 pin) { static char irq_tab[9][5] __initdata = { /*INT INTA INTB INTC INTD */ { 32+0, 32+0, 32+0, 32+0, 32+0}, /* IdSel 0, TULIP */ { 32+1, 32+1, 32+1, 32+1, 32+1}, /* IdSel 1, SCSI */ { -1, -1, -1, -1, -1}, /* IdSel 2, SIO */ { -1, -1, -1, -1, -1}, /* IdSel 3, none */ { -1, -1, -1, -1, -1}, /* IdSel 4, none */ { -1, -1, -1, -1, -1}, /* IdSel 5, none */ { 32+2, 32+2, 32+2, 32+2, 32+2}, /* IdSel 6, slot 0 */ { 32+3, 32+3, 32+3, 32+3, 32+3}, /* IdSel 7, slot 1 */ { 32+4, 32+4, 32+4, 32+4, 32+4} /* IdSel 8, slot 2 */ }; long min_idsel = 0, max_idsel = 8, irqs_per_slot = 5; return COMMON_TABLE_LOOKUP; } #endif /* defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_SABLE) */ #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_LYNX) /***********************************************************************/ /* LYNX hardware specifics */ /* * For LYNX, which is also baroque, we manage 64 IRQs, via a custom IC. 
* * Bit Meaning Kernel IRQ *------------------------------------------ * 0 * 1 * 2 * 3 mouse 12 * 4 * 5 * 6 keyboard 1 * 7 floppy 6 * 8 COM2 3 * 9 parallel port 7 *10 EISA irq 3 - *11 EISA irq 4 - *12 EISA irq 5 5 *13 EISA irq 6 - *14 EISA irq 7 - *15 COM1 4 *16 EISA irq 9 9 *17 EISA irq 10 10 *18 EISA irq 11 11 *19 EISA irq 12 - *20 *21 EISA irq 14 14 *22 EISA irq 15 15 *23 IIC - *24 VGA (builtin) - *25 *26 *27 *28 NCR810 (builtin) 28 *29 *30 *31 *32 PCI 0 slot 4 A primary bus 32 *33 PCI 0 slot 4 B primary bus 33 *34 PCI 0 slot 4 C primary bus 34 *35 PCI 0 slot 4 D primary bus *36 PCI 0 slot 5 A primary bus *37 PCI 0 slot 5 B primary bus *38 PCI 0 slot 5 C primary bus *39 PCI 0 slot 5 D primary bus *40 PCI 0 slot 6 A primary bus *41 PCI 0 slot 6 B primary bus *42 PCI 0 slot 6 C primary bus *43 PCI 0 slot 6 D primary bus *44 PCI 0 slot 7 A primary bus *45 PCI 0 slot 7 B primary bus *46 PCI 0 slot 7 C primary bus *47 PCI 0 slot 7 D primary bus *48 PCI 0 slot 0 A secondary bus *49 PCI 0 slot 0 B secondary bus *50 PCI 0 slot 0 C secondary bus *51 PCI 0 slot 0 D secondary bus *52 PCI 0 slot 1 A secondary bus *53 PCI 0 slot 1 B secondary bus *54 PCI 0 slot 1 C secondary bus *55 PCI 0 slot 1 D secondary bus *56 PCI 0 slot 2 A secondary bus *57 PCI 0 slot 2 B secondary bus *58 PCI 0 slot 2 C secondary bus *59 PCI 0 slot 2 D secondary bus *60 PCI 0 slot 3 A secondary bus *61 PCI 0 slot 3 B secondary bus *62 PCI 0 slot 3 C secondary bus *63 PCI 0 slot 3 D secondary bus */ static void lynx_update_irq_hw(unsigned long bit, unsigned long mask) { /* * Write the AIR register on the T3/T4 with the * address of the IC mask register (offset 0x40) */ *(vulp)T2_AIR = 0x40; mb(); *(vulp)T2_AIR; /* re-read to force write */ mb(); *(vulp)T2_DIR = mask; mb(); mb(); } /* Ack @bit by writing it to the T2_VAR register. */ static void lynx_ack_irq_hw(unsigned long bit) { *(vulp)T2_VAR = (u_long) bit; mb(); mb(); } static irq_swizzle_t lynx_irq_swizzle = { { /* irq_to_mask */ -1, 6, -1, 8, 15, 12, 7, 9, /* pseudo PIC 0-7 */ -1, 16, 17, 18, 3, 
-1, 21, 22, /* pseudo PIC 8-15 */ -1, -1, -1, -1, -1, -1, -1, -1, /* pseudo */ -1, -1, -1, -1, 28, -1, -1, -1, /* pseudo */ 32, 33, 34, 35, 36, 37, 38, 39, /* mask 32-39 */ 40, 41, 42, 43, 44, 45, 46, 47, /* mask 40-47 */ 48, 49, 50, 51, 52, 53, 54, 55, /* mask 48-55 */ 56, 57, 58, 59, 60, 61, 62, 63 /* mask 56-63 */ }, { /* mask_to_irq */ -1, -1, -1, 12, -1, -1, 1, 6, /* mask 0-7 */ 3, 7, -1, -1, 5, -1, -1, 4, /* mask 8-15 */ 9, 10, 11, -1, -1, 14, 15, -1, /* mask 16-23 */ -1, -1, -1, -1, 28, -1, -1, -1, /* mask 24-31 */ 32, 33, 34, 35, 36, 37, 38, 39, /* mask 32-39 */ 40, 41, 42, 43, 44, 45, 46, 47, /* mask 40-47 */ 48, 49, 50, 51, 52, 53, 54, 55, /* mask 48-55 */ 56, 57, 58, 59, 60, 61, 62, 63 /* mask 56-63 */ }, -1, lynx_update_irq_hw, lynx_ack_irq_hw }; /* Select the Lynx swizzle table and register 64 IRQs. */ static void __init lynx_init_irq(void) { sable_lynx_irq_swizzle = &lynx_irq_swizzle; sable_lynx_init_irq(64); } /* * PCI Fixup configuration for ALPHA LYNX (2100A) * * The device to slot mapping looks like: * * Slot Device * 0 none * 1 none * 2 PCI-EISA bridge * 3 PCI-PCI bridge * 4 NCR 810 (Demi-Lynx only) * 5 none * 6 PCI on board slot 4 * 7 PCI on board slot 5 * 8 PCI on board slot 6 * 9 PCI on board slot 7 * * And behind the PPB we have: * * 11 PCI on board slot 0 * 12 PCI on board slot 1 * 13 PCI on board slot 2 * 14 PCI on board slot 3 */ /* * NOTE: the IRQ assignments below are arbitrary, but need to be consistent * with the values in the irq swizzling tables above. 
*/ static int __init lynx_map_irq(struct pci_dev *dev, u8 slot, u8 pin) { static char irq_tab[19][5] __initdata = { /*INT INTA INTB INTC INTD */ { -1, -1, -1, -1, -1}, /* IdSel 13, PCEB */ { -1, -1, -1, -1, -1}, /* IdSel 14, PPB */ { 28, 28, 28, 28, 28}, /* IdSel 15, NCR demi */ { -1, -1, -1, -1, -1}, /* IdSel 16, none */ { 32, 32, 33, 34, 35}, /* IdSel 17, slot 4 */ { 36, 36, 37, 38, 39}, /* IdSel 18, slot 5 */ { 40, 40, 41, 42, 43}, /* IdSel 19, slot 6 */ { 44, 44, 45, 46, 47}, /* IdSel 20, slot 7 */ { -1, -1, -1, -1, -1}, /* IdSel 22, none */ /* The following are actually behind the PPB. */ { -1, -1, -1, -1, -1}, /* IdSel 16 none */ { 28, 28, 28, 28, 28}, /* IdSel 17 NCR lynx */ { -1, -1, -1, -1, -1}, /* IdSel 18 none */ { -1, -1, -1, -1, -1}, /* IdSel 19 none */ { -1, -1, -1, -1, -1}, /* IdSel 20 none */ { -1, -1, -1, -1, -1}, /* IdSel 21 none */ { 48, 48, 49, 50, 51}, /* IdSel 22 slot 0 */ { 52, 52, 53, 54, 55}, /* IdSel 23 slot 1 */ { 56, 56, 57, 58, 59}, /* IdSel 24 slot 2 */ { 60, 60, 61, 62, 63} /* IdSel 25 slot 3 */ }; const long min_idsel = 2, max_idsel = 20, irqs_per_slot = 5; return COMMON_TABLE_LOOKUP; } /* Walk up any card-based bridges to map @dev to a top-level slot, swizzling *pinp along the way. */ static u8 __init lynx_swizzle(struct pci_dev *dev, u8 *pinp) { int slot, pin = *pinp; if (dev->bus->number == 0) { slot = PCI_SLOT(dev->devfn); } /* Check for the built-in bridge */ else if (PCI_SLOT(dev->bus->self->devfn) == 3) { slot = PCI_SLOT(dev->devfn) + 11; } else { /* Must be a card-based bridge. */ do { if (PCI_SLOT(dev->bus->self->devfn) == 3) { slot = PCI_SLOT(dev->devfn) + 11; break; } pin = pci_swizzle_interrupt_pin(dev, pin); /* Move up the chain of bridges. */ dev = dev->bus->self; /* Slot of the next bridge. 
*/ slot = PCI_SLOT(dev->devfn); } while (dev->bus->self); } *pinp = pin; return slot; } #endif /* defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_LYNX) */ /***********************************************************************/ /* GENERIC irq routines */ /* Unmask: clear @d->irq's bit in the shadow mask and push the new mask to hardware. */ static inline void sable_lynx_enable_irq(struct irq_data *d) { unsigned long bit, mask; bit = sable_lynx_irq_swizzle->irq_to_mask[d->irq]; spin_lock(&sable_lynx_irq_lock); mask = sable_lynx_irq_swizzle->shadow_mask &= ~(1UL << bit); sable_lynx_irq_swizzle->update_irq_hw(bit, mask); spin_unlock(&sable_lynx_irq_lock); #if 0 printk("%s: mask 0x%lx bit 0x%lx irq 0x%x\n", __func__, mask, bit, irq); #endif } /* Mask: set @d->irq's bit (1 = disabled, per irq_swizzle_struct) and push to hardware. */ static void sable_lynx_disable_irq(struct irq_data *d) { unsigned long bit, mask; bit = sable_lynx_irq_swizzle->irq_to_mask[d->irq]; spin_lock(&sable_lynx_irq_lock); mask = sable_lynx_irq_swizzle->shadow_mask |= 1UL << bit; sable_lynx_irq_swizzle->update_irq_hw(bit, mask); spin_unlock(&sable_lynx_irq_lock); #if 0 printk("%s: mask 0x%lx bit 0x%lx irq 0x%x\n", __func__, mask, bit, irq); #endif } /* Mask the line, then ack it at the hardware controller. */ static void sable_lynx_mask_and_ack_irq(struct irq_data *d) { unsigned long bit, mask; bit = sable_lynx_irq_swizzle->irq_to_mask[d->irq]; spin_lock(&sable_lynx_irq_lock); mask = sable_lynx_irq_swizzle->shadow_mask |= 1UL << bit; sable_lynx_irq_swizzle->update_irq_hw(bit, mask); sable_lynx_irq_swizzle->ack_irq_hw(bit); spin_unlock(&sable_lynx_irq_lock); } static struct irq_chip sable_lynx_irq_type = { .name = "SABLE/LYNX", .irq_unmask = sable_lynx_enable_irq, .irq_mask = sable_lynx_disable_irq, .irq_mask_ack = sable_lynx_mask_and_ack_irq, }; static void sable_lynx_srm_device_interrupt(unsigned long vector) { /* Note that the vector reported by the SRM PALcode corresponds to the interrupt mask bits, but we have to manage via the so-called legacy IRQs for many common devices. 
*/ int bit, irq; bit = (vector - 0x800) >> 4; irq = sable_lynx_irq_swizzle->mask_to_irq[bit]; #if 0 printk("%s: vector 0x%lx bit 0x%x irq 0x%x\n", __func__, vector, bit, irq); #endif handle_irq(irq); } /* Install the shared irq_chip and level handler on IRQs 0..nr_of_irqs-1, then set up ISA DMA. */ static void __init sable_lynx_init_irq(int nr_of_irqs) { long i; for (i = 0; i < nr_of_irqs; ++i) { irq_set_chip_and_handler(i, &sable_lynx_irq_type, handle_level_irq); irq_set_status_flags(i, IRQ_LEVEL); } common_init_isa_dma(); } static void __init sable_lynx_init_pci(void) { common_init_pci(); } /*****************************************************************/ /* * The System Vectors * * In order that T2_HAE_ADDRESS should be a constant, we play * these games with GAMMA_BIAS. */ #if defined(CONFIG_ALPHA_GENERIC) || \ (defined(CONFIG_ALPHA_SABLE) && !defined(CONFIG_ALPHA_GAMMA)) #undef GAMMA_BIAS #define GAMMA_BIAS 0 struct alpha_machine_vector sable_mv __initmv = { .vector_name = "Sable", DO_EV4_MMU, DO_DEFAULT_RTC, DO_T2_IO, .machine_check = t2_machine_check, .max_isa_dma_address = ALPHA_SABLE_MAX_ISA_DMA_ADDRESS, .min_io_address = EISA_DEFAULT_IO_BASE, .min_mem_address = T2_DEFAULT_MEM_BASE, .nr_irqs = 40, .device_interrupt = sable_lynx_srm_device_interrupt, .init_arch = t2_init_arch, .init_irq = sable_init_irq, .init_rtc = common_init_rtc, .init_pci = sable_lynx_init_pci, .kill_arch = t2_kill_arch, .pci_map_irq = sable_map_irq, .pci_swizzle = common_swizzle, .sys = { .t2 = { .gamma_bias = 0 } } }; ALIAS_MV(sable) #endif /* GENERIC || (SABLE && !GAMMA) */ #if defined(CONFIG_ALPHA_GENERIC) || \ (defined(CONFIG_ALPHA_SABLE) && defined(CONFIG_ALPHA_GAMMA)) #undef GAMMA_BIAS #define GAMMA_BIAS _GAMMA_BIAS struct alpha_machine_vector sable_gamma_mv __initmv = { .vector_name = "Sable-Gamma", DO_EV5_MMU, DO_DEFAULT_RTC, DO_T2_IO, .machine_check = t2_machine_check, .max_isa_dma_address = ALPHA_SABLE_MAX_ISA_DMA_ADDRESS, .min_io_address = EISA_DEFAULT_IO_BASE, .min_mem_address = T2_DEFAULT_MEM_BASE, .nr_irqs = 40, .device_interrupt = sable_lynx_srm_device_interrupt, 
.init_arch = t2_init_arch, .init_irq = sable_init_irq, .init_rtc = common_init_rtc, .init_pci = sable_lynx_init_pci, .kill_arch = t2_kill_arch, .pci_map_irq = sable_map_irq, .pci_swizzle = common_swizzle, .sys = { .t2 = { .gamma_bias = _GAMMA_BIAS } } }; ALIAS_MV(sable_gamma) #endif /* GENERIC || (SABLE && GAMMA) */ #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_LYNX) #undef GAMMA_BIAS #define GAMMA_BIAS _GAMMA_BIAS struct alpha_machine_vector lynx_mv __initmv = { .vector_name = "Lynx", DO_EV4_MMU, DO_DEFAULT_RTC, DO_T2_IO, .machine_check = t2_machine_check, .max_isa_dma_address = ALPHA_SABLE_MAX_ISA_DMA_ADDRESS, .min_io_address = EISA_DEFAULT_IO_BASE, .min_mem_address = T2_DEFAULT_MEM_BASE, .nr_irqs = 64, .device_interrupt = sable_lynx_srm_device_interrupt, .init_arch = t2_init_arch, .init_irq = lynx_init_irq, .init_rtc = common_init_rtc, .init_pci = sable_lynx_init_pci, .kill_arch = t2_kill_arch, .pci_map_irq = lynx_map_irq, .pci_swizzle = lynx_swizzle, .sys = { .t2 = { .gamma_bias = _GAMMA_BIAS } } }; ALIAS_MV(lynx) #endif /* GENERIC || LYNX */
gpl-2.0
ambikadash/linux-fqt
sound/soc/blackfin/bf5xx-sport.c
2273
28245
/* * File: bf5xx_sport.c * Based on: * Author: Roy Huang <roy.huang@analog.com> * * Created: Tue Sep 21 10:52:42 CEST 2004 * Description: * Blackfin SPORT Driver * * Copyright 2004-2007 Analog Devices Inc. * * Bugs: Enter bugs at http://blackfin.uclinux.org/ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, see the file COPYING, or write * to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/gpio.h> #include <linux/bug.h> #include <linux/module.h> #include <asm/portmux.h> #include <asm/dma.h> #include <asm/blackfin.h> #include <asm/cacheflush.h> #include "bf5xx-sport.h" /* delay between frame sync pulse and first data bit in multichannel mode */ #define FRAME_DELAY (1<<12) /* note: multichannel is in units of 8 channels, * tdm_count is # channels NOT / 8 ! 
*/ int sport_set_multichannel(struct sport_device *sport, int tdm_count, u32 tx_mask, u32 rx_mask, int packed) { pr_debug("%s tdm_count=%d tx_mask:0x%08x rx_mask:0x%08x packed=%d\n", __func__, tdm_count, tx_mask, rx_mask, packed); if ((sport->regs->tcr1 & TSPEN) || (sport->regs->rcr1 & RSPEN)) return -EBUSY; if (tdm_count & 0x7) return -EINVAL; if (tdm_count > 32) return -EINVAL; /* Only support less than 32 channels now */ if (tdm_count) { sport->regs->mcmc1 = ((tdm_count>>3)-1) << 12; sport->regs->mcmc2 = FRAME_DELAY | MCMEN | \ (packed ? (MCDTXPE|MCDRXPE) : 0); sport->regs->mtcs0 = tx_mask; sport->regs->mrcs0 = rx_mask; sport->regs->mtcs1 = 0; sport->regs->mrcs1 = 0; sport->regs->mtcs2 = 0; sport->regs->mrcs2 = 0; sport->regs->mtcs3 = 0; sport->regs->mrcs3 = 0; } else { sport->regs->mcmc1 = 0; sport->regs->mcmc2 = 0; sport->regs->mtcs0 = 0; sport->regs->mrcs0 = 0; } sport->regs->mtcs1 = 0; sport->regs->mtcs2 = 0; sport->regs->mtcs3 = 0; sport->regs->mrcs1 = 0; sport->regs->mrcs2 = 0; sport->regs->mrcs3 = 0; SSYNC(); return 0; } EXPORT_SYMBOL(sport_set_multichannel); int sport_config_rx(struct sport_device *sport, unsigned int rcr1, unsigned int rcr2, unsigned int clkdiv, unsigned int fsdiv) { if ((sport->regs->tcr1 & TSPEN) || (sport->regs->rcr1 & RSPEN)) return -EBUSY; sport->regs->rcr1 = rcr1; sport->regs->rcr2 = rcr2; sport->regs->rclkdiv = clkdiv; sport->regs->rfsdiv = fsdiv; SSYNC(); return 0; } EXPORT_SYMBOL(sport_config_rx); int sport_config_tx(struct sport_device *sport, unsigned int tcr1, unsigned int tcr2, unsigned int clkdiv, unsigned int fsdiv) { if ((sport->regs->tcr1 & TSPEN) || (sport->regs->rcr1 & RSPEN)) return -EBUSY; sport->regs->tcr1 = tcr1; sport->regs->tcr2 = tcr2; sport->regs->tclkdiv = clkdiv; sport->regs->tfsdiv = fsdiv; SSYNC(); return 0; } EXPORT_SYMBOL(sport_config_tx); static void setup_desc(struct dmasg *desc, void *buf, int fragcount, size_t fragsize, unsigned int cfg, unsigned int x_count, unsigned int ycount, size_t wdsize) { int 
i; for (i = 0; i < fragcount; ++i) { desc[i].next_desc_addr = &(desc[i + 1]); desc[i].start_addr = (unsigned long)buf + i*fragsize; desc[i].cfg = cfg; desc[i].x_count = x_count; desc[i].x_modify = wdsize; desc[i].y_count = ycount; desc[i].y_modify = wdsize; } /* make circular */ desc[fragcount-1].next_desc_addr = desc; pr_debug("setup desc: desc0=%p, next0=%p, desc1=%p," "next1=%p\nx_count=%x,y_count=%x,addr=0x%lx,cfs=0x%x\n", desc, desc[0].next_desc_addr, desc+1, desc[1].next_desc_addr, desc[0].x_count, desc[0].y_count, desc[0].start_addr, desc[0].cfg); } static int sport_start(struct sport_device *sport) { enable_dma(sport->dma_rx_chan); enable_dma(sport->dma_tx_chan); sport->regs->rcr1 |= RSPEN; sport->regs->tcr1 |= TSPEN; SSYNC(); return 0; } static int sport_stop(struct sport_device *sport) { sport->regs->tcr1 &= ~TSPEN; sport->regs->rcr1 &= ~RSPEN; SSYNC(); disable_dma(sport->dma_rx_chan); disable_dma(sport->dma_tx_chan); return 0; } static inline int sport_hook_rx_dummy(struct sport_device *sport) { struct dmasg *desc, temp_desc; unsigned long flags; if (WARN_ON(!sport->dummy_rx_desc) || WARN_ON(sport->curr_rx_desc == sport->dummy_rx_desc)) return -EINVAL; /* Maybe the dummy buffer descriptor ring is damaged */ sport->dummy_rx_desc->next_desc_addr = sport->dummy_rx_desc + 1; local_irq_save(flags); desc = get_dma_next_desc_ptr(sport->dma_rx_chan); /* Copy the descriptor which will be damaged to backup */ temp_desc = *desc; desc->x_count = sport->dummy_count / 2; desc->y_count = 0; desc->next_desc_addr = sport->dummy_rx_desc; local_irq_restore(flags); /* Waiting for dummy buffer descriptor is already hooked*/ while ((get_dma_curr_desc_ptr(sport->dma_rx_chan) - sizeof(struct dmasg)) != sport->dummy_rx_desc) continue; sport->curr_rx_desc = sport->dummy_rx_desc; /* Restore the damaged descriptor */ *desc = temp_desc; return 0; } static inline int sport_rx_dma_start(struct sport_device *sport, int dummy) { if (dummy) { sport->dummy_rx_desc->next_desc_addr = 
sport->dummy_rx_desc; sport->curr_rx_desc = sport->dummy_rx_desc; } else sport->curr_rx_desc = sport->dma_rx_desc; set_dma_next_desc_addr(sport->dma_rx_chan, sport->curr_rx_desc); set_dma_x_count(sport->dma_rx_chan, 0); set_dma_x_modify(sport->dma_rx_chan, 0); set_dma_config(sport->dma_rx_chan, (DMAFLOW_LARGE | NDSIZE_9 | \ WDSIZE_32 | WNR)); set_dma_curr_addr(sport->dma_rx_chan, sport->curr_rx_desc->start_addr); SSYNC(); return 0; } static inline int sport_tx_dma_start(struct sport_device *sport, int dummy) { if (dummy) { sport->dummy_tx_desc->next_desc_addr = sport->dummy_tx_desc; sport->curr_tx_desc = sport->dummy_tx_desc; } else sport->curr_tx_desc = sport->dma_tx_desc; set_dma_next_desc_addr(sport->dma_tx_chan, sport->curr_tx_desc); set_dma_x_count(sport->dma_tx_chan, 0); set_dma_x_modify(sport->dma_tx_chan, 0); set_dma_config(sport->dma_tx_chan, (DMAFLOW_LARGE | NDSIZE_9 | WDSIZE_32)); set_dma_curr_addr(sport->dma_tx_chan, sport->curr_tx_desc->start_addr); SSYNC(); return 0; } int sport_rx_start(struct sport_device *sport) { unsigned long flags; pr_debug("%s enter\n", __func__); if (sport->rx_run) return -EBUSY; if (sport->tx_run) { /* tx is running, rx is not running */ if (WARN_ON(!sport->dma_rx_desc) || WARN_ON(sport->curr_rx_desc != sport->dummy_rx_desc)) return -EINVAL; local_irq_save(flags); while ((get_dma_curr_desc_ptr(sport->dma_rx_chan) - sizeof(struct dmasg)) != sport->dummy_rx_desc) continue; sport->dummy_rx_desc->next_desc_addr = sport->dma_rx_desc; local_irq_restore(flags); sport->curr_rx_desc = sport->dma_rx_desc; } else { sport_tx_dma_start(sport, 1); sport_rx_dma_start(sport, 0); sport_start(sport); } sport->rx_run = 1; return 0; } EXPORT_SYMBOL(sport_rx_start); int sport_rx_stop(struct sport_device *sport) { pr_debug("%s enter\n", __func__); if (!sport->rx_run) return 0; if (sport->tx_run) { /* TX dma is still running, hook the dummy buffer */ sport_hook_rx_dummy(sport); } else { /* Both rx and tx dma will be stopped */ sport_stop(sport); 
sport->curr_rx_desc = NULL; sport->curr_tx_desc = NULL; } sport->rx_run = 0; return 0; } EXPORT_SYMBOL(sport_rx_stop); static inline int sport_hook_tx_dummy(struct sport_device *sport) { struct dmasg *desc, temp_desc; unsigned long flags; if (WARN_ON(!sport->dummy_tx_desc) || WARN_ON(sport->curr_tx_desc == sport->dummy_tx_desc)) return -EINVAL; sport->dummy_tx_desc->next_desc_addr = sport->dummy_tx_desc + 1; /* Shorten the time on last normal descriptor */ local_irq_save(flags); desc = get_dma_next_desc_ptr(sport->dma_tx_chan); /* Store the descriptor which will be damaged */ temp_desc = *desc; desc->x_count = sport->dummy_count / 2; desc->y_count = 0; desc->next_desc_addr = sport->dummy_tx_desc; local_irq_restore(flags); /* Waiting for dummy buffer descriptor is already hooked*/ while ((get_dma_curr_desc_ptr(sport->dma_tx_chan) - \ sizeof(struct dmasg)) != sport->dummy_tx_desc) continue; sport->curr_tx_desc = sport->dummy_tx_desc; /* Restore the damaged descriptor */ *desc = temp_desc; return 0; } int sport_tx_start(struct sport_device *sport) { unsigned long flags; pr_debug("%s: tx_run:%d, rx_run:%d\n", __func__, sport->tx_run, sport->rx_run); if (sport->tx_run) return -EBUSY; if (sport->rx_run) { if (WARN_ON(!sport->dma_tx_desc) || WARN_ON(sport->curr_tx_desc != sport->dummy_tx_desc)) return -EINVAL; /* Hook the normal buffer descriptor */ local_irq_save(flags); while ((get_dma_curr_desc_ptr(sport->dma_tx_chan) - sizeof(struct dmasg)) != sport->dummy_tx_desc) continue; sport->dummy_tx_desc->next_desc_addr = sport->dma_tx_desc; local_irq_restore(flags); sport->curr_tx_desc = sport->dma_tx_desc; } else { sport_tx_dma_start(sport, 0); /* Let rx dma run the dummy buffer */ sport_rx_dma_start(sport, 1); sport_start(sport); } sport->tx_run = 1; return 0; } EXPORT_SYMBOL(sport_tx_start); int sport_tx_stop(struct sport_device *sport) { if (!sport->tx_run) return 0; if (sport->rx_run) { /* RX is still running, hook the dummy buffer */ sport_hook_tx_dummy(sport); } else { 
/* Both rx and tx dma stopped */ sport_stop(sport); sport->curr_rx_desc = NULL; sport->curr_tx_desc = NULL; } sport->tx_run = 0; return 0; } EXPORT_SYMBOL(sport_tx_stop); static inline int compute_wdsize(size_t wdsize) { switch (wdsize) { case 1: return WDSIZE_8; case 2: return WDSIZE_16; case 4: default: return WDSIZE_32; } } int sport_config_rx_dma(struct sport_device *sport, void *buf, int fragcount, size_t fragsize) { unsigned int x_count; unsigned int y_count; unsigned int cfg; dma_addr_t addr; pr_debug("%s buf:%p, frag:%d, fragsize:0x%lx\n", __func__, \ buf, fragcount, fragsize); x_count = fragsize / sport->wdsize; y_count = 0; /* for fragments larger than 64k words we use 2d dma, * denote fragecount as two numbers' mutliply and both of them * are less than 64k.*/ if (x_count >= 0x10000) { int i, count = x_count; for (i = 16; i > 0; i--) { x_count = 1 << i; if ((count & (x_count - 1)) == 0) { y_count = count >> i; if (y_count < 0x10000) break; } } if (i == 0) return -EINVAL; } pr_debug("%s(x_count:0x%x, y_count:0x%x)\n", __func__, x_count, y_count); if (sport->dma_rx_desc) dma_free_coherent(NULL, sport->rx_desc_bytes, sport->dma_rx_desc, 0); /* Allocate a new descritor ring as current one. 
*/ sport->dma_rx_desc = dma_alloc_coherent(NULL, \ fragcount * sizeof(struct dmasg), &addr, 0); sport->rx_desc_bytes = fragcount * sizeof(struct dmasg); if (!sport->dma_rx_desc) { pr_err("Failed to allocate memory for rx desc\n"); return -ENOMEM; } sport->rx_buf = buf; sport->rx_fragsize = fragsize; sport->rx_frags = fragcount; cfg = 0x7000 | DI_EN | compute_wdsize(sport->wdsize) | WNR | \ (DESC_ELEMENT_COUNT << 8); /* large descriptor mode */ if (y_count != 0) cfg |= DMA2D; setup_desc(sport->dma_rx_desc, buf, fragcount, fragsize, cfg|DMAEN, x_count, y_count, sport->wdsize); return 0; } EXPORT_SYMBOL(sport_config_rx_dma); int sport_config_tx_dma(struct sport_device *sport, void *buf, \ int fragcount, size_t fragsize) { unsigned int x_count; unsigned int y_count; unsigned int cfg; dma_addr_t addr; pr_debug("%s buf:%p, fragcount:%d, fragsize:0x%lx\n", __func__, buf, fragcount, fragsize); x_count = fragsize/sport->wdsize; y_count = 0; /* for fragments larger than 64k words we use 2d dma, * denote fragecount as two numbers' mutliply and both of them * are less than 64k.*/ if (x_count >= 0x10000) { int i, count = x_count; for (i = 16; i > 0; i--) { x_count = 1 << i; if ((count & (x_count - 1)) == 0) { y_count = count >> i; if (y_count < 0x10000) break; } } if (i == 0) return -EINVAL; } pr_debug("%s x_count:0x%x, y_count:0x%x\n", __func__, x_count, y_count); if (sport->dma_tx_desc) { dma_free_coherent(NULL, sport->tx_desc_bytes, \ sport->dma_tx_desc, 0); } sport->dma_tx_desc = dma_alloc_coherent(NULL, \ fragcount * sizeof(struct dmasg), &addr, 0); sport->tx_desc_bytes = fragcount * sizeof(struct dmasg); if (!sport->dma_tx_desc) { pr_err("Failed to allocate memory for tx desc\n"); return -ENOMEM; } sport->tx_buf = buf; sport->tx_fragsize = fragsize; sport->tx_frags = fragcount; cfg = 0x7000 | DI_EN | compute_wdsize(sport->wdsize) | \ (DESC_ELEMENT_COUNT << 8); /* large descriptor mode */ if (y_count != 0) cfg |= DMA2D; setup_desc(sport->dma_tx_desc, buf, fragcount, 
fragsize, cfg|DMAEN, x_count, y_count, sport->wdsize); return 0; } EXPORT_SYMBOL(sport_config_tx_dma); /* setup dummy dma descriptor ring, which don't generate interrupts, * the x_modify is set to 0 */ static int sport_config_rx_dummy(struct sport_device *sport) { struct dmasg *desc; unsigned config; pr_debug("%s entered\n", __func__); if (L1_DATA_A_LENGTH) desc = l1_data_sram_zalloc(2 * sizeof(*desc)); else { dma_addr_t addr; desc = dma_alloc_coherent(NULL, 2 * sizeof(*desc), &addr, 0); memset(desc, 0, 2 * sizeof(*desc)); } if (desc == NULL) { pr_err("Failed to allocate memory for dummy rx desc\n"); return -ENOMEM; } sport->dummy_rx_desc = desc; desc->start_addr = (unsigned long)sport->dummy_buf; config = DMAFLOW_LARGE | NDSIZE_9 | compute_wdsize(sport->wdsize) | WNR | DMAEN; desc->cfg = config; desc->x_count = sport->dummy_count/sport->wdsize; desc->x_modify = sport->wdsize; desc->y_count = 0; desc->y_modify = 0; memcpy(desc+1, desc, sizeof(*desc)); desc->next_desc_addr = desc + 1; desc[1].next_desc_addr = desc; return 0; } static int sport_config_tx_dummy(struct sport_device *sport) { struct dmasg *desc; unsigned int config; pr_debug("%s entered\n", __func__); if (L1_DATA_A_LENGTH) desc = l1_data_sram_zalloc(2 * sizeof(*desc)); else { dma_addr_t addr; desc = dma_alloc_coherent(NULL, 2 * sizeof(*desc), &addr, 0); memset(desc, 0, 2 * sizeof(*desc)); } if (!desc) { pr_err("Failed to allocate memory for dummy tx desc\n"); return -ENOMEM; } sport->dummy_tx_desc = desc; desc->start_addr = (unsigned long)sport->dummy_buf + \ sport->dummy_count; config = DMAFLOW_LARGE | NDSIZE_9 | compute_wdsize(sport->wdsize) | DMAEN; desc->cfg = config; desc->x_count = sport->dummy_count/sport->wdsize; desc->x_modify = sport->wdsize; desc->y_count = 0; desc->y_modify = 0; memcpy(desc+1, desc, sizeof(*desc)); desc->next_desc_addr = desc + 1; desc[1].next_desc_addr = desc; return 0; } unsigned long sport_curr_offset_rx(struct sport_device *sport) { unsigned long curr = 
get_dma_curr_addr(sport->dma_rx_chan); return (unsigned char *)curr - sport->rx_buf; } EXPORT_SYMBOL(sport_curr_offset_rx); unsigned long sport_curr_offset_tx(struct sport_device *sport) { unsigned long curr = get_dma_curr_addr(sport->dma_tx_chan); return (unsigned char *)curr - sport->tx_buf; } EXPORT_SYMBOL(sport_curr_offset_tx); void sport_incfrag(struct sport_device *sport, int *frag, int tx) { ++(*frag); if (tx == 1 && *frag == sport->tx_frags) *frag = 0; if (tx == 0 && *frag == sport->rx_frags) *frag = 0; } EXPORT_SYMBOL(sport_incfrag); void sport_decfrag(struct sport_device *sport, int *frag, int tx) { --(*frag); if (tx == 1 && *frag == 0) *frag = sport->tx_frags; if (tx == 0 && *frag == 0) *frag = sport->rx_frags; } EXPORT_SYMBOL(sport_decfrag); static int sport_check_status(struct sport_device *sport, unsigned int *sport_stat, unsigned int *rx_stat, unsigned int *tx_stat) { int status = 0; if (sport_stat) { SSYNC(); status = sport->regs->stat; if (status & (TOVF|TUVF|ROVF|RUVF)) sport->regs->stat = (status & (TOVF|TUVF|ROVF|RUVF)); SSYNC(); *sport_stat = status; } if (rx_stat) { SSYNC(); status = get_dma_curr_irqstat(sport->dma_rx_chan); if (status & (DMA_DONE|DMA_ERR)) clear_dma_irqstat(sport->dma_rx_chan); SSYNC(); *rx_stat = status; } if (tx_stat) { SSYNC(); status = get_dma_curr_irqstat(sport->dma_tx_chan); if (status & (DMA_DONE|DMA_ERR)) clear_dma_irqstat(sport->dma_tx_chan); SSYNC(); *tx_stat = status; } return 0; } int sport_dump_stat(struct sport_device *sport, char *buf, size_t len) { int ret; ret = snprintf(buf, len, "sts: 0x%04x\n" "rx dma %d sts: 0x%04x tx dma %d sts: 0x%04x\n", sport->regs->stat, sport->dma_rx_chan, get_dma_curr_irqstat(sport->dma_rx_chan), sport->dma_tx_chan, get_dma_curr_irqstat(sport->dma_tx_chan)); buf += ret; len -= ret; ret += snprintf(buf, len, "curr_rx_desc:0x%p, curr_tx_desc:0x%p\n" "dma_rx_desc:0x%p, dma_tx_desc:0x%p\n" "dummy_rx_desc:0x%p, dummy_tx_desc:0x%p\n", sport->curr_rx_desc, sport->curr_tx_desc, 
sport->dma_rx_desc, sport->dma_tx_desc, sport->dummy_rx_desc, sport->dummy_tx_desc); return ret; } static irqreturn_t rx_handler(int irq, void *dev_id) { unsigned int rx_stat; struct sport_device *sport = dev_id; pr_debug("%s enter\n", __func__); sport_check_status(sport, NULL, &rx_stat, NULL); if (!(rx_stat & DMA_DONE)) pr_err("rx dma is already stopped\n"); if (sport->rx_callback) { sport->rx_callback(sport->rx_data); return IRQ_HANDLED; } return IRQ_NONE; } static irqreturn_t tx_handler(int irq, void *dev_id) { unsigned int tx_stat; struct sport_device *sport = dev_id; pr_debug("%s enter\n", __func__); sport_check_status(sport, NULL, NULL, &tx_stat); if (!(tx_stat & DMA_DONE)) { pr_err("tx dma is already stopped\n"); return IRQ_HANDLED; } if (sport->tx_callback) { sport->tx_callback(sport->tx_data); return IRQ_HANDLED; } return IRQ_NONE; } static irqreturn_t err_handler(int irq, void *dev_id) { unsigned int status = 0; struct sport_device *sport = dev_id; pr_debug("%s\n", __func__); if (sport_check_status(sport, &status, NULL, NULL)) { pr_err("error checking status ??"); return IRQ_NONE; } if (status & (TOVF|TUVF|ROVF|RUVF)) { pr_info("sport status error:%s%s%s%s\n", status & TOVF ? " TOVF" : "", status & TUVF ? " TUVF" : "", status & ROVF ? " ROVF" : "", status & RUVF ? 
" RUVF" : ""); if (status & TOVF || status & TUVF) { disable_dma(sport->dma_tx_chan); if (sport->tx_run) sport_tx_dma_start(sport, 0); else sport_tx_dma_start(sport, 1); enable_dma(sport->dma_tx_chan); } else { disable_dma(sport->dma_rx_chan); if (sport->rx_run) sport_rx_dma_start(sport, 0); else sport_rx_dma_start(sport, 1); enable_dma(sport->dma_rx_chan); } } status = sport->regs->stat; if (status & (TOVF|TUVF|ROVF|RUVF)) sport->regs->stat = (status & (TOVF|TUVF|ROVF|RUVF)); SSYNC(); if (sport->err_callback) sport->err_callback(sport->err_data); return IRQ_HANDLED; } int sport_set_rx_callback(struct sport_device *sport, void (*rx_callback)(void *), void *rx_data) { if (WARN_ON(!rx_callback)) return -EINVAL; sport->rx_callback = rx_callback; sport->rx_data = rx_data; return 0; } EXPORT_SYMBOL(sport_set_rx_callback); int sport_set_tx_callback(struct sport_device *sport, void (*tx_callback)(void *), void *tx_data) { if (WARN_ON(!tx_callback)) return -EINVAL; sport->tx_callback = tx_callback; sport->tx_data = tx_data; return 0; } EXPORT_SYMBOL(sport_set_tx_callback); int sport_set_err_callback(struct sport_device *sport, void (*err_callback)(void *), void *err_data) { if (WARN_ON(!err_callback)) return -EINVAL; sport->err_callback = err_callback; sport->err_data = err_data; return 0; } EXPORT_SYMBOL(sport_set_err_callback); static int sport_config_pdev(struct platform_device *pdev, struct sport_param *param) { /* Extract settings from platform data */ struct device *dev = &pdev->dev; struct bfin_snd_platform_data *pdata = dev->platform_data; struct resource *res; param->num = pdev->id; if (!pdata) { dev_err(dev, "no platform_data\n"); return -ENODEV; } param->pin_req = pdata->pin_req; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { dev_err(dev, "no MEM resource\n"); return -ENODEV; } param->regs = (struct sport_register *)res->start; /* first RX, then TX */ res = platform_get_resource(pdev, IORESOURCE_DMA, 0); if (!res) { dev_err(dev, "no rx DMA 
resource\n"); return -ENODEV; } param->dma_rx_chan = res->start; res = platform_get_resource(pdev, IORESOURCE_DMA, 1); if (!res) { dev_err(dev, "no tx DMA resource\n"); return -ENODEV; } param->dma_tx_chan = res->start; res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (!res) { dev_err(dev, "no irq resource\n"); return -ENODEV; } param->err_irq = res->start; return 0; } struct sport_device *sport_init(struct platform_device *pdev, unsigned int wdsize, unsigned int dummy_count, size_t priv_size) { struct device *dev = &pdev->dev; struct sport_param param; struct sport_device *sport; int ret; dev_dbg(dev, "%s enter\n", __func__); param.wdsize = wdsize; param.dummy_count = dummy_count; if (WARN_ON(param.wdsize == 0 || param.dummy_count == 0)) return NULL; ret = sport_config_pdev(pdev, &param); if (ret) return NULL; if (peripheral_request_list(param.pin_req, "soc-audio")) { dev_err(dev, "requesting Peripherals failed\n"); return NULL; } sport = kzalloc(sizeof(*sport), GFP_KERNEL); if (!sport) { dev_err(dev, "failed to allocate for sport device\n"); goto __init_err0; } sport->num = param.num; sport->dma_rx_chan = param.dma_rx_chan; sport->dma_tx_chan = param.dma_tx_chan; sport->err_irq = param.err_irq; sport->regs = param.regs; sport->pin_req = param.pin_req; if (request_dma(sport->dma_rx_chan, "SPORT RX Data") == -EBUSY) { dev_err(dev, "failed to request RX dma %d\n", sport->dma_rx_chan); goto __init_err1; } if (set_dma_callback(sport->dma_rx_chan, rx_handler, sport) != 0) { dev_err(dev, "failed to request RX irq %d\n", sport->dma_rx_chan); goto __init_err2; } if (request_dma(sport->dma_tx_chan, "SPORT TX Data") == -EBUSY) { dev_err(dev, "failed to request TX dma %d\n", sport->dma_tx_chan); goto __init_err2; } if (set_dma_callback(sport->dma_tx_chan, tx_handler, sport) != 0) { dev_err(dev, "failed to request TX irq %d\n", sport->dma_tx_chan); goto __init_err3; } if (request_irq(sport->err_irq, err_handler, IRQF_SHARED, "SPORT err", sport) < 0) { dev_err(dev, 
"failed to request err irq %d\n", sport->err_irq); goto __init_err3; } dev_info(dev, "dma rx:%d tx:%d, err irq:%d, regs:%p\n", sport->dma_rx_chan, sport->dma_tx_chan, sport->err_irq, sport->regs); sport->wdsize = param.wdsize; sport->dummy_count = param.dummy_count; sport->private_data = kzalloc(priv_size, GFP_KERNEL); if (!sport->private_data) { dev_err(dev, "could not alloc priv data %zu bytes\n", priv_size); goto __init_err4; } if (L1_DATA_A_LENGTH) sport->dummy_buf = l1_data_sram_zalloc(param.dummy_count * 2); else sport->dummy_buf = kzalloc(param.dummy_count * 2, GFP_KERNEL); if (sport->dummy_buf == NULL) { dev_err(dev, "failed to allocate dummy buffer\n"); goto __error1; } ret = sport_config_rx_dummy(sport); if (ret) { dev_err(dev, "failed to config rx dummy ring\n"); goto __error2; } ret = sport_config_tx_dummy(sport); if (ret) { dev_err(dev, "failed to config tx dummy ring\n"); goto __error3; } platform_set_drvdata(pdev, sport); return sport; __error3: if (L1_DATA_A_LENGTH) l1_data_sram_free(sport->dummy_rx_desc); else dma_free_coherent(NULL, 2*sizeof(struct dmasg), sport->dummy_rx_desc, 0); __error2: if (L1_DATA_A_LENGTH) l1_data_sram_free(sport->dummy_buf); else kfree(sport->dummy_buf); __error1: kfree(sport->private_data); __init_err4: free_irq(sport->err_irq, sport); __init_err3: free_dma(sport->dma_tx_chan); __init_err2: free_dma(sport->dma_rx_chan); __init_err1: kfree(sport); __init_err0: peripheral_free_list(param.pin_req); return NULL; } EXPORT_SYMBOL(sport_init); void sport_done(struct sport_device *sport) { if (sport == NULL) return; sport_stop(sport); if (sport->dma_rx_desc) dma_free_coherent(NULL, sport->rx_desc_bytes, sport->dma_rx_desc, 0); if (sport->dma_tx_desc) dma_free_coherent(NULL, sport->tx_desc_bytes, sport->dma_tx_desc, 0); #if L1_DATA_A_LENGTH != 0 l1_data_sram_free(sport->dummy_rx_desc); l1_data_sram_free(sport->dummy_tx_desc); l1_data_sram_free(sport->dummy_buf); #else dma_free_coherent(NULL, 2*sizeof(struct dmasg), 
sport->dummy_rx_desc, 0); dma_free_coherent(NULL, 2*sizeof(struct dmasg), sport->dummy_tx_desc, 0); kfree(sport->dummy_buf); #endif free_dma(sport->dma_rx_chan); free_dma(sport->dma_tx_chan); free_irq(sport->err_irq, sport); kfree(sport->private_data); peripheral_free_list(sport->pin_req); kfree(sport); } EXPORT_SYMBOL(sport_done); /* * It is only used to send several bytes when dma is not enabled * sport controller is configured but not enabled. * Multichannel cannot works with pio mode */ /* Used by ac97 to write and read codec register */ int sport_send_and_recv(struct sport_device *sport, u8 *out_data, \ u8 *in_data, int len) { unsigned short dma_config; unsigned short status; unsigned long flags; unsigned long wait = 0; pr_debug("%s enter, out_data:%p, in_data:%p len:%d\n", \ __func__, out_data, in_data, len); pr_debug("tcr1:0x%04x, tcr2:0x%04x, tclkdiv:0x%04x, tfsdiv:0x%04x\n" "mcmc1:0x%04x, mcmc2:0x%04x\n", sport->regs->tcr1, sport->regs->tcr2, sport->regs->tclkdiv, sport->regs->tfsdiv, sport->regs->mcmc1, sport->regs->mcmc2); flush_dcache_range((unsigned)out_data, (unsigned)(out_data + len)); /* Enable tx dma */ dma_config = (RESTART | WDSIZE_16 | DI_EN); set_dma_start_addr(sport->dma_tx_chan, (unsigned long)out_data); set_dma_x_count(sport->dma_tx_chan, len/2); set_dma_x_modify(sport->dma_tx_chan, 2); set_dma_config(sport->dma_tx_chan, dma_config); enable_dma(sport->dma_tx_chan); if (in_data != NULL) { invalidate_dcache_range((unsigned)in_data, \ (unsigned)(in_data + len)); /* Enable rx dma */ dma_config = (RESTART | WDSIZE_16 | WNR | DI_EN); set_dma_start_addr(sport->dma_rx_chan, (unsigned long)in_data); set_dma_x_count(sport->dma_rx_chan, len/2); set_dma_x_modify(sport->dma_rx_chan, 2); set_dma_config(sport->dma_rx_chan, dma_config); enable_dma(sport->dma_rx_chan); } local_irq_save(flags); sport->regs->tcr1 |= TSPEN; sport->regs->rcr1 |= RSPEN; SSYNC(); status = get_dma_curr_irqstat(sport->dma_tx_chan); while (status & DMA_RUN) { udelay(1); status = 
get_dma_curr_irqstat(sport->dma_tx_chan); pr_debug("DMA status:0x%04x\n", status); if (wait++ > 100) goto __over; } status = sport->regs->stat; wait = 0; while (!(status & TXHRE)) { pr_debug("sport status:0x%04x\n", status); udelay(1); status = *(unsigned short *)&sport->regs->stat; if (wait++ > 1000) goto __over; } /* Wait for the last byte sent out */ udelay(20); pr_debug("sport status:0x%04x\n", status); __over: sport->regs->tcr1 &= ~TSPEN; sport->regs->rcr1 &= ~RSPEN; SSYNC(); disable_dma(sport->dma_tx_chan); /* Clear the status */ clear_dma_irqstat(sport->dma_tx_chan); if (in_data != NULL) { disable_dma(sport->dma_rx_chan); clear_dma_irqstat(sport->dma_rx_chan); } SSYNC(); local_irq_restore(flags); return 0; } EXPORT_SYMBOL(sport_send_and_recv); MODULE_AUTHOR("Roy Huang"); MODULE_DESCRIPTION("SPORT driver for ADI Blackfin"); MODULE_LICENSE("GPL");
gpl-2.0
carvsdriver/android_kernel_samsung_n5110-common
drivers/lguest/lguest_user.c
2529
15746
/*P:200 This contains all the /dev/lguest code, whereby the userspace launcher * controls and communicates with the Guest. For example, the first write will * tell us the Guest's memory layout and entry point. A read will run the * Guest until something happens, such as a signal or the Guest doing a NOTIFY * out to the Launcher. :*/ #include <linux/uaccess.h> #include <linux/miscdevice.h> #include <linux/fs.h> #include <linux/sched.h> #include <linux/eventfd.h> #include <linux/file.h> #include <linux/slab.h> #include "lg.h" /*L:056 * Before we move on, let's jump ahead and look at what the kernel does when * it needs to look up the eventfds. That will complete our picture of how we * use RCU. * * The notification value is in cpu->pending_notify: we return true if it went * to an eventfd. */ bool send_notify_to_eventfd(struct lg_cpu *cpu) { unsigned int i; struct lg_eventfd_map *map; /* * This "rcu_read_lock()" helps track when someone is still looking at * the (RCU-using) eventfds array. It's not actually a lock at all; * indeed it's a noop in many configurations. (You didn't expect me to * explain all the RCU secrets here, did you?) */ rcu_read_lock(); /* * rcu_dereference is the counter-side of rcu_assign_pointer(); it * makes sure we don't access the memory pointed to by * cpu->lg->eventfds before cpu->lg->eventfds is set. Sounds crazy, * but Alpha allows this! Paul McKenney points out that a really * aggressive compiler could have the same effect: * http://lists.ozlabs.org/pipermail/lguest/2009-July/001560.html * * So play safe, use rcu_dereference to get the rcu-protected pointer: */ map = rcu_dereference(cpu->lg->eventfds); /* * Simple array search: even if they add an eventfd while we do this, * we'll continue to use the old array and just won't see the new one. 
*/ for (i = 0; i < map->num; i++) { if (map->map[i].addr == cpu->pending_notify) { eventfd_signal(map->map[i].event, 1); cpu->pending_notify = 0; break; } } /* We're done with the rcu-protected variable cpu->lg->eventfds. */ rcu_read_unlock(); /* If we cleared the notification, it's because we found a match. */ return cpu->pending_notify == 0; } /*L:055 * One of the more tricksy tricks in the Linux Kernel is a technique called * Read Copy Update. Since one point of lguest is to teach lguest journeyers * about kernel coding, I use it here. (In case you're curious, other purposes * include learning about virtualization and instilling a deep appreciation for * simplicity and puppies). * * We keep a simple array which maps LHCALL_NOTIFY values to eventfds, but we * add new eventfds without ever blocking readers from accessing the array. * The current Launcher only does this during boot, so that never happens. But * Read Copy Update is cool, and adding a lock risks damaging even more puppies * than this code does. * * We allocate a brand new one-larger array, copy the old one and add our new * element. Then we make the lg eventfd pointer point to the new array. * That's the easy part: now we need to free the old one, but we need to make * sure no slow CPU somewhere is still looking at it. That's what * synchronize_rcu does for us: waits until every CPU has indicated that it has * moved on to know it's no longer using the old one. * * If that's unclear, see http://en.wikipedia.org/wiki/Read-copy-update. */ static int add_eventfd(struct lguest *lg, unsigned long addr, int fd) { struct lg_eventfd_map *new, *old = lg->eventfds; /* * We don't allow notifications on value 0 anyway (pending_notify of * 0 means "nothing pending"). */ if (!addr) return -EINVAL; /* * Replace the old array with the new one, carefully: others can * be accessing it at the same time. 
*/ new = kmalloc(sizeof(*new) + sizeof(new->map[0]) * (old->num + 1), GFP_KERNEL); if (!new) return -ENOMEM; /* First make identical copy. */ memcpy(new->map, old->map, sizeof(old->map[0]) * old->num); new->num = old->num; /* Now append new entry. */ new->map[new->num].addr = addr; new->map[new->num].event = eventfd_ctx_fdget(fd); if (IS_ERR(new->map[new->num].event)) { int err = PTR_ERR(new->map[new->num].event); kfree(new); return err; } new->num++; /* * Now put new one in place: rcu_assign_pointer() is a fancy way of * doing "lg->eventfds = new", but it uses memory barriers to make * absolutely sure that the contents of "new" written above is nailed * down before we actually do the assignment. * * We have to think about these kinds of things when we're operating on * live data without locks. */ rcu_assign_pointer(lg->eventfds, new); /* * We're not in a big hurry. Wait until no one's looking at old * version, then free it. */ synchronize_rcu(); kfree(old); return 0; } /*L:052 * Receiving notifications from the Guest is usually done by attaching a * particular LHCALL_NOTIFY value to an event filedescriptor. The eventfd will * become readable when the Guest does an LHCALL_NOTIFY with that value. * * This is really convenient for processing each virtqueue in a separate * thread. */ static int attach_eventfd(struct lguest *lg, const unsigned long __user *input) { unsigned long addr, fd; int err; if (get_user(addr, input) != 0) return -EFAULT; input++; if (get_user(fd, input) != 0) return -EFAULT; /* * Just make sure two callers don't add eventfds at once. We really * only need to lock against callers adding to the same Guest, so using * the Big Lguest Lock is overkill. But this is setup, not a fast path. */ mutex_lock(&lguest_lock); err = add_eventfd(lg, addr, fd); mutex_unlock(&lguest_lock); return err; } /*L:050 * Sending an interrupt is done by writing LHREQ_IRQ and an interrupt * number to /dev/lguest. 
*/ static int user_send_irq(struct lg_cpu *cpu, const unsigned long __user *input) { unsigned long irq; if (get_user(irq, input) != 0) return -EFAULT; if (irq >= LGUEST_IRQS) return -EINVAL; /* * Next time the Guest runs, the core code will see if it can deliver * this interrupt. */ set_interrupt(cpu, irq); return 0; } /*L:040 * Once our Guest is initialized, the Launcher makes it run by reading * from /dev/lguest. */ static ssize_t read(struct file *file, char __user *user, size_t size,loff_t*o) { struct lguest *lg = file->private_data; struct lg_cpu *cpu; unsigned int cpu_id = *o; /* You must write LHREQ_INITIALIZE first! */ if (!lg) return -EINVAL; /* Watch out for arbitrary vcpu indexes! */ if (cpu_id >= lg->nr_cpus) return -EINVAL; cpu = &lg->cpus[cpu_id]; /* If you're not the task which owns the Guest, go away. */ if (current != cpu->tsk) return -EPERM; /* If the Guest is already dead, we indicate why */ if (lg->dead) { size_t len; /* lg->dead either contains an error code, or a string. */ if (IS_ERR(lg->dead)) return PTR_ERR(lg->dead); /* We can only return as much as the buffer they read with. */ len = min(size, strlen(lg->dead)+1); if (copy_to_user(user, lg->dead, len) != 0) return -EFAULT; return len; } /* * If we returned from read() last time because the Guest sent I/O, * clear the flag. */ if (cpu->pending_notify) cpu->pending_notify = 0; /* Run the Guest until something interesting happens. */ return run_guest(cpu, (unsigned long __user *)user); } /*L:025 * This actually initializes a CPU. For the moment, a Guest is only * uniprocessor, so "id" is always 0. */ static int lg_cpu_start(struct lg_cpu *cpu, unsigned id, unsigned long start_ip) { /* We have a limited number the number of CPUs in the lguest struct. */ if (id >= ARRAY_SIZE(cpu->lg->cpus)) return -EINVAL; /* Set up this CPU's id, and pointer back to the lguest struct. 
*/ cpu->id = id; cpu->lg = container_of((cpu - id), struct lguest, cpus[0]); cpu->lg->nr_cpus++; /* Each CPU has a timer it can set. */ init_clockdev(cpu); /* * We need a complete page for the Guest registers: they are accessible * to the Guest and we can only grant it access to whole pages. */ cpu->regs_page = get_zeroed_page(GFP_KERNEL); if (!cpu->regs_page) return -ENOMEM; /* We actually put the registers at the bottom of the page. */ cpu->regs = (void *)cpu->regs_page + PAGE_SIZE - sizeof(*cpu->regs); /* * Now we initialize the Guest's registers, handing it the start * address. */ lguest_arch_setup_regs(cpu, start_ip); /* * We keep a pointer to the Launcher task (ie. current task) for when * other Guests want to wake this one (eg. console input). */ cpu->tsk = current; /* * We need to keep a pointer to the Launcher's memory map, because if * the Launcher dies we need to clean it up. If we don't keep a * reference, it is destroyed before close() is called. */ cpu->mm = get_task_mm(cpu->tsk); /* * We remember which CPU's pages this Guest used last, for optimization * when the same Guest runs on the same CPU twice. */ cpu->last_pages = NULL; /* No error == success. */ return 0; } /*L:020 * The initialization write supplies 3 pointer sized (32 or 64 bit) values (in * addition to the LHREQ_INITIALIZE value). These are: * * base: The start of the Guest-physical memory inside the Launcher memory. * * pfnlimit: The highest (Guest-physical) page number the Guest should be * allowed to access. The Guest memory lives inside the Launcher, so it sets * this to ensure the Guest can only reach its own memory. * * start: The first instruction to execute ("eip" in x86-speak). */ static int initialize(struct file *file, const unsigned long __user *input) { /* "struct lguest" contains all we (the Host) know about a Guest. */ struct lguest *lg; int err; unsigned long args[3]; /* * We grab the Big Lguest lock, which protects against multiple * simultaneous initializations. 
*/ mutex_lock(&lguest_lock); /* You can't initialize twice! Close the device and start again... */ if (file->private_data) { err = -EBUSY; goto unlock; } if (copy_from_user(args, input, sizeof(args)) != 0) { err = -EFAULT; goto unlock; } lg = kzalloc(sizeof(*lg), GFP_KERNEL); if (!lg) { err = -ENOMEM; goto unlock; } lg->eventfds = kmalloc(sizeof(*lg->eventfds), GFP_KERNEL); if (!lg->eventfds) { err = -ENOMEM; goto free_lg; } lg->eventfds->num = 0; /* Populate the easy fields of our "struct lguest" */ lg->mem_base = (void __user *)args[0]; lg->pfn_limit = args[1]; /* This is the first cpu (cpu 0) and it will start booting at args[2] */ err = lg_cpu_start(&lg->cpus[0], 0, args[2]); if (err) goto free_eventfds; /* * Initialize the Guest's shadow page tables, using the toplevel * address the Launcher gave us. This allocates memory, so can fail. */ err = init_guest_pagetable(lg); if (err) goto free_regs; /* We keep our "struct lguest" in the file's private_data. */ file->private_data = lg; mutex_unlock(&lguest_lock); /* And because this is a write() call, we return the length used. */ return sizeof(args); free_regs: /* FIXME: This should be in free_vcpu */ free_page(lg->cpus[0].regs_page); free_eventfds: kfree(lg->eventfds); free_lg: kfree(lg); unlock: mutex_unlock(&lguest_lock); return err; } /*L:010 * The first operation the Launcher does must be a write. All writes * start with an unsigned long number: for the first write this must be * LHREQ_INITIALIZE to set up the Guest. After that the Launcher can use * writes of other values to send interrupts or set up receipt of notifications. * * Note that we overload the "offset" in the /dev/lguest file to indicate what * CPU number we're dealing with. Currently this is always 0 since we only * support uniprocessor Guests, but you can see the beginnings of SMP support * here. 
*/ static ssize_t write(struct file *file, const char __user *in, size_t size, loff_t *off) { /* * Once the Guest is initialized, we hold the "struct lguest" in the * file private data. */ struct lguest *lg = file->private_data; const unsigned long __user *input = (const unsigned long __user *)in; unsigned long req; struct lg_cpu *uninitialized_var(cpu); unsigned int cpu_id = *off; /* The first value tells us what this request is. */ if (get_user(req, input) != 0) return -EFAULT; input++; /* If you haven't initialized, you must do that first. */ if (req != LHREQ_INITIALIZE) { if (!lg || (cpu_id >= lg->nr_cpus)) return -EINVAL; cpu = &lg->cpus[cpu_id]; /* Once the Guest is dead, you can only read() why it died. */ if (lg->dead) return -ENOENT; } switch (req) { case LHREQ_INITIALIZE: return initialize(file, input); case LHREQ_IRQ: return user_send_irq(cpu, input); case LHREQ_EVENTFD: return attach_eventfd(lg, input); default: return -EINVAL; } } /*L:060 * The final piece of interface code is the close() routine. It reverses * everything done in initialize(). This is usually called because the * Launcher exited. * * Note that the close routine returns 0 or a negative error number: it can't * really fail, but it can whine. I blame Sun for this wart, and K&R C for * letting them do it. :*/ static int close(struct inode *inode, struct file *file) { struct lguest *lg = file->private_data; unsigned int i; /* If we never successfully initialized, there's nothing to clean up */ if (!lg) return 0; /* * We need the big lock, to protect from inter-guest I/O and other * Launchers initializing guests. */ mutex_lock(&lguest_lock); /* Free up the shadow page tables for the Guest. */ free_guest_pagetable(lg); for (i = 0; i < lg->nr_cpus; i++) { /* Cancels the hrtimer set via LHCALL_SET_CLOCKEVENT. */ hrtimer_cancel(&lg->cpus[i].hrt); /* We can free up the register page we allocated. 
*/ free_page(lg->cpus[i].regs_page); /* * Now all the memory cleanups are done, it's safe to release * the Launcher's memory management structure. */ mmput(lg->cpus[i].mm); } /* Release any eventfds they registered. */ for (i = 0; i < lg->eventfds->num; i++) eventfd_ctx_put(lg->eventfds->map[i].event); kfree(lg->eventfds); /* * If lg->dead doesn't contain an error code it will be NULL or a * kmalloc()ed string, either of which is ok to hand to kfree(). */ if (!IS_ERR(lg->dead)) kfree(lg->dead); /* Free the memory allocated to the lguest_struct */ kfree(lg); /* Release lock and exit. */ mutex_unlock(&lguest_lock); return 0; } /*L:000 * Welcome to our journey through the Launcher! * * The Launcher is the Host userspace program which sets up, runs and services * the Guest. In fact, many comments in the Drivers which refer to "the Host" * doing things are inaccurate: the Launcher does all the device handling for * the Guest, but the Guest can't know that. * * Just to confuse you: to the Host kernel, the Launcher *is* the Guest and we * shall see more of that later. * * We begin our understanding with the Host kernel interface which the Launcher * uses: reading and writing a character device called /dev/lguest. All the * work happens in the read(), write() and close() routines: */ static const struct file_operations lguest_fops = { .owner = THIS_MODULE, .release = close, .write = write, .read = read, .llseek = default_llseek, }; /* * This is a textbook example of a "misc" character device. Populate a "struct * miscdevice" and register it with misc_register(). */ static struct miscdevice lguest_dev = { .minor = MISC_DYNAMIC_MINOR, .name = "lguest", .fops = &lguest_fops, }; int __init lguest_device_init(void) { return misc_register(&lguest_dev); } void __exit lguest_device_remove(void) { misc_deregister(&lguest_dev); }
gpl-2.0
yu-validus/kernel_cyanogen_msm8916
drivers/net/wireless/rtlwifi/rtl8188ee/led.c
2785
4447
/****************************************************************************** * * Copyright(c) 2009-2013 Realtek Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * * Contact Information: * wlanfae <wlanfae@realtek.com> * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, * Hsinchu 300, Taiwan. 
* * Larry Finger <Larry.Finger@lwfinger.net> * *****************************************************************************/ #include "../wifi.h" #include "../pci.h" #include "reg.h" #include "led.h" static void rtl88ee_init_led(struct ieee80211_hw *hw, struct rtl_led *pled, enum rtl_led_pin ledpin) { pled->hw = hw; pled->ledpin = ledpin; pled->ledon = false; } void rtl88ee_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled) { u8 ledcfg; struct rtl_priv *rtlpriv = rtl_priv(hw); RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, "LedAddr:%X ledpin =%d\n", REG_LEDCFG2, pled->ledpin); switch (pled->ledpin) { case LED_PIN_GPIO0: break; case LED_PIN_LED0: ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG2); rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg & 0xf0) | BIT(5) | BIT(6)); break; case LED_PIN_LED1: ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG1); rtl_write_byte(rtlpriv, REG_LEDCFG1, ledcfg & 0x10); break; default: RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "switch case not processed\n"); break; } pled->ledon = true; } void rtl88ee_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); u8 ledcfg; u8 val; RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, "LedAddr:%X ledpin =%d\n", REG_LEDCFG2, pled->ledpin); switch (pled->ledpin) { case LED_PIN_GPIO0: break; case LED_PIN_LED0: ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG2); ledcfg &= 0xf0; val = ledcfg | BIT(3) | BIT(5) | BIT(6); if (pcipriv->ledctl.led_opendrain == true) { rtl_write_byte(rtlpriv, REG_LEDCFG2, val); ledcfg = rtl_read_byte(rtlpriv, REG_MAC_PINMUX_CFG); val = ledcfg & 0xFE; rtl_write_byte(rtlpriv, REG_MAC_PINMUX_CFG, val); } else { rtl_write_byte(rtlpriv, REG_LEDCFG2, val); } break; case LED_PIN_LED1: ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG1); ledcfg &= 0x10; rtl_write_byte(rtlpriv, REG_LEDCFG1, (ledcfg | BIT(3))); break; default: RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "switch case not processed\n"); break; } pled->ledon = false; } void 
rtl88ee_init_sw_leds(struct ieee80211_hw *hw) { struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); rtl88ee_init_led(hw, &(pcipriv->ledctl.sw_led0), LED_PIN_LED0); rtl88ee_init_led(hw, &(pcipriv->ledctl.sw_led1), LED_PIN_LED1); } static void rtl88ee_sw_led_control(struct ieee80211_hw *hw, enum led_ctl_mode ledaction) { struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); struct rtl_led *pLed0 = &(pcipriv->ledctl.sw_led0); switch (ledaction) { case LED_CTL_POWER_ON: case LED_CTL_LINK: case LED_CTL_NO_LINK: rtl88ee_sw_led_on(hw, pLed0); break; case LED_CTL_POWER_OFF: rtl88ee_sw_led_off(hw, pLed0); break; default: break; } } void rtl88ee_led_control(struct ieee80211_hw *hw, enum led_ctl_mode ledaction) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); if ((ppsc->rfoff_reason > RF_CHANGE_BY_PS) && (ledaction == LED_CTL_TX || ledaction == LED_CTL_RX || ledaction == LED_CTL_SITE_SURVEY || ledaction == LED_CTL_LINK || ledaction == LED_CTL_NO_LINK || ledaction == LED_CTL_START_TO_LINK || ledaction == LED_CTL_POWER_ON)) { return; } RT_TRACE(rtlpriv, COMP_LED, DBG_TRACE, "ledaction %d,\n", ledaction); rtl88ee_sw_led_control(hw, ledaction); }
gpl-2.0
yangjoo/kernel_samsung_smdk4412
drivers/media/dvb/frontends/stv0288.c
2785
13891
/*
	Driver for ST STV0288 demodulator
	Copyright (C) 2006 Georg Acher, BayCom GmbH, acher (at) baycom
	(dot) de
		for Reel Multimedia
	Copyright (C) 2008 TurboSight.com, Bob Liu <bob@turbosight.com>
	Copyright (C) 2008 Igor M. Liplianin <liplianin@me.by>
		Removed stb6000 specific tuner code and revised some
		procedures.
	2010-09-01 Josef Pavlik <josef@pavlik.it>
		Fixed diseqc_msg, diseqc_burst and set_tone problems

	This program is free software; you can redistribute it and/or modify
	it under the terms of the GNU General Public License as published by
	the Free Software Foundation; either version 2 of the License, or
	(at your option) any later version.

	This program is distributed in the hope that it will be useful,
	but WITHOUT ANY WARRANTY; without even the implied warranty of
	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
	GNU General Public License for more details.

	You should have received a copy of the GNU General Public License
	along with this program; if not, write to the Free Software
	Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.

*/

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <asm/div64.h>

#include "dvb_frontend.h"
#include "stv0288.h"

/* Per-device driver state, embedded around the dvb_frontend object. */
struct stv0288_state {
	struct i2c_adapter *i2c;
	const struct stv0288_config *config;
	struct dvb_frontend frontend;

	u8 initialised:1;
	u32 tuner_frequency;
	u32 symbol_rate;
	fe_code_rate_t fec_inner;
	int errmode;	/* which counter registers 0x26/0x27 report */
};

#define STATUS_BER 0
#define STATUS_UCBLOCKS 1

static int debug;
static int debug_legacy_dish_switch;
#define dprintk(args...) \
	do { \
		if (debug) \
			printk(KERN_DEBUG "stv0288: " args); \
	} while (0)

/* Write a single demodulator register over I2C.
 * Returns 0 on success, -EREMOTEIO on transfer failure. */
static int stv0288_writeregI(struct stv0288_state *state, u8 reg, u8 data)
{
	int ret;
	u8 buf[] = { reg, data };
	struct i2c_msg msg = {
		.addr = state->config->demod_address,
		.flags = 0,
		.buf = buf,
		.len = 2
	};

	ret = i2c_transfer(state->i2c, &msg, 1);

	if (ret != 1)
		dprintk("%s: writereg error (reg == 0x%02x, val == 0x%02x, "
			"ret == %i)\n", __func__, reg, data, ret);

	return (ret != 1) ? -EREMOTEIO : 0;
}

/* dvb_frontend_ops.write hook: expects exactly { reg, value }. */
static int stv0288_write(struct dvb_frontend *fe, const u8 buf[], int len)
{
	struct stv0288_state *state = fe->demodulator_priv;

	if (len != 2)
		return -EINVAL;

	return stv0288_writeregI(state, buf[0], buf[1]);
}

/* Read a single demodulator register. On I2C failure the (stale) local
 * buffer value is returned and a debug message is logged. */
static u8 stv0288_readreg(struct stv0288_state *state, u8 reg)
{
	int ret;
	u8 b0[] = { reg };
	u8 b1[] = { 0 };
	struct i2c_msg msg[] = {
		{
			.addr = state->config->demod_address,
			.flags = 0,
			.buf = b0,
			.len = 1
		}, {
			.addr = state->config->demod_address,
			.flags = I2C_M_RD,
			.buf = b1,
			.len = 1
		}
	};

	ret = i2c_transfer(state->i2c, msg, 2);

	if (ret != 2)
		dprintk("%s: readreg error (reg == 0x%02x, ret == %i)\n",
				__func__, reg, ret);

	return b1[0];
}

/* Program the symbol-rate registers SFRH/SFRM/SFRL.
 * Valid range is 1..45 MS/s; the 20-bit value written is
 * srate_kS * 32768 / 3125. */
static int stv0288_set_symbolrate(struct dvb_frontend *fe, u32 srate)
{
	struct stv0288_state *state = fe->demodulator_priv;
	unsigned int temp;
	unsigned char b[3];

	if ((srate < 1000000) || (srate > 45000000))
		return -EINVAL;

	temp = (unsigned int)srate / 1000;

	temp = temp * 32768;
	temp = temp / 25;
	temp = temp / 125;
	b[0] = (unsigned char)((temp >> 12) & 0xff);
	b[1] = (unsigned char)((temp >> 4) & 0xff);
	b[2] = (unsigned char)((temp << 4) & 0xf0);
	/* Clear first, then load the new rate. */
	stv0288_writeregI(state, 0x28, 0x80); /* SFRH */
	stv0288_writeregI(state, 0x29, 0); /* SFRM */
	stv0288_writeregI(state, 0x2a, 0); /* SFRL */

	stv0288_writeregI(state, 0x28, b[0]);
	stv0288_writeregI(state, 0x29, b[1]);
	stv0288_writeregI(state, 0x2a, b[2]);
	dprintk("stv0288: stv0288_set_symbolrate\n");

	return 0;
}

/* Send a DiSEqC master command byte-by-byte in modulated single-shot
 * mode, then wait ~12 ms per byte for the burst to go out. */
static int stv0288_send_diseqc_msg(struct dvb_frontend *fe,
				    struct dvb_diseqc_master_cmd *m)
{
	struct stv0288_state *state = fe->demodulator_priv;

	int i;

	dprintk("%s\n", __func__);

	stv0288_writeregI(state, 0x09, 0);
	msleep(30);
	stv0288_writeregI(state, 0x05, 0x12);/* modulated mode, single shot */

	for (i = 0; i < m->msg_len; i++) {
		if (stv0288_writeregI(state, 0x06, m->msg[i]))
			return -EREMOTEIO;
	}
	msleep(m->msg_len*12);
	return 0;
}

/* Emit a DiSEqC mini burst (tone burst A = unmodulated, B = modulated). */
static int stv0288_send_diseqc_burst(struct dvb_frontend *fe,
						fe_sec_mini_cmd_t burst)
{
	struct stv0288_state *state = fe->demodulator_priv;

	dprintk("%s\n", __func__);

	if (stv0288_writeregI(state, 0x05, 0x03))/* burst mode, single shot */
		return -EREMOTEIO;
	if (stv0288_writeregI(state, 0x06, burst == SEC_MINI_A ? 0x00 : 0xff))
		return -EREMOTEIO;

	msleep(15);
	if (stv0288_writeregI(state, 0x05, 0x12))
		return -EREMOTEIO;

	return 0;
}

/* Switch the 22 kHz continuous tone on or off. */
static int stv0288_set_tone(struct dvb_frontend *fe, fe_sec_tone_mode_t tone)
{
	struct stv0288_state *state = fe->demodulator_priv;

	switch (tone) {
	case SEC_TONE_ON:
		if (stv0288_writeregI(state, 0x05, 0x10))/* cont carrier */
			return -EREMOTEIO;
		break;

	case SEC_TONE_OFF:
		if (stv0288_writeregI(state, 0x05, 0x12))/* burst mode off*/
			return -EREMOTEIO;
		break;

	default:
		return -EINVAL;
	}
	return 0;
}

/* Default register init table: (reg, value) pairs, 0xff/0xff terminated.
 * Used when the board config does not supply its own inittab. */
static u8 stv0288_inittab[] = {
	0x01, 0x15,
	0x02, 0x20,
	0x09, 0x0,
	0x0a, 0x4,
	0x0b, 0x0,
	0x0c, 0x0,
	0x0d, 0x0,
	0x0e, 0xd4,
	0x0f, 0x30,
	0x11, 0x80,
	0x12, 0x03,
	0x13, 0x48,
	0x14, 0x84,
	0x15, 0x45,
	0x16, 0xb7,
	0x17, 0x9c,
	0x18, 0x0,
	0x19, 0xa6,
	0x1a, 0x88,
	0x1b, 0x8f,
	0x1c, 0xf0,
	0x20, 0x0b,
	0x21, 0x54,
	0x22, 0x0,
	0x23, 0x0,
	0x2b, 0xff,
	0x2c, 0xf7,
	0x30, 0x0,
	0x31, 0x1e,
	0x32, 0x14,
	0x33, 0x0f,
	0x34, 0x09,
	0x35, 0x0c,
	0x36, 0x05,
	0x37, 0x2f,
	0x38, 0x16,
	0x39, 0xbe,
	0x3a, 0x0,
	0x3b, 0x13,
	0x3c, 0x11,
	0x3d, 0x30,
	0x40, 0x63,
	0x41, 0x04,
	0x42, 0x20,
	0x43, 0x00,
	0x44, 0x00,
	0x45, 0x00,
	0x46, 0x00,
	0x47, 0x00,
	0x4a, 0x00,
	0x50, 0x10,
	0x51, 0x38,
	0x52, 0x21,
	0x58, 0x54,
	0x59, 0x86,
	0x5a, 0x0,
	0x5b, 0x9b,
	0x5c, 0x08,
	0x5d, 0x7f,
	0x5e, 0x0,
	0x5f, 0xff,
	0x70, 0x0,
	0x71, 0x0,
	0x72, 0x0,
	0x74, 0x0,
	0x75, 0x0,
	0x76, 0x0,
	0x81, 0x0,
	0x82, 0x3f,
	0x83, 0x3f,
	0x84, 0x0,
	0x85, 0x0,
	0x88, 0x0,
	0x89, 0x0,
	0x8a, 0x0,
	0x8b, 0x0,
	0x8c, 0x0,
	0x90, 0x0,
	0x91, 0x0,
	0x92, 0x0,
	0x93, 0x0,
	0x94, 0x1c,
	0x97, 0x0,
	0xa0, 0x48,
	0xa1, 0x0,
	0xb0, 0xb8,
	0xb1, 0x3a,
	0xb2, 0x10,
	0xb3, 0x82,
	0xb4, 0x80,
	0xb5, 0x82,
	0xb6, 0x82,
	0xb7, 0x82,
	0xb8, 0x20,
	0xb9, 0x0,
	0xf0, 0x0,
	0xf1, 0x0,
	0xf2, 0xc0,
	0x51, 0x36,
	0x52, 0x09,
	0x53, 0x94,
	0x54, 0x62,
	0x55, 0x29,
	0x56, 0x64,
	0x57, 0x2b,
	0xff, 0xff,
};

/* LNB voltage is handled by board-specific hardware; only log it. */
static int stv0288_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t volt)
{
	dprintk("%s: %s\n", __func__,
		volt == SEC_VOLTAGE_13 ? "SEC_VOLTAGE_13" :
		volt == SEC_VOLTAGE_18 ? "SEC_VOLTAGE_18" : "??");

	return 0;
}

/* Soft-reset the chip (reg 0x41) and load the init table, either the
 * board-supplied one or the built-in default. */
static int stv0288_init(struct dvb_frontend *fe)
{
	struct stv0288_state *state = fe->demodulator_priv;
	int i;
	u8 reg;
	u8 val;

	dprintk("stv0288: init chip\n");
	stv0288_writeregI(state, 0x41, 0x04);
	msleep(50);

	/* we have default inittab */
	if (state->config->inittab == NULL) {
		for (i = 0; !(stv0288_inittab[i] == 0xff &&
				stv0288_inittab[i + 1] == 0xff); i += 2)
			stv0288_writeregI(state, stv0288_inittab[i],
					stv0288_inittab[i + 1]);
	} else {
		for (i = 0; ; i += 2)  {
			reg = state->config->inittab[i];
			val = state->config->inittab[i+1];
			if (reg == 0xff && val == 0xff)
				break;
			stv0288_writeregI(state, reg, val);
		}
	}
	return 0;
}

/* Translate the VSTATUS register (0x24) into FE status flags.
 * 0xff is treated as an I2C read failure, i.e. no status. */
static int stv0288_read_status(struct dvb_frontend *fe, fe_status_t *status)
{
	struct stv0288_state *state = fe->demodulator_priv;

	u8 sync = stv0288_readreg(state, 0x24);
	if (sync == 255)
		sync = 0;

	dprintk("%s : FE_READ_STATUS : VSTATUS: 0x%02x\n", __func__, sync);

	*status = 0;
	if (sync & 0x80)
		*status |= FE_HAS_CARRIER | FE_HAS_SIGNAL;
	if (sync & 0x10)
		*status |= FE_HAS_VITERBI;
	if (sync & 0x08) {
		*status |= FE_HAS_LOCK;
		dprintk("stv0288 has locked\n");
	}

	return 0;
}

/* Report the 16-bit error counter (regs 0x26/0x27) as BER. */
static int stv0288_read_ber(struct dvb_frontend *fe, u32 *ber)
{
	struct stv0288_state *state = fe->demodulator_priv;

	if (state->errmode != STATUS_BER)
		return 0;
	*ber = (stv0288_readreg(state, 0x26) << 8) |
					stv0288_readreg(state, 0x27);
	dprintk("stv0288_read_ber %d\n", *ber);

	return 0;
}

/* Derive a 0..0xffff strength value from AGC register 0x10 (inverted
 * and scaled by 5/4, then clamped). */
static int stv0288_read_signal_strength(struct dvb_frontend *fe, u16 *strength)
{
	struct stv0288_state *state = fe->demodulator_priv;

	s32 signal = 0xffff - ((stv0288_readreg(state, 0x10) << 8));


	signal = signal * 5 / 4;
	*strength = (signal > 0xffff) ? 0xffff : (signal < 0) ? 0 : signal;
	dprintk("stv0288_read_signal_strength %d\n", *strength);

	return 0;
}

/* Put the demodulator into standby (reg 0x41 = 0x84). */
static int stv0288_sleep(struct dvb_frontend *fe)
{
	struct stv0288_state *state = fe->demodulator_priv;

	stv0288_writeregI(state, 0x41, 0x84);
	state->initialised = 0;

	return 0;
}

/* SNR from noise indicator regs 0x2d/0x2e, inverted, offset by 0xa100
 * and scaled by 3, clamped to 0..0xffff. */
static int stv0288_read_snr(struct dvb_frontend *fe, u16 *snr)
{
	struct stv0288_state *state = fe->demodulator_priv;

	s32 xsnr = 0xffff - ((stv0288_readreg(state, 0x2d) << 8)
			   | stv0288_readreg(state, 0x2e));
	xsnr = 3 * (xsnr - 0xa100);
	*snr = (xsnr > 0xffff) ? 0xffff : (xsnr < 0) ? 0 : xsnr;
	dprintk("stv0288_read_snr %d\n", *snr);

	return 0;
}

/* Report the same 16-bit counter as uncorrected blocks when the
 * counter is configured for BER mode. */
static int stv0288_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
{
	struct stv0288_state *state = fe->demodulator_priv;

	if (state->errmode != STATUS_BER)
		return 0;
	*ucblocks = (stv0288_readreg(state, 0x26) << 8) |
					stv0288_readreg(state, 0x27);
	/* was mislabelled "stv0288_read_ber" (copy-paste) */
	dprintk("stv0288_read_ucblocks %d\n", *ucblocks);

	return 0;
}

static int stv0288_set_property(struct dvb_frontend *fe,
		struct dtv_property *p)
{
	dprintk("%s(..)\n", __func__);
	return 0;
}

static int stv0288_get_property(struct dvb_frontend *fe,
		struct dtv_property *p)
{
	dprintk("%s(..)\n", __func__);
	return 0;
}

/* Tune: program tuner and symbol rate, then sweep the carrier-offset
 * registers (0x2b/0x2c) from -6 to +6 until the Viterbi decoder
 * reports lock in VSTATUS (reg 0x24 bit 3). */
static int stv0288_set_frontend(struct dvb_frontend *fe,
					struct dvb_frontend_parameters *dfp)
{
	struct stv0288_state *state = fe->demodulator_priv;
	struct dtv_frontend_properties *c = &fe->dtv_property_cache;

	/* Must be signed: plain 'char' is unsigned on some architectures
	 * (e.g. ARM/PowerPC), which made 'tm = -6' wrap to 250 and broke
	 * the offset sweep below. */
	s8 tm;
	unsigned char tda[3];

	dprintk("%s : FE_SET_FRONTEND\n", __func__);

	if (c->delivery_system != SYS_DVBS) {
		dprintk("%s: unsupported delivery "
			"system selected (%d)\n", __func__,
			c->delivery_system);
		return -EOPNOTSUPP;
	}

	if (state->config->set_ts_params)
		state->config->set_ts_params(fe, 0);

	/* only frequency & symbol_rate are used for tuner*/
	dfp->frequency = c->frequency;
	dfp->u.qpsk.symbol_rate = c->symbol_rate;
	if (fe->ops.tuner_ops.set_params) {
		fe->ops.tuner_ops.set_params(fe, dfp);
		if (fe->ops.i2c_gate_ctrl)
			fe->ops.i2c_gate_ctrl(fe, 0);
	}

	udelay(10);
	stv0288_set_symbolrate(fe, c->symbol_rate);
	/* Carrier lock control register */
	stv0288_writeregI(state, 0x15, 0xc5);

	tda[0] = 0x2b; /* CFRM */
	tda[2] = 0x0; /* CFRL */
	for (tm = -6; tm < 7;) {
		/* Viterbi status */
		if (stv0288_readreg(state, 0x24) & 0x8)
			break;

		tda[2] += 40;
		if (tda[2] < 40)	/* CFRL wrapped: advance CFRM step */
			tm++;
		tda[1] = (unsigned char)tm;
		stv0288_writeregI(state, 0x2b, tda[1]);
		stv0288_writeregI(state, 0x2c, tda[2]);
		udelay(30);
	}

	state->tuner_frequency = c->frequency;
	state->fec_inner = FEC_AUTO;
	state->symbol_rate = c->symbol_rate;

	return 0;
}

/* Open (0xb5) or close (0x35) the I2C gate to the tuner via reg 0x01. */
static int stv0288_i2c_gate_ctrl(struct dvb_frontend *fe, int enable)
{
	struct stv0288_state *state = fe->demodulator_priv;

	if (enable)
		stv0288_writeregI(state, 0x01, 0xb5);
	else
		stv0288_writeregI(state, 0x01, 0x35);

	udelay(1);

	return 0;
}

static void stv0288_release(struct dvb_frontend *fe)
{
	struct stv0288_state *state = fe->demodulator_priv;
	kfree(state);
}

static struct dvb_frontend_ops stv0288_ops = {

	.info = {
		.name			= "ST STV0288 DVB-S",
		.type			= FE_QPSK,
		.frequency_min		= 950000,
		.frequency_max		= 2150000,
		.frequency_stepsize	= 1000,	 /* kHz for QPSK frontends */
		.frequency_tolerance	= 0,
		.symbol_rate_min	= 1000000,
		.symbol_rate_max	= 45000000,
		.symbol_rate_tolerance	= 500,	/* ppm */
		.caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
		      FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 |
		      FE_CAN_QPSK |
		      FE_CAN_FEC_AUTO
	},

	.release = stv0288_release,
	.init = stv0288_init,
	.sleep = stv0288_sleep,
	.write = stv0288_write,
	.i2c_gate_ctrl = stv0288_i2c_gate_ctrl,
	.read_status = stv0288_read_status,
	.read_ber = stv0288_read_ber,
	.read_signal_strength = stv0288_read_signal_strength,
	.read_snr = stv0288_read_snr,
	.read_ucblocks = stv0288_read_ucblocks,
	.diseqc_send_master_cmd = stv0288_send_diseqc_msg,
	.diseqc_send_burst = stv0288_send_diseqc_burst,
	.set_tone = stv0288_set_tone,
	.set_voltage = stv0288_set_voltage,
	.set_property = stv0288_set_property,
	.get_property = stv0288_get_property,
	.set_frontend = stv0288_set_frontend,
};

/* Probe the chip (ID register 0x00 must read 0x11) and return an
 * initialised dvb_frontend, or NULL on failure. Caller owns the
 * frontend; stv0288_release() frees the state. */
struct dvb_frontend *stv0288_attach(const struct stv0288_config *config,
				    struct i2c_adapter *i2c)
{
	struct stv0288_state *state = NULL;
	int id;

	/* allocate memory for the internal state */
	state = kzalloc(sizeof(struct stv0288_state), GFP_KERNEL);
	if (state == NULL)
		goto error;

	/* setup the state */
	state->config = config;
	state->i2c = i2c;
	state->initialised = 0;
	state->tuner_frequency = 0;
	state->symbol_rate = 0;
	state->fec_inner = 0;
	state->errmode = STATUS_BER;

	stv0288_writeregI(state, 0x41, 0x04);
	msleep(200);
	id = stv0288_readreg(state, 0x00);
	dprintk("stv0288 id %x\n", id);

	/* register 0x00 contains 0x11 for STV0288  */
	if (id != 0x11)
		goto error;

	/* create dvb_frontend */
	memcpy(&state->frontend.ops, &stv0288_ops,
			sizeof(struct dvb_frontend_ops));
	state->frontend.demodulator_priv = state;
	return &state->frontend;

error:
	kfree(state);

	return NULL;
}
EXPORT_SYMBOL(stv0288_attach);

module_param(debug_legacy_dish_switch, int, 0444);
MODULE_PARM_DESC(debug_legacy_dish_switch,
		"Enable timing analysis for Dish Network legacy switches");

module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");

MODULE_DESCRIPTION("ST STV0288 DVB Demodulator driver");
MODULE_AUTHOR("Georg Acher, Bob Liu, Igor liplianin");
MODULE_LICENSE("GPL");
gpl-2.0
smac0628/kernel-htc-m8-gpe-stock
arch/powerpc/platforms/pseries/eeh_driver.c
3041
15614
/*
 * PCI Error Recovery Driver for RPA-compliant PPC64 platform.
 * Copyright IBM Corp. 2004 2005
 * Copyright Linas Vepstas <linas@linas.org> 2004, 2005
 *
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Send comments and feedback to Linas Vepstas <linas@austin.ibm.com>
 */
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/pci.h>
#include <asm/eeh.h>
#include <asm/eeh_event.h>
#include <asm/ppc-pci.h>
#include <asm/pci-bridge.h>
#include <asm/prom.h>
#include <asm/rtas.h>

/**
 * eeh_pcid_name - Retrieve name of PCI device driver
 * @pdev: PCI device
 *
 * This routine is used to retrieve the name of PCI device driver
 * if that's valid.  Returns "" when no driver is bound.
 */
static inline const char *eeh_pcid_name(struct pci_dev *pdev)
{
	if (pdev && pdev->dev.driver)
		return pdev->dev.driver->name;
	return "";
}

#if 0
/* Debug helper (disabled): dump a pci_dn subtree with EEH state. */
static void print_device_node_tree(struct pci_dn *pdn, int dent)
{
	int i;
	struct device_node *pc;

	if (!pdn)
		return;
	for (i = 0; i < dent; i++)
		printk(" ");
	printk("dn=%s mode=%x \tcfg_addr=%x pe_addr=%x \tfull=%s\n",
		pdn->node->name, pdn->eeh_mode, pdn->eeh_config_addr,
		pdn->eeh_pe_config_addr, pdn->node->full_name);
	dent += 3;
	pc = pdn->node->child;
	while (pc) {
		print_device_node_tree(PCI_DN(pc), dent);
		pc = pc->sibling;
	}
}
#endif

/**
 * eeh_disable_irq - Disable interrupt for the recovering device
 * @dev: PCI device
 *
 * This routine must be called when reporting temporary or permanent
 * error to the particular PCI device to disable interrupt of that
 * device. If the device has enabled MSI or MSI-X interrupt, we needn't
 * do real work because EEH should freeze DMA transfers for those PCI
 * devices encountering EEH errors, which includes MSI or MSI-X.
 */
static void eeh_disable_irq(struct pci_dev *dev)
{
	struct eeh_dev *edev = pci_dev_to_eeh_dev(dev);

	/* Don't disable MSI and MSI-X interrupts. They are
	 * effectively disabled by the DMA Stopped state
	 * when an EEH error occurs.
	 */
	if (dev->msi_enabled || dev->msix_enabled)
		return;

	if (!irq_has_action(dev->irq))
		return;

	/* Remember we disabled it so eeh_enable_irq() can undo it. */
	edev->mode |= EEH_MODE_IRQ_DISABLED;
	disable_irq_nosync(dev->irq);
}

/**
 * eeh_enable_irq - Enable interrupt for the recovering device
 * @dev: PCI device
 *
 * This routine must be called to enable interrupt while failed
 * device could be resumed.  Only re-enables an IRQ that
 * eeh_disable_irq() previously disabled.
 */
static void eeh_enable_irq(struct pci_dev *dev)
{
	struct eeh_dev *edev = pci_dev_to_eeh_dev(dev);

	if ((edev->mode) & EEH_MODE_IRQ_DISABLED) {
		edev->mode &= ~EEH_MODE_IRQ_DISABLED;
		enable_irq(dev->irq);
	}
}

/**
 * eeh_report_error - Report pci error to each device driver
 * @dev: PCI device
 * @userdata: return value
 *
 * Report an EEH error to each device driver, collect up and
 * merge the device driver responses. Cumulative response
 * passed back in "userdata".  A NEED_RESET answer from any
 * driver overrides the merged result.
 */
static int eeh_report_error(struct pci_dev *dev, void *userdata)
{
	enum pci_ers_result rc, *res = userdata;
	struct pci_driver *driver = dev->driver;

	dev->error_state = pci_channel_io_frozen;

	if (!driver)
		return 0;

	eeh_disable_irq(dev);

	if (!driver->err_handler ||
	    !driver->err_handler->error_detected)
		return 0;

	rc = driver->err_handler->error_detected(dev, pci_channel_io_frozen);

	/* A driver that needs a reset trumps all others */
	if (rc == PCI_ERS_RESULT_NEED_RESET) *res = rc;
	if (*res == PCI_ERS_RESULT_NONE) *res = rc;

	return 0;
}

/**
 * eeh_report_mmio_enabled - Tell drivers that MMIO has been enabled
 * @dev: PCI device
 * @userdata: return value
 *
 * Tells each device driver that IO ports, MMIO and config space I/O
 * are now enabled. Collects up and merges the device driver responses.
 * Cumulative response passed back in "userdata".
 */
static int eeh_report_mmio_enabled(struct pci_dev *dev, void *userdata)
{
	enum pci_ers_result rc, *res = userdata;
	struct pci_driver *driver = dev->driver;

	if (!driver ||
	    !driver->err_handler ||
	    !driver->err_handler->mmio_enabled)
		return 0;

	rc = driver->err_handler->mmio_enabled(dev);

	/* A driver that needs a reset trumps all others */
	if (rc == PCI_ERS_RESULT_NEED_RESET) *res = rc;
	if (*res == PCI_ERS_RESULT_NONE) *res = rc;

	return 0;
}

/**
 * eeh_report_reset - Tell device that slot has been reset
 * @dev: PCI device
 * @userdata: return value
 *
 * This routine must be called while EEH tries to reset particular
 * PCI device so that the associated PCI device driver could take
 * some actions, usually to save data the driver needs so that the
 * driver can work again while the device is recovered.
 */
static int eeh_report_reset(struct pci_dev *dev, void *userdata)
{
	enum pci_ers_result rc, *res = userdata;
	struct pci_driver *driver = dev->driver;

	if (!driver)
		return 0;

	dev->error_state = pci_channel_io_normal;

	eeh_enable_irq(dev);

	if (!driver->err_handler ||
	    !driver->err_handler->slot_reset)
		return 0;

	rc = driver->err_handler->slot_reset(dev);
	/* First meaningful answer wins; NEED_RESET may still override
	 * an earlier DISCONNECT. */
	if ((*res == PCI_ERS_RESULT_NONE) ||
	    (*res == PCI_ERS_RESULT_RECOVERED)) *res = rc;
	if (*res == PCI_ERS_RESULT_DISCONNECT &&
	    rc == PCI_ERS_RESULT_NEED_RESET) *res = rc;

	return 0;
}

/**
 * eeh_report_resume - Tell device to resume normal operations
 * @dev: PCI device
 * @userdata: return value
 *
 * This routine must be called to notify the device driver that it
 * could resume so that the device driver can do some initialization
 * to make the recovered device work again.
 */
static int eeh_report_resume(struct pci_dev *dev, void *userdata)
{
	struct pci_driver *driver = dev->driver;

	dev->error_state = pci_channel_io_normal;

	if (!driver)
		return 0;

	eeh_enable_irq(dev);

	if (!driver->err_handler ||
	    !driver->err_handler->resume)
		return 0;

	driver->err_handler->resume(dev);

	return 0;
}

/**
 * eeh_report_failure - Tell device driver that device is dead.
 * @dev: PCI device
 * @userdata: return value
 *
 * This informs the device driver that the device is permanently
 * dead, and that no further recovery attempts will be made on it.
 */
static int eeh_report_failure(struct pci_dev *dev, void *userdata)
{
	struct pci_driver *driver = dev->driver;

	dev->error_state = pci_channel_io_perm_failure;

	if (!driver)
		return 0;

	eeh_disable_irq(dev);

	if (!driver->err_handler ||
	    !driver->err_handler->error_detected)
		return 0;

	driver->err_handler->error_detected(dev, pci_channel_io_perm_failure);

	return 0;
}

/**
 * eeh_reset_device - Perform actual reset of a pci slot
 * @edev: PE associated EEH device
 * @bus: PCI bus corresponding to the isolated slot
 *
 * This routine must be called to do reset on the indicated PE.
 * During the reset, udev might be invoked because those affected
 * PCI devices will be removed and then added.
 */
static int eeh_reset_device(struct eeh_dev *edev, struct pci_bus *bus)
{
	struct device_node *dn;
	int cnt, rc;

	/* pcibios will clear the counter; save the value */
	cnt = edev->freeze_count;

	if (bus)
		pcibios_remove_pci_devices(bus);

	/* Reset the pci controller. (Asserts RST#; resets config space).
	 * Reconfigure bridges and devices. Don't try to bring the system
	 * up if the reset failed for some reason.
	 */
	rc = eeh_reset_pe(edev);
	if (rc)
		return rc;

	/* Walk over all functions on this device. */
	dn = eeh_dev_to_of_node(edev);
	if (!pcibios_find_pci_bus(dn) && of_node_to_eeh_dev(dn->parent))
		dn = dn->parent->child;

	while (dn) {
		struct eeh_dev *pedev = of_node_to_eeh_dev(dn);

		/* On Power4, always true because eeh_pe_config_addr=0 */
		if (edev->pe_config_addr == pedev->pe_config_addr) {
			eeh_ops->configure_bridge(dn);
			eeh_restore_bars(pedev);
		}
		dn = dn->sibling;
	}

	/* Give the system 5 seconds to finish running the user-space
	 * hotplug shutdown scripts, e.g. ifdown for ethernet.  Yes,
	 * this is a hack, but if we don't do this, and try to bring
	 * the device up before the scripts have taken it down,
	 * potentially weird things happen.
	 */
	if (bus) {
		ssleep(5);
		pcibios_add_pci_devices(bus);
	}
	edev->freeze_count = cnt;

	return 0;
}

/* The longest amount of time to wait for a pci device
 * to come back on line, in seconds.
 */
#define MAX_WAIT_FOR_RECOVERY 150

/**
 * eeh_handle_event - Reset a PCI device after hard lockup.
 * @event: EEH event
 *
 * While PHB detects address or data parity errors on particular PCI
 * slot, the associated PE will be frozen. Besides, DMA's occurring
 * to wild addresses (which usually happen due to bugs in device
 * drivers or in PCI adapter firmware) can cause EEH error. #SERR,
 * #PERR or other misc PCI-related errors also can trigger EEH errors.
 *
 * Recovery process consists of unplugging the device driver (which
 * generated hotplug events to userspace), then issuing a PCI #RST to
 * the device, then reconfiguring the PCI config space for all bridges
 * & devices under this slot, and then finally restarting the device
 * drivers (which cause a second set of hotplug events to go out to
 * userspace).
 */
struct eeh_dev *handle_eeh_events(struct eeh_event *event)
{
	struct device_node *frozen_dn;
	struct eeh_dev *frozen_edev;
	struct pci_bus *frozen_bus;
	int rc = 0;
	enum pci_ers_result result = PCI_ERS_RESULT_NONE;
	const char *location, *pci_str, *drv_str, *bus_pci_str, *bus_drv_str;

	frozen_dn = eeh_find_device_pe(eeh_dev_to_of_node(event->edev));
	if (!frozen_dn) {
		location = of_get_property(eeh_dev_to_of_node(event->edev),
				"ibm,loc-code", NULL);
		location = location ? location : "unknown";
		printk(KERN_ERR "EEH: Error: Cannot find partition endpoint "
				"for location=%s pci addr=%s\n",
			location,
			eeh_pci_name(eeh_dev_to_pci_dev(event->edev)));
		return NULL;
	}

	frozen_bus = pcibios_find_pci_bus(frozen_dn);
	location = of_get_property(frozen_dn, "ibm,loc-code", NULL);
	location = location ? location : "unknown";

	/* There are two different styles for coming up with the PE.
	 * In the old style, it was the highest EEH-capable device
	 * which was always an EADS pci bridge.  In the new style,
	 * there might not be any EADS bridges, and even when there are,
	 * the firmware marks them as "EEH incapable". So another
	 * two-step is needed to find the pci bus..
	 */
	if (!frozen_bus)
		frozen_bus = pcibios_find_pci_bus(frozen_dn->parent);

	if (!frozen_bus) {
		printk(KERN_ERR "EEH: Cannot find PCI bus "
			"for location=%s dn=%s\n",
			location, frozen_dn->full_name);
		return NULL;
	}

	frozen_edev = of_node_to_eeh_dev(frozen_dn);
	frozen_edev->freeze_count++;
	pci_str = eeh_pci_name(eeh_dev_to_pci_dev(event->edev));
	drv_str = eeh_pcid_name(eeh_dev_to_pci_dev(event->edev));

	if (frozen_edev->freeze_count > EEH_MAX_ALLOWED_FREEZES)
		goto excess_failures;

	printk(KERN_WARNING
	   "EEH: This PCI device has failed %d times in the last hour:\n",
		frozen_edev->freeze_count);

	if (frozen_edev->pdev) {
		bus_pci_str = pci_name(frozen_edev->pdev);
		bus_drv_str = eeh_pcid_name(frozen_edev->pdev);
		printk(KERN_WARNING
			"EEH: Bus location=%s driver=%s pci addr=%s\n",
			location, bus_drv_str, bus_pci_str);
	}

	printk(KERN_WARNING
		"EEH: Device location=%s driver=%s pci addr=%s\n",
		location, drv_str, pci_str);

	/* Walk the various device drivers attached to this slot through
	 * a reset sequence, giving each an opportunity to do what it needs
	 * to accomplish the reset.  Each child gets a report of the
	 * status ... if any child can't handle the reset, then the entire
	 * slot is dlpar removed and added.
	 */
	pci_walk_bus(frozen_bus, eeh_report_error, &result);

	/* Get the current PCI slot state. This can take a long time,
	 * sometimes over 3 seconds for certain systems.
	 */
	rc = eeh_ops->wait_state(eeh_dev_to_of_node(frozen_edev),
				 MAX_WAIT_FOR_RECOVERY*1000);
	if (rc < 0 || rc == EEH_STATE_NOT_SUPPORT) {
		printk(KERN_WARNING "EEH: Permanent failure\n");
		goto hard_fail;
	}

	/* Since rtas may enable MMIO when posting the error log,
	 * don't post the error log until after all dev drivers
	 * have been informed.
	 */
	eeh_slot_error_detail(frozen_edev, EEH_LOG_TEMP);

	/* If all device drivers were EEH-unaware, then shut
	 * down all of the device drivers, and hope they
	 * go down willingly, without panicing the system.
	 */
	if (result == PCI_ERS_RESULT_NONE) {
		rc = eeh_reset_device(frozen_edev, frozen_bus);
		if (rc) {
			printk(KERN_WARNING
				"EEH: Unable to reset, rc=%d\n", rc);
			goto hard_fail;
		}
	}

	/* If all devices reported they can proceed, then re-enable MMIO */
	if (result == PCI_ERS_RESULT_CAN_RECOVER) {
		rc = eeh_pci_enable(frozen_edev, EEH_OPT_THAW_MMIO);

		if (rc < 0)
			goto hard_fail;
		if (rc) {
			result = PCI_ERS_RESULT_NEED_RESET;
		} else {
			result = PCI_ERS_RESULT_NONE;
			pci_walk_bus(frozen_bus,
				eeh_report_mmio_enabled, &result);
		}
	}

	/* If all devices reported they can proceed, then re-enable DMA */
	if (result == PCI_ERS_RESULT_CAN_RECOVER) {
		rc = eeh_pci_enable(frozen_edev, EEH_OPT_THAW_DMA);

		if (rc < 0)
			goto hard_fail;
		if (rc)
			result = PCI_ERS_RESULT_NEED_RESET;
		else
			result = PCI_ERS_RESULT_RECOVERED;
	}

	/* If any device has a hard failure, then shut off everything. */
	if (result == PCI_ERS_RESULT_DISCONNECT) {
		printk(KERN_WARNING "EEH: Device driver gave up\n");
		goto hard_fail;
	}

	/* If any device called out for a reset, then reset the slot */
	if (result == PCI_ERS_RESULT_NEED_RESET) {
		rc = eeh_reset_device(frozen_edev, NULL);
		if (rc) {
			printk(KERN_WARNING
				"EEH: Cannot reset, rc=%d\n", rc);
			goto hard_fail;
		}
		result = PCI_ERS_RESULT_NONE;
		pci_walk_bus(frozen_bus, eeh_report_reset, &result);
	}

	/* All devices should claim they have recovered by now. */
	if ((result != PCI_ERS_RESULT_RECOVERED) &&
	    (result != PCI_ERS_RESULT_NONE)) {
		printk(KERN_WARNING "EEH: Not recovered\n");
		goto hard_fail;
	}

	/* Tell all device drivers that they can resume operations */
	pci_walk_bus(frozen_bus, eeh_report_resume, NULL);

	return frozen_edev;

excess_failures:
	/*
	 * About 90% of all real-life EEH failures in the field
	 * are due to poorly seated PCI cards. Only 10% or so are
	 * due to actual, failed cards.
	 */
	printk(KERN_ERR
	   "EEH: PCI device at location=%s driver=%s pci addr=%s\n"
	   "has failed %d times in the last hour "
	   "and has been permanently disabled.\n"
	   "Please try reseating this device or replacing it.\n",
		location, drv_str, pci_str, frozen_edev->freeze_count);
	goto perm_error;

hard_fail:
	printk(KERN_ERR
	   "EEH: Unable to recover from failure of PCI device "
	   "at location=%s driver=%s pci addr=%s\n"
	   "Please try reseating this device or replacing it.\n",
		location, drv_str, pci_str);

perm_error:
	eeh_slot_error_detail(frozen_edev, EEH_LOG_PERM);

	/* Notify all devices that they're about to go down. */
	pci_walk_bus(frozen_bus, eeh_report_failure, NULL);

	/* Shut down the device drivers for good. */
	pcibios_remove_pci_devices(frozen_bus);

	return NULL;
}
gpl-2.0
lipro-compulab/cl-som-imx6ul-kernel
drivers/pcmcia/pxa2xx_viper.c
3297
4205
/* * Viper/Zeus PCMCIA support * Copyright 2004 Arcom Control Systems * * Maintained by Marc Zyngier <maz@misterjones.org> * * Based on: * iPAQ h2200 PCMCIA support * Copyright 2004 Koen Kooi <koen@vestingbar.nl> * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive for * more details. */ #include <linux/module.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/gpio.h> #include <pcmcia/ss.h> #include <asm/irq.h> #include <linux/platform_data/pcmcia-pxa2xx_viper.h> #include "soc_common.h" #include "pxa2xx_base.h" static struct platform_device *arcom_pcmcia_dev; static inline struct arcom_pcmcia_pdata *viper_get_pdata(void) { return arcom_pcmcia_dev->dev.platform_data; } static int viper_pcmcia_hw_init(struct soc_pcmcia_socket *skt) { struct arcom_pcmcia_pdata *pdata = viper_get_pdata(); unsigned long flags; skt->stat[SOC_STAT_CD].gpio = pdata->cd_gpio; skt->stat[SOC_STAT_CD].name = "PCMCIA_CD"; skt->stat[SOC_STAT_RDY].gpio = pdata->rdy_gpio; skt->stat[SOC_STAT_RDY].name = "CF ready"; if (gpio_request(pdata->pwr_gpio, "CF power")) goto err_request_pwr; local_irq_save(flags); if (gpio_direction_output(pdata->pwr_gpio, 0)) { local_irq_restore(flags); goto err_dir; } local_irq_restore(flags); return 0; err_dir: gpio_free(pdata->pwr_gpio); err_request_pwr: dev_err(&arcom_pcmcia_dev->dev, "Failed to setup PCMCIA GPIOs\n"); return -1; } /* * Release all resources. 
*/ static void viper_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt) { struct arcom_pcmcia_pdata *pdata = viper_get_pdata(); gpio_free(pdata->pwr_gpio); } static void viper_pcmcia_socket_state(struct soc_pcmcia_socket *skt, struct pcmcia_state *state) { state->vs_3v = 1; /* Can only apply 3.3V */ state->vs_Xv = 0; } static int viper_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, const socket_state_t *state) { struct arcom_pcmcia_pdata *pdata = viper_get_pdata(); /* Silently ignore Vpp, output enable, speaker enable. */ pdata->reset(state->flags & SS_RESET); /* Apply socket voltage */ switch (state->Vcc) { case 0: gpio_set_value(pdata->pwr_gpio, 0); break; case 33: gpio_set_value(pdata->pwr_gpio, 1); break; default: dev_err(&arcom_pcmcia_dev->dev, "Unsupported Vcc:%d\n", state->Vcc); return -1; } return 0; } static struct pcmcia_low_level viper_pcmcia_ops = { .owner = THIS_MODULE, .hw_init = viper_pcmcia_hw_init, .hw_shutdown = viper_pcmcia_hw_shutdown, .socket_state = viper_pcmcia_socket_state, .configure_socket = viper_pcmcia_configure_socket, .nr = 1, }; static struct platform_device *viper_pcmcia_device; static int viper_pcmcia_probe(struct platform_device *pdev) { int ret; /* I can't imagine more than one device, but you never know... 
*/ if (arcom_pcmcia_dev) return -EEXIST; if (!pdev->dev.platform_data) return -EINVAL; viper_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1); if (!viper_pcmcia_device) return -ENOMEM; arcom_pcmcia_dev = pdev; viper_pcmcia_device->dev.parent = &pdev->dev; ret = platform_device_add_data(viper_pcmcia_device, &viper_pcmcia_ops, sizeof(viper_pcmcia_ops)); if (!ret) ret = platform_device_add(viper_pcmcia_device); if (ret) { platform_device_put(viper_pcmcia_device); arcom_pcmcia_dev = NULL; } return ret; } static int viper_pcmcia_remove(struct platform_device *pdev) { platform_device_unregister(viper_pcmcia_device); arcom_pcmcia_dev = NULL; return 0; } static struct platform_device_id viper_pcmcia_id_table[] = { { .name = "viper-pcmcia", }, { .name = "zeus-pcmcia", }, { }, }; static struct platform_driver viper_pcmcia_driver = { .probe = viper_pcmcia_probe, .remove = viper_pcmcia_remove, .driver = { .name = "arcom-pcmcia", .owner = THIS_MODULE, }, .id_table = viper_pcmcia_id_table, }; module_platform_driver(viper_pcmcia_driver); MODULE_DEVICE_TABLE(platform, viper_pcmcia_id_table); MODULE_LICENSE("GPL");
gpl-2.0
bsmitty83/kernel_omap
lib/cpu_rmap.c
4321
6975
/* * cpu_rmap.c: CPU affinity reverse-map support * Copyright 2011 Solarflare Communications Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation, incorporated herein by reference. */ #include <linux/cpu_rmap.h> #ifdef CONFIG_GENERIC_HARDIRQS #include <linux/interrupt.h> #endif #include <linux/export.h> /* * These functions maintain a mapping from CPUs to some ordered set of * objects with CPU affinities. This can be seen as a reverse-map of * CPU affinity. However, we do not assume that the object affinities * cover all CPUs in the system. For those CPUs not directly covered * by object affinities, we attempt to find a nearest object based on * CPU topology. */ /** * alloc_cpu_rmap - allocate CPU affinity reverse-map * @size: Number of objects to be mapped * @flags: Allocation flags e.g. %GFP_KERNEL */ struct cpu_rmap *alloc_cpu_rmap(unsigned int size, gfp_t flags) { struct cpu_rmap *rmap; unsigned int cpu; size_t obj_offset; /* This is a silly number of objects, and we use u16 indices. */ if (size > 0xffff) return NULL; /* Offset of object pointer array from base structure */ obj_offset = ALIGN(offsetof(struct cpu_rmap, near[nr_cpu_ids]), sizeof(void *)); rmap = kzalloc(obj_offset + size * sizeof(rmap->obj[0]), flags); if (!rmap) return NULL; rmap->obj = (void **)((char *)rmap + obj_offset); /* Initially assign CPUs to objects on a rota, since we have * no idea where the objects are. Use infinite distance, so * any object with known distance is preferable. Include the * CPUs that are not present/online, since we definitely want * any newly-hotplugged CPUs to have some object assigned. 
*/ for_each_possible_cpu(cpu) { rmap->near[cpu].index = cpu % size; rmap->near[cpu].dist = CPU_RMAP_DIST_INF; } rmap->size = size; return rmap; } EXPORT_SYMBOL(alloc_cpu_rmap); /* Reevaluate nearest object for given CPU, comparing with the given * neighbours at the given distance. */ static bool cpu_rmap_copy_neigh(struct cpu_rmap *rmap, unsigned int cpu, const struct cpumask *mask, u16 dist) { int neigh; for_each_cpu(neigh, mask) { if (rmap->near[cpu].dist > dist && rmap->near[neigh].dist <= dist) { rmap->near[cpu].index = rmap->near[neigh].index; rmap->near[cpu].dist = dist; return true; } } return false; } #ifdef DEBUG static void debug_print_rmap(const struct cpu_rmap *rmap, const char *prefix) { unsigned index; unsigned int cpu; pr_info("cpu_rmap %p, %s:\n", rmap, prefix); for_each_possible_cpu(cpu) { index = rmap->near[cpu].index; pr_info("cpu %d -> obj %u (distance %u)\n", cpu, index, rmap->near[cpu].dist); } } #else static inline void debug_print_rmap(const struct cpu_rmap *rmap, const char *prefix) { } #endif /** * cpu_rmap_add - add object to a rmap * @rmap: CPU rmap allocated with alloc_cpu_rmap() * @obj: Object to add to rmap * * Return index of object. */ int cpu_rmap_add(struct cpu_rmap *rmap, void *obj) { u16 index; BUG_ON(rmap->used >= rmap->size); index = rmap->used++; rmap->obj[index] = obj; return index; } EXPORT_SYMBOL(cpu_rmap_add); /** * cpu_rmap_update - update CPU rmap following a change of object affinity * @rmap: CPU rmap to update * @index: Index of object whose affinity changed * @affinity: New CPU affinity of object */ int cpu_rmap_update(struct cpu_rmap *rmap, u16 index, const struct cpumask *affinity) { cpumask_var_t update_mask; unsigned int cpu; if (unlikely(!zalloc_cpumask_var(&update_mask, GFP_KERNEL))) return -ENOMEM; /* Invalidate distance for all CPUs for which this used to be * the nearest object. Mark those CPUs for update. 
*/ for_each_online_cpu(cpu) { if (rmap->near[cpu].index == index) { rmap->near[cpu].dist = CPU_RMAP_DIST_INF; cpumask_set_cpu(cpu, update_mask); } } debug_print_rmap(rmap, "after invalidating old distances"); /* Set distance to 0 for all CPUs in the new affinity mask. * Mark all CPUs within their NUMA nodes for update. */ for_each_cpu(cpu, affinity) { rmap->near[cpu].index = index; rmap->near[cpu].dist = 0; cpumask_or(update_mask, update_mask, cpumask_of_node(cpu_to_node(cpu))); } debug_print_rmap(rmap, "after updating neighbours"); /* Update distances based on topology */ for_each_cpu(cpu, update_mask) { if (cpu_rmap_copy_neigh(rmap, cpu, topology_thread_cpumask(cpu), 1)) continue; if (cpu_rmap_copy_neigh(rmap, cpu, topology_core_cpumask(cpu), 2)) continue; if (cpu_rmap_copy_neigh(rmap, cpu, cpumask_of_node(cpu_to_node(cpu)), 3)) continue; /* We could continue into NUMA node distances, but for now * we give up. */ } debug_print_rmap(rmap, "after copying neighbours"); free_cpumask_var(update_mask); return 0; } EXPORT_SYMBOL(cpu_rmap_update); #ifdef CONFIG_GENERIC_HARDIRQS /* Glue between IRQ affinity notifiers and CPU rmaps */ struct irq_glue { struct irq_affinity_notify notify; struct cpu_rmap *rmap; u16 index; }; /** * free_irq_cpu_rmap - free a CPU affinity reverse-map used for IRQs * @rmap: Reverse-map allocated with alloc_irq_cpu_map(), or %NULL * * Must be called in process context, before freeing the IRQs, and * without holding any locks required by global workqueue items. 
*/ void free_irq_cpu_rmap(struct cpu_rmap *rmap) { struct irq_glue *glue; u16 index; if (!rmap) return; for (index = 0; index < rmap->used; index++) { glue = rmap->obj[index]; irq_set_affinity_notifier(glue->notify.irq, NULL); } irq_run_affinity_notifiers(); kfree(rmap); } EXPORT_SYMBOL(free_irq_cpu_rmap); static void irq_cpu_rmap_notify(struct irq_affinity_notify *notify, const cpumask_t *mask) { struct irq_glue *glue = container_of(notify, struct irq_glue, notify); int rc; rc = cpu_rmap_update(glue->rmap, glue->index, mask); if (rc) pr_warning("irq_cpu_rmap_notify: update failed: %d\n", rc); } static void irq_cpu_rmap_release(struct kref *ref) { struct irq_glue *glue = container_of(ref, struct irq_glue, notify.kref); kfree(glue); } /** * irq_cpu_rmap_add - add an IRQ to a CPU affinity reverse-map * @rmap: The reverse-map * @irq: The IRQ number * * This adds an IRQ affinity notifier that will update the reverse-map * automatically. * * Must be called in process context, after the IRQ is allocated but * before it is bound with request_irq(). */ int irq_cpu_rmap_add(struct cpu_rmap *rmap, int irq) { struct irq_glue *glue = kzalloc(sizeof(*glue), GFP_KERNEL); int rc; if (!glue) return -ENOMEM; glue->notify.notify = irq_cpu_rmap_notify; glue->notify.release = irq_cpu_rmap_release; glue->rmap = rmap; glue->index = cpu_rmap_add(rmap, glue); rc = irq_set_affinity_notifier(irq, &glue->notify); if (rc) kfree(glue); return rc; } EXPORT_SYMBOL(irq_cpu_rmap_add); #endif /* CONFIG_GENERIC_HARDIRQS */
gpl-2.0
abazad/SizotrixKernel
fs/jfs/namei.c
4833
38032
/* * Copyright (C) International Business Machines Corp., 2000-2004 * Portions Copyright (C) Christoph Hellwig, 2001-2002 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/fs.h> #include <linux/namei.h> #include <linux/ctype.h> #include <linux/quotaops.h> #include <linux/exportfs.h> #include "jfs_incore.h" #include "jfs_superblock.h" #include "jfs_inode.h" #include "jfs_dinode.h" #include "jfs_dmap.h" #include "jfs_unicode.h" #include "jfs_metapage.h" #include "jfs_xattr.h" #include "jfs_acl.h" #include "jfs_debug.h" /* * forward references */ const struct dentry_operations jfs_ci_dentry_operations; static s64 commitZeroLink(tid_t, struct inode *); /* * NAME: free_ea_wmap(inode) * * FUNCTION: free uncommitted extended attributes from working map * */ static inline void free_ea_wmap(struct inode *inode) { dxd_t *ea = &JFS_IP(inode)->ea; if (ea->flag & DXD_EXTENT) { /* free EA pages from cache */ invalidate_dxd_metapages(inode, *ea); dbFree(inode, addressDXD(ea), lengthDXD(ea)); } ea->flag = 0; } /* * NAME: jfs_create(dip, dentry, mode) * * FUNCTION: create a regular file in the parent directory <dip> * with name = <from dentry> and mode = <mode> * * PARAMETER: dip - parent directory vnode * dentry - dentry of new file * mode - create mode (rwxrwxrwx). 
* nd- nd struct * * RETURN: Errors from subroutines * */ static int jfs_create(struct inode *dip, struct dentry *dentry, umode_t mode, struct nameidata *nd) { int rc = 0; tid_t tid; /* transaction id */ struct inode *ip = NULL; /* child directory inode */ ino_t ino; struct component_name dname; /* child directory name */ struct btstack btstack; struct inode *iplist[2]; struct tblock *tblk; jfs_info("jfs_create: dip:0x%p name:%s", dip, dentry->d_name.name); dquot_initialize(dip); /* * search parent directory for entry/freespace * (dtSearch() returns parent directory page pinned) */ if ((rc = get_UCSname(&dname, dentry))) goto out1; /* * Either iAlloc() or txBegin() may block. Deadlock can occur if we * block there while holding dtree page, so we allocate the inode & * begin the transaction before we search the directory. */ ip = ialloc(dip, mode); if (IS_ERR(ip)) { rc = PTR_ERR(ip); goto out2; } tid = txBegin(dip->i_sb, 0); mutex_lock_nested(&JFS_IP(dip)->commit_mutex, COMMIT_MUTEX_PARENT); mutex_lock_nested(&JFS_IP(ip)->commit_mutex, COMMIT_MUTEX_CHILD); rc = jfs_init_acl(tid, ip, dip); if (rc) goto out3; rc = jfs_init_security(tid, ip, dip, &dentry->d_name); if (rc) { txAbort(tid, 0); goto out3; } if ((rc = dtSearch(dip, &dname, &ino, &btstack, JFS_CREATE))) { jfs_err("jfs_create: dtSearch returned %d", rc); txAbort(tid, 0); goto out3; } tblk = tid_to_tblock(tid); tblk->xflag |= COMMIT_CREATE; tblk->ino = ip->i_ino; tblk->u.ixpxd = JFS_IP(ip)->ixpxd; iplist[0] = dip; iplist[1] = ip; /* * initialize the child XAD tree root in-line in inode */ xtInitRoot(tid, ip); /* * create entry in parent directory for child directory * (dtInsert() releases parent directory page) */ ino = ip->i_ino; if ((rc = dtInsert(tid, dip, &dname, &ino, &btstack))) { if (rc == -EIO) { jfs_err("jfs_create: dtInsert returned -EIO"); txAbort(tid, 1); /* Marks Filesystem dirty */ } else txAbort(tid, 0); /* Filesystem full */ goto out3; } ip->i_op = &jfs_file_inode_operations; ip->i_fop = 
&jfs_file_operations; ip->i_mapping->a_ops = &jfs_aops; mark_inode_dirty(ip); dip->i_ctime = dip->i_mtime = CURRENT_TIME; mark_inode_dirty(dip); rc = txCommit(tid, 2, &iplist[0], 0); out3: txEnd(tid); mutex_unlock(&JFS_IP(ip)->commit_mutex); mutex_unlock(&JFS_IP(dip)->commit_mutex); if (rc) { free_ea_wmap(ip); clear_nlink(ip); unlock_new_inode(ip); iput(ip); } else { d_instantiate(dentry, ip); unlock_new_inode(ip); } out2: free_UCSname(&dname); out1: jfs_info("jfs_create: rc:%d", rc); return rc; } /* * NAME: jfs_mkdir(dip, dentry, mode) * * FUNCTION: create a child directory in the parent directory <dip> * with name = <from dentry> and mode = <mode> * * PARAMETER: dip - parent directory vnode * dentry - dentry of child directory * mode - create mode (rwxrwxrwx). * * RETURN: Errors from subroutines * * note: * EACCESS: user needs search+write permission on the parent directory */ static int jfs_mkdir(struct inode *dip, struct dentry *dentry, umode_t mode) { int rc = 0; tid_t tid; /* transaction id */ struct inode *ip = NULL; /* child directory inode */ ino_t ino; struct component_name dname; /* child directory name */ struct btstack btstack; struct inode *iplist[2]; struct tblock *tblk; jfs_info("jfs_mkdir: dip:0x%p name:%s", dip, dentry->d_name.name); dquot_initialize(dip); /* * search parent directory for entry/freespace * (dtSearch() returns parent directory page pinned) */ if ((rc = get_UCSname(&dname, dentry))) goto out1; /* * Either iAlloc() or txBegin() may block. Deadlock can occur if we * block there while holding dtree page, so we allocate the inode & * begin the transaction before we search the directory. 
*/ ip = ialloc(dip, S_IFDIR | mode); if (IS_ERR(ip)) { rc = PTR_ERR(ip); goto out2; } tid = txBegin(dip->i_sb, 0); mutex_lock_nested(&JFS_IP(dip)->commit_mutex, COMMIT_MUTEX_PARENT); mutex_lock_nested(&JFS_IP(ip)->commit_mutex, COMMIT_MUTEX_CHILD); rc = jfs_init_acl(tid, ip, dip); if (rc) goto out3; rc = jfs_init_security(tid, ip, dip, &dentry->d_name); if (rc) { txAbort(tid, 0); goto out3; } if ((rc = dtSearch(dip, &dname, &ino, &btstack, JFS_CREATE))) { jfs_err("jfs_mkdir: dtSearch returned %d", rc); txAbort(tid, 0); goto out3; } tblk = tid_to_tblock(tid); tblk->xflag |= COMMIT_CREATE; tblk->ino = ip->i_ino; tblk->u.ixpxd = JFS_IP(ip)->ixpxd; iplist[0] = dip; iplist[1] = ip; /* * initialize the child directory in-line in inode */ dtInitRoot(tid, ip, dip->i_ino); /* * create entry in parent directory for child directory * (dtInsert() releases parent directory page) */ ino = ip->i_ino; if ((rc = dtInsert(tid, dip, &dname, &ino, &btstack))) { if (rc == -EIO) { jfs_err("jfs_mkdir: dtInsert returned -EIO"); txAbort(tid, 1); /* Marks Filesystem dirty */ } else txAbort(tid, 0); /* Filesystem full */ goto out3; } set_nlink(ip, 2); /* for '.' */ ip->i_op = &jfs_dir_inode_operations; ip->i_fop = &jfs_dir_operations; mark_inode_dirty(ip); /* update parent directory inode */ inc_nlink(dip); /* for '..' from child directory */ dip->i_ctime = dip->i_mtime = CURRENT_TIME; mark_inode_dirty(dip); rc = txCommit(tid, 2, &iplist[0], 0); out3: txEnd(tid); mutex_unlock(&JFS_IP(ip)->commit_mutex); mutex_unlock(&JFS_IP(dip)->commit_mutex); if (rc) { free_ea_wmap(ip); clear_nlink(ip); unlock_new_inode(ip); iput(ip); } else { d_instantiate(dentry, ip); unlock_new_inode(ip); } out2: free_UCSname(&dname); out1: jfs_info("jfs_mkdir: rc:%d", rc); return rc; } /* * NAME: jfs_rmdir(dip, dentry) * * FUNCTION: remove a link to child directory * * PARAMETER: dip - parent inode * dentry - child directory dentry * * RETURN: -EINVAL - if name is . or .. * -EINVAL - if . or .. exist but are invalid. 
* errors from subroutines * * note: * if other threads have the directory open when the last link * is removed, the "." and ".." entries, if present, are removed before * rmdir() returns and no new entries may be created in the directory, * but the directory is not removed until the last reference to * the directory is released (cf.unlink() of regular file). */ static int jfs_rmdir(struct inode *dip, struct dentry *dentry) { int rc; tid_t tid; /* transaction id */ struct inode *ip = dentry->d_inode; ino_t ino; struct component_name dname; struct inode *iplist[2]; struct tblock *tblk; jfs_info("jfs_rmdir: dip:0x%p name:%s", dip, dentry->d_name.name); /* Init inode for quota operations. */ dquot_initialize(dip); dquot_initialize(ip); /* directory must be empty to be removed */ if (!dtEmpty(ip)) { rc = -ENOTEMPTY; goto out; } if ((rc = get_UCSname(&dname, dentry))) { goto out; } tid = txBegin(dip->i_sb, 0); mutex_lock_nested(&JFS_IP(dip)->commit_mutex, COMMIT_MUTEX_PARENT); mutex_lock_nested(&JFS_IP(ip)->commit_mutex, COMMIT_MUTEX_CHILD); iplist[0] = dip; iplist[1] = ip; tblk = tid_to_tblock(tid); tblk->xflag |= COMMIT_DELETE; tblk->u.ip = ip; /* * delete the entry of target directory from parent directory */ ino = ip->i_ino; if ((rc = dtDelete(tid, dip, &dname, &ino, JFS_REMOVE))) { jfs_err("jfs_rmdir: dtDelete returned %d", rc); if (rc == -EIO) txAbort(tid, 1); txEnd(tid); mutex_unlock(&JFS_IP(ip)->commit_mutex); mutex_unlock(&JFS_IP(dip)->commit_mutex); goto out2; } /* update parent directory's link count corresponding * to ".." 
entry of the target directory deleted */ dip->i_ctime = dip->i_mtime = CURRENT_TIME; inode_dec_link_count(dip); /* * OS/2 could have created EA and/or ACL */ /* free EA from both persistent and working map */ if (JFS_IP(ip)->ea.flag & DXD_EXTENT) { /* free EA pages */ txEA(tid, ip, &JFS_IP(ip)->ea, NULL); } JFS_IP(ip)->ea.flag = 0; /* free ACL from both persistent and working map */ if (JFS_IP(ip)->acl.flag & DXD_EXTENT) { /* free ACL pages */ txEA(tid, ip, &JFS_IP(ip)->acl, NULL); } JFS_IP(ip)->acl.flag = 0; /* mark the target directory as deleted */ clear_nlink(ip); mark_inode_dirty(ip); rc = txCommit(tid, 2, &iplist[0], 0); txEnd(tid); mutex_unlock(&JFS_IP(ip)->commit_mutex); mutex_unlock(&JFS_IP(dip)->commit_mutex); /* * Truncating the directory index table is not guaranteed. It * may need to be done iteratively */ if (test_cflag(COMMIT_Stale, dip)) { if (dip->i_size > 1) jfs_truncate_nolock(dip, 0); clear_cflag(COMMIT_Stale, dip); } out2: free_UCSname(&dname); out: jfs_info("jfs_rmdir: rc:%d", rc); return rc; } /* * NAME: jfs_unlink(dip, dentry) * * FUNCTION: remove a link to object <vp> named by <name> * from parent directory <dvp> * * PARAMETER: dip - inode of parent directory * dentry - dentry of object to be removed * * RETURN: errors from subroutines * * note: * temporary file: if one or more processes have the file open * when the last link is removed, the link will be removed before * unlink() returns, but the removal of the file contents will be * postponed until all references to the files are closed. * * JFS does NOT support unlink() on directories. * */ static int jfs_unlink(struct inode *dip, struct dentry *dentry) { int rc; tid_t tid; /* transaction id */ struct inode *ip = dentry->d_inode; ino_t ino; struct component_name dname; /* object name */ struct inode *iplist[2]; struct tblock *tblk; s64 new_size = 0; int commit_flag; jfs_info("jfs_unlink: dip:0x%p name:%s", dip, dentry->d_name.name); /* Init inode for quota operations. 
*/ dquot_initialize(dip); dquot_initialize(ip); if ((rc = get_UCSname(&dname, dentry))) goto out; IWRITE_LOCK(ip, RDWRLOCK_NORMAL); tid = txBegin(dip->i_sb, 0); mutex_lock_nested(&JFS_IP(dip)->commit_mutex, COMMIT_MUTEX_PARENT); mutex_lock_nested(&JFS_IP(ip)->commit_mutex, COMMIT_MUTEX_CHILD); iplist[0] = dip; iplist[1] = ip; /* * delete the entry of target file from parent directory */ ino = ip->i_ino; if ((rc = dtDelete(tid, dip, &dname, &ino, JFS_REMOVE))) { jfs_err("jfs_unlink: dtDelete returned %d", rc); if (rc == -EIO) txAbort(tid, 1); /* Marks FS Dirty */ txEnd(tid); mutex_unlock(&JFS_IP(ip)->commit_mutex); mutex_unlock(&JFS_IP(dip)->commit_mutex); IWRITE_UNLOCK(ip); goto out1; } ASSERT(ip->i_nlink); ip->i_ctime = dip->i_ctime = dip->i_mtime = CURRENT_TIME; mark_inode_dirty(dip); /* update target's inode */ inode_dec_link_count(ip); /* * commit zero link count object */ if (ip->i_nlink == 0) { assert(!test_cflag(COMMIT_Nolink, ip)); /* free block resources */ if ((new_size = commitZeroLink(tid, ip)) < 0) { txAbort(tid, 1); /* Marks FS Dirty */ txEnd(tid); mutex_unlock(&JFS_IP(ip)->commit_mutex); mutex_unlock(&JFS_IP(dip)->commit_mutex); IWRITE_UNLOCK(ip); rc = new_size; goto out1; } tblk = tid_to_tblock(tid); tblk->xflag |= COMMIT_DELETE; tblk->u.ip = ip; } /* * Incomplete truncate of file data can * result in timing problems unless we synchronously commit the * transaction. 
*/ if (new_size) commit_flag = COMMIT_SYNC; else commit_flag = 0; /* * If xtTruncate was incomplete, commit synchronously to avoid * timing complications */ rc = txCommit(tid, 2, &iplist[0], commit_flag); txEnd(tid); mutex_unlock(&JFS_IP(ip)->commit_mutex); mutex_unlock(&JFS_IP(dip)->commit_mutex); while (new_size && (rc == 0)) { tid = txBegin(dip->i_sb, 0); mutex_lock(&JFS_IP(ip)->commit_mutex); new_size = xtTruncate_pmap(tid, ip, new_size); if (new_size < 0) { txAbort(tid, 1); /* Marks FS Dirty */ rc = new_size; } else rc = txCommit(tid, 2, &iplist[0], COMMIT_SYNC); txEnd(tid); mutex_unlock(&JFS_IP(ip)->commit_mutex); } if (ip->i_nlink == 0) set_cflag(COMMIT_Nolink, ip); IWRITE_UNLOCK(ip); /* * Truncating the directory index table is not guaranteed. It * may need to be done iteratively */ if (test_cflag(COMMIT_Stale, dip)) { if (dip->i_size > 1) jfs_truncate_nolock(dip, 0); clear_cflag(COMMIT_Stale, dip); } out1: free_UCSname(&dname); out: jfs_info("jfs_unlink: rc:%d", rc); return rc; } /* * NAME: commitZeroLink() * * FUNCTION: for non-directory, called by jfs_remove(), * truncate a regular file, directory or symbolic * link to zero length. return 0 if type is not * one of these. * * if the file is currently associated with a VM segment * only permanent disk and inode map resources are freed, * and neither the inode nor indirect blocks are modified * so that the resources can be later freed in the work * map by ctrunc1. * if there is no VM segment on entry, the resources are * freed in both work and permanent map. * (? for temporary file - memory object is cached even * after no reference: * reference count > 0 - ) * * PARAMETERS: cd - pointer to commit data structure. * current inode is the one to truncate. 
* * RETURN: Errors from subroutines */ static s64 commitZeroLink(tid_t tid, struct inode *ip) { int filetype; struct tblock *tblk; jfs_info("commitZeroLink: tid = %d, ip = 0x%p", tid, ip); filetype = ip->i_mode & S_IFMT; switch (filetype) { case S_IFREG: break; case S_IFLNK: /* fast symbolic link */ if (ip->i_size < IDATASIZE) { ip->i_size = 0; return 0; } break; default: assert(filetype != S_IFDIR); return 0; } set_cflag(COMMIT_Freewmap, ip); /* mark transaction of block map update type */ tblk = tid_to_tblock(tid); tblk->xflag |= COMMIT_PMAP; /* * free EA */ if (JFS_IP(ip)->ea.flag & DXD_EXTENT) /* acquire maplock on EA to be freed from block map */ txEA(tid, ip, &JFS_IP(ip)->ea, NULL); /* * free ACL */ if (JFS_IP(ip)->acl.flag & DXD_EXTENT) /* acquire maplock on EA to be freed from block map */ txEA(tid, ip, &JFS_IP(ip)->acl, NULL); /* * free xtree/data (truncate to zero length): * free xtree/data pages from cache if COMMIT_PWMAP, * free xtree/data blocks from persistent block map, and * free xtree/data blocks from working block map if COMMIT_PWMAP; */ if (ip->i_size) return xtTruncate_pmap(tid, ip, 0); return 0; } /* * NAME: jfs_free_zero_link() * * FUNCTION: for non-directory, called by iClose(), * free resources of a file from cache and WORKING map * for a file previously committed with zero link count * while associated with a pager object, * * PARAMETER: ip - pointer to inode of file. */ void jfs_free_zero_link(struct inode *ip) { int type; jfs_info("jfs_free_zero_link: ip = 0x%p", ip); /* return if not reg or symbolic link or if size is * already ok. 
*/ type = ip->i_mode & S_IFMT; switch (type) { case S_IFREG: break; case S_IFLNK: /* if its contained in inode nothing to do */ if (ip->i_size < IDATASIZE) return; break; default: return; } /* * free EA */ if (JFS_IP(ip)->ea.flag & DXD_EXTENT) { s64 xaddr = addressDXD(&JFS_IP(ip)->ea); int xlen = lengthDXD(&JFS_IP(ip)->ea); struct maplock maplock; /* maplock for COMMIT_WMAP */ struct pxd_lock *pxdlock; /* maplock for COMMIT_WMAP */ /* free EA pages from cache */ invalidate_dxd_metapages(ip, JFS_IP(ip)->ea); /* free EA extent from working block map */ maplock.index = 1; pxdlock = (struct pxd_lock *) & maplock; pxdlock->flag = mlckFREEPXD; PXDaddress(&pxdlock->pxd, xaddr); PXDlength(&pxdlock->pxd, xlen); txFreeMap(ip, pxdlock, NULL, COMMIT_WMAP); } /* * free ACL */ if (JFS_IP(ip)->acl.flag & DXD_EXTENT) { s64 xaddr = addressDXD(&JFS_IP(ip)->acl); int xlen = lengthDXD(&JFS_IP(ip)->acl); struct maplock maplock; /* maplock for COMMIT_WMAP */ struct pxd_lock *pxdlock; /* maplock for COMMIT_WMAP */ invalidate_dxd_metapages(ip, JFS_IP(ip)->acl); /* free ACL extent from working block map */ maplock.index = 1; pxdlock = (struct pxd_lock *) & maplock; pxdlock->flag = mlckFREEPXD; PXDaddress(&pxdlock->pxd, xaddr); PXDlength(&pxdlock->pxd, xlen); txFreeMap(ip, pxdlock, NULL, COMMIT_WMAP); } /* * free xtree/data (truncate to zero length): * free xtree/data pages from cache, and * free xtree/data blocks from working block map; */ if (ip->i_size) xtTruncate(0, ip, 0, COMMIT_WMAP); } /* * NAME: jfs_link(vp, dvp, name, crp) * * FUNCTION: create a link to <vp> by the name = <name> * in the parent directory <dvp> * * PARAMETER: vp - target object * dvp - parent directory of new link * name - name of new link to target object * crp - credential * * RETURN: Errors from subroutines * * note: * JFS does NOT support link() on directories (to prevent circular * path in the directory hierarchy); * EPERM: the target object is a directory, and either the caller * does not have appropriate 
privileges or the implementation prohibits * using link() on directories [XPG4.2]. * * JFS does NOT support links between file systems: * EXDEV: target object and new link are on different file systems and * implementation does not support links between file systems [XPG4.2]. */ static int jfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) { int rc; tid_t tid; struct inode *ip = old_dentry->d_inode; ino_t ino; struct component_name dname; struct btstack btstack; struct inode *iplist[2]; jfs_info("jfs_link: %s %s", old_dentry->d_name.name, dentry->d_name.name); dquot_initialize(dir); tid = txBegin(ip->i_sb, 0); mutex_lock_nested(&JFS_IP(dir)->commit_mutex, COMMIT_MUTEX_PARENT); mutex_lock_nested(&JFS_IP(ip)->commit_mutex, COMMIT_MUTEX_CHILD); /* * scan parent directory for entry/freespace */ if ((rc = get_UCSname(&dname, dentry))) goto out; if ((rc = dtSearch(dir, &dname, &ino, &btstack, JFS_CREATE))) goto free_dname; /* * create entry for new link in parent directory */ ino = ip->i_ino; if ((rc = dtInsert(tid, dir, &dname, &ino, &btstack))) goto free_dname; /* update object inode */ inc_nlink(ip); /* for new link */ ip->i_ctime = CURRENT_TIME; dir->i_ctime = dir->i_mtime = CURRENT_TIME; mark_inode_dirty(dir); ihold(ip); iplist[0] = ip; iplist[1] = dir; rc = txCommit(tid, 2, &iplist[0], 0); if (rc) { drop_nlink(ip); /* never instantiated */ iput(ip); } else d_instantiate(dentry, ip); free_dname: free_UCSname(&dname); out: txEnd(tid); mutex_unlock(&JFS_IP(ip)->commit_mutex); mutex_unlock(&JFS_IP(dir)->commit_mutex); jfs_info("jfs_link: rc:%d", rc); return rc; } /* * NAME: jfs_symlink(dip, dentry, name) * * FUNCTION: creates a symbolic link to <symlink> by name <name> * in directory <dip> * * PARAMETER: dip - parent directory vnode * dentry - dentry of symbolic link * name - the path name of the existing object * that will be the source of the link * * RETURN: errors from subroutines * * note: * ENAMETOOLONG: pathname resolution of a 
symbolic link produced * an intermediate result whose length exceeds PATH_MAX [XPG4.2] */ static int jfs_symlink(struct inode *dip, struct dentry *dentry, const char *name) { int rc; tid_t tid; ino_t ino = 0; struct component_name dname; int ssize; /* source pathname size */ struct btstack btstack; struct inode *ip = dentry->d_inode; unchar *i_fastsymlink; s64 xlen = 0; int bmask = 0, xsize; s64 xaddr; struct metapage *mp; struct super_block *sb; struct tblock *tblk; struct inode *iplist[2]; jfs_info("jfs_symlink: dip:0x%p name:%s", dip, name); dquot_initialize(dip); ssize = strlen(name) + 1; /* * search parent directory for entry/freespace * (dtSearch() returns parent directory page pinned) */ if ((rc = get_UCSname(&dname, dentry))) goto out1; /* * allocate on-disk/in-memory inode for symbolic link: * (iAlloc() returns new, locked inode) */ ip = ialloc(dip, S_IFLNK | 0777); if (IS_ERR(ip)) { rc = PTR_ERR(ip); goto out2; } tid = txBegin(dip->i_sb, 0); mutex_lock_nested(&JFS_IP(dip)->commit_mutex, COMMIT_MUTEX_PARENT); mutex_lock_nested(&JFS_IP(ip)->commit_mutex, COMMIT_MUTEX_CHILD); rc = jfs_init_security(tid, ip, dip, &dentry->d_name); if (rc) goto out3; tblk = tid_to_tblock(tid); tblk->xflag |= COMMIT_CREATE; tblk->ino = ip->i_ino; tblk->u.ixpxd = JFS_IP(ip)->ixpxd; /* fix symlink access permission * (dir_create() ANDs in the u.u_cmask, * but symlinks really need to be 777 access) */ ip->i_mode |= 0777; /* * write symbolic link target path name */ xtInitRoot(tid, ip); /* * write source path name inline in on-disk inode (fast symbolic link) */ if (ssize <= IDATASIZE) { ip->i_op = &jfs_fast_symlink_inode_operations; i_fastsymlink = JFS_IP(ip)->i_inline; memcpy(i_fastsymlink, name, ssize); ip->i_size = ssize - 1; /* * if symlink is > 128 bytes, we don't have the space to * store inline extended attributes */ if (ssize > sizeof (JFS_IP(ip)->i_inline)) JFS_IP(ip)->mode2 &= ~INLINEEA; jfs_info("jfs_symlink: fast symlink added ssize:%d name:%s ", ssize, name); } /* * 
write source path name in a single extent */ else { jfs_info("jfs_symlink: allocate extent ip:0x%p", ip); ip->i_op = &jfs_symlink_inode_operations; ip->i_mapping->a_ops = &jfs_aops; /* * even though the data of symlink object (source * path name) is treated as non-journaled user data, * it is read/written thru buffer cache for performance. */ sb = ip->i_sb; bmask = JFS_SBI(sb)->bsize - 1; xsize = (ssize + bmask) & ~bmask; xaddr = 0; xlen = xsize >> JFS_SBI(sb)->l2bsize; if ((rc = xtInsert(tid, ip, 0, 0, xlen, &xaddr, 0))) { txAbort(tid, 0); goto out3; } ip->i_size = ssize - 1; while (ssize) { /* This is kind of silly since PATH_MAX == 4K */ int copy_size = min(ssize, PSIZE); mp = get_metapage(ip, xaddr, PSIZE, 1); if (mp == NULL) { xtTruncate(tid, ip, 0, COMMIT_PWMAP); rc = -EIO; txAbort(tid, 0); goto out3; } memcpy(mp->data, name, copy_size); flush_metapage(mp); ssize -= copy_size; name += copy_size; xaddr += JFS_SBI(sb)->nbperpage; } } /* * create entry for symbolic link in parent directory */ rc = dtSearch(dip, &dname, &ino, &btstack, JFS_CREATE); if (rc == 0) { ino = ip->i_ino; rc = dtInsert(tid, dip, &dname, &ino, &btstack); } if (rc) { if (xlen) xtTruncate(tid, ip, 0, COMMIT_PWMAP); txAbort(tid, 0); /* discard new inode */ goto out3; } mark_inode_dirty(ip); dip->i_ctime = dip->i_mtime = CURRENT_TIME; mark_inode_dirty(dip); /* * commit update of parent directory and link object */ iplist[0] = dip; iplist[1] = ip; rc = txCommit(tid, 2, &iplist[0], 0); out3: txEnd(tid); mutex_unlock(&JFS_IP(ip)->commit_mutex); mutex_unlock(&JFS_IP(dip)->commit_mutex); if (rc) { free_ea_wmap(ip); clear_nlink(ip); unlock_new_inode(ip); iput(ip); } else { d_instantiate(dentry, ip); unlock_new_inode(ip); } out2: free_UCSname(&dname); out1: jfs_info("jfs_symlink: rc:%d", rc); return rc; } /* * NAME: jfs_rename * * FUNCTION: rename a file or directory */ static int jfs_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) { struct 
btstack btstack; ino_t ino; struct component_name new_dname; struct inode *new_ip; struct component_name old_dname; struct inode *old_ip; int rc; tid_t tid; struct tlock *tlck; struct dt_lock *dtlck; struct lv *lv; int ipcount; struct inode *iplist[4]; struct tblock *tblk; s64 new_size = 0; int commit_flag; jfs_info("jfs_rename: %s %s", old_dentry->d_name.name, new_dentry->d_name.name); dquot_initialize(old_dir); dquot_initialize(new_dir); old_ip = old_dentry->d_inode; new_ip = new_dentry->d_inode; if ((rc = get_UCSname(&old_dname, old_dentry))) goto out1; if ((rc = get_UCSname(&new_dname, new_dentry))) goto out2; /* * Make sure source inode number is what we think it is */ rc = dtSearch(old_dir, &old_dname, &ino, &btstack, JFS_LOOKUP); if (rc || (ino != old_ip->i_ino)) { rc = -ENOENT; goto out3; } /* * Make sure dest inode number (if any) is what we think it is */ rc = dtSearch(new_dir, &new_dname, &ino, &btstack, JFS_LOOKUP); if (!rc) { if ((!new_ip) || (ino != new_ip->i_ino)) { rc = -ESTALE; goto out3; } } else if (rc != -ENOENT) goto out3; else if (new_ip) { /* no entry exists, but one was expected */ rc = -ESTALE; goto out3; } if (S_ISDIR(old_ip->i_mode)) { if (new_ip) { if (!dtEmpty(new_ip)) { rc = -ENOTEMPTY; goto out3; } } } else if (new_ip) { IWRITE_LOCK(new_ip, RDWRLOCK_NORMAL); /* Init inode for quota operations. */ dquot_initialize(new_ip); } /* * The real work starts here */ tid = txBegin(new_dir->i_sb, 0); /* * How do we know the locking is safe from deadlocks? * The vfs does the hard part for us. Any time we are taking nested * commit_mutexes, the vfs already has i_mutex held on the parent. * Here, the vfs has already taken i_mutex on both old_dir and new_dir. 
*/ mutex_lock_nested(&JFS_IP(new_dir)->commit_mutex, COMMIT_MUTEX_PARENT); mutex_lock_nested(&JFS_IP(old_ip)->commit_mutex, COMMIT_MUTEX_CHILD); if (old_dir != new_dir) mutex_lock_nested(&JFS_IP(old_dir)->commit_mutex, COMMIT_MUTEX_SECOND_PARENT); if (new_ip) { mutex_lock_nested(&JFS_IP(new_ip)->commit_mutex, COMMIT_MUTEX_VICTIM); /* * Change existing directory entry to new inode number */ ino = new_ip->i_ino; rc = dtModify(tid, new_dir, &new_dname, &ino, old_ip->i_ino, JFS_RENAME); if (rc) goto out4; drop_nlink(new_ip); if (S_ISDIR(new_ip->i_mode)) { drop_nlink(new_ip); if (new_ip->i_nlink) { mutex_unlock(&JFS_IP(new_ip)->commit_mutex); if (old_dir != new_dir) mutex_unlock(&JFS_IP(old_dir)->commit_mutex); mutex_unlock(&JFS_IP(old_ip)->commit_mutex); mutex_unlock(&JFS_IP(new_dir)->commit_mutex); if (!S_ISDIR(old_ip->i_mode) && new_ip) IWRITE_UNLOCK(new_ip); jfs_error(new_ip->i_sb, "jfs_rename: new_ip->i_nlink != 0"); return -EIO; } tblk = tid_to_tblock(tid); tblk->xflag |= COMMIT_DELETE; tblk->u.ip = new_ip; } else if (new_ip->i_nlink == 0) { assert(!test_cflag(COMMIT_Nolink, new_ip)); /* free block resources */ if ((new_size = commitZeroLink(tid, new_ip)) < 0) { txAbort(tid, 1); /* Marks FS Dirty */ rc = new_size; goto out4; } tblk = tid_to_tblock(tid); tblk->xflag |= COMMIT_DELETE; tblk->u.ip = new_ip; } else { new_ip->i_ctime = CURRENT_TIME; mark_inode_dirty(new_ip); } } else { /* * Add new directory entry */ rc = dtSearch(new_dir, &new_dname, &ino, &btstack, JFS_CREATE); if (rc) { jfs_err("jfs_rename didn't expect dtSearch to fail " "w/rc = %d", rc); goto out4; } ino = old_ip->i_ino; rc = dtInsert(tid, new_dir, &new_dname, &ino, &btstack); if (rc) { if (rc == -EIO) jfs_err("jfs_rename: dtInsert returned -EIO"); goto out4; } if (S_ISDIR(old_ip->i_mode)) inc_nlink(new_dir); } /* * Remove old directory entry */ ino = old_ip->i_ino; rc = dtDelete(tid, old_dir, &old_dname, &ino, JFS_REMOVE); if (rc) { jfs_err("jfs_rename did not expect dtDelete to return rc = %d", 
rc); txAbort(tid, 1); /* Marks Filesystem dirty */ goto out4; } if (S_ISDIR(old_ip->i_mode)) { drop_nlink(old_dir); if (old_dir != new_dir) { /* * Change inode number of parent for moved directory */ JFS_IP(old_ip)->i_dtroot.header.idotdot = cpu_to_le32(new_dir->i_ino); /* Linelock header of dtree */ tlck = txLock(tid, old_ip, (struct metapage *) &JFS_IP(old_ip)->bxflag, tlckDTREE | tlckBTROOT | tlckRELINK); dtlck = (struct dt_lock *) & tlck->lock; ASSERT(dtlck->index == 0); lv = & dtlck->lv[0]; lv->offset = 0; lv->length = 1; dtlck->index++; } } /* * Update ctime on changed/moved inodes & mark dirty */ old_ip->i_ctime = CURRENT_TIME; mark_inode_dirty(old_ip); new_dir->i_ctime = new_dir->i_mtime = current_fs_time(new_dir->i_sb); mark_inode_dirty(new_dir); /* Build list of inodes modified by this transaction */ ipcount = 0; iplist[ipcount++] = old_ip; if (new_ip) iplist[ipcount++] = new_ip; iplist[ipcount++] = old_dir; if (old_dir != new_dir) { iplist[ipcount++] = new_dir; old_dir->i_ctime = old_dir->i_mtime = CURRENT_TIME; mark_inode_dirty(old_dir); } /* * Incomplete truncate of file data can * result in timing problems unless we synchronously commit the * transaction. 
*/ if (new_size) commit_flag = COMMIT_SYNC; else commit_flag = 0; rc = txCommit(tid, ipcount, iplist, commit_flag); out4: txEnd(tid); if (new_ip) mutex_unlock(&JFS_IP(new_ip)->commit_mutex); if (old_dir != new_dir) mutex_unlock(&JFS_IP(old_dir)->commit_mutex); mutex_unlock(&JFS_IP(old_ip)->commit_mutex); mutex_unlock(&JFS_IP(new_dir)->commit_mutex); while (new_size && (rc == 0)) { tid = txBegin(new_ip->i_sb, 0); mutex_lock(&JFS_IP(new_ip)->commit_mutex); new_size = xtTruncate_pmap(tid, new_ip, new_size); if (new_size < 0) { txAbort(tid, 1); rc = new_size; } else rc = txCommit(tid, 1, &new_ip, COMMIT_SYNC); txEnd(tid); mutex_unlock(&JFS_IP(new_ip)->commit_mutex); } if (new_ip && (new_ip->i_nlink == 0)) set_cflag(COMMIT_Nolink, new_ip); out3: free_UCSname(&new_dname); out2: free_UCSname(&old_dname); out1: if (new_ip && !S_ISDIR(new_ip->i_mode)) IWRITE_UNLOCK(new_ip); /* * Truncating the directory index table is not guaranteed. It * may need to be done iteratively */ if (test_cflag(COMMIT_Stale, old_dir)) { if (old_dir->i_size > 1) jfs_truncate_nolock(old_dir, 0); clear_cflag(COMMIT_Stale, old_dir); } jfs_info("jfs_rename: returning %d", rc); return rc; } /* * NAME: jfs_mknod * * FUNCTION: Create a special file (device) */ static int jfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t rdev) { struct jfs_inode_info *jfs_ip; struct btstack btstack; struct component_name dname; ino_t ino; struct inode *ip; struct inode *iplist[2]; int rc; tid_t tid; struct tblock *tblk; if (!new_valid_dev(rdev)) return -EINVAL; jfs_info("jfs_mknod: %s", dentry->d_name.name); dquot_initialize(dir); if ((rc = get_UCSname(&dname, dentry))) goto out; ip = ialloc(dir, mode); if (IS_ERR(ip)) { rc = PTR_ERR(ip); goto out1; } jfs_ip = JFS_IP(ip); tid = txBegin(dir->i_sb, 0); mutex_lock_nested(&JFS_IP(dir)->commit_mutex, COMMIT_MUTEX_PARENT); mutex_lock_nested(&JFS_IP(ip)->commit_mutex, COMMIT_MUTEX_CHILD); rc = jfs_init_acl(tid, ip, dir); if (rc) goto out3; rc = 
jfs_init_security(tid, ip, dir, &dentry->d_name); if (rc) { txAbort(tid, 0); goto out3; } if ((rc = dtSearch(dir, &dname, &ino, &btstack, JFS_CREATE))) { txAbort(tid, 0); goto out3; } tblk = tid_to_tblock(tid); tblk->xflag |= COMMIT_CREATE; tblk->ino = ip->i_ino; tblk->u.ixpxd = JFS_IP(ip)->ixpxd; ino = ip->i_ino; if ((rc = dtInsert(tid, dir, &dname, &ino, &btstack))) { txAbort(tid, 0); goto out3; } ip->i_op = &jfs_file_inode_operations; jfs_ip->dev = new_encode_dev(rdev); init_special_inode(ip, ip->i_mode, rdev); mark_inode_dirty(ip); dir->i_ctime = dir->i_mtime = CURRENT_TIME; mark_inode_dirty(dir); iplist[0] = dir; iplist[1] = ip; rc = txCommit(tid, 2, iplist, 0); out3: txEnd(tid); mutex_unlock(&JFS_IP(ip)->commit_mutex); mutex_unlock(&JFS_IP(dir)->commit_mutex); if (rc) { free_ea_wmap(ip); clear_nlink(ip); unlock_new_inode(ip); iput(ip); } else { d_instantiate(dentry, ip); unlock_new_inode(ip); } out1: free_UCSname(&dname); out: jfs_info("jfs_mknod: returning %d", rc); return rc; } static struct dentry *jfs_lookup(struct inode *dip, struct dentry *dentry, struct nameidata *nd) { struct btstack btstack; ino_t inum; struct inode *ip; struct component_name key; int rc; jfs_info("jfs_lookup: name = %s", dentry->d_name.name); if ((rc = get_UCSname(&key, dentry))) return ERR_PTR(rc); rc = dtSearch(dip, &key, &inum, &btstack, JFS_LOOKUP); free_UCSname(&key); if (rc == -ENOENT) { ip = NULL; } else if (rc) { jfs_err("jfs_lookup: dtSearch returned %d", rc); ip = ERR_PTR(rc); } else { ip = jfs_iget(dip->i_sb, inum); if (IS_ERR(ip)) jfs_err("jfs_lookup: iget failed on inum %d", (uint)inum); } return d_splice_alias(ip, dentry); } static struct inode *jfs_nfs_get_inode(struct super_block *sb, u64 ino, u32 generation) { struct inode *inode; if (ino == 0) return ERR_PTR(-ESTALE); inode = jfs_iget(sb, ino); if (IS_ERR(inode)) return ERR_CAST(inode); if (generation && inode->i_generation != generation) { iput(inode); return ERR_PTR(-ESTALE); } return inode; } struct dentry 
*jfs_fh_to_dentry(struct super_block *sb, struct fid *fid, int fh_len, int fh_type) { return generic_fh_to_dentry(sb, fid, fh_len, fh_type, jfs_nfs_get_inode); } struct dentry *jfs_fh_to_parent(struct super_block *sb, struct fid *fid, int fh_len, int fh_type) { return generic_fh_to_parent(sb, fid, fh_len, fh_type, jfs_nfs_get_inode); } struct dentry *jfs_get_parent(struct dentry *dentry) { unsigned long parent_ino; parent_ino = le32_to_cpu(JFS_IP(dentry->d_inode)->i_dtroot.header.idotdot); return d_obtain_alias(jfs_iget(dentry->d_inode->i_sb, parent_ino)); } const struct inode_operations jfs_dir_inode_operations = { .create = jfs_create, .lookup = jfs_lookup, .link = jfs_link, .unlink = jfs_unlink, .symlink = jfs_symlink, .mkdir = jfs_mkdir, .rmdir = jfs_rmdir, .mknod = jfs_mknod, .rename = jfs_rename, .setxattr = jfs_setxattr, .getxattr = jfs_getxattr, .listxattr = jfs_listxattr, .removexattr = jfs_removexattr, .setattr = jfs_setattr, #ifdef CONFIG_JFS_POSIX_ACL .get_acl = jfs_get_acl, #endif }; const struct file_operations jfs_dir_operations = { .read = generic_read_dir, .readdir = jfs_readdir, .fsync = jfs_fsync, .unlocked_ioctl = jfs_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = jfs_compat_ioctl, #endif .llseek = generic_file_llseek, }; static int jfs_ci_hash(const struct dentry *dir, const struct inode *inode, struct qstr *this) { unsigned long hash; int i; hash = init_name_hash(); for (i=0; i < this->len; i++) hash = partial_name_hash(tolower(this->name[i]), hash); this->hash = end_name_hash(hash); return 0; } static int jfs_ci_compare(const struct dentry *parent, const struct inode *pinode, const struct dentry *dentry, const struct inode *inode, unsigned int len, const char *str, const struct qstr *name) { int i, result = 1; if (len != name->len) goto out; for (i=0; i < len; i++) { if (tolower(str[i]) != tolower(name->name[i])) goto out; } result = 0; out: return result; } static int jfs_ci_revalidate(struct dentry *dentry, struct nameidata *nd) { /* * This is 
not negative dentry. Always valid. * * Note, rename() to existing directory entry will have ->d_inode, * and will use existing name which isn't specified name by user. * * We may be able to drop this positive dentry here. But dropping * positive dentry isn't good idea. So it's unsupported like * rename("filename", "FILENAME") for now. */ if (dentry->d_inode) return 1; /* * This may be nfsd (or something), anyway, we can't see the * intent of this. So, since this can be for creation, drop it. */ if (!nd) return 0; /* * Drop the negative dentry, in order to make sure to use the * case sensitive name which is specified by user if this is * for creation. */ if (nd->flags & (LOOKUP_CREATE | LOOKUP_RENAME_TARGET)) return 0; return 1; } const struct dentry_operations jfs_ci_dentry_operations = { .d_hash = jfs_ci_hash, .d_compare = jfs_ci_compare, .d_revalidate = jfs_ci_revalidate, };
gpl-2.0
sjurbren/modem-ipc
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
5089
24491
/*
 * Linux network driver for Brocade Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 */

#include "bfa_ioc.h"
#include "cna.h"
#include "bfi.h"
#include "bfi_reg.h"
#include "bfa_defs.h"

/*
 * The ioc_fail_sync register packs two per-PCI-fn bitmaps into one 32-bit
 * word: the low 16 bits are the "sync acked" bits, the high 16 bits the
 * "sync required" bits.  The helpers below extract/compose those fields.
 */
#define bfa_ioc_ct_sync_pos(__ioc)	\
		((u32) (1 << bfa_ioc_pcifn(__ioc)))
#define BFA_IOC_SYNC_REQD_SH		16
#define bfa_ioc_ct_get_sync_ackd(__val)		(__val & 0x0000ffff)
#define bfa_ioc_ct_clear_sync_ackd(__val)	(__val & 0xffff0000)
#define bfa_ioc_ct_get_sync_reqd(__val)		(__val >> BFA_IOC_SYNC_REQD_SH)
#define bfa_ioc_ct_sync_reqd_pos(__ioc)	\
		(bfa_ioc_ct_sync_pos(__ioc) << BFA_IOC_SYNC_REQD_SH)

/*
 * forward declarations
 */
static bool bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc);
static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc);
static void bfa_ioc_ct_reg_init(struct bfa_ioc *ioc);
static void bfa_ioc_ct2_reg_init(struct bfa_ioc *ioc);
static void bfa_ioc_ct_map_port(struct bfa_ioc *ioc);
static void bfa_ioc_ct2_map_port(struct bfa_ioc *ioc);
static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix);
static void bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc);
static void bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc);
static bool bfa_ioc_ct_sync_start(struct bfa_ioc *ioc);
static void bfa_ioc_ct_sync_join(struct bfa_ioc *ioc);
static void bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc);
static void bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc);
static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc);
static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb,
				enum bfi_asic_mode asic_mode);
static enum bfa_status bfa_ioc_ct2_pll_init(void __iomem *rb,
				enum bfi_asic_mode asic_mode);
static bool bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc *ioc);

/* Hardware-interface ops for the original Catapult (CT) ASIC. */
static const struct bfa_ioc_hwif nw_hwif_ct = {
	.ioc_pll_init	     = bfa_ioc_ct_pll_init,
	.ioc_firmware_lock   = bfa_ioc_ct_firmware_lock,
	.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock,
	.ioc_reg_init	     = bfa_ioc_ct_reg_init,
	.ioc_map_port	     = bfa_ioc_ct_map_port,
	.ioc_isr_mode_set    = bfa_ioc_ct_isr_mode_set,
	.ioc_notify_fail     = bfa_ioc_ct_notify_fail,
	.ioc_ownership_reset = bfa_ioc_ct_ownership_reset,
	.ioc_sync_start	     = bfa_ioc_ct_sync_start,
	.ioc_sync_join	     = bfa_ioc_ct_sync_join,
	.ioc_sync_leave	     = bfa_ioc_ct_sync_leave,
	.ioc_sync_ack	     = bfa_ioc_ct_sync_ack,
	.ioc_sync_complete   = bfa_ioc_ct_sync_complete,
};

/*
 * Hardware-interface ops for CT2.  Differs from CT in PLL init, register
 * layout, port mapping and LPU read-status; ISR mode setting is not
 * applicable (NULL).
 */
static const struct bfa_ioc_hwif nw_hwif_ct2 = {
	.ioc_pll_init	     = bfa_ioc_ct2_pll_init,
	.ioc_firmware_lock   = bfa_ioc_ct_firmware_lock,
	.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock,
	.ioc_reg_init	     = bfa_ioc_ct2_reg_init,
	.ioc_map_port	     = bfa_ioc_ct2_map_port,
	.ioc_lpu_read_stat   = bfa_ioc_ct2_lpu_read_stat,
	.ioc_isr_mode_set    = NULL,
	.ioc_notify_fail     = bfa_ioc_ct_notify_fail,
	.ioc_ownership_reset = bfa_ioc_ct_ownership_reset,
	.ioc_sync_start	     = bfa_ioc_ct_sync_start,
	.ioc_sync_join	     = bfa_ioc_ct_sync_join,
	.ioc_sync_leave	     = bfa_ioc_ct_sync_leave,
	.ioc_sync_ack	     = bfa_ioc_ct_sync_ack,
	.ioc_sync_complete   = bfa_ioc_ct_sync_complete,
};

/**
 * Called from bfa_ioc_attach() to map asic specific calls.
 */
void
bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
{
	ioc->ioc_hwif = &nw_hwif_ct;
}

/* Same as above, for the CT2 ASIC generation. */
void
bfa_nw_ioc_set_ct2_hwif(struct bfa_ioc *ioc)
{
	ioc->ioc_hwif = &nw_hwif_ct2;
}

/**
 * Return true if firmware of current driver matches the running firmware.
 *
 * Also takes a reference on the shared firmware usage counter (guarded by
 * the usage-count hardware semaphore) when the firmware matches or when
 * this is the first user.
 */
static bool
bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc)
{
	enum bfi_ioc_state ioc_fwstate;
	u32 usecnt;
	struct bfi_ioc_image_hdr fwhdr;

	/**
	 * If bios boot (flash based) -- do not increment usage count
	 */
	if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)) <
						BFA_IOC_FWIMG_MINSZ)
		return true;

	bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
	usecnt = readl(ioc->ioc_regs.ioc_usage_reg);

	/**
	 * If usage count is 0, always return TRUE.
	 */
	if (usecnt == 0) {
		writel(1, ioc->ioc_regs.ioc_usage_reg);
		bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
		writel(0, ioc->ioc_regs.ioc_fail_sync);
		return true;
	}

	ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);

	/**
	 * Use count cannot be non-zero and chip in uninitialized state.
	 * (Note: the double negation is equivalent to
	 * BUG_ON(ioc_fwstate == BFI_IOC_UNINIT).)
	 */
	BUG_ON(!(ioc_fwstate != BFI_IOC_UNINIT));

	/**
	 * Check if another driver with a different firmware is active
	 */
	bfa_nw_ioc_fwver_get(ioc, &fwhdr);
	if (!bfa_nw_ioc_fwver_cmp(ioc, &fwhdr)) {
		bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
		return false;
	}

	/**
	 * Same firmware version. Increment the reference count.
	 */
	usecnt++;
	writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
	bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
	return true;
}

/* Drop the reference taken by bfa_ioc_ct_firmware_lock(). */
static void
bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc)
{
	u32 usecnt;

	/**
	 * If bios boot (flash based) -- do not decrement usage count
	 */
	if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)) <
						BFA_IOC_FWIMG_MINSZ)
		return;

	/**
	 * decrement usage count
	 */
	bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
	usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
	BUG_ON(!(usecnt > 0));

	usecnt--;
	writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
	bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
}

/**
 * Notify other functions on HB failure.
 */
static void
bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc)
{
	writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt);
	writel(__FW_INIT_HALT_P, ioc->ioc_regs.alt_ll_halt);
	/* Wait for halt to take effect (read back flushes the writes) */
	readl(ioc->ioc_regs.ll_halt);
	readl(ioc->ioc_regs.alt_ll_halt);
}

/**
 * Host to LPU mailbox message addresses, indexed by PCI function.
 */
static const struct {
	u32	hfn_mbox;
	u32	lpu_mbox;
	u32	hfn_pgn;
} ct_fnreg[] = {
	{ HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 },
	{ HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 },
	{ HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2 },
	{ HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 }
};

/**
 * Host <-> LPU mailbox command/status registers - port 0
 */
static const struct {
	u32	hfn;
	u32	lpu;
} ct_p0reg[] = {
	{ HOSTFN0_LPU0_CMD_STAT, LPU0_HOSTFN0_CMD_STAT },
	{ HOSTFN1_LPU0_CMD_STAT, LPU0_HOSTFN1_CMD_STAT },
	{ HOSTFN2_LPU0_CMD_STAT, LPU0_HOSTFN2_CMD_STAT },
	{ HOSTFN3_LPU0_CMD_STAT, LPU0_HOSTFN3_CMD_STAT }
};

/**
 * Host <-> LPU mailbox command/status registers - port 1
 */
static const struct {
	u32	hfn;
	u32	lpu;
} ct_p1reg[] = {
	{ HOSTFN0_LPU1_CMD_STAT, LPU1_HOSTFN0_CMD_STAT },
	{ HOSTFN1_LPU1_CMD_STAT, LPU1_HOSTFN1_CMD_STAT },
	{ HOSTFN2_LPU1_CMD_STAT, LPU1_HOSTFN2_CMD_STAT },
	{ HOSTFN3_LPU1_CMD_STAT, LPU1_HOSTFN3_CMD_STAT }
};

/* CT2 mailbox/command-status/read-status registers, indexed by port. */
static const struct {
	u32	hfn_mbox;
	u32	lpu_mbox;
	u32	hfn_pgn;
	u32	hfn;
	u32	lpu;
	u32	lpu_read;
} ct2_reg[] = {
	{ CT2_HOSTFN_LPU0_MBOX0, CT2_LPU0_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM,
	  CT2_HOSTFN_LPU0_CMD_STAT, CT2_LPU0_HOSTFN_CMD_STAT,
	  CT2_HOSTFN_LPU0_READ_STAT},
	{ CT2_HOSTFN_LPU1_MBOX0, CT2_LPU1_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM,
	  CT2_HOSTFN_LPU1_CMD_STAT, CT2_LPU1_HOSTFN_CMD_STAT,
	  CT2_HOSTFN_LPU1_READ_STAT},
};

/* Compute the per-IOC register addresses for the CT ASIC. */
static void
bfa_ioc_ct_reg_init(struct bfa_ioc *ioc)
{
	void __iomem *rb;
	int pcifn = bfa_ioc_pcifn(ioc);

	rb = bfa_ioc_bar0(ioc);

	ioc->ioc_regs.hfn_mbox = rb + ct_fnreg[pcifn].hfn_mbox;
	ioc->ioc_regs.lpu_mbox = rb + ct_fnreg[pcifn].lpu_mbox;
	ioc->ioc_regs.host_page_num_fn = rb + ct_fnreg[pcifn].hfn_pgn;

	if (ioc->port_id == 0) {
		ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
		ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
		ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG;
		ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p0reg[pcifn].hfn;
		ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p0reg[pcifn].lpu;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
	} else {
		ioc->ioc_regs.heartbeat = rb + BFA_IOC1_HBEAT_REG;
		ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC1_STATE_REG;
		ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC0_STATE_REG;
		ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p1reg[pcifn].hfn;
		ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p1reg[pcifn].lpu;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
	}

	/*
	 * PSS control registers
	 */
	ioc->ioc_regs.pss_ctl_reg = rb + PSS_CTL_REG;
	ioc->ioc_regs.pss_err_status_reg = rb + PSS_ERR_STATUS_REG;
	ioc->ioc_regs.app_pll_fast_ctl_reg = rb + APP_PLL_LCLK_CTL_REG;
	ioc->ioc_regs.app_pll_slow_ctl_reg = rb + APP_PLL_SCLK_CTL_REG;

	/*
	 * IOC semaphore registers and serialization
	 */
	ioc->ioc_regs.ioc_sem_reg = rb + HOST_SEM0_REG;
	ioc->ioc_regs.ioc_usage_sem_reg = rb + HOST_SEM1_REG;
	ioc->ioc_regs.ioc_init_sem_reg = rb + HOST_SEM2_REG;
	ioc->ioc_regs.ioc_usage_reg = rb + BFA_FW_USE_COUNT;
	ioc->ioc_regs.ioc_fail_sync = rb + BFA_IOC_FAIL_SYNC;

	/**
	 * sram memory access
	 */
	ioc->ioc_regs.smem_page_start = rb + PSS_SMEM_PAGE_START;
	ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;

	/*
	 * err set reg : for notification of hb failure in fcmode
	 */
	ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
}

/* Compute the per-IOC register addresses for the CT2 ASIC. */
static void
bfa_ioc_ct2_reg_init(struct bfa_ioc *ioc)
{
	void __iomem *rb;
	int port = bfa_ioc_portid(ioc);

	rb = bfa_ioc_bar0(ioc);

	ioc->ioc_regs.hfn_mbox = rb + ct2_reg[port].hfn_mbox;
	ioc->ioc_regs.lpu_mbox = rb + ct2_reg[port].lpu_mbox;
	ioc->ioc_regs.host_page_num_fn = rb + ct2_reg[port].hfn_pgn;
	ioc->ioc_regs.hfn_mbox_cmd = rb + ct2_reg[port].hfn;
	ioc->ioc_regs.lpu_mbox_cmd = rb + ct2_reg[port].lpu;
	ioc->ioc_regs.lpu_read_stat = rb + ct2_reg[port].lpu_read;

	if (port == 0) {
		ioc->ioc_regs.heartbeat = rb + CT2_BFA_IOC0_HBEAT_REG;
		ioc->ioc_regs.ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG;
		ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC1_STATE_REG;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
	} else {
		ioc->ioc_regs.heartbeat = rb + CT2_BFA_IOC1_HBEAT_REG;
		ioc->ioc_regs.ioc_fwstate = rb + CT2_BFA_IOC1_STATE_REG;
		ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
	}

	/*
	 * PSS control registers
	 */
	ioc->ioc_regs.pss_ctl_reg = rb + PSS_CTL_REG;
	ioc->ioc_regs.pss_err_status_reg = rb + PSS_ERR_STATUS_REG;
	ioc->ioc_regs.app_pll_fast_ctl_reg = rb + CT2_APP_PLL_LCLK_CTL_REG;
	ioc->ioc_regs.app_pll_slow_ctl_reg = rb + CT2_APP_PLL_SCLK_CTL_REG;

	/*
	 * IOC semaphore registers and serialization
	 */
	ioc->ioc_regs.ioc_sem_reg = rb + CT2_HOST_SEM0_REG;
	ioc->ioc_regs.ioc_usage_sem_reg = rb + CT2_HOST_SEM1_REG;
	ioc->ioc_regs.ioc_init_sem_reg = rb + CT2_HOST_SEM2_REG;
	ioc->ioc_regs.ioc_usage_reg = rb + CT2_BFA_FW_USE_COUNT;
	ioc->ioc_regs.ioc_fail_sync = rb + CT2_BFA_IOC_FAIL_SYNC;

	/**
	 * sram memory access
	 */
	ioc->ioc_regs.smem_page_start = rb + PSS_SMEM_PAGE_START;
	ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;

	/*
	 * err set reg : for notification of hb failure in fcmode
	 */
	ioc->ioc_regs.err_set = rb + ERR_SET_REG;
}

/**
 * Initialize IOC to port mapping.
 */

/* Each PCI function occupies an 8-bit field in the personality register. */
#define FNC_PERS_FN_SHIFT(__fn)	((__fn) * 8)
static void
bfa_ioc_ct_map_port(struct bfa_ioc *ioc)
{
	void __iomem *rb = ioc->pcidev.pci_bar_kva;
	u32	r32;

	/**
	 * For catapult, base port id on personality register and IOC type
	 */
	r32 = readl(rb + FNC_PERS_REG);
	r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc));
	ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH;
}

/* CT2 exposes the port mapping in a dedicated personality register. */
static void
bfa_ioc_ct2_map_port(struct bfa_ioc *ioc)
{
	void __iomem *rb = ioc->pcidev.pci_bar_kva;
	u32	r32;

	r32 = readl(rb + CT2_HOSTFN_PERSONALITY0);
	ioc->port_id = ((r32 & __FC_LL_PORT_MAP__MK) >> __FC_LL_PORT_MAP__SH);
}

/**
 * Set interrupt mode for a function: INTX or MSIX
 */
static void
bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix)
{
	void __iomem *rb = ioc->pcidev.pci_bar_kva;
	u32	r32, mode;

	r32 = readl(rb + FNC_PERS_REG);

	mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) &
		__F0_INTX_STATUS;

	/**
	 * If already in desired mode, do not change anything
	 */
	if ((!msix && mode) || (msix && !mode))
		return;

	if (msix)
		mode = __F0_INTX_STATUS_MSIX;
	else
		mode = __F0_INTX_STATUS_INTA;

	r32 &= ~(__F0_INTX_STATUS << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
	r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));

	writel(r32, rb + FNC_PERS_REG);
}

/*
 * Read (and clear, write-1-to-clear) the CT2 LPU read-status bit.
 * Returns true if a status was pending.
 */
static bool
bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc *ioc)
{
	u32	r32;

	r32 = readl(ioc->ioc_regs.lpu_read_stat);
	if (r32) {
		writel(1, ioc->ioc_regs.lpu_read_stat);
		return true;
	}

	return false;
}

/**
 * MSI-X resource allocation for 1860 with no asic block
 */
#define HOSTFN_MSIX_DEFAULT		64
#define HOSTFN_MSIX_VT_INDEX_MBOX_ERR	0x30138
#define HOSTFN_MSIX_VT_OFST_NUMVT	0x3013c
#define __MSIX_VT_NUMVT__MK		0x003ff800
#define __MSIX_VT_NUMVT__SH		11
#define __MSIX_VT_NUMVT_(_v)		((_v) << __MSIX_VT_NUMVT__SH)
#define __MSIX_VT_OFST_			0x000007ff
void
bfa_nw_ioc_ct2_poweron(struct bfa_ioc *ioc)
{
	void __iomem *rb = ioc->pcidev.pci_bar_kva;
	u32	r32;

	r32 = readl(rb + HOSTFN_MSIX_VT_OFST_NUMVT);
	if (r32 & __MSIX_VT_NUMVT__MK) {
		/* Vector count already programmed; just set the mbox-err
		 * vector index from the existing offset field. */
		writel(r32 & __MSIX_VT_OFST_,
			rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR);
		return;
	}

	writel(__MSIX_VT_NUMVT_(HOSTFN_MSIX_DEFAULT - 1) |
		HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc),
		rb + HOSTFN_MSIX_VT_OFST_NUMVT);
	writel(HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc),
		rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR);
}

/**
 * Cleanup hw semaphore and usecnt registers
 */
static void
bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc)
{
	bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
	writel(0, ioc->ioc_regs.ioc_usage_reg);
	bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);

	/*
	 * Read the hw sem reg to make sure that it is locked
	 * before we clear it. If it is not locked, writing 1
	 * will lock it instead of clearing it.
	 */
	readl(ioc->ioc_regs.ioc_sem_reg);
	bfa_nw_ioc_hw_sem_release(ioc);
}

/**
 * Synchronized IOC failure processing routines
 */
static bool
bfa_ioc_ct_sync_start(struct bfa_ioc *ioc)
{
	u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
	u32 sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);

	/*
	 * Driver load time.  If the sync required bit for this PCI fn
	 * is set, it is due to an unclean exit by the driver for this
	 * PCI fn in the previous incarnation. Whoever comes here first
	 * should clean it up, no matter which PCI fn.
	 */
	if (sync_reqd & bfa_ioc_ct_sync_pos(ioc)) {
		writel(0, ioc->ioc_regs.ioc_fail_sync);
		writel(1, ioc->ioc_regs.ioc_usage_reg);
		writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
		writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
		return true;
	}

	return bfa_ioc_ct_sync_complete(ioc);
}

/**
 * Synchronized IOC failure processing routines
 */
static void
bfa_ioc_ct_sync_join(struct bfa_ioc *ioc)
{
	u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
	u32 sync_pos = bfa_ioc_ct_sync_reqd_pos(ioc);

	/* Set this fn's "sync required" bit */
	writel((r32 | sync_pos), ioc->ioc_regs.ioc_fail_sync);
}

static void
bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc)
{
	u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
	u32 sync_msk = bfa_ioc_ct_sync_reqd_pos(ioc) |
			bfa_ioc_ct_sync_pos(ioc);

	/* Clear both this fn's "sync required" and "sync acked" bits */
	writel((r32 & ~sync_msk), ioc->ioc_regs.ioc_fail_sync);
}

static void
bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc)
{
	u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);

	/* Set this fn's "sync acked" bit */
	writel((r32 | bfa_ioc_ct_sync_pos(ioc)), ioc->ioc_regs.ioc_fail_sync);
}

/*
 * Returns true when all functions that required sync have acked, i.e.
 * the failure-sync handshake is complete and the IOC may be failed.
 */
static bool
bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc)
{
	u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
	u32 sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);
	u32 sync_ackd = bfa_ioc_ct_get_sync_ackd(r32);
	u32 tmp_ackd;

	if (sync_ackd == 0)
		return true;

	/**
	 * The check below is to see whether any other PCI fn
	 * has reinitialized the ASIC (reset sync_ackd bits)
	 * and failed again while this IOC was waiting for hw
	 * semaphore (in bfa_iocpf_sm_semwait()).
	 */
	tmp_ackd = sync_ackd;
	if ((sync_reqd &  bfa_ioc_ct_sync_pos(ioc)) &&
			!(sync_ackd & bfa_ioc_ct_sync_pos(ioc)))
		sync_ackd |= bfa_ioc_ct_sync_pos(ioc);

	if (sync_reqd == sync_ackd) {
		writel(bfa_ioc_ct_clear_sync_ackd(r32),
				ioc->ioc_regs.ioc_fail_sync);
		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
		writel(BFI_IOC_FAIL, ioc->ioc_regs.alt_ioc_fwstate);
		return true;
	}

	/**
	 * If another PCI fn reinitialized and failed again while
	 * this IOC was waiting for hw sem, the sync_ackd bit for
	 * this IOC need to be set again to allow reinitialization.
	 */
	if (tmp_ackd != sync_ackd)
		writel((r32 | sync_ackd), ioc->ioc_regs.ioc_fail_sync);

	return false;
}

/* Bring up the CT ASIC's application PLLs (sclk/lclk) and run memory BIST. */
static enum bfa_status
bfa_ioc_ct_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode)
{
	u32	pll_sclk, pll_fclk, r32;
	bool fcmode = (asic_mode == BFI_ASIC_MODE_FC);

	pll_sclk = __APP_PLL_SCLK_LRESETN | __APP_PLL_SCLK_ENARST |
		__APP_PLL_SCLK_RSEL200500 | __APP_PLL_SCLK_P0_1(3U) |
		__APP_PLL_SCLK_JITLMT0_1(3U) |
		__APP_PLL_SCLK_CNTLMT0_1(1U);
	pll_fclk = __APP_PLL_LCLK_LRESETN | __APP_PLL_LCLK_ENARST |
		__APP_PLL_LCLK_RSEL200500 | __APP_PLL_LCLK_P0_1(3U) |
		__APP_PLL_LCLK_JITLMT0_1(3U) |
		__APP_PLL_LCLK_CNTLMT0_1(1U);

	if (fcmode) {
		writel(0, (rb + OP_MODE));
		writel(__APP_EMS_CMLCKSEL |
				__APP_EMS_REFCKBUFEN2 |
				__APP_EMS_CHANNEL_SEL,
				(rb + ETH_MAC_SER_REG));
	} else {
		writel(__GLOBAL_FCOE_MODE, (rb + OP_MODE));
		writel(__APP_EMS_REFCKBUFEN1,
				(rb + ETH_MAC_SER_REG));
	}

	writel(BFI_IOC_UNINIT, (rb + BFA_IOC0_STATE_REG));
	writel(BFI_IOC_UNINIT, (rb + BFA_IOC1_STATE_REG));

	/* Mask and clear any interrupts left over from a previous run */
	writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
	writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
	writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
	writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
	writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
	writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));

	writel(pll_sclk | __APP_PLL_SCLK_LOGIC_SOFT_RESET,
			rb + APP_PLL_SCLK_CTL_REG);
	writel(pll_fclk | __APP_PLL_LCLK_LOGIC_SOFT_RESET,
			rb + APP_PLL_LCLK_CTL_REG);
	writel(pll_sclk | __APP_PLL_SCLK_LOGIC_SOFT_RESET |
		__APP_PLL_SCLK_ENABLE, rb + APP_PLL_SCLK_CTL_REG);
	writel(pll_fclk | __APP_PLL_LCLK_LOGIC_SOFT_RESET |
		__APP_PLL_LCLK_ENABLE, rb + APP_PLL_LCLK_CTL_REG);

	/* Flush posted writes, then wait for the PLLs to settle */
	readl(rb + HOSTFN0_INT_MSK);
	udelay(2000);
	writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
	writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));

	writel(pll_sclk | __APP_PLL_SCLK_ENABLE,
			rb + APP_PLL_SCLK_CTL_REG);
	writel(pll_fclk | __APP_PLL_LCLK_ENABLE,
			rb + APP_PLL_LCLK_CTL_REG);

	if (!fcmode) {
		writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P0));
		writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P1));
	}
	r32 = readl((rb + PSS_CTL_REG));
	r32 &= ~__PSS_LMEM_RESET;
	writel(r32, (rb + PSS_CTL_REG));
	udelay(1000);
	if (!fcmode) {
		writel(0, (rb + PMM_1T_RESET_REG_P0));
		writel(0, (rb + PMM_1T_RESET_REG_P1));
	}

	/* Run the eDRAM built-in self test; result read is discarded */
	writel(__EDRAM_BISTR_START, (rb + MBIST_CTL_REG));
	udelay(1000);
	r32 = readl((rb + MBIST_STAT_REG));
	writel(0, (rb + MBIST_CTL_REG));
	return BFA_STATUS_OK;
}

/* CT2: program the slow (system) clock PLL. */
static void
bfa_ioc_ct2_sclk_init(void __iomem *rb)
{
	u32 r32;

	/*
	 * put s_clk PLL and PLL FSM in reset
	 */
	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
	r32 &= ~(__APP_PLL_SCLK_ENABLE | __APP_PLL_SCLK_LRESETN);
	r32 |= (__APP_PLL_SCLK_ENARST | __APP_PLL_SCLK_BYPASS |
		__APP_PLL_SCLK_LOGIC_SOFT_RESET);
	writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG));

	/*
	 * Ignore mode and program for the max clock (which is FC16)
	 * Firmware/NFC will do the PLL init appropiately
	 */
	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
	r32 &= ~(__APP_PLL_SCLK_REFCLK_SEL | __APP_PLL_SCLK_CLK_DIV2);
	writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG));

	/*
	 * while doing PLL init dont clock gate ethernet subsystem
	 */
	r32 = readl((rb + CT2_CHIP_MISC_PRG));
	writel((r32 | __ETH_CLK_ENABLE_PORT0),
				(rb + CT2_CHIP_MISC_PRG));

	r32 = readl((rb + CT2_PCIE_MISC_REG));
	writel((r32 | __ETH_CLK_ENABLE_PORT1),
				(rb + CT2_PCIE_MISC_REG));

	/*
	 * set sclk value
	 */
	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
	r32 &= (__P_SCLK_PLL_LOCK | __APP_PLL_SCLK_REFCLK_SEL |
		__APP_PLL_SCLK_CLK_DIV2);
	writel(r32 | 0x1061731b, (rb + CT2_APP_PLL_SCLK_CTL_REG));

	/*
	 * poll for s_clk lock or delay 1ms
	 */
	udelay(1000);

	/*
	 * Dont do clock gating for ethernet subsystem, firmware/NFC will
	 * do this appropriately
	 */
}

/* CT2: program the fast (LPU logic) clock PLL. */
static void
bfa_ioc_ct2_lclk_init(void __iomem *rb)
{
	u32 r32;

	/*
	 * put l_clk PLL and PLL FSM in reset
	 */
	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
	r32 &= ~(__APP_PLL_LCLK_ENABLE | __APP_PLL_LCLK_LRESETN);
	r32 |= (__APP_PLL_LCLK_ENARST | __APP_PLL_LCLK_BYPASS |
		__APP_PLL_LCLK_LOGIC_SOFT_RESET);
	writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));

	/*
	 * set LPU speed (set for FC16 which will work for other modes)
	 * NOTE(review): both read-modify-writes below write the value
	 * back unchanged -- presumably placeholders for mode-specific
	 * bits; kept as-is.
	 */
	r32 = readl((rb + CT2_CHIP_MISC_PRG));
	writel(r32, (rb + CT2_CHIP_MISC_PRG));

	/*
	 * set LPU half speed (set for FC16 which will work for other modes)
	 */
	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
	writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));

	/*
	 * set lclk for mode (set for FC16)
	 */
	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
	r32 &= (__P_LCLK_PLL_LOCK | __APP_LPUCLK_HALFSPEED);
	r32 |= 0x20c1731b;
	writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));

	/*
	 * poll for s_clk lock or delay 1ms
	 */
	udelay(1000);
}

/* CT2: release local memory reset and run the eDRAM BIST. */
static void
bfa_ioc_ct2_mem_init(void __iomem *rb)
{
	u32 r32;

	r32 = readl((rb + PSS_CTL_REG));
	r32 &= ~__PSS_LMEM_RESET;
	writel(r32, (rb + PSS_CTL_REG));
	udelay(1000);

	writel(__EDRAM_BISTR_START, (rb + CT2_MBIST_CTL_REG));
	udelay(1000);
	writel(0, (rb + CT2_MBIST_CTL_REG));
}

/* CT2: reinit clocks, then hold both port MACs (and AHB) in reset. */
static void
bfa_ioc_ct2_mac_reset(void __iomem *rb)
{
	volatile u32 r32;

	bfa_ioc_ct2_sclk_init(rb);
	bfa_ioc_ct2_lclk_init(rb);

	/*
	 * release soft reset on s_clk & l_clk
	 */
	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
	writel((r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET),
			(rb + CT2_APP_PLL_SCLK_CTL_REG));

	/*
	 * release soft reset on s_clk & l_clk
	 */
	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
	writel((r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET),
			(rb + CT2_APP_PLL_LCLK_CTL_REG));

	/* put port0, port1 MAC & AHB in reset */
	writel((__CSI_MAC_RESET | __CSI_MAC_AHB_RESET),
			(rb + CT2_CSI_MAC_CONTROL_REG(0)));
	writel((__CSI_MAC_RESET | __CSI_MAC_AHB_RESET),
			(rb + CT2_CSI_MAC_CONTROL_REG(1)));
}

/* Max iterations (1ms each) to wait for the NFC controller to halt */
#define CT2_NFC_MAX_DELAY	1000
/* Bring up the CT2 ASIC's PLLs, clear stale mailbox state and run BIST. */
static enum bfa_status
bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode)
{
	volatile u32 wgn, r32;
	int i;

	/*
	 * Initialize PLL if not already done by NFC
	 */
	wgn = readl(rb + CT2_WGN_STATUS);
	if (!(wgn & __GLBL_PF_VF_CFG_RDY)) {
		writel(__HALT_NFC_CONTROLLER, (rb + CT2_NFC_CSR_SET_REG));
		for (i = 0; i < CT2_NFC_MAX_DELAY; i++) {
			r32 = readl(rb + CT2_NFC_CSR_SET_REG);
			if (r32 & __NFC_CONTROLLER_HALTED)
				break;
			udelay(1000);
		}
	}

	/*
	 * Mask the interrupts and clear any
	 * pending interrupts left by BIOS/EFI
	 */
	writel(1, (rb + CT2_LPU0_HOSTFN_MBOX0_MSK));
	writel(1, (rb + CT2_LPU1_HOSTFN_MBOX0_MSK));

	r32 = readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
	if (r32 == 1) {
		writel(1, (rb + CT2_LPU0_HOSTFN_CMD_STAT));
		readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
	}
	r32 = readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
	if (r32 == 1) {
		writel(1, (rb + CT2_LPU1_HOSTFN_CMD_STAT));
		readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
	}

	/* NOTE(review): mac_reset already runs sclk/lclk init; the two
	 * explicit calls below repeat it -- kept to preserve the exact
	 * hardware programming sequence. */
	bfa_ioc_ct2_mac_reset(rb);
	bfa_ioc_ct2_sclk_init(rb);
	bfa_ioc_ct2_lclk_init(rb);

	/*
	 * release soft reset on s_clk & l_clk
	 */
	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
	writel((r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET),
			(rb + CT2_APP_PLL_SCLK_CTL_REG));

	/*
	 * release soft reset on s_clk & l_clk
	 */
	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
	writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET,
			(rb + CT2_APP_PLL_LCLK_CTL_REG));

	/*
	 * Announce flash device presence, if flash was corrupted.
	 */
	if (wgn == (__WGN_READY | __GLBL_PF_VF_CFG_RDY)) {
		r32 = readl((rb + PSS_GPIO_OUT_REG));
		writel((r32 & ~1), (rb + PSS_GPIO_OUT_REG));
		r32 = readl((rb + PSS_GPIO_OE_REG));
		writel((r32 | 1), (rb + PSS_GPIO_OE_REG));
	}

	bfa_ioc_ct2_mem_init(rb);

	writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC0_STATE_REG));
	writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC1_STATE_REG));
	return BFA_STATUS_OK;
}
gpl-2.0
neomanu/NeoKernel-MT6589-A116
drivers/staging/tidspbridge/pmgr/dbll.c
5089
36583
/* * dbll.c * * DSP-BIOS Bridge driver support functions for TI OMAP processors. * * Copyright (C) 2005-2006 Texas Instruments, Inc. * * This package is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. */ #include <linux/types.h> /* ----------------------------------- Host OS */ #include <dspbridge/host_os.h> /* ----------------------------------- DSP/BIOS Bridge */ #include <dspbridge/dbdefs.h> #include <dspbridge/gh.h> /* ----------------------------------- OS Adaptation Layer */ /* Dynamic loader library interface */ #include <dspbridge/dynamic_loader.h> #include <dspbridge/getsection.h> /* ----------------------------------- This */ #include <dspbridge/dbll.h> #include <dspbridge/rmm.h> /* Number of buckets for symbol hash table */ #define MAXBUCKETS 211 /* Max buffer length */ #define MAXEXPR 128 #define DOFF_ALIGN(x) (((x) + 3) & ~3UL) /* * ======== struct dbll_tar_obj* ======== * A target may have one or more libraries of symbols/code/data loaded * onto it, where a library is simply the symbols/code/data contained * in a DOFF file. */ /* * ======== dbll_tar_obj ======== */ struct dbll_tar_obj { struct dbll_attrs attrs; struct dbll_library_obj *head; /* List of all opened libraries */ }; /* * The following 4 typedefs are "super classes" of the dynamic loader * library types used in dynamic loader functions (dynamic_loader.h). 
*/ /* * ======== dbll_stream ======== * Contains dynamic_loader_stream */ struct dbll_stream { struct dynamic_loader_stream dl_stream; struct dbll_library_obj *lib; }; /* * ======== ldr_symbol ======== */ struct ldr_symbol { struct dynamic_loader_sym dl_symbol; struct dbll_library_obj *lib; }; /* * ======== dbll_alloc ======== */ struct dbll_alloc { struct dynamic_loader_allocate dl_alloc; struct dbll_library_obj *lib; }; /* * ======== dbll_init_obj ======== */ struct dbll_init_obj { struct dynamic_loader_initialize dl_init; struct dbll_library_obj *lib; }; /* * ======== DBLL_Library ======== * A library handle is returned by DBLL_Open() and is passed to dbll_load() * to load symbols/code/data, and to dbll_unload(), to remove the * symbols/code/data loaded by dbll_load(). */ /* * ======== dbll_library_obj ======== */ struct dbll_library_obj { struct dbll_library_obj *next; /* Next library in target's list */ struct dbll_library_obj *prev; /* Previous in the list */ struct dbll_tar_obj *target_obj; /* target for this library */ /* Objects needed by dynamic loader */ struct dbll_stream stream; struct ldr_symbol symbol; struct dbll_alloc allocate; struct dbll_init_obj init; void *dload_mod_obj; char *file_name; /* COFF file name */ void *fp; /* Opaque file handle */ u32 entry; /* Entry point */ void *desc; /* desc of DOFF file loaded */ u32 open_ref; /* Number of times opened */ u32 load_ref; /* Number of times loaded */ struct gh_t_hash_tab *sym_tab; /* Hash table of symbols */ u32 pos; }; /* * ======== dbll_symbol ======== */ struct dbll_symbol { struct dbll_sym_val value; char *name; }; static void dof_close(struct dbll_library_obj *zl_lib); static int dof_open(struct dbll_library_obj *zl_lib); static s32 no_op(struct dynamic_loader_initialize *thisptr, void *bufr, ldr_addr locn, struct ldr_section_info *info, unsigned bytsize); /* * Functions called by dynamic loader * */ /* dynamic_loader_stream */ static int dbll_read_buffer(struct dynamic_loader_stream *this, 
void *buffer, unsigned bufsize); static int dbll_set_file_posn(struct dynamic_loader_stream *this, unsigned int pos); /* dynamic_loader_sym */ static struct dynload_symbol *dbll_find_symbol(struct dynamic_loader_sym *this, const char *name); static struct dynload_symbol *dbll_add_to_symbol_table(struct dynamic_loader_sym *this, const char *name, unsigned module_id); static struct dynload_symbol *find_in_symbol_table(struct dynamic_loader_sym *this, const char *name, unsigned moduleid); static void dbll_purge_symbol_table(struct dynamic_loader_sym *this, unsigned module_id); static void *allocate(struct dynamic_loader_sym *this, unsigned memsize); static void deallocate(struct dynamic_loader_sym *this, void *mem_ptr); static void dbll_err_report(struct dynamic_loader_sym *this, const char *errstr, va_list args); /* dynamic_loader_allocate */ static int dbll_rmm_alloc(struct dynamic_loader_allocate *this, struct ldr_section_info *info, unsigned align); static void rmm_dealloc(struct dynamic_loader_allocate *this, struct ldr_section_info *info); /* dynamic_loader_initialize */ static int connect(struct dynamic_loader_initialize *this); static int read_mem(struct dynamic_loader_initialize *this, void *buf, ldr_addr addr, struct ldr_section_info *info, unsigned bytes); static int write_mem(struct dynamic_loader_initialize *this, void *buf, ldr_addr addr, struct ldr_section_info *info, unsigned nbytes); static int fill_mem(struct dynamic_loader_initialize *this, ldr_addr addr, struct ldr_section_info *info, unsigned bytes, unsigned val); static int execute(struct dynamic_loader_initialize *this, ldr_addr start); static void release(struct dynamic_loader_initialize *this); /* symbol table hash functions */ static u16 name_hash(void *key, u16 max_bucket); static bool name_match(void *key, void *sp); static void sym_delete(void *value); /* Symbol Redefinition */ static int redefined_symbol; static int gbl_search = 1; /* * ======== dbll_close ======== */ void 
dbll_close(struct dbll_library_obj *zl_lib) { struct dbll_tar_obj *zl_target; zl_target = zl_lib->target_obj; zl_lib->open_ref--; if (zl_lib->open_ref == 0) { /* Remove library from list */ if (zl_target->head == zl_lib) zl_target->head = zl_lib->next; if (zl_lib->prev) (zl_lib->prev)->next = zl_lib->next; if (zl_lib->next) (zl_lib->next)->prev = zl_lib->prev; /* Free DOF resources */ dof_close(zl_lib); kfree(zl_lib->file_name); /* remove symbols from symbol table */ if (zl_lib->sym_tab) gh_delete(zl_lib->sym_tab); /* remove the library object itself */ kfree(zl_lib); zl_lib = NULL; } } /* * ======== dbll_create ======== */ int dbll_create(struct dbll_tar_obj **target_obj, struct dbll_attrs *pattrs) { struct dbll_tar_obj *pzl_target; int status = 0; /* Allocate DBL target object */ pzl_target = kzalloc(sizeof(struct dbll_tar_obj), GFP_KERNEL); if (target_obj != NULL) { if (pzl_target == NULL) { *target_obj = NULL; status = -ENOMEM; } else { pzl_target->attrs = *pattrs; *target_obj = (struct dbll_tar_obj *)pzl_target; } } return status; } /* * ======== dbll_delete ======== */ void dbll_delete(struct dbll_tar_obj *target) { struct dbll_tar_obj *zl_target = (struct dbll_tar_obj *)target; kfree(zl_target); } /* * ======== dbll_exit ======== * Discontinue usage of DBL module. */ void dbll_exit(void) { /* do nothing */ } /* * ======== dbll_get_addr ======== * Get address of name in the specified library. */ bool dbll_get_addr(struct dbll_library_obj *zl_lib, char *name, struct dbll_sym_val **sym_val) { struct dbll_symbol *sym; bool status = false; sym = (struct dbll_symbol *)gh_find(zl_lib->sym_tab, name); if (sym != NULL) { *sym_val = &sym->value; status = true; } dev_dbg(bridge, "%s: lib: %p name: %s paddr: %p, status 0x%x\n", __func__, zl_lib, name, sym_val, status); return status; } /* * ======== dbll_get_attrs ======== * Retrieve the attributes of the target. 
*/ void dbll_get_attrs(struct dbll_tar_obj *target, struct dbll_attrs *pattrs) { struct dbll_tar_obj *zl_target = (struct dbll_tar_obj *)target; if ((pattrs != NULL) && (zl_target != NULL)) *pattrs = zl_target->attrs; } /* * ======== dbll_get_c_addr ======== * Get address of a "C" name in the specified library. */ bool dbll_get_c_addr(struct dbll_library_obj *zl_lib, char *name, struct dbll_sym_val **sym_val) { struct dbll_symbol *sym; char cname[MAXEXPR + 1]; bool status = false; cname[0] = '_'; strncpy(cname + 1, name, sizeof(cname) - 2); cname[MAXEXPR] = '\0'; /* insure '\0' string termination */ /* Check for C name, if not found */ sym = (struct dbll_symbol *)gh_find(zl_lib->sym_tab, cname); if (sym != NULL) { *sym_val = &sym->value; status = true; } return status; } /* * ======== dbll_get_sect ======== * Get the base address and size (in bytes) of a COFF section. */ int dbll_get_sect(struct dbll_library_obj *lib, char *name, u32 *paddr, u32 *psize) { u32 byte_size; bool opened_doff = false; const struct ldr_section_info *sect = NULL; struct dbll_library_obj *zl_lib = (struct dbll_library_obj *)lib; int status = 0; /* If DOFF file is not open, we open it. 
*/ if (zl_lib != NULL) { if (zl_lib->fp == NULL) { status = dof_open(zl_lib); if (!status) opened_doff = true; } else { (*(zl_lib->target_obj->attrs.fseek)) (zl_lib->fp, zl_lib->pos, SEEK_SET); } } else { status = -EFAULT; } if (!status) { byte_size = 1; if (dload_get_section_info(zl_lib->desc, name, &sect)) { *paddr = sect->load_addr; *psize = sect->size * byte_size; /* Make sure size is even for good swap */ if (*psize % 2) (*psize)++; /* Align size */ *psize = DOFF_ALIGN(*psize); } else { status = -ENXIO; } } if (opened_doff) { dof_close(zl_lib); opened_doff = false; } dev_dbg(bridge, "%s: lib: %p name: %s paddr: %p psize: %p, " "status 0x%x\n", __func__, lib, name, paddr, psize, status); return status; } /* * ======== dbll_init ======== */ bool dbll_init(void) { /* do nothing */ return true; } /* * ======== dbll_load ======== */ int dbll_load(struct dbll_library_obj *lib, dbll_flags flags, struct dbll_attrs *attrs, u32 *entry) { struct dbll_library_obj *zl_lib = (struct dbll_library_obj *)lib; struct dbll_tar_obj *dbzl; bool got_symbols = true; s32 err; int status = 0; bool opened_doff = false; /* * Load if not already loaded. 
*/ if (zl_lib->load_ref == 0 || !(flags & DBLL_DYNAMIC)) { dbzl = zl_lib->target_obj; dbzl->attrs = *attrs; /* Create a hash table for symbols if not already created */ if (zl_lib->sym_tab == NULL) { got_symbols = false; zl_lib->sym_tab = gh_create(MAXBUCKETS, sizeof(struct dbll_symbol), name_hash, name_match, sym_delete); if (zl_lib->sym_tab == NULL) status = -ENOMEM; } /* * Set up objects needed by the dynamic loader */ /* Stream */ zl_lib->stream.dl_stream.read_buffer = dbll_read_buffer; zl_lib->stream.dl_stream.set_file_posn = dbll_set_file_posn; zl_lib->stream.lib = zl_lib; /* Symbol */ zl_lib->symbol.dl_symbol.find_matching_symbol = dbll_find_symbol; if (got_symbols) { zl_lib->symbol.dl_symbol.add_to_symbol_table = find_in_symbol_table; } else { zl_lib->symbol.dl_symbol.add_to_symbol_table = dbll_add_to_symbol_table; } zl_lib->symbol.dl_symbol.purge_symbol_table = dbll_purge_symbol_table; zl_lib->symbol.dl_symbol.dload_allocate = allocate; zl_lib->symbol.dl_symbol.dload_deallocate = deallocate; zl_lib->symbol.dl_symbol.error_report = dbll_err_report; zl_lib->symbol.lib = zl_lib; /* Allocate */ zl_lib->allocate.dl_alloc.dload_allocate = dbll_rmm_alloc; zl_lib->allocate.dl_alloc.dload_deallocate = rmm_dealloc; zl_lib->allocate.lib = zl_lib; /* Init */ zl_lib->init.dl_init.connect = connect; zl_lib->init.dl_init.readmem = read_mem; zl_lib->init.dl_init.writemem = write_mem; zl_lib->init.dl_init.fillmem = fill_mem; zl_lib->init.dl_init.execute = execute; zl_lib->init.dl_init.release = release; zl_lib->init.lib = zl_lib; /* If COFF file is not open, we open it. */ if (zl_lib->fp == NULL) { status = dof_open(zl_lib); if (!status) opened_doff = true; } if (!status) { zl_lib->pos = (*(zl_lib->target_obj->attrs.ftell)) (zl_lib->fp); /* Reset file cursor */ (*(zl_lib->target_obj->attrs.fseek)) (zl_lib->fp, (long)0, SEEK_SET); symbols_reloaded = true; /* The 5th argument, DLOAD_INITBSS, tells the DLL * module to zero-init all BSS sections. 
In general, * this is not necessary and also increases load time. * We may want to make this configurable by the user */ err = dynamic_load_module(&zl_lib->stream.dl_stream, &zl_lib->symbol.dl_symbol, &zl_lib->allocate.dl_alloc, &zl_lib->init.dl_init, DLOAD_INITBSS, &zl_lib->dload_mod_obj); if (err != 0) { status = -EILSEQ; } else if (redefined_symbol) { zl_lib->load_ref++; dbll_unload(zl_lib, (struct dbll_attrs *)attrs); redefined_symbol = false; status = -EILSEQ; } else { *entry = zl_lib->entry; } } } if (!status) zl_lib->load_ref++; /* Clean up DOFF resources */ if (opened_doff) dof_close(zl_lib); dev_dbg(bridge, "%s: lib: %p flags: 0x%x entry: %p, status 0x%x\n", __func__, lib, flags, entry, status); return status; } /* * ======== dbll_open ======== */ int dbll_open(struct dbll_tar_obj *target, char *file, dbll_flags flags, struct dbll_library_obj **lib_obj) { struct dbll_tar_obj *zl_target = (struct dbll_tar_obj *)target; struct dbll_library_obj *zl_lib = NULL; s32 err; int status = 0; zl_lib = zl_target->head; while (zl_lib != NULL) { if (strcmp(zl_lib->file_name, file) == 0) { /* Library is already opened */ zl_lib->open_ref++; break; } zl_lib = zl_lib->next; } if (zl_lib == NULL) { /* Allocate DBL library object */ zl_lib = kzalloc(sizeof(struct dbll_library_obj), GFP_KERNEL); if (zl_lib == NULL) { status = -ENOMEM; } else { zl_lib->pos = 0; /* Increment ref count to allow close on failure * later on */ zl_lib->open_ref++; zl_lib->target_obj = zl_target; /* Keep a copy of the file name */ zl_lib->file_name = kzalloc(strlen(file) + 1, GFP_KERNEL); if (zl_lib->file_name == NULL) { status = -ENOMEM; } else { strncpy(zl_lib->file_name, file, strlen(file) + 1); } zl_lib->sym_tab = NULL; } } /* * Set up objects needed by the dynamic loader */ if (status) goto func_cont; /* Stream */ zl_lib->stream.dl_stream.read_buffer = dbll_read_buffer; zl_lib->stream.dl_stream.set_file_posn = dbll_set_file_posn; zl_lib->stream.lib = zl_lib; /* Symbol */ 
zl_lib->symbol.dl_symbol.add_to_symbol_table = dbll_add_to_symbol_table; zl_lib->symbol.dl_symbol.find_matching_symbol = dbll_find_symbol; zl_lib->symbol.dl_symbol.purge_symbol_table = dbll_purge_symbol_table; zl_lib->symbol.dl_symbol.dload_allocate = allocate; zl_lib->symbol.dl_symbol.dload_deallocate = deallocate; zl_lib->symbol.dl_symbol.error_report = dbll_err_report; zl_lib->symbol.lib = zl_lib; /* Allocate */ zl_lib->allocate.dl_alloc.dload_allocate = dbll_rmm_alloc; zl_lib->allocate.dl_alloc.dload_deallocate = rmm_dealloc; zl_lib->allocate.lib = zl_lib; /* Init */ zl_lib->init.dl_init.connect = connect; zl_lib->init.dl_init.readmem = read_mem; zl_lib->init.dl_init.writemem = write_mem; zl_lib->init.dl_init.fillmem = fill_mem; zl_lib->init.dl_init.execute = execute; zl_lib->init.dl_init.release = release; zl_lib->init.lib = zl_lib; if (!status && zl_lib->fp == NULL) status = dof_open(zl_lib); zl_lib->pos = (*(zl_lib->target_obj->attrs.ftell)) (zl_lib->fp); (*(zl_lib->target_obj->attrs.fseek)) (zl_lib->fp, (long)0, SEEK_SET); /* Create a hash table for symbols if flag is set */ if (zl_lib->sym_tab != NULL || !(flags & DBLL_SYMB)) goto func_cont; zl_lib->sym_tab = gh_create(MAXBUCKETS, sizeof(struct dbll_symbol), name_hash, name_match, sym_delete); if (zl_lib->sym_tab == NULL) { status = -ENOMEM; } else { /* Do a fake load to get symbols - set write func to no_op */ zl_lib->init.dl_init.writemem = no_op; err = dynamic_open_module(&zl_lib->stream.dl_stream, &zl_lib->symbol.dl_symbol, &zl_lib->allocate.dl_alloc, &zl_lib->init.dl_init, 0, &zl_lib->dload_mod_obj); if (err != 0) { status = -EILSEQ; } else { /* Now that we have the symbol table, we can unload */ err = dynamic_unload_module(zl_lib->dload_mod_obj, &zl_lib->symbol.dl_symbol, &zl_lib->allocate.dl_alloc, &zl_lib->init.dl_init); if (err != 0) status = -EILSEQ; zl_lib->dload_mod_obj = NULL; } } func_cont: if (!status) { if (zl_lib->open_ref == 1) { /* First time opened - insert in list */ if 
(zl_target->head) (zl_target->head)->prev = zl_lib; zl_lib->prev = NULL; zl_lib->next = zl_target->head; zl_target->head = zl_lib; } *lib_obj = (struct dbll_library_obj *)zl_lib; } else { *lib_obj = NULL; if (zl_lib != NULL) dbll_close((struct dbll_library_obj *)zl_lib); } dev_dbg(bridge, "%s: target: %p file: %s lib_obj: %p, status 0x%x\n", __func__, target, file, lib_obj, status); return status; } /* * ======== dbll_read_sect ======== * Get the content of a COFF section. */ int dbll_read_sect(struct dbll_library_obj *lib, char *name, char *buf, u32 size) { struct dbll_library_obj *zl_lib = (struct dbll_library_obj *)lib; bool opened_doff = false; u32 byte_size; /* size of bytes */ u32 ul_sect_size; /* size of section */ const struct ldr_section_info *sect = NULL; int status = 0; /* If DOFF file is not open, we open it. */ if (zl_lib != NULL) { if (zl_lib->fp == NULL) { status = dof_open(zl_lib); if (!status) opened_doff = true; } else { (*(zl_lib->target_obj->attrs.fseek)) (zl_lib->fp, zl_lib->pos, SEEK_SET); } } else { status = -EFAULT; } if (status) goto func_cont; byte_size = 1; if (!dload_get_section_info(zl_lib->desc, name, &sect)) { status = -ENXIO; goto func_cont; } /* * Ensure the supplied buffer size is sufficient to store * the section buf to be read. 
*/ ul_sect_size = sect->size * byte_size; /* Make sure size is even for good swap */ if (ul_sect_size % 2) ul_sect_size++; /* Align size */ ul_sect_size = DOFF_ALIGN(ul_sect_size); if (ul_sect_size > size) { status = -EPERM; } else { if (!dload_get_section(zl_lib->desc, sect, buf)) status = -EBADF; } func_cont: if (opened_doff) { dof_close(zl_lib); opened_doff = false; } dev_dbg(bridge, "%s: lib: %p name: %s buf: %p size: 0x%x, " "status 0x%x\n", __func__, lib, name, buf, size, status); return status; } /* * ======== dbll_unload ======== */ void dbll_unload(struct dbll_library_obj *lib, struct dbll_attrs *attrs) { struct dbll_library_obj *zl_lib = (struct dbll_library_obj *)lib; s32 err = 0; dev_dbg(bridge, "%s: lib: %p\n", __func__, lib); zl_lib->load_ref--; /* Unload only if reference count is 0 */ if (zl_lib->load_ref != 0) return; zl_lib->target_obj->attrs = *attrs; if (zl_lib->dload_mod_obj) { err = dynamic_unload_module(zl_lib->dload_mod_obj, &zl_lib->symbol.dl_symbol, &zl_lib->allocate.dl_alloc, &zl_lib->init.dl_init); if (err != 0) dev_dbg(bridge, "%s: failed: 0x%x\n", __func__, err); } /* remove symbols from symbol table */ if (zl_lib->sym_tab != NULL) { gh_delete(zl_lib->sym_tab); zl_lib->sym_tab = NULL; } /* delete DOFF desc since it holds *lots* of host OS * resources */ dof_close(zl_lib); } /* * ======== dof_close ======== */ static void dof_close(struct dbll_library_obj *zl_lib) { if (zl_lib->desc) { dload_module_close(zl_lib->desc); zl_lib->desc = NULL; } /* close file */ if (zl_lib->fp) { (zl_lib->target_obj->attrs.fclose) (zl_lib->fp); zl_lib->fp = NULL; } } /* * ======== dof_open ======== */ static int dof_open(struct dbll_library_obj *zl_lib) { void *open = *(zl_lib->target_obj->attrs.fopen); int status = 0; /* First open the file for the dynamic loader, then open COF */ zl_lib->fp = (void *)((dbll_f_open_fxn) (open)) (zl_lib->file_name, "rb"); /* Open DOFF module */ if (zl_lib->fp && zl_lib->desc == NULL) { (*(zl_lib->target_obj->attrs.fseek)) 
(zl_lib->fp, (long)0, SEEK_SET); zl_lib->desc = dload_module_open(&zl_lib->stream.dl_stream, &zl_lib->symbol.dl_symbol); if (zl_lib->desc == NULL) { (zl_lib->target_obj->attrs.fclose) (zl_lib->fp); zl_lib->fp = NULL; status = -EBADF; } } else { status = -EBADF; } return status; } /* * ======== name_hash ======== */ static u16 name_hash(void *key, u16 max_bucket) { u16 ret; u16 hash; char *name = (char *)key; hash = 0; while (*name) { hash <<= 1; hash ^= *name++; } ret = hash % max_bucket; return ret; } /* * ======== name_match ======== */ static bool name_match(void *key, void *sp) { if ((key != NULL) && (sp != NULL)) { if (strcmp((char *)key, ((struct dbll_symbol *)sp)->name) == 0) return true; } return false; } /* * ======== no_op ======== */ static int no_op(struct dynamic_loader_initialize *thisptr, void *bufr, ldr_addr locn, struct ldr_section_info *info, unsigned bytsize) { return 1; } /* * ======== sym_delete ======== */ static void sym_delete(void *value) { struct dbll_symbol *sp = (struct dbll_symbol *)value; kfree(sp->name); } /* * Dynamic Loader Functions */ /* dynamic_loader_stream */ /* * ======== dbll_read_buffer ======== */ static int dbll_read_buffer(struct dynamic_loader_stream *this, void *buffer, unsigned bufsize) { struct dbll_stream *pstream = (struct dbll_stream *)this; struct dbll_library_obj *lib; int bytes_read = 0; lib = pstream->lib; if (lib != NULL) { bytes_read = (*(lib->target_obj->attrs.fread)) (buffer, 1, bufsize, lib->fp); } return bytes_read; } /* * ======== dbll_set_file_posn ======== */ static int dbll_set_file_posn(struct dynamic_loader_stream *this, unsigned int pos) { struct dbll_stream *pstream = (struct dbll_stream *)this; struct dbll_library_obj *lib; int status = 0; /* Success */ lib = pstream->lib; if (lib != NULL) { status = (*(lib->target_obj->attrs.fseek)) (lib->fp, (long)pos, SEEK_SET); } return status; } /* dynamic_loader_sym */ /* * ======== dbll_find_symbol ======== */ static struct dynload_symbol 
*dbll_find_symbol(struct dynamic_loader_sym *this, const char *name) { struct dynload_symbol *ret_sym; struct ldr_symbol *ldr_sym = (struct ldr_symbol *)this; struct dbll_library_obj *lib; struct dbll_sym_val *dbll_sym = NULL; bool status = false; /* Symbol not found yet */ lib = ldr_sym->lib; if (lib != NULL) { if (lib->target_obj->attrs.sym_lookup) { /* Check current lib + base lib + dep lib + * persistent lib */ status = (*(lib->target_obj->attrs.sym_lookup)) (lib->target_obj->attrs.sym_handle, lib->target_obj->attrs.sym_arg, lib->target_obj->attrs.rmm_handle, name, &dbll_sym); } else { /* Just check current lib for symbol */ status = dbll_get_addr((struct dbll_library_obj *)lib, (char *)name, &dbll_sym); if (!status) { status = dbll_get_c_addr((struct dbll_library_obj *) lib, (char *)name, &dbll_sym); } } } if (!status && gbl_search) dev_dbg(bridge, "%s: Symbol not found: %s\n", __func__, name); ret_sym = (struct dynload_symbol *)dbll_sym; return ret_sym; } /* * ======== find_in_symbol_table ======== */ static struct dynload_symbol *find_in_symbol_table(struct dynamic_loader_sym *this, const char *name, unsigned moduleid) { struct dynload_symbol *ret_sym; struct ldr_symbol *ldr_sym = (struct ldr_symbol *)this; struct dbll_library_obj *lib; struct dbll_symbol *sym; lib = ldr_sym->lib; sym = (struct dbll_symbol *)gh_find(lib->sym_tab, (char *)name); ret_sym = (struct dynload_symbol *)&sym->value; return ret_sym; } /* * ======== dbll_add_to_symbol_table ======== */ static struct dynload_symbol *dbll_add_to_symbol_table(struct dynamic_loader_sym *this, const char *name, unsigned module_id) { struct dbll_symbol *sym_ptr = NULL; struct dbll_symbol symbol; struct dynload_symbol *dbll_sym = NULL; struct ldr_symbol *ldr_sym = (struct ldr_symbol *)this; struct dbll_library_obj *lib; struct dynload_symbol *ret; lib = ldr_sym->lib; /* Check to see if symbol is already defined in symbol table */ if (!(lib->target_obj->attrs.base_image)) { gbl_search = false; dbll_sym = 
dbll_find_symbol(this, name); gbl_search = true; if (dbll_sym) { redefined_symbol = true; dev_dbg(bridge, "%s already defined in symbol table\n", name); return NULL; } } /* Allocate string to copy symbol name */ symbol.name = kzalloc(strlen((char *const)name) + 1, GFP_KERNEL); if (symbol.name == NULL) return NULL; if (symbol.name != NULL) { /* Just copy name (value will be filled in by dynamic loader) */ strncpy(symbol.name, (char *const)name, strlen((char *const)name) + 1); /* Add symbol to symbol table */ sym_ptr = (struct dbll_symbol *)gh_insert(lib->sym_tab, (void *)name, (void *)&symbol); if (sym_ptr == NULL) kfree(symbol.name); } if (sym_ptr != NULL) ret = (struct dynload_symbol *)&sym_ptr->value; else ret = NULL; return ret; } /* * ======== dbll_purge_symbol_table ======== */ static void dbll_purge_symbol_table(struct dynamic_loader_sym *this, unsigned module_id) { struct ldr_symbol *ldr_sym = (struct ldr_symbol *)this; struct dbll_library_obj *lib; lib = ldr_sym->lib; /* May not need to do anything */ } /* * ======== allocate ======== */ static void *allocate(struct dynamic_loader_sym *this, unsigned memsize) { struct ldr_symbol *ldr_sym = (struct ldr_symbol *)this; struct dbll_library_obj *lib; void *buf; lib = ldr_sym->lib; buf = kzalloc(memsize, GFP_KERNEL); return buf; } /* * ======== deallocate ======== */ static void deallocate(struct dynamic_loader_sym *this, void *mem_ptr) { struct ldr_symbol *ldr_sym = (struct ldr_symbol *)this; struct dbll_library_obj *lib; lib = ldr_sym->lib; kfree(mem_ptr); } /* * ======== dbll_err_report ======== */ static void dbll_err_report(struct dynamic_loader_sym *this, const char *errstr, va_list args) { struct ldr_symbol *ldr_sym = (struct ldr_symbol *)this; struct dbll_library_obj *lib; char temp_buf[MAXEXPR]; lib = ldr_sym->lib; vsnprintf((char *)temp_buf, MAXEXPR, (char *)errstr, args); dev_dbg(bridge, "%s\n", temp_buf); } /* dynamic_loader_allocate */ /* * ======== dbll_rmm_alloc ======== */ static int 
dbll_rmm_alloc(struct dynamic_loader_allocate *this, struct ldr_section_info *info, unsigned align) { struct dbll_alloc *dbll_alloc_obj = (struct dbll_alloc *)this; struct dbll_library_obj *lib; int status = 0; u32 mem_sect_type; struct rmm_addr rmm_addr_obj; s32 ret = true; unsigned stype = DLOAD_SECTION_TYPE(info->type); char *token = NULL; char *sz_sec_last_token = NULL; char *sz_last_token = NULL; char *sz_sect_name = NULL; char *psz_cur; s32 token_len = 0; s32 seg_id = -1; s32 req = -1; s32 count = 0; u32 alloc_size = 0; u32 run_addr_flag = 0; lib = dbll_alloc_obj->lib; mem_sect_type = (stype == DLOAD_TEXT) ? DBLL_CODE : (stype == DLOAD_BSS) ? DBLL_BSS : DBLL_DATA; /* Attempt to extract the segment ID and requirement information from the name of the section */ token_len = strlen((char *)(info->name)) + 1; sz_sect_name = kzalloc(token_len, GFP_KERNEL); sz_last_token = kzalloc(token_len, GFP_KERNEL); sz_sec_last_token = kzalloc(token_len, GFP_KERNEL); if (sz_sect_name == NULL || sz_sec_last_token == NULL || sz_last_token == NULL) { status = -ENOMEM; goto func_cont; } strncpy(sz_sect_name, (char *)(info->name), token_len); psz_cur = sz_sect_name; while ((token = strsep(&psz_cur, ":")) && *token != '\0') { strncpy(sz_sec_last_token, sz_last_token, strlen(sz_last_token) + 1); strncpy(sz_last_token, token, strlen(token) + 1); token = strsep(&psz_cur, ":"); count++; /* optimizes processing */ } /* If token is 0 or 1, and sz_sec_last_token is DYN_DARAM or DYN_SARAM, or DYN_EXTERNAL, then mem granularity information is present within the section name - only process if there are at least three tokens within the section name (just a minor optimization) */ if (count >= 3) strict_strtol(sz_last_token, 10, (long *)&req); if ((req == 0) || (req == 1)) { if (strcmp(sz_sec_last_token, "DYN_DARAM") == 0) { seg_id = 0; } else { if (strcmp(sz_sec_last_token, "DYN_SARAM") == 0) { seg_id = 1; } else { if (strcmp(sz_sec_last_token, "DYN_EXTERNAL") == 0) seg_id = 2; } } } func_cont: 
kfree(sz_sect_name); sz_sect_name = NULL; kfree(sz_last_token); sz_last_token = NULL; kfree(sz_sec_last_token); sz_sec_last_token = NULL; if (mem_sect_type == DBLL_CODE) alloc_size = info->size + GEM_L1P_PREFETCH_SIZE; else alloc_size = info->size; if (info->load_addr != info->run_addr) run_addr_flag = 1; /* TODO - ideally, we can pass the alignment requirement also * from here */ if (lib != NULL) { status = (lib->target_obj->attrs.alloc) (lib->target_obj->attrs. rmm_handle, mem_sect_type, alloc_size, align, (u32 *) &rmm_addr_obj, seg_id, req, false); } if (status) { ret = false; } else { /* RMM gives word address. Need to convert to byte address */ info->load_addr = rmm_addr_obj.addr * DSPWORDSIZE; if (!run_addr_flag) info->run_addr = info->load_addr; info->context = (u32) rmm_addr_obj.segid; dev_dbg(bridge, "%s: %s base = 0x%x len = 0x%x, " "info->run_addr 0x%x, info->load_addr 0x%x\n", __func__, info->name, info->load_addr / DSPWORDSIZE, info->size / DSPWORDSIZE, info->run_addr, info->load_addr); } return ret; } /* * ======== rmm_dealloc ======== */ static void rmm_dealloc(struct dynamic_loader_allocate *this, struct ldr_section_info *info) { struct dbll_alloc *dbll_alloc_obj = (struct dbll_alloc *)this; struct dbll_library_obj *lib; u32 segid; int status = 0; unsigned stype = DLOAD_SECTION_TYPE(info->type); u32 mem_sect_type; u32 free_size = 0; mem_sect_type = (stype == DLOAD_TEXT) ? DBLL_CODE : (stype == DLOAD_BSS) ? DBLL_BSS : DBLL_DATA; lib = dbll_alloc_obj->lib; /* segid was set by alloc function */ segid = (u32) info->context; if (mem_sect_type == DBLL_CODE) free_size = info->size + GEM_L1P_PREFETCH_SIZE; else free_size = info->size; if (lib != NULL) { status = (lib->target_obj->attrs.free) (lib->target_obj->attrs. 
sym_handle, segid, info->load_addr / DSPWORDSIZE, free_size, false); } } /* dynamic_loader_initialize */ /* * ======== connect ======== */ static int connect(struct dynamic_loader_initialize *this) { return true; } /* * ======== read_mem ======== * This function does not need to be implemented. */ static int read_mem(struct dynamic_loader_initialize *this, void *buf, ldr_addr addr, struct ldr_section_info *info, unsigned nbytes) { struct dbll_init_obj *init_obj = (struct dbll_init_obj *)this; struct dbll_library_obj *lib; int bytes_read = 0; lib = init_obj->lib; /* Need bridge_brd_read function */ return bytes_read; } /* * ======== write_mem ======== */ static int write_mem(struct dynamic_loader_initialize *this, void *buf, ldr_addr addr, struct ldr_section_info *info, unsigned bytes) { struct dbll_init_obj *init_obj = (struct dbll_init_obj *)this; struct dbll_library_obj *lib; struct dbll_tar_obj *target_obj; struct dbll_sect_info sect_info; u32 mem_sect_type; bool ret = true; lib = init_obj->lib; if (!lib) return false; target_obj = lib->target_obj; mem_sect_type = (DLOAD_SECTION_TYPE(info->type) == DLOAD_TEXT) ? DBLL_CODE : DBLL_DATA; if (target_obj && target_obj->attrs.write) { ret = (*target_obj->attrs.write) (target_obj->attrs.input_params, addr, buf, bytes, mem_sect_type); if (target_obj->attrs.log_write) { sect_info.name = info->name; sect_info.sect_run_addr = info->run_addr; sect_info.sect_load_addr = info->load_addr; sect_info.size = info->size; sect_info.type = mem_sect_type; /* Pass the information about what we've written to * another module */ (*target_obj->attrs.log_write) (target_obj->attrs. log_write_handle, &sect_info, addr, bytes); } } return ret; } /* * ======== fill_mem ======== * Fill bytes of memory at a given address with a given value by * writing from a buffer containing the given value. Write in * sets of MAXEXPR (128) bytes to avoid large stack buffer issues. 
*/ static int fill_mem(struct dynamic_loader_initialize *this, ldr_addr addr, struct ldr_section_info *info, unsigned bytes, unsigned val) { bool ret = true; char *pbuf; struct dbll_library_obj *lib; struct dbll_init_obj *init_obj = (struct dbll_init_obj *)this; lib = init_obj->lib; pbuf = NULL; /* Pass the NULL pointer to write_mem to get the start address of Shared memory. This is a trick to just get the start address, there is no writing taking place with this Writemem */ if ((lib->target_obj->attrs.write) != (dbll_write_fxn) no_op) write_mem(this, &pbuf, addr, info, 0); if (pbuf) memset(pbuf, val, bytes); return ret; } /* * ======== execute ======== */ static int execute(struct dynamic_loader_initialize *this, ldr_addr start) { struct dbll_init_obj *init_obj = (struct dbll_init_obj *)this; struct dbll_library_obj *lib; bool ret = true; lib = init_obj->lib; /* Save entry point */ if (lib != NULL) lib->entry = (u32) start; return ret; } /* * ======== release ======== */ static void release(struct dynamic_loader_initialize *this) { } #ifdef CONFIG_TIDSPBRIDGE_BACKTRACE /** * find_symbol_context - Basic symbol context structure * @address: Symbol Address * @offset_range: Offset range where the search for the DSP symbol * started. * @cur_best_offset: Best offset to start looking for the DSP symbol * @sym_addr: Address of the DSP symbol * @name: Symbol name * */ struct find_symbol_context { /* input */ u32 address; u32 offset_range; /* state */ u32 cur_best_offset; /* output */ u32 sym_addr; char name[120]; }; /** * find_symbol_callback() - Validates symbol address and copies the symbol name * to the user data. 
* @elem: dsp library context * @user_data: Find symbol context * */ void find_symbol_callback(void *elem, void *user_data) { struct dbll_symbol *symbol = elem; struct find_symbol_context *context = user_data; u32 symbol_addr = symbol->value.value; u32 offset = context->address - symbol_addr; /* * Address given should be greater than symbol address, * symbol address should be within specified range * and the offset should be better than previous one */ if (context->address >= symbol_addr && symbol_addr < (u32)-1 && offset < context->cur_best_offset) { context->cur_best_offset = offset; context->sym_addr = symbol_addr; strncpy(context->name, symbol->name, sizeof(context->name)); } return; } /** * dbll_find_dsp_symbol() - This function retrieves the dsp symbol from the dsp binary. * @zl_lib: DSP binary obj library pointer * @address: Given address to find the dsp symbol * @offset_range: offset range to look for dsp symbol * @sym_addr_output: Symbol Output address * @name_output: String with the dsp symbol * * This function retrieves the dsp symbol from the dsp binary. */ bool dbll_find_dsp_symbol(struct dbll_library_obj *zl_lib, u32 address, u32 offset_range, u32 *sym_addr_output, char *name_output) { bool status = false; struct find_symbol_context context; context.address = address; context.offset_range = offset_range; context.cur_best_offset = offset_range; context.sym_addr = 0; context.name[0] = '\0'; gh_iterate(zl_lib->sym_tab, find_symbol_callback, &context); if (context.name[0]) { status = true; strcpy(name_output, context.name); *sym_addr_output = context.sym_addr; } return status; } #endif
gpl-2.0
ubuntu-touchCAF/android_kernel_motorola_msm8226
sound/soc/codecs/cx20442.c
7905
10685
/* * cx20442.c -- CX20442 ALSA Soc Audio driver * * Copyright 2009 Janusz Krzysztofik <jkrzyszt@tis.icnet.pl> * * Initially based on sound/soc/codecs/wm8400.c * Copyright 2008, 2009 Wolfson Microelectronics PLC. * Author: Mark Brown <broonie@opensource.wolfsonmicro.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/tty.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/regulator/consumer.h> #include <sound/core.h> #include <sound/initval.h> #include <sound/soc.h> #include "cx20442.h" struct cx20442_priv { void *control_data; struct regulator *por; }; #define CX20442_PM 0x0 #define CX20442_TELIN 0 #define CX20442_TELOUT 1 #define CX20442_MIC 2 #define CX20442_SPKOUT 3 #define CX20442_AGC 4 static const struct snd_soc_dapm_widget cx20442_dapm_widgets[] = { SND_SOC_DAPM_OUTPUT("TELOUT"), SND_SOC_DAPM_OUTPUT("SPKOUT"), SND_SOC_DAPM_OUTPUT("AGCOUT"), SND_SOC_DAPM_MIXER("SPKOUT Mixer", SND_SOC_NOPM, 0, 0, NULL, 0), SND_SOC_DAPM_PGA("TELOUT Amp", CX20442_PM, CX20442_TELOUT, 0, NULL, 0), SND_SOC_DAPM_PGA("SPKOUT Amp", CX20442_PM, CX20442_SPKOUT, 0, NULL, 0), SND_SOC_DAPM_PGA("SPKOUT AGC", CX20442_PM, CX20442_AGC, 0, NULL, 0), SND_SOC_DAPM_DAC("DAC", "Playback", SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_ADC("ADC", "Capture", SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_MIXER("Input Mixer", SND_SOC_NOPM, 0, 0, NULL, 0), SND_SOC_DAPM_MICBIAS("TELIN Bias", CX20442_PM, CX20442_TELIN, 0), SND_SOC_DAPM_MICBIAS("MIC Bias", CX20442_PM, CX20442_MIC, 0), SND_SOC_DAPM_PGA("MIC AGC", CX20442_PM, CX20442_AGC, 0, NULL, 0), SND_SOC_DAPM_INPUT("TELIN"), SND_SOC_DAPM_INPUT("MIC"), SND_SOC_DAPM_INPUT("AGCIN"), }; static const struct snd_soc_dapm_route cx20442_audio_map[] = { {"TELOUT", NULL, "TELOUT Amp"}, {"SPKOUT", NULL, "SPKOUT Mixer"}, {"SPKOUT Mixer", NULL, "SPKOUT 
Amp"}, {"TELOUT Amp", NULL, "DAC"}, {"SPKOUT Amp", NULL, "DAC"}, {"SPKOUT Mixer", NULL, "SPKOUT AGC"}, {"SPKOUT AGC", NULL, "AGCIN"}, {"AGCOUT", NULL, "MIC AGC"}, {"MIC AGC", NULL, "MIC"}, {"MIC Bias", NULL, "MIC"}, {"Input Mixer", NULL, "MIC Bias"}, {"TELIN Bias", NULL, "TELIN"}, {"Input Mixer", NULL, "TELIN Bias"}, {"ADC", NULL, "Input Mixer"}, }; static unsigned int cx20442_read_reg_cache(struct snd_soc_codec *codec, unsigned int reg) { u8 *reg_cache = codec->reg_cache; if (reg >= codec->driver->reg_cache_size) return -EINVAL; return reg_cache[reg]; } enum v253_vls { V253_VLS_NONE = 0, V253_VLS_T, V253_VLS_L, V253_VLS_LT, V253_VLS_S, V253_VLS_ST, V253_VLS_M, V253_VLS_MST, V253_VLS_S1, V253_VLS_S1T, V253_VLS_MS1T, V253_VLS_M1, V253_VLS_M1ST, V253_VLS_M1S1T, V253_VLS_H, V253_VLS_HT, V253_VLS_MS, V253_VLS_MS1, V253_VLS_M1S, V253_VLS_M1S1, V253_VLS_TEST, }; static int cx20442_pm_to_v253_vls(u8 value) { switch (value & ~(1 << CX20442_AGC)) { case 0: return V253_VLS_T; case (1 << CX20442_SPKOUT): case (1 << CX20442_MIC): case (1 << CX20442_SPKOUT) | (1 << CX20442_MIC): return V253_VLS_M1S1; case (1 << CX20442_TELOUT): case (1 << CX20442_TELIN): case (1 << CX20442_TELOUT) | (1 << CX20442_TELIN): return V253_VLS_L; case (1 << CX20442_TELOUT) | (1 << CX20442_MIC): return V253_VLS_NONE; } return -EINVAL; } static int cx20442_pm_to_v253_vsp(u8 value) { switch (value & ~(1 << CX20442_AGC)) { case (1 << CX20442_SPKOUT): case (1 << CX20442_MIC): case (1 << CX20442_SPKOUT) | (1 << CX20442_MIC): return (bool)(value & (1 << CX20442_AGC)); } return (value & (1 << CX20442_AGC)) ? 
-EINVAL : 0; } static int cx20442_write(struct snd_soc_codec *codec, unsigned int reg, unsigned int value) { struct cx20442_priv *cx20442 = snd_soc_codec_get_drvdata(codec); u8 *reg_cache = codec->reg_cache; int vls, vsp, old, len; char buf[18]; if (reg >= codec->driver->reg_cache_size) return -EINVAL; /* hw_write and control_data pointers required for talking to the modem * are expected to be set by the line discipline initialization code */ if (!codec->hw_write || !cx20442->control_data) return -EIO; old = reg_cache[reg]; reg_cache[reg] = value; vls = cx20442_pm_to_v253_vls(value); if (vls < 0) return vls; vsp = cx20442_pm_to_v253_vsp(value); if (vsp < 0) return vsp; if ((vls == V253_VLS_T) || (vls == cx20442_pm_to_v253_vls(old))) { if (vsp == cx20442_pm_to_v253_vsp(old)) return 0; len = snprintf(buf, ARRAY_SIZE(buf), "at+vsp=%d\r", vsp); } else if (vsp == cx20442_pm_to_v253_vsp(old)) len = snprintf(buf, ARRAY_SIZE(buf), "at+vls=%d\r", vls); else len = snprintf(buf, ARRAY_SIZE(buf), "at+vls=%d;+vsp=%d\r", vls, vsp); if (unlikely(len > (ARRAY_SIZE(buf) - 1))) return -ENOMEM; dev_dbg(codec->dev, "%s: %s\n", __func__, buf); if (codec->hw_write(cx20442->control_data, buf, len) != len) return -EIO; return 0; } /* * Line discpline related code * * Any of the callback functions below can be used in two ways: * 1) registerd by a machine driver as one of line discipline operations, * 2) called from a machine's provided line discipline callback function * in case when extra machine specific code must be run as well. 
*/ /* Modem init: echo off, digital speaker off, quiet off, voice mode */ static const char *v253_init = "ate0m0q0+fclass=8\r"; /* Line discipline .open() */ static int v253_open(struct tty_struct *tty) { int ret, len = strlen(v253_init); /* Doesn't make sense without write callback */ if (!tty->ops->write) return -EINVAL; /* Won't work if no codec pointer has been passed by a card driver */ if (!tty->disc_data) return -ENODEV; if (tty->ops->write(tty, v253_init, len) != len) { ret = -EIO; goto err; } /* Actual setup will be performed after the modem responds. */ return 0; err: tty->disc_data = NULL; return ret; } /* Line discipline .close() */ static void v253_close(struct tty_struct *tty) { struct snd_soc_codec *codec = tty->disc_data; struct cx20442_priv *cx20442; tty->disc_data = NULL; if (!codec) return; cx20442 = snd_soc_codec_get_drvdata(codec); /* Prevent the codec driver from further accessing the modem */ codec->hw_write = NULL; cx20442->control_data = NULL; codec->card->pop_time = 0; } /* Line discipline .hangup() */ static int v253_hangup(struct tty_struct *tty) { v253_close(tty); return 0; } /* Line discipline .receive_buf() */ static void v253_receive(struct tty_struct *tty, const unsigned char *cp, char *fp, int count) { struct snd_soc_codec *codec = tty->disc_data; struct cx20442_priv *cx20442; if (!codec) return; cx20442 = snd_soc_codec_get_drvdata(codec); if (!cx20442->control_data) { /* First modem response, complete setup procedure */ /* Set up codec driver access to modem controls */ cx20442->control_data = tty; codec->hw_write = (hw_write_t)tty->ops->write; codec->card->pop_time = 1; } } /* Line discipline .write_wakeup() */ static void v253_wakeup(struct tty_struct *tty) { } struct tty_ldisc_ops v253_ops = { .magic = TTY_LDISC_MAGIC, .name = "cx20442", .owner = THIS_MODULE, .open = v253_open, .close = v253_close, .hangup = v253_hangup, .receive_buf = v253_receive, .write_wakeup = v253_wakeup, }; EXPORT_SYMBOL_GPL(v253_ops); /* * Codec DAI */ 
static struct snd_soc_dai_driver cx20442_dai = { .name = "cx20442-voice", .playback = { .stream_name = "Playback", .channels_min = 1, .channels_max = 1, .rates = SNDRV_PCM_RATE_8000, .formats = SNDRV_PCM_FMTBIT_S16_LE, }, .capture = { .stream_name = "Capture", .channels_min = 1, .channels_max = 1, .rates = SNDRV_PCM_RATE_8000, .formats = SNDRV_PCM_FMTBIT_S16_LE, }, }; static int cx20442_set_bias_level(struct snd_soc_codec *codec, enum snd_soc_bias_level level) { struct cx20442_priv *cx20442 = snd_soc_codec_get_drvdata(codec); int err = 0; switch (level) { case SND_SOC_BIAS_PREPARE: if (codec->dapm.bias_level != SND_SOC_BIAS_STANDBY) break; if (IS_ERR(cx20442->por)) err = PTR_ERR(cx20442->por); else err = regulator_enable(cx20442->por); break; case SND_SOC_BIAS_STANDBY: if (codec->dapm.bias_level != SND_SOC_BIAS_PREPARE) break; if (IS_ERR(cx20442->por)) err = PTR_ERR(cx20442->por); else err = regulator_disable(cx20442->por); break; default: break; } if (!err) codec->dapm.bias_level = level; return err; } static int cx20442_codec_probe(struct snd_soc_codec *codec) { struct cx20442_priv *cx20442; cx20442 = kzalloc(sizeof(struct cx20442_priv), GFP_KERNEL); if (cx20442 == NULL) return -ENOMEM; cx20442->por = regulator_get(codec->dev, "POR"); if (IS_ERR(cx20442->por)) dev_warn(codec->dev, "failed to get the regulator"); cx20442->control_data = NULL; snd_soc_codec_set_drvdata(codec, cx20442); codec->hw_write = NULL; codec->card->pop_time = 0; return 0; } /* power down chip */ static int cx20442_codec_remove(struct snd_soc_codec *codec) { struct cx20442_priv *cx20442 = snd_soc_codec_get_drvdata(codec); if (cx20442->control_data) { struct tty_struct *tty = cx20442->control_data; tty_hangup(tty); } if (!IS_ERR(cx20442->por)) { /* should be already in STANDBY, hence disabled */ regulator_put(cx20442->por); } snd_soc_codec_set_drvdata(codec, NULL); kfree(cx20442); return 0; } static const u8 cx20442_reg; static struct snd_soc_codec_driver cx20442_codec_dev = { .probe = 
cx20442_codec_probe, .remove = cx20442_codec_remove, .set_bias_level = cx20442_set_bias_level, .reg_cache_default = &cx20442_reg, .reg_cache_size = 1, .reg_word_size = sizeof(u8), .read = cx20442_read_reg_cache, .write = cx20442_write, .dapm_widgets = cx20442_dapm_widgets, .num_dapm_widgets = ARRAY_SIZE(cx20442_dapm_widgets), .dapm_routes = cx20442_audio_map, .num_dapm_routes = ARRAY_SIZE(cx20442_audio_map), }; static int cx20442_platform_probe(struct platform_device *pdev) { return snd_soc_register_codec(&pdev->dev, &cx20442_codec_dev, &cx20442_dai, 1); } static int __exit cx20442_platform_remove(struct platform_device *pdev) { snd_soc_unregister_codec(&pdev->dev); return 0; } static struct platform_driver cx20442_platform_driver = { .driver = { .name = "cx20442-codec", .owner = THIS_MODULE, }, .probe = cx20442_platform_probe, .remove = __exit_p(cx20442_platform_remove), }; module_platform_driver(cx20442_platform_driver); MODULE_DESCRIPTION("ASoC CX20442-11 voice modem codec driver"); MODULE_AUTHOR("Janusz Krzysztofik"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:cx20442-codec");
gpl-2.0
b-man/hp-kernel-tenderloin-ubuntu
arch/sparc/kernel/sparc_ksyms_32.c
9185
1127
/* * arch/sparc/kernel/ksyms.c: Sparc specific ksyms support. * * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu) * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be) */ #include <linux/module.h> #include <linux/init.h> #include <asm/pgtable.h> #include <asm/uaccess.h> #include <asm/delay.h> #include <asm/head.h> #include <asm/dma.h> struct poll { int fd; short events; short revents; }; /* from entry.S */ EXPORT_SYMBOL(__udelay); EXPORT_SYMBOL(__ndelay); /* from head_32.S */ EXPORT_SYMBOL(__ret_efault); EXPORT_SYMBOL(empty_zero_page); /* Defined using magic */ #ifndef CONFIG_SMP EXPORT_SYMBOL(BTFIXUP_CALL(___xchg32)); #else EXPORT_SYMBOL(BTFIXUP_CALL(__hard_smp_processor_id)); #endif EXPORT_SYMBOL(BTFIXUP_CALL(mmu_unlockarea)); EXPORT_SYMBOL(BTFIXUP_CALL(mmu_lockarea)); EXPORT_SYMBOL(BTFIXUP_CALL(mmu_get_scsi_sgl)); EXPORT_SYMBOL(BTFIXUP_CALL(mmu_get_scsi_one)); EXPORT_SYMBOL(BTFIXUP_CALL(mmu_release_scsi_sgl)); EXPORT_SYMBOL(BTFIXUP_CALL(mmu_release_scsi_one)); EXPORT_SYMBOL(BTFIXUP_CALL(pgprot_noncached)); /* Exporting a symbol from /init/main.c */ EXPORT_SYMBOL(saved_command_line);
gpl-2.0
imoseyon/leanKernel-angler
arch/powerpc/platforms/powernv/pci.c
226
16310
/* * Support PCI/PCIe on PowerNV platforms * * Currently supports only P5IOC2 * * Copyright 2011 Benjamin Herrenschmidt, IBM Corp. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/kernel.h> #include <linux/pci.h> #include <linux/delay.h> #include <linux/string.h> #include <linux/init.h> #include <linux/bootmem.h> #include <linux/irq.h> #include <linux/io.h> #include <linux/msi.h> #include <asm/sections.h> #include <asm/io.h> #include <asm/prom.h> #include <asm/pci-bridge.h> #include <asm/machdep.h> #include <asm/msi_bitmap.h> #include <asm/ppc-pci.h> #include <asm/opal.h> #include <asm/iommu.h> #include <asm/tce.h> #include <asm/firmware.h> #include "powernv.h" #include "pci.h" /* Delay in usec */ #define PCI_RESET_DELAY_US 3000000 #define cfg_dbg(fmt...) do { } while(0) //#define cfg_dbg(fmt...) printk(fmt) #ifdef CONFIG_PCI_MSI static int pnv_msi_check_device(struct pci_dev* pdev, int nvec, int type) { struct pci_controller *hose = pci_bus_to_host(pdev->bus); struct pnv_phb *phb = hose->private_data; if (pdev->no_64bit_msi && !phb->msi32_support) return -ENODEV; return (phb && phb->msi_bmp.bitmap) ? 
0 : -ENODEV; } static int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) { struct pci_controller *hose = pci_bus_to_host(pdev->bus); struct pnv_phb *phb = hose->private_data; struct msi_desc *entry; struct msi_msg msg; int hwirq; unsigned int virq; int rc; if (WARN_ON(!phb)) return -ENODEV; list_for_each_entry(entry, &pdev->msi_list, list) { if (!entry->msi_attrib.is_64 && !phb->msi32_support) { pr_warn("%s: Supports only 64-bit MSIs\n", pci_name(pdev)); return -ENXIO; } hwirq = msi_bitmap_alloc_hwirqs(&phb->msi_bmp, 1); if (hwirq < 0) { pr_warn("%s: Failed to find a free MSI\n", pci_name(pdev)); return -ENOSPC; } virq = irq_create_mapping(NULL, phb->msi_base + hwirq); if (virq == NO_IRQ) { pr_warn("%s: Failed to map MSI to linux irq\n", pci_name(pdev)); msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq, 1); return -ENOMEM; } rc = phb->msi_setup(phb, pdev, phb->msi_base + hwirq, virq, entry->msi_attrib.is_64, &msg); if (rc) { pr_warn("%s: Failed to setup MSI\n", pci_name(pdev)); irq_dispose_mapping(virq); msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq, 1); return rc; } irq_set_msi_desc(virq, entry); write_msi_msg(virq, &msg); } return 0; } static void pnv_teardown_msi_irqs(struct pci_dev *pdev) { struct pci_controller *hose = pci_bus_to_host(pdev->bus); struct pnv_phb *phb = hose->private_data; struct msi_desc *entry; irq_hw_number_t hwirq; if (WARN_ON(!phb)) return; list_for_each_entry(entry, &pdev->msi_list, list) { if (entry->irq == NO_IRQ) continue; hwirq = virq_to_hw(entry->irq); irq_set_msi_desc(entry->irq, NULL); irq_dispose_mapping(entry->irq); msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq - phb->msi_base, 1); } } #endif /* CONFIG_PCI_MSI */ static void pnv_pci_dump_p7ioc_diag_data(struct pnv_phb *phb) { struct OpalIoP7IOCPhbErrorData *data = &phb->diag.p7ioc; int i; pr_info("PHB %d diagnostic data:\n", phb->hose->global_number); pr_info(" brdgCtl = 0x%08x\n", data->brdgCtl); pr_info(" portStatusReg = 0x%08x\n", data->portStatusReg); pr_info(" 
rootCmplxStatus = 0x%08x\n", data->rootCmplxStatus); pr_info(" busAgentStatus = 0x%08x\n", data->busAgentStatus); pr_info(" deviceStatus = 0x%08x\n", data->deviceStatus); pr_info(" slotStatus = 0x%08x\n", data->slotStatus); pr_info(" linkStatus = 0x%08x\n", data->linkStatus); pr_info(" devCmdStatus = 0x%08x\n", data->devCmdStatus); pr_info(" devSecStatus = 0x%08x\n", data->devSecStatus); pr_info(" rootErrorStatus = 0x%08x\n", data->rootErrorStatus); pr_info(" uncorrErrorStatus = 0x%08x\n", data->uncorrErrorStatus); pr_info(" corrErrorStatus = 0x%08x\n", data->corrErrorStatus); pr_info(" tlpHdr1 = 0x%08x\n", data->tlpHdr1); pr_info(" tlpHdr2 = 0x%08x\n", data->tlpHdr2); pr_info(" tlpHdr3 = 0x%08x\n", data->tlpHdr3); pr_info(" tlpHdr4 = 0x%08x\n", data->tlpHdr4); pr_info(" sourceId = 0x%08x\n", data->sourceId); pr_info(" errorClass = 0x%016llx\n", data->errorClass); pr_info(" correlator = 0x%016llx\n", data->correlator); pr_info(" p7iocPlssr = 0x%016llx\n", data->p7iocPlssr); pr_info(" p7iocCsr = 0x%016llx\n", data->p7iocCsr); pr_info(" lemFir = 0x%016llx\n", data->lemFir); pr_info(" lemErrorMask = 0x%016llx\n", data->lemErrorMask); pr_info(" lemWOF = 0x%016llx\n", data->lemWOF); pr_info(" phbErrorStatus = 0x%016llx\n", data->phbErrorStatus); pr_info(" phbFirstErrorStatus = 0x%016llx\n", data->phbFirstErrorStatus); pr_info(" phbErrorLog0 = 0x%016llx\n", data->phbErrorLog0); pr_info(" phbErrorLog1 = 0x%016llx\n", data->phbErrorLog1); pr_info(" mmioErrorStatus = 0x%016llx\n", data->mmioErrorStatus); pr_info(" mmioFirstErrorStatus = 0x%016llx\n", data->mmioFirstErrorStatus); pr_info(" mmioErrorLog0 = 0x%016llx\n", data->mmioErrorLog0); pr_info(" mmioErrorLog1 = 0x%016llx\n", data->mmioErrorLog1); pr_info(" dma0ErrorStatus = 0x%016llx\n", data->dma0ErrorStatus); pr_info(" dma0FirstErrorStatus = 0x%016llx\n", data->dma0FirstErrorStatus); pr_info(" dma0ErrorLog0 = 0x%016llx\n", data->dma0ErrorLog0); pr_info(" dma0ErrorLog1 = 0x%016llx\n", data->dma0ErrorLog1); pr_info(" 
dma1ErrorStatus = 0x%016llx\n", data->dma1ErrorStatus); pr_info(" dma1FirstErrorStatus = 0x%016llx\n", data->dma1FirstErrorStatus); pr_info(" dma1ErrorLog0 = 0x%016llx\n", data->dma1ErrorLog0); pr_info(" dma1ErrorLog1 = 0x%016llx\n", data->dma1ErrorLog1); for (i = 0; i < OPAL_P7IOC_NUM_PEST_REGS; i++) { if ((data->pestA[i] >> 63) == 0 && (data->pestB[i] >> 63) == 0) continue; pr_info(" PE[%3d] PESTA = 0x%016llx\n", i, data->pestA[i]); pr_info(" PESTB = 0x%016llx\n", data->pestB[i]); } } static void pnv_pci_dump_phb_diag_data(struct pnv_phb *phb) { switch(phb->model) { case PNV_PHB_MODEL_P7IOC: pnv_pci_dump_p7ioc_diag_data(phb); break; default: pr_warning("PCI %d: Can't decode this PHB diag data\n", phb->hose->global_number); } } static void pnv_pci_handle_eeh_config(struct pnv_phb *phb, u32 pe_no) { unsigned long flags, rc; int has_diag; spin_lock_irqsave(&phb->lock, flags); rc = opal_pci_get_phb_diag_data(phb->opal_id, phb->diag.blob, PNV_PCI_DIAG_BUF_SIZE); has_diag = (rc == OPAL_SUCCESS); rc = opal_pci_eeh_freeze_clear(phb->opal_id, pe_no, OPAL_EEH_ACTION_CLEAR_FREEZE_ALL); if (rc) { pr_warning("PCI %d: Failed to clear EEH freeze state" " for PE#%d, err %ld\n", phb->hose->global_number, pe_no, rc); /* For now, let's only display the diag buffer when we fail to clear * the EEH status. We'll do more sensible things later when we have * proper EEH support. We need to make sure we don't pollute ourselves * with the normal errors generated when probing empty slots */ if (has_diag) pnv_pci_dump_phb_diag_data(phb); else pr_warning("PCI %d: No diag data available\n", phb->hose->global_number); } spin_unlock_irqrestore(&phb->lock, flags); } static void pnv_pci_config_check_eeh(struct pnv_phb *phb, struct pci_bus *bus, u32 bdfn) { s64 rc; u8 fstate; u16 pcierr; u32 pe_no; /* Get PE# if we support IODA */ pe_no = phb->bdfn_to_pe ? 
phb->bdfn_to_pe(phb, bus, bdfn & 0xff) : 0; /* Read freeze status */ rc = opal_pci_eeh_freeze_status(phb->opal_id, pe_no, &fstate, &pcierr, NULL); if (rc) { pr_warning("PCI %d: Failed to read EEH status for PE#%d," " err %lld\n", phb->hose->global_number, pe_no, rc); return; } cfg_dbg(" -> EEH check, bdfn=%04x PE%d fstate=%x\n", bdfn, pe_no, fstate); if (fstate != 0) pnv_pci_handle_eeh_config(phb, pe_no); } static int pnv_pci_read_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val) { struct pci_controller *hose = pci_bus_to_host(bus); struct pnv_phb *phb = hose->private_data; u32 bdfn = (((uint64_t)bus->number) << 8) | devfn; s64 rc; if (hose == NULL) return PCIBIOS_DEVICE_NOT_FOUND; switch (size) { case 1: { u8 v8; rc = opal_pci_config_read_byte(phb->opal_id, bdfn, where, &v8); *val = (rc == OPAL_SUCCESS) ? v8 : 0xff; break; } case 2: { u16 v16; rc = opal_pci_config_read_half_word(phb->opal_id, bdfn, where, &v16); *val = (rc == OPAL_SUCCESS) ? v16 : 0xffff; break; } case 4: { u32 v32; rc = opal_pci_config_read_word(phb->opal_id, bdfn, where, &v32); *val = (rc == OPAL_SUCCESS) ? 
v32 : 0xffffffff; break; } default: return PCIBIOS_FUNC_NOT_SUPPORTED; } cfg_dbg("pnv_pci_read_config bus: %x devfn: %x +%x/%x -> %08x\n", bus->number, devfn, where, size, *val); /* Check if the PHB got frozen due to an error (no response) */ pnv_pci_config_check_eeh(phb, bus, bdfn); return PCIBIOS_SUCCESSFUL; } static int pnv_pci_write_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val) { struct pci_controller *hose = pci_bus_to_host(bus); struct pnv_phb *phb = hose->private_data; u32 bdfn = (((uint64_t)bus->number) << 8) | devfn; if (hose == NULL) return PCIBIOS_DEVICE_NOT_FOUND; cfg_dbg("pnv_pci_write_config bus: %x devfn: %x +%x/%x -> %08x\n", bus->number, devfn, where, size, val); switch (size) { case 1: opal_pci_config_write_byte(phb->opal_id, bdfn, where, val); break; case 2: opal_pci_config_write_half_word(phb->opal_id, bdfn, where, val); break; case 4: opal_pci_config_write_word(phb->opal_id, bdfn, where, val); break; default: return PCIBIOS_FUNC_NOT_SUPPORTED; } /* Check if the PHB got frozen due to an error (no response) */ pnv_pci_config_check_eeh(phb, bus, bdfn); return PCIBIOS_SUCCESSFUL; } struct pci_ops pnv_pci_ops = { .read = pnv_pci_read_config, .write = pnv_pci_write_config, }; static int pnv_tce_build(struct iommu_table *tbl, long index, long npages, unsigned long uaddr, enum dma_data_direction direction, struct dma_attrs *attrs) { u64 proto_tce; u64 *tcep, *tces; u64 rpn; proto_tce = TCE_PCI_READ; // Read allowed if (direction != DMA_TO_DEVICE) proto_tce |= TCE_PCI_WRITE; tces = tcep = ((u64 *)tbl->it_base) + index - tbl->it_offset; rpn = __pa(uaddr) >> TCE_SHIFT; while (npages--) *(tcep++) = proto_tce | (rpn++ << TCE_RPN_SHIFT); /* Some implementations won't cache invalid TCEs and thus may not * need that flush. 
We'll probably turn it_type into a bit mask * of flags if that becomes the case */ if (tbl->it_type & TCE_PCI_SWINV_CREATE) pnv_pci_ioda_tce_invalidate(tbl, tces, tcep - 1); return 0; } static void pnv_tce_free(struct iommu_table *tbl, long index, long npages) { u64 *tcep, *tces; tces = tcep = ((u64 *)tbl->it_base) + index - tbl->it_offset; while (npages--) *(tcep++) = 0; if (tbl->it_type & TCE_PCI_SWINV_FREE) pnv_pci_ioda_tce_invalidate(tbl, tces, tcep - 1); } static unsigned long pnv_tce_get(struct iommu_table *tbl, long index) { return ((u64 *)tbl->it_base)[index - tbl->it_offset]; } void pnv_pci_setup_iommu_table(struct iommu_table *tbl, void *tce_mem, u64 tce_size, u64 dma_offset) { tbl->it_blocksize = 16; tbl->it_base = (unsigned long)tce_mem; tbl->it_offset = dma_offset >> IOMMU_PAGE_SHIFT; tbl->it_index = 0; tbl->it_size = tce_size >> 3; tbl->it_busno = 0; tbl->it_type = TCE_PCI; } static struct iommu_table *pnv_pci_setup_bml_iommu(struct pci_controller *hose) { struct iommu_table *tbl; const __be64 *basep, *swinvp; const __be32 *sizep; basep = of_get_property(hose->dn, "linux,tce-base", NULL); sizep = of_get_property(hose->dn, "linux,tce-size", NULL); if (basep == NULL || sizep == NULL) { pr_err("PCI: %s has missing tce entries !\n", hose->dn->full_name); return NULL; } tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL, hose->node); if (WARN_ON(!tbl)) return NULL; pnv_pci_setup_iommu_table(tbl, __va(be64_to_cpup(basep)), be32_to_cpup(sizep), 0); iommu_init_table(tbl, hose->node); /* Deal with SW invalidated TCEs when needed (BML way) */ swinvp = of_get_property(hose->dn, "linux,tce-sw-invalidate-info", NULL); if (swinvp) { tbl->it_busno = swinvp[1]; tbl->it_index = (unsigned long)ioremap(swinvp[0], 8); tbl->it_type = TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE; } return tbl; } static void pnv_pci_dma_fallback_setup(struct pci_controller *hose, struct pci_dev *pdev) { struct device_node *np = pci_bus_to_OF_node(hose->bus); struct pci_dn *pdn; if (np 
== NULL) return; pdn = PCI_DN(np); if (!pdn->iommu_table) pdn->iommu_table = pnv_pci_setup_bml_iommu(hose); if (!pdn->iommu_table) return; set_iommu_table_base(&pdev->dev, pdn->iommu_table); } static void pnv_pci_dma_dev_setup(struct pci_dev *pdev) { struct pci_controller *hose = pci_bus_to_host(pdev->bus); struct pnv_phb *phb = hose->private_data; /* If we have no phb structure, try to setup a fallback based on * the device-tree (RTAS PCI for example) */ if (phb && phb->dma_dev_setup) phb->dma_dev_setup(phb, pdev); else pnv_pci_dma_fallback_setup(hose, pdev); } void pnv_pci_shutdown(void) { struct pci_controller *hose; list_for_each_entry(hose, &hose_list, list_node) { struct pnv_phb *phb = hose->private_data; if (phb && phb->shutdown) phb->shutdown(phb); } } /* Fixup wrong class code in p7ioc and p8 root complex */ static void pnv_p7ioc_rc_quirk(struct pci_dev *dev) { dev->class = PCI_CLASS_BRIDGE_PCI << 8; } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_IBM, 0x3b9, pnv_p7ioc_rc_quirk); static int pnv_pci_probe_mode(struct pci_bus *bus) { struct pci_controller *hose = pci_bus_to_host(bus); const __be64 *tstamp; u64 now, target; /* We hijack this as a way to ensure we have waited long * enough since the reset was lifted on the PCI bus */ if (bus != hose->bus) return PCI_PROBE_NORMAL; tstamp = of_get_property(hose->dn, "reset-clear-timestamp", NULL); if (!tstamp || !*tstamp) return PCI_PROBE_NORMAL; now = mftb() / tb_ticks_per_usec; target = (be64_to_cpup(tstamp) / tb_ticks_per_usec) + PCI_RESET_DELAY_US; pr_devel("pci %04d: Reset target: 0x%llx now: 0x%llx\n", hose->global_number, target, now); if (now < target) msleep((target - now + 999) / 1000); return PCI_PROBE_NORMAL; } void __init pnv_pci_init(void) { struct device_node *np; pci_add_flags(PCI_CAN_SKIP_ISA_ALIGN); /* OPAL absent, try POPAL first then RTAS detection of PHBs */ if (!firmware_has_feature(FW_FEATURE_OPAL)) { #ifdef CONFIG_PPC_POWERNV_RTAS init_pci_config_tokens(); find_and_init_phbs(); #endif /* 
CONFIG_PPC_POWERNV_RTAS */ } /* OPAL is here, do our normal stuff */ else { int found_ioda = 0; /* Look for IODA IO-Hubs. We don't support mixing IODA * and p5ioc2 due to the need to change some global * probing flags */ for_each_compatible_node(np, NULL, "ibm,ioda-hub") { pnv_pci_init_ioda_hub(np); found_ioda = 1; } /* Look for p5ioc2 IO-Hubs */ if (!found_ioda) for_each_compatible_node(np, NULL, "ibm,p5ioc2") pnv_pci_init_p5ioc2_hub(np); /* Look for ioda2 built-in PHB3's */ for_each_compatible_node(np, NULL, "ibm,ioda2-phb") pnv_pci_init_ioda2_phb(np); } /* Setup the linkage between OF nodes and PHBs */ pci_devs_phb_init(); /* Configure IOMMU DMA hooks */ ppc_md.pci_dma_dev_setup = pnv_pci_dma_dev_setup; ppc_md.tce_build = pnv_tce_build; ppc_md.tce_free = pnv_tce_free; ppc_md.tce_get = pnv_tce_get; ppc_md.pci_probe_mode = pnv_pci_probe_mode; set_pci_dma_ops(&dma_iommu_ops); /* Configure MSIs */ #ifdef CONFIG_PCI_MSI ppc_md.msi_check_device = pnv_msi_check_device; ppc_md.setup_msi_irqs = pnv_setup_msi_irqs; ppc_md.teardown_msi_irqs = pnv_teardown_msi_irqs; #endif }
gpl-2.0
sigma-random/gcc
gcc/testsuite/gcc.c-torture/execute/builtin-prefetch-1.c
226
1570
/* Test that __builtin_prefetch does no harm.

   Prefetch using all valid combinations of rw and locality values.
   These must be compile-time constants.  */

/* Declare exit() explicitly so the test also builds cleanly under
   compilers that reject implicit function declarations.  */
extern void exit (int);

#define NO_TEMPORAL_LOCALITY 0
#define LOW_TEMPORAL_LOCALITY 1
/* Fixed: was 1, which duplicated LOW and left locality value 2 untested;
   must agree with enum locality below where 'moderate' == 2.  */
#define MODERATE_TEMPORAL_LOCALITY 2
#define HIGH_TEMPORAL_LOCALITY 3

#define WRITE_ACCESS 1
#define READ_ACCESS 0

/* Implicit values: none=0, low=1, moderate=2, high=3; read=0, write=1.  */
enum locality { none, low, moderate, high };
enum rw { read, write };

int arr[10];

/* Every (rw, locality) pair spelled as integer literals or macros.  */
void
good_const (const int *p)
{
  __builtin_prefetch (p, 0, 0);
  __builtin_prefetch (p, 0, 1);
  __builtin_prefetch (p, 0, 2);
  __builtin_prefetch (p, READ_ACCESS, 3);
  __builtin_prefetch (p, 1, NO_TEMPORAL_LOCALITY);
  __builtin_prefetch (p, 1, LOW_TEMPORAL_LOCALITY);
  __builtin_prefetch (p, 1, MODERATE_TEMPORAL_LOCALITY);
  __builtin_prefetch (p, WRITE_ACCESS, HIGH_TEMPORAL_LOCALITY);
}

/* Every (rw, locality) pair spelled as enumeration constants.  */
void
good_enum (const int *p)
{
  __builtin_prefetch (p, read, none);
  __builtin_prefetch (p, read, low);
  __builtin_prefetch (p, read, moderate);
  __builtin_prefetch (p, read, high);
  __builtin_prefetch (p, write, none);
  __builtin_prefetch (p, write, low);
  __builtin_prefetch (p, write, moderate);
  __builtin_prefetch (p, write, high);
}

/* Constant-folded expressions are also valid compile-time constants.  */
void
good_expr (const int *p)
{
  __builtin_prefetch (p, 1 - 1, 6 - (2 * 3));
  __builtin_prefetch (p, 1 + 0, 1 + 2);
}

/* Trailing arguments are optional and default when omitted.  */
void
good_vararg (const int *p)
{
  __builtin_prefetch (p, 0, 3);
  __builtin_prefetch (p, 0);
  __builtin_prefetch (p, 1);
  __builtin_prefetch (p);
}

int
main ()
{
  good_const (arr);
  good_enum (arr);
  good_expr (arr);
  good_vararg (arr);
  exit (0);
}
gpl-2.0
Linutronix/ti-linux-kernel
drivers/net/wireless/iwlwifi/mvm/utils.c
226
30547
/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, * USA * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless <ilw@linux.intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. 
* * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ #include <net/mac80211.h> #include "iwl-debug.h" #include "iwl-io.h" #include "iwl-prph.h" #include "mvm.h" #include "fw-api-rs.h" /* * Will return 0 even if the cmd failed when RFKILL is asserted unless * CMD_WANT_SKB is set in cmd->flags. */ int iwl_mvm_send_cmd(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd) { int ret; #if defined(CONFIG_IWLWIFI_DEBUGFS) && defined(CONFIG_PM_SLEEP) if (WARN_ON(mvm->d3_test_active)) return -EIO; #endif /* * Synchronous commands from this op-mode must hold * the mutex, this ensures we don't try to send two * (or more) synchronous commands at a time. */ if (!(cmd->flags & CMD_ASYNC)) lockdep_assert_held(&mvm->mutex); ret = iwl_trans_send_cmd(mvm->trans, cmd); /* * If the caller wants the SKB, then don't hide any problems, the * caller might access the response buffer which will be NULL if * the command failed. 
*/ if (cmd->flags & CMD_WANT_SKB) return ret; /* Silently ignore failures if RFKILL is asserted */ if (!ret || ret == -ERFKILL) return 0; return ret; } int iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u8 id, u32 flags, u16 len, const void *data) { struct iwl_host_cmd cmd = { .id = id, .len = { len, }, .data = { data, }, .flags = flags, }; return iwl_mvm_send_cmd(mvm, &cmd); } /* * We assume that the caller set the status to the success value */ int iwl_mvm_send_cmd_status(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd, u32 *status) { struct iwl_rx_packet *pkt; struct iwl_cmd_response *resp; int ret, resp_len; lockdep_assert_held(&mvm->mutex); #if defined(CONFIG_IWLWIFI_DEBUGFS) && defined(CONFIG_PM_SLEEP) if (WARN_ON(mvm->d3_test_active)) return -EIO; #endif /* * Only synchronous commands can wait for status, * we use WANT_SKB so the caller can't. */ if (WARN_ONCE(cmd->flags & (CMD_ASYNC | CMD_WANT_SKB), "cmd flags %x", cmd->flags)) return -EINVAL; cmd->flags |= CMD_WANT_SKB; ret = iwl_trans_send_cmd(mvm->trans, cmd); if (ret == -ERFKILL) { /* * The command failed because of RFKILL, don't update * the status, leave it as success and return 0. 
*/
	return 0;
} else if (ret) {
	return ret;
}

pkt = cmd->resp_pkt;
/* Can happen if RFKILL is asserted */
if (!pkt) {
	ret = 0;
	goto out_free_resp;
}

if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
	ret = -EIO;
	goto out_free_resp;
}

resp_len = iwl_rx_packet_payload_len(pkt);
if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
	ret = -EIO;
	goto out_free_resp;
}

resp = (void *)pkt->data;
*status = le32_to_cpu(resp->status);
out_free_resp:
	iwl_free_resp(cmd);
	return ret;
}

/*
 * Send a command with a status response and only a payload (no struct
 * iwl_host_cmd built by the caller).
 * We assume that the caller set the status to the success value.
 */
int iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u8 id, u16 len,
				const void *data, u32 *status)
{
	struct iwl_host_cmd cmd = {
		.id = id,
		.len = { len, },
		.data = { data, },
	};

	return iwl_mvm_send_cmd_status(mvm, &cmd, status);
}

#define IWL_DECLARE_RATE_INFO(r) \
	[IWL_RATE_##r##M_INDEX] = IWL_RATE_##r##M_PLCP

/*
 * Translate from fw_rate_index (IWL_RATE_XXM_INDEX) to PLCP
 */
static const u8 fw_rate_idx_to_plcp[IWL_RATE_COUNT] = {
	IWL_DECLARE_RATE_INFO(1),
	IWL_DECLARE_RATE_INFO(2),
	IWL_DECLARE_RATE_INFO(5),
	IWL_DECLARE_RATE_INFO(11),
	IWL_DECLARE_RATE_INFO(6),
	IWL_DECLARE_RATE_INFO(9),
	IWL_DECLARE_RATE_INFO(12),
	IWL_DECLARE_RATE_INFO(18),
	IWL_DECLARE_RATE_INFO(24),
	IWL_DECLARE_RATE_INFO(36),
	IWL_DECLARE_RATE_INFO(48),
	IWL_DECLARE_RATE_INFO(54),
};

/*
 * Translate a firmware legacy PLCP rate value to a mac80211 rate index
 * for the given band; returns -1 if no table entry matches.
 */
int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags,
					enum ieee80211_band band)
{
	int rate = rate_n_flags & RATE_LEGACY_RATE_MSK;
	int idx;
	int band_offset = 0;

	/* Legacy rate format, search for match in table */
	if (band == IEEE80211_BAND_5GHZ)
		band_offset = IWL_FIRST_OFDM_RATE;	/* 5 GHz has no CCK rates */
	for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++)
		if (fw_rate_idx_to_plcp[idx] == rate)
			return idx - band_offset;

	return -1;
}

u8 iwl_mvm_mac80211_idx_to_hwrate(int rate_idx)
{
	/* Get PLCP rate for tx_cmd->rate_n_flags */
	return fw_rate_idx_to_plcp[rate_idx];
}

/*
 * Handle the FW error notification: log its content.  Always returns 0
 * (notification handlers don't fail).
 */
int iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
			struct iwl_device_cmd *cmd)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_error_resp *err_resp = (void *)pkt->data;

	IWL_ERR(mvm, "FW Error notification: type 0x%08X cmd_id 0x%02X\n",
		le32_to_cpu(err_resp->error_type), err_resp->cmd_id);
	IWL_ERR(mvm, "FW Error notification: seq 0x%04X service 0x%08X\n",
		le16_to_cpu(err_resp->bad_cmd_seq_num),
		le32_to_cpu(err_resp->error_service));
	IWL_ERR(mvm, "FW Error notification: timestamp 0x%16llX\n",
		le64_to_cpu(err_resp->timestamp));
	return 0;
}

/*
 * Returns the first antenna as ANT_[ABC], as defined in iwl-config.h.
 * The parameter should also be a combination of ANT_[ABC].
 */
u8 first_antenna(u8 mask)
{
	BUILD_BUG_ON(ANT_A != BIT(0)); /* using ffs is wrong if not */
	if (WARN_ON_ONCE(!mask)) /* ffs will return 0 if mask is zeroed */
		return BIT(0);
	return BIT(ffs(mask) - 1);
}

/*
 * Toggles between TX antennas to send the probe request on.
 * Receives the bitmask of valid TX antennas and the *index* used
 * for the last TX, and returns the next valid *index* to use.
 * In order to set it in the tx_cmd, must do BIT(idx).
 */
u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx)
{
	u8 ind = last_idx;
	int i;

	for (i = 0; i < RATE_MCS_ANT_NUM; i++) {
		ind = (ind + 1) % RATE_MCS_ANT_NUM;
		if (valid & BIT(ind))
			return ind;
	}

	WARN_ONCE(1, "Failed to toggle between antennas 0x%x", valid);
	return last_idx;
}

/* Mapping of firmware SYSASSERT error ids to human-readable names */
static const struct {
	const char *name;
	u8 num;
} advanced_lookup[] = {
	{ "NMI_INTERRUPT_WDG", 0x34 },
	{ "SYSASSERT", 0x35 },
	{ "UCODE_VERSION_MISMATCH", 0x37 },
	{ "BAD_COMMAND", 0x38 },
	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
	{ "FATAL_ERROR", 0x3D },
	{ "NMI_TRM_HW_ERR", 0x46 },
	{ "NMI_INTERRUPT_TRM", 0x4C },
	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
	{ "NMI_INTERRUPT_HOST", 0x66 },
	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
	{ "ADVANCED_SYSASSERT", 0 },
};

static const char *desc_lookup(u32 num)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(advanced_lookup) - 1; i++)
		if (advanced_lookup[i].num == num)
			return advanced_lookup[i].name;

	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
	return advanced_lookup[i].name;
}

/*
 * Note: This structure is read from the device with IO accesses,
 * and the reading already does the endian conversion. As it is
 * read with u32-sized accesses, any members with a different size
 * need to be ordered correctly though!
 */
struct iwl_error_event_table_v1 {
	u32 valid;		/* (nonzero) valid, (0) log is empty */
	u32 error_id;		/* type of error */
	u32 pc;			/* program counter */
	u32 blink1;		/* branch link */
	u32 blink2;		/* branch link */
	u32 ilink1;		/* interrupt link */
	u32 ilink2;		/* interrupt link */
	u32 data1;		/* error-specific data */
	u32 data2;		/* error-specific data */
	u32 data3;		/* error-specific data */
	u32 bcon_time;		/* beacon timer */
	u32 tsf_low;		/* network timestamp function timer */
	u32 tsf_hi;		/* network timestamp function timer */
	u32 gp1;		/* GP1 timer register */
	u32 gp2;		/* GP2 timer register */
	u32 gp3;		/* GP3 timer register */
	u32 ucode_ver;		/* uCode version */
	u32 hw_ver;		/* HW Silicon version */
	u32 brd_ver;		/* HW board version */
	u32 log_pc;		/* log program counter */
	u32 frame_ptr;		/* frame pointer */
	u32 stack_ptr;		/* stack pointer */
	u32 hcmd;		/* last host command header */
	u32 isr0;		/* isr status register LMPM_NIC_ISR0:
				 * rxtx_flag */
	u32 isr1;		/* isr status register LMPM_NIC_ISR1:
				 * host_flag */
	u32 isr2;		/* isr status register LMPM_NIC_ISR2:
				 * enc_flag */
	u32 isr3;		/* isr status register LMPM_NIC_ISR3:
				 * time_flag */
	u32 isr4;		/* isr status register LMPM_NIC_ISR4:
				 * wico interrupt */
	u32 isr_pref;		/* isr status register LMPM_NIC_PREF_STAT */
	u32 wait_event;		/* wait event() caller address */
	u32 l2p_control;	/* L2pControlField */
	u32 l2p_duration;	/* L2pDurationField */
	u32 l2p_mhvalid;	/* L2pMhValidBits */
	u32 l2p_addr_match;	/* L2pAddrMatchStat */
	u32 lmpm_pmg_sel;	/* indicate which clocks are turned on
				 * (LMPM_PMG_SEL) */
	u32 u_timestamp;	/* indicate when the date and time of the
				 * compilation */
	u32 flow_handler;	/* FH read/write pointers, RX credit */
} __packed /* LOG_ERROR_TABLE_API_S_VER_1 */;

/*
 * Same layout as v1 above, except the single ucode_ver word is replaced
 * by separate major/minor version words.
 */
struct iwl_error_event_table {
	u32 valid;		/* (nonzero) valid, (0) log is empty */
	u32 error_id;		/* type of error */
	u32 pc;			/* program counter */
	u32 blink1;		/* branch link */
	u32 blink2;		/* branch link */
	u32 ilink1;		/* interrupt link */
	u32 ilink2;		/* interrupt link */
	u32 data1;		/* error-specific data */
	u32 data2;		/* error-specific data */
	u32 data3;		/* error-specific data */
	u32 bcon_time;		/* beacon timer */
	u32 tsf_low;		/* network timestamp function timer */
	u32 tsf_hi;		/* network timestamp function timer */
	u32 gp1;		/* GP1 timer register */
	u32 gp2;		/* GP2 timer register */
	u32 gp3;		/* GP3 timer register */
	u32 major;		/* uCode version major */
	u32 minor;		/* uCode version minor */
	u32 hw_ver;		/* HW Silicon version */
	u32 brd_ver;		/* HW board version */
	u32 log_pc;		/* log program counter */
	u32 frame_ptr;		/* frame pointer */
	u32 stack_ptr;		/* stack pointer */
	u32 hcmd;		/* last host command header */
	u32 isr0;		/* isr status register LMPM_NIC_ISR0:
				 * rxtx_flag */
	u32 isr1;		/* isr status register LMPM_NIC_ISR1:
				 * host_flag */
	u32 isr2;		/* isr status register LMPM_NIC_ISR2:
				 * enc_flag */
	u32 isr3;		/* isr status register LMPM_NIC_ISR3:
				 * time_flag */
	u32 isr4;		/* isr status register LMPM_NIC_ISR4:
				 * wico interrupt */
	u32 isr_pref;		/* isr status register LMPM_NIC_PREF_STAT */
	u32 wait_event;		/* wait event() caller address */
	u32 l2p_control;	/* L2pControlField */
	u32 l2p_duration;	/* L2pDurationField */
	u32 l2p_mhvalid;	/* L2pMhValidBits */
	u32 l2p_addr_match;	/* L2pAddrMatchStat */
	u32 lmpm_pmg_sel;	/* indicate which clocks are turned on
				 * (LMPM_PMG_SEL) */
	u32 u_timestamp;	/* indicate when the date and time of the
				 * compilation */
	u32 flow_handler;	/* FH read/write pointers, RX credit */
} __packed /* LOG_ERROR_TABLE_API_S_VER_2 */;

/*
 * UMAC error struct - relevant starting from family 8000 chip.
 * Note: This structure is read from the device with IO accesses,
 * and the reading already does the endian conversion. As it is
 * read with u32-sized accesses, any members with a different size
 * need to be ordered correctly though!
 */
struct iwl_umac_error_event_table {
	u32 valid;		/* (nonzero) valid, (0) log is empty */
	u32 error_id;		/* type of error */
	u32 blink1;		/* branch link */
	u32 blink2;		/* branch link */
	u32 ilink1;		/* interrupt link */
	u32 ilink2;		/* interrupt link */
	u32 data1;		/* error-specific data */
	u32 data2;		/* error-specific data */
	u32 data3;		/* error-specific data */
	u32 umac_major;
	u32 umac_minor;
	u32 frame_pointer;	/* core register 27*/
	u32 stack_pointer;	/* core register 28 */
	u32 cmd_header;		/* latest host cmd sent to UMAC */
	u32 nic_isr_pref;	/* ISR status register */
} __packed;

#define ERROR_START_OFFSET  (1 * sizeof(u32))
#define ERROR_ELEM_SIZE     (7 * sizeof(u32))

/* Read the UMAC error-event table out of device memory and log it. */
static void iwl_mvm_dump_umac_error_log(struct iwl_mvm *mvm)
{
	struct iwl_trans *trans = mvm->trans;
	struct iwl_umac_error_event_table table;
	u32 base;

	base = mvm->umac_error_event_table;

	if (base < 0x800000) {
		IWL_ERR(mvm,
			"Not valid error log pointer 0x%08X for %s uCode\n",
			base,
			(mvm->cur_ucode == IWL_UCODE_INIT)
					? "Init" : "RT");
		return;
	}

	iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));

	/* NOTE(review): this guard matches the SMAC dump functions below;
	 * 'valid' appears to be an entry count - confirm against fw docs. */
	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		IWL_ERR(trans, "Start IWL Error Log Dump:\n");
		IWL_ERR(trans, "Status: 0x%08lX, count: %d\n",
			mvm->status, table.valid);
	}

	IWL_ERR(mvm, "0x%08X | %s\n", table.error_id,
		desc_lookup(table.error_id));
	IWL_ERR(mvm, "0x%08X | umac branchlink1\n", table.blink1);
	IWL_ERR(mvm, "0x%08X | umac branchlink2\n", table.blink2);
	IWL_ERR(mvm, "0x%08X | umac interruptlink1\n", table.ilink1);
	IWL_ERR(mvm, "0x%08X | umac interruptlink2\n", table.ilink2);
	IWL_ERR(mvm, "0x%08X | umac data1\n", table.data1);
	IWL_ERR(mvm, "0x%08X | umac data2\n", table.data2);
	IWL_ERR(mvm, "0x%08X | umac data3\n", table.data3);
	IWL_ERR(mvm, "0x%08X | umac major\n", table.umac_major);
	IWL_ERR(mvm, "0x%08X | umac minor\n", table.umac_minor);
	IWL_ERR(mvm, "0x%08X | frame pointer\n", table.frame_pointer);
	IWL_ERR(mvm, "0x%08X | stack pointer\n", table.stack_pointer);
	IWL_ERR(mvm, "0x%08X | last host cmd\n", table.cmd_header);
	IWL_ERR(mvm, "0x%08X | isr status reg\n", table.nic_isr_pref);
}

/* Dump the v1 (pre IWL_UCODE_TLV_API_NEW_VERSION) error-event table. */
static void iwl_mvm_dump_nic_error_log_old(struct iwl_mvm *mvm)
{
	struct iwl_trans *trans = mvm->trans;
	struct iwl_error_event_table_v1 table;
	u32 base;

	base = mvm->error_event_table;
	if (mvm->cur_ucode == IWL_UCODE_INIT) {
		if (!base)
			base = mvm->fw->init_errlog_ptr;
	} else {
		if (!base)
			base = mvm->fw->inst_errlog_ptr;
	}

	if (base < 0x800000) {
		IWL_ERR(mvm,
			"Not valid error log pointer 0x%08X for %s uCode\n",
			base,
			(mvm->cur_ucode == IWL_UCODE_INIT)
					? "Init" : "RT");
		return;
	}

	iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));

	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		IWL_ERR(trans, "Start IWL Error Log Dump:\n");
		IWL_ERR(trans, "Status: 0x%08lX, count: %d\n",
			mvm->status, table.valid);
	}

	/* Do not change this output - scripts rely on it */

	IWL_ERR(mvm, "Loaded firmware version: %s\n", mvm->fw->fw_version);

	trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low,
				      table.data1, table.data2, table.data3,
				      table.blink1, table.blink2, table.ilink1,
				      table.ilink2, table.bcon_time, table.gp1,
				      table.gp2, table.gp3, table.ucode_ver, 0,
				      table.hw_ver, table.brd_ver);
	IWL_ERR(mvm, "0x%08X | %-28s\n", table.error_id,
		desc_lookup(table.error_id));
	IWL_ERR(mvm, "0x%08X | uPc\n", table.pc);
	IWL_ERR(mvm, "0x%08X | branchlink1\n", table.blink1);
	IWL_ERR(mvm, "0x%08X | branchlink2\n", table.blink2);
	IWL_ERR(mvm, "0x%08X | interruptlink1\n", table.ilink1);
	IWL_ERR(mvm, "0x%08X | interruptlink2\n", table.ilink2);
	IWL_ERR(mvm, "0x%08X | data1\n", table.data1);
	IWL_ERR(mvm, "0x%08X | data2\n", table.data2);
	IWL_ERR(mvm, "0x%08X | data3\n", table.data3);
	IWL_ERR(mvm, "0x%08X | beacon time\n", table.bcon_time);
	IWL_ERR(mvm, "0x%08X | tsf low\n", table.tsf_low);
	IWL_ERR(mvm, "0x%08X | tsf hi\n", table.tsf_hi);
	IWL_ERR(mvm, "0x%08X | time gp1\n", table.gp1);
	IWL_ERR(mvm, "0x%08X | time gp2\n", table.gp2);
	IWL_ERR(mvm, "0x%08X | time gp3\n", table.gp3);
	IWL_ERR(mvm, "0x%08X | uCode version\n", table.ucode_ver);
	IWL_ERR(mvm, "0x%08X | hw version\n", table.hw_ver);
	IWL_ERR(mvm, "0x%08X | board version\n", table.brd_ver);
	IWL_ERR(mvm, "0x%08X | hcmd\n", table.hcmd);
	IWL_ERR(mvm, "0x%08X | isr0\n", table.isr0);
	IWL_ERR(mvm, "0x%08X | isr1\n", table.isr1);
	IWL_ERR(mvm, "0x%08X | isr2\n", table.isr2);
	IWL_ERR(mvm, "0x%08X | isr3\n", table.isr3);
	IWL_ERR(mvm, "0x%08X | isr4\n", table.isr4);
	IWL_ERR(mvm, "0x%08X | isr_pref\n", table.isr_pref);
	IWL_ERR(mvm, "0x%08X | wait_event\n", table.wait_event);
	IWL_ERR(mvm, "0x%08X | l2p_control\n", table.l2p_control);
	IWL_ERR(mvm, "0x%08X | l2p_duration\n", table.l2p_duration);
	IWL_ERR(mvm, "0x%08X | l2p_mhvalid\n", table.l2p_mhvalid);
	IWL_ERR(mvm, "0x%08X | l2p_addr_match\n", table.l2p_addr_match);
	IWL_ERR(mvm, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
	IWL_ERR(mvm, "0x%08X | timestamp\n", table.u_timestamp);
	IWL_ERR(mvm, "0x%08X | flow_handler\n", table.flow_handler);

	if (mvm->support_umac_log)
		iwl_mvm_dump_umac_error_log(mvm);
}

/* Dump the current-format error-event table (falls back to the old one). */
void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
{
	struct iwl_trans *trans = mvm->trans;
	struct iwl_error_event_table table;
	u32 base;

	if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_NEW_VERSION)) {
		iwl_mvm_dump_nic_error_log_old(mvm);
		return;
	}

	base = mvm->error_event_table;
	if (mvm->cur_ucode == IWL_UCODE_INIT) {
		if (!base)
			base = mvm->fw->init_errlog_ptr;
	} else {
		if (!base)
			base = mvm->fw->inst_errlog_ptr;
	}

	if (base < 0x800000) {
		IWL_ERR(mvm,
			"Not valid error log pointer 0x%08X for %s uCode\n",
			base,
			(mvm->cur_ucode == IWL_UCODE_INIT)
					? "Init" : "RT");
		return;
	}

	iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));

	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		IWL_ERR(trans, "Start IWL Error Log Dump:\n");
		IWL_ERR(trans, "Status: 0x%08lX, count: %d\n",
			mvm->status, table.valid);
	}

	/* Do not change this output - scripts rely on it */

	IWL_ERR(mvm, "Loaded firmware version: %s\n", mvm->fw->fw_version);

	trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low,
				      table.data1, table.data2, table.data3,
				      table.blink1, table.blink2, table.ilink1,
				      table.ilink2, table.bcon_time, table.gp1,
				      table.gp2, table.gp3, table.major,
				      table.minor, table.hw_ver, table.brd_ver);
	IWL_ERR(mvm, "0x%08X | %-28s\n", table.error_id,
		desc_lookup(table.error_id));
	IWL_ERR(mvm, "0x%08X | uPc\n", table.pc);
	IWL_ERR(mvm, "0x%08X | branchlink1\n", table.blink1);
	IWL_ERR(mvm, "0x%08X | branchlink2\n", table.blink2);
	IWL_ERR(mvm, "0x%08X | interruptlink1\n", table.ilink1);
	IWL_ERR(mvm, "0x%08X | interruptlink2\n", table.ilink2);
	IWL_ERR(mvm, "0x%08X | data1\n", table.data1);
	IWL_ERR(mvm, "0x%08X | data2\n", table.data2);
	IWL_ERR(mvm, "0x%08X | data3\n", table.data3);
	IWL_ERR(mvm, "0x%08X | beacon time\n", table.bcon_time);
	IWL_ERR(mvm, "0x%08X | tsf low\n", table.tsf_low);
	IWL_ERR(mvm, "0x%08X | tsf hi\n", table.tsf_hi);
	IWL_ERR(mvm, "0x%08X | time gp1\n", table.gp1);
	IWL_ERR(mvm, "0x%08X | time gp2\n", table.gp2);
	IWL_ERR(mvm, "0x%08X | time gp3\n", table.gp3);
	IWL_ERR(mvm, "0x%08X | uCode version major\n", table.major);
	IWL_ERR(mvm, "0x%08X | uCode version minor\n", table.minor);
	IWL_ERR(mvm, "0x%08X | hw version\n", table.hw_ver);
	IWL_ERR(mvm, "0x%08X | board version\n", table.brd_ver);
	IWL_ERR(mvm, "0x%08X | hcmd\n", table.hcmd);
	IWL_ERR(mvm, "0x%08X | isr0\n", table.isr0);
	IWL_ERR(mvm, "0x%08X | isr1\n", table.isr1);
	IWL_ERR(mvm, "0x%08X | isr2\n", table.isr2);
	IWL_ERR(mvm, "0x%08X | isr3\n", table.isr3);
	IWL_ERR(mvm, "0x%08X | isr4\n", table.isr4);
	IWL_ERR(mvm, "0x%08X | isr_pref\n", table.isr_pref);
	IWL_ERR(mvm, "0x%08X | wait_event\n", table.wait_event);
	IWL_ERR(mvm, "0x%08X | l2p_control\n", table.l2p_control);
	IWL_ERR(mvm, "0x%08X | l2p_duration\n", table.l2p_duration);
	IWL_ERR(mvm, "0x%08X | l2p_mhvalid\n", table.l2p_mhvalid);
	IWL_ERR(mvm, "0x%08X | l2p_addr_match\n", table.l2p_addr_match);
	IWL_ERR(mvm, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
	IWL_ERR(mvm, "0x%08X | timestamp\n", table.u_timestamp);
	IWL_ERR(mvm, "0x%08X | flow_handler\n", table.flow_handler);

	if (mvm->support_umac_log)
		iwl_mvm_dump_umac_error_log(mvm);
}

/*
 * Enable a TX queue: configure it in the transport and, when the fw
 * supports the scheduler config command, also tell the firmware.
 */
void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, u16 ssn,
			const struct iwl_trans_txq_scd_cfg *cfg,
			unsigned int wdg_timeout)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.enable = 1,
		.window = cfg->frame_limit,
		.sta_id = cfg->sta_id,
		.ssn = cpu_to_le16(ssn),
		.tx_fifo = cfg->fifo,
		.aggregate = cfg->aggregate,
		.tid = cfg->tid,
	};

	if (!iwl_mvm_is_scd_cfg_supported(mvm)) {
		/* Old fw: the transport does the full SCD configuration */
		iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, cfg,
					 wdg_timeout);
		return;
	}

	iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout);
	WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd),
	     "Failed to configure queue %d on FIFO %d\n", queue, cfg->fifo);
}

/* Disable a TX queue; mirror image of iwl_mvm_enable_txq(). */
void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, u8 flags)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.enable = 0,
	};
	int ret;

	if (!iwl_mvm_is_scd_cfg_supported(mvm)) {
		iwl_trans_txq_disable(mvm->trans, queue, true);
		return;
	}

	iwl_trans_txq_disable(mvm->trans, queue, false);
	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags,
				   sizeof(cmd), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n",
			queue, ret);
}

/**
 * iwl_mvm_send_lq_cmd() - Send link quality command
 * @init: This command is sent as part of station initialization right
 *        after station has been added.
 *
 * The link quality command is sent as the last step of station creation.
 * This is the special case in which init is set and we call a callback in
 * this case to clear the state indicating that station creation is in
 * progress.
 */
int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool init)
{
	struct iwl_host_cmd cmd = {
		.id = LQ_CMD,
		.len = { sizeof(struct iwl_lq_cmd), },
		.flags = init ? 0 : CMD_ASYNC,
		.data = { lq, },
	};

	if (WARN_ON(lq->sta_id == IWL_MVM_STATION_COUNT))
		return -EINVAL;

	return iwl_mvm_send_cmd(mvm, &cmd);
}

/**
 * iwl_mvm_update_smps - Get a request to change the SMPS mode
 * @req_type: The part of the driver which calls for a change.
 * @smps_request: The request to change the SMPS mode.
 *
 * Get a request to change the SMPS mode,
 * and change it according to all other requests in the driver.
 */
void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			 enum iwl_mvm_smps_type_request req_type,
			 enum ieee80211_smps_mode smps_request)
{
	struct iwl_mvm_vif *mvmvif;
	enum ieee80211_smps_mode smps_mode;
	int i;

	lockdep_assert_held(&mvm->mutex);

	/* SMPS is irrelevant for NICs that don't have at least 2 RX antenna */
	if (num_of_ant(iwl_mvm_get_valid_rx_ant(mvm)) == 1)
		return;

	if (vif->type == NL80211_IFTYPE_AP)
		smps_mode = IEEE80211_SMPS_OFF;
	else
		smps_mode = IEEE80211_SMPS_AUTOMATIC;

	mvmvif = iwl_mvm_vif_from_mac80211(vif);
	mvmvif->smps_requests[req_type] = smps_request;
	/* STATIC wins over DYNAMIC wins over the default chosen above */
	for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) {
		if (mvmvif->smps_requests[i] == IEEE80211_SMPS_STATIC) {
			smps_mode = IEEE80211_SMPS_STATIC;
			break;
		}
		if (mvmvif->smps_requests[i] == IEEE80211_SMPS_DYNAMIC)
			smps_mode = IEEE80211_SMPS_DYNAMIC;
	}

	ieee80211_request_smps(vif, smps_mode);
}

/* Request a statistics dump from the fw; optionally clear fw counters. */
int iwl_mvm_request_statistics(struct iwl_mvm *mvm, bool clear)
{
	struct iwl_statistics_cmd scmd = {
		.flags = clear ? cpu_to_le32(IWL_STATISTICS_FLG_CLEAR) : 0,
	};
	struct iwl_host_cmd cmd = {
		.id = STATISTICS_CMD,
		.len[0] = sizeof(scmd),
		.data[0] = &scmd,
		.flags = CMD_WANT_SKB,
	};
	int ret;

	ret = iwl_mvm_send_cmd(mvm, &cmd);
	if (ret)
		return ret;

	iwl_mvm_handle_rx_statistics(mvm, cmd.resp_pkt);
	iwl_free_resp(&cmd);

	/* fw counters were reset - fold the last period into the totals */
	if (clear)
		iwl_mvm_accu_radio_stats(mvm);

	return 0;
}

/* Accumulate the last statistics period into the running totals. */
void iwl_mvm_accu_radio_stats(struct iwl_mvm *mvm)
{
	mvm->accu_radio_stats.rx_time += mvm->radio_stats.rx_time;
	mvm->accu_radio_stats.tx_time += mvm->radio_stats.tx_time;
	mvm->accu_radio_stats.on_time_rf += mvm->radio_stats.on_time_rf;
	mvm->accu_radio_stats.on_time_scan += mvm->radio_stats.on_time_scan;
}

/* Iterator: diversity is not allowed if any vif requested SMPS. */
static void iwl_mvm_diversity_iter(void *_data, u8 *mac,
				   struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	bool *result = _data;
	int i;

	for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) {
		if (mvmvif->smps_requests[i] == IEEE80211_SMPS_STATIC ||
		    mvmvif->smps_requests[i] == IEEE80211_SMPS_DYNAMIC)
			*result = false;
	}
}

/* Decide whether RX diversity (multiple chains) may currently be used. */
bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm)
{
	bool result = true;

	lockdep_assert_held(&mvm->mutex);

	if (num_of_ant(iwl_mvm_get_valid_rx_ant(mvm)) == 1)
		return false;

	if (mvm->cfg->rx_with_siso_diversity)
		return false;

	ieee80211_iterate_active_interfaces_atomic(
			mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
			iwl_mvm_diversity_iter, &result);

	return result;
}

/*
 * Update a vif's low-latency state and propagate it to quotas,
 * BT coex and power management.
 */
int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			       bool value)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int res;

	lockdep_assert_held(&mvm->mutex);

	if (mvmvif->low_latency == value)
		return 0;

	mvmvif->low_latency = value;

	res = iwl_mvm_update_quotas(mvm, false, NULL);
	if (res)
		return res;

	iwl_mvm_bt_coex_vif_change(mvm);

	return iwl_mvm_power_update_mac(mvm);
}

/* Iterator: set *result if any active vif is in low-latency mode. */
static void iwl_mvm_ll_iter(void *_data, u8 *mac, struct ieee80211_vif *vif)
{
	bool *result = _data;

	if (iwl_mvm_vif_low_latency(iwl_mvm_vif_from_mac80211(vif)))
		*result = true;
}

bool iwl_mvm_low_latency(struct iwl_mvm *mvm)
{
	bool result = false;

	ieee80211_iterate_active_interfaces_atomic(
			mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
			iwl_mvm_ll_iter, &result);

	return result;
}

struct iwl_bss_iter_data {
	struct ieee80211_vif *vif;
	bool error;	/* set when more than one managed vif is found */
};

static void iwl_mvm_bss_iface_iterator(void *_data, u8 *mac,
				       struct ieee80211_vif *vif)
{
	struct iwl_bss_iter_data *data = _data;

	if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
		return;

	if (data->vif) {
		data->error = true;
		return;
	}

	data->vif = vif;
}

/* Return the single managed (BSS) vif, or an ERR_PTR if ambiguous. */
struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm)
{
	struct iwl_bss_iter_data bss_iter_data = {};

	ieee80211_iterate_active_interfaces_atomic(
		mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
		iwl_mvm_bss_iface_iterator, &bss_iter_data);

	if (bss_iter_data.error) {
		IWL_ERR(mvm, "More than one managed interface active!\n");
		return ERR_PTR(-EINVAL);
	}

	return bss_iter_data.vif;
}

/*
 * Compute the TX queue watchdog timeout; per-interface-type values may
 * come from a fw debug trigger, otherwise module/config defaults apply.
 */
unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm,
				    struct ieee80211_vif *vif,
				    bool tdls, bool cmd_q)
{
	struct iwl_fw_dbg_trigger_tlv *trigger;
	struct iwl_fw_dbg_trigger_txq_timer *txq_timer;
	unsigned int default_timeout =
		cmd_q ? IWL_DEF_WD_TIMEOUT : mvm->cfg->base_params->wd_timeout;

	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS))
		return iwlmvm_mod_params.tfd_q_hang_detect ?
			default_timeout : IWL_WATCHDOG_DISABLED;

	trigger = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS);
	txq_timer = (void *)trigger->data;

	if (tdls)
		return le32_to_cpu(txq_timer->tdls);

	if (cmd_q)
		return le32_to_cpu(txq_timer->command_queue);

	if (WARN_ON(!vif))
		return default_timeout;

	switch (ieee80211_vif_type_p2p(vif)) {
	case NL80211_IFTYPE_ADHOC:
		return le32_to_cpu(txq_timer->ibss);
	case NL80211_IFTYPE_STATION:
		return le32_to_cpu(txq_timer->bss);
	case NL80211_IFTYPE_AP:
		return le32_to_cpu(txq_timer->softap);
	case NL80211_IFTYPE_P2P_CLIENT:
		return le32_to_cpu(txq_timer->p2p_client);
	case NL80211_IFTYPE_P2P_GO:
		return le32_to_cpu(txq_timer->p2p_go);
	case NL80211_IFTYPE_P2P_DEVICE:
		return le32_to_cpu(txq_timer->p2p_device);
	default:
		WARN_ON(1);
		return mvm->cfg->base_params->wd_timeout;
	}
}

/*
 * Report a connection loss to mac80211, optionally collecting fw debug
 * data first if the MLME debug trigger is armed.
 */
void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			     const char *errmsg)
{
	struct iwl_fw_dbg_trigger_tlv *trig;
	struct iwl_fw_dbg_trigger_mlme *trig_mlme;

	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_MLME))
		goto out;

	trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_MLME);
	trig_mlme = (void *)trig->data;
	if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
		goto out;

	/* decrement remaining-occurrences; skip collection until exhausted */
	if (trig_mlme->stop_connection_loss &&
	    --trig_mlme->stop_connection_loss)
		goto out;

	iwl_mvm_fw_dbg_collect_trig(mvm, trig, "%s", errmsg);

out:
	ieee80211_connection_loss(vif);
}
gpl-2.0
siis/pfwall
drivers/net/wireless/ath/ath5k/gpio.c
226
3929
/*
 * Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
 * Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 */

/****************\
  GPIO Functions
\****************/

#include "ath5k.h"
#include "reg.h"
#include "debug.h"

/*
 * Set led state
 */
void ath5k_hw_set_ledstate(struct ath5k_hw *ah, unsigned int state)
{
	u32 led;
	/* 5210 has different led mode handling */
	u32 led_5210;

	/* Reset led status */
	if (ah->ah_version != AR5K_AR5210)
		AR5K_REG_DISABLE_BITS(ah, AR5K_PCICFG,
			AR5K_PCICFG_LEDMODE |  AR5K_PCICFG_LED);
	else
		AR5K_REG_DISABLE_BITS(ah, AR5K_PCICFG, AR5K_PCICFG_LED);

	/*
	 * Some blinking values, define at your wish
	 */
	switch (state) {
	case AR5K_LED_SCAN:
	case AR5K_LED_AUTH:
		led = AR5K_PCICFG_LEDMODE_PROP | AR5K_PCICFG_LED_PEND;
		led_5210 = AR5K_PCICFG_LED_PEND | AR5K_PCICFG_LED_BCTL;
		break;

	case AR5K_LED_INIT:
		led = AR5K_PCICFG_LEDMODE_PROP | AR5K_PCICFG_LED_NONE;
		led_5210 = AR5K_PCICFG_LED_PEND;
		break;

	case AR5K_LED_ASSOC:
	case AR5K_LED_RUN:
		led = AR5K_PCICFG_LEDMODE_PROP | AR5K_PCICFG_LED_ASSOC;
		led_5210 = AR5K_PCICFG_LED_ASSOC;
		break;

	default:
		led = AR5K_PCICFG_LEDMODE_PROM | AR5K_PCICFG_LED_NONE;
		led_5210 = AR5K_PCICFG_LED_PEND;
		break;
	}

	/* Write new status to the register */
	if (ah->ah_version != AR5K_AR5210)
		AR5K_REG_ENABLE_BITS(ah, AR5K_PCICFG, led);
	else
		AR5K_REG_ENABLE_BITS(ah, AR5K_PCICFG, led_5210);
}

/*
 * Set GPIO inputs
 */
int ath5k_hw_set_gpio_input(struct ath5k_hw *ah, u32 gpio)
{
	if (gpio >= AR5K_NUM_GPIO)
		return -EINVAL;

	/* Read-modify-write: clear the OUT bits, set the IN bits */
	ath5k_hw_reg_write(ah,
		(ath5k_hw_reg_read(ah, AR5K_GPIOCR) & ~AR5K_GPIOCR_OUT(gpio))
		| AR5K_GPIOCR_IN(gpio), AR5K_GPIOCR);

	return 0;
}

/*
 * Set GPIO outputs
 */
int ath5k_hw_set_gpio_output(struct ath5k_hw *ah, u32 gpio)
{
	if (gpio >= AR5K_NUM_GPIO)
		return -EINVAL;

	/* NOTE(review): clears then sets the same OUT bits - the clear is
	 * redundant but harmless; kept to mirror set_gpio_input above. */
	ath5k_hw_reg_write(ah,
		(ath5k_hw_reg_read(ah, AR5K_GPIOCR) & ~AR5K_GPIOCR_OUT(gpio))
		| AR5K_GPIOCR_OUT(gpio), AR5K_GPIOCR);

	return 0;
}

/*
 * Get GPIO state
 */
u32 ath5k_hw_get_gpio(struct ath5k_hw *ah, u32 gpio)
{
	if (gpio >= AR5K_NUM_GPIO)
		return 0xffffffff;

	/* GPIO input magic */
	return ((ath5k_hw_reg_read(ah, AR5K_GPIODI) & AR5K_GPIODI_M) >> gpio) &
		0x1;
}

/*
 * Set GPIO state
 */
int ath5k_hw_set_gpio(struct ath5k_hw *ah, u32 gpio, u32 val)
{
	u32 data;

	if (gpio >= AR5K_NUM_GPIO)
		return -EINVAL;

	/* GPIO output magic: read-modify-write the single bit */
	data = ath5k_hw_reg_read(ah, AR5K_GPIODO);

	data &= ~(1 << gpio);
	data |= (val & 1) << gpio;

	ath5k_hw_reg_write(ah, data, AR5K_GPIODO);

	return 0;
}

/*
 * Initialize the GPIO interrupt (RFKill switch)
 */
void ath5k_hw_set_gpio_intr(struct ath5k_hw *ah, unsigned int gpio,
		u32 interrupt_level)
{
	u32 data;

	if (gpio >= AR5K_NUM_GPIO)
		return;

	/*
	 * Set the GPIO interrupt
	 */
	data = (ath5k_hw_reg_read(ah, AR5K_GPIOCR) &
		~(AR5K_GPIOCR_INT_SEL(gpio) | AR5K_GPIOCR_INT_SELH |
		AR5K_GPIOCR_INT_ENA | AR5K_GPIOCR_OUT(gpio))) |
		(AR5K_GPIOCR_INT_SEL(gpio) | AR5K_GPIOCR_INT_ENA);

	/* INT_SELH selects the active level of the interrupt */
	ath5k_hw_reg_write(ah, interrupt_level ? data :
		(data | AR5K_GPIOCR_INT_SELH), AR5K_GPIOCR);

	ah->ah_imr |= AR5K_IMR_GPIO;

	/* Enable GPIO interrupts */
	AR5K_REG_ENABLE_BITS(ah, AR5K_PIMR, AR5K_IMR_GPIO);
}
gpl-2.0
kerneldevs/RM-CAF-PECAN
net/rds/iw_recv.c
482
26183
/* * Copyright (c) 2006 Oracle. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * */ #include <linux/kernel.h> #include <linux/pci.h> #include <linux/dma-mapping.h> #include <rdma/rdma_cm.h> #include "rds.h" #include "iw.h" static struct kmem_cache *rds_iw_incoming_slab; static struct kmem_cache *rds_iw_frag_slab; static atomic_t rds_iw_allocation = ATOMIC_INIT(0); static void rds_iw_frag_drop_page(struct rds_page_frag *frag) { rdsdebug("frag %p page %p\n", frag, frag->f_page); __free_page(frag->f_page); frag->f_page = NULL; } static void rds_iw_frag_free(struct rds_page_frag *frag) { rdsdebug("frag %p page %p\n", frag, frag->f_page); BUG_ON(frag->f_page != NULL); kmem_cache_free(rds_iw_frag_slab, frag); } /* * We map a page at a time. 
Its fragments are posted in order. This * is called in fragment order as the fragments get send completion events. * Only the last frag in the page performs the unmapping. * * It's OK for ring cleanup to call this in whatever order it likes because * DMA is not in flight and so we can unmap while other ring entries still * hold page references in their frags. */ static void rds_iw_recv_unmap_page(struct rds_iw_connection *ic, struct rds_iw_recv_work *recv) { struct rds_page_frag *frag = recv->r_frag; rdsdebug("recv %p frag %p page %p\n", recv, frag, frag->f_page); if (frag->f_mapped) ib_dma_unmap_page(ic->i_cm_id->device, frag->f_mapped, RDS_FRAG_SIZE, DMA_FROM_DEVICE); frag->f_mapped = 0; } void rds_iw_recv_init_ring(struct rds_iw_connection *ic) { struct rds_iw_recv_work *recv; u32 i; for (i = 0, recv = ic->i_recvs; i < ic->i_recv_ring.w_nr; i++, recv++) { struct ib_sge *sge; recv->r_iwinc = NULL; recv->r_frag = NULL; recv->r_wr.next = NULL; recv->r_wr.wr_id = i; recv->r_wr.sg_list = recv->r_sge; recv->r_wr.num_sge = RDS_IW_RECV_SGE; sge = rds_iw_data_sge(ic, recv->r_sge); sge->addr = 0; sge->length = RDS_FRAG_SIZE; sge->lkey = 0; sge = rds_iw_header_sge(ic, recv->r_sge); sge->addr = ic->i_recv_hdrs_dma + (i * sizeof(struct rds_header)); sge->length = sizeof(struct rds_header); sge->lkey = 0; } } static void rds_iw_recv_clear_one(struct rds_iw_connection *ic, struct rds_iw_recv_work *recv) { if (recv->r_iwinc) { rds_inc_put(&recv->r_iwinc->ii_inc); recv->r_iwinc = NULL; } if (recv->r_frag) { rds_iw_recv_unmap_page(ic, recv); if (recv->r_frag->f_page) rds_iw_frag_drop_page(recv->r_frag); rds_iw_frag_free(recv->r_frag); recv->r_frag = NULL; } } void rds_iw_recv_clear_ring(struct rds_iw_connection *ic) { u32 i; for (i = 0; i < ic->i_recv_ring.w_nr; i++) rds_iw_recv_clear_one(ic, &ic->i_recvs[i]); if (ic->i_frag.f_page) rds_iw_frag_drop_page(&ic->i_frag); } static int rds_iw_recv_refill_one(struct rds_connection *conn, struct rds_iw_recv_work *recv, gfp_t kptr_gfp, 
gfp_t page_gfp) { struct rds_iw_connection *ic = conn->c_transport_data; dma_addr_t dma_addr; struct ib_sge *sge; int ret = -ENOMEM; if (recv->r_iwinc == NULL) { if (atomic_read(&rds_iw_allocation) >= rds_iw_sysctl_max_recv_allocation) { rds_iw_stats_inc(s_iw_rx_alloc_limit); goto out; } recv->r_iwinc = kmem_cache_alloc(rds_iw_incoming_slab, kptr_gfp); if (recv->r_iwinc == NULL) goto out; atomic_inc(&rds_iw_allocation); INIT_LIST_HEAD(&recv->r_iwinc->ii_frags); rds_inc_init(&recv->r_iwinc->ii_inc, conn, conn->c_faddr); } if (recv->r_frag == NULL) { recv->r_frag = kmem_cache_alloc(rds_iw_frag_slab, kptr_gfp); if (recv->r_frag == NULL) goto out; INIT_LIST_HEAD(&recv->r_frag->f_item); recv->r_frag->f_page = NULL; } if (ic->i_frag.f_page == NULL) { ic->i_frag.f_page = alloc_page(page_gfp); if (ic->i_frag.f_page == NULL) goto out; ic->i_frag.f_offset = 0; } dma_addr = ib_dma_map_page(ic->i_cm_id->device, ic->i_frag.f_page, ic->i_frag.f_offset, RDS_FRAG_SIZE, DMA_FROM_DEVICE); if (ib_dma_mapping_error(ic->i_cm_id->device, dma_addr)) goto out; /* * Once we get the RDS_PAGE_LAST_OFF frag then rds_iw_frag_unmap() * must be called on this recv. This happens as completions hit * in order or on connection shutdown. 
*/ recv->r_frag->f_page = ic->i_frag.f_page; recv->r_frag->f_offset = ic->i_frag.f_offset; recv->r_frag->f_mapped = dma_addr; sge = rds_iw_data_sge(ic, recv->r_sge); sge->addr = dma_addr; sge->length = RDS_FRAG_SIZE; sge = rds_iw_header_sge(ic, recv->r_sge); sge->addr = ic->i_recv_hdrs_dma + (recv - ic->i_recvs) * sizeof(struct rds_header); sge->length = sizeof(struct rds_header); get_page(recv->r_frag->f_page); if (ic->i_frag.f_offset < RDS_PAGE_LAST_OFF) { ic->i_frag.f_offset += RDS_FRAG_SIZE; } else { put_page(ic->i_frag.f_page); ic->i_frag.f_page = NULL; ic->i_frag.f_offset = 0; } ret = 0; out: return ret; } /* * This tries to allocate and post unused work requests after making sure that * they have all the allocations they need to queue received fragments into * sockets. The i_recv_mutex is held here so that ring_alloc and _unalloc * pairs don't go unmatched. * * -1 is returned if posting fails due to temporary resource exhaustion. */ int rds_iw_recv_refill(struct rds_connection *conn, gfp_t kptr_gfp, gfp_t page_gfp, int prefill) { struct rds_iw_connection *ic = conn->c_transport_data; struct rds_iw_recv_work *recv; struct ib_recv_wr *failed_wr; unsigned int posted = 0; int ret = 0; u32 pos; while ((prefill || rds_conn_up(conn)) && rds_iw_ring_alloc(&ic->i_recv_ring, 1, &pos)) { if (pos >= ic->i_recv_ring.w_nr) { printk(KERN_NOTICE "Argh - ring alloc returned pos=%u\n", pos); ret = -EINVAL; break; } recv = &ic->i_recvs[pos]; ret = rds_iw_recv_refill_one(conn, recv, kptr_gfp, page_gfp); if (ret) { ret = -1; break; } /* XXX when can this fail? */ ret = ib_post_recv(ic->i_cm_id->qp, &recv->r_wr, &failed_wr); rdsdebug("recv %p iwinc %p page %p addr %lu ret %d\n", recv, recv->r_iwinc, recv->r_frag->f_page, (long) recv->r_frag->f_mapped, ret); if (ret) { rds_iw_conn_error(conn, "recv post on " "%pI4 returned %d, disconnecting and " "reconnecting\n", &conn->c_faddr, ret); ret = -1; break; } posted++; } /* We're doing flow control - update the window. 
	 */
	if (ic->i_flowctl && posted)
		rds_iw_advertise_credits(conn, posted);

	/* A failed iteration left one ring slot allocated but unposted; return it. */
	if (ret)
		rds_iw_ring_unalloc(&ic->i_recv_ring, 1);
	return ret;
}

/* Drop every queued fragment (and its page reference) of an incoming message. */
void rds_iw_inc_purge(struct rds_incoming *inc)
{
	struct rds_iw_incoming *iwinc;
	struct rds_page_frag *frag;
	struct rds_page_frag *pos;

	iwinc = container_of(inc, struct rds_iw_incoming, ii_inc);
	rdsdebug("purging iwinc %p inc %p\n", iwinc, inc);

	list_for_each_entry_safe(frag, pos, &iwinc->ii_frags, f_item) {
		list_del_init(&frag->f_item);
		rds_iw_frag_drop_page(frag);
		rds_iw_frag_free(frag);
	}
}

/*
 * Free an incoming message back to its slab and credit the global
 * rds_iw_allocation counter that rds_iw_recv_refill_one() charged.
 */
void rds_iw_inc_free(struct rds_incoming *inc)
{
	struct rds_iw_incoming *iwinc;

	iwinc = container_of(inc, struct rds_iw_incoming, ii_inc);

	rds_iw_inc_purge(inc);
	rdsdebug("freeing iwinc %p inc %p\n", iwinc, inc);
	BUG_ON(!list_empty(&iwinc->ii_frags));
	kmem_cache_free(rds_iw_incoming_slab, iwinc);
	atomic_dec(&rds_iw_allocation);
	BUG_ON(atomic_read(&rds_iw_allocation) < 0);
}

/*
 * Copy a received message, fragment by fragment, into a user iovec.
 * Returns the number of bytes copied, or the (negative) error from the
 * page-copy helper on fault.
 */
int rds_iw_inc_copy_to_user(struct rds_incoming *inc, struct iovec *first_iov,
			    size_t size)
{
	struct rds_iw_incoming *iwinc;
	struct rds_page_frag *frag;
	struct iovec *iov = first_iov;
	unsigned long to_copy;
	unsigned long frag_off = 0;	/* offset within the current fragment */
	unsigned long iov_off = 0;	/* offset within the current iovec entry */
	int copied = 0;
	int ret;
	u32 len;

	iwinc = container_of(inc, struct rds_iw_incoming, ii_inc);
	frag = list_entry(iwinc->ii_frags.next, struct rds_page_frag, f_item);
	len = be32_to_cpu(inc->i_hdr.h_len);

	while (copied < size && copied < len) {
		/* Advance to the next fragment when this one is drained. */
		if (frag_off == RDS_FRAG_SIZE) {
			frag = list_entry(frag->f_item.next,
					  struct rds_page_frag, f_item);
			frag_off = 0;
		}
		/* Skip over fully-consumed (possibly zero-length) iovec entries. */
		while (iov_off == iov->iov_len) {
			iov_off = 0;
			iov++;
		}

		/* Bounded by iovec room, fragment remainder, caller size and msg len. */
		to_copy = min(iov->iov_len - iov_off, RDS_FRAG_SIZE - frag_off);
		to_copy = min_t(size_t, to_copy, size - copied);
		to_copy = min_t(unsigned long, to_copy, len - copied);

		rdsdebug("%lu bytes to user [%p, %zu] + %lu from frag "
			 "[%p, %lu] + %lu\n",
			 to_copy, iov->iov_base, iov->iov_len, iov_off,
			 frag->f_page, frag->f_offset, frag_off);

		/* XXX needs + offset for multiple recvs per page */
		ret = rds_page_copy_to_user(frag->f_page,
					    frag->f_offset + frag_off,
					    iov->iov_base + iov_off,
					    to_copy);
		if (ret) {
			copied = ret;
			break;
		}

		iov_off += to_copy;
		frag_off += to_copy;
		copied += to_copy;
	}

	return copied;
}

/* ic starts out kzalloc()ed */
void rds_iw_recv_init_ack(struct rds_iw_connection *ic)
{
	struct ib_send_wr *wr = &ic->i_ack_wr;
	struct ib_sge *sge = &ic->i_ack_sge;

	/* One long-lived SGE pointing at the connection's static ack header. */
	sge->addr = ic->i_ack_dma;
	sge->length = sizeof(struct rds_header);
	sge->lkey = rds_iw_local_dma_lkey(ic);

	wr->sg_list = sge;
	wr->num_sge = 1;
	wr->opcode = IB_WR_SEND;
	wr->wr_id = RDS_IW_ACK_WR_ID;
	wr->send_flags = IB_SEND_SIGNALED | IB_SEND_SOLICITED;
}

/*
 * You'd think that with reliable IB connections you wouldn't need to ack
 * messages that have been received.  The problem is that IB hardware generates
 * an ack message before it has DMAed the message into memory.  This creates a
 * potential message loss if the HCA is disabled for any reason between when it
 * sends the ack and before the message is DMAed and processed.  This is only a
 * potential issue if another HCA is available for fail-over.
 *
 * When the remote host receives our ack they'll free the sent message from
 * their send queue.  To decrease the latency of this we always send an ack
 * immediately after we've received messages.
 *
 * For simplicity, we only have one ack in flight at a time.  This puts
 * pressure on senders to have deep enough send queues to absorb the latency of
 * a single ack frame being in flight.  This might not be good enough.
 *
 * This is implemented by have a long-lived send_wr and sge which point to a
 * statically allocated ack frame.  This ack wr does not fall under the ring
 * accounting that the tx and rx wrs do.  The QP attribute specifically makes
 * room for it beyond the ring size.  Send completion notices its special
 * wr_id and avoids working with the ring in that case.
 */

#ifndef KERNEL_HAS_ATOMIC64
/* Record the next sequence to ack; a spinlock makes the 64bit store atomic. */
static void rds_iw_set_ack(struct rds_iw_connection *ic, u64 seq,
			   int ack_required)
{
	unsigned long flags;

	spin_lock_irqsave(&ic->i_ack_lock, flags);
	ic->i_ack_next = seq;
	if (ack_required)
		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
	spin_unlock_irqrestore(&ic->i_ack_lock, flags);
}

/* Fetch the pending ack sequence, clearing the request flag first. */
static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
{
	unsigned long flags;
	u64 seq;

	clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);

	spin_lock_irqsave(&ic->i_ack_lock, flags);
	seq = ic->i_ack_next;
	spin_unlock_irqrestore(&ic->i_ack_lock, flags);

	return seq;
}
#else
/* atomic64 variants: the 64bit sequence itself needs no spinlock. */
static void rds_iw_set_ack(struct rds_iw_connection *ic, u64 seq,
			   int ack_required)
{
	atomic64_set(&ic->i_ack_next, seq);
	if (ack_required) {
		/* Order the sequence store before publishing the request bit. */
		smp_mb__before_clear_bit();
		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
	}
}

static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
{
	clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
	/* Order the bit clear before reading the sequence. */
	smp_mb__after_clear_bit();

	return atomic64_read(&ic->i_ack_next);
}
#endif

/* Build and post the connection's single standalone ACK frame. */
static void rds_iw_send_ack(struct rds_iw_connection *ic, unsigned int adv_credits)
{
	struct rds_header *hdr = ic->i_ack;
	struct ib_send_wr *failed_wr;
	u64 seq;
	int ret;

	seq = rds_iw_get_ack(ic);

	rdsdebug("send_ack: ic %p ack %llu\n", ic, (unsigned long long) seq);
	rds_message_populate_header(hdr, 0, 0, 0);
	hdr->h_ack = cpu_to_be64(seq);
	hdr->h_credit = adv_credits;
	rds_message_make_checksum(hdr);
	ic->i_ack_queued = jiffies;

	ret = ib_post_send(ic->i_cm_id->qp, &ic->i_ack_wr, &failed_wr);
	if (unlikely(ret)) {
		/* Failed to send. Release the WR, and
		 * force another ACK.
		 */
		clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);

		rds_iw_stats_inc(s_iw_ack_send_failure);
		/* Need to finesse this later. */
		BUG();
	} else
		rds_iw_stats_inc(s_iw_ack_sent);
}

/*
 * There are 3 ways of getting acknowledgements to the peer:
 *  1.	We call rds_iw_attempt_ack from the recv completion handler
 *	to send an ACK-only frame.
 *	However, there can be only one such frame in the send queue
 *	at any time, so we may have to postpone it.
 *  2.	When another (data) packet is transmitted while there's
 *	an ACK in the queue, we piggyback the ACK sequence number
 *	on the data packet.
 *  3.	If the ACK WR is done sending, we get called from the
 *	send queue completion handler, and check whether there's
 *	another ACK pending (postponed because the WR was on the
 *	queue). If so, we transmit it.
 *
 * We maintain 2 variables:
 *  -	i_ack_flags, which keeps track of whether the ACK WR
 *	is currently in the send queue or not (IB_ACK_IN_FLIGHT)
 *  -	i_ack_next, which is the last sequence number we received
 *
 * Potentially, send queue and receive queue handlers can run concurrently.
 * It would be nice to not have to use a spinlock to synchronize things,
 * but the one problem that rules this out is that 64bit updates are
 * not atomic on all platforms. Things would be a lot simpler if
 * we had atomic64 or maybe cmpxchg64 everywhere.
 *
 * Reconnecting complicates this picture just slightly. When we
 * reconnect, we may be seeing duplicate packets. The peer
 * is retransmitting them, because it hasn't seen an ACK for
 * them. It is important that we ACK these.
 *
 * ACK mitigation adds a header flag "ACK_REQUIRED"; any packet with
 * this flag set *MUST* be acknowledged immediately.
 */

/*
 * When we get here, we're called from the recv queue handler.
 * Check whether we ought to transmit an ACK.
 */
void rds_iw_attempt_ack(struct rds_iw_connection *ic)
{
	unsigned int adv_credits;

	if (!test_bit(IB_ACK_REQUESTED, &ic->i_ack_flags))
		return;

	/* Only one standalone ACK may be in flight at a time. */
	if (test_and_set_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags)) {
		rds_iw_stats_inc(s_iw_ack_send_delayed);
		return;
	}

	/* Can we get a send credit? */
	if (!rds_iw_send_grab_credits(ic, 1, &adv_credits, 0, RDS_MAX_ADV_CREDIT)) {
		rds_iw_stats_inc(s_iw_tx_throttle);
		/* No credit: give the in-flight slot back and try later. */
		clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
		return;
	}

	clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
	rds_iw_send_ack(ic, adv_credits);
}

/*
 * We get here from the send completion handler, when the
 * adapter tells us the ACK frame was sent.
 */
void rds_iw_ack_send_complete(struct rds_iw_connection *ic)
{
	clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
	/* An ACK request may have been postponed while ours was in flight. */
	rds_iw_attempt_ack(ic);
}

/*
 * This is called by the regular xmit code when it wants to piggyback
 * an ACK on an outgoing frame.
 */
u64 rds_iw_piggyb_ack(struct rds_iw_connection *ic)
{
	if (test_and_clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags))
		rds_iw_stats_inc(s_iw_ack_send_piggybacked);
	return rds_iw_get_ack(ic);
}

/*
 * It's kind of lame that we're copying from the posted receive pages into
 * long-lived bitmaps.  We could have posted the bitmaps and rdma written into
 * them.  But receiving new congestion bitmaps should be a *rare* event, so
 * hopefully we won't need to invest that complexity in making it more
 * efficient.  By copying we can share a simpler core with TCP which has to
 * copy.
 */
static void rds_iw_cong_recv(struct rds_connection *conn,
			      struct rds_iw_incoming *iwinc)
{
	struct rds_cong_map *map;
	unsigned int map_off;
	unsigned int map_page;
	struct rds_page_frag *frag;
	unsigned long frag_off;
	unsigned long to_copy;
	unsigned long copied;
	uint64_t uncongested = 0;
	void *addr;

	/* catch completely corrupt packets */
	if (be32_to_cpu(iwinc->ii_inc.i_hdr.h_len) != RDS_CONG_MAP_BYTES)
		return;

	map = conn->c_fcong;
	map_page = 0;
	map_off = 0;

	frag = list_entry(iwinc->ii_frags.next, struct rds_page_frag, f_item);
	frag_off = 0;

	copied = 0;

	while (copied < RDS_CONG_MAP_BYTES) {
		uint64_t *src, *dst;
		unsigned int k;

		to_copy = min(RDS_FRAG_SIZE - frag_off, PAGE_SIZE - map_off);
		BUG_ON(to_copy & 7); /* Must be 64bit aligned.
				      */

		addr = kmap_atomic(frag->f_page, KM_SOFTIRQ0);

		src = addr + frag_off;
		dst = (void *)map->m_page_addrs[map_page] + map_off;
		for (k = 0; k < to_copy; k += 8) {
			/* Record ports that became uncongested, ie
			 * bits that changed from 0 to 1. */
			uncongested |= ~(*src) & *dst;
			*dst++ = *src++;
		}
		kunmap_atomic(addr, KM_SOFTIRQ0);

		copied += to_copy;

		map_off += to_copy;
		if (map_off == PAGE_SIZE) {
			map_off = 0;
			map_page++;
		}

		frag_off += to_copy;
		if (frag_off == RDS_FRAG_SIZE) {
			frag = list_entry(frag->f_item.next,
					  struct rds_page_frag, f_item);
			frag_off = 0;
		}
	}

	/* the congestion map is in little endian order */
	uncongested = le64_to_cpu(uncongested);

	rds_cong_map_updated(map, uncongested);
}

/*
 * Rings are posted with all the allocations they'll need to queue the
 * incoming message to the receiving socket so this can't fail.
 * All fragments start with a header, so we can make sure we're not receiving
 * garbage, and we can tell a small 8 byte fragment from an ACK frame.
 */
struct rds_iw_ack_state {
	u64		ack_next;	/* next sequence to acknowledge */
	u64		ack_recv;	/* highest ack seen from the peer */
	unsigned int	ack_required:1;
	unsigned int	ack_next_valid:1;
	unsigned int	ack_recv_valid:1;
};

/*
 * Validate one completed receive and either fold its fragment into the
 * in-progress incoming message or hand a completed message up the stack.
 * Ack bookkeeping is accumulated into *state for the CQ handler to apply.
 */
static void rds_iw_process_recv(struct rds_connection *conn,
				struct rds_iw_recv_work *recv, u32 byte_len,
				struct rds_iw_ack_state *state)
{
	struct rds_iw_connection *ic = conn->c_transport_data;
	struct rds_iw_incoming *iwinc = ic->i_iwinc;
	struct rds_header *ihdr, *hdr;

	/* XXX shut down the connection if port 0,0 are seen? */

	rdsdebug("ic %p iwinc %p recv %p byte len %u\n", ic, iwinc, recv,
		 byte_len);

	if (byte_len < sizeof(struct rds_header)) {
		rds_iw_conn_error(conn, "incoming message "
		       "from %pI4 didn't inclue a "
		       "header, disconnecting and "
		       "reconnecting\n",
		       &conn->c_faddr);
		return;
	}
	byte_len -= sizeof(struct rds_header);

	/* The header DMAs into a separate per-entry slot, not the data page. */
	ihdr = &ic->i_recv_hdrs[recv - ic->i_recvs];

	/* Validate the checksum. */
	if (!rds_message_verify_checksum(ihdr)) {
		rds_iw_conn_error(conn, "incoming message "
		       "from %pI4 has corrupted header - "
		       "forcing a reconnect\n",
		       &conn->c_faddr);
		rds_stats_inc(s_recv_drop_bad_checksum);
		return;
	}

	/* Process the ACK sequence which comes with every packet */
	state->ack_recv = be64_to_cpu(ihdr->h_ack);
	state->ack_recv_valid = 1;

	/* Process the credits update if there was one */
	if (ihdr->h_credit)
		rds_iw_send_add_credits(conn, ihdr->h_credit);

	if (ihdr->h_sport == 0 && ihdr->h_dport == 0 && byte_len == 0) {
		/* This is an ACK-only packet. The fact that it gets
		 * special treatment here is that historically, ACKs
		 * were rather special beasts.
		 */
		rds_iw_stats_inc(s_iw_ack_received);

		/*
		 * Usually the frags make their way on to incs and are then freed as
		 * the inc is freed.  We don't go that route, so we have to drop the
		 * page ref ourselves.  We can't just leave the page on the recv
		 * because that confuses the dma mapping of pages and each recv's use
		 * of a partial page.  We can leave the frag, though, it will be
		 * reused.
		 *
		 * FIXME: Fold this into the code path below.
		 */
		rds_iw_frag_drop_page(recv->r_frag);
		return;
	}

	/*
	 * If we don't already have an inc on the connection then this
	 * fragment has a header and starts a message.. copy its header
	 * into the inc and save the inc so we can hang upcoming fragments
	 * off its list.
	 */
	if (iwinc == NULL) {
		iwinc = recv->r_iwinc;
		recv->r_iwinc = NULL;
		ic->i_iwinc = iwinc;

		hdr = &iwinc->ii_inc.i_hdr;
		memcpy(hdr, ihdr, sizeof(*hdr));
		ic->i_recv_data_rem = be32_to_cpu(hdr->h_len);

		rdsdebug("ic %p iwinc %p rem %u flag 0x%x\n", ic, iwinc,
			 ic->i_recv_data_rem, hdr->h_flags);
	} else {
		hdr = &iwinc->ii_inc.i_hdr;
		/* We can't just use memcmp here; fragments of a
		 * single message may carry different ACKs */
		if (hdr->h_sequence != ihdr->h_sequence ||
		    hdr->h_len != ihdr->h_len ||
		    hdr->h_sport != ihdr->h_sport ||
		    hdr->h_dport != ihdr->h_dport) {
			rds_iw_conn_error(conn,
				"fragment header mismatch; forcing reconnect\n");
			return;
		}
	}

	list_add_tail(&recv->r_frag->f_item, &iwinc->ii_frags);
	recv->r_frag = NULL;

	if (ic->i_recv_data_rem > RDS_FRAG_SIZE)
		ic->i_recv_data_rem -= RDS_FRAG_SIZE;
	else {
		/* Last fragment: the message is complete, deliver it. */
		ic->i_recv_data_rem = 0;
		ic->i_iwinc = NULL;

		if (iwinc->ii_inc.i_hdr.h_flags == RDS_FLAG_CONG_BITMAP)
			rds_iw_cong_recv(conn, iwinc);
		else {
			rds_recv_incoming(conn, conn->c_faddr, conn->c_laddr,
					  &iwinc->ii_inc, GFP_ATOMIC,
					  KM_SOFTIRQ0);
			state->ack_next = be64_to_cpu(hdr->h_sequence);
			state->ack_next_valid = 1;
		}

		/* Evaluate the ACK_REQUIRED flag *after* we received
		 * the complete frame, and after bumping the next_rx
		 * sequence. */
		if (hdr->h_flags & RDS_FLAG_ACK_REQUIRED) {
			rds_stats_inc(s_recv_ack_required);
			state->ack_required = 1;
		}

		rds_inc_put(&iwinc->ii_inc);
	}
}

/*
 * Plucking the oldest entry from the ring can be done concurrently with
 * the thread refilling the ring.  Each ring operation is protected by
 * spinlocks and the transient state of refilling doesn't change the
 * recording of which entry is oldest.
 *
 * This relies on IB only calling one cq comp_handler for each cq so that
 * there will only be one caller of rds_recv_incoming() per RDS connection.
 */
void rds_iw_recv_cq_comp_handler(struct ib_cq *cq, void *context)
{
	struct rds_connection *conn = context;
	struct rds_iw_connection *ic = conn->c_transport_data;
	struct ib_wc wc;
	struct rds_iw_ack_state state = { 0, };
	struct rds_iw_recv_work *recv;

	rdsdebug("conn %p cq %p\n", conn, cq);

	rds_iw_stats_inc(s_iw_rx_cq_call);

	/* Re-arm before polling so no completion is missed between passes. */
	ib_req_notify_cq(cq, IB_CQ_SOLICITED);

	while (ib_poll_cq(cq, 1, &wc) > 0) {
		rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n",
			 (unsigned long long)wc.wr_id, wc.status, wc.byte_len,
			 be32_to_cpu(wc.ex.imm_data));
		rds_iw_stats_inc(s_iw_rx_cq_event);

		/* Completions arrive in posting order; oldest ring entry first. */
		recv = &ic->i_recvs[rds_iw_ring_oldest(&ic->i_recv_ring)];

		rds_iw_recv_unmap_page(ic, recv);

		/*
		 * Also process recvs in connecting state because it is possible
		 * to get a recv completion _before_ the rdmacm ESTABLISHED
		 * event is processed.
		 */
		if (rds_conn_up(conn) || rds_conn_connecting(conn)) {
			/* We expect errors as the qp is drained during shutdown */
			if (wc.status == IB_WC_SUCCESS) {
				rds_iw_process_recv(conn, recv, wc.byte_len, &state);
			} else {
				rds_iw_conn_error(conn, "recv completion on "
				       "%pI4 had status %u, disconnecting and "
				       "reconnecting\n", &conn->c_faddr,
				       wc.status);
			}
		}

		rds_iw_ring_free(&ic->i_recv_ring, 1);
	}

	/* Apply the ack state accumulated across all polled completions. */
	if (state.ack_next_valid)
		rds_iw_set_ack(ic, state.ack_next, state.ack_required);
	if (state.ack_recv_valid && state.ack_recv > ic->i_ack_recv) {
		rds_send_drop_acked(conn, state.ack_recv, NULL);
		ic->i_ack_recv = state.ack_recv;
	}
	if (rds_conn_up(conn))
		rds_iw_attempt_ack(ic);

	/* If we ever end up with a really empty receive ring, we're
	 * in deep trouble, as the sender will definitely see RNR
	 * timeouts. */
	if (rds_iw_ring_empty(&ic->i_recv_ring))
		rds_iw_stats_inc(s_iw_rx_ring_empty);

	/*
	 * If the ring is running low, then schedule the thread to refill.
	 */
	if (rds_iw_ring_low(&ic->i_recv_ring))
		queue_delayed_work(rds_wq, &conn->c_recv_w, 0);
}

/*
 * Thread-context refill entry point.  Returns -ENOMEM so the caller backs
 * off when posting fails due to resource exhaustion.
 */
int rds_iw_recv(struct rds_connection *conn)
{
	struct rds_iw_connection *ic = conn->c_transport_data;
	int ret = 0;

	rdsdebug("conn %p\n", conn);

	/*
	 * If we get a temporary posting failure in this context then
	 * we're really low and we want the caller to back off for a bit.
	 */
	mutex_lock(&ic->i_recv_mutex);
	if (rds_iw_recv_refill(conn, GFP_KERNEL, GFP_HIGHUSER, 0))
		ret = -ENOMEM;
	else
		rds_iw_stats_inc(s_iw_rx_refill_from_thread);
	mutex_unlock(&ic->i_recv_mutex);

	if (rds_conn_up(conn))
		rds_iw_attempt_ack(ic);

	return ret;
}

/*
 * Module init: size the receive-memory cap from total RAM and create the
 * two slab caches.  Returns 0 on success, -ENOMEM if either cache fails.
 */
int __init rds_iw_recv_init(void)
{
	struct sysinfo si;
	int ret = -ENOMEM;

	/* Default to 30% of all available RAM for recv memory */
	si_meminfo(&si);
	rds_iw_sysctl_max_recv_allocation = si.totalram / 3 * PAGE_SIZE / RDS_FRAG_SIZE;

	rds_iw_incoming_slab = kmem_cache_create("rds_iw_incoming",
					sizeof(struct rds_iw_incoming),
					0, 0, NULL);
	if (rds_iw_incoming_slab == NULL)
		goto out;

	rds_iw_frag_slab = kmem_cache_create("rds_iw_frag",
					sizeof(struct rds_page_frag),
					0, 0, NULL);
	if (rds_iw_frag_slab == NULL)
		/* Undo the first cache so nothing leaks on partial failure. */
		kmem_cache_destroy(rds_iw_incoming_slab);
	else
		ret = 0;
out:
	return ret;
}

/* Module teardown: destroy both slab caches created by rds_iw_recv_init(). */
void rds_iw_recv_exit(void)
{
	kmem_cache_destroy(rds_iw_incoming_slab);
	kmem_cache_destroy(rds_iw_frag_slab);
}
gpl-2.0
EPDCenter/android_kernel_bq_dc_v1
drivers/usb/musb/ux500_dma.c
2274
12549
/*
 * drivers/usb/musb/ux500_dma.c
 *
 * U8500 and U5500 DMA support code
 *
 * Copyright (C) 2009 STMicroelectronics
 * Copyright (C) 2011 ST-Ericsson SA
 * Authors:
 *	Mian Yousaf Kaukab <mian.yousaf.kaukab@stericsson.com>
 *	Praveena Nadahally <praveen.nadahally@stericsson.com>
 *	Rajaram Regupathy <ragupathy.rajaram@stericsson.com>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/pfn.h>
#include <mach/usb.h>
#include "musb_core.h"

/* Per-endpoint DMA channel state wrapping a generic dmaengine channel. */
struct ux500_dma_channel {
	struct dma_channel channel;		/* musb-facing channel handle */
	struct ux500_dma_controller *controller;
	struct musb_hw_ep *hw_ep;
	struct work_struct channel_work;	/* completion deferred to a workqueue */
	struct dma_chan *dma_chan;		/* underlying dmaengine channel */
	unsigned int cur_len;			/* length of the transfer in flight */
	dma_cookie_t cookie;
	u8 ch_num;
	u8 is_tx;
	u8 is_allocated;
};

struct ux500_dma_controller {
	struct dma_controller controller;
	struct ux500_dma_channel rx_channel[UX500_MUSB_DMA_NUM_RX_CHANNELS];
	struct ux500_dma_channel tx_channel[UX500_MUSB_DMA_NUM_TX_CHANNELS];
	u32 num_rx_channels;
	u32 num_tx_channels;
	void *private_data;			/* the owning struct musb */
	dma_addr_t phy_base;			/* physical base of the musb registers */
};

/* Work function invoked from DMA callback to handle tx transfers. */
static void ux500_tx_work(struct work_struct *data)
{
	struct ux500_dma_channel *ux500_channel = container_of(data,
		struct ux500_dma_channel, channel_work);
	struct musb_hw_ep       *hw_ep = ux500_channel->hw_ep;
	struct musb *musb = hw_ep->musb;
	unsigned long flags;

	DBG(4, "DMA tx transfer done on hw_ep=%d\n", hw_ep->epnum);

	spin_lock_irqsave(&musb->lock, flags);
	ux500_channel->channel.actual_len = ux500_channel->cur_len;
	ux500_channel->channel.status = MUSB_DMA_STATUS_FREE;
	musb_dma_completion(musb, hw_ep->epnum,
				ux500_channel->is_tx);
	spin_unlock_irqrestore(&musb->lock, flags);
}

/* Work function invoked from DMA callback to handle rx transfers. */
static void ux500_rx_work(struct work_struct *data)
{
	struct ux500_dma_channel *ux500_channel = container_of(data,
		struct ux500_dma_channel, channel_work);
	struct musb_hw_ep       *hw_ep = ux500_channel->hw_ep;
	struct musb *musb = hw_ep->musb;
	unsigned long flags;

	DBG(4, "DMA rx transfer done on hw_ep=%d\n", hw_ep->epnum);

	spin_lock_irqsave(&musb->lock, flags);
	ux500_channel->channel.actual_len = ux500_channel->cur_len;
	ux500_channel->channel.status = MUSB_DMA_STATUS_FREE;
	musb_dma_completion(musb, hw_ep->epnum,
				ux500_channel->is_tx);
	spin_unlock_irqrestore(&musb->lock, flags);
}

/* dmaengine completion callback; runs in IRQ context, so defer to a work item. */
void ux500_dma_callback(void *private_data)
{
	struct dma_channel *channel = (struct dma_channel *)private_data;
	struct ux500_dma_channel *ux500_channel = channel->private_data;

	schedule_work(&ux500_channel->channel_work);
}

/*
 * Program and submit one slave-DMA transfer between memory and the
 * endpoint FIFO.  Returns true if the descriptor was prepared and
 * submitted, false if descriptor preparation failed.
 */
static bool ux500_configure_channel(struct dma_channel *channel,
				u16 packet_sz, u8 mode,
				dma_addr_t dma_addr, u32 len)
{
	struct ux500_dma_channel *ux500_channel = channel->private_data;
	struct musb_hw_ep *hw_ep = ux500_channel->hw_ep;
	struct dma_chan *dma_chan = ux500_channel->dma_chan;
	struct dma_async_tx_descriptor *dma_desc;
	enum dma_data_direction direction;
	struct scatterlist sg;
	struct dma_slave_config slave_conf;
	enum dma_slave_buswidth addr_width;
	/* Device-side address: FIFO offset on top of the controller's phys base. */
	dma_addr_t usb_fifo_addr = (MUSB_FIFO_OFFSET(hw_ep->epnum) +
					ux500_channel->controller->phy_base);

	DBG(4, "packet_sz=%d, mode=%d, dma_addr=0x%x, len=%d is_tx=%d\n",
			packet_sz, mode, dma_addr, len, ux500_channel->is_tx);

	ux500_channel->cur_len = len;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_addr)), len,
					    offset_in_page(dma_addr));
	sg_dma_address(&sg) = dma_addr;
	sg_dma_len(&sg) = len;

	direction = ux500_channel->is_tx ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
	/* Unaligned lengths force byte-wide accesses; otherwise use 32bit. */
	addr_width = (len & 0x3) ? DMA_SLAVE_BUSWIDTH_1_BYTE :
					DMA_SLAVE_BUSWIDTH_4_BYTES;

	slave_conf.direction = direction;
	if (direction == DMA_FROM_DEVICE) {
		slave_conf.src_addr = usb_fifo_addr;
		slave_conf.src_addr_width = addr_width;
		slave_conf.src_maxburst = 16;
	} else {
		slave_conf.dst_addr = usb_fifo_addr;
		slave_conf.dst_addr_width = addr_width;
		slave_conf.dst_maxburst = 16;
	}
	dma_chan->device->device_control(dma_chan, DMA_SLAVE_CONFIG,
					     (unsigned long) &slave_conf);

	dma_desc = dma_chan->device->
			device_prep_slave_sg(dma_chan, &sg, 1, direction,
					     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!dma_desc)
		return false;

	dma_desc->callback = ux500_dma_callback;
	dma_desc->callback_param = channel;
	ux500_channel->cookie = dma_desc->tx_submit(dma_desc);

	dma_async_issue_pending(dma_chan);

	return true;
}

/*
 * Hand out the channel statically associated with this endpoint, or NULL
 * if the endpoint maps outside the configured range or the channel is busy.
 */
static struct dma_channel *ux500_dma_channel_allocate(struct dma_controller *c,
				struct musb_hw_ep *hw_ep, u8 is_tx)
{
	struct ux500_dma_controller *controller = container_of(c,
			struct ux500_dma_controller, controller);
	struct ux500_dma_channel *ux500_channel = NULL;
	u8 ch_num = hw_ep->epnum - 1;
	u32 max_ch;

	/* Max 8 DMA channels (0 - 7). Each DMA channel can only be allocated
	 * to specified hw_ep. For example DMA channel 0 can only be allocated
	 * to hw_ep 1 and 9.
	 */
	if (ch_num > 7)
		ch_num -= 8;

	max_ch = is_tx ? controller->num_tx_channels :
			controller->num_rx_channels;
	if (ch_num >= max_ch)
		return NULL;

	ux500_channel = is_tx ? &(controller->tx_channel[ch_num]) :
				&(controller->rx_channel[ch_num]) ;

	/* Check if channel is already used.
	 */
	if (ux500_channel->is_allocated)
		return NULL;

	ux500_channel->hw_ep = hw_ep;
	ux500_channel->is_allocated = 1;

	DBG(7, "hw_ep=%d, is_tx=0x%x, channel=%d\n",
		hw_ep->epnum, is_tx, ch_num);

	return &(ux500_channel->channel);
}

/* Mark a channel free again; safe to call on an already-released channel. */
static void ux500_dma_channel_release(struct dma_channel *channel)
{
	struct ux500_dma_channel *ux500_channel = channel->private_data;

	DBG(7, "channel=%d\n", ux500_channel->ch_num);

	if (ux500_channel->is_allocated) {
		ux500_channel->is_allocated = 0;
		channel->status = MUSB_DMA_STATUS_FREE;
		channel->actual_len = 0;
	}
}

/*
 * DMA only handles word-aligned buffers/lengths of at least 512 bytes
 * with a word-aligned max packet size; everything else goes via PIO.
 */
static int ux500_dma_is_compatible(struct dma_channel *channel,
		u16 maxpacket, void *buf, u32 length)
{
	if ((maxpacket & 0x3)		||
		((int)buf & 0x3)	||
		(length < 512)		||
		(length & 0x3))
		return false;
	else
		return true;
}

/*
 * musb entry point for starting a transfer: marks the channel busy and
 * configures/submits the dmaengine descriptor.  Returns false when the
 * request is not DMA-compatible or setup failed (channel freed again).
 */
static int ux500_dma_channel_program(struct dma_channel *channel,
				u16 packet_sz, u8 mode,
				dma_addr_t dma_addr, u32 len)
{
	int ret;

	BUG_ON(channel->status == MUSB_DMA_STATUS_UNKNOWN ||
		channel->status == MUSB_DMA_STATUS_BUSY);

	if (!ux500_dma_is_compatible(channel, packet_sz, (void *)dma_addr, len))
		return false;

	channel->status = MUSB_DMA_STATUS_BUSY;
	channel->actual_len = 0;
	ret = ux500_configure_channel(channel, packet_sz, mode, dma_addr, len);
	if (!ret)
		channel->status = MUSB_DMA_STATUS_FREE;

	return ret;
}

/*
 * Abort an in-flight transfer: clear the endpoint's DMA-enable bits in
 * TXCSR/RXCSR first, then terminate the dmaengine channel.
 */
static int ux500_dma_channel_abort(struct dma_channel *channel)
{
	struct ux500_dma_channel *ux500_channel = channel->private_data;
	struct ux500_dma_controller *controller = ux500_channel->controller;
	struct musb *musb = controller->private_data;
	void __iomem *epio = musb->endpoints[ux500_channel->hw_ep->epnum].regs;
	u16 csr;

	DBG(4, "channel=%d, is_tx=%d\n", ux500_channel->ch_num,
		ux500_channel->is_tx);

	if (channel->status == MUSB_DMA_STATUS_BUSY) {
		if (ux500_channel->is_tx) {
			csr = musb_readw(epio, MUSB_TXCSR);
			csr &= ~(MUSB_TXCSR_AUTOSET |
				 MUSB_TXCSR_DMAENAB |
				 MUSB_TXCSR_DMAMODE);
			musb_writew(epio, MUSB_TXCSR, csr);
		} else {
			csr = musb_readw(epio, MUSB_RXCSR);
			csr &= ~(MUSB_RXCSR_AUTOCLEAR |
				 MUSB_RXCSR_DMAENAB |
				 MUSB_RXCSR_DMAMODE);
			musb_writew(epio, MUSB_RXCSR, csr);
		}

		ux500_channel->dma_chan->device->
				device_control(ux500_channel->dma_chan,
					DMA_TERMINATE_ALL, 0);
		channel->status = MUSB_DMA_STATUS_FREE;
	}
	return 0;
}

/* Release every channel and return its dmaengine channel to the core. */
static int ux500_dma_controller_stop(struct dma_controller *c)
{
	struct ux500_dma_controller *controller = container_of(c,
			struct ux500_dma_controller, controller);
	struct ux500_dma_channel *ux500_channel;
	struct dma_channel *channel;
	u8 ch_num;

	for (ch_num = 0; ch_num < controller->num_rx_channels; ch_num++) {
		channel = &controller->rx_channel[ch_num].channel;
		ux500_channel = channel->private_data;

		ux500_dma_channel_release(channel);

		if (ux500_channel->dma_chan)
			dma_release_channel(ux500_channel->dma_chan);
	}

	for (ch_num = 0; ch_num < controller->num_tx_channels; ch_num++) {
		channel = &controller->tx_channel[ch_num].channel;
		ux500_channel = channel->private_data;

		ux500_dma_channel_release(channel);

		if (ux500_channel->dma_chan)
			dma_release_channel(ux500_channel->dma_chan);
	}

	return 0;
}

/*
 * Request a dmaengine channel for every configured RX then TX channel
 * (the dir loop switches the loop parameters to TX after the first pass).
 * On any allocation failure all previously acquired channels are released
 * via ux500_dma_controller_stop() and -EBUSY is returned.
 */
static int ux500_dma_controller_start(struct dma_controller *c)
{
	struct ux500_dma_controller *controller = container_of(c,
			struct ux500_dma_controller, controller);
	struct ux500_dma_channel *ux500_channel = NULL;
	struct musb *musb = controller->private_data;
	struct device *dev = musb->controller;
	struct musb_hdrc_platform_data *plat = dev->platform_data;
	struct ux500_musb_board_data *data = plat->board_data;
	struct dma_channel *dma_channel = NULL;
	u32 ch_num;
	u8 dir;
	u8 is_tx = 0;

	void **param_array;
	struct ux500_dma_channel *channel_array;
	u32 ch_count;
	void (*musb_channel_work)(struct work_struct *);
	dma_cap_mask_t mask;

	if ((data->num_rx_channels > UX500_MUSB_DMA_NUM_RX_CHANNELS) ||
		(data->num_tx_channels > UX500_MUSB_DMA_NUM_TX_CHANNELS))
		return -EINVAL;

	controller->num_rx_channels = data->num_rx_channels;
	controller->num_tx_channels = data->num_tx_channels;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* Prepare the loop for RX channels */
	channel_array = controller->rx_channel;
	ch_count = data->num_rx_channels;
	param_array = data->dma_rx_param_array;
	musb_channel_work = ux500_rx_work;

	for (dir = 0; dir < 2; dir++) {
		for (ch_num = 0; ch_num < ch_count; ch_num++) {
			ux500_channel = &channel_array[ch_num];
			ux500_channel->controller = controller;
			ux500_channel->ch_num = ch_num;
			ux500_channel->is_tx = is_tx;

			dma_channel = &(ux500_channel->channel);
			dma_channel->private_data = ux500_channel;
			dma_channel->status = MUSB_DMA_STATUS_FREE;
			dma_channel->max_len = SZ_16M;

			ux500_channel->dma_chan = dma_request_channel(mask,
							data->dma_filter,
							param_array[ch_num]);
			if (!ux500_channel->dma_chan) {
				ERR("Dma pipe allocation error dir=%d ch=%d\n",
					dir, ch_num);

				/* Release already allocated channels */
				ux500_dma_controller_stop(c);

				return -EBUSY;
			}

			INIT_WORK(&ux500_channel->channel_work,
				musb_channel_work);
		}

		/* Prepare the loop for TX channels */
		channel_array = controller->tx_channel;
		ch_count = data->num_tx_channels;
		param_array = data->dma_tx_param_array;
		musb_channel_work = ux500_tx_work;
		is_tx = 1;
	}

	return 0;
}

/* Free the controller allocated by dma_controller_create(). */
void dma_controller_destroy(struct dma_controller *c)
{
	struct ux500_dma_controller *controller = container_of(c,
			struct ux500_dma_controller, controller);

	kfree(controller);
}

/*
 * Allocate and wire up the ux500 DMA controller for this musb instance.
 * Returns the generic controller handle, or NULL on allocation failure.
 */
struct dma_controller *__init
dma_controller_create(struct musb *musb, void __iomem *base)
{
	struct ux500_dma_controller *controller;
	struct platform_device *pdev = to_platform_device(musb->controller);
	struct resource	*iomem;

	controller = kzalloc(sizeof(*controller), GFP_KERNEL);
	if (!controller)
		return NULL;

	controller->private_data = musb;

	/* Save physical address for DMA controller. */
	iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	controller->phy_base = (dma_addr_t) iomem->start;

	controller->controller.start = ux500_dma_controller_start;
	controller->controller.stop = ux500_dma_controller_stop;
	controller->controller.channel_alloc = ux500_dma_channel_allocate;
	controller->controller.channel_release = ux500_dma_channel_release;
	controller->controller.channel_program = ux500_dma_channel_program;
	controller->controller.channel_abort = ux500_dma_channel_abort;
	controller->controller.is_compatible = ux500_dma_is_compatible;

	return &controller->controller;
}
gpl-2.0
osmc/vero2-linux
drivers/pinctrl/pinctrl-tegra30.c
2530
112739
/* * Pinctrl data for the NVIDIA Tegra30 pinmux * * Copyright (c) 2011-2012, NVIDIA CORPORATION. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. */ #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/pinctrl/pinctrl.h> #include <linux/pinctrl/pinmux.h> #include "pinctrl-tegra.h" /* * Most pins affected by the pinmux can also be GPIOs. Define these first. * These must match how the GPIO driver names/numbers its pins. */ #define _GPIO(offset) (offset) #define TEGRA_PIN_CLK_32K_OUT_PA0 _GPIO(0) #define TEGRA_PIN_UART3_CTS_N_PA1 _GPIO(1) #define TEGRA_PIN_DAP2_FS_PA2 _GPIO(2) #define TEGRA_PIN_DAP2_SCLK_PA3 _GPIO(3) #define TEGRA_PIN_DAP2_DIN_PA4 _GPIO(4) #define TEGRA_PIN_DAP2_DOUT_PA5 _GPIO(5) #define TEGRA_PIN_SDMMC3_CLK_PA6 _GPIO(6) #define TEGRA_PIN_SDMMC3_CMD_PA7 _GPIO(7) #define TEGRA_PIN_GMI_A17_PB0 _GPIO(8) #define TEGRA_PIN_GMI_A18_PB1 _GPIO(9) #define TEGRA_PIN_LCD_PWR0_PB2 _GPIO(10) #define TEGRA_PIN_LCD_PCLK_PB3 _GPIO(11) #define TEGRA_PIN_SDMMC3_DAT3_PB4 _GPIO(12) #define TEGRA_PIN_SDMMC3_DAT2_PB5 _GPIO(13) #define TEGRA_PIN_SDMMC3_DAT1_PB6 _GPIO(14) #define TEGRA_PIN_SDMMC3_DAT0_PB7 _GPIO(15) #define TEGRA_PIN_UART3_RTS_N_PC0 _GPIO(16) #define TEGRA_PIN_LCD_PWR1_PC1 _GPIO(17) #define TEGRA_PIN_UART2_TXD_PC2 _GPIO(18) #define TEGRA_PIN_UART2_RXD_PC3 _GPIO(19) #define TEGRA_PIN_GEN1_I2C_SCL_PC4 _GPIO(20) #define TEGRA_PIN_GEN1_I2C_SDA_PC5 _GPIO(21) #define TEGRA_PIN_LCD_PWR2_PC6 _GPIO(22) #define TEGRA_PIN_GMI_WP_N_PC7 _GPIO(23) #define TEGRA_PIN_SDMMC3_DAT5_PD0 _GPIO(24) #define 
TEGRA_PIN_SDMMC3_DAT4_PD1 _GPIO(25) #define TEGRA_PIN_LCD_DC1_PD2 _GPIO(26) #define TEGRA_PIN_SDMMC3_DAT6_PD3 _GPIO(27) #define TEGRA_PIN_SDMMC3_DAT7_PD4 _GPIO(28) #define TEGRA_PIN_VI_D1_PD5 _GPIO(29) #define TEGRA_PIN_VI_VSYNC_PD6 _GPIO(30) #define TEGRA_PIN_VI_HSYNC_PD7 _GPIO(31) #define TEGRA_PIN_LCD_D0_PE0 _GPIO(32) #define TEGRA_PIN_LCD_D1_PE1 _GPIO(33) #define TEGRA_PIN_LCD_D2_PE2 _GPIO(34) #define TEGRA_PIN_LCD_D3_PE3 _GPIO(35) #define TEGRA_PIN_LCD_D4_PE4 _GPIO(36) #define TEGRA_PIN_LCD_D5_PE5 _GPIO(37) #define TEGRA_PIN_LCD_D6_PE6 _GPIO(38) #define TEGRA_PIN_LCD_D7_PE7 _GPIO(39) #define TEGRA_PIN_LCD_D8_PF0 _GPIO(40) #define TEGRA_PIN_LCD_D9_PF1 _GPIO(41) #define TEGRA_PIN_LCD_D10_PF2 _GPIO(42) #define TEGRA_PIN_LCD_D11_PF3 _GPIO(43) #define TEGRA_PIN_LCD_D12_PF4 _GPIO(44) #define TEGRA_PIN_LCD_D13_PF5 _GPIO(45) #define TEGRA_PIN_LCD_D14_PF6 _GPIO(46) #define TEGRA_PIN_LCD_D15_PF7 _GPIO(47) #define TEGRA_PIN_GMI_AD0_PG0 _GPIO(48) #define TEGRA_PIN_GMI_AD1_PG1 _GPIO(49) #define TEGRA_PIN_GMI_AD2_PG2 _GPIO(50) #define TEGRA_PIN_GMI_AD3_PG3 _GPIO(51) #define TEGRA_PIN_GMI_AD4_PG4 _GPIO(52) #define TEGRA_PIN_GMI_AD5_PG5 _GPIO(53) #define TEGRA_PIN_GMI_AD6_PG6 _GPIO(54) #define TEGRA_PIN_GMI_AD7_PG7 _GPIO(55) #define TEGRA_PIN_GMI_AD8_PH0 _GPIO(56) #define TEGRA_PIN_GMI_AD9_PH1 _GPIO(57) #define TEGRA_PIN_GMI_AD10_PH2 _GPIO(58) #define TEGRA_PIN_GMI_AD11_PH3 _GPIO(59) #define TEGRA_PIN_GMI_AD12_PH4 _GPIO(60) #define TEGRA_PIN_GMI_AD13_PH5 _GPIO(61) #define TEGRA_PIN_GMI_AD14_PH6 _GPIO(62) #define TEGRA_PIN_GMI_AD15_PH7 _GPIO(63) #define TEGRA_PIN_GMI_WR_N_PI0 _GPIO(64) #define TEGRA_PIN_GMI_OE_N_PI1 _GPIO(65) #define TEGRA_PIN_GMI_DQS_PI2 _GPIO(66) #define TEGRA_PIN_GMI_CS6_N_PI3 _GPIO(67) #define TEGRA_PIN_GMI_RST_N_PI4 _GPIO(68) #define TEGRA_PIN_GMI_IORDY_PI5 _GPIO(69) #define TEGRA_PIN_GMI_CS7_N_PI6 _GPIO(70) #define TEGRA_PIN_GMI_WAIT_PI7 _GPIO(71) #define TEGRA_PIN_GMI_CS0_N_PJ0 _GPIO(72) #define TEGRA_PIN_LCD_DE_PJ1 _GPIO(73) #define 
TEGRA_PIN_GMI_CS1_N_PJ2 _GPIO(74) #define TEGRA_PIN_LCD_HSYNC_PJ3 _GPIO(75) #define TEGRA_PIN_LCD_VSYNC_PJ4 _GPIO(76) #define TEGRA_PIN_UART2_CTS_N_PJ5 _GPIO(77) #define TEGRA_PIN_UART2_RTS_N_PJ6 _GPIO(78) #define TEGRA_PIN_GMI_A16_PJ7 _GPIO(79) #define TEGRA_PIN_GMI_ADV_N_PK0 _GPIO(80) #define TEGRA_PIN_GMI_CLK_PK1 _GPIO(81) #define TEGRA_PIN_GMI_CS4_N_PK2 _GPIO(82) #define TEGRA_PIN_GMI_CS2_N_PK3 _GPIO(83) #define TEGRA_PIN_GMI_CS3_N_PK4 _GPIO(84) #define TEGRA_PIN_SPDIF_OUT_PK5 _GPIO(85) #define TEGRA_PIN_SPDIF_IN_PK6 _GPIO(86) #define TEGRA_PIN_GMI_A19_PK7 _GPIO(87) #define TEGRA_PIN_VI_D2_PL0 _GPIO(88) #define TEGRA_PIN_VI_D3_PL1 _GPIO(89) #define TEGRA_PIN_VI_D4_PL2 _GPIO(90) #define TEGRA_PIN_VI_D5_PL3 _GPIO(91) #define TEGRA_PIN_VI_D6_PL4 _GPIO(92) #define TEGRA_PIN_VI_D7_PL5 _GPIO(93) #define TEGRA_PIN_VI_D8_PL6 _GPIO(94) #define TEGRA_PIN_VI_D9_PL7 _GPIO(95) #define TEGRA_PIN_LCD_D16_PM0 _GPIO(96) #define TEGRA_PIN_LCD_D17_PM1 _GPIO(97) #define TEGRA_PIN_LCD_D18_PM2 _GPIO(98) #define TEGRA_PIN_LCD_D19_PM3 _GPIO(99) #define TEGRA_PIN_LCD_D20_PM4 _GPIO(100) #define TEGRA_PIN_LCD_D21_PM5 _GPIO(101) #define TEGRA_PIN_LCD_D22_PM6 _GPIO(102) #define TEGRA_PIN_LCD_D23_PM7 _GPIO(103) #define TEGRA_PIN_DAP1_FS_PN0 _GPIO(104) #define TEGRA_PIN_DAP1_DIN_PN1 _GPIO(105) #define TEGRA_PIN_DAP1_DOUT_PN2 _GPIO(106) #define TEGRA_PIN_DAP1_SCLK_PN3 _GPIO(107) #define TEGRA_PIN_LCD_CS0_N_PN4 _GPIO(108) #define TEGRA_PIN_LCD_SDOUT_PN5 _GPIO(109) #define TEGRA_PIN_LCD_DC0_PN6 _GPIO(110) #define TEGRA_PIN_HDMI_INT_PN7 _GPIO(111) #define TEGRA_PIN_ULPI_DATA7_PO0 _GPIO(112) #define TEGRA_PIN_ULPI_DATA0_PO1 _GPIO(113) #define TEGRA_PIN_ULPI_DATA1_PO2 _GPIO(114) #define TEGRA_PIN_ULPI_DATA2_PO3 _GPIO(115) #define TEGRA_PIN_ULPI_DATA3_PO4 _GPIO(116) #define TEGRA_PIN_ULPI_DATA4_PO5 _GPIO(117) #define TEGRA_PIN_ULPI_DATA5_PO6 _GPIO(118) #define TEGRA_PIN_ULPI_DATA6_PO7 _GPIO(119) #define TEGRA_PIN_DAP3_FS_PP0 _GPIO(120) #define TEGRA_PIN_DAP3_DIN_PP1 _GPIO(121) #define 
TEGRA_PIN_DAP3_DOUT_PP2 _GPIO(122) #define TEGRA_PIN_DAP3_SCLK_PP3 _GPIO(123) #define TEGRA_PIN_DAP4_FS_PP4 _GPIO(124) #define TEGRA_PIN_DAP4_DIN_PP5 _GPIO(125) #define TEGRA_PIN_DAP4_DOUT_PP6 _GPIO(126) #define TEGRA_PIN_DAP4_SCLK_PP7 _GPIO(127) #define TEGRA_PIN_KB_COL0_PQ0 _GPIO(128) #define TEGRA_PIN_KB_COL1_PQ1 _GPIO(129) #define TEGRA_PIN_KB_COL2_PQ2 _GPIO(130) #define TEGRA_PIN_KB_COL3_PQ3 _GPIO(131) #define TEGRA_PIN_KB_COL4_PQ4 _GPIO(132) #define TEGRA_PIN_KB_COL5_PQ5 _GPIO(133) #define TEGRA_PIN_KB_COL6_PQ6 _GPIO(134) #define TEGRA_PIN_KB_COL7_PQ7 _GPIO(135) #define TEGRA_PIN_KB_ROW0_PR0 _GPIO(136) #define TEGRA_PIN_KB_ROW1_PR1 _GPIO(137) #define TEGRA_PIN_KB_ROW2_PR2 _GPIO(138) #define TEGRA_PIN_KB_ROW3_PR3 _GPIO(139) #define TEGRA_PIN_KB_ROW4_PR4 _GPIO(140) #define TEGRA_PIN_KB_ROW5_PR5 _GPIO(141) #define TEGRA_PIN_KB_ROW6_PR6 _GPIO(142) #define TEGRA_PIN_KB_ROW7_PR7 _GPIO(143) #define TEGRA_PIN_KB_ROW8_PS0 _GPIO(144) #define TEGRA_PIN_KB_ROW9_PS1 _GPIO(145) #define TEGRA_PIN_KB_ROW10_PS2 _GPIO(146) #define TEGRA_PIN_KB_ROW11_PS3 _GPIO(147) #define TEGRA_PIN_KB_ROW12_PS4 _GPIO(148) #define TEGRA_PIN_KB_ROW13_PS5 _GPIO(149) #define TEGRA_PIN_KB_ROW14_PS6 _GPIO(150) #define TEGRA_PIN_KB_ROW15_PS7 _GPIO(151) #define TEGRA_PIN_VI_PCLK_PT0 _GPIO(152) #define TEGRA_PIN_VI_MCLK_PT1 _GPIO(153) #define TEGRA_PIN_VI_D10_PT2 _GPIO(154) #define TEGRA_PIN_VI_D11_PT3 _GPIO(155) #define TEGRA_PIN_VI_D0_PT4 _GPIO(156) #define TEGRA_PIN_GEN2_I2C_SCL_PT5 _GPIO(157) #define TEGRA_PIN_GEN2_I2C_SDA_PT6 _GPIO(158) #define TEGRA_PIN_SDMMC4_CMD_PT7 _GPIO(159) #define TEGRA_PIN_PU0 _GPIO(160) #define TEGRA_PIN_PU1 _GPIO(161) #define TEGRA_PIN_PU2 _GPIO(162) #define TEGRA_PIN_PU3 _GPIO(163) #define TEGRA_PIN_PU4 _GPIO(164) #define TEGRA_PIN_PU5 _GPIO(165) #define TEGRA_PIN_PU6 _GPIO(166) #define TEGRA_PIN_JTAG_RTCK_PU7 _GPIO(167) #define TEGRA_PIN_PV0 _GPIO(168) #define TEGRA_PIN_PV1 _GPIO(169) #define TEGRA_PIN_PV2 _GPIO(170) #define TEGRA_PIN_PV3 _GPIO(171) #define 
TEGRA_PIN_DDC_SCL_PV4 _GPIO(172) #define TEGRA_PIN_DDC_SDA_PV5 _GPIO(173) #define TEGRA_PIN_CRT_HSYNC_PV6 _GPIO(174) #define TEGRA_PIN_CRT_VSYNC_PV7 _GPIO(175) #define TEGRA_PIN_LCD_CS1_N_PW0 _GPIO(176) #define TEGRA_PIN_LCD_M1_PW1 _GPIO(177) #define TEGRA_PIN_SPI2_CS1_N_PW2 _GPIO(178) #define TEGRA_PIN_SPI2_CS2_N_PW3 _GPIO(179) #define TEGRA_PIN_CLK1_OUT_PW4 _GPIO(180) #define TEGRA_PIN_CLK2_OUT_PW5 _GPIO(181) #define TEGRA_PIN_UART3_TXD_PW6 _GPIO(182) #define TEGRA_PIN_UART3_RXD_PW7 _GPIO(183) #define TEGRA_PIN_SPI2_MOSI_PX0 _GPIO(184) #define TEGRA_PIN_SPI2_MISO_PX1 _GPIO(185) #define TEGRA_PIN_SPI2_SCK_PX2 _GPIO(186) #define TEGRA_PIN_SPI2_CS0_N_PX3 _GPIO(187) #define TEGRA_PIN_SPI1_MOSI_PX4 _GPIO(188) #define TEGRA_PIN_SPI1_SCK_PX5 _GPIO(189) #define TEGRA_PIN_SPI1_CS0_N_PX6 _GPIO(190) #define TEGRA_PIN_SPI1_MISO_PX7 _GPIO(191) #define TEGRA_PIN_ULPI_CLK_PY0 _GPIO(192) #define TEGRA_PIN_ULPI_DIR_PY1 _GPIO(193) #define TEGRA_PIN_ULPI_NXT_PY2 _GPIO(194) #define TEGRA_PIN_ULPI_STP_PY3 _GPIO(195) #define TEGRA_PIN_SDMMC1_DAT3_PY4 _GPIO(196) #define TEGRA_PIN_SDMMC1_DAT2_PY5 _GPIO(197) #define TEGRA_PIN_SDMMC1_DAT1_PY6 _GPIO(198) #define TEGRA_PIN_SDMMC1_DAT0_PY7 _GPIO(199) #define TEGRA_PIN_SDMMC1_CLK_PZ0 _GPIO(200) #define TEGRA_PIN_SDMMC1_CMD_PZ1 _GPIO(201) #define TEGRA_PIN_LCD_SDIN_PZ2 _GPIO(202) #define TEGRA_PIN_LCD_WR_N_PZ3 _GPIO(203) #define TEGRA_PIN_LCD_SCK_PZ4 _GPIO(204) #define TEGRA_PIN_SYS_CLK_REQ_PZ5 _GPIO(205) #define TEGRA_PIN_PWR_I2C_SCL_PZ6 _GPIO(206) #define TEGRA_PIN_PWR_I2C_SDA_PZ7 _GPIO(207) #define TEGRA_PIN_SDMMC4_DAT0_PAA0 _GPIO(208) #define TEGRA_PIN_SDMMC4_DAT1_PAA1 _GPIO(209) #define TEGRA_PIN_SDMMC4_DAT2_PAA2 _GPIO(210) #define TEGRA_PIN_SDMMC4_DAT3_PAA3 _GPIO(211) #define TEGRA_PIN_SDMMC4_DAT4_PAA4 _GPIO(212) #define TEGRA_PIN_SDMMC4_DAT5_PAA5 _GPIO(213) #define TEGRA_PIN_SDMMC4_DAT6_PAA6 _GPIO(214) #define TEGRA_PIN_SDMMC4_DAT7_PAA7 _GPIO(215) #define TEGRA_PIN_PBB0 _GPIO(216) #define TEGRA_PIN_CAM_I2C_SCL_PBB1 _GPIO(217) #define 
TEGRA_PIN_CAM_I2C_SDA_PBB2 _GPIO(218) #define TEGRA_PIN_PBB3 _GPIO(219) #define TEGRA_PIN_PBB4 _GPIO(220) #define TEGRA_PIN_PBB5 _GPIO(221) #define TEGRA_PIN_PBB6 _GPIO(222) #define TEGRA_PIN_PBB7 _GPIO(223) #define TEGRA_PIN_CAM_MCLK_PCC0 _GPIO(224) #define TEGRA_PIN_PCC1 _GPIO(225) #define TEGRA_PIN_PCC2 _GPIO(226) #define TEGRA_PIN_SDMMC4_RST_N_PCC3 _GPIO(227) #define TEGRA_PIN_SDMMC4_CLK_PCC4 _GPIO(228) #define TEGRA_PIN_CLK2_REQ_PCC5 _GPIO(229) #define TEGRA_PIN_PEX_L2_RST_N_PCC6 _GPIO(230) #define TEGRA_PIN_PEX_L2_CLKREQ_N_PCC7 _GPIO(231) #define TEGRA_PIN_PEX_L0_PRSNT_N_PDD0 _GPIO(232) #define TEGRA_PIN_PEX_L0_RST_N_PDD1 _GPIO(233) #define TEGRA_PIN_PEX_L0_CLKREQ_N_PDD2 _GPIO(234) #define TEGRA_PIN_PEX_WAKE_N_PDD3 _GPIO(235) #define TEGRA_PIN_PEX_L1_PRSNT_N_PDD4 _GPIO(236) #define TEGRA_PIN_PEX_L1_RST_N_PDD5 _GPIO(237) #define TEGRA_PIN_PEX_L1_CLKREQ_N_PDD6 _GPIO(238) #define TEGRA_PIN_PEX_L2_PRSNT_N_PDD7 _GPIO(239) #define TEGRA_PIN_CLK3_OUT_PEE0 _GPIO(240) #define TEGRA_PIN_CLK3_REQ_PEE1 _GPIO(241) #define TEGRA_PIN_CLK1_REQ_PEE2 _GPIO(242) #define TEGRA_PIN_HDMI_CEC_PEE3 _GPIO(243) #define TEGRA_PIN_PEE4 _GPIO(244) #define TEGRA_PIN_PEE5 _GPIO(245) #define TEGRA_PIN_PEE6 _GPIO(246) #define TEGRA_PIN_PEE7 _GPIO(247) /* All non-GPIO pins follow */ #define NUM_GPIOS (TEGRA_PIN_PEE7 + 1) #define _PIN(offset) (NUM_GPIOS + (offset)) /* Non-GPIO pins */ #define TEGRA_PIN_CLK_32K_IN _PIN(0) #define TEGRA_PIN_CORE_PWR_REQ _PIN(1) #define TEGRA_PIN_CPU_PWR_REQ _PIN(2) #define TEGRA_PIN_JTAG_TCK _PIN(3) #define TEGRA_PIN_JTAG_TDI _PIN(4) #define TEGRA_PIN_JTAG_TDO _PIN(5) #define TEGRA_PIN_JTAG_TMS _PIN(6) #define TEGRA_PIN_JTAG_TRST_N _PIN(7) #define TEGRA_PIN_OWR _PIN(8) #define TEGRA_PIN_PWR_INT_N _PIN(9) #define TEGRA_PIN_SYS_RESET_N _PIN(10) #define TEGRA_PIN_TEST_MODE_EN _PIN(11) static const struct pinctrl_pin_desc tegra30_pins[] = { PINCTRL_PIN(TEGRA_PIN_CLK_32K_OUT_PA0, "CLK_32K_OUT PA0"), PINCTRL_PIN(TEGRA_PIN_UART3_CTS_N_PA1, "UART3_CTS_N PA1"), 
PINCTRL_PIN(TEGRA_PIN_DAP2_FS_PA2, "DAP2_FS PA2"), PINCTRL_PIN(TEGRA_PIN_DAP2_SCLK_PA3, "DAP2_SCLK PA3"), PINCTRL_PIN(TEGRA_PIN_DAP2_DIN_PA4, "DAP2_DIN PA4"), PINCTRL_PIN(TEGRA_PIN_DAP2_DOUT_PA5, "DAP2_DOUT PA5"), PINCTRL_PIN(TEGRA_PIN_SDMMC3_CLK_PA6, "SDMMC3_CLK PA6"), PINCTRL_PIN(TEGRA_PIN_SDMMC3_CMD_PA7, "SDMMC3_CMD PA7"), PINCTRL_PIN(TEGRA_PIN_GMI_A17_PB0, "GMI_A17 PB0"), PINCTRL_PIN(TEGRA_PIN_GMI_A18_PB1, "GMI_A18 PB1"), PINCTRL_PIN(TEGRA_PIN_LCD_PWR0_PB2, "LCD_PWR0 PB2"), PINCTRL_PIN(TEGRA_PIN_LCD_PCLK_PB3, "LCD_PCLK PB3"), PINCTRL_PIN(TEGRA_PIN_SDMMC3_DAT3_PB4, "SDMMC3_DAT3 PB4"), PINCTRL_PIN(TEGRA_PIN_SDMMC3_DAT2_PB5, "SDMMC3_DAT2 PB5"), PINCTRL_PIN(TEGRA_PIN_SDMMC3_DAT1_PB6, "SDMMC3_DAT1 PB6"), PINCTRL_PIN(TEGRA_PIN_SDMMC3_DAT0_PB7, "SDMMC3_DAT0 PB7"), PINCTRL_PIN(TEGRA_PIN_UART3_RTS_N_PC0, "UART3_RTS_N PC0"), PINCTRL_PIN(TEGRA_PIN_LCD_PWR1_PC1, "LCD_PWR1 PC1"), PINCTRL_PIN(TEGRA_PIN_UART2_TXD_PC2, "UART2_TXD PC2"), PINCTRL_PIN(TEGRA_PIN_UART2_RXD_PC3, "UART2_RXD PC3"), PINCTRL_PIN(TEGRA_PIN_GEN1_I2C_SCL_PC4, "GEN1_I2C_SCL PC4"), PINCTRL_PIN(TEGRA_PIN_GEN1_I2C_SDA_PC5, "GEN1_I2C_SDA PC5"), PINCTRL_PIN(TEGRA_PIN_LCD_PWR2_PC6, "LCD_PWR2 PC6"), PINCTRL_PIN(TEGRA_PIN_GMI_WP_N_PC7, "GMI_WP_N PC7"), PINCTRL_PIN(TEGRA_PIN_SDMMC3_DAT5_PD0, "SDMMC3_DAT5 PD0"), PINCTRL_PIN(TEGRA_PIN_SDMMC3_DAT4_PD1, "SDMMC3_DAT4 PD1"), PINCTRL_PIN(TEGRA_PIN_LCD_DC1_PD2, "LCD_DC1 PD2"), PINCTRL_PIN(TEGRA_PIN_SDMMC3_DAT6_PD3, "SDMMC3_DAT6 PD3"), PINCTRL_PIN(TEGRA_PIN_SDMMC3_DAT7_PD4, "SDMMC3_DAT7 PD4"), PINCTRL_PIN(TEGRA_PIN_VI_D1_PD5, "VI_D1 PD5"), PINCTRL_PIN(TEGRA_PIN_VI_VSYNC_PD6, "VI_VSYNC PD6"), PINCTRL_PIN(TEGRA_PIN_VI_HSYNC_PD7, "VI_HSYNC PD7"), PINCTRL_PIN(TEGRA_PIN_LCD_D0_PE0, "LCD_D0 PE0"), PINCTRL_PIN(TEGRA_PIN_LCD_D1_PE1, "LCD_D1 PE1"), PINCTRL_PIN(TEGRA_PIN_LCD_D2_PE2, "LCD_D2 PE2"), PINCTRL_PIN(TEGRA_PIN_LCD_D3_PE3, "LCD_D3 PE3"), PINCTRL_PIN(TEGRA_PIN_LCD_D4_PE4, "LCD_D4 PE4"), PINCTRL_PIN(TEGRA_PIN_LCD_D5_PE5, "LCD_D5 PE5"), PINCTRL_PIN(TEGRA_PIN_LCD_D6_PE6, "LCD_D6 
PE6"), PINCTRL_PIN(TEGRA_PIN_LCD_D7_PE7, "LCD_D7 PE7"), PINCTRL_PIN(TEGRA_PIN_LCD_D8_PF0, "LCD_D8 PF0"), PINCTRL_PIN(TEGRA_PIN_LCD_D9_PF1, "LCD_D9 PF1"), PINCTRL_PIN(TEGRA_PIN_LCD_D10_PF2, "LCD_D10 PF2"), PINCTRL_PIN(TEGRA_PIN_LCD_D11_PF3, "LCD_D11 PF3"), PINCTRL_PIN(TEGRA_PIN_LCD_D12_PF4, "LCD_D12 PF4"), PINCTRL_PIN(TEGRA_PIN_LCD_D13_PF5, "LCD_D13 PF5"), PINCTRL_PIN(TEGRA_PIN_LCD_D14_PF6, "LCD_D14 PF6"), PINCTRL_PIN(TEGRA_PIN_LCD_D15_PF7, "LCD_D15 PF7"), PINCTRL_PIN(TEGRA_PIN_GMI_AD0_PG0, "GMI_AD0 PG0"), PINCTRL_PIN(TEGRA_PIN_GMI_AD1_PG1, "GMI_AD1 PG1"), PINCTRL_PIN(TEGRA_PIN_GMI_AD2_PG2, "GMI_AD2 PG2"), PINCTRL_PIN(TEGRA_PIN_GMI_AD3_PG3, "GMI_AD3 PG3"), PINCTRL_PIN(TEGRA_PIN_GMI_AD4_PG4, "GMI_AD4 PG4"), PINCTRL_PIN(TEGRA_PIN_GMI_AD5_PG5, "GMI_AD5 PG5"), PINCTRL_PIN(TEGRA_PIN_GMI_AD6_PG6, "GMI_AD6 PG6"), PINCTRL_PIN(TEGRA_PIN_GMI_AD7_PG7, "GMI_AD7 PG7"), PINCTRL_PIN(TEGRA_PIN_GMI_AD8_PH0, "GMI_AD8 PH0"), PINCTRL_PIN(TEGRA_PIN_GMI_AD9_PH1, "GMI_AD9 PH1"), PINCTRL_PIN(TEGRA_PIN_GMI_AD10_PH2, "GMI_AD10 PH2"), PINCTRL_PIN(TEGRA_PIN_GMI_AD11_PH3, "GMI_AD11 PH3"), PINCTRL_PIN(TEGRA_PIN_GMI_AD12_PH4, "GMI_AD12 PH4"), PINCTRL_PIN(TEGRA_PIN_GMI_AD13_PH5, "GMI_AD13 PH5"), PINCTRL_PIN(TEGRA_PIN_GMI_AD14_PH6, "GMI_AD14 PH6"), PINCTRL_PIN(TEGRA_PIN_GMI_AD15_PH7, "GMI_AD15 PH7"), PINCTRL_PIN(TEGRA_PIN_GMI_WR_N_PI0, "GMI_WR_N PI0"), PINCTRL_PIN(TEGRA_PIN_GMI_OE_N_PI1, "GMI_OE_N PI1"), PINCTRL_PIN(TEGRA_PIN_GMI_DQS_PI2, "GMI_DQS PI2"), PINCTRL_PIN(TEGRA_PIN_GMI_CS6_N_PI3, "GMI_CS6_N PI3"), PINCTRL_PIN(TEGRA_PIN_GMI_RST_N_PI4, "GMI_RST_N PI4"), PINCTRL_PIN(TEGRA_PIN_GMI_IORDY_PI5, "GMI_IORDY PI5"), PINCTRL_PIN(TEGRA_PIN_GMI_CS7_N_PI6, "GMI_CS7_N PI6"), PINCTRL_PIN(TEGRA_PIN_GMI_WAIT_PI7, "GMI_WAIT PI7"), PINCTRL_PIN(TEGRA_PIN_GMI_CS0_N_PJ0, "GMI_CS0_N PJ0"), PINCTRL_PIN(TEGRA_PIN_LCD_DE_PJ1, "LCD_DE PJ1"), PINCTRL_PIN(TEGRA_PIN_GMI_CS1_N_PJ2, "GMI_CS1_N PJ2"), PINCTRL_PIN(TEGRA_PIN_LCD_HSYNC_PJ3, "LCD_HSYNC PJ3"), PINCTRL_PIN(TEGRA_PIN_LCD_VSYNC_PJ4, "LCD_VSYNC PJ4"), 
/* Pin descriptors PJ5..PT0: UART2 flow control, GMI bus, VI camera data,
 * LCD data high bits, DAP1/DAP3/DAP4 audio ports, ULPI data, keyboard
 * matrix columns/rows. Names must match the GPIO driver's numbering. */
	PINCTRL_PIN(TEGRA_PIN_UART2_CTS_N_PJ5, "UART2_CTS_N PJ5"),
	PINCTRL_PIN(TEGRA_PIN_UART2_RTS_N_PJ6, "UART2_RTS_N PJ6"),
	PINCTRL_PIN(TEGRA_PIN_GMI_A16_PJ7, "GMI_A16 PJ7"),
	PINCTRL_PIN(TEGRA_PIN_GMI_ADV_N_PK0, "GMI_ADV_N PK0"),
	PINCTRL_PIN(TEGRA_PIN_GMI_CLK_PK1, "GMI_CLK PK1"),
	PINCTRL_PIN(TEGRA_PIN_GMI_CS4_N_PK2, "GMI_CS4_N PK2"),
	PINCTRL_PIN(TEGRA_PIN_GMI_CS2_N_PK3, "GMI_CS2_N PK3"),
	PINCTRL_PIN(TEGRA_PIN_GMI_CS3_N_PK4, "GMI_CS3_N PK4"),
	PINCTRL_PIN(TEGRA_PIN_SPDIF_OUT_PK5, "SPDIF_OUT PK5"),
	PINCTRL_PIN(TEGRA_PIN_SPDIF_IN_PK6, "SPDIF_IN PK6"),
	PINCTRL_PIN(TEGRA_PIN_GMI_A19_PK7, "GMI_A19 PK7"),
	/* VI (video input) parallel data bus */
	PINCTRL_PIN(TEGRA_PIN_VI_D2_PL0, "VI_D2 PL0"),
	PINCTRL_PIN(TEGRA_PIN_VI_D3_PL1, "VI_D3 PL1"),
	PINCTRL_PIN(TEGRA_PIN_VI_D4_PL2, "VI_D4 PL2"),
	PINCTRL_PIN(TEGRA_PIN_VI_D5_PL3, "VI_D5 PL3"),
	PINCTRL_PIN(TEGRA_PIN_VI_D6_PL4, "VI_D6 PL4"),
	PINCTRL_PIN(TEGRA_PIN_VI_D7_PL5, "VI_D7 PL5"),
	PINCTRL_PIN(TEGRA_PIN_VI_D8_PL6, "VI_D8 PL6"),
	PINCTRL_PIN(TEGRA_PIN_VI_D9_PL7, "VI_D9 PL7"),
	/* LCD parallel data bus, upper byte */
	PINCTRL_PIN(TEGRA_PIN_LCD_D16_PM0, "LCD_D16 PM0"),
	PINCTRL_PIN(TEGRA_PIN_LCD_D17_PM1, "LCD_D17 PM1"),
	PINCTRL_PIN(TEGRA_PIN_LCD_D18_PM2, "LCD_D18 PM2"),
	PINCTRL_PIN(TEGRA_PIN_LCD_D19_PM3, "LCD_D19 PM3"),
	PINCTRL_PIN(TEGRA_PIN_LCD_D20_PM4, "LCD_D20 PM4"),
	PINCTRL_PIN(TEGRA_PIN_LCD_D21_PM5, "LCD_D21 PM5"),
	PINCTRL_PIN(TEGRA_PIN_LCD_D22_PM6, "LCD_D22 PM6"),
	PINCTRL_PIN(TEGRA_PIN_LCD_D23_PM7, "LCD_D23 PM7"),
	/* DAP1 digital audio port */
	PINCTRL_PIN(TEGRA_PIN_DAP1_FS_PN0, "DAP1_FS PN0"),
	PINCTRL_PIN(TEGRA_PIN_DAP1_DIN_PN1, "DAP1_DIN PN1"),
	PINCTRL_PIN(TEGRA_PIN_DAP1_DOUT_PN2, "DAP1_DOUT PN2"),
	PINCTRL_PIN(TEGRA_PIN_DAP1_SCLK_PN3, "DAP1_SCLK PN3"),
	PINCTRL_PIN(TEGRA_PIN_LCD_CS0_N_PN4, "LCD_CS0_N PN4"),
	PINCTRL_PIN(TEGRA_PIN_LCD_SDOUT_PN5, "LCD_SDOUT PN5"),
	PINCTRL_PIN(TEGRA_PIN_LCD_DC0_PN6, "LCD_DC0 PN6"),
	PINCTRL_PIN(TEGRA_PIN_HDMI_INT_PN7, "HDMI_INT PN7"),
	/* ULPI USB PHY data bus (note: DATA7 is on PO0, DATA0..6 on PO1..7) */
	PINCTRL_PIN(TEGRA_PIN_ULPI_DATA7_PO0, "ULPI_DATA7 PO0"),
	PINCTRL_PIN(TEGRA_PIN_ULPI_DATA0_PO1, "ULPI_DATA0 PO1"),
	PINCTRL_PIN(TEGRA_PIN_ULPI_DATA1_PO2, "ULPI_DATA1 PO2"),
	PINCTRL_PIN(TEGRA_PIN_ULPI_DATA2_PO3, "ULPI_DATA2 PO3"),
	PINCTRL_PIN(TEGRA_PIN_ULPI_DATA3_PO4, "ULPI_DATA3 PO4"),
	PINCTRL_PIN(TEGRA_PIN_ULPI_DATA4_PO5, "ULPI_DATA4 PO5"),
	PINCTRL_PIN(TEGRA_PIN_ULPI_DATA5_PO6, "ULPI_DATA5 PO6"),
	PINCTRL_PIN(TEGRA_PIN_ULPI_DATA6_PO7, "ULPI_DATA6 PO7"),
	/* DAP3/DAP4 digital audio ports */
	PINCTRL_PIN(TEGRA_PIN_DAP3_FS_PP0, "DAP3_FS PP0"),
	PINCTRL_PIN(TEGRA_PIN_DAP3_DIN_PP1, "DAP3_DIN PP1"),
	PINCTRL_PIN(TEGRA_PIN_DAP3_DOUT_PP2, "DAP3_DOUT PP2"),
	PINCTRL_PIN(TEGRA_PIN_DAP3_SCLK_PP3, "DAP3_SCLK PP3"),
	PINCTRL_PIN(TEGRA_PIN_DAP4_FS_PP4, "DAP4_FS PP4"),
	PINCTRL_PIN(TEGRA_PIN_DAP4_DIN_PP5, "DAP4_DIN PP5"),
	PINCTRL_PIN(TEGRA_PIN_DAP4_DOUT_PP6, "DAP4_DOUT PP6"),
	PINCTRL_PIN(TEGRA_PIN_DAP4_SCLK_PP7, "DAP4_SCLK PP7"),
	/* Keyboard matrix columns (port Q) and rows (ports R, S) */
	PINCTRL_PIN(TEGRA_PIN_KB_COL0_PQ0, "KB_COL0 PQ0"),
	PINCTRL_PIN(TEGRA_PIN_KB_COL1_PQ1, "KB_COL1 PQ1"),
	PINCTRL_PIN(TEGRA_PIN_KB_COL2_PQ2, "KB_COL2 PQ2"),
	PINCTRL_PIN(TEGRA_PIN_KB_COL3_PQ3, "KB_COL3 PQ3"),
	PINCTRL_PIN(TEGRA_PIN_KB_COL4_PQ4, "KB_COL4 PQ4"),
	PINCTRL_PIN(TEGRA_PIN_KB_COL5_PQ5, "KB_COL5 PQ5"),
	PINCTRL_PIN(TEGRA_PIN_KB_COL6_PQ6, "KB_COL6 PQ6"),
	PINCTRL_PIN(TEGRA_PIN_KB_COL7_PQ7, "KB_COL7 PQ7"),
	PINCTRL_PIN(TEGRA_PIN_KB_ROW0_PR0, "KB_ROW0 PR0"),
	PINCTRL_PIN(TEGRA_PIN_KB_ROW1_PR1, "KB_ROW1 PR1"),
	PINCTRL_PIN(TEGRA_PIN_KB_ROW2_PR2, "KB_ROW2 PR2"),
	PINCTRL_PIN(TEGRA_PIN_KB_ROW3_PR3, "KB_ROW3 PR3"),
	PINCTRL_PIN(TEGRA_PIN_KB_ROW4_PR4, "KB_ROW4 PR4"),
	PINCTRL_PIN(TEGRA_PIN_KB_ROW5_PR5, "KB_ROW5 PR5"),
	PINCTRL_PIN(TEGRA_PIN_KB_ROW6_PR6, "KB_ROW6 PR6"),
	PINCTRL_PIN(TEGRA_PIN_KB_ROW7_PR7, "KB_ROW7 PR7"),
	PINCTRL_PIN(TEGRA_PIN_KB_ROW8_PS0, "KB_ROW8 PS0"),
	PINCTRL_PIN(TEGRA_PIN_KB_ROW9_PS1, "KB_ROW9 PS1"),
	PINCTRL_PIN(TEGRA_PIN_KB_ROW10_PS2, "KB_ROW10 PS2"),
	PINCTRL_PIN(TEGRA_PIN_KB_ROW11_PS3, "KB_ROW11 PS3"),
	PINCTRL_PIN(TEGRA_PIN_KB_ROW12_PS4, "KB_ROW12 PS4"),
	PINCTRL_PIN(TEGRA_PIN_KB_ROW13_PS5, "KB_ROW13 PS5"),
	PINCTRL_PIN(TEGRA_PIN_KB_ROW14_PS6, "KB_ROW14 PS6"),
	PINCTRL_PIN(TEGRA_PIN_KB_ROW15_PS7, "KB_ROW15 PS7"),
	PINCTRL_PIN(TEGRA_PIN_VI_PCLK_PT0, "VI_PCLK PT0"),
PINCTRL_PIN(TEGRA_PIN_VI_MCLK_PT1, "VI_MCLK PT1"), PINCTRL_PIN(TEGRA_PIN_VI_D10_PT2, "VI_D10 PT2"), PINCTRL_PIN(TEGRA_PIN_VI_D11_PT3, "VI_D11 PT3"), PINCTRL_PIN(TEGRA_PIN_VI_D0_PT4, "VI_D0 PT4"), PINCTRL_PIN(TEGRA_PIN_GEN2_I2C_SCL_PT5, "GEN2_I2C_SCL PT5"), PINCTRL_PIN(TEGRA_PIN_GEN2_I2C_SDA_PT6, "GEN2_I2C_SDA PT6"), PINCTRL_PIN(TEGRA_PIN_SDMMC4_CMD_PT7, "SDMMC4_CMD PT7"), PINCTRL_PIN(TEGRA_PIN_PU0, "PU0"), PINCTRL_PIN(TEGRA_PIN_PU1, "PU1"), PINCTRL_PIN(TEGRA_PIN_PU2, "PU2"), PINCTRL_PIN(TEGRA_PIN_PU3, "PU3"), PINCTRL_PIN(TEGRA_PIN_PU4, "PU4"), PINCTRL_PIN(TEGRA_PIN_PU5, "PU5"), PINCTRL_PIN(TEGRA_PIN_PU6, "PU6"), PINCTRL_PIN(TEGRA_PIN_JTAG_RTCK_PU7, "JTAG_RTCK PU7"), PINCTRL_PIN(TEGRA_PIN_PV0, "PV0"), PINCTRL_PIN(TEGRA_PIN_PV1, "PV1"), PINCTRL_PIN(TEGRA_PIN_PV2, "PV2"), PINCTRL_PIN(TEGRA_PIN_PV3, "PV3"), PINCTRL_PIN(TEGRA_PIN_DDC_SCL_PV4, "DDC_SCL PV4"), PINCTRL_PIN(TEGRA_PIN_DDC_SDA_PV5, "DDC_SDA PV5"), PINCTRL_PIN(TEGRA_PIN_CRT_HSYNC_PV6, "CRT_HSYNC PV6"), PINCTRL_PIN(TEGRA_PIN_CRT_VSYNC_PV7, "CRT_VSYNC PV7"), PINCTRL_PIN(TEGRA_PIN_LCD_CS1_N_PW0, "LCD_CS1_N PW0"), PINCTRL_PIN(TEGRA_PIN_LCD_M1_PW1, "LCD_M1 PW1"), PINCTRL_PIN(TEGRA_PIN_SPI2_CS1_N_PW2, "SPI2_CS1_N PW2"), PINCTRL_PIN(TEGRA_PIN_SPI2_CS2_N_PW3, "SPI2_CS2_N PW3"), PINCTRL_PIN(TEGRA_PIN_CLK1_OUT_PW4, "CLK1_OUT PW4"), PINCTRL_PIN(TEGRA_PIN_CLK2_OUT_PW5, "CLK2_OUT PW5"), PINCTRL_PIN(TEGRA_PIN_UART3_TXD_PW6, "UART3_TXD PW6"), PINCTRL_PIN(TEGRA_PIN_UART3_RXD_PW7, "UART3_RXD PW7"), PINCTRL_PIN(TEGRA_PIN_SPI2_MOSI_PX0, "SPI2_MOSI PX0"), PINCTRL_PIN(TEGRA_PIN_SPI2_MISO_PX1, "SPI2_MISO PX1"), PINCTRL_PIN(TEGRA_PIN_SPI2_SCK_PX2, "SPI2_SCK PX2"), PINCTRL_PIN(TEGRA_PIN_SPI2_CS0_N_PX3, "SPI2_CS0_N PX3"), PINCTRL_PIN(TEGRA_PIN_SPI1_MOSI_PX4, "SPI1_MOSI PX4"), PINCTRL_PIN(TEGRA_PIN_SPI1_SCK_PX5, "SPI1_SCK PX5"), PINCTRL_PIN(TEGRA_PIN_SPI1_CS0_N_PX6, "SPI1_CS0_N PX6"), PINCTRL_PIN(TEGRA_PIN_SPI1_MISO_PX7, "SPI1_MISO PX7"), PINCTRL_PIN(TEGRA_PIN_ULPI_CLK_PY0, "ULPI_CLK PY0"), PINCTRL_PIN(TEGRA_PIN_ULPI_DIR_PY1, "ULPI_DIR 
PY1"), PINCTRL_PIN(TEGRA_PIN_ULPI_NXT_PY2, "ULPI_NXT PY2"), PINCTRL_PIN(TEGRA_PIN_ULPI_STP_PY3, "ULPI_STP PY3"), PINCTRL_PIN(TEGRA_PIN_SDMMC1_DAT3_PY4, "SDMMC1_DAT3 PY4"), PINCTRL_PIN(TEGRA_PIN_SDMMC1_DAT2_PY5, "SDMMC1_DAT2 PY5"), PINCTRL_PIN(TEGRA_PIN_SDMMC1_DAT1_PY6, "SDMMC1_DAT1 PY6"), PINCTRL_PIN(TEGRA_PIN_SDMMC1_DAT0_PY7, "SDMMC1_DAT0 PY7"), PINCTRL_PIN(TEGRA_PIN_SDMMC1_CLK_PZ0, "SDMMC1_CLK PZ0"), PINCTRL_PIN(TEGRA_PIN_SDMMC1_CMD_PZ1, "SDMMC1_CMD PZ1"), PINCTRL_PIN(TEGRA_PIN_LCD_SDIN_PZ2, "LCD_SDIN PZ2"), PINCTRL_PIN(TEGRA_PIN_LCD_WR_N_PZ3, "LCD_WR_N PZ3"), PINCTRL_PIN(TEGRA_PIN_LCD_SCK_PZ4, "LCD_SCK PZ4"), PINCTRL_PIN(TEGRA_PIN_SYS_CLK_REQ_PZ5, "SYS_CLK_REQ PZ5"), PINCTRL_PIN(TEGRA_PIN_PWR_I2C_SCL_PZ6, "PWR_I2C_SCL PZ6"), PINCTRL_PIN(TEGRA_PIN_PWR_I2C_SDA_PZ7, "PWR_I2C_SDA PZ7"), PINCTRL_PIN(TEGRA_PIN_SDMMC4_DAT0_PAA0, "SDMMC4_DAT0 PAA0"), PINCTRL_PIN(TEGRA_PIN_SDMMC4_DAT1_PAA1, "SDMMC4_DAT1 PAA1"), PINCTRL_PIN(TEGRA_PIN_SDMMC4_DAT2_PAA2, "SDMMC4_DAT2 PAA2"), PINCTRL_PIN(TEGRA_PIN_SDMMC4_DAT3_PAA3, "SDMMC4_DAT3 PAA3"), PINCTRL_PIN(TEGRA_PIN_SDMMC4_DAT4_PAA4, "SDMMC4_DAT4 PAA4"), PINCTRL_PIN(TEGRA_PIN_SDMMC4_DAT5_PAA5, "SDMMC4_DAT5 PAA5"), PINCTRL_PIN(TEGRA_PIN_SDMMC4_DAT6_PAA6, "SDMMC4_DAT6 PAA6"), PINCTRL_PIN(TEGRA_PIN_SDMMC4_DAT7_PAA7, "SDMMC4_DAT7 PAA7"), PINCTRL_PIN(TEGRA_PIN_PBB0, "PBB0"), PINCTRL_PIN(TEGRA_PIN_CAM_I2C_SCL_PBB1, "CAM_I2C_SCL PBB1"), PINCTRL_PIN(TEGRA_PIN_CAM_I2C_SDA_PBB2, "CAM_I2C_SDA PBB2"), PINCTRL_PIN(TEGRA_PIN_PBB3, "PBB3"), PINCTRL_PIN(TEGRA_PIN_PBB4, "PBB4"), PINCTRL_PIN(TEGRA_PIN_PBB5, "PBB5"), PINCTRL_PIN(TEGRA_PIN_PBB6, "PBB6"), PINCTRL_PIN(TEGRA_PIN_PBB7, "PBB7"), PINCTRL_PIN(TEGRA_PIN_CAM_MCLK_PCC0, "CAM_MCLK PCC0"), PINCTRL_PIN(TEGRA_PIN_PCC1, "PCC1"), PINCTRL_PIN(TEGRA_PIN_PCC2, "PCC2"), PINCTRL_PIN(TEGRA_PIN_SDMMC4_RST_N_PCC3, "SDMMC4_RST_N PCC3"), PINCTRL_PIN(TEGRA_PIN_SDMMC4_CLK_PCC4, "SDMMC4_CLK PCC4"), PINCTRL_PIN(TEGRA_PIN_CLK2_REQ_PCC5, "CLK2_REQ PCC5"), PINCTRL_PIN(TEGRA_PIN_PEX_L2_RST_N_PCC6, "PEX_L2_RST_N PCC6"), 
PINCTRL_PIN(TEGRA_PIN_PEX_L2_CLKREQ_N_PCC7, "PEX_L2_CLKREQ_N PCC7"), PINCTRL_PIN(TEGRA_PIN_PEX_L0_PRSNT_N_PDD0, "PEX_L0_PRSNT_N PDD0"), PINCTRL_PIN(TEGRA_PIN_PEX_L0_RST_N_PDD1, "PEX_L0_RST_N PDD1"), PINCTRL_PIN(TEGRA_PIN_PEX_L0_CLKREQ_N_PDD2, "PEX_L0_CLKREQ_N PDD2"), PINCTRL_PIN(TEGRA_PIN_PEX_WAKE_N_PDD3, "PEX_WAKE_N PDD3"), PINCTRL_PIN(TEGRA_PIN_PEX_L1_PRSNT_N_PDD4, "PEX_L1_PRSNT_N PDD4"), PINCTRL_PIN(TEGRA_PIN_PEX_L1_RST_N_PDD5, "PEX_L1_RST_N PDD5"), PINCTRL_PIN(TEGRA_PIN_PEX_L1_CLKREQ_N_PDD6, "PEX_L1_CLKREQ_N PDD6"), PINCTRL_PIN(TEGRA_PIN_PEX_L2_PRSNT_N_PDD7, "PEX_L2_PRSNT_N PDD7"), PINCTRL_PIN(TEGRA_PIN_CLK3_OUT_PEE0, "CLK3_OUT PEE0"), PINCTRL_PIN(TEGRA_PIN_CLK3_REQ_PEE1, "CLK3_REQ PEE1"), PINCTRL_PIN(TEGRA_PIN_CLK1_REQ_PEE2, "CLK1_REQ PEE2"), PINCTRL_PIN(TEGRA_PIN_HDMI_CEC_PEE3, "HDMI_CEC PEE3"), PINCTRL_PIN(TEGRA_PIN_PEE4, "PEE4"), PINCTRL_PIN(TEGRA_PIN_PEE5, "PEE5"), PINCTRL_PIN(TEGRA_PIN_PEE6, "PEE6"), PINCTRL_PIN(TEGRA_PIN_PEE7, "PEE7"), PINCTRL_PIN(TEGRA_PIN_CLK_32K_IN, "CLK_32K_IN"), PINCTRL_PIN(TEGRA_PIN_CORE_PWR_REQ, "CORE_PWR_REQ"), PINCTRL_PIN(TEGRA_PIN_CPU_PWR_REQ, "CPU_PWR_REQ"), PINCTRL_PIN(TEGRA_PIN_JTAG_TCK, "JTAG_TCK"), PINCTRL_PIN(TEGRA_PIN_JTAG_TDI, "JTAG_TDI"), PINCTRL_PIN(TEGRA_PIN_JTAG_TDO, "JTAG_TDO"), PINCTRL_PIN(TEGRA_PIN_JTAG_TMS, "JTAG_TMS"), PINCTRL_PIN(TEGRA_PIN_JTAG_TRST_N, "JTAG_TRST_N"), PINCTRL_PIN(TEGRA_PIN_OWR, "OWR"), PINCTRL_PIN(TEGRA_PIN_PWR_INT_N, "PWR_INT_N"), PINCTRL_PIN(TEGRA_PIN_SYS_RESET_N, "SYS_RESET_N"), PINCTRL_PIN(TEGRA_PIN_TEST_MODE_EN, "TEST_MODE_EN"), }; static const unsigned clk_32k_out_pa0_pins[] = { TEGRA_PIN_CLK_32K_OUT_PA0, }; static const unsigned uart3_cts_n_pa1_pins[] = { TEGRA_PIN_UART3_CTS_N_PA1, }; static const unsigned dap2_fs_pa2_pins[] = { TEGRA_PIN_DAP2_FS_PA2, }; static const unsigned dap2_sclk_pa3_pins[] = { TEGRA_PIN_DAP2_SCLK_PA3, }; static const unsigned dap2_din_pa4_pins[] = { TEGRA_PIN_DAP2_DIN_PA4, }; static const unsigned dap2_dout_pa5_pins[] = { TEGRA_PIN_DAP2_DOUT_PA5, }; static const 
unsigned sdmmc3_clk_pa6_pins[] = { TEGRA_PIN_SDMMC3_CLK_PA6, }; static const unsigned sdmmc3_cmd_pa7_pins[] = { TEGRA_PIN_SDMMC3_CMD_PA7, }; static const unsigned gmi_a17_pb0_pins[] = { TEGRA_PIN_GMI_A17_PB0, }; static const unsigned gmi_a18_pb1_pins[] = { TEGRA_PIN_GMI_A18_PB1, }; static const unsigned lcd_pwr0_pb2_pins[] = { TEGRA_PIN_LCD_PWR0_PB2, }; static const unsigned lcd_pclk_pb3_pins[] = { TEGRA_PIN_LCD_PCLK_PB3, }; static const unsigned sdmmc3_dat3_pb4_pins[] = { TEGRA_PIN_SDMMC3_DAT3_PB4, }; static const unsigned sdmmc3_dat2_pb5_pins[] = { TEGRA_PIN_SDMMC3_DAT2_PB5, }; static const unsigned sdmmc3_dat1_pb6_pins[] = { TEGRA_PIN_SDMMC3_DAT1_PB6, }; static const unsigned sdmmc3_dat0_pb7_pins[] = { TEGRA_PIN_SDMMC3_DAT0_PB7, }; static const unsigned uart3_rts_n_pc0_pins[] = { TEGRA_PIN_UART3_RTS_N_PC0, }; static const unsigned lcd_pwr1_pc1_pins[] = { TEGRA_PIN_LCD_PWR1_PC1, }; static const unsigned uart2_txd_pc2_pins[] = { TEGRA_PIN_UART2_TXD_PC2, }; static const unsigned uart2_rxd_pc3_pins[] = { TEGRA_PIN_UART2_RXD_PC3, }; static const unsigned gen1_i2c_scl_pc4_pins[] = { TEGRA_PIN_GEN1_I2C_SCL_PC4, }; static const unsigned gen1_i2c_sda_pc5_pins[] = { TEGRA_PIN_GEN1_I2C_SDA_PC5, }; static const unsigned lcd_pwr2_pc6_pins[] = { TEGRA_PIN_LCD_PWR2_PC6, }; static const unsigned gmi_wp_n_pc7_pins[] = { TEGRA_PIN_GMI_WP_N_PC7, }; static const unsigned sdmmc3_dat5_pd0_pins[] = { TEGRA_PIN_SDMMC3_DAT5_PD0, }; static const unsigned sdmmc3_dat4_pd1_pins[] = { TEGRA_PIN_SDMMC3_DAT4_PD1, }; static const unsigned lcd_dc1_pd2_pins[] = { TEGRA_PIN_LCD_DC1_PD2, }; static const unsigned sdmmc3_dat6_pd3_pins[] = { TEGRA_PIN_SDMMC3_DAT6_PD3, }; static const unsigned sdmmc3_dat7_pd4_pins[] = { TEGRA_PIN_SDMMC3_DAT7_PD4, }; static const unsigned vi_d1_pd5_pins[] = { TEGRA_PIN_VI_D1_PD5, }; static const unsigned vi_vsync_pd6_pins[] = { TEGRA_PIN_VI_VSYNC_PD6, }; static const unsigned vi_hsync_pd7_pins[] = { TEGRA_PIN_VI_HSYNC_PD7, }; static const unsigned lcd_d0_pe0_pins[] = { 
TEGRA_PIN_LCD_D0_PE0, }; static const unsigned lcd_d1_pe1_pins[] = { TEGRA_PIN_LCD_D1_PE1, }; static const unsigned lcd_d2_pe2_pins[] = { TEGRA_PIN_LCD_D2_PE2, }; static const unsigned lcd_d3_pe3_pins[] = { TEGRA_PIN_LCD_D3_PE3, }; static const unsigned lcd_d4_pe4_pins[] = { TEGRA_PIN_LCD_D4_PE4, }; static const unsigned lcd_d5_pe5_pins[] = { TEGRA_PIN_LCD_D5_PE5, }; static const unsigned lcd_d6_pe6_pins[] = { TEGRA_PIN_LCD_D6_PE6, }; static const unsigned lcd_d7_pe7_pins[] = { TEGRA_PIN_LCD_D7_PE7, }; static const unsigned lcd_d8_pf0_pins[] = { TEGRA_PIN_LCD_D8_PF0, }; static const unsigned lcd_d9_pf1_pins[] = { TEGRA_PIN_LCD_D9_PF1, }; static const unsigned lcd_d10_pf2_pins[] = { TEGRA_PIN_LCD_D10_PF2, }; static const unsigned lcd_d11_pf3_pins[] = { TEGRA_PIN_LCD_D11_PF3, }; static const unsigned lcd_d12_pf4_pins[] = { TEGRA_PIN_LCD_D12_PF4, }; static const unsigned lcd_d13_pf5_pins[] = { TEGRA_PIN_LCD_D13_PF5, }; static const unsigned lcd_d14_pf6_pins[] = { TEGRA_PIN_LCD_D14_PF6, }; static const unsigned lcd_d15_pf7_pins[] = { TEGRA_PIN_LCD_D15_PF7, }; static const unsigned gmi_ad0_pg0_pins[] = { TEGRA_PIN_GMI_AD0_PG0, }; static const unsigned gmi_ad1_pg1_pins[] = { TEGRA_PIN_GMI_AD1_PG1, }; static const unsigned gmi_ad2_pg2_pins[] = { TEGRA_PIN_GMI_AD2_PG2, }; static const unsigned gmi_ad3_pg3_pins[] = { TEGRA_PIN_GMI_AD3_PG3, }; static const unsigned gmi_ad4_pg4_pins[] = { TEGRA_PIN_GMI_AD4_PG4, }; static const unsigned gmi_ad5_pg5_pins[] = { TEGRA_PIN_GMI_AD5_PG5, }; static const unsigned gmi_ad6_pg6_pins[] = { TEGRA_PIN_GMI_AD6_PG6, }; static const unsigned gmi_ad7_pg7_pins[] = { TEGRA_PIN_GMI_AD7_PG7, }; static const unsigned gmi_ad8_ph0_pins[] = { TEGRA_PIN_GMI_AD8_PH0, }; static const unsigned gmi_ad9_ph1_pins[] = { TEGRA_PIN_GMI_AD9_PH1, }; static const unsigned gmi_ad10_ph2_pins[] = { TEGRA_PIN_GMI_AD10_PH2, }; static const unsigned gmi_ad11_ph3_pins[] = { TEGRA_PIN_GMI_AD11_PH3, }; static const unsigned gmi_ad12_ph4_pins[] = { TEGRA_PIN_GMI_AD12_PH4, 
}; static const unsigned gmi_ad13_ph5_pins[] = { TEGRA_PIN_GMI_AD13_PH5, }; static const unsigned gmi_ad14_ph6_pins[] = { TEGRA_PIN_GMI_AD14_PH6, }; static const unsigned gmi_ad15_ph7_pins[] = { TEGRA_PIN_GMI_AD15_PH7, }; static const unsigned gmi_wr_n_pi0_pins[] = { TEGRA_PIN_GMI_WR_N_PI0, }; static const unsigned gmi_oe_n_pi1_pins[] = { TEGRA_PIN_GMI_OE_N_PI1, }; static const unsigned gmi_dqs_pi2_pins[] = { TEGRA_PIN_GMI_DQS_PI2, }; static const unsigned gmi_cs6_n_pi3_pins[] = { TEGRA_PIN_GMI_CS6_N_PI3, }; static const unsigned gmi_rst_n_pi4_pins[] = { TEGRA_PIN_GMI_RST_N_PI4, }; static const unsigned gmi_iordy_pi5_pins[] = { TEGRA_PIN_GMI_IORDY_PI5, }; static const unsigned gmi_cs7_n_pi6_pins[] = { TEGRA_PIN_GMI_CS7_N_PI6, }; static const unsigned gmi_wait_pi7_pins[] = { TEGRA_PIN_GMI_WAIT_PI7, }; static const unsigned gmi_cs0_n_pj0_pins[] = { TEGRA_PIN_GMI_CS0_N_PJ0, }; static const unsigned lcd_de_pj1_pins[] = { TEGRA_PIN_LCD_DE_PJ1, }; static const unsigned gmi_cs1_n_pj2_pins[] = { TEGRA_PIN_GMI_CS1_N_PJ2, }; static const unsigned lcd_hsync_pj3_pins[] = { TEGRA_PIN_LCD_HSYNC_PJ3, }; static const unsigned lcd_vsync_pj4_pins[] = { TEGRA_PIN_LCD_VSYNC_PJ4, }; static const unsigned uart2_cts_n_pj5_pins[] = { TEGRA_PIN_UART2_CTS_N_PJ5, }; static const unsigned uart2_rts_n_pj6_pins[] = { TEGRA_PIN_UART2_RTS_N_PJ6, }; static const unsigned gmi_a16_pj7_pins[] = { TEGRA_PIN_GMI_A16_PJ7, }; static const unsigned gmi_adv_n_pk0_pins[] = { TEGRA_PIN_GMI_ADV_N_PK0, }; static const unsigned gmi_clk_pk1_pins[] = { TEGRA_PIN_GMI_CLK_PK1, }; static const unsigned gmi_cs4_n_pk2_pins[] = { TEGRA_PIN_GMI_CS4_N_PK2, }; static const unsigned gmi_cs2_n_pk3_pins[] = { TEGRA_PIN_GMI_CS2_N_PK3, }; static const unsigned gmi_cs3_n_pk4_pins[] = { TEGRA_PIN_GMI_CS3_N_PK4, }; static const unsigned spdif_out_pk5_pins[] = { TEGRA_PIN_SPDIF_OUT_PK5, }; static const unsigned spdif_in_pk6_pins[] = { TEGRA_PIN_SPDIF_IN_PK6, }; static const unsigned gmi_a19_pk7_pins[] = { TEGRA_PIN_GMI_A19_PK7, }; 
/*
 * Single-pin group tables, one array per pad (VI_D2/PL0 through
 * ULPI_DATA2/PO3). Each array lists the pin numbers belonging to the
 * pinmux group of the same name; on Tegra30 every mux group contains
 * exactly one pin, so each array has a single element.
 */
static const unsigned vi_d2_pl0_pins[] = { TEGRA_PIN_VI_D2_PL0, };
static const unsigned vi_d3_pl1_pins[] = { TEGRA_PIN_VI_D3_PL1, };
static const unsigned vi_d4_pl2_pins[] = { TEGRA_PIN_VI_D4_PL2, };
static const unsigned vi_d5_pl3_pins[] = { TEGRA_PIN_VI_D5_PL3, };
static const unsigned vi_d6_pl4_pins[] = { TEGRA_PIN_VI_D6_PL4, };
static const unsigned vi_d7_pl5_pins[] = { TEGRA_PIN_VI_D7_PL5, };
static const unsigned vi_d8_pl6_pins[] = { TEGRA_PIN_VI_D8_PL6, };
static const unsigned vi_d9_pl7_pins[] = { TEGRA_PIN_VI_D9_PL7, };
static const unsigned lcd_d16_pm0_pins[] = { TEGRA_PIN_LCD_D16_PM0, };
static const unsigned lcd_d17_pm1_pins[] = { TEGRA_PIN_LCD_D17_PM1, };
static const unsigned lcd_d18_pm2_pins[] = { TEGRA_PIN_LCD_D18_PM2, };
static const unsigned lcd_d19_pm3_pins[] = { TEGRA_PIN_LCD_D19_PM3, };
static const unsigned lcd_d20_pm4_pins[] = { TEGRA_PIN_LCD_D20_PM4, };
static const unsigned lcd_d21_pm5_pins[] = { TEGRA_PIN_LCD_D21_PM5, };
static const unsigned lcd_d22_pm6_pins[] = { TEGRA_PIN_LCD_D22_PM6, };
static const unsigned lcd_d23_pm7_pins[] = { TEGRA_PIN_LCD_D23_PM7, };
static const unsigned dap1_fs_pn0_pins[] = { TEGRA_PIN_DAP1_FS_PN0, };
static const unsigned dap1_din_pn1_pins[] = { TEGRA_PIN_DAP1_DIN_PN1, };
static const unsigned dap1_dout_pn2_pins[] = { TEGRA_PIN_DAP1_DOUT_PN2, };
static const unsigned dap1_sclk_pn3_pins[] = { TEGRA_PIN_DAP1_SCLK_PN3, };
static const unsigned lcd_cs0_n_pn4_pins[] = { TEGRA_PIN_LCD_CS0_N_PN4, };
static const unsigned lcd_sdout_pn5_pins[] = { TEGRA_PIN_LCD_SDOUT_PN5, };
static const unsigned lcd_dc0_pn6_pins[] = { TEGRA_PIN_LCD_DC0_PN6, };
static const unsigned hdmi_int_pn7_pins[] = { TEGRA_PIN_HDMI_INT_PN7, };
static const unsigned ulpi_data7_po0_pins[] = { TEGRA_PIN_ULPI_DATA7_PO0, };
static const unsigned ulpi_data0_po1_pins[] = { TEGRA_PIN_ULPI_DATA0_PO1, };
static const unsigned ulpi_data1_po2_pins[] = { TEGRA_PIN_ULPI_DATA1_PO2, };
static const unsigned ulpi_data2_po3_pins[] = { TEGRA_PIN_ULPI_DATA2_PO3, };
/* Single-pin pin groups, continued (ports O through EE plus dedicated pads). */
static const unsigned ulpi_data3_po4_pins[] = { TEGRA_PIN_ULPI_DATA3_PO4, };
static const unsigned ulpi_data4_po5_pins[] = { TEGRA_PIN_ULPI_DATA4_PO5, };
static const unsigned ulpi_data5_po6_pins[] = { TEGRA_PIN_ULPI_DATA5_PO6, };
static const unsigned ulpi_data6_po7_pins[] = { TEGRA_PIN_ULPI_DATA6_PO7, };
static const unsigned dap3_fs_pp0_pins[] = { TEGRA_PIN_DAP3_FS_PP0, };
static const unsigned dap3_din_pp1_pins[] = { TEGRA_PIN_DAP3_DIN_PP1, };
static const unsigned dap3_dout_pp2_pins[] = { TEGRA_PIN_DAP3_DOUT_PP2, };
static const unsigned dap3_sclk_pp3_pins[] = { TEGRA_PIN_DAP3_SCLK_PP3, };
static const unsigned dap4_fs_pp4_pins[] = { TEGRA_PIN_DAP4_FS_PP4, };
static const unsigned dap4_din_pp5_pins[] = { TEGRA_PIN_DAP4_DIN_PP5, };
static const unsigned dap4_dout_pp6_pins[] = { TEGRA_PIN_DAP4_DOUT_PP6, };
static const unsigned dap4_sclk_pp7_pins[] = { TEGRA_PIN_DAP4_SCLK_PP7, };
static const unsigned kb_col0_pq0_pins[] = { TEGRA_PIN_KB_COL0_PQ0, };
static const unsigned kb_col1_pq1_pins[] = { TEGRA_PIN_KB_COL1_PQ1, };
static const unsigned kb_col2_pq2_pins[] = { TEGRA_PIN_KB_COL2_PQ2, };
static const unsigned kb_col3_pq3_pins[] = { TEGRA_PIN_KB_COL3_PQ3, };
static const unsigned kb_col4_pq4_pins[] = { TEGRA_PIN_KB_COL4_PQ4, };
static const unsigned kb_col5_pq5_pins[] = { TEGRA_PIN_KB_COL5_PQ5, };
static const unsigned kb_col6_pq6_pins[] = { TEGRA_PIN_KB_COL6_PQ6, };
static const unsigned kb_col7_pq7_pins[] = { TEGRA_PIN_KB_COL7_PQ7, };
static const unsigned kb_row0_pr0_pins[] = { TEGRA_PIN_KB_ROW0_PR0, };
static const unsigned kb_row1_pr1_pins[] = { TEGRA_PIN_KB_ROW1_PR1, };
static const unsigned kb_row2_pr2_pins[] = { TEGRA_PIN_KB_ROW2_PR2, };
static const unsigned kb_row3_pr3_pins[] = { TEGRA_PIN_KB_ROW3_PR3, };
static const unsigned kb_row4_pr4_pins[] = { TEGRA_PIN_KB_ROW4_PR4, };
static const unsigned kb_row5_pr5_pins[] = { TEGRA_PIN_KB_ROW5_PR5, };
static const unsigned kb_row6_pr6_pins[] = { TEGRA_PIN_KB_ROW6_PR6, };
static const unsigned kb_row7_pr7_pins[] = { TEGRA_PIN_KB_ROW7_PR7, };
static const unsigned kb_row8_ps0_pins[] = { TEGRA_PIN_KB_ROW8_PS0, };
static const unsigned kb_row9_ps1_pins[] = { TEGRA_PIN_KB_ROW9_PS1, };
static const unsigned kb_row10_ps2_pins[] = { TEGRA_PIN_KB_ROW10_PS2, };
static const unsigned kb_row11_ps3_pins[] = { TEGRA_PIN_KB_ROW11_PS3, };
static const unsigned kb_row12_ps4_pins[] = { TEGRA_PIN_KB_ROW12_PS4, };
static const unsigned kb_row13_ps5_pins[] = { TEGRA_PIN_KB_ROW13_PS5, };
static const unsigned kb_row14_ps6_pins[] = { TEGRA_PIN_KB_ROW14_PS6, };
static const unsigned kb_row15_ps7_pins[] = { TEGRA_PIN_KB_ROW15_PS7, };
static const unsigned vi_pclk_pt0_pins[] = { TEGRA_PIN_VI_PCLK_PT0, };
static const unsigned vi_mclk_pt1_pins[] = { TEGRA_PIN_VI_MCLK_PT1, };
static const unsigned vi_d10_pt2_pins[] = { TEGRA_PIN_VI_D10_PT2, };
static const unsigned vi_d11_pt3_pins[] = { TEGRA_PIN_VI_D11_PT3, };
static const unsigned vi_d0_pt4_pins[] = { TEGRA_PIN_VI_D0_PT4, };
static const unsigned gen2_i2c_scl_pt5_pins[] = { TEGRA_PIN_GEN2_I2C_SCL_PT5, };
static const unsigned gen2_i2c_sda_pt6_pins[] = { TEGRA_PIN_GEN2_I2C_SDA_PT6, };
static const unsigned sdmmc4_cmd_pt7_pins[] = { TEGRA_PIN_SDMMC4_CMD_PT7, };
static const unsigned pu0_pins[] = { TEGRA_PIN_PU0, };
static const unsigned pu1_pins[] = { TEGRA_PIN_PU1, };
static const unsigned pu2_pins[] = { TEGRA_PIN_PU2, };
static const unsigned pu3_pins[] = { TEGRA_PIN_PU3, };
static const unsigned pu4_pins[] = { TEGRA_PIN_PU4, };
static const unsigned pu5_pins[] = { TEGRA_PIN_PU5, };
static const unsigned pu6_pins[] = { TEGRA_PIN_PU6, };
static const unsigned jtag_rtck_pu7_pins[] = { TEGRA_PIN_JTAG_RTCK_PU7, };
static const unsigned pv0_pins[] = { TEGRA_PIN_PV0, };
static const unsigned pv1_pins[] = { TEGRA_PIN_PV1, };
static const unsigned pv2_pins[] = { TEGRA_PIN_PV2, };
static const unsigned pv3_pins[] = { TEGRA_PIN_PV3, };
static const unsigned ddc_scl_pv4_pins[] = { TEGRA_PIN_DDC_SCL_PV4, };
static const unsigned ddc_sda_pv5_pins[] = { TEGRA_PIN_DDC_SDA_PV5, };
static const unsigned crt_hsync_pv6_pins[] = { TEGRA_PIN_CRT_HSYNC_PV6, };
static const unsigned crt_vsync_pv7_pins[] = { TEGRA_PIN_CRT_VSYNC_PV7, };
static const unsigned lcd_cs1_n_pw0_pins[] = { TEGRA_PIN_LCD_CS1_N_PW0, };
static const unsigned lcd_m1_pw1_pins[] = { TEGRA_PIN_LCD_M1_PW1, };
static const unsigned spi2_cs1_n_pw2_pins[] = { TEGRA_PIN_SPI2_CS1_N_PW2, };
static const unsigned spi2_cs2_n_pw3_pins[] = { TEGRA_PIN_SPI2_CS2_N_PW3, };
static const unsigned clk1_out_pw4_pins[] = { TEGRA_PIN_CLK1_OUT_PW4, };
static const unsigned clk2_out_pw5_pins[] = { TEGRA_PIN_CLK2_OUT_PW5, };
static const unsigned uart3_txd_pw6_pins[] = { TEGRA_PIN_UART3_TXD_PW6, };
static const unsigned uart3_rxd_pw7_pins[] = { TEGRA_PIN_UART3_RXD_PW7, };
static const unsigned spi2_mosi_px0_pins[] = { TEGRA_PIN_SPI2_MOSI_PX0, };
static const unsigned spi2_miso_px1_pins[] = { TEGRA_PIN_SPI2_MISO_PX1, };
static const unsigned spi2_sck_px2_pins[] = { TEGRA_PIN_SPI2_SCK_PX2, };
static const unsigned spi2_cs0_n_px3_pins[] = { TEGRA_PIN_SPI2_CS0_N_PX3, };
static const unsigned spi1_mosi_px4_pins[] = { TEGRA_PIN_SPI1_MOSI_PX4, };
static const unsigned spi1_sck_px5_pins[] = { TEGRA_PIN_SPI1_SCK_PX5, };
static const unsigned spi1_cs0_n_px6_pins[] = { TEGRA_PIN_SPI1_CS0_N_PX6, };
static const unsigned spi1_miso_px7_pins[] = { TEGRA_PIN_SPI1_MISO_PX7, };
static const unsigned ulpi_clk_py0_pins[] = { TEGRA_PIN_ULPI_CLK_PY0, };
static const unsigned ulpi_dir_py1_pins[] = { TEGRA_PIN_ULPI_DIR_PY1, };
static const unsigned ulpi_nxt_py2_pins[] = { TEGRA_PIN_ULPI_NXT_PY2, };
static const unsigned ulpi_stp_py3_pins[] = { TEGRA_PIN_ULPI_STP_PY3, };
static const unsigned sdmmc1_dat3_py4_pins[] = { TEGRA_PIN_SDMMC1_DAT3_PY4, };
static const unsigned sdmmc1_dat2_py5_pins[] = { TEGRA_PIN_SDMMC1_DAT2_PY5, };
static const unsigned sdmmc1_dat1_py6_pins[] = { TEGRA_PIN_SDMMC1_DAT1_PY6, };
static const unsigned sdmmc1_dat0_py7_pins[] = { TEGRA_PIN_SDMMC1_DAT0_PY7, };
static const unsigned sdmmc1_clk_pz0_pins[] = { TEGRA_PIN_SDMMC1_CLK_PZ0, };
static const unsigned sdmmc1_cmd_pz1_pins[] = { TEGRA_PIN_SDMMC1_CMD_PZ1, };
static const unsigned lcd_sdin_pz2_pins[] = { TEGRA_PIN_LCD_SDIN_PZ2, };
static const unsigned lcd_wr_n_pz3_pins[] = { TEGRA_PIN_LCD_WR_N_PZ3, };
static const unsigned lcd_sck_pz4_pins[] = { TEGRA_PIN_LCD_SCK_PZ4, };
static const unsigned sys_clk_req_pz5_pins[] = { TEGRA_PIN_SYS_CLK_REQ_PZ5, };
static const unsigned pwr_i2c_scl_pz6_pins[] = { TEGRA_PIN_PWR_I2C_SCL_PZ6, };
static const unsigned pwr_i2c_sda_pz7_pins[] = { TEGRA_PIN_PWR_I2C_SDA_PZ7, };
static const unsigned sdmmc4_dat0_paa0_pins[] = { TEGRA_PIN_SDMMC4_DAT0_PAA0, };
static const unsigned sdmmc4_dat1_paa1_pins[] = { TEGRA_PIN_SDMMC4_DAT1_PAA1, };
static const unsigned sdmmc4_dat2_paa2_pins[] = { TEGRA_PIN_SDMMC4_DAT2_PAA2, };
static const unsigned sdmmc4_dat3_paa3_pins[] = { TEGRA_PIN_SDMMC4_DAT3_PAA3, };
static const unsigned sdmmc4_dat4_paa4_pins[] = { TEGRA_PIN_SDMMC4_DAT4_PAA4, };
static const unsigned sdmmc4_dat5_paa5_pins[] = { TEGRA_PIN_SDMMC4_DAT5_PAA5, };
static const unsigned sdmmc4_dat6_paa6_pins[] = { TEGRA_PIN_SDMMC4_DAT6_PAA6, };
static const unsigned sdmmc4_dat7_paa7_pins[] = { TEGRA_PIN_SDMMC4_DAT7_PAA7, };
static const unsigned pbb0_pins[] = { TEGRA_PIN_PBB0, };
static const unsigned cam_i2c_scl_pbb1_pins[] = { TEGRA_PIN_CAM_I2C_SCL_PBB1, };
static const unsigned cam_i2c_sda_pbb2_pins[] = { TEGRA_PIN_CAM_I2C_SDA_PBB2, };
static const unsigned pbb3_pins[] = { TEGRA_PIN_PBB3, };
static const unsigned pbb4_pins[] = { TEGRA_PIN_PBB4, };
static const unsigned pbb5_pins[] = { TEGRA_PIN_PBB5, };
static const unsigned pbb6_pins[] = { TEGRA_PIN_PBB6, };
static const unsigned pbb7_pins[] = { TEGRA_PIN_PBB7, };
static const unsigned cam_mclk_pcc0_pins[] = { TEGRA_PIN_CAM_MCLK_PCC0, };
static const unsigned pcc1_pins[] = { TEGRA_PIN_PCC1, };
static const unsigned pcc2_pins[] = { TEGRA_PIN_PCC2, };
static const unsigned sdmmc4_rst_n_pcc3_pins[] = { TEGRA_PIN_SDMMC4_RST_N_PCC3, };
static const unsigned sdmmc4_clk_pcc4_pins[] = { TEGRA_PIN_SDMMC4_CLK_PCC4, };
static const unsigned clk2_req_pcc5_pins[] = { TEGRA_PIN_CLK2_REQ_PCC5, };
static const unsigned pex_l2_rst_n_pcc6_pins[] = { TEGRA_PIN_PEX_L2_RST_N_PCC6, };
static const unsigned pex_l2_clkreq_n_pcc7_pins[] = { TEGRA_PIN_PEX_L2_CLKREQ_N_PCC7, };
static const unsigned pex_l0_prsnt_n_pdd0_pins[] = { TEGRA_PIN_PEX_L0_PRSNT_N_PDD0, };
static const unsigned pex_l0_rst_n_pdd1_pins[] = { TEGRA_PIN_PEX_L0_RST_N_PDD1, };
static const unsigned pex_l0_clkreq_n_pdd2_pins[] = { TEGRA_PIN_PEX_L0_CLKREQ_N_PDD2, };
static const unsigned pex_wake_n_pdd3_pins[] = { TEGRA_PIN_PEX_WAKE_N_PDD3, };
static const unsigned pex_l1_prsnt_n_pdd4_pins[] = { TEGRA_PIN_PEX_L1_PRSNT_N_PDD4, };
static const unsigned pex_l1_rst_n_pdd5_pins[] = { TEGRA_PIN_PEX_L1_RST_N_PDD5, };
static const unsigned pex_l1_clkreq_n_pdd6_pins[] = { TEGRA_PIN_PEX_L1_CLKREQ_N_PDD6, };
static const unsigned pex_l2_prsnt_n_pdd7_pins[] = { TEGRA_PIN_PEX_L2_PRSNT_N_PDD7, };
static const unsigned clk3_out_pee0_pins[] = { TEGRA_PIN_CLK3_OUT_PEE0, };
static const unsigned clk3_req_pee1_pins[] = { TEGRA_PIN_CLK3_REQ_PEE1, };
static const unsigned clk1_req_pee2_pins[] = { TEGRA_PIN_CLK1_REQ_PEE2, };
static const unsigned hdmi_cec_pee3_pins[] = { TEGRA_PIN_HDMI_CEC_PEE3, };
static const unsigned clk_32k_in_pins[] = { TEGRA_PIN_CLK_32K_IN, };
static const unsigned core_pwr_req_pins[] = { TEGRA_PIN_CORE_PWR_REQ, };
static const unsigned cpu_pwr_req_pins[] = { TEGRA_PIN_CPU_PWR_REQ, };
static const unsigned owr_pins[] = { TEGRA_PIN_OWR, };
static const unsigned pwr_int_n_pins[] = { TEGRA_PIN_PWR_INT_N, };

/*
 * Drive groups: sets of pins that share one pad drive-strength/slew control
 * register.  Membership is fixed by the silicon, not by software.
 */
static const unsigned drive_ao1_pins[] = {
	TEGRA_PIN_KB_ROW0_PR0,
	TEGRA_PIN_KB_ROW1_PR1,
	TEGRA_PIN_KB_ROW2_PR2,
	TEGRA_PIN_KB_ROW3_PR3,
	TEGRA_PIN_KB_ROW4_PR4,
	TEGRA_PIN_KB_ROW5_PR5,
	TEGRA_PIN_KB_ROW6_PR6,
	TEGRA_PIN_KB_ROW7_PR7,
	TEGRA_PIN_PWR_I2C_SCL_PZ6,
	TEGRA_PIN_PWR_I2C_SDA_PZ7,
	TEGRA_PIN_SYS_RESET_N,
};
static const unsigned drive_ao2_pins[] = {
	TEGRA_PIN_CLK_32K_OUT_PA0,
	TEGRA_PIN_KB_COL0_PQ0,
	TEGRA_PIN_KB_COL1_PQ1,
	TEGRA_PIN_KB_COL2_PQ2,
	TEGRA_PIN_KB_COL3_PQ3,
	TEGRA_PIN_KB_COL4_PQ4,
	TEGRA_PIN_KB_COL5_PQ5,
	TEGRA_PIN_KB_COL6_PQ6,
	TEGRA_PIN_KB_COL7_PQ7,
	TEGRA_PIN_KB_ROW8_PS0,
	TEGRA_PIN_KB_ROW9_PS1,
	TEGRA_PIN_KB_ROW10_PS2,
	TEGRA_PIN_KB_ROW11_PS3,
	TEGRA_PIN_KB_ROW12_PS4,
	TEGRA_PIN_KB_ROW13_PS5,
	TEGRA_PIN_KB_ROW14_PS6,
	TEGRA_PIN_KB_ROW15_PS7,
	TEGRA_PIN_SYS_CLK_REQ_PZ5,
	TEGRA_PIN_CLK_32K_IN,
	TEGRA_PIN_CORE_PWR_REQ,
	TEGRA_PIN_CPU_PWR_REQ,
	TEGRA_PIN_PWR_INT_N,
};
static const unsigned drive_at1_pins[] = {
	TEGRA_PIN_GMI_AD8_PH0,
	TEGRA_PIN_GMI_AD9_PH1,
	TEGRA_PIN_GMI_AD10_PH2,
	TEGRA_PIN_GMI_AD11_PH3,
	TEGRA_PIN_GMI_AD12_PH4,
	TEGRA_PIN_GMI_AD13_PH5,
	TEGRA_PIN_GMI_AD14_PH6,
	TEGRA_PIN_GMI_AD15_PH7,
	TEGRA_PIN_GMI_IORDY_PI5,
	TEGRA_PIN_GMI_CS7_N_PI6,
};
static const unsigned drive_at2_pins[] = {
	TEGRA_PIN_GMI_AD0_PG0,
	TEGRA_PIN_GMI_AD1_PG1,
	TEGRA_PIN_GMI_AD2_PG2,
	TEGRA_PIN_GMI_AD3_PG3,
	TEGRA_PIN_GMI_AD4_PG4,
	TEGRA_PIN_GMI_AD5_PG5,
	TEGRA_PIN_GMI_AD6_PG6,
	TEGRA_PIN_GMI_AD7_PG7,
	TEGRA_PIN_GMI_WR_N_PI0,
	TEGRA_PIN_GMI_OE_N_PI1,
	TEGRA_PIN_GMI_DQS_PI2,
	TEGRA_PIN_GMI_CS6_N_PI3,
	TEGRA_PIN_GMI_RST_N_PI4,
	TEGRA_PIN_GMI_WAIT_PI7,
	TEGRA_PIN_GMI_ADV_N_PK0,
	TEGRA_PIN_GMI_CLK_PK1,
	TEGRA_PIN_GMI_CS4_N_PK2,
	TEGRA_PIN_GMI_CS2_N_PK3,
	TEGRA_PIN_GMI_CS3_N_PK4,
};
static const unsigned drive_at3_pins[] = {
	TEGRA_PIN_GMI_WP_N_PC7,
	TEGRA_PIN_GMI_CS0_N_PJ0,
};
static const unsigned drive_at4_pins[] = {
	TEGRA_PIN_GMI_A17_PB0,
	TEGRA_PIN_GMI_A18_PB1,
	TEGRA_PIN_GMI_CS1_N_PJ2,
	TEGRA_PIN_GMI_A16_PJ7,
	TEGRA_PIN_GMI_A19_PK7,
};
static const unsigned drive_at5_pins[] = {
	TEGRA_PIN_GEN2_I2C_SCL_PT5,
	TEGRA_PIN_GEN2_I2C_SDA_PT6,
};
static const unsigned drive_cdev1_pins[] = {
	TEGRA_PIN_CLK1_OUT_PW4,
	TEGRA_PIN_CLK1_REQ_PEE2,
};
static const unsigned drive_cdev2_pins[] = {
	TEGRA_PIN_CLK2_OUT_PW5,
	TEGRA_PIN_CLK2_REQ_PCC5,
};
static const unsigned drive_cec_pins[] = {
	TEGRA_PIN_HDMI_CEC_PEE3,
};
static const unsigned drive_crt_pins[] = {
	TEGRA_PIN_CRT_HSYNC_PV6,
	TEGRA_PIN_CRT_VSYNC_PV7,
};
static const unsigned drive_csus_pins[] = {
	TEGRA_PIN_VI_MCLK_PT1,
};
static const unsigned drive_dap1_pins[] = {
	TEGRA_PIN_SPDIF_OUT_PK5,
	TEGRA_PIN_SPDIF_IN_PK6,
	TEGRA_PIN_DAP1_FS_PN0,
	TEGRA_PIN_DAP1_DIN_PN1,
	TEGRA_PIN_DAP1_DOUT_PN2,
	TEGRA_PIN_DAP1_SCLK_PN3,
};
static const unsigned drive_dap2_pins[] = {
	TEGRA_PIN_DAP2_FS_PA2,
	TEGRA_PIN_DAP2_SCLK_PA3,
	TEGRA_PIN_DAP2_DIN_PA4,
	TEGRA_PIN_DAP2_DOUT_PA5,
};
static const unsigned drive_dap3_pins[] = {
	TEGRA_PIN_DAP3_FS_PP0,
	TEGRA_PIN_DAP3_DIN_PP1,
	TEGRA_PIN_DAP3_DOUT_PP2,
	TEGRA_PIN_DAP3_SCLK_PP3,
};
static const unsigned drive_dap4_pins[] = {
	TEGRA_PIN_DAP4_FS_PP4,
	TEGRA_PIN_DAP4_DIN_PP5,
	TEGRA_PIN_DAP4_DOUT_PP6,
	TEGRA_PIN_DAP4_SCLK_PP7,
};
static const unsigned drive_dbg_pins[] = {
	TEGRA_PIN_GEN1_I2C_SCL_PC4,
	TEGRA_PIN_GEN1_I2C_SDA_PC5,
	TEGRA_PIN_PU0,
	TEGRA_PIN_PU1,
	TEGRA_PIN_PU2,
	TEGRA_PIN_PU3,
	TEGRA_PIN_PU4,
	TEGRA_PIN_PU5,
	TEGRA_PIN_PU6,
	TEGRA_PIN_JTAG_RTCK_PU7,
	TEGRA_PIN_JTAG_TCK,
	TEGRA_PIN_JTAG_TDI,
	TEGRA_PIN_JTAG_TDO,
	TEGRA_PIN_JTAG_TMS,
	TEGRA_PIN_JTAG_TRST_N,
	TEGRA_PIN_TEST_MODE_EN,
};
static const unsigned drive_ddc_pins[] = {
	TEGRA_PIN_DDC_SCL_PV4,
	TEGRA_PIN_DDC_SDA_PV5,
};
static const unsigned drive_dev3_pins[] = {
	TEGRA_PIN_CLK3_OUT_PEE0,
	TEGRA_PIN_CLK3_REQ_PEE1,
};
static const unsigned drive_gma_pins[] = {
	TEGRA_PIN_SDMMC4_DAT0_PAA0,
	TEGRA_PIN_SDMMC4_DAT1_PAA1,
	TEGRA_PIN_SDMMC4_DAT2_PAA2,
	TEGRA_PIN_SDMMC4_DAT3_PAA3,
	TEGRA_PIN_SDMMC4_RST_N_PCC3,
};
static const unsigned drive_gmb_pins[] = {
	TEGRA_PIN_SDMMC4_DAT4_PAA4,
	TEGRA_PIN_SDMMC4_DAT5_PAA5,
	TEGRA_PIN_SDMMC4_DAT6_PAA6,
	TEGRA_PIN_SDMMC4_DAT7_PAA7,
};
static const unsigned drive_gmc_pins[] = {
	TEGRA_PIN_SDMMC4_CLK_PCC4,
};
static const unsigned drive_gmd_pins[] = {
	TEGRA_PIN_SDMMC4_CMD_PT7,
};
static const unsigned drive_gme_pins[] = {
	TEGRA_PIN_PBB0,
	TEGRA_PIN_CAM_I2C_SCL_PBB1,
	TEGRA_PIN_CAM_I2C_SDA_PBB2,
	TEGRA_PIN_PBB3,
	TEGRA_PIN_PCC2,
};
static const unsigned drive_gmf_pins[] = {
	TEGRA_PIN_PBB4,
	TEGRA_PIN_PBB5,
	TEGRA_PIN_PBB6,
	TEGRA_PIN_PBB7,
};
static const unsigned drive_gmg_pins[] = {
	TEGRA_PIN_CAM_MCLK_PCC0,
};
static const unsigned drive_gmh_pins[] = {
	TEGRA_PIN_PCC1,
};
static const unsigned drive_gpv_pins[] = {
	TEGRA_PIN_PEX_L2_RST_N_PCC6,
	TEGRA_PIN_PEX_L2_CLKREQ_N_PCC7,
	TEGRA_PIN_PEX_L0_PRSNT_N_PDD0,
	TEGRA_PIN_PEX_L0_RST_N_PDD1,
	TEGRA_PIN_PEX_L0_CLKREQ_N_PDD2,
	TEGRA_PIN_PEX_WAKE_N_PDD3,
	TEGRA_PIN_PEX_L1_PRSNT_N_PDD4,
	TEGRA_PIN_PEX_L1_RST_N_PDD5,
	TEGRA_PIN_PEX_L1_CLKREQ_N_PDD6,
	TEGRA_PIN_PEX_L2_PRSNT_N_PDD7,
};
static const unsigned drive_lcd1_pins[] = {
	TEGRA_PIN_LCD_PWR1_PC1,
	TEGRA_PIN_LCD_PWR2_PC6,
	TEGRA_PIN_LCD_CS0_N_PN4,
	TEGRA_PIN_LCD_SDOUT_PN5,
	TEGRA_PIN_LCD_DC0_PN6,
	TEGRA_PIN_LCD_SDIN_PZ2,
	TEGRA_PIN_LCD_WR_N_PZ3,
	TEGRA_PIN_LCD_SCK_PZ4,
};
static const unsigned drive_lcd2_pins[] = {
	TEGRA_PIN_LCD_PWR0_PB2,
	TEGRA_PIN_LCD_PCLK_PB3,
	TEGRA_PIN_LCD_DC1_PD2,
	TEGRA_PIN_LCD_D0_PE0,
	TEGRA_PIN_LCD_D1_PE1,
	TEGRA_PIN_LCD_D2_PE2,
	TEGRA_PIN_LCD_D3_PE3,
	TEGRA_PIN_LCD_D4_PE4,
	TEGRA_PIN_LCD_D5_PE5,
	TEGRA_PIN_LCD_D6_PE6,
	TEGRA_PIN_LCD_D7_PE7,
	TEGRA_PIN_LCD_D8_PF0,
	TEGRA_PIN_LCD_D9_PF1,
	TEGRA_PIN_LCD_D10_PF2,
	TEGRA_PIN_LCD_D11_PF3,
	TEGRA_PIN_LCD_D12_PF4,
	TEGRA_PIN_LCD_D13_PF5,
	TEGRA_PIN_LCD_D14_PF6,
	TEGRA_PIN_LCD_D15_PF7,
	TEGRA_PIN_LCD_DE_PJ1,
	TEGRA_PIN_LCD_HSYNC_PJ3,
	TEGRA_PIN_LCD_VSYNC_PJ4,
	TEGRA_PIN_LCD_D16_PM0,
	TEGRA_PIN_LCD_D17_PM1,
	TEGRA_PIN_LCD_D18_PM2,
	TEGRA_PIN_LCD_D19_PM3,
	TEGRA_PIN_LCD_D20_PM4,
	TEGRA_PIN_LCD_D21_PM5,
	TEGRA_PIN_LCD_D22_PM6,
	TEGRA_PIN_LCD_D23_PM7,
	TEGRA_PIN_HDMI_INT_PN7,
	TEGRA_PIN_LCD_CS1_N_PW0,
	TEGRA_PIN_LCD_M1_PW1,
};
static const unsigned drive_owr_pins[] = {
	TEGRA_PIN_OWR,
};
static const unsigned drive_sdio1_pins[] = {
	TEGRA_PIN_SDMMC1_DAT3_PY4,
	TEGRA_PIN_SDMMC1_DAT2_PY5,
	TEGRA_PIN_SDMMC1_DAT1_PY6,
	TEGRA_PIN_SDMMC1_DAT0_PY7,
	TEGRA_PIN_SDMMC1_CLK_PZ0,
	TEGRA_PIN_SDMMC1_CMD_PZ1,
};
static const unsigned drive_sdio2_pins[] = {
	TEGRA_PIN_SDMMC3_DAT5_PD0,
	TEGRA_PIN_SDMMC3_DAT4_PD1,
	TEGRA_PIN_SDMMC3_DAT6_PD3,
	TEGRA_PIN_SDMMC3_DAT7_PD4,
};
static const unsigned drive_sdio3_pins[] = {
	TEGRA_PIN_SDMMC3_CLK_PA6,
	TEGRA_PIN_SDMMC3_CMD_PA7,
	TEGRA_PIN_SDMMC3_DAT3_PB4,
	TEGRA_PIN_SDMMC3_DAT2_PB5,
	TEGRA_PIN_SDMMC3_DAT1_PB6,
	TEGRA_PIN_SDMMC3_DAT0_PB7,
};
static const unsigned drive_spi_pins[] = {
	TEGRA_PIN_SPI2_CS1_N_PW2,
	TEGRA_PIN_SPI2_CS2_N_PW3,
	TEGRA_PIN_SPI2_MOSI_PX0,
	TEGRA_PIN_SPI2_MISO_PX1,
	TEGRA_PIN_SPI2_SCK_PX2,
	TEGRA_PIN_SPI2_CS0_N_PX3,
	TEGRA_PIN_SPI1_MOSI_PX4,
	TEGRA_PIN_SPI1_SCK_PX5,
	TEGRA_PIN_SPI1_CS0_N_PX6,
	TEGRA_PIN_SPI1_MISO_PX7,
};
static const unsigned drive_uaa_pins[] = {
	TEGRA_PIN_ULPI_DATA0_PO1,
	TEGRA_PIN_ULPI_DATA1_PO2,
	TEGRA_PIN_ULPI_DATA2_PO3,
	TEGRA_PIN_ULPI_DATA3_PO4,
};
static const unsigned drive_uab_pins[] = {
	TEGRA_PIN_ULPI_DATA7_PO0,
	TEGRA_PIN_ULPI_DATA4_PO5,
	TEGRA_PIN_ULPI_DATA5_PO6,
	TEGRA_PIN_ULPI_DATA6_PO7,
	TEGRA_PIN_PV0,
	TEGRA_PIN_PV1,
	TEGRA_PIN_PV2,
	TEGRA_PIN_PV3,
};
static const unsigned drive_uart2_pins[] = {
	TEGRA_PIN_UART2_TXD_PC2,
	TEGRA_PIN_UART2_RXD_PC3,
	TEGRA_PIN_UART2_CTS_N_PJ5,
	TEGRA_PIN_UART2_RTS_N_PJ6,
};
static const unsigned drive_uart3_pins[] = {
	TEGRA_PIN_UART3_CTS_N_PA1,
	TEGRA_PIN_UART3_RTS_N_PC0,
	TEGRA_PIN_UART3_TXD_PW6,
	TEGRA_PIN_UART3_RXD_PW7,
};
static const unsigned drive_uda_pins[] = {
	TEGRA_PIN_ULPI_CLK_PY0,
	TEGRA_PIN_ULPI_DIR_PY1,
	TEGRA_PIN_ULPI_NXT_PY2,
	TEGRA_PIN_ULPI_STP_PY3,
};
static const unsigned drive_vi1_pins[] = {
	TEGRA_PIN_VI_D1_PD5,
	TEGRA_PIN_VI_VSYNC_PD6,
	TEGRA_PIN_VI_HSYNC_PD7,
	TEGRA_PIN_VI_D2_PL0,
	TEGRA_PIN_VI_D3_PL1,
	TEGRA_PIN_VI_D4_PL2,
	TEGRA_PIN_VI_D5_PL3,
	TEGRA_PIN_VI_D6_PL4,
	TEGRA_PIN_VI_D7_PL5,
	TEGRA_PIN_VI_D8_PL6,
	TEGRA_PIN_VI_D9_PL7,
	TEGRA_PIN_VI_PCLK_PT0,
	TEGRA_PIN_VI_D10_PT2,
	TEGRA_PIN_VI_D11_PT3,
	TEGRA_PIN_VI_D0_PT4,
};

/*
 * All mux functions selectable on Tegra30 pins.  The values are driver-local
 * identifiers; the register encoding is kept in the per-pin-group tables.
 */
enum tegra_mux {
	TEGRA_MUX_BLINK,
	TEGRA_MUX_CEC,
	TEGRA_MUX_CLK_12M_OUT,
	TEGRA_MUX_CLK_32K_IN,
	TEGRA_MUX_CORE_PWR_REQ,
	TEGRA_MUX_CPU_PWR_REQ,
	TEGRA_MUX_CRT,
	TEGRA_MUX_DAP,
	TEGRA_MUX_DDR,
	TEGRA_MUX_DEV3,
	TEGRA_MUX_DISPLAYA,
	TEGRA_MUX_DISPLAYB,
	TEGRA_MUX_DTV,
	TEGRA_MUX_EXTPERIPH1,
	TEGRA_MUX_EXTPERIPH2,
	TEGRA_MUX_EXTPERIPH3,
	TEGRA_MUX_GMI,
	TEGRA_MUX_GMI_ALT,
	TEGRA_MUX_HDA,
	TEGRA_MUX_HDCP,
	TEGRA_MUX_HDMI,
	TEGRA_MUX_HSI,
	TEGRA_MUX_I2C1,
	TEGRA_MUX_I2C2,
	TEGRA_MUX_I2C3,
	TEGRA_MUX_I2C4,
	TEGRA_MUX_I2CPWR,
	TEGRA_MUX_I2S0,
	TEGRA_MUX_I2S1,
	TEGRA_MUX_I2S2,
	TEGRA_MUX_I2S3,
	TEGRA_MUX_I2S4,
	TEGRA_MUX_INVALID,
	TEGRA_MUX_KBC,
	TEGRA_MUX_MIO,
	TEGRA_MUX_NAND,
	TEGRA_MUX_NAND_ALT,
	TEGRA_MUX_OWR,
	TEGRA_MUX_PCIE,
	TEGRA_MUX_PWM0,
	TEGRA_MUX_PWM1,
	TEGRA_MUX_PWM2,
	TEGRA_MUX_PWM3,
	TEGRA_MUX_PWR_INT_N,
	TEGRA_MUX_RSVD1,
	TEGRA_MUX_RSVD2,
	TEGRA_MUX_RSVD3,
	TEGRA_MUX_RSVD4,
	TEGRA_MUX_RTCK,
	TEGRA_MUX_SATA,
	TEGRA_MUX_SDMMC1,
	TEGRA_MUX_SDMMC2,
	TEGRA_MUX_SDMMC3,
	TEGRA_MUX_SDMMC4,
	TEGRA_MUX_SPDIF,
	TEGRA_MUX_SPI1,
	TEGRA_MUX_SPI2,
	TEGRA_MUX_SPI2_ALT,
	TEGRA_MUX_SPI3,
	TEGRA_MUX_SPI4,
	TEGRA_MUX_SPI5,
	TEGRA_MUX_SPI6,
	TEGRA_MUX_SYSCLK,
	TEGRA_MUX_TEST,
	TEGRA_MUX_TRACE,
	TEGRA_MUX_UARTA,
	TEGRA_MUX_UARTB,
	TEGRA_MUX_UARTC,
	TEGRA_MUX_UARTD,
	TEGRA_MUX_UARTE,
	TEGRA_MUX_ULPI,
	TEGRA_MUX_VGP1,
	TEGRA_MUX_VGP2,
	TEGRA_MUX_VGP3,
	TEGRA_MUX_VGP4,
	TEGRA_MUX_VGP5,
	TEGRA_MUX_VGP6,
	TEGRA_MUX_VI,
	TEGRA_MUX_VI_ALT1,
	TEGRA_MUX_VI_ALT2,
	TEGRA_MUX_VI_ALT3,
};

/*
 * Per-function group lists: the pin groups on which each mux function can be
 * selected.  Entries match the pin-group names registered above.
 */
static const char * const blink_groups[] = {
	"clk_32k_out_pa0",
};
static const char * const cec_groups[] = {
	"hdmi_cec_pee3",
	"owr",
};
static const char * const clk_12m_out_groups[] = {
	"pv3",
};
static const char * const clk_32k_in_groups[] = {
	"clk_32k_in",
};
static const char * const core_pwr_req_groups[] = {
	"core_pwr_req",
};
static const char * const cpu_pwr_req_groups[] = {
	"cpu_pwr_req",
};
static const char * const crt_groups[] = {
	"crt_hsync_pv6",
	"crt_vsync_pv7",
};
static const char * const dap_groups[] = {
	"clk1_req_pee2",
	"clk2_req_pcc5",
};
static const char * const ddr_groups[] = {
	"vi_d0_pt4", "vi_d1_pd5", "vi_d10_pt2", "vi_d11_pt3",
	"vi_d2_pl0", "vi_d3_pl1", "vi_d4_pl2", "vi_d5_pl3",
	"vi_d6_pl4", "vi_d7_pl5", "vi_d8_pl6", "vi_d9_pl7",
	"vi_hsync_pd7", "vi_vsync_pd6",
};
static const char * const dev3_groups[] = {
	"clk3_req_pee1",
};
/* Per-function group lists, continued (display through MIO). */
static const char * const displaya_groups[] = {
	"dap3_din_pp1", "dap3_dout_pp2", "dap3_fs_pp0", "dap3_sclk_pp3",
	"pbb3", "pbb4", "pbb5", "pbb6",
	"lcd_cs0_n_pn4", "lcd_cs1_n_pw0", "lcd_d0_pe0", "lcd_d1_pe1",
	"lcd_d10_pf2", "lcd_d11_pf3", "lcd_d12_pf4", "lcd_d13_pf5",
	"lcd_d14_pf6", "lcd_d15_pf7", "lcd_d16_pm0", "lcd_d17_pm1",
	"lcd_d18_pm2", "lcd_d19_pm3", "lcd_d2_pe2", "lcd_d20_pm4",
	"lcd_d21_pm5", "lcd_d22_pm6", "lcd_d23_pm7", "lcd_d3_pe3",
	"lcd_d4_pe4", "lcd_d5_pe5", "lcd_d6_pe6", "lcd_d7_pe7",
	"lcd_d8_pf0", "lcd_d9_pf1", "lcd_dc0_pn6", "lcd_dc1_pd2",
	"lcd_de_pj1", "lcd_hsync_pj3", "lcd_m1_pw1", "lcd_pclk_pb3",
	"lcd_pwr0_pb2", "lcd_pwr1_pc1", "lcd_pwr2_pc6", "lcd_sck_pz4",
	"lcd_sdin_pz2", "lcd_sdout_pn5", "lcd_vsync_pj4", "lcd_wr_n_pz3",
};
/* Same pin groups as displaya: both display controllers share the pads. */
static const char * const displayb_groups[] = {
	"dap3_din_pp1", "dap3_dout_pp2", "dap3_fs_pp0", "dap3_sclk_pp3",
	"pbb3", "pbb4", "pbb5", "pbb6",
	"lcd_cs0_n_pn4", "lcd_cs1_n_pw0", "lcd_d0_pe0", "lcd_d1_pe1",
	"lcd_d10_pf2", "lcd_d11_pf3", "lcd_d12_pf4", "lcd_d13_pf5",
	"lcd_d14_pf6", "lcd_d15_pf7", "lcd_d16_pm0", "lcd_d17_pm1",
	"lcd_d18_pm2", "lcd_d19_pm3", "lcd_d2_pe2", "lcd_d20_pm4",
	"lcd_d21_pm5", "lcd_d22_pm6", "lcd_d23_pm7", "lcd_d3_pe3",
	"lcd_d4_pe4", "lcd_d5_pe5", "lcd_d6_pe6", "lcd_d7_pe7",
	"lcd_d8_pf0", "lcd_d9_pf1", "lcd_dc0_pn6", "lcd_dc1_pd2",
	"lcd_de_pj1", "lcd_hsync_pj3", "lcd_m1_pw1", "lcd_pclk_pb3",
	"lcd_pwr0_pb2", "lcd_pwr1_pc1", "lcd_pwr2_pc6", "lcd_sck_pz4",
	"lcd_sdin_pz2", "lcd_sdout_pn5", "lcd_vsync_pj4", "lcd_wr_n_pz3",
};
static const char * const dtv_groups[] = {
	"gmi_a17_pb0", "gmi_a18_pb1", "gmi_cs0_n_pj0", "gmi_cs1_n_pj2",
};
static const char * const extperiph1_groups[] = {
	"clk1_out_pw4",
};
static const char * const extperiph2_groups[] = {
	"clk2_out_pw5",
};
static const char * const extperiph3_groups[] = {
	"clk3_out_pee0",
};
static const char * const gmi_groups[] = {
	"dap1_din_pn1", "dap1_dout_pn2", "dap1_fs_pn0", "dap1_sclk_pn3",
	"dap2_din_pa4", "dap2_dout_pa5", "dap2_fs_pa2", "dap2_sclk_pa3",
	"dap4_din_pp5", "dap4_dout_pp6", "dap4_fs_pp4", "dap4_sclk_pp7",
	"gen2_i2c_scl_pt5", "gen2_i2c_sda_pt6",
	"gmi_a16_pj7", "gmi_a17_pb0", "gmi_a18_pb1", "gmi_a19_pk7",
	"gmi_ad0_pg0", "gmi_ad1_pg1", "gmi_ad10_ph2", "gmi_ad11_ph3",
	"gmi_ad12_ph4", "gmi_ad13_ph5", "gmi_ad14_ph6", "gmi_ad15_ph7",
	"gmi_ad2_pg2", "gmi_ad3_pg3", "gmi_ad4_pg4", "gmi_ad5_pg5",
	"gmi_ad6_pg6", "gmi_ad7_pg7", "gmi_ad8_ph0", "gmi_ad9_ph1",
	"gmi_adv_n_pk0", "gmi_clk_pk1", "gmi_cs0_n_pj0", "gmi_cs1_n_pj2",
	"gmi_cs2_n_pk3", "gmi_cs3_n_pk4", "gmi_cs4_n_pk2", "gmi_cs6_n_pi3",
	"gmi_cs7_n_pi6", "gmi_dqs_pi2", "gmi_iordy_pi5", "gmi_oe_n_pi1",
	"gmi_rst_n_pi4", "gmi_wait_pi7", "gmi_wp_n_pc7", "gmi_wr_n_pi0",
	"pu0", "pu1", "pu2", "pu3", "pu4", "pu5", "pu6",
	"sdmmc4_clk_pcc4", "sdmmc4_cmd_pt7",
	"sdmmc4_dat0_paa0", "sdmmc4_dat1_paa1", "sdmmc4_dat2_paa2",
	"sdmmc4_dat3_paa3", "sdmmc4_dat4_paa4", "sdmmc4_dat5_paa5",
	"sdmmc4_dat6_paa6", "sdmmc4_dat7_paa7",
	"spi1_cs0_n_px6", "spi1_mosi_px4", "spi1_sck_px5",
	"spi2_cs0_n_px3", "spi2_miso_px1", "spi2_mosi_px0", "spi2_sck_px2",
	"uart2_cts_n_pj5", "uart2_rts_n_pj6",
	"uart3_cts_n_pa1", "uart3_rts_n_pc0", "uart3_rxd_pw7", "uart3_txd_pw6",
};
static const char * const gmi_alt_groups[] = {
	"gmi_a16_pj7", "gmi_cs3_n_pk4", "gmi_cs7_n_pi6", "gmi_wp_n_pc7",
};
static const char * const hda_groups[] = {
	"clk1_req_pee2",
	"dap1_din_pn1", "dap1_dout_pn2", "dap1_fs_pn0", "dap1_sclk_pn3",
	"dap2_din_pa4", "dap2_dout_pa5", "dap2_fs_pa2", "dap2_sclk_pa3",
	"pex_l0_clkreq_n_pdd2", "pex_l0_prsnt_n_pdd0", "pex_l0_rst_n_pdd1",
	"pex_l1_clkreq_n_pdd6", "pex_l1_prsnt_n_pdd4", "pex_l1_rst_n_pdd5",
	"pex_l2_clkreq_n_pcc7", "pex_l2_prsnt_n_pdd7", "pex_l2_rst_n_pcc6",
	"pex_wake_n_pdd3",
	"spdif_in_pk6",
};
static const char * const hdcp_groups[] = {
	"gen2_i2c_scl_pt5", "gen2_i2c_sda_pt6",
	"lcd_pwr0_pb2", "lcd_pwr2_pc6", "lcd_sck_pz4", "lcd_sdout_pn5",
	"lcd_wr_n_pz3",
};
static const char * const hdmi_groups[] = {
	"hdmi_int_pn7",
};
static const char * const hsi_groups[] = {
	"ulpi_data0_po1", "ulpi_data1_po2", "ulpi_data2_po3", "ulpi_data3_po4",
	"ulpi_data4_po5", "ulpi_data5_po6", "ulpi_data6_po7", "ulpi_data7_po0",
};
static const char * const i2c1_groups[] = {
	"gen1_i2c_scl_pc4", "gen1_i2c_sda_pc5",
	"spdif_in_pk6", "spdif_out_pk5",
	"spi2_cs1_n_pw2", "spi2_cs2_n_pw3",
};
static const char * const i2c2_groups[] = {
	"gen2_i2c_scl_pt5", "gen2_i2c_sda_pt6",
};
static const char * const i2c3_groups[] = {
	"cam_i2c_scl_pbb1", "cam_i2c_sda_pbb2",
	"sdmmc4_cmd_pt7", "sdmmc4_dat4_paa4",
};
static const char * const i2c4_groups[] = {
	"ddc_scl_pv4", "ddc_sda_pv5",
};
static const char * const i2cpwr_groups[] = {
	"pwr_i2c_scl_pz6", "pwr_i2c_sda_pz7",
};
static const char * const i2s0_groups[] = {
	"dap1_din_pn1", "dap1_dout_pn2", "dap1_fs_pn0", "dap1_sclk_pn3",
};
static const char * const i2s1_groups[] = {
	"dap2_din_pa4", "dap2_dout_pa5", "dap2_fs_pa2", "dap2_sclk_pa3",
};
static const char * const i2s2_groups[] = {
	"dap3_din_pp1", "dap3_dout_pp2", "dap3_fs_pp0", "dap3_sclk_pp3",
};
static const char * const i2s3_groups[] = {
	"dap4_din_pp5", "dap4_dout_pp6", "dap4_fs_pp4", "dap4_sclk_pp7",
};
static const char * const i2s4_groups[] = {
	"pbb0", "pbb7", "pcc1", "pcc2",
	"sdmmc4_dat4_paa4", "sdmmc4_dat5_paa5", "sdmmc4_dat6_paa6",
	"sdmmc4_dat7_paa7",
};
/* Mux encodings documented as invalid/unused on these groups. */
static const char * const invalid_groups[] = {
	"kb_row3_pr3", "sdmmc4_clk_pcc4",
};
static const char * const kbc_groups[] = {
	"kb_col0_pq0", "kb_col1_pq1", "kb_col2_pq2", "kb_col3_pq3",
	"kb_col4_pq4", "kb_col5_pq5", "kb_col6_pq6", "kb_col7_pq7",
	"kb_row0_pr0", "kb_row1_pr1", "kb_row10_ps2", "kb_row11_ps3",
	"kb_row12_ps4", "kb_row13_ps5", "kb_row14_ps6", "kb_row15_ps7",
	"kb_row2_pr2", "kb_row3_pr3", "kb_row4_pr4", "kb_row5_pr5",
	"kb_row6_pr6", "kb_row7_pr7", "kb_row8_ps0", "kb_row9_ps1",
};
static const char * const mio_groups[] = {
	"kb_col6_pq6", "kb_col7_pq7",
	"kb_row10_ps2", "kb_row11_ps3", "kb_row12_ps4", "kb_row13_ps5",
	"kb_row14_ps6", "kb_row15_ps7", "kb_row6_pr6", "kb_row7_pr7",
	"kb_row8_ps0", "kb_row9_ps1",
};
/* Per-function group lists, continued (NAND through VI alternates). */
static const char * const nand_groups[] = {
	"gmi_ad0_pg0", "gmi_ad1_pg1", "gmi_ad10_ph2", "gmi_ad11_ph3",
	"gmi_ad12_ph4", "gmi_ad13_ph5", "gmi_ad14_ph6", "gmi_ad15_ph7",
	"gmi_ad2_pg2", "gmi_ad3_pg3", "gmi_ad4_pg4", "gmi_ad5_pg5",
	"gmi_ad6_pg6", "gmi_ad7_pg7", "gmi_ad8_ph0", "gmi_ad9_ph1",
	"gmi_adv_n_pk0", "gmi_clk_pk1", "gmi_cs0_n_pj0", "gmi_cs1_n_pj2",
	"gmi_cs2_n_pk3", "gmi_cs3_n_pk4", "gmi_cs4_n_pk2", "gmi_cs6_n_pi3",
	"gmi_cs7_n_pi6", "gmi_dqs_pi2", "gmi_iordy_pi5", "gmi_oe_n_pi1",
	"gmi_rst_n_pi4", "gmi_wait_pi7", "gmi_wp_n_pc7", "gmi_wr_n_pi0",
	"kb_col0_pq0", "kb_col1_pq1", "kb_col2_pq2", "kb_col3_pq3",
	"kb_col4_pq4", "kb_col5_pq5", "kb_col6_pq6", "kb_col7_pq7",
	"kb_row0_pr0", "kb_row1_pr1", "kb_row10_ps2", "kb_row11_ps3",
	"kb_row12_ps4", "kb_row13_ps5", "kb_row14_ps6", "kb_row15_ps7",
	"kb_row2_pr2", "kb_row3_pr3", "kb_row4_pr4", "kb_row5_pr5",
	"kb_row6_pr6", "kb_row7_pr7", "kb_row8_ps0", "kb_row9_ps1",
	"sdmmc4_clk_pcc4", "sdmmc4_cmd_pt7",
};
static const char * const nand_alt_groups[] = {
	"gmi_cs6_n_pi3", "gmi_cs7_n_pi6", "gmi_rst_n_pi4",
};
static const char * const owr_groups[] = {
	"pu0", "pv2", "kb_row5_pr5", "owr",
};
static const char * const pcie_groups[] = {
	"pex_l0_clkreq_n_pdd2", "pex_l0_prsnt_n_pdd0", "pex_l0_rst_n_pdd1",
	"pex_l1_clkreq_n_pdd6", "pex_l1_prsnt_n_pdd4", "pex_l1_rst_n_pdd5",
	"pex_l2_clkreq_n_pcc7", "pex_l2_prsnt_n_pdd7", "pex_l2_rst_n_pcc6",
	"pex_wake_n_pdd3",
};
static const char * const pwm0_groups[] = {
	"gmi_ad8_ph0", "pu3", "sdmmc3_dat3_pb4", "sdmmc3_dat5_pd0",
	"uart3_rts_n_pc0",
};
static const char * const pwm1_groups[] = {
	"gmi_ad9_ph1", "pu4", "sdmmc3_dat2_pb5", "sdmmc3_dat4_pd1",
};
static const char * const pwm2_groups[] = {
	"gmi_ad10_ph2", "pu5", "sdmmc3_clk_pa6",
};
static const char * const pwm3_groups[] = {
	"gmi_ad11_ph3", "pu6", "sdmmc3_cmd_pa7",
};
static const char * const pwr_int_n_groups[] = {
	"pwr_int_n",
};
/* RSVD1..RSVD4: reserved mux encodings; selecting them parks the pin. */
static const char * const rsvd1_groups[] = {
	"gmi_ad0_pg0", "gmi_ad1_pg1", "gmi_ad12_ph4", "gmi_ad13_ph5",
	"gmi_ad14_ph6", "gmi_ad15_ph7", "gmi_ad2_pg2", "gmi_ad3_pg3",
	"gmi_ad4_pg4", "gmi_ad5_pg5", "gmi_ad6_pg6", "gmi_ad7_pg7",
	"gmi_adv_n_pk0", "gmi_clk_pk1", "gmi_cs0_n_pj0", "gmi_cs1_n_pj2",
	"gmi_cs2_n_pk3", "gmi_cs3_n_pk4", "gmi_cs4_n_pk2", "gmi_dqs_pi2",
	"gmi_iordy_pi5", "gmi_oe_n_pi1", "gmi_wait_pi7", "gmi_wp_n_pc7",
	"gmi_wr_n_pi0", "pu1", "pu2", "pv0", "pv1",
	"sdmmc3_dat0_pb7", "sdmmc3_dat1_pb6", "sdmmc3_dat2_pb5",
	"sdmmc3_dat3_pb4", "vi_pclk_pt0",
};
static const char * const rsvd2_groups[] = {
	"clk1_out_pw4", "clk2_out_pw5", "clk2_req_pcc5", "clk3_out_pee0",
	"clk3_req_pee1", "clk_32k_in", "clk_32k_out_pa0", "core_pwr_req",
	"cpu_pwr_req", "crt_hsync_pv6", "crt_vsync_pv7",
	"dap3_din_pp1", "dap3_dout_pp2", "dap3_fs_pp0", "dap3_sclk_pp3",
	"dap4_din_pp5", "dap4_dout_pp6", "dap4_fs_pp4", "dap4_sclk_pp7",
	"ddc_scl_pv4", "ddc_sda_pv5", "gen1_i2c_scl_pc4", "gen1_i2c_sda_pc5",
	"pbb0", "pbb7", "pcc1", "pcc2", "pv0", "pv1", "pv2", "pv3",
	"hdmi_cec_pee3", "hdmi_int_pn7", "jtag_rtck_pu7",
	"pwr_i2c_scl_pz6", "pwr_i2c_sda_pz7", "pwr_int_n",
	"sdmmc1_clk_pz0", "sdmmc1_cmd_pz1", "sdmmc1_dat0_py7",
	"sdmmc1_dat1_py6", "sdmmc1_dat2_py5", "sdmmc1_dat3_py4",
	"sdmmc3_dat0_pb7", "sdmmc3_dat1_pb6", "sdmmc4_rst_n_pcc3",
	"spdif_out_pk5", "sys_clk_req_pz5",
	"uart3_cts_n_pa1", "uart3_rxd_pw7", "uart3_txd_pw6",
	"ulpi_clk_py0", "ulpi_dir_py1", "ulpi_nxt_py2", "ulpi_stp_py3",
	"vi_d0_pt4", "vi_d10_pt2", "vi_d11_pt3", "vi_hsync_pd7",
	"vi_vsync_pd6",
};
static const char * const rsvd3_groups[] = {
	"cam_i2c_scl_pbb1", "cam_i2c_sda_pbb2", "clk1_out_pw4",
	"clk1_req_pee2", "clk2_out_pw5", "clk2_req_pcc5", "clk3_out_pee0",
	"clk3_req_pee1", "clk_32k_in", "clk_32k_out_pa0", "core_pwr_req",
	"cpu_pwr_req", "crt_hsync_pv6", "crt_vsync_pv7",
	"dap2_din_pa4", "dap2_dout_pa5", "dap2_fs_pa2", "dap2_sclk_pa3",
	"ddc_scl_pv4", "ddc_sda_pv5", "gen1_i2c_scl_pc4", "gen1_i2c_sda_pc5",
	"pbb0", "pbb7", "pcc1", "pcc2", "pv0", "pv1", "pv2", "pv3",
	"hdmi_cec_pee3", "hdmi_int_pn7", "jtag_rtck_pu7",
	"kb_row0_pr0", "kb_row1_pr1", "kb_row2_pr2", "kb_row3_pr3",
	"lcd_d0_pe0", "lcd_d1_pe1", "lcd_d10_pf2", "lcd_d11_pf3",
	"lcd_d12_pf4", "lcd_d13_pf5", "lcd_d14_pf6", "lcd_d15_pf7",
	"lcd_d16_pm0", "lcd_d17_pm1", "lcd_d18_pm2", "lcd_d19_pm3",
	"lcd_d2_pe2", "lcd_d20_pm4", "lcd_d21_pm5", "lcd_d22_pm6",
	"lcd_d23_pm7", "lcd_d3_pe3", "lcd_d4_pe4", "lcd_d5_pe5",
	"lcd_d6_pe6", "lcd_d7_pe7", "lcd_d8_pf0", "lcd_d9_pf1",
	"lcd_dc0_pn6", "lcd_dc1_pd2", "lcd_de_pj1", "lcd_hsync_pj3",
	"lcd_m1_pw1", "lcd_pclk_pb3", "lcd_pwr1_pc1", "lcd_vsync_pj4",
	"owr",
	"pex_l0_clkreq_n_pdd2", "pex_l0_prsnt_n_pdd0", "pex_l0_rst_n_pdd1",
	"pex_l1_clkreq_n_pdd6", "pex_l1_prsnt_n_pdd4", "pex_l1_rst_n_pdd5",
	"pex_l2_clkreq_n_pcc7", "pex_l2_prsnt_n_pdd7", "pex_l2_rst_n_pcc6",
	"pex_wake_n_pdd3",
	"pwr_i2c_scl_pz6", "pwr_i2c_sda_pz7", "pwr_int_n",
	"sdmmc1_clk_pz0", "sdmmc1_cmd_pz1", "sdmmc4_rst_n_pcc3",
	"sys_clk_req_pz5",
};
static const char * const rsvd4_groups[] = {
	"clk1_out_pw4", "clk1_req_pee2", "clk2_out_pw5", "clk2_req_pcc5",
	"clk3_out_pee0", "clk3_req_pee1", "clk_32k_in", "clk_32k_out_pa0",
	"core_pwr_req", "cpu_pwr_req", "crt_hsync_pv6", "crt_vsync_pv7",
	"dap4_din_pp5", "dap4_dout_pp6", "dap4_fs_pp4", "dap4_sclk_pp7",
	"ddc_scl_pv4", "ddc_sda_pv5", "gen1_i2c_scl_pc4", "gen1_i2c_sda_pc5",
	"gen2_i2c_scl_pt5", "gen2_i2c_sda_pt6",
	"gmi_a19_pk7", "gmi_ad0_pg0", "gmi_ad1_pg1", "gmi_ad10_ph2",
	"gmi_ad11_ph3", "gmi_ad12_ph4", "gmi_ad13_ph5", "gmi_ad14_ph6",
	"gmi_ad15_ph7", "gmi_ad2_pg2", "gmi_ad3_pg3", "gmi_ad4_pg4",
	"gmi_ad5_pg5", "gmi_ad6_pg6", "gmi_ad7_pg7", "gmi_ad8_ph0",
	"gmi_ad9_ph1", "gmi_adv_n_pk0", "gmi_clk_pk1", "gmi_cs2_n_pk3",
	"gmi_cs4_n_pk2", "gmi_dqs_pi2", "gmi_iordy_pi5", "gmi_oe_n_pi1",
	"gmi_rst_n_pi4", "gmi_wait_pi7", "gmi_wr_n_pi0",
	"pcc2", "pu0", "pu1", "pu2", "pu3", "pu4", "pu5", "pu6",
	"pv0", "pv1", "pv2", "pv3",
	"hdmi_cec_pee3", "hdmi_int_pn7", "jtag_rtck_pu7",
	"kb_col2_pq2", "kb_col3_pq3", "kb_col4_pq4", "kb_col5_pq5",
	"kb_row0_pr0", "kb_row1_pr1", "kb_row2_pr2", "kb_row4_pr4",
	"lcd_cs0_n_pn4", "lcd_cs1_n_pw0", "lcd_d0_pe0", "lcd_d1_pe1",
	"lcd_d10_pf2", "lcd_d11_pf3", "lcd_d12_pf4", "lcd_d13_pf5",
	"lcd_d14_pf6", "lcd_d15_pf7", "lcd_d16_pm0", "lcd_d17_pm1",
	"lcd_d18_pm2", "lcd_d19_pm3", "lcd_d2_pe2", "lcd_d20_pm4",
	"lcd_d21_pm5", "lcd_d22_pm6", "lcd_d23_pm7", "lcd_d3_pe3",
	"lcd_d4_pe4", "lcd_d5_pe5", "lcd_d6_pe6", "lcd_d7_pe7",
	"lcd_d8_pf0", "lcd_d9_pf1", "lcd_dc0_pn6", "lcd_dc1_pd2",
	"lcd_de_pj1", "lcd_hsync_pj3", "lcd_m1_pw1", "lcd_pclk_pb3",
	"lcd_pwr1_pc1", "lcd_sdin_pz2", "lcd_vsync_pj4",
	"owr",
	"pex_l0_clkreq_n_pdd2", "pex_l0_prsnt_n_pdd0", "pex_l0_rst_n_pdd1",
	"pex_l1_clkreq_n_pdd6", "pex_l1_prsnt_n_pdd4", "pex_l1_rst_n_pdd5",
	"pex_l2_clkreq_n_pcc7", "pex_l2_prsnt_n_pdd7", "pex_l2_rst_n_pcc6",
	"pex_wake_n_pdd3",
	"pwr_i2c_scl_pz6", "pwr_i2c_sda_pz7", "pwr_int_n",
	"spi1_miso_px7", "sys_clk_req_pz5",
	"uart3_cts_n_pa1", "uart3_rts_n_pc0", "uart3_rxd_pw7", "uart3_txd_pw6",
	"vi_d0_pt4", "vi_d1_pd5", "vi_d10_pt2", "vi_d11_pt3",
	"vi_d2_pl0", "vi_d3_pl1", "vi_d4_pl2", "vi_d5_pl3",
	"vi_d6_pl4", "vi_d7_pl5", "vi_d8_pl6", "vi_d9_pl7",
	"vi_hsync_pd7", "vi_pclk_pt0", "vi_vsync_pd6",
};
static const char * const rtck_groups[] = {
	"jtag_rtck_pu7",
};
static const char * const sata_groups[] = {
	"gmi_cs6_n_pi3",
};
static const char * const sdmmc1_groups[] = {
	"sdmmc1_clk_pz0", "sdmmc1_cmd_pz1", "sdmmc1_dat0_py7",
	"sdmmc1_dat1_py6", "sdmmc1_dat2_py5", "sdmmc1_dat3_py4",
};
static const char * const sdmmc2_groups[] = {
	"dap1_din_pn1", "dap1_dout_pn2", "dap1_fs_pn0", "dap1_sclk_pn3",
	"kb_row10_ps2", "kb_row11_ps3", "kb_row12_ps4", "kb_row13_ps5",
	"kb_row14_ps6", "kb_row15_ps7", "kb_row6_pr6", "kb_row7_pr7",
	"kb_row8_ps0", "kb_row9_ps1",
	"spdif_in_pk6", "spdif_out_pk5",
	"vi_d1_pd5", "vi_d2_pl0", "vi_d3_pl1", "vi_d4_pl2",
	"vi_d5_pl3", "vi_d6_pl4", "vi_d7_pl5", "vi_d8_pl6",
	"vi_d9_pl7", "vi_pclk_pt0",
};
static const char * const sdmmc3_groups[] = {
	"sdmmc3_clk_pa6", "sdmmc3_cmd_pa7", "sdmmc3_dat0_pb7",
	"sdmmc3_dat1_pb6", "sdmmc3_dat2_pb5", "sdmmc3_dat3_pb4",
	"sdmmc3_dat4_pd1", "sdmmc3_dat5_pd0", "sdmmc3_dat6_pd3",
	"sdmmc3_dat7_pd4",
};
static const char * const sdmmc4_groups[] = {
	"cam_i2c_scl_pbb1", "cam_i2c_sda_pbb2", "cam_mclk_pcc0",
	"pbb0", "pbb3", "pbb4", "pbb5", "pbb6", "pbb7", "pcc1",
	"sdmmc4_clk_pcc4", "sdmmc4_cmd_pt7",
	"sdmmc4_dat0_paa0", "sdmmc4_dat1_paa1", "sdmmc4_dat2_paa2",
	"sdmmc4_dat3_paa3", "sdmmc4_dat4_paa4", "sdmmc4_dat5_paa5",
	"sdmmc4_dat6_paa6", "sdmmc4_dat7_paa7", "sdmmc4_rst_n_pcc3",
};
static const char * const spdif_groups[] = {
	"sdmmc3_dat6_pd3", "sdmmc3_dat7_pd4", "spdif_in_pk6",
	"spdif_out_pk5", "uart2_rxd_pc3", "uart2_txd_pc2",
};
static const char * const spi1_groups[] = {
	"spi1_cs0_n_px6", "spi1_miso_px7", "spi1_mosi_px4", "spi1_sck_px5",
	"ulpi_clk_py0", "ulpi_dir_py1", "ulpi_nxt_py2", "ulpi_stp_py3",
};
static const char * const spi2_groups[] = {
	"sdmmc3_cmd_pa7", "sdmmc3_dat4_pd1", "sdmmc3_dat5_pd0",
	"sdmmc3_dat6_pd3", "sdmmc3_dat7_pd4",
	"spi1_cs0_n_px6", "spi1_mosi_px4", "spi1_sck_px5",
	"spi2_cs0_n_px3", "spi2_cs1_n_pw2", "spi2_cs2_n_pw3",
	"spi2_miso_px1", "spi2_mosi_px0", "spi2_sck_px2",
	"ulpi_data4_po5", "ulpi_data5_po6", "ulpi_data6_po7",
	"ulpi_data7_po0",
};
static const char * const spi2_alt_groups[] = {
	"spi1_cs0_n_px6", "spi1_miso_px7", "spi1_mosi_px4", "spi1_sck_px5",
	"spi2_cs1_n_pw2", "spi2_cs2_n_pw3",
};
static const char * const spi3_groups[] = {
	"sdmmc3_clk_pa6", "sdmmc3_dat0_pb7", "sdmmc3_dat1_pb6",
	"sdmmc3_dat2_pb5", "sdmmc3_dat3_pb4",
	"sdmmc4_dat0_paa0", "sdmmc4_dat1_paa1", "sdmmc4_dat2_paa2",
	"sdmmc4_dat3_paa3",
	"spi1_miso_px7", "spi2_cs0_n_px3", "spi2_cs1_n_pw2",
	"spi2_cs2_n_pw3", "spi2_miso_px1", "spi2_mosi_px0", "spi2_sck_px2",
	"ulpi_data0_po1", "ulpi_data1_po2", "ulpi_data2_po3",
	"ulpi_data3_po4",
};
static const char * const spi4_groups[] = {
	"gmi_a16_pj7", "gmi_a17_pb0", "gmi_a18_pb1", "gmi_a19_pk7",
	"sdmmc3_dat4_pd1", "sdmmc3_dat5_pd0", "sdmmc3_dat6_pd3",
	"sdmmc3_dat7_pd4",
	"uart2_cts_n_pj5", "uart2_rts_n_pj6", "uart2_rxd_pc3",
	"uart2_txd_pc2",
};
static const char * const spi5_groups[] = {
	"lcd_cs0_n_pn4", "lcd_cs1_n_pw0", "lcd_pwr0_pb2", "lcd_pwr2_pc6",
	"lcd_sck_pz4", "lcd_sdin_pz2", "lcd_sdout_pn5", "lcd_wr_n_pz3",
};
static const char * const spi6_groups[] = {
	"spi2_cs0_n_px3", "spi2_miso_px1", "spi2_mosi_px0", "spi2_sck_px2",
};
static const char * const sysclk_groups[] = {
	"sys_clk_req_pz5",
};
static const char * const test_groups[] = {
	"kb_col0_pq0", "kb_col1_pq1",
};
static const char * const trace_groups[] = {
	"kb_col0_pq0", "kb_col1_pq1", "kb_col2_pq2", "kb_col3_pq3",
	"kb_col4_pq4", "kb_col5_pq5", "kb_col6_pq6", "kb_col7_pq7",
	"kb_row4_pr4", "kb_row5_pr5",
};
static const char * const uarta_groups[] = {
	"pu0", "pu1", "pu2", "pu3", "pu4", "pu5", "pu6",
	"sdmmc1_clk_pz0", "sdmmc1_cmd_pz1", "sdmmc1_dat0_py7",
	"sdmmc1_dat1_py6", "sdmmc1_dat2_py5", "sdmmc1_dat3_py4",
	"sdmmc3_clk_pa6", "sdmmc3_cmd_pa7",
	"uart2_cts_n_pj5", "uart2_rts_n_pj6", "uart2_rxd_pc3",
	"uart2_txd_pc2",
	"ulpi_data0_po1", "ulpi_data1_po2", "ulpi_data2_po3",
	"ulpi_data3_po4", "ulpi_data4_po5", "ulpi_data5_po6",
	"ulpi_data6_po7", "ulpi_data7_po0",
};
static const char * const uartb_groups[] = {
	"uart2_cts_n_pj5", "uart2_rts_n_pj6", "uart2_rxd_pc3",
	"uart2_txd_pc2",
};
static const char * const uartc_groups[] = {
	"uart3_cts_n_pa1", "uart3_rts_n_pc0", "uart3_rxd_pw7",
	"uart3_txd_pw6",
};
static const char * const uartd_groups[] = {
	"gmi_a16_pj7", "gmi_a17_pb0", "gmi_a18_pb1", "gmi_a19_pk7",
	"ulpi_clk_py0", "ulpi_dir_py1", "ulpi_nxt_py2", "ulpi_stp_py3",
};
static const char * const uarte_groups[] = {
	"sdmmc1_dat0_py7", "sdmmc1_dat1_py6", "sdmmc1_dat2_py5",
	"sdmmc1_dat3_py4",
	"sdmmc4_dat0_paa0", "sdmmc4_dat1_paa1", "sdmmc4_dat2_paa2",
	"sdmmc4_dat3_paa3",
};
static const char * const ulpi_groups[] = {
	"ulpi_clk_py0", "ulpi_data0_po1", "ulpi_data1_po2",
	"ulpi_data2_po3", "ulpi_data3_po4", "ulpi_data4_po5",
	"ulpi_data5_po6", "ulpi_data6_po7", "ulpi_data7_po0",
	"ulpi_dir_py1", "ulpi_nxt_py2", "ulpi_stp_py3",
};
static const char * const vgp1_groups[] = {
	"cam_i2c_scl_pbb1",
};
static const char * const vgp2_groups[] = {
	"cam_i2c_sda_pbb2",
};
static const char * const vgp3_groups[] = {
	"pbb3", "sdmmc4_dat5_paa5",
};
static const char * const vgp4_groups[] = {
	"pbb4", "sdmmc4_dat6_paa6",
};
static const char * const vgp5_groups[] = {
	"pbb5", "sdmmc4_dat7_paa7",
};
static const char * const vgp6_groups[] = {
	"pbb6", "sdmmc4_rst_n_pcc3",
};
static const char * const vi_groups[] = {
	"cam_mclk_pcc0",
	"vi_d0_pt4", "vi_d1_pd5", "vi_d10_pt2", "vi_d11_pt3",
	"vi_d2_pl0", "vi_d3_pl1", "vi_d4_pl2", "vi_d5_pl3",
	"vi_d6_pl4", "vi_d7_pl5", "vi_d8_pl6", "vi_d9_pl7",
	"vi_hsync_pd7", "vi_mclk_pt1", "vi_pclk_pt0", "vi_vsync_pd6",
};
static const char * const vi_alt1_groups[] = {
	"cam_mclk_pcc0", "vi_mclk_pt1",
};
static const char * const vi_alt2_groups[] = {
	"vi_mclk_pt1",
};
static const char * const vi_alt3_groups[] = {
	"cam_mclk_pcc0", "vi_mclk_pt1",
};

/*
 * Build one struct tegra_function entry from a function name, pairing it with
 * its <name>_groups array declared above.
 */
#define FUNCTION(fname)					\
	{						\
		.name = #fname,				\
		.groups = fname##_groups,		\
		.ngroups = ARRAY_SIZE(fname##_groups),	\
	}

/* Master function table; continues past the end of this chunk. */
static const struct tegra_function tegra30_functions[] = {
	FUNCTION(blink),
	FUNCTION(cec),
	FUNCTION(clk_12m_out),
	FUNCTION(clk_32k_in),
	FUNCTION(core_pwr_req),
	FUNCTION(cpu_pwr_req),
	FUNCTION(crt),
	FUNCTION(dap),
	FUNCTION(ddr),
	FUNCTION(dev3),
	FUNCTION(displaya),
	FUNCTION(displayb),
	FUNCTION(dtv),
	FUNCTION(extperiph1),
	FUNCTION(extperiph2),
	FUNCTION(extperiph3),
	FUNCTION(gmi),
	FUNCTION(gmi_alt),
	FUNCTION(hda),
	FUNCTION(hdcp),
	FUNCTION(hdmi),
	FUNCTION(hsi),
	FUNCTION(i2c1),
	FUNCTION(i2c2),
	FUNCTION(i2c3),
	FUNCTION(i2c4),
	FUNCTION(i2cpwr),
	FUNCTION(i2s0),
	FUNCTION(i2s1),
	FUNCTION(i2s2),
	FUNCTION(i2s3),
	FUNCTION(i2s4),
	FUNCTION(invalid),
	FUNCTION(kbc),
	FUNCTION(mio),
	FUNCTION(nand),
	FUNCTION(nand_alt),
	FUNCTION(owr),
	FUNCTION(pcie),
	FUNCTION(pwm0),
	FUNCTION(pwm1),
	FUNCTION(pwm2),
	FUNCTION(pwm3),
	FUNCTION(pwr_int_n),
	FUNCTION(rsvd1),
	FUNCTION(rsvd2),
	FUNCTION(rsvd3),
	FUNCTION(rsvd4),
	FUNCTION(rtck),
	FUNCTION(sata),
	FUNCTION(sdmmc1),
	FUNCTION(sdmmc2),
	FUNCTION(sdmmc3),
FUNCTION(sdmmc4), FUNCTION(spdif), FUNCTION(spi1), FUNCTION(spi2), FUNCTION(spi2_alt), FUNCTION(spi3), FUNCTION(spi4), FUNCTION(spi5), FUNCTION(spi6), FUNCTION(sysclk), FUNCTION(test), FUNCTION(trace), FUNCTION(uarta), FUNCTION(uartb), FUNCTION(uartc), FUNCTION(uartd), FUNCTION(uarte), FUNCTION(ulpi), FUNCTION(vgp1), FUNCTION(vgp2), FUNCTION(vgp3), FUNCTION(vgp4), FUNCTION(vgp5), FUNCTION(vgp6), FUNCTION(vi), FUNCTION(vi_alt1), FUNCTION(vi_alt2), FUNCTION(vi_alt3), }; #define DRV_PINGROUP_REG_A 0x868 /* bank 0 */ #define PINGROUP_REG_A 0x3000 /* bank 1 */ #define PINGROUP_REG_Y(r) ((r) - PINGROUP_REG_A) #define PINGROUP_REG_N(r) -1 #define PINGROUP(pg_name, f0, f1, f2, f3, f_safe, r, od, ior) \ { \ .name = #pg_name, \ .pins = pg_name##_pins, \ .npins = ARRAY_SIZE(pg_name##_pins), \ .funcs = { \ TEGRA_MUX_ ## f0, \ TEGRA_MUX_ ## f1, \ TEGRA_MUX_ ## f2, \ TEGRA_MUX_ ## f3, \ }, \ .func_safe = TEGRA_MUX_ ## f_safe, \ .mux_reg = PINGROUP_REG_Y(r), \ .mux_bank = 1, \ .mux_bit = 0, \ .pupd_reg = PINGROUP_REG_Y(r), \ .pupd_bank = 1, \ .pupd_bit = 2, \ .tri_reg = PINGROUP_REG_Y(r), \ .tri_bank = 1, \ .tri_bit = 4, \ .einput_reg = PINGROUP_REG_Y(r), \ .einput_bank = 1, \ .einput_bit = 5, \ .odrain_reg = PINGROUP_REG_##od(r), \ .odrain_bank = 1, \ .odrain_bit = 6, \ .lock_reg = PINGROUP_REG_Y(r), \ .lock_bank = 1, \ .lock_bit = 7, \ .ioreset_reg = PINGROUP_REG_##ior(r), \ .ioreset_bank = 1, \ .ioreset_bit = 8, \ .rcv_sel_reg = -1, \ .drv_reg = -1, \ .drvtype_reg = -1, \ } #define DRV_PINGROUP(pg_name, r, hsm_b, schmitt_b, lpmd_b, \ drvdn_b, drvdn_w, drvup_b, drvup_w, \ slwr_b, slwr_w, slwf_b, slwf_w) \ { \ .name = "drive_" #pg_name, \ .pins = drive_##pg_name##_pins, \ .npins = ARRAY_SIZE(drive_##pg_name##_pins), \ .mux_reg = -1, \ .pupd_reg = -1, \ .tri_reg = -1, \ .einput_reg = -1, \ .odrain_reg = -1, \ .lock_reg = -1, \ .ioreset_reg = -1, \ .rcv_sel_reg = -1, \ .drv_reg = ((r) - DRV_PINGROUP_REG_A), \ .drv_bank = 0, \ .hsm_bit = hsm_b, \ .schmitt_bit = schmitt_b, \ 
.lpmd_bit = lpmd_b, \ .drvdn_bit = drvdn_b, \ .drvdn_width = drvdn_w, \ .drvup_bit = drvup_b, \ .drvup_width = drvup_w, \ .slwr_bit = slwr_b, \ .slwr_width = slwr_w, \ .slwf_bit = slwf_b, \ .slwf_width = slwf_w, \ .drvtype_reg = -1, \ } static const struct tegra_pingroup tegra30_groups[] = { /* pg_name, f0, f1, f2, f3, safe, r, od, ior */ /* FIXME: Fill in correct data in safe column */ PINGROUP(clk_32k_out_pa0, BLINK, RSVD2, RSVD3, RSVD4, RSVD4, 0x331c, N, N), PINGROUP(uart3_cts_n_pa1, UARTC, RSVD2, GMI, RSVD4, RSVD4, 0x317c, N, N), PINGROUP(dap2_fs_pa2, I2S1, HDA, RSVD3, GMI, RSVD3, 0x3358, N, N), PINGROUP(dap2_sclk_pa3, I2S1, HDA, RSVD3, GMI, RSVD3, 0x3364, N, N), PINGROUP(dap2_din_pa4, I2S1, HDA, RSVD3, GMI, RSVD3, 0x335c, N, N), PINGROUP(dap2_dout_pa5, I2S1, HDA, RSVD3, GMI, RSVD3, 0x3360, N, N), PINGROUP(sdmmc3_clk_pa6, UARTA, PWM2, SDMMC3, SPI3, SPI3, 0x3390, N, N), PINGROUP(sdmmc3_cmd_pa7, UARTA, PWM3, SDMMC3, SPI2, SPI2, 0x3394, N, N), PINGROUP(gmi_a17_pb0, UARTD, SPI4, GMI, DTV, DTV, 0x3234, N, N), PINGROUP(gmi_a18_pb1, UARTD, SPI4, GMI, DTV, DTV, 0x3238, N, N), PINGROUP(lcd_pwr0_pb2, DISPLAYA, DISPLAYB, SPI5, HDCP, HDCP, 0x3090, N, N), PINGROUP(lcd_pclk_pb3, DISPLAYA, DISPLAYB, RSVD3, RSVD4, RSVD4, 0x3094, N, N), PINGROUP(sdmmc3_dat3_pb4, RSVD1, PWM0, SDMMC3, SPI3, RSVD1, 0x33a4, N, N), PINGROUP(sdmmc3_dat2_pb5, RSVD1, PWM1, SDMMC3, SPI3, RSVD1, 0x33a0, N, N), PINGROUP(sdmmc3_dat1_pb6, RSVD1, RSVD2, SDMMC3, SPI3, RSVD2, 0x339c, N, N), PINGROUP(sdmmc3_dat0_pb7, RSVD1, RSVD2, SDMMC3, SPI3, RSVD2, 0x3398, N, N), PINGROUP(uart3_rts_n_pc0, UARTC, PWM0, GMI, RSVD4, RSVD4, 0x3180, N, N), PINGROUP(lcd_pwr1_pc1, DISPLAYA, DISPLAYB, RSVD3, RSVD4, RSVD4, 0x3070, N, N), PINGROUP(uart2_txd_pc2, UARTB, SPDIF, UARTA, SPI4, SPI4, 0x3168, N, N), PINGROUP(uart2_rxd_pc3, UARTB, SPDIF, UARTA, SPI4, SPI4, 0x3164, N, N), PINGROUP(gen1_i2c_scl_pc4, I2C1, RSVD2, RSVD3, RSVD4, RSVD4, 0x31a4, Y, N), PINGROUP(gen1_i2c_sda_pc5, I2C1, RSVD2, RSVD3, RSVD4, RSVD4, 0x31a0, Y, N), 
PINGROUP(lcd_pwr2_pc6, DISPLAYA, DISPLAYB, SPI5, HDCP, HDCP, 0x3074, N, N), PINGROUP(gmi_wp_n_pc7, RSVD1, NAND, GMI, GMI_ALT, RSVD1, 0x31c0, N, N), PINGROUP(sdmmc3_dat5_pd0, PWM0, SPI4, SDMMC3, SPI2, SPI2, 0x33ac, N, N), PINGROUP(sdmmc3_dat4_pd1, PWM1, SPI4, SDMMC3, SPI2, SPI2, 0x33a8, N, N), PINGROUP(lcd_dc1_pd2, DISPLAYA, DISPLAYB, RSVD3, RSVD4, RSVD4, 0x310c, N, N), PINGROUP(sdmmc3_dat6_pd3, SPDIF, SPI4, SDMMC3, SPI2, SPI2, 0x33b0, N, N), PINGROUP(sdmmc3_dat7_pd4, SPDIF, SPI4, SDMMC3, SPI2, SPI2, 0x33b4, N, N), PINGROUP(vi_d1_pd5, DDR, SDMMC2, VI, RSVD4, RSVD4, 0x3128, N, Y), PINGROUP(vi_vsync_pd6, DDR, RSVD2, VI, RSVD4, RSVD4, 0x315c, N, Y), PINGROUP(vi_hsync_pd7, DDR, RSVD2, VI, RSVD4, RSVD4, 0x3160, N, Y), PINGROUP(lcd_d0_pe0, DISPLAYA, DISPLAYB, RSVD3, RSVD4, RSVD4, 0x30a4, N, N), PINGROUP(lcd_d1_pe1, DISPLAYA, DISPLAYB, RSVD3, RSVD4, RSVD4, 0x30a8, N, N), PINGROUP(lcd_d2_pe2, DISPLAYA, DISPLAYB, RSVD3, RSVD4, RSVD4, 0x30ac, N, N), PINGROUP(lcd_d3_pe3, DISPLAYA, DISPLAYB, RSVD3, RSVD4, RSVD4, 0x30b0, N, N), PINGROUP(lcd_d4_pe4, DISPLAYA, DISPLAYB, RSVD3, RSVD4, RSVD4, 0x30b4, N, N), PINGROUP(lcd_d5_pe5, DISPLAYA, DISPLAYB, RSVD3, RSVD4, RSVD4, 0x30b8, N, N), PINGROUP(lcd_d6_pe6, DISPLAYA, DISPLAYB, RSVD3, RSVD4, RSVD4, 0x30bc, N, N), PINGROUP(lcd_d7_pe7, DISPLAYA, DISPLAYB, RSVD3, RSVD4, RSVD4, 0x30c0, N, N), PINGROUP(lcd_d8_pf0, DISPLAYA, DISPLAYB, RSVD3, RSVD4, RSVD4, 0x30c4, N, N), PINGROUP(lcd_d9_pf1, DISPLAYA, DISPLAYB, RSVD3, RSVD4, RSVD4, 0x30c8, N, N), PINGROUP(lcd_d10_pf2, DISPLAYA, DISPLAYB, RSVD3, RSVD4, RSVD4, 0x30cc, N, N), PINGROUP(lcd_d11_pf3, DISPLAYA, DISPLAYB, RSVD3, RSVD4, RSVD4, 0x30d0, N, N), PINGROUP(lcd_d12_pf4, DISPLAYA, DISPLAYB, RSVD3, RSVD4, RSVD4, 0x30d4, N, N), PINGROUP(lcd_d13_pf5, DISPLAYA, DISPLAYB, RSVD3, RSVD4, RSVD4, 0x30d8, N, N), PINGROUP(lcd_d14_pf6, DISPLAYA, DISPLAYB, RSVD3, RSVD4, RSVD4, 0x30dc, N, N), PINGROUP(lcd_d15_pf7, DISPLAYA, DISPLAYB, RSVD3, RSVD4, RSVD4, 0x30e0, N, N), PINGROUP(gmi_ad0_pg0, RSVD1, NAND, GMI, 
RSVD4, RSVD4, 0x31f0, N, N), PINGROUP(gmi_ad1_pg1, RSVD1, NAND, GMI, RSVD4, RSVD4, 0x31f4, N, N), PINGROUP(gmi_ad2_pg2, RSVD1, NAND, GMI, RSVD4, RSVD4, 0x31f8, N, N), PINGROUP(gmi_ad3_pg3, RSVD1, NAND, GMI, RSVD4, RSVD4, 0x31fc, N, N), PINGROUP(gmi_ad4_pg4, RSVD1, NAND, GMI, RSVD4, RSVD4, 0x3200, N, N), PINGROUP(gmi_ad5_pg5, RSVD1, NAND, GMI, RSVD4, RSVD4, 0x3204, N, N), PINGROUP(gmi_ad6_pg6, RSVD1, NAND, GMI, RSVD4, RSVD4, 0x3208, N, N), PINGROUP(gmi_ad7_pg7, RSVD1, NAND, GMI, RSVD4, RSVD4, 0x320c, N, N), PINGROUP(gmi_ad8_ph0, PWM0, NAND, GMI, RSVD4, RSVD4, 0x3210, N, N), PINGROUP(gmi_ad9_ph1, PWM1, NAND, GMI, RSVD4, RSVD4, 0x3214, N, N), PINGROUP(gmi_ad10_ph2, PWM2, NAND, GMI, RSVD4, RSVD4, 0x3218, N, N), PINGROUP(gmi_ad11_ph3, PWM3, NAND, GMI, RSVD4, RSVD4, 0x321c, N, N), PINGROUP(gmi_ad12_ph4, RSVD1, NAND, GMI, RSVD4, RSVD4, 0x3220, N, N), PINGROUP(gmi_ad13_ph5, RSVD1, NAND, GMI, RSVD4, RSVD4, 0x3224, N, N), PINGROUP(gmi_ad14_ph6, RSVD1, NAND, GMI, RSVD4, RSVD4, 0x3228, N, N), PINGROUP(gmi_ad15_ph7, RSVD1, NAND, GMI, RSVD4, RSVD4, 0x322c, N, N), PINGROUP(gmi_wr_n_pi0, RSVD1, NAND, GMI, RSVD4, RSVD4, 0x3240, N, N), PINGROUP(gmi_oe_n_pi1, RSVD1, NAND, GMI, RSVD4, RSVD4, 0x3244, N, N), PINGROUP(gmi_dqs_pi2, RSVD1, NAND, GMI, RSVD4, RSVD4, 0x3248, N, N), PINGROUP(gmi_cs6_n_pi3, NAND, NAND_ALT, GMI, SATA, SATA, 0x31e8, N, N), PINGROUP(gmi_rst_n_pi4, NAND, NAND_ALT, GMI, RSVD4, RSVD4, 0x324c, N, N), PINGROUP(gmi_iordy_pi5, RSVD1, NAND, GMI, RSVD4, RSVD4, 0x31c4, N, N), PINGROUP(gmi_cs7_n_pi6, NAND, NAND_ALT, GMI, GMI_ALT, GMI_ALT, 0x31ec, N, N), PINGROUP(gmi_wait_pi7, RSVD1, NAND, GMI, RSVD4, RSVD4, 0x31c8, N, N), PINGROUP(gmi_cs0_n_pj0, RSVD1, NAND, GMI, DTV, RSVD1, 0x31d4, N, N), PINGROUP(lcd_de_pj1, DISPLAYA, DISPLAYB, RSVD3, RSVD4, RSVD4, 0x3098, N, N), PINGROUP(gmi_cs1_n_pj2, RSVD1, NAND, GMI, DTV, RSVD1, 0x31d8, N, N), PINGROUP(lcd_hsync_pj3, DISPLAYA, DISPLAYB, RSVD3, RSVD4, RSVD4, 0x309c, N, N), PINGROUP(lcd_vsync_pj4, DISPLAYA, DISPLAYB, RSVD3, RSVD4, RSVD4, 
0x30a0, N, N), PINGROUP(uart2_cts_n_pj5, UARTA, UARTB, GMI, SPI4, SPI4, 0x3170, N, N), PINGROUP(uart2_rts_n_pj6, UARTA, UARTB, GMI, SPI4, SPI4, 0x316c, N, N), PINGROUP(gmi_a16_pj7, UARTD, SPI4, GMI, GMI_ALT, GMI_ALT, 0x3230, N, N), PINGROUP(gmi_adv_n_pk0, RSVD1, NAND, GMI, RSVD4, RSVD4, 0x31cc, N, N), PINGROUP(gmi_clk_pk1, RSVD1, NAND, GMI, RSVD4, RSVD4, 0x31d0, N, N), PINGROUP(gmi_cs4_n_pk2, RSVD1, NAND, GMI, RSVD4, RSVD4, 0x31e4, N, N), PINGROUP(gmi_cs2_n_pk3, RSVD1, NAND, GMI, RSVD4, RSVD4, 0x31dc, N, N), PINGROUP(gmi_cs3_n_pk4, RSVD1, NAND, GMI, GMI_ALT, RSVD1, 0x31e0, N, N), PINGROUP(spdif_out_pk5, SPDIF, RSVD2, I2C1, SDMMC2, RSVD2, 0x3354, N, N), PINGROUP(spdif_in_pk6, SPDIF, HDA, I2C1, SDMMC2, SDMMC2, 0x3350, N, N), PINGROUP(gmi_a19_pk7, UARTD, SPI4, GMI, RSVD4, RSVD4, 0x323c, N, N), PINGROUP(vi_d2_pl0, DDR, SDMMC2, VI, RSVD4, RSVD4, 0x312c, N, Y), PINGROUP(vi_d3_pl1, DDR, SDMMC2, VI, RSVD4, RSVD4, 0x3130, N, Y), PINGROUP(vi_d4_pl2, DDR, SDMMC2, VI, RSVD4, RSVD4, 0x3134, N, Y), PINGROUP(vi_d5_pl3, DDR, SDMMC2, VI, RSVD4, RSVD4, 0x3138, N, Y), PINGROUP(vi_d6_pl4, DDR, SDMMC2, VI, RSVD4, RSVD4, 0x313c, N, Y), PINGROUP(vi_d7_pl5, DDR, SDMMC2, VI, RSVD4, RSVD4, 0x3140, N, Y), PINGROUP(vi_d8_pl6, DDR, SDMMC2, VI, RSVD4, RSVD4, 0x3144, N, Y), PINGROUP(vi_d9_pl7, DDR, SDMMC2, VI, RSVD4, RSVD4, 0x3148, N, Y), PINGROUP(lcd_d16_pm0, DISPLAYA, DISPLAYB, RSVD3, RSVD4, RSVD4, 0x30e4, N, N), PINGROUP(lcd_d17_pm1, DISPLAYA, DISPLAYB, RSVD3, RSVD4, RSVD4, 0x30e8, N, N), PINGROUP(lcd_d18_pm2, DISPLAYA, DISPLAYB, RSVD3, RSVD4, RSVD4, 0x30ec, N, N), PINGROUP(lcd_d19_pm3, DISPLAYA, DISPLAYB, RSVD3, RSVD4, RSVD4, 0x30f0, N, N), PINGROUP(lcd_d20_pm4, DISPLAYA, DISPLAYB, RSVD3, RSVD4, RSVD4, 0x30f4, N, N), PINGROUP(lcd_d21_pm5, DISPLAYA, DISPLAYB, RSVD3, RSVD4, RSVD4, 0x30f8, N, N), PINGROUP(lcd_d22_pm6, DISPLAYA, DISPLAYB, RSVD3, RSVD4, RSVD4, 0x30fc, N, N), PINGROUP(lcd_d23_pm7, DISPLAYA, DISPLAYB, RSVD3, RSVD4, RSVD4, 0x3100, N, N), PINGROUP(dap1_fs_pn0, I2S0, HDA, GMI, SDMMC2, 
SDMMC2, 0x3338, N, N), PINGROUP(dap1_din_pn1, I2S0, HDA, GMI, SDMMC2, SDMMC2, 0x333c, N, N), PINGROUP(dap1_dout_pn2, I2S0, HDA, GMI, SDMMC2, SDMMC2, 0x3340, N, N), PINGROUP(dap1_sclk_pn3, I2S0, HDA, GMI, SDMMC2, SDMMC2, 0x3344, N, N), PINGROUP(lcd_cs0_n_pn4, DISPLAYA, DISPLAYB, SPI5, RSVD4, RSVD4, 0x3084, N, N), PINGROUP(lcd_sdout_pn5, DISPLAYA, DISPLAYB, SPI5, HDCP, HDCP, 0x307c, N, N), PINGROUP(lcd_dc0_pn6, DISPLAYA, DISPLAYB, RSVD3, RSVD4, RSVD4, 0x3088, N, N), PINGROUP(hdmi_int_pn7, HDMI, RSVD2, RSVD3, RSVD4, RSVD4, 0x3110, N, N), PINGROUP(ulpi_data7_po0, SPI2, HSI, UARTA, ULPI, ULPI, 0x301c, N, N), PINGROUP(ulpi_data0_po1, SPI3, HSI, UARTA, ULPI, ULPI, 0x3000, N, N), PINGROUP(ulpi_data1_po2, SPI3, HSI, UARTA, ULPI, ULPI, 0x3004, N, N), PINGROUP(ulpi_data2_po3, SPI3, HSI, UARTA, ULPI, ULPI, 0x3008, N, N), PINGROUP(ulpi_data3_po4, SPI3, HSI, UARTA, ULPI, ULPI, 0x300c, N, N), PINGROUP(ulpi_data4_po5, SPI2, HSI, UARTA, ULPI, ULPI, 0x3010, N, N), PINGROUP(ulpi_data5_po6, SPI2, HSI, UARTA, ULPI, ULPI, 0x3014, N, N), PINGROUP(ulpi_data6_po7, SPI2, HSI, UARTA, ULPI, ULPI, 0x3018, N, N), PINGROUP(dap3_fs_pp0, I2S2, RSVD2, DISPLAYA, DISPLAYB, RSVD2, 0x3030, N, N), PINGROUP(dap3_din_pp1, I2S2, RSVD2, DISPLAYA, DISPLAYB, RSVD2, 0x3034, N, N), PINGROUP(dap3_dout_pp2, I2S2, RSVD2, DISPLAYA, DISPLAYB, RSVD2, 0x3038, N, N), PINGROUP(dap3_sclk_pp3, I2S2, RSVD2, DISPLAYA, DISPLAYB, RSVD2, 0x303c, N, N), PINGROUP(dap4_fs_pp4, I2S3, RSVD2, GMI, RSVD4, RSVD4, 0x31a8, N, N), PINGROUP(dap4_din_pp5, I2S3, RSVD2, GMI, RSVD4, RSVD4, 0x31ac, N, N), PINGROUP(dap4_dout_pp6, I2S3, RSVD2, GMI, RSVD4, RSVD4, 0x31b0, N, N), PINGROUP(dap4_sclk_pp7, I2S3, RSVD2, GMI, RSVD4, RSVD4, 0x31b4, N, N), PINGROUP(kb_col0_pq0, KBC, NAND, TRACE, TEST, TEST, 0x32fc, N, N), PINGROUP(kb_col1_pq1, KBC, NAND, TRACE, TEST, TEST, 0x3300, N, N), PINGROUP(kb_col2_pq2, KBC, NAND, TRACE, RSVD4, RSVD4, 0x3304, N, N), PINGROUP(kb_col3_pq3, KBC, NAND, TRACE, RSVD4, RSVD4, 0x3308, N, N), PINGROUP(kb_col4_pq4, KBC, NAND, 
TRACE, RSVD4, RSVD4, 0x330c, N, N), PINGROUP(kb_col5_pq5, KBC, NAND, TRACE, RSVD4, RSVD4, 0x3310, N, N), PINGROUP(kb_col6_pq6, KBC, NAND, TRACE, MIO, MIO, 0x3314, N, N), PINGROUP(kb_col7_pq7, KBC, NAND, TRACE, MIO, MIO, 0x3318, N, N), PINGROUP(kb_row0_pr0, KBC, NAND, RSVD3, RSVD4, RSVD4, 0x32bc, N, N), PINGROUP(kb_row1_pr1, KBC, NAND, RSVD3, RSVD4, RSVD4, 0x32c0, N, N), PINGROUP(kb_row2_pr2, KBC, NAND, RSVD3, RSVD4, RSVD4, 0x32c4, N, N), PINGROUP(kb_row3_pr3, KBC, NAND, RSVD3, INVALID, RSVD3, 0x32c8, N, N), PINGROUP(kb_row4_pr4, KBC, NAND, TRACE, RSVD4, RSVD4, 0x32cc, N, N), PINGROUP(kb_row5_pr5, KBC, NAND, TRACE, OWR, OWR, 0x32d0, N, N), PINGROUP(kb_row6_pr6, KBC, NAND, SDMMC2, MIO, MIO, 0x32d4, N, N), PINGROUP(kb_row7_pr7, KBC, NAND, SDMMC2, MIO, MIO, 0x32d8, N, N), PINGROUP(kb_row8_ps0, KBC, NAND, SDMMC2, MIO, MIO, 0x32dc, N, N), PINGROUP(kb_row9_ps1, KBC, NAND, SDMMC2, MIO, MIO, 0x32e0, N, N), PINGROUP(kb_row10_ps2, KBC, NAND, SDMMC2, MIO, MIO, 0x32e4, N, N), PINGROUP(kb_row11_ps3, KBC, NAND, SDMMC2, MIO, MIO, 0x32e8, N, N), PINGROUP(kb_row12_ps4, KBC, NAND, SDMMC2, MIO, MIO, 0x32ec, N, N), PINGROUP(kb_row13_ps5, KBC, NAND, SDMMC2, MIO, MIO, 0x32f0, N, N), PINGROUP(kb_row14_ps6, KBC, NAND, SDMMC2, MIO, MIO, 0x32f4, N, N), PINGROUP(kb_row15_ps7, KBC, NAND, SDMMC2, MIO, MIO, 0x32f8, N, N), PINGROUP(vi_pclk_pt0, RSVD1, SDMMC2, VI, RSVD4, RSVD4, 0x3154, N, Y), PINGROUP(vi_mclk_pt1, VI, VI_ALT1, VI_ALT2, VI_ALT3, VI_ALT3, 0x3158, N, Y), PINGROUP(vi_d10_pt2, DDR, RSVD2, VI, RSVD4, RSVD4, 0x314c, N, Y), PINGROUP(vi_d11_pt3, DDR, RSVD2, VI, RSVD4, RSVD4, 0x3150, N, Y), PINGROUP(vi_d0_pt4, DDR, RSVD2, VI, RSVD4, RSVD4, 0x3124, N, Y), PINGROUP(gen2_i2c_scl_pt5, I2C2, HDCP, GMI, RSVD4, RSVD4, 0x3250, Y, N), PINGROUP(gen2_i2c_sda_pt6, I2C2, HDCP, GMI, RSVD4, RSVD4, 0x3254, Y, N), PINGROUP(sdmmc4_cmd_pt7, I2C3, NAND, GMI, SDMMC4, SDMMC4, 0x325c, N, Y), PINGROUP(pu0, OWR, UARTA, GMI, RSVD4, RSVD4, 0x3184, N, N), PINGROUP(pu1, RSVD1, UARTA, GMI, RSVD4, RSVD4, 0x3188, N, N), 
PINGROUP(pu2, RSVD1, UARTA, GMI, RSVD4, RSVD4, 0x318c, N, N), PINGROUP(pu3, PWM0, UARTA, GMI, RSVD4, RSVD4, 0x3190, N, N), PINGROUP(pu4, PWM1, UARTA, GMI, RSVD4, RSVD4, 0x3194, N, N), PINGROUP(pu5, PWM2, UARTA, GMI, RSVD4, RSVD4, 0x3198, N, N), PINGROUP(pu6, PWM3, UARTA, GMI, RSVD4, RSVD4, 0x319c, N, N), PINGROUP(jtag_rtck_pu7, RTCK, RSVD2, RSVD3, RSVD4, RSVD4, 0x32b0, N, N), PINGROUP(pv0, RSVD1, RSVD2, RSVD3, RSVD4, RSVD4, 0x3040, N, N), PINGROUP(pv1, RSVD1, RSVD2, RSVD3, RSVD4, RSVD4, 0x3044, N, N), PINGROUP(pv2, OWR, RSVD2, RSVD3, RSVD4, RSVD4, 0x3060, N, N), PINGROUP(pv3, CLK_12M_OUT, RSVD2, RSVD3, RSVD4, RSVD4, 0x3064, N, N), PINGROUP(ddc_scl_pv4, I2C4, RSVD2, RSVD3, RSVD4, RSVD4, 0x3114, N, N), PINGROUP(ddc_sda_pv5, I2C4, RSVD2, RSVD3, RSVD4, RSVD4, 0x3118, N, N), PINGROUP(crt_hsync_pv6, CRT, RSVD2, RSVD3, RSVD4, RSVD4, 0x311c, N, N), PINGROUP(crt_vsync_pv7, CRT, RSVD2, RSVD3, RSVD4, RSVD4, 0x3120, N, N), PINGROUP(lcd_cs1_n_pw0, DISPLAYA, DISPLAYB, SPI5, RSVD4, RSVD4, 0x3104, N, N), PINGROUP(lcd_m1_pw1, DISPLAYA, DISPLAYB, RSVD3, RSVD4, RSVD4, 0x3108, N, N), PINGROUP(spi2_cs1_n_pw2, SPI3, SPI2, SPI2_ALT, I2C1, I2C1, 0x3388, N, N), PINGROUP(spi2_cs2_n_pw3, SPI3, SPI2, SPI2_ALT, I2C1, I2C1, 0x338c, N, N), PINGROUP(clk1_out_pw4, EXTPERIPH1, RSVD2, RSVD3, RSVD4, RSVD4, 0x334c, N, N), PINGROUP(clk2_out_pw5, EXTPERIPH2, RSVD2, RSVD3, RSVD4, RSVD4, 0x3068, N, N), PINGROUP(uart3_txd_pw6, UARTC, RSVD2, GMI, RSVD4, RSVD4, 0x3174, N, N), PINGROUP(uart3_rxd_pw7, UARTC, RSVD2, GMI, RSVD4, RSVD4, 0x3178, N, N), PINGROUP(spi2_mosi_px0, SPI6, SPI2, SPI3, GMI, GMI, 0x3368, N, N), PINGROUP(spi2_miso_px1, SPI6, SPI2, SPI3, GMI, GMI, 0x336c, N, N), PINGROUP(spi2_sck_px2, SPI6, SPI2, SPI3, GMI, GMI, 0x3374, N, N), PINGROUP(spi2_cs0_n_px3, SPI6, SPI2, SPI3, GMI, GMI, 0x3370, N, N), PINGROUP(spi1_mosi_px4, SPI2, SPI1, SPI2_ALT, GMI, GMI, 0x3378, N, N), PINGROUP(spi1_sck_px5, SPI2, SPI1, SPI2_ALT, GMI, GMI, 0x337c, N, N), PINGROUP(spi1_cs0_n_px6, SPI2, SPI1, SPI2_ALT, GMI, GMI, 
0x3380, N, N), PINGROUP(spi1_miso_px7, SPI3, SPI1, SPI2_ALT, RSVD4, RSVD4, 0x3384, N, N), PINGROUP(ulpi_clk_py0, SPI1, RSVD2, UARTD, ULPI, RSVD2, 0x3020, N, N), PINGROUP(ulpi_dir_py1, SPI1, RSVD2, UARTD, ULPI, RSVD2, 0x3024, N, N), PINGROUP(ulpi_nxt_py2, SPI1, RSVD2, UARTD, ULPI, RSVD2, 0x3028, N, N), PINGROUP(ulpi_stp_py3, SPI1, RSVD2, UARTD, ULPI, RSVD2, 0x302c, N, N), PINGROUP(sdmmc1_dat3_py4, SDMMC1, RSVD2, UARTE, UARTA, RSVD2, 0x3050, N, N), PINGROUP(sdmmc1_dat2_py5, SDMMC1, RSVD2, UARTE, UARTA, RSVD2, 0x3054, N, N), PINGROUP(sdmmc1_dat1_py6, SDMMC1, RSVD2, UARTE, UARTA, RSVD2, 0x3058, N, N), PINGROUP(sdmmc1_dat0_py7, SDMMC1, RSVD2, UARTE, UARTA, RSVD2, 0x305c, N, N), PINGROUP(sdmmc1_clk_pz0, SDMMC1, RSVD2, RSVD3, UARTA, RSVD3, 0x3048, N, N), PINGROUP(sdmmc1_cmd_pz1, SDMMC1, RSVD2, RSVD3, UARTA, RSVD3, 0x304c, N, N), PINGROUP(lcd_sdin_pz2, DISPLAYA, DISPLAYB, SPI5, RSVD4, RSVD4, 0x3078, N, N), PINGROUP(lcd_wr_n_pz3, DISPLAYA, DISPLAYB, SPI5, HDCP, HDCP, 0x3080, N, N), PINGROUP(lcd_sck_pz4, DISPLAYA, DISPLAYB, SPI5, HDCP, HDCP, 0x308c, N, N), PINGROUP(sys_clk_req_pz5, SYSCLK, RSVD2, RSVD3, RSVD4, RSVD4, 0x3320, N, N), PINGROUP(pwr_i2c_scl_pz6, I2CPWR, RSVD2, RSVD3, RSVD4, RSVD4, 0x32b4, Y, N), PINGROUP(pwr_i2c_sda_pz7, I2CPWR, RSVD2, RSVD3, RSVD4, RSVD4, 0x32b8, Y, N), PINGROUP(sdmmc4_dat0_paa0, UARTE, SPI3, GMI, SDMMC4, SDMMC4, 0x3260, N, Y), PINGROUP(sdmmc4_dat1_paa1, UARTE, SPI3, GMI, SDMMC4, SDMMC4, 0x3264, N, Y), PINGROUP(sdmmc4_dat2_paa2, UARTE, SPI3, GMI, SDMMC4, SDMMC4, 0x3268, N, Y), PINGROUP(sdmmc4_dat3_paa3, UARTE, SPI3, GMI, SDMMC4, SDMMC4, 0x326c, N, Y), PINGROUP(sdmmc4_dat4_paa4, I2C3, I2S4, GMI, SDMMC4, SDMMC4, 0x3270, N, Y), PINGROUP(sdmmc4_dat5_paa5, VGP3, I2S4, GMI, SDMMC4, SDMMC4, 0x3274, N, Y), PINGROUP(sdmmc4_dat6_paa6, VGP4, I2S4, GMI, SDMMC4, SDMMC4, 0x3278, N, Y), PINGROUP(sdmmc4_dat7_paa7, VGP5, I2S4, GMI, SDMMC4, SDMMC4, 0x327c, N, Y), PINGROUP(pbb0, I2S4, RSVD2, RSVD3, SDMMC4, RSVD3, 0x328c, N, N), PINGROUP(cam_i2c_scl_pbb1, VGP1, 
I2C3, RSVD3, SDMMC4, RSVD3, 0x3290, Y, N), PINGROUP(cam_i2c_sda_pbb2, VGP2, I2C3, RSVD3, SDMMC4, RSVD3, 0x3294, Y, N), PINGROUP(pbb3, VGP3, DISPLAYA, DISPLAYB, SDMMC4, SDMMC4, 0x3298, N, N), PINGROUP(pbb4, VGP4, DISPLAYA, DISPLAYB, SDMMC4, SDMMC4, 0x329c, N, N), PINGROUP(pbb5, VGP5, DISPLAYA, DISPLAYB, SDMMC4, SDMMC4, 0x32a0, N, N), PINGROUP(pbb6, VGP6, DISPLAYA, DISPLAYB, SDMMC4, SDMMC4, 0x32a4, N, N), PINGROUP(pbb7, I2S4, RSVD2, RSVD3, SDMMC4, RSVD3, 0x32a8, N, N), PINGROUP(cam_mclk_pcc0, VI, VI_ALT1, VI_ALT3, SDMMC4, SDMMC4, 0x3284, N, N), PINGROUP(pcc1, I2S4, RSVD2, RSVD3, SDMMC4, RSVD3, 0x3288, N, N), PINGROUP(pcc2, I2S4, RSVD2, RSVD3, RSVD4, RSVD4, 0x32ac, N, N), PINGROUP(sdmmc4_rst_n_pcc3, VGP6, RSVD2, RSVD3, SDMMC4, RSVD3, 0x3280, N, Y), PINGROUP(sdmmc4_clk_pcc4, INVALID, NAND, GMI, SDMMC4, SDMMC4, 0x3258, N, Y), PINGROUP(clk2_req_pcc5, DAP, RSVD2, RSVD3, RSVD4, RSVD4, 0x306c, N, N), PINGROUP(pex_l2_rst_n_pcc6, PCIE, HDA, RSVD3, RSVD4, RSVD4, 0x33d8, N, N), PINGROUP(pex_l2_clkreq_n_pcc7, PCIE, HDA, RSVD3, RSVD4, RSVD4, 0x33dc, N, N), PINGROUP(pex_l0_prsnt_n_pdd0, PCIE, HDA, RSVD3, RSVD4, RSVD4, 0x33b8, N, N), PINGROUP(pex_l0_rst_n_pdd1, PCIE, HDA, RSVD3, RSVD4, RSVD4, 0x33bc, N, N), PINGROUP(pex_l0_clkreq_n_pdd2, PCIE, HDA, RSVD3, RSVD4, RSVD4, 0x33c0, N, N), PINGROUP(pex_wake_n_pdd3, PCIE, HDA, RSVD3, RSVD4, RSVD4, 0x33c4, N, N), PINGROUP(pex_l1_prsnt_n_pdd4, PCIE, HDA, RSVD3, RSVD4, RSVD4, 0x33c8, N, N), PINGROUP(pex_l1_rst_n_pdd5, PCIE, HDA, RSVD3, RSVD4, RSVD4, 0x33cc, N, N), PINGROUP(pex_l1_clkreq_n_pdd6, PCIE, HDA, RSVD3, RSVD4, RSVD4, 0x33d0, N, N), PINGROUP(pex_l2_prsnt_n_pdd7, PCIE, HDA, RSVD3, RSVD4, RSVD4, 0x33d4, N, N), PINGROUP(clk3_out_pee0, EXTPERIPH3, RSVD2, RSVD3, RSVD4, RSVD4, 0x31b8, N, N), PINGROUP(clk3_req_pee1, DEV3, RSVD2, RSVD3, RSVD4, RSVD4, 0x31bc, N, N), PINGROUP(clk1_req_pee2, DAP, HDA, RSVD3, RSVD4, RSVD4, 0x3348, N, N), PINGROUP(hdmi_cec_pee3, CEC, RSVD2, RSVD3, RSVD4, RSVD4, 0x33e0, Y, N), PINGROUP(clk_32k_in, CLK_32K_IN, 
RSVD2, RSVD3, RSVD4, RSVD4, 0x3330, N, N), PINGROUP(core_pwr_req, CORE_PWR_REQ, RSVD2, RSVD3, RSVD4, RSVD4, 0x3324, N, N), PINGROUP(cpu_pwr_req, CPU_PWR_REQ, RSVD2, RSVD3, RSVD4, RSVD4, 0x3328, N, N), PINGROUP(owr, OWR, CEC, RSVD3, RSVD4, RSVD4, 0x3334, N, N), PINGROUP(pwr_int_n, PWR_INT_N, RSVD2, RSVD3, RSVD4, RSVD4, 0x332c, N, N), /* pg_name, r, hsm_b, schmitt_b, lpmd_b, drvdn_b, drvdn_w, drvup_b, drvup_w, slwr_b, slwr_w, slwf_b, slwf_w */ DRV_PINGROUP(ao1, 0x868, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2), DRV_PINGROUP(ao2, 0x86c, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2), DRV_PINGROUP(at1, 0x870, 2, 3, 4, 14, 5, 19, 5, 24, 2, 28, 2), DRV_PINGROUP(at2, 0x874, 2, 3, 4, 14, 5, 19, 5, 24, 2, 28, 2), DRV_PINGROUP(at3, 0x878, 2, 3, 4, 14, 5, 19, 5, 28, 2, 30, 2), DRV_PINGROUP(at4, 0x87c, 2, 3, 4, 14, 5, 19, 5, 28, 2, 30, 2), DRV_PINGROUP(at5, 0x880, 2, 3, 4, 14, 5, 19, 5, 28, 2, 30, 2), DRV_PINGROUP(cdev1, 0x884, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2), DRV_PINGROUP(cdev2, 0x888, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2), DRV_PINGROUP(cec, 0x938, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2), DRV_PINGROUP(crt, 0x8f8, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2), DRV_PINGROUP(csus, 0x88c, -1, -1, -1, 12, 5, 19, 5, 24, 4, 28, 4), DRV_PINGROUP(dap1, 0x890, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2), DRV_PINGROUP(dap2, 0x894, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2), DRV_PINGROUP(dap3, 0x898, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2), DRV_PINGROUP(dap4, 0x89c, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2), DRV_PINGROUP(dbg, 0x8a0, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2), DRV_PINGROUP(ddc, 0x8fc, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2), DRV_PINGROUP(dev3, 0x92c, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2), DRV_PINGROUP(gma, 0x900, -1, -1, -1, 14, 5, 19, 5, 24, 4, 28, 4), DRV_PINGROUP(gmb, 0x904, -1, -1, -1, 14, 5, 19, 5, 24, 4, 28, 4), DRV_PINGROUP(gmc, 0x908, -1, -1, -1, 14, 5, 19, 5, 24, 4, 28, 4), DRV_PINGROUP(gmd, 0x90c, -1, -1, -1, 14, 5, 19, 5, 24, 4, 28, 4), DRV_PINGROUP(gme, 0x910, 2, 3, 4, 14, 5, 19, 5, 28, 2, 30, 2), 
DRV_PINGROUP(gmf, 0x914, 2, 3, 4, 14, 5, 19, 5, 28, 2, 30, 2), DRV_PINGROUP(gmg, 0x918, 2, 3, 4, 14, 5, 19, 5, 28, 2, 30, 2), DRV_PINGROUP(gmh, 0x91c, 2, 3, 4, 14, 5, 19, 5, 28, 2, 30, 2), DRV_PINGROUP(gpv, 0x928, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2), DRV_PINGROUP(lcd1, 0x8a4, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2), DRV_PINGROUP(lcd2, 0x8a8, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2), DRV_PINGROUP(owr, 0x920, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2), DRV_PINGROUP(sdio1, 0x8ec, 2, 3, -1, 12, 7, 20, 7, 28, 2, 30, 2), DRV_PINGROUP(sdio2, 0x8ac, 2, 3, -1, 12, 7, 20, 7, 28, 2, 30, 2), DRV_PINGROUP(sdio3, 0x8b0, 2, 3, -1, 12, 7, 20, 7, 28, 2, 30, 2), DRV_PINGROUP(spi, 0x8b4, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2), DRV_PINGROUP(uaa, 0x8b8, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2), DRV_PINGROUP(uab, 0x8bc, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2), DRV_PINGROUP(uart2, 0x8c0, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2), DRV_PINGROUP(uart3, 0x8c4, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2), DRV_PINGROUP(uda, 0x924, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2), DRV_PINGROUP(vi1, 0x8c8, -1, -1, -1, 14, 5, 19, 5, 24, 4, 28, 4), }; static const struct tegra_pinctrl_soc_data tegra30_pinctrl = { .ngpios = NUM_GPIOS, .pins = tegra30_pins, .npins = ARRAY_SIZE(tegra30_pins), .functions = tegra30_functions, .nfunctions = ARRAY_SIZE(tegra30_functions), .groups = tegra30_groups, .ngroups = ARRAY_SIZE(tegra30_groups), }; static int tegra30_pinctrl_probe(struct platform_device *pdev) { return tegra_pinctrl_probe(pdev, &tegra30_pinctrl); } static struct of_device_id tegra30_pinctrl_of_match[] = { { .compatible = "nvidia,tegra30-pinmux", }, { }, }; static struct platform_driver tegra30_pinctrl_driver = { .driver = { .name = "tegra30-pinctrl", .owner = THIS_MODULE, .of_match_table = tegra30_pinctrl_of_match, }, .probe = tegra30_pinctrl_probe, .remove = tegra_pinctrl_remove, }; static int __init tegra30_pinctrl_init(void) { return platform_driver_register(&tegra30_pinctrl_driver); } arch_initcall(tegra30_pinctrl_init); static void 
__exit tegra30_pinctrl_exit(void) { platform_driver_unregister(&tegra30_pinctrl_driver); } module_exit(tegra30_pinctrl_exit); MODULE_AUTHOR("Stephen Warren <swarren@nvidia.com>"); MODULE_DESCRIPTION("NVIDIA Tegra30 pinctrl driver"); MODULE_LICENSE("GPL v2"); MODULE_DEVICE_TABLE(of, tegra30_pinctrl_of_match);
gpl-2.0
xlfjn/kernel_msm-3.10
drivers/media/platform/exynos4-is/fimc-is-errno.c
2786
10018
/*
 * Samsung Exynos4 SoC series FIMC-IS slave interface driver
 *
 * Error log interface functions
 *
 * Copyright (C) 2011 - 2013 Samsung Electronics Co., Ltd.
 *
 * Authors: Younghwan Joo <yhwan.joo@samsung.com>
 *          Sylwester Nawrocki <s.nawrocki@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include "fimc-is-errno.h"

/*
 * fimc_is_param_strerr - translate a FIMC-IS parameter error code into a
 * human readable message for diagnostic logging.
 *
 * @error: parameter error code reported by the FIMC-IS firmware
 *
 * Returns a pointer to a static string; never NULL ("Unknown" is returned
 * for unrecognized codes).
 */
const char * const fimc_is_param_strerr(unsigned int error)
{
	switch (error) {
	case ERROR_COMMON_CMD:
		return "ERROR_COMMON_CMD: Invalid Command";
	case ERROR_COMMON_PARAMETER:
		return "ERROR_COMMON_PARAMETER: Invalid Parameter";
	case ERROR_COMMON_SETFILE_LOAD:
		return "ERROR_COMMON_SETFILE_LOAD: Illegal Setfile Loading";
	case ERROR_COMMON_SETFILE_ADJUST:
		return "ERROR_COMMON_SETFILE_ADJUST: Setfile isn't adjusted";
	case ERROR_COMMON_SETFILE_INDEX:
		return "ERROR_COMMON_SETFILE_INDEX: Invalid setfile index";
	case ERROR_COMMON_INPUT_PATH:
		return "ERROR_COMMON_INPUT_PATH: Input path can be changed in ready state";
	case ERROR_COMMON_INPUT_INIT:
		return "ERROR_COMMON_INPUT_INIT: IP can not start if input path is not set";
	case ERROR_COMMON_OUTPUT_PATH:
		return "ERROR_COMMON_OUTPUT_PATH: Output path can be changed in ready state (stop)";
	case ERROR_COMMON_OUTPUT_INIT:
		return "ERROR_COMMON_OUTPUT_INIT: IP can not start if output path is not set";
	case ERROR_CONTROL_BYPASS:
		return "ERROR_CONTROL_BYPASS";
	case ERROR_OTF_INPUT_FORMAT:
		return "ERROR_OTF_INPUT_FORMAT: Invalid format (DRC: YUV444, FD: YUV444, 422, 420)";
	case ERROR_OTF_INPUT_WIDTH:
		return "ERROR_OTF_INPUT_WIDTH: Invalid width (DRC: 128~8192, FD: 32~8190)";
	case ERROR_OTF_INPUT_HEIGHT:
		/*
		 * Fixed copy-paste defect: this case previously returned the
		 * BIT_WIDTH message; height limits mirror the DMA height case.
		 */
		return "ERROR_OTF_INPUT_HEIGHT: Invalid height (DRC: 64~8192, FD: 16~8190)";
	case ERROR_OTF_INPUT_BIT_WIDTH:
		return "ERROR_OTF_INPUT_BIT_WIDTH: Invalid bit-width (DRC: 8~12bits, FD: 8bit)";
	case ERROR_DMA_INPUT_WIDTH:
		return "ERROR_DMA_INPUT_WIDTH: Invalid width (DRC: 128~8192, FD: 32~8190)";
	case ERROR_DMA_INPUT_HEIGHT:
		return "ERROR_DMA_INPUT_HEIGHT: Invalid height (DRC: 64~8192, FD: 16~8190)";
	case ERROR_DMA_INPUT_FORMAT:
		return "ERROR_DMA_INPUT_FORMAT: Invalid format (DRC: YUV444 or YUV422, FD: YUV444,422,420)";
	case ERROR_DMA_INPUT_BIT_WIDTH:
		return "ERROR_DMA_INPUT_BIT_WIDTH: Invalid bit-width (DRC: 8~12bits, FD: 8bit)";
	case ERROR_DMA_INPUT_ORDER:
		return "ERROR_DMA_INPUT_ORDER: Invalid order(DRC: YYCbCr,YCbYCr,FD:NO,YYCbCr,YCbYCr,CbCr,CrCb)";
	case ERROR_DMA_INPUT_PLANE:
		/* Fixed typo: "palne" -> "plane" */
		return "ERROR_DMA_INPUT_PLANE: Invalid plane (DRC: 3, FD: 1, 2, 3)";
	case ERROR_OTF_OUTPUT_WIDTH:
		return "ERROR_OTF_OUTPUT_WIDTH: Invalid width (DRC: 128~8192)";
	case ERROR_OTF_OUTPUT_HEIGHT:
		return "ERROR_OTF_OUTPUT_HEIGHT: Invalid height (DRC: 64~8192)";
	case ERROR_OTF_OUTPUT_FORMAT:
		return "ERROR_OTF_OUTPUT_FORMAT: Invalid format (DRC: YUV444)";
	case ERROR_OTF_OUTPUT_BIT_WIDTH:
		return "ERROR_OTF_OUTPUT_BIT_WIDTH: Invalid bit-width (DRC: 8~12bits, FD: 8bit)";
	case ERROR_DMA_OUTPUT_WIDTH:
		return "ERROR_DMA_OUTPUT_WIDTH";
	case ERROR_DMA_OUTPUT_HEIGHT:
		return "ERROR_DMA_OUTPUT_HEIGHT";
	case ERROR_DMA_OUTPUT_FORMAT:
		return "ERROR_DMA_OUTPUT_FORMAT";
	case ERROR_DMA_OUTPUT_BIT_WIDTH:
		return "ERROR_DMA_OUTPUT_BIT_WIDTH";
	case ERROR_DMA_OUTPUT_PLANE:
		return "ERROR_DMA_OUTPUT_PLANE";
	case ERROR_DMA_OUTPUT_ORDER:
		return "ERROR_DMA_OUTPUT_ORDER";

	/* Sensor Error(100~199) */
	case ERROR_SENSOR_I2C_FAIL:
		return "ERROR_SENSOR_I2C_FAIL";
	case ERROR_SENSOR_INVALID_FRAMERATE:
		return "ERROR_SENSOR_INVALID_FRAMERATE";
	case ERROR_SENSOR_INVALID_EXPOSURETIME:
		return "ERROR_SENSOR_INVALID_EXPOSURETIME";
	case ERROR_SENSOR_INVALID_SIZE:
		return "ERROR_SENSOR_INVALID_SIZE";
	case ERROR_SENSOR_INVALID_SETTING:
		return "ERROR_SENSOR_INVALID_SETTING";
	case ERROR_SENSOR_ACTURATOR_INIT_FAIL:
		return "ERROR_SENSOR_ACTURATOR_INIT_FAIL";
	case ERROR_SENSOR_INVALID_AF_POS:
		return "ERROR_SENSOR_INVALID_AF_POS";
	case ERROR_SENSOR_UNSUPPORT_FUNC:
		return "ERROR_SENSOR_UNSUPPORT_FUNC";
	case ERROR_SENSOR_UNSUPPORT_PERI:
		return "ERROR_SENSOR_UNSUPPORT_PERI";
	case ERROR_SENSOR_UNSUPPORT_AF:
		return "ERROR_SENSOR_UNSUPPORT_AF";

	/* ISP Error (200~299) */
	case ERROR_ISP_AF_BUSY:
		return "ERROR_ISP_AF_BUSY";
	case ERROR_ISP_AF_INVALID_COMMAND:
		return "ERROR_ISP_AF_INVALID_COMMAND";
	case ERROR_ISP_AF_INVALID_MODE:
		return "ERROR_ISP_AF_INVALID_MODE";

	/* DRC Error (300~399) */
	/* FD Error (400~499) */
	case ERROR_FD_CONFIG_MAX_NUMBER_STATE:
		return "ERROR_FD_CONFIG_MAX_NUMBER_STATE";
	case ERROR_FD_CONFIG_MAX_NUMBER_INVALID:
		return "ERROR_FD_CONFIG_MAX_NUMBER_INVALID";
	case ERROR_FD_CONFIG_YAW_ANGLE_STATE:
		return "ERROR_FD_CONFIG_YAW_ANGLE_STATE";
	case ERROR_FD_CONFIG_YAW_ANGLE_INVALID:
		/* Fixed: dropped stray trailing "\n"; no other case has one */
		return "ERROR_FD_CONFIG_YAW_ANGLE_INVALID";
	case ERROR_FD_CONFIG_ROLL_ANGLE_STATE:
		return "ERROR_FD_CONFIG_ROLL_ANGLE_STATE";
	case ERROR_FD_CONFIG_ROLL_ANGLE_INVALID:
		return "ERROR_FD_CONFIG_ROLL_ANGLE_INVALID";
	case ERROR_FD_CONFIG_SMILE_MODE_INVALID:
		return "ERROR_FD_CONFIG_SMILE_MODE_INVALID";
	case ERROR_FD_CONFIG_BLINK_MODE_INVALID:
		return "ERROR_FD_CONFIG_BLINK_MODE_INVALID";
	case ERROR_FD_CONFIG_EYES_DETECT_INVALID:
		return "ERROR_FD_CONFIG_EYES_DETECT_INVALID";
	case ERROR_FD_CONFIG_MOUTH_DETECT_INVALID:
		return "ERROR_FD_CONFIG_MOUTH_DETECT_INVALID";
	case ERROR_FD_CONFIG_ORIENTATION_STATE:
		return "ERROR_FD_CONFIG_ORIENTATION_STATE";
	case ERROR_FD_CONFIG_ORIENTATION_INVALID:
		return "ERROR_FD_CONFIG_ORIENTATION_INVALID";
	case ERROR_FD_CONFIG_ORIENTATION_VALUE_INVALID:
		return "ERROR_FD_CONFIG_ORIENTATION_VALUE_INVALID";
	case ERROR_FD_RESULT:
		return "ERROR_FD_RESULT";
	case ERROR_FD_MODE:
		return "ERROR_FD_MODE";
	default:
		return "Unknown";
	}
}

/*
 * fimc_is_strerr - translate a FIMC-IS interface error code into a human
 * readable message; the time-out flag bit is masked off before matching.
 * NOTE: definition continues past this chunk.
 */
const char * const fimc_is_strerr(unsigned int error)
{
	error &= ~IS_ERROR_TIME_OUT_FLAG;

	switch (error) {
	/* General */
	case IS_ERROR_INVALID_COMMAND:
		return "IS_ERROR_INVALID_COMMAND";
	case IS_ERROR_REQUEST_FAIL:
		return "IS_ERROR_REQUEST_FAIL";
	case IS_ERROR_INVALID_SCENARIO:
		return "IS_ERROR_INVALID_SCENARIO";
	case IS_ERROR_INVALID_SENSORID:
return "IS_ERROR_INVALID_SENSORID"; case IS_ERROR_INVALID_MODE_CHANGE: return "IS_ERROR_INVALID_MODE_CHANGE"; case IS_ERROR_INVALID_MAGIC_NUMBER: return "IS_ERROR_INVALID_MAGIC_NUMBER"; case IS_ERROR_INVALID_SETFILE_HDR: return "IS_ERROR_INVALID_SETFILE_HDR"; case IS_ERROR_BUSY: return "IS_ERROR_BUSY"; case IS_ERROR_SET_PARAMETER: return "IS_ERROR_SET_PARAMETER"; case IS_ERROR_INVALID_PATH: return "IS_ERROR_INVALID_PATH"; case IS_ERROR_OPEN_SENSOR_FAIL: return "IS_ERROR_OPEN_SENSOR_FAIL"; case IS_ERROR_ENTRY_MSG_THREAD_DOWN: return "IS_ERROR_ENTRY_MSG_THREAD_DOWN"; case IS_ERROR_ISP_FRAME_END_NOT_DONE: return "IS_ERROR_ISP_FRAME_END_NOT_DONE"; case IS_ERROR_DRC_FRAME_END_NOT_DONE: return "IS_ERROR_DRC_FRAME_END_NOT_DONE"; case IS_ERROR_SCALERC_FRAME_END_NOT_DONE: return "IS_ERROR_SCALERC_FRAME_END_NOT_DONE"; case IS_ERROR_ODC_FRAME_END_NOT_DONE: return "IS_ERROR_ODC_FRAME_END_NOT_DONE"; case IS_ERROR_DIS_FRAME_END_NOT_DONE: return "IS_ERROR_DIS_FRAME_END_NOT_DONE"; case IS_ERROR_TDNR_FRAME_END_NOT_DONE: return "IS_ERROR_TDNR_FRAME_END_NOT_DONE"; case IS_ERROR_SCALERP_FRAME_END_NOT_DONE: return "IS_ERROR_SCALERP_FRAME_END_NOT_DONE"; case IS_ERROR_WAIT_STREAM_OFF_NOT_DONE: return "IS_ERROR_WAIT_STREAM_OFF_NOT_DONE"; case IS_ERROR_NO_MSG_IS_RECEIVED: return "IS_ERROR_NO_MSG_IS_RECEIVED"; case IS_ERROR_SENSOR_MSG_FAIL: return "IS_ERROR_SENSOR_MSG_FAIL"; case IS_ERROR_ISP_MSG_FAIL: return "IS_ERROR_ISP_MSG_FAIL"; case IS_ERROR_DRC_MSG_FAIL: return "IS_ERROR_DRC_MSG_FAIL"; case IS_ERROR_LHFD_MSG_FAIL: return "IS_ERROR_LHFD_MSG_FAIL"; case IS_ERROR_UNKNOWN: return "IS_ERROR_UNKNOWN"; /* Sensor */ case IS_ERROR_SENSOR_PWRDN_FAIL: return "IS_ERROR_SENSOR_PWRDN_FAIL"; /* ISP */ case IS_ERROR_ISP_PWRDN_FAIL: return "IS_ERROR_ISP_PWRDN_FAIL"; case IS_ERROR_ISP_MULTIPLE_INPUT: return "IS_ERROR_ISP_MULTIPLE_INPUT"; case IS_ERROR_ISP_ABSENT_INPUT: return "IS_ERROR_ISP_ABSENT_INPUT"; case IS_ERROR_ISP_ABSENT_OUTPUT: return "IS_ERROR_ISP_ABSENT_OUTPUT"; case 
IS_ERROR_ISP_NONADJACENT_OUTPUT: return "IS_ERROR_ISP_NONADJACENT_OUTPUT"; case IS_ERROR_ISP_FORMAT_MISMATCH: return "IS_ERROR_ISP_FORMAT_MISMATCH"; case IS_ERROR_ISP_WIDTH_MISMATCH: return "IS_ERROR_ISP_WIDTH_MISMATCH"; case IS_ERROR_ISP_HEIGHT_MISMATCH: return "IS_ERROR_ISP_HEIGHT_MISMATCH"; case IS_ERROR_ISP_BITWIDTH_MISMATCH: return "IS_ERROR_ISP_BITWIDTH_MISMATCH"; case IS_ERROR_ISP_FRAME_END_TIME_OUT: return "IS_ERROR_ISP_FRAME_END_TIME_OUT"; /* DRC */ case IS_ERROR_DRC_PWRDN_FAIL: return "IS_ERROR_DRC_PWRDN_FAIL"; case IS_ERROR_DRC_MULTIPLE_INPUT: return "IS_ERROR_DRC_MULTIPLE_INPUT"; case IS_ERROR_DRC_ABSENT_INPUT: return "IS_ERROR_DRC_ABSENT_INPUT"; case IS_ERROR_DRC_NONADJACENT_INPUT: return "IS_ERROR_DRC_NONADJACENT_INPUT"; case IS_ERROR_DRC_ABSENT_OUTPUT: return "IS_ERROR_DRC_ABSENT_OUTPUT"; case IS_ERROR_DRC_NONADJACENT_OUTPUT: return "IS_ERROR_DRC_NONADJACENT_OUTPUT"; case IS_ERROR_DRC_FORMAT_MISMATCH: return "IS_ERROR_DRC_FORMAT_MISMATCH"; case IS_ERROR_DRC_WIDTH_MISMATCH: return "IS_ERROR_DRC_WIDTH_MISMATCH"; case IS_ERROR_DRC_HEIGHT_MISMATCH: return "IS_ERROR_DRC_HEIGHT_MISMATCH"; case IS_ERROR_DRC_BITWIDTH_MISMATCH: return "IS_ERROR_DRC_BITWIDTH_MISMATCH"; case IS_ERROR_DRC_FRAME_END_TIME_OUT: return "IS_ERROR_DRC_FRAME_END_TIME_OUT"; /* FD */ case IS_ERROR_FD_PWRDN_FAIL: return "IS_ERROR_FD_PWRDN_FAIL"; case IS_ERROR_FD_MULTIPLE_INPUT: return "IS_ERROR_FD_MULTIPLE_INPUT"; case IS_ERROR_FD_ABSENT_INPUT: return "IS_ERROR_FD_ABSENT_INPUT"; case IS_ERROR_FD_NONADJACENT_INPUT: return "IS_ERROR_FD_NONADJACENT_INPUT"; case IS_ERROR_LHFD_FRAME_END_TIME_OUT: return "IS_ERROR_LHFD_FRAME_END_TIME_OUT"; default: return "Unknown"; } }
gpl-2.0
viaembedded/springboard-kernel-bsp
fs/nls/nls_base.c
3554
15511
/*
 * linux/fs/nls/nls_base.c
 *
 * Native language support--charsets and unicode translations.
 * By Gordon Chaffee 1996, 1997
 *
 * Unicode based case conversion 1999 by Wolfram Pienkoss
 *
 */

#include <linux/module.h>
#include <linux/string.h>
#include <linux/nls.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/kmod.h>
#include <linux/spinlock.h>
#include <asm/byteorder.h>

/* Built-in identity table; always reachable even if no NLS module loads. */
static struct nls_table default_table;
/* Head of the singly linked list of registered tables; guarded by nls_lock. */
static struct nls_table *tables = &default_table;
static DEFINE_SPINLOCK(nls_lock);

/*
 * Sample implementation from Unicode home page.
 * http://www.stonehand.com/unicode/standard/fss-utf.html
 */
struct utf8_table {
	int     cmask;	/* mask applied to the lead byte */
	int     cval;	/* expected lead-byte pattern after masking */
	int     shift;	/* number of payload bits in the trail bytes */
	long    lmask;	/* largest code point for this sequence length */
	long    lval;	/* smallest code point (rejects overlong forms) */
};

static const struct utf8_table utf8_table[] =
{
	{0x80,  0x00,   0*6,    0x7F,           0,         /* 1 byte sequence */},
	{0xE0,  0xC0,   1*6,    0x7FF,          0x80,      /* 2 byte sequence */},
	{0xF0,  0xE0,   2*6,    0xFFFF,         0x800,     /* 3 byte sequence */},
	{0xF8,  0xF0,   3*6,    0x1FFFFF,       0x10000,   /* 4 byte sequence */},
	{0xFC,  0xF8,   4*6,    0x3FFFFFF,      0x200000,  /* 5 byte sequence */},
	{0xFE,  0xFC,   5*6,    0x7FFFFFFF,     0x4000000, /* 6 byte sequence */},
	{0, /* end of table */}
};

#define UNICODE_MAX	0x0010ffff
#define PLANE_SIZE	0x00010000

#define SURROGATE_MASK	0xfffff800
#define SURROGATE_PAIR	0x0000d800
#define SURROGATE_LOW	0x00000400
#define SURROGATE_BITS	0x000003ff

/*
 * utf8_to_utf32 - decode a single UTF-8 sequence from @s (at most @len
 * bytes) into *@pu. Returns the number of bytes consumed, or -1 for a
 * truncated, malformed, overlong, out-of-range (> U+10FFFF) or
 * surrogate-encoding sequence.
 */
int utf8_to_utf32(const u8 *s, int len, unicode_t *pu)
{
	unsigned long l;
	int c0, c, nc;
	const struct utf8_table *t;

	nc = 0;
	c0 = *s;
	l = c0;
	/* Try each sequence length in turn until the lead byte matches. */
	for (t = utf8_table; t->cmask; t++) {
		nc++;
		if ((c0 & t->cmask) == t->cval) {
			l &= t->lmask;
			/* Reject overlong forms, values beyond the Unicode
			 * range, and UTF-16 surrogate code points. */
			if (l < t->lval || l > UNICODE_MAX ||
					(l & SURROGATE_MASK) == SURROGATE_PAIR)
				return -1;
			*pu = (unicode_t) l;
			return nc;
		}
		if (len <= nc)
			return -1;
		s++;
		/* Continuation byte must be of the form 10xxxxxx. */
		c = (*s ^ 0x80) & 0xFF;
		if (c & 0xC0)
			return -1;
		l = (l << 6) | c;
	}
	return -1;
}
EXPORT_SYMBOL(utf8_to_utf32);

/*
 * utf32_to_utf8 - encode code point @u as UTF-8 into @s (at most @maxlen
 * bytes). Returns the number of bytes written, 0 if @s is NULL, or -1 if
 * @u is not encodable (surrogate or > U+10FFFF) or does not fit.
 */
int utf32_to_utf8(unicode_t u, u8 *s, int maxlen)
{
	unsigned long l;
	int c, nc;
	const struct utf8_table *t;

	if (!s)
		return 0;

	l = u;
	if (l > UNICODE_MAX || (l & SURROGATE_MASK) == SURROGATE_PAIR)
		return -1;

	nc = 0;
	for (t = utf8_table; t->cmask && maxlen; t++, maxlen--) {
		nc++;
		if (l <= t->lmask) {
			/* Emit the lead byte, then the trail bytes from the
			 * most significant payload bits downwards. */
			c = t->shift;
			*s = (u8) (t->cval | (l >> c));
			while (c > 0) {
				c -= 6;
				s++;
				*s = (u8) (0x80 | ((l >> c) & 0x3F));
			}
			return nc;
		}
	}
	return -1;
}
EXPORT_SYMBOL(utf32_to_utf8);

/*
 * utf8s_to_utf16s - convert a NUL- or length-bounded UTF-8 string into
 * native-endian UTF-16 in @pwcs. Supplementary-plane characters are
 * emitted as surrogate pairs. Returns the number of 16-bit units
 * written, or -EINVAL on malformed input.
 *
 * NOTE(review): no bound is taken on the output buffer; the caller must
 * size @pwcs for the worst case (one unit per input byte, plus pairs).
 */
int utf8s_to_utf16s(const u8 *s, int len, wchar_t *pwcs)
{
	u16 *op;
	int size;
	unicode_t u;

	op = pwcs;
	while (*s && len > 0) {
		if (*s & 0x80) {
			size = utf8_to_utf32(s, len, &u);
			if (size < 0)
				return -EINVAL;

			if (u >= PLANE_SIZE) {
				/* Needs a surrogate pair: high then low. */
				u -= PLANE_SIZE;
				*op++ = (wchar_t) (SURROGATE_PAIR |
						((u >> 10) & SURROGATE_BITS));
				*op++ = (wchar_t) (SURROGATE_PAIR |
						SURROGATE_LOW |
						(u & SURROGATE_BITS));
			} else {
				*op++ = (wchar_t) u;
			}
			s += size;
			len -= size;
		} else {
			/* ASCII fast path. */
			*op++ = *s++;
			len--;
		}
	}
	return op - pwcs;
}
EXPORT_SYMBOL(utf8s_to_utf16s);

/* Read one UTF-16 unit honouring the requested byte order. */
static inline unsigned long get_utf16(unsigned c, enum utf16_endian endian)
{
	switch (endian) {
	default:
		return c;
	case UTF16_LITTLE_ENDIAN:
		return __le16_to_cpu(c);
	case UTF16_BIG_ENDIAN:
		return __be16_to_cpu(c);
	}
}

/*
 * utf16s_to_utf8s - convert up to @len UTF-16 units (byte order @endian)
 * into UTF-8 in @s, writing at most @maxlen bytes. Stops at a NUL unit.
 * Unpaired or reversed surrogates are silently skipped. Returns the
 * number of bytes written.
 */
int utf16s_to_utf8s(const wchar_t *pwcs, int len, enum utf16_endian endian,
		u8 *s, int maxlen)
{
	u8 *op;
	int size;
	unsigned long u, v;

	op = s;
	while (len > 0 && maxlen > 0) {
		u = get_utf16(*pwcs, endian);
		if (!u)
			break;
		pwcs++;
		len--;
		if (u > 0x7f) {
			if ((u & SURROGATE_MASK) == SURROGATE_PAIR) {
				if (u & SURROGATE_LOW) {
					/* Ignore character and move on */
					continue;
				}
				if (len <= 0)
					break;
				v = get_utf16(*pwcs, endian);
				if ((v & SURROGATE_MASK) != SURROGATE_PAIR ||
						!(v & SURROGATE_LOW)) {
					/* Ignore character and move on */
					continue;
				}
				/* Combine high+low surrogate into a
				 * supplementary-plane code point. */
				u = PLANE_SIZE + ((u & SURROGATE_BITS) << 10)
						+ (v & SURROGATE_BITS);
				pwcs++;
				len--;
			}
			size = utf32_to_utf8(u, op, maxlen);
			if (size == -1) {
				/* Ignore character and move on */
			} else {
				op += size;
				maxlen -= size;
			}
		} else {
			*op++ = (u8) u;
			maxlen--;
		}
	}
	return op - s;
}
EXPORT_SYMBOL(utf16s_to_utf8s);

/*
 * register_nls - add @nls to the global table list.
 * Returns -EBUSY if the table is already linked (non-NULL ->next) or
 * already present in the list; 0 on success.
 */
int register_nls(struct nls_table * nls)
{
	struct nls_table ** tmp = &tables;

	/* A table already on a list has a non-NULL next pointer. */
	if (nls->next)
		return -EBUSY;

	spin_lock(&nls_lock);
	while (*tmp) {
		if (nls == *tmp) {
			spin_unlock(&nls_lock);
			return -EBUSY;
		}
		tmp = &(*tmp)->next;
	}
	/* Push onto the head of the list. */
	nls->next = tables;
	tables = nls;
	spin_unlock(&nls_lock);
	return 0;
}

/*
 * unregister_nls - unlink @nls from the global table list.
 * Returns 0 on success, -EINVAL if the table was not registered.
 */
int unregister_nls(struct nls_table * nls)
{
	struct nls_table ** tmp = &tables;

	spin_lock(&nls_lock);
	while (*tmp) {
		if (nls == *tmp) {
			*tmp = nls->next;
			spin_unlock(&nls_lock);
			return 0;
		}
		tmp = &(*tmp)->next;
	}
	spin_unlock(&nls_lock);
	return -EINVAL;
}

/*
 * find_nls - look up a registered table by charset name or alias and
 * take a module reference on it. Returns NULL if not found or if the
 * owning module is going away.
 */
static struct nls_table *find_nls(char *charset)
{
	struct nls_table *nls;
	spin_lock(&nls_lock);
	for (nls = tables; nls; nls = nls->next) {
		if (!strcmp(nls->charset, charset))
			break;
		if (nls->alias && !strcmp(nls->alias, charset))
			break;
	}
	if (nls && !try_module_get(nls->owner))
		nls = NULL;
	spin_unlock(&nls_lock);
	return nls;
}

/*
 * load_nls - find a table, demand-loading the "nls_<charset>" module if
 * it is not yet registered. Caller must release with unload_nls().
 */
struct nls_table *load_nls(char *charset)
{
	return try_then_request_module(find_nls(charset), "nls_%s", charset);
}

/* Drop the module reference taken by load_nls(); NULL is a no-op. */
void unload_nls(struct nls_table *nls)
{
	if (nls)
		module_put(nls->owner);
}

/* Identity mapping: byte value n maps to code point U+00nn (Latin-1). */
static const wchar_t charset2uni[256] = {
	/* 0x00*/
	0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007,
	0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f,
	/* 0x10*/
	0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017,
	0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x001f,
	/* 0x20*/
	0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027,
	0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x002f,
	/* 0x30*/
	0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037,
	0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x003e, 0x003f,
	/* 0x40*/
	0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047,
	0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x004f,
	/* 0x50*/
	0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057,
	0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x005f,
	/* 0x60*/
	0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067,
	0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x006f,
	/* 0x70*/
	0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077,
	0x0078, 0x0079, 0x007a, 0x007b, 0x007c, 0x007d, 0x007e, 0x007f,
	/* 0x80*/
	0x0080, 0x0081, 0x0082, 0x0083, 0x0084, 0x0085, 0x0086, 0x0087,
	0x0088, 0x0089, 0x008a, 0x008b, 0x008c, 0x008d, 0x008e, 0x008f,
	/* 0x90*/
	0x0090, 0x0091, 0x0092, 0x0093, 0x0094, 0x0095, 0x0096, 0x0097,
	0x0098, 0x0099, 0x009a, 0x009b, 0x009c, 0x009d, 0x009e, 0x009f,
	/* 0xa0*/
	0x00a0, 0x00a1, 0x00a2, 0x00a3, 0x00a4, 0x00a5, 0x00a6, 0x00a7,
	0x00a8, 0x00a9, 0x00aa, 0x00ab, 0x00ac, 0x00ad, 0x00ae, 0x00af,
	/* 0xb0*/
	0x00b0, 0x00b1, 0x00b2, 0x00b3, 0x00b4, 0x00b5, 0x00b6, 0x00b7,
	0x00b8, 0x00b9, 0x00ba, 0x00bb, 0x00bc, 0x00bd, 0x00be, 0x00bf,
	/* 0xc0*/
	0x00c0, 0x00c1, 0x00c2, 0x00c3, 0x00c4, 0x00c5, 0x00c6, 0x00c7,
	0x00c8, 0x00c9, 0x00ca, 0x00cb, 0x00cc, 0x00cd, 0x00ce, 0x00cf,
	/* 0xd0*/
	0x00d0, 0x00d1, 0x00d2, 0x00d3, 0x00d4, 0x00d5, 0x00d6, 0x00d7,
	0x00d8, 0x00d9, 0x00da, 0x00db, 0x00dc, 0x00dd, 0x00de, 0x00df,
	/* 0xe0*/
	0x00e0, 0x00e1, 0x00e2, 0x00e3, 0x00e4, 0x00e5, 0x00e6, 0x00e7,
	0x00e8, 0x00e9, 0x00ea, 0x00eb, 0x00ec, 0x00ed, 0x00ee, 0x00ef,
	/* 0xf0*/
	0x00f0, 0x00f1, 0x00f2, 0x00f3, 0x00f4, 0x00f5, 0x00f6, 0x00f7,
	0x00f8, 0x00f9, 0x00fa, 0x00fb, 0x00fc, 0x00fd, 0x00fe, 0x00ff,
};

/* Reverse identity map for Unicode page 0x00 (U+0000..U+00FF). */
static const unsigned char page00[256] = {
	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
	0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
	0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */
	0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
	0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */
	0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */
	0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */
	0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
	0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */
	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */
	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */
	0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */
	0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */
	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */
	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */
	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */
};

/* Only page 0x00 is mapped; all other pages are NULL. */
static const unsigned char *const page_uni2charset[256] = {
	page00
};

/* ASCII lower-casing: 0x41-0x5a ('A'-'Z') map to 0x61-0x7a; rest identity. */
static const unsigned char charset2lower[256] = {
	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
	0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x40-0x47 */
	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x48-0x4f */
	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x50-0x57 */
	0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
	0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
	0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */
	0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
	0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */
	0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */
	0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */
	0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
	0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */
	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */
	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */
	0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */
	0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */
	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */
	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */
	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */
};

/* ASCII upper-casing: 0x61-0x7a ('a'-'z') map to 0x41-0x5a; rest identity. */
static const unsigned char charset2upper[256] = {
	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
	0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */
	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */
	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */
	0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
	0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
	0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */
	0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
	0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */
	0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */
	0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */
	0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
	0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */
	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */
	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */
	0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */
	0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */
	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */
	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */
	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */
};

/*
 * uni2char - default table: map a Unicode value to a single byte via
 * page_uni2charset. Returns 1 byte written, -EINVAL if unmappable,
 * -ENAMETOOLONG if @boundlen leaves no room.
 */
static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
{
	const unsigned char *uni2charset;
	unsigned char cl = uni & 0x00ff;
	unsigned char ch = (uni & 0xff00) >> 8;

	if (boundlen <= 0)
		return -ENAMETOOLONG;

	uni2charset = page_uni2charset[ch];
	/* A zero table entry means "no mapping" (except index 0 itself,
	 * which also falls through to -EINVAL here). */
	if (uni2charset && uni2charset[cl])
		out[0] = uni2charset[cl];
	else
		return -EINVAL;
	return 1;
}

/*
 * char2uni - default table: map one byte to its Unicode value.
 * Returns 1 byte consumed, or -EINVAL for an unmapped (zero) entry.
 */
static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
{
	*uni = charset2uni[*rawstring];
	if (*uni == 0x0000)
		return -EINVAL;
	return 1;
}

static struct nls_table default_table = {
	.charset	= "default",
	.uni2char	= uni2char,
	.char2uni	= char2uni,
	.charset2lower	= charset2lower,
	.charset2upper	= charset2upper,
};

/* Returns a simple default translation table */
struct nls_table *load_nls_default(void)
{
	struct nls_table *default_nls;

	/* Prefer the configured default charset; fall back to the
	 * built-in identity table if it cannot be loaded. */
	default_nls = load_nls(CONFIG_NLS_DEFAULT);
	if (default_nls != NULL)
		return default_nls;
	else
		return &default_table;
}

EXPORT_SYMBOL(register_nls);
EXPORT_SYMBOL(unregister_nls);
EXPORT_SYMBOL(unload_nls);
EXPORT_SYMBOL(load_nls);
EXPORT_SYMBOL(load_nls_default);

MODULE_LICENSE("Dual BSD/GPL");
gpl-2.0
AriesVE-DevCon-TEAM/samsung-kernel-msm7x30
arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c
5346
3222
/*
 * Support for MicroBlaze PVR (processor version register)
 *
 * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2007-2009 PetaLogix
 * Copyright (C) 2007 John Williams <john.williams@petalogix.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/init.h>
#include <linux/string.h>
#include <asm/pvr.h>
#include <asm/cpuinfo.h>

/*
 * Helper macro to map between fields in our struct cpuinfo, and
 * the PVR macros in pvr.h.
 */
#define CI(c, p) { ci->c = PVR_##p(pvr); }

/* Report a PVR/device-tree mismatch on whichever console is available
 * this early in boot. */
#if defined(CONFIG_EARLY_PRINTK) && defined(CONFIG_SERIAL_UARTLITE_CONSOLE)
#define err_printk(x) \
	early_printk("ERROR: Microblaze " x "-different for PVR and DTS\n");
#else
#define err_printk(x) \
	printk(KERN_INFO "ERROR: Microblaze " x "-different for PVR and DTS\n");
#endif

/*
 * set_cpuinfo_pvr_full - populate @ci from the CPU's PVR registers.
 * @ci:  cpuinfo structure to fill (may already hold DTS-derived values)
 * @cpu: device-tree CPU node, used only for the timebase frequency
 *
 * Reads the hardware PVR and overwrites each capability field in @ci,
 * logging via err_printk() wherever the PVR value disagrees with what
 * the device tree had previously set. If the version code reads as
 * zero the PVR is considered broken and @ci is left untouched.
 */
void set_cpuinfo_pvr_full(struct cpuinfo *ci, struct device_node *cpu)
{
	struct pvr_s pvr;
	int temp; /* for saving temp value */
	get_pvr(&pvr);

	CI(ver_code, VERSION);
	if (!ci->ver_code) {
		/* Zero version code: PVR unusable, keep DTS settings. */
		printk(KERN_ERR "ERROR: MB has broken PVR regs "
						"-> use DTS setting\n");
		return;
	}

	/* Instruction-set options: compare PVR against the DTS value,
	 * warn on mismatch, then let the PVR win. */
	temp = PVR_USE_BARREL(pvr) | PVR_USE_MSR_INSTR(pvr) |\
		PVR_USE_PCMP_INSTR(pvr) | PVR_USE_DIV(pvr);
	if (ci->use_instr != temp)
		err_printk("BARREL, MSR, PCMP or DIV");
	ci->use_instr = temp;

	temp = PVR_USE_HW_MUL(pvr) | PVR_USE_MUL64(pvr);
	if (ci->use_mult != temp)
		err_printk("HW_MUL");
	ci->use_mult = temp;

	temp = PVR_USE_FPU(pvr) | PVR_USE_FPU2(pvr);
	if (ci->use_fpu != temp)
		err_printk("HW_FPU");
	ci->use_fpu = temp;

	/* Combined mask of all exception sources the core supports. */
	ci->use_exc = PVR_OPCODE_0x0_ILLEGAL(pvr) |\
			PVR_UNALIGNED_EXCEPTION(pvr) |\
			PVR_ILL_OPCODE_EXCEPTION(pvr) |\
			PVR_IOPB_BUS_EXCEPTION(pvr) |\
			PVR_DOPB_BUS_EXCEPTION(pvr) |\
			PVR_DIV_ZERO_EXCEPTION(pvr) |\
			PVR_FPU_EXCEPTION(pvr) |\
			PVR_FSL_EXCEPTION(pvr);

	CI(pvr_user1, USER1);
	CI(pvr_user2, USER2);

	/* MMU and endianness configuration. */
	CI(mmu, USE_MMU);
	CI(mmu_privins, MMU_PRIVINS);
	CI(endian, ENDIAN);

	/* Instruction cache geometry; line length is reported in words,
	 * hence the << 2 to convert to bytes. */
	CI(use_icache, USE_ICACHE);
	CI(icache_tagbits, ICACHE_ADDR_TAG_BITS);
	CI(icache_write, ICACHE_ALLOW_WR);
	ci->icache_line_length = PVR_ICACHE_LINE_LEN(pvr) << 2;
	CI(icache_size, ICACHE_BYTE_SIZE);
	CI(icache_base, ICACHE_BASEADDR);
	CI(icache_high, ICACHE_HIGHADDR);

	/* Data cache geometry, same word-to-byte conversion. */
	CI(use_dcache, USE_DCACHE);
	CI(dcache_tagbits, DCACHE_ADDR_TAG_BITS);
	CI(dcache_write, DCACHE_ALLOW_WR);
	ci->dcache_line_length = PVR_DCACHE_LINE_LEN(pvr) << 2;
	CI(dcache_size, DCACHE_BYTE_SIZE);
	CI(dcache_base, DCACHE_BASEADDR);
	CI(dcache_high, DCACHE_HIGHADDR);

	temp = PVR_DCACHE_USE_WRITEBACK(pvr);
	if (ci->dcache_wb != temp)
		err_printk("DCACHE WB");
	ci->dcache_wb = temp;

	/* Bus interfaces and FSL links. */
	CI(use_dopb, D_OPB);
	CI(use_iopb, I_OPB);
	CI(use_dlmb, D_LMB);
	CI(use_ilmb, I_LMB);
	CI(num_fsl, FSL_LINKS);

	CI(irq_edge, INTERRUPT_IS_EDGE);
	CI(irq_positive, EDGE_IS_POSITIVE);

	CI(area_optimised, AREA_OPTIMISED);

	/* Hardware debug facilities. */
	CI(hw_debug, DEBUG_ENABLED);
	CI(num_pc_brk, NUMBER_OF_PC_BRK);
	CI(num_rd_brk, NUMBER_OF_RD_ADDR_BRK);
	CI(num_wr_brk, NUMBER_OF_WR_ADDR_BRK);

	CI(fpga_family_code, TARGET_FAMILY);

	/* take timebase-frequency from DTS */
	ci->cpu_clock_freq = fcpu(cpu, "timebase-frequency");
}
gpl-2.0
CyanogenMod/android_kernel_samsung_jf
drivers/char/agp/amd-k7-agp.c
8162
15647
/*
 * AMD K7 AGPGART routines.
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/agp_backend.h>
#include <linux/page-flags.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include "agp.h"

/* PCI config-space registers of the Irongate northbridge. */
#define AMD_MMBASE	0x14
#define AMD_APSIZE	0xac
#define AMD_MODECNTL	0xb0
#define AMD_MODECNTL2	0xb2
#define AMD_GARTENABLE	0x02	/* In mmio region (16-bit register) */
#define AMD_ATTBASE	0x04	/* In mmio region (32-bit register) */
#define AMD_TLBFLUSH	0x0c	/* In mmio region (32-bit register) */
#define AMD_CACHEENTRY	0x10	/* In mmio region (32-bit register) */

static struct pci_device_id agp_amdk7_pci_table[];

/* One page of GATT entries, in both its CPU-virtual forms. */
struct amd_page_map {
	unsigned long *real;		/* kernel virtual address */
	unsigned long __iomem *remapped;	/* alias used for writel/readl */
};

/* Driver-private state; the hardware supports a single bridge. */
static struct _amd_irongate_private {
	volatile u8 __iomem *registers;	/* mapped MMIO register window */
	struct amd_page_map **gatt_pages;	/* second-level GATT pages */
	int num_tables;
} amd_irongate_private;

/*
 * Allocate one uncached page for GATT entries and fill it with the
 * scratch page. Returns 0 or -ENOMEM. Every write is followed by a
 * read-back to flush PCI posting.
 *
 * NOTE(review): the set_memory_uc() return value is not checked here.
 */
static int amd_create_page_map(struct amd_page_map *page_map)
{
	int i;

	page_map->real = (unsigned long *) __get_free_page(GFP_KERNEL);
	if (page_map->real == NULL)
		return -ENOMEM;

	set_memory_uc((unsigned long)page_map->real, 1);
	page_map->remapped = page_map->real;

	for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++) {
		writel(agp_bridge->scratch_page, page_map->remapped+i);
		readl(page_map->remapped+i);	/* PCI Posting. */
	}

	return 0;
}

/* Restore the page's cache attribute and free it. */
static void amd_free_page_map(struct amd_page_map *page_map)
{
	set_memory_wb((unsigned long)page_map->real, 1);
	free_page((unsigned long) page_map->real);
}

/* Free every allocated GATT page and the page-pointer array itself. */
static void amd_free_gatt_pages(void)
{
	int i;
	struct amd_page_map **tables;
	struct amd_page_map *entry;

	tables = amd_irongate_private.gatt_pages;
	for (i = 0; i < amd_irongate_private.num_tables; i++) {
		entry = tables[i];
		if (entry != NULL) {
			if (entry->real != NULL)
				amd_free_page_map(entry);
			kfree(entry);
		}
	}
	kfree(tables);
	amd_irongate_private.gatt_pages = NULL;
}

/*
 * Allocate @nr_tables second-level GATT pages. On partial failure,
 * num_tables records how many were attempted so amd_free_gatt_pages()
 * can roll back. Returns 0 or -ENOMEM.
 */
static int amd_create_gatt_pages(int nr_tables)
{
	struct amd_page_map **tables;
	struct amd_page_map *entry;
	int retval = 0;
	int i;

	tables = kzalloc((nr_tables + 1) * sizeof(struct amd_page_map *),GFP_KERNEL);
	if (tables == NULL)
		return -ENOMEM;

	for (i = 0; i < nr_tables; i++) {
		entry = kzalloc(sizeof(struct amd_page_map), GFP_KERNEL);
		tables[i] = entry;
		if (entry == NULL) {
			retval = -ENOMEM;
			break;
		}
		retval = amd_create_page_map(entry);
		if (retval != 0)
			break;
	}
	amd_irongate_private.num_tables = i;
	amd_irongate_private.gatt_pages = tables;

	if (retval != 0)
		amd_free_gatt_pages();

	return retval;
}

/* Since we don't need contiguous memory we just try
 * to get the gatt table once
 */

/* Address decomposition for the two-level GATT: bits 31-22 select the
 * page-directory slot, bits 21-12 the entry within a GATT page. */
#define GET_PAGE_DIR_OFF(addr) (addr >> 22)
#define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr) - \
	GET_PAGE_DIR_OFF(agp_bridge->gart_bus_addr))
#define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12)
#define GET_GATT(addr) (amd_irongate_private.gatt_pages[\
	GET_PAGE_DIR_IDX(addr)]->remapped)

/*
 * Build the page directory plus second-level GATT pages for the current
 * aperture size, point every directory slot at its GATT page (bit 0 =
 * valid), and fill all entries with the scratch page.
 */
static int amd_create_gatt_table(struct agp_bridge_data *bridge)
{
	struct aper_size_info_lvl2 *value;
	struct amd_page_map page_dir;
	unsigned long __iomem *cur_gatt;
	unsigned long addr;
	int retval;
	u32 temp;
	int i;

	value = A_SIZE_LVL2(agp_bridge->current_size);
	retval = amd_create_page_map(&page_dir);
	if (retval != 0)
		return retval;

	/* One GATT page holds 1024 entries. */
	retval = amd_create_gatt_pages(value->num_entries / 1024);
	if (retval != 0) {
		amd_free_page_map(&page_dir);
		return retval;
	}

	agp_bridge->gatt_table_real = (u32 *)page_dir.real;
	agp_bridge->gatt_table = (u32 __iomem *)page_dir.remapped;
	agp_bridge->gatt_bus_addr = virt_to_phys(page_dir.real);

	/* Get the address for the gart region.
	 * This is a bus address even on the alpha, b/c its
	 * used to program the agp master not the cpu
	 */

	pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
	addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
	agp_bridge->gart_bus_addr = addr;

	/* Calculate the agp offset */
	for (i = 0; i < value->num_entries / 1024; i++, addr += 0x00400000) {
		writel(virt_to_phys(amd_irongate_private.gatt_pages[i]->real) | 1,
			page_dir.remapped+GET_PAGE_DIR_OFF(addr));
		readl(page_dir.remapped+GET_PAGE_DIR_OFF(addr));	/* PCI Posting. */
	}

	for (i = 0; i < value->num_entries; i++) {
		addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr;
		cur_gatt = GET_GATT(addr);
		writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr));
		readl(cur_gatt+GET_GATT_OFF(addr));	/* PCI Posting. */
	}

	return 0;
}

/* Tear down both GATT levels created by amd_create_gatt_table(). */
static int amd_free_gatt_table(struct agp_bridge_data *bridge)
{
	struct amd_page_map page_dir;

	page_dir.real = (unsigned long *)agp_bridge->gatt_table_real;
	page_dir.remapped = (unsigned long __iomem *)agp_bridge->gatt_table;

	amd_free_gatt_pages();
	amd_free_page_map(&page_dir);
	return 0;
}

/*
 * Read the aperture-size field from APSIZE and match it against the
 * driver's size table, caching the result in the bridge. Returns the
 * aperture size in MB, or 0 if the register value is unrecognized.
 */
static int amd_irongate_fetch_size(void)
{
	int i;
	u32 temp;
	struct aper_size_info_lvl2 *values;

	pci_read_config_dword(agp_bridge->dev, AMD_APSIZE, &temp);
	temp = (temp & 0x0000000e);
	values = A_SIZE_LVL2(agp_bridge->driver->aperture_sizes);
	for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
		if (temp == values[i].size_value) {
			agp_bridge->previous_size =
			    agp_bridge->current_size = (void *) (values + i);

			agp_bridge->aperture_size_idx = i;
			return values[i].size;
		}
	}

	return 0;
}

/*
 * Program the bridge: map the MMIO window (once), set the GATT base,
 * sync/indexing modes, GART enable bit, aperture size, and flush the
 * TLB. Returns 0 or -ENOMEM if the MMIO ioremap fails.
 */
static int amd_irongate_configure(void)
{
	struct aper_size_info_lvl2 *current_size;
	u32 temp;
	u16 enable_reg;

	current_size = A_SIZE_LVL2(agp_bridge->current_size);

	if (!amd_irongate_private.registers) {
		/* Get the memory mapped registers */
		pci_read_config_dword(agp_bridge->dev, AMD_MMBASE, &temp);
		temp = (temp & PCI_BASE_ADDRESS_MEM_MASK);
		amd_irongate_private.registers = (volatile u8 __iomem *) ioremap(temp, 4096);
		if (!amd_irongate_private.registers)
			return -ENOMEM;
	}

	/* Write out the address of the gatt table */
	writel(agp_bridge->gatt_bus_addr, amd_irongate_private.registers+AMD_ATTBASE);
	readl(amd_irongate_private.registers+AMD_ATTBASE);	/* PCI Posting. */

	/* Write the Sync register */
	pci_write_config_byte(agp_bridge->dev, AMD_MODECNTL, 0x80);

	/* Set indexing mode */
	pci_write_config_byte(agp_bridge->dev, AMD_MODECNTL2, 0x00);

	/* Write the enable register */
	enable_reg = readw(amd_irongate_private.registers+AMD_GARTENABLE);
	enable_reg = (enable_reg | 0x0004);
	writew(enable_reg, amd_irongate_private.registers+AMD_GARTENABLE);
	readw(amd_irongate_private.registers+AMD_GARTENABLE);	/* PCI Posting. */

	/* Write out the size register */
	pci_read_config_dword(agp_bridge->dev, AMD_APSIZE, &temp);
	temp = (((temp & ~(0x0000000e)) | current_size->size_value) | 1);
	pci_write_config_dword(agp_bridge->dev, AMD_APSIZE, temp);

	/* Flush the tlb */
	writel(1, amd_irongate_private.registers+AMD_TLBFLUSH);
	readl(amd_irongate_private.registers+AMD_TLBFLUSH);	/* PCI Posting.*/

	return 0;
}

/* Undo amd_irongate_configure(): disable GART translation, restore the
 * previous aperture size and unmap the MMIO window. */
static void amd_irongate_cleanup(void)
{
	struct aper_size_info_lvl2 *previous_size;
	u32 temp;
	u16 enable_reg;

	previous_size = A_SIZE_LVL2(agp_bridge->previous_size);

	enable_reg = readw(amd_irongate_private.registers+AMD_GARTENABLE);
	enable_reg = (enable_reg & ~(0x0004));
	writew(enable_reg, amd_irongate_private.registers+AMD_GARTENABLE);
	readw(amd_irongate_private.registers+AMD_GARTENABLE);	/* PCI Posting. */

	/* Write back the previous size and disable gart translation */
	pci_read_config_dword(agp_bridge->dev, AMD_APSIZE, &temp);
	temp = ((temp & ~(0x0000000f)) | previous_size->size_value);
	pci_write_config_dword(agp_bridge->dev, AMD_APSIZE, temp);
	iounmap((void __iomem *) amd_irongate_private.registers);
}

/*
 * This routine could be implemented by taking the addresses
 * written to the GATT, and flushing them individually. However
 * currently it just flushes the whole table. Which is probably
 * more efficient, since agp_memory blocks can be a large number of
 * entries.
 */
static void amd_irongate_tlbflush(struct agp_memory *temp)
{
	writel(1, amd_irongate_private.registers+AMD_TLBFLUSH);
	readl(amd_irongate_private.registers+AMD_TLBFLUSH);	/* PCI Posting. */
}

/*
 * Map @mem's pages into the GATT starting at @pg_start. Only the plain
 * memory type (mask type 0) is accepted. Fails with -EBUSY if any
 * target entry is already in use, -EINVAL for a bad type or a range
 * that exceeds the aperture.
 */
static int amd_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
{
	int i, j, num_entries;
	unsigned long __iomem *cur_gatt;
	unsigned long addr;

	num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries;

	if (type != mem->type ||
	    agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type))
		return -EINVAL;

	if ((pg_start + mem->page_count) > num_entries)
		return -EINVAL;

	/* First pass: verify the whole range is free before touching it. */
	j = pg_start;
	while (j < (pg_start + mem->page_count)) {
		addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
		cur_gatt = GET_GATT(addr);
		if (!PGE_EMPTY(agp_bridge, readl(cur_gatt+GET_GATT_OFF(addr))))
			return -EBUSY;
		j++;
	}

	if (!mem->is_flushed) {
		global_cache_flush();
		mem->is_flushed = true;
	}

	/* Second pass: write the masked physical addresses. */
	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
		addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
		cur_gatt = GET_GATT(addr);
		writel(agp_generic_mask_memory(agp_bridge,
					       page_to_phys(mem->pages[i]),
					       mem->type),
		       cur_gatt+GET_GATT_OFF(addr));
		readl(cur_gatt+GET_GATT_OFF(addr));	/* PCI Posting. */
	}
	amd_irongate_tlbflush(mem);
	return 0;
}

/* Unmap @mem's range by rewriting each entry with the scratch page,
 * then flush the GART TLB. */
static int amd_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
{
	int i;
	unsigned long __iomem *cur_gatt;
	unsigned long addr;

	if (type != mem->type ||
	    agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type))
		return -EINVAL;

	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
		addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr;
		cur_gatt = GET_GATT(addr);
		writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr));
		readl(cur_gatt+GET_GATT_OFF(addr));	/* PCI Posting. */
	}

	amd_irongate_tlbflush(mem);
	return 0;
}

/* Aperture sizes: {MB, GATT entries, APSIZE register value}. */
static const struct aper_size_info_lvl2 amd_irongate_sizes[7] =
{
	{2048, 524288, 0x0000000c},
	{1024, 262144, 0x0000000a},
	{512, 131072, 0x00000008},
	{256, 65536, 0x00000006},
	{128, 32768, 0x00000004},
	{64, 16384, 0x00000002},
	{32, 8192, 0x00000000}
};

/* Single mask: bit 0 marks a GATT entry valid. */
static const struct gatt_mask amd_irongate_masks[] =
{
	{.mask = 1, .type = 0}
};

static const struct agp_bridge_driver amd_irongate_driver = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= amd_irongate_sizes,
	.size_type		= LVL2_APER_SIZE,
	.num_aperture_sizes	= 7,
	.needs_scratch_page	= true,
	.configure		= amd_irongate_configure,
	.fetch_size		= amd_irongate_fetch_size,
	.cleanup		= amd_irongate_cleanup,
	.tlb_flush		= amd_irongate_tlbflush,
	.mask_memory		= agp_generic_mask_memory,
	.masks			= amd_irongate_masks,
	.agp_enable		= agp_generic_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= amd_create_gatt_table,
	.free_gatt_table	= amd_free_gatt_table,
	.insert_memory		= amd_insert_memory,
	.remove_memory		= amd_remove_memory,
	.alloc_by_type		= agp_generic_alloc_by_type,
	.free_by_type		= agp_generic_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_alloc_pages	= agp_generic_alloc_pages,
	.agp_destroy_page	= agp_generic_destroy_page,
	.agp_destroy_pages	= agp_generic_destroy_pages,
	.agp_type_to_mask_type	= agp_generic_type_to_mask_type,
};

static struct agp_device_ids amd_agp_device_ids[] __devinitdata =
{
	{
		.device_id =
PCI_DEVICE_ID_AMD_FE_GATE_7006, .chipset_name = "Irongate", }, { .device_id = PCI_DEVICE_ID_AMD_FE_GATE_700E, .chipset_name = "761", }, { .device_id = PCI_DEVICE_ID_AMD_FE_GATE_700C, .chipset_name = "760MP", }, { }, /* dummy final entry, always present */ }; static int __devinit agp_amdk7_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct agp_bridge_data *bridge; u8 cap_ptr; int j; cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP); if (!cap_ptr) return -ENODEV; j = ent - agp_amdk7_pci_table; dev_info(&pdev->dev, "AMD %s chipset\n", amd_agp_device_ids[j].chipset_name); bridge = agp_alloc_bridge(); if (!bridge) return -ENOMEM; bridge->driver = &amd_irongate_driver; bridge->dev_private_data = &amd_irongate_private, bridge->dev = pdev; bridge->capndx = cap_ptr; /* 751 Errata (22564_B-1.PDF) erratum 20: strobe glitch with Nvidia NV10 GeForce cards. system controller may experience noise due to strong drive strengths */ if (agp_bridge->dev->device == PCI_DEVICE_ID_AMD_FE_GATE_7006) { struct pci_dev *gfxcard=NULL; cap_ptr = 0; while (!cap_ptr) { gfxcard = pci_get_class(PCI_CLASS_DISPLAY_VGA<<8, gfxcard); if (!gfxcard) { dev_info(&pdev->dev, "no AGP VGA controller\n"); return -ENODEV; } cap_ptr = pci_find_capability(gfxcard, PCI_CAP_ID_AGP); } /* With so many variants of NVidia cards, it's simpler just to blacklist them all, and then whitelist them as needed (if necessary at all). */ if (gfxcard->vendor == PCI_VENDOR_ID_NVIDIA) { agp_bridge->flags |= AGP_ERRATA_1X; dev_info(&pdev->dev, "AMD 751 chipset with NVidia GeForce; forcing 1X due to errata\n"); } pci_dev_put(gfxcard); } /* 761 Errata (23613_F.pdf) * Revisions B0/B1 were a disaster. * erratum 44: SYSCLK/AGPCLK skew causes 2X failures -- Force mode to 1X * erratum 45: Timing problem prevents fast writes -- Disable fast write. * erratum 46: Setup violation on AGP SBA pins - Disable side band addressing. * With this lot disabled, we should prevent lockups. 
*/ if (agp_bridge->dev->device == PCI_DEVICE_ID_AMD_FE_GATE_700E) { if (pdev->revision == 0x10 || pdev->revision == 0x11) { agp_bridge->flags = AGP_ERRATA_FASTWRITES; agp_bridge->flags |= AGP_ERRATA_SBA; agp_bridge->flags |= AGP_ERRATA_1X; dev_info(&pdev->dev, "AMD 761 chipset with errata; disabling AGP fast writes & SBA and forcing to 1X\n"); } } /* Fill in the mode register */ pci_read_config_dword(pdev, bridge->capndx+PCI_AGP_STATUS, &bridge->mode); pci_set_drvdata(pdev, bridge); return agp_add_bridge(bridge); } static void __devexit agp_amdk7_remove(struct pci_dev *pdev) { struct agp_bridge_data *bridge = pci_get_drvdata(pdev); agp_remove_bridge(bridge); agp_put_bridge(bridge); } #ifdef CONFIG_PM static int agp_amdk7_suspend(struct pci_dev *pdev, pm_message_t state) { pci_save_state(pdev); pci_set_power_state(pdev, pci_choose_state(pdev, state)); return 0; } static int agp_amdk7_resume(struct pci_dev *pdev) { pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); return amd_irongate_driver.configure(); } #endif /* CONFIG_PM */ /* must be the same order as name table above */ static struct pci_device_id agp_amdk7_pci_table[] = { { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, .vendor = PCI_VENDOR_ID_AMD, .device = PCI_DEVICE_ID_AMD_FE_GATE_7006, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, .vendor = PCI_VENDOR_ID_AMD, .device = PCI_DEVICE_ID_AMD_FE_GATE_700E, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, .vendor = PCI_VENDOR_ID_AMD, .device = PCI_DEVICE_ID_AMD_FE_GATE_700C, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, { } }; MODULE_DEVICE_TABLE(pci, agp_amdk7_pci_table); static struct pci_driver agp_amdk7_pci_driver = { .name = "agpgart-amdk7", .id_table = agp_amdk7_pci_table, .probe = agp_amdk7_probe, .remove = agp_amdk7_remove, #ifdef CONFIG_PM .suspend = agp_amdk7_suspend, .resume = agp_amdk7_resume, 
#endif }; static int __init agp_amdk7_init(void) { if (agp_off) return -EINVAL; return pci_register_driver(&agp_amdk7_pci_driver); } static void __exit agp_amdk7_cleanup(void) { pci_unregister_driver(&agp_amdk7_pci_driver); } module_init(agp_amdk7_init); module_exit(agp_amdk7_cleanup); MODULE_LICENSE("GPL and additional rights");
gpl-2.0
ptmr3/GalaxyS2_Kernel
drivers/gpu/drm/via/via_verifier.c
8418
28181
/* * Copyright 2004 The Unichrome Project. All Rights Reserved. * Copyright 2005 Thomas Hellstrom. All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sub license, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial portions * of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL * THE AUTHOR(S), AND/OR THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Author: Thomas Hellstrom 2004, 2005. * This code was written using docs obtained under NDA from VIA Inc. * * Don't run this code directly on an AGP buffer. Due to cache problems it will * be very slow. 
*/ #include "via_3d_reg.h" #include "drmP.h" #include "drm.h" #include "via_drm.h" #include "via_verifier.h" #include "via_drv.h" typedef enum { state_command, state_header2, state_header1, state_vheader5, state_vheader6, state_error } verifier_state_t; typedef enum { no_check = 0, check_for_header2, check_for_header1, check_for_header2_err, check_for_header1_err, check_for_fire, check_z_buffer_addr0, check_z_buffer_addr1, check_z_buffer_addr_mode, check_destination_addr0, check_destination_addr1, check_destination_addr_mode, check_for_dummy, check_for_dd, check_texture_addr0, check_texture_addr1, check_texture_addr2, check_texture_addr3, check_texture_addr4, check_texture_addr5, check_texture_addr6, check_texture_addr7, check_texture_addr8, check_texture_addr_mode, check_for_vertex_count, check_number_texunits, forbidden_command } hazard_t; /* * Associates each hazard above with a possible multi-command * sequence. For example an address that is split over multiple * commands and that needs to be checked at the first command * that does not include any part of the address. 
*/ static drm_via_sequence_t seqs[] = { no_sequence, no_sequence, no_sequence, no_sequence, no_sequence, no_sequence, z_address, z_address, z_address, dest_address, dest_address, dest_address, no_sequence, no_sequence, tex_address, tex_address, tex_address, tex_address, tex_address, tex_address, tex_address, tex_address, tex_address, tex_address, no_sequence }; typedef struct { unsigned int code; hazard_t hz; } hz_init_t; static hz_init_t init_table1[] = { {0xf2, check_for_header2_err}, {0xf0, check_for_header1_err}, {0xee, check_for_fire}, {0xcc, check_for_dummy}, {0xdd, check_for_dd}, {0x00, no_check}, {0x10, check_z_buffer_addr0}, {0x11, check_z_buffer_addr1}, {0x12, check_z_buffer_addr_mode}, {0x13, no_check}, {0x14, no_check}, {0x15, no_check}, {0x23, no_check}, {0x24, no_check}, {0x33, no_check}, {0x34, no_check}, {0x35, no_check}, {0x36, no_check}, {0x37, no_check}, {0x38, no_check}, {0x39, no_check}, {0x3A, no_check}, {0x3B, no_check}, {0x3C, no_check}, {0x3D, no_check}, {0x3E, no_check}, {0x40, check_destination_addr0}, {0x41, check_destination_addr1}, {0x42, check_destination_addr_mode}, {0x43, no_check}, {0x44, no_check}, {0x50, no_check}, {0x51, no_check}, {0x52, no_check}, {0x53, no_check}, {0x54, no_check}, {0x55, no_check}, {0x56, no_check}, {0x57, no_check}, {0x58, no_check}, {0x70, no_check}, {0x71, no_check}, {0x78, no_check}, {0x79, no_check}, {0x7A, no_check}, {0x7B, no_check}, {0x7C, no_check}, {0x7D, check_for_vertex_count} }; static hz_init_t init_table2[] = { {0xf2, check_for_header2_err}, {0xf0, check_for_header1_err}, {0xee, check_for_fire}, {0xcc, check_for_dummy}, {0x00, check_texture_addr0}, {0x01, check_texture_addr0}, {0x02, check_texture_addr0}, {0x03, check_texture_addr0}, {0x04, check_texture_addr0}, {0x05, check_texture_addr0}, {0x06, check_texture_addr0}, {0x07, check_texture_addr0}, {0x08, check_texture_addr0}, {0x09, check_texture_addr0}, {0x20, check_texture_addr1}, {0x21, check_texture_addr1}, {0x22, check_texture_addr1}, 
{0x23, check_texture_addr4}, {0x2B, check_texture_addr3}, {0x2C, check_texture_addr3}, {0x2D, check_texture_addr3}, {0x2E, check_texture_addr3}, {0x2F, check_texture_addr3}, {0x30, check_texture_addr3}, {0x31, check_texture_addr3}, {0x32, check_texture_addr3}, {0x33, check_texture_addr3}, {0x34, check_texture_addr3}, {0x4B, check_texture_addr5}, {0x4C, check_texture_addr6}, {0x51, check_texture_addr7}, {0x52, check_texture_addr8}, {0x77, check_texture_addr2}, {0x78, no_check}, {0x79, no_check}, {0x7A, no_check}, {0x7B, check_texture_addr_mode}, {0x7C, no_check}, {0x7D, no_check}, {0x7E, no_check}, {0x7F, no_check}, {0x80, no_check}, {0x81, no_check}, {0x82, no_check}, {0x83, no_check}, {0x85, no_check}, {0x86, no_check}, {0x87, no_check}, {0x88, no_check}, {0x89, no_check}, {0x8A, no_check}, {0x90, no_check}, {0x91, no_check}, {0x92, no_check}, {0x93, no_check} }; static hz_init_t init_table3[] = { {0xf2, check_for_header2_err}, {0xf0, check_for_header1_err}, {0xcc, check_for_dummy}, {0x00, check_number_texunits} }; static hazard_t table1[256]; static hazard_t table2[256]; static hazard_t table3[256]; static __inline__ int eat_words(const uint32_t **buf, const uint32_t *buf_end, unsigned num_words) { if ((buf_end - *buf) >= num_words) { *buf += num_words; return 0; } DRM_ERROR("Illegal termination of DMA command buffer\n"); return 1; } /* * Partially stolen from drm_memory.h */ static __inline__ drm_local_map_t *via_drm_lookup_agp_map(drm_via_state_t *seq, unsigned long offset, unsigned long size, struct drm_device *dev) { struct drm_map_list *r_list; drm_local_map_t *map = seq->map_cache; if (map && map->offset <= offset && (offset + size) <= (map->offset + map->size)) { return map; } list_for_each_entry(r_list, &dev->maplist, head) { map = r_list->map; if (!map) continue; if (map->offset <= offset && (offset + size) <= (map->offset + map->size) && !(map->flags & _DRM_RESTRICTED) && (map->type == _DRM_AGP)) { seq->map_cache = map; return map; } } return NULL; } /* 
* Require that all AGP texture levels reside in the same AGP map which should * be mappable by the client. This is not a big restriction. * FIXME: To actually enforce this security policy strictly, drm_rmmap * would have to wait for dma quiescent before removing an AGP map. * The via_drm_lookup_agp_map call in reality seems to take * very little CPU time. */ static __inline__ int finish_current_sequence(drm_via_state_t * cur_seq) { switch (cur_seq->unfinished) { case z_address: DRM_DEBUG("Z Buffer start address is 0x%x\n", cur_seq->z_addr); break; case dest_address: DRM_DEBUG("Destination start address is 0x%x\n", cur_seq->d_addr); break; case tex_address: if (cur_seq->agp_texture) { unsigned start = cur_seq->tex_level_lo[cur_seq->texture]; unsigned end = cur_seq->tex_level_hi[cur_seq->texture]; unsigned long lo = ~0, hi = 0, tmp; uint32_t *addr, *pitch, *height, tex; unsigned i; int npot; if (end > 9) end = 9; if (start > 9) start = 9; addr = &(cur_seq->t_addr[tex = cur_seq->texture][start]); pitch = &(cur_seq->pitch[tex][start]); height = &(cur_seq->height[tex][start]); npot = cur_seq->tex_npot[tex]; for (i = start; i <= end; ++i) { tmp = *addr++; if (tmp < lo) lo = tmp; if (i == 0 && npot) tmp += (*height++ * *pitch++); else tmp += (*height++ << *pitch++); if (tmp > hi) hi = tmp; } if (!via_drm_lookup_agp_map (cur_seq, lo, hi - lo, cur_seq->dev)) { DRM_ERROR ("AGP texture is not in allowed map\n"); return 2; } } break; default: break; } cur_seq->unfinished = no_sequence; return 0; } static __inline__ int investigate_hazard(uint32_t cmd, hazard_t hz, drm_via_state_t *cur_seq) { register uint32_t tmp, *tmp_addr; if (cur_seq->unfinished && (cur_seq->unfinished != seqs[hz])) { int ret; if ((ret = finish_current_sequence(cur_seq))) return ret; } switch (hz) { case check_for_header2: if (cmd == HALCYON_HEADER2) return 1; return 0; case check_for_header1: if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1) return 1; return 0; case check_for_header2_err: if (cmd == 
HALCYON_HEADER2) return 1; DRM_ERROR("Illegal DMA HALCYON_HEADER2 command\n"); break; case check_for_header1_err: if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1) return 1; DRM_ERROR("Illegal DMA HALCYON_HEADER1 command\n"); break; case check_for_fire: if ((cmd & HALCYON_FIREMASK) == HALCYON_FIRECMD) return 1; DRM_ERROR("Illegal DMA HALCYON_FIRECMD command\n"); break; case check_for_dummy: if (HC_DUMMY == cmd) return 0; DRM_ERROR("Illegal DMA HC_DUMMY command\n"); break; case check_for_dd: if (0xdddddddd == cmd) return 0; DRM_ERROR("Illegal DMA 0xdddddddd command\n"); break; case check_z_buffer_addr0: cur_seq->unfinished = z_address; cur_seq->z_addr = (cur_seq->z_addr & 0xFF000000) | (cmd & 0x00FFFFFF); return 0; case check_z_buffer_addr1: cur_seq->unfinished = z_address; cur_seq->z_addr = (cur_seq->z_addr & 0x00FFFFFF) | ((cmd & 0xFF) << 24); return 0; case check_z_buffer_addr_mode: cur_seq->unfinished = z_address; if ((cmd & 0x0000C000) == 0) return 0; DRM_ERROR("Attempt to place Z buffer in system memory\n"); return 2; case check_destination_addr0: cur_seq->unfinished = dest_address; cur_seq->d_addr = (cur_seq->d_addr & 0xFF000000) | (cmd & 0x00FFFFFF); return 0; case check_destination_addr1: cur_seq->unfinished = dest_address; cur_seq->d_addr = (cur_seq->d_addr & 0x00FFFFFF) | ((cmd & 0xFF) << 24); return 0; case check_destination_addr_mode: cur_seq->unfinished = dest_address; if ((cmd & 0x0000C000) == 0) return 0; DRM_ERROR ("Attempt to place 3D drawing buffer in system memory\n"); return 2; case check_texture_addr0: cur_seq->unfinished = tex_address; tmp = (cmd >> 24); tmp_addr = &cur_seq->t_addr[cur_seq->texture][tmp]; *tmp_addr = (*tmp_addr & 0xFF000000) | (cmd & 0x00FFFFFF); return 0; case check_texture_addr1: cur_seq->unfinished = tex_address; tmp = ((cmd >> 24) - 0x20); tmp += tmp << 1; tmp_addr = &cur_seq->t_addr[cur_seq->texture][tmp]; *tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF) << 24); tmp_addr++; *tmp_addr = (*tmp_addr & 0x00FFFFFF) | 
((cmd & 0xFF00) << 16); tmp_addr++; *tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF0000) << 8); return 0; case check_texture_addr2: cur_seq->unfinished = tex_address; cur_seq->tex_level_lo[tmp = cur_seq->texture] = cmd & 0x3F; cur_seq->tex_level_hi[tmp] = (cmd & 0xFC0) >> 6; return 0; case check_texture_addr3: cur_seq->unfinished = tex_address; tmp = ((cmd >> 24) - HC_SubA_HTXnL0Pit); if (tmp == 0 && (cmd & HC_HTXnEnPit_MASK)) { cur_seq->pitch[cur_seq->texture][tmp] = (cmd & HC_HTXnLnPit_MASK); cur_seq->tex_npot[cur_seq->texture] = 1; } else { cur_seq->pitch[cur_seq->texture][tmp] = (cmd & HC_HTXnLnPitE_MASK) >> HC_HTXnLnPitE_SHIFT; cur_seq->tex_npot[cur_seq->texture] = 0; if (cmd & 0x000FFFFF) { DRM_ERROR ("Unimplemented texture level 0 pitch mode.\n"); return 2; } } return 0; case check_texture_addr4: cur_seq->unfinished = tex_address; tmp_addr = &cur_seq->t_addr[cur_seq->texture][9]; *tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF) << 24); return 0; case check_texture_addr5: case check_texture_addr6: cur_seq->unfinished = tex_address; /* * Texture width. We don't care since we have the pitch. 
*/ return 0; case check_texture_addr7: cur_seq->unfinished = tex_address; tmp_addr = &(cur_seq->height[cur_seq->texture][0]); tmp_addr[5] = 1 << ((cmd & 0x00F00000) >> 20); tmp_addr[4] = 1 << ((cmd & 0x000F0000) >> 16); tmp_addr[3] = 1 << ((cmd & 0x0000F000) >> 12); tmp_addr[2] = 1 << ((cmd & 0x00000F00) >> 8); tmp_addr[1] = 1 << ((cmd & 0x000000F0) >> 4); tmp_addr[0] = 1 << (cmd & 0x0000000F); return 0; case check_texture_addr8: cur_seq->unfinished = tex_address; tmp_addr = &(cur_seq->height[cur_seq->texture][0]); tmp_addr[9] = 1 << ((cmd & 0x0000F000) >> 12); tmp_addr[8] = 1 << ((cmd & 0x00000F00) >> 8); tmp_addr[7] = 1 << ((cmd & 0x000000F0) >> 4); tmp_addr[6] = 1 << (cmd & 0x0000000F); return 0; case check_texture_addr_mode: cur_seq->unfinished = tex_address; if (2 == (tmp = cmd & 0x00000003)) { DRM_ERROR ("Attempt to fetch texture from system memory.\n"); return 2; } cur_seq->agp_texture = (tmp == 3); cur_seq->tex_palette_size[cur_seq->texture] = (cmd >> 16) & 0x000000007; return 0; case check_for_vertex_count: cur_seq->vertex_count = cmd & 0x0000FFFF; return 0; case check_number_texunits: cur_seq->multitex = (cmd >> 3) & 1; return 0; default: DRM_ERROR("Illegal DMA data: 0x%x\n", cmd); return 2; } return 2; } static __inline__ int via_check_prim_list(uint32_t const **buffer, const uint32_t * buf_end, drm_via_state_t *cur_seq) { drm_via_private_t *dev_priv = (drm_via_private_t *) cur_seq->dev->dev_private; uint32_t a_fire, bcmd, dw_count; int ret = 0; int have_fire; const uint32_t *buf = *buffer; while (buf < buf_end) { have_fire = 0; if ((buf_end - buf) < 2) { DRM_ERROR ("Unexpected termination of primitive list.\n"); ret = 1; break; } if ((*buf & HC_ACMD_MASK) != HC_ACMD_HCmdB) break; bcmd = *buf++; if ((*buf & HC_ACMD_MASK) != HC_ACMD_HCmdA) { DRM_ERROR("Expected Vertex List A command, got 0x%x\n", *buf); ret = 1; break; } a_fire = *buf++ | HC_HPLEND_MASK | HC_HPMValidN_MASK | HC_HE3Fire_MASK; /* * How many dwords per vertex ? 
*/ if (cur_seq->agp && ((bcmd & (0xF << 11)) == 0)) { DRM_ERROR("Illegal B command vertex data for AGP.\n"); ret = 1; break; } dw_count = 0; if (bcmd & (1 << 7)) dw_count += (cur_seq->multitex) ? 2 : 1; if (bcmd & (1 << 8)) dw_count += (cur_seq->multitex) ? 2 : 1; if (bcmd & (1 << 9)) dw_count++; if (bcmd & (1 << 10)) dw_count++; if (bcmd & (1 << 11)) dw_count++; if (bcmd & (1 << 12)) dw_count++; if (bcmd & (1 << 13)) dw_count++; if (bcmd & (1 << 14)) dw_count++; while (buf < buf_end) { if (*buf == a_fire) { if (dev_priv->num_fire_offsets >= VIA_FIRE_BUF_SIZE) { DRM_ERROR("Fire offset buffer full.\n"); ret = 1; break; } dev_priv->fire_offsets[dev_priv-> num_fire_offsets++] = buf; have_fire = 1; buf++; if (buf < buf_end && *buf == a_fire) buf++; break; } if ((*buf == HALCYON_HEADER2) || ((*buf & HALCYON_FIREMASK) == HALCYON_FIRECMD)) { DRM_ERROR("Missing Vertex Fire command, " "Stray Vertex Fire command or verifier " "lost sync.\n"); ret = 1; break; } if ((ret = eat_words(&buf, buf_end, dw_count))) break; } if (buf >= buf_end && !have_fire) { DRM_ERROR("Missing Vertex Fire command or verifier " "lost sync.\n"); ret = 1; break; } if (cur_seq->agp && ((buf - cur_seq->buf_start) & 0x01)) { DRM_ERROR("AGP Primitive list end misaligned.\n"); ret = 1; break; } } *buffer = buf; return ret; } static __inline__ verifier_state_t via_check_header2(uint32_t const **buffer, const uint32_t *buf_end, drm_via_state_t *hc_state) { uint32_t cmd; int hz_mode; hazard_t hz; const uint32_t *buf = *buffer; const hazard_t *hz_table; if ((buf_end - buf) < 2) { DRM_ERROR ("Illegal termination of DMA HALCYON_HEADER2 sequence.\n"); return state_error; } buf++; cmd = (*buf++ & 0xFFFF0000) >> 16; switch (cmd) { case HC_ParaType_CmdVdata: if (via_check_prim_list(&buf, buf_end, hc_state)) return state_error; *buffer = buf; return state_command; case HC_ParaType_NotTex: hz_table = table1; break; case HC_ParaType_Tex: hc_state->texture = 0; hz_table = table2; break; case (HC_ParaType_Tex | 
(HC_SubType_Tex1 << 8)): hc_state->texture = 1; hz_table = table2; break; case (HC_ParaType_Tex | (HC_SubType_TexGeneral << 8)): hz_table = table3; break; case HC_ParaType_Auto: if (eat_words(&buf, buf_end, 2)) return state_error; *buffer = buf; return state_command; case (HC_ParaType_Palette | (HC_SubType_Stipple << 8)): if (eat_words(&buf, buf_end, 32)) return state_error; *buffer = buf; return state_command; case (HC_ParaType_Palette | (HC_SubType_TexPalette0 << 8)): case (HC_ParaType_Palette | (HC_SubType_TexPalette1 << 8)): DRM_ERROR("Texture palettes are rejected because of " "lack of info how to determine their size.\n"); return state_error; case (HC_ParaType_Palette | (HC_SubType_FogTable << 8)): DRM_ERROR("Fog factor palettes are rejected because of " "lack of info how to determine their size.\n"); return state_error; default: /* * There are some unimplemented HC_ParaTypes here, that * need to be implemented if the Mesa driver is extended. */ DRM_ERROR("Invalid or unimplemented HALCYON_HEADER2 " "DMA subcommand: 0x%x. 
Previous dword: 0x%x\n", cmd, *(buf - 2)); *buffer = buf; return state_error; } while (buf < buf_end) { cmd = *buf++; if ((hz = hz_table[cmd >> 24])) { if ((hz_mode = investigate_hazard(cmd, hz, hc_state))) { if (hz_mode == 1) { buf--; break; } return state_error; } } else if (hc_state->unfinished && finish_current_sequence(hc_state)) { return state_error; } } if (hc_state->unfinished && finish_current_sequence(hc_state)) return state_error; *buffer = buf; return state_command; } static __inline__ verifier_state_t via_parse_header2(drm_via_private_t *dev_priv, uint32_t const **buffer, const uint32_t *buf_end, int *fire_count) { uint32_t cmd; const uint32_t *buf = *buffer; const uint32_t *next_fire; int burst = 0; next_fire = dev_priv->fire_offsets[*fire_count]; buf++; cmd = (*buf & 0xFFFF0000) >> 16; VIA_WRITE(HC_REG_TRANS_SET + HC_REG_BASE, *buf++); switch (cmd) { case HC_ParaType_CmdVdata: while ((buf < buf_end) && (*fire_count < dev_priv->num_fire_offsets) && (*buf & HC_ACMD_MASK) == HC_ACMD_HCmdB) { while (buf <= next_fire) { VIA_WRITE(HC_REG_TRANS_SPACE + HC_REG_BASE + (burst & 63), *buf++); burst += 4; } if ((buf < buf_end) && ((*buf & HALCYON_FIREMASK) == HALCYON_FIRECMD)) buf++; if (++(*fire_count) < dev_priv->num_fire_offsets) next_fire = dev_priv->fire_offsets[*fire_count]; } break; default: while (buf < buf_end) { if (*buf == HC_HEADER2 || (*buf & HALCYON_HEADER1MASK) == HALCYON_HEADER1 || (*buf & VIA_VIDEOMASK) == VIA_VIDEO_HEADER5 || (*buf & VIA_VIDEOMASK) == VIA_VIDEO_HEADER6) break; VIA_WRITE(HC_REG_TRANS_SPACE + HC_REG_BASE + (burst & 63), *buf++); burst += 4; } } *buffer = buf; return state_command; } static __inline__ int verify_mmio_address(uint32_t address) { if ((address > 0x3FF) && (address < 0xC00)) { DRM_ERROR("Invalid VIDEO DMA command. " "Attempt to access 3D- or command burst area.\n"); return 1; } else if ((address > 0xCFF) && (address < 0x1300)) { DRM_ERROR("Invalid VIDEO DMA command. 
" "Attempt to access PCI DMA area.\n"); return 1; } else if (address > 0x13FF) { DRM_ERROR("Invalid VIDEO DMA command. " "Attempt to access VGA registers.\n"); return 1; } return 0; } static __inline__ int verify_video_tail(uint32_t const **buffer, const uint32_t * buf_end, uint32_t dwords) { const uint32_t *buf = *buffer; if (buf_end - buf < dwords) { DRM_ERROR("Illegal termination of video command.\n"); return 1; } while (dwords--) { if (*buf++) { DRM_ERROR("Illegal video command tail.\n"); return 1; } } *buffer = buf; return 0; } static __inline__ verifier_state_t via_check_header1(uint32_t const **buffer, const uint32_t * buf_end) { uint32_t cmd; const uint32_t *buf = *buffer; verifier_state_t ret = state_command; while (buf < buf_end) { cmd = *buf; if ((cmd > ((0x3FF >> 2) | HALCYON_HEADER1)) && (cmd < ((0xC00 >> 2) | HALCYON_HEADER1))) { if ((cmd & HALCYON_HEADER1MASK) != HALCYON_HEADER1) break; DRM_ERROR("Invalid HALCYON_HEADER1 command. " "Attempt to access 3D- or command burst area.\n"); ret = state_error; break; } else if (cmd > ((0xCFF >> 2) | HALCYON_HEADER1)) { if ((cmd & HALCYON_HEADER1MASK) != HALCYON_HEADER1) break; DRM_ERROR("Invalid HALCYON_HEADER1 command. 
" "Attempt to access VGA registers.\n"); ret = state_error; break; } else { buf += 2; } } *buffer = buf; return ret; } static __inline__ verifier_state_t via_parse_header1(drm_via_private_t *dev_priv, uint32_t const **buffer, const uint32_t *buf_end) { register uint32_t cmd; const uint32_t *buf = *buffer; while (buf < buf_end) { cmd = *buf; if ((cmd & HALCYON_HEADER1MASK) != HALCYON_HEADER1) break; VIA_WRITE((cmd & ~HALCYON_HEADER1MASK) << 2, *++buf); buf++; } *buffer = buf; return state_command; } static __inline__ verifier_state_t via_check_vheader5(uint32_t const **buffer, const uint32_t *buf_end) { uint32_t data; const uint32_t *buf = *buffer; if (buf_end - buf < 4) { DRM_ERROR("Illegal termination of video header5 command\n"); return state_error; } data = *buf++ & ~VIA_VIDEOMASK; if (verify_mmio_address(data)) return state_error; data = *buf++; if (*buf++ != 0x00F50000) { DRM_ERROR("Illegal header5 header data\n"); return state_error; } if (*buf++ != 0x00000000) { DRM_ERROR("Illegal header5 header data\n"); return state_error; } if (eat_words(&buf, buf_end, data)) return state_error; if ((data & 3) && verify_video_tail(&buf, buf_end, 4 - (data & 3))) return state_error; *buffer = buf; return state_command; } static __inline__ verifier_state_t via_parse_vheader5(drm_via_private_t *dev_priv, uint32_t const **buffer, const uint32_t *buf_end) { uint32_t addr, count, i; const uint32_t *buf = *buffer; addr = *buf++ & ~VIA_VIDEOMASK; i = count = *buf; buf += 3; while (i--) VIA_WRITE(addr, *buf++); if (count & 3) buf += 4 - (count & 3); *buffer = buf; return state_command; } static __inline__ verifier_state_t via_check_vheader6(uint32_t const **buffer, const uint32_t * buf_end) { uint32_t data; const uint32_t *buf = *buffer; uint32_t i; if (buf_end - buf < 4) { DRM_ERROR("Illegal termination of video header6 command\n"); return state_error; } buf++; data = *buf++; if (*buf++ != 0x00F60000) { DRM_ERROR("Illegal header6 header data\n"); return state_error; } if (*buf++ 
!= 0x00000000) { DRM_ERROR("Illegal header6 header data\n"); return state_error; } if ((buf_end - buf) < (data << 1)) { DRM_ERROR("Illegal termination of video header6 command\n"); return state_error; } for (i = 0; i < data; ++i) { if (verify_mmio_address(*buf++)) return state_error; buf++; } data <<= 1; if ((data & 3) && verify_video_tail(&buf, buf_end, 4 - (data & 3))) return state_error; *buffer = buf; return state_command; } static __inline__ verifier_state_t via_parse_vheader6(drm_via_private_t *dev_priv, uint32_t const **buffer, const uint32_t *buf_end) { uint32_t addr, count, i; const uint32_t *buf = *buffer; i = count = *++buf; buf += 3; while (i--) { addr = *buf++; VIA_WRITE(addr, *buf++); } count <<= 1; if (count & 3) buf += 4 - (count & 3); *buffer = buf; return state_command; } int via_verify_command_stream(const uint32_t * buf, unsigned int size, struct drm_device * dev, int agp) { drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; drm_via_state_t *hc_state = &dev_priv->hc_state; drm_via_state_t saved_state = *hc_state; uint32_t cmd; const uint32_t *buf_end = buf + (size >> 2); verifier_state_t state = state_command; int cme_video; int supported_3d; cme_video = (dev_priv->chipset == VIA_PRO_GROUP_A || dev_priv->chipset == VIA_DX9_0); supported_3d = dev_priv->chipset != VIA_DX9_0; hc_state->dev = dev; hc_state->unfinished = no_sequence; hc_state->map_cache = NULL; hc_state->agp = agp; hc_state->buf_start = buf; dev_priv->num_fire_offsets = 0; while (buf < buf_end) { switch (state) { case state_header2: state = via_check_header2(&buf, buf_end, hc_state); break; case state_header1: state = via_check_header1(&buf, buf_end); break; case state_vheader5: state = via_check_vheader5(&buf, buf_end); break; case state_vheader6: state = via_check_vheader6(&buf, buf_end); break; case state_command: if ((HALCYON_HEADER2 == (cmd = *buf)) && supported_3d) state = state_header2; else if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1) state = 
state_header1; else if (cme_video && (cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER5) state = state_vheader5; else if (cme_video && (cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER6) state = state_vheader6; else if ((cmd == HALCYON_HEADER2) && !supported_3d) { DRM_ERROR("Accelerated 3D is not supported on this chipset yet.\n"); state = state_error; } else { DRM_ERROR ("Invalid / Unimplemented DMA HEADER command. 0x%x\n", cmd); state = state_error; } break; case state_error: default: *hc_state = saved_state; return -EINVAL; } } if (state == state_error) { *hc_state = saved_state; return -EINVAL; } return 0; } int via_parse_command_stream(struct drm_device *dev, const uint32_t *buf, unsigned int size) { drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; uint32_t cmd; const uint32_t *buf_end = buf + (size >> 2); verifier_state_t state = state_command; int fire_count = 0; while (buf < buf_end) { switch (state) { case state_header2: state = via_parse_header2(dev_priv, &buf, buf_end, &fire_count); break; case state_header1: state = via_parse_header1(dev_priv, &buf, buf_end); break; case state_vheader5: state = via_parse_vheader5(dev_priv, &buf, buf_end); break; case state_vheader6: state = via_parse_vheader6(dev_priv, &buf, buf_end); break; case state_command: if (HALCYON_HEADER2 == (cmd = *buf)) state = state_header2; else if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1) state = state_header1; else if ((cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER5) state = state_vheader5; else if ((cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER6) state = state_vheader6; else { DRM_ERROR ("Invalid / Unimplemented DMA HEADER command. 
0x%x\n", cmd); state = state_error; } break; case state_error: default: return -EINVAL; } } if (state == state_error) return -EINVAL; return 0; } static void setup_hazard_table(hz_init_t init_table[], hazard_t table[], int size) { int i; for (i = 0; i < 256; ++i) table[i] = forbidden_command; for (i = 0; i < size; ++i) table[init_table[i].code] = init_table[i].hz; } void via_init_command_verifier(void) { setup_hazard_table(init_table1, table1, sizeof(init_table1) / sizeof(hz_init_t)); setup_hazard_table(init_table2, table2, sizeof(init_table2) / sizeof(hz_init_t)); setup_hazard_table(init_table3, table3, sizeof(init_table3) / sizeof(hz_init_t)); }
gpl-2.0
rehsack/linux-curie
drivers/net/wireless/libertas/tx.c
9954
5517
/*
 * This file contains the handling of TX in wlan driver.
 */

#include <linux/hardirq.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/sched.h>
#include <linux/export.h>
#include <net/cfg80211.h>

#include "host.h"
#include "radiotap.h"
#include "decl.h"
#include "defs.h"
#include "dev.h"
#include "mesh.h"

/**
 * convert_radiotap_rate_to_mv - converts Tx/Rx rates from IEEE80211_RADIOTAP_RATE
 * units (500 Kb/s) into Marvell WLAN format (see Table 8 in Section 3.2.1)
 *
 * @rate: Input rate
 * returns: Output Rate (0 if invalid)
 */
static u32 convert_radiotap_rate_to_mv(u8 rate)
{
	switch (rate) {
	case 2:		/* 1 Mbps */
		return 0 | (1 << 4);
	case 4:		/* 2 Mbps */
		return 1 | (1 << 4);
	case 11:		/* 5.5 Mbps */
		return 2 | (1 << 4);
	case 22:		/* 11 Mbps */
		return 3 | (1 << 4);
	case 12:		/* 6 Mbps */
		return 4 | (1 << 4);
	case 18:		/* 9 Mbps */
		return 5 | (1 << 4);
	case 24:		/* 12 Mbps */
		return 6 | (1 << 4);
	case 36:		/* 18 Mbps */
		return 7 | (1 << 4);
	case 48:		/* 24 Mbps */
		return 8 | (1 << 4);
	case 72:		/* 36 Mbps */
		return 9 | (1 << 4);
	case 96:		/* 48 Mbps */
		return 10 | (1 << 4);
	case 108:		/* 54 Mbps */
		return 11 | (1 << 4);
	}
	/* Anything else is not a valid radiotap rate for this hardware. */
	return 0;
}

/**
 * lbs_hard_start_xmit - checks the conditions and sends packet to IF
 * layer if everything is ok
 *
 * @skb: A pointer to skb which includes TX packet
 * @dev: A pointer to the &struct net_device
 * returns: 0 or -1
 *
 * Builds a &struct txpd descriptor in priv->tx_pending_buf followed by the
 * (possibly radiotap-stripped) frame, then wakes the main thread via
 * priv->waitq to hand the buffer to the firmware.  Queues are stopped here
 * and restarted when the pending buffer has been consumed.
 */
netdev_tx_t lbs_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned long flags;
	struct lbs_private *priv = dev->ml_priv;
	struct txpd *txpd;
	char *p802x_hdr;
	uint16_t pkt_len;
	netdev_tx_t ret = NETDEV_TX_OK;

	lbs_deb_enter(LBS_DEB_TX);

	/* We need to protect against the queues being restarted before
	   we get round to stopping them */
	spin_lock_irqsave(&priv->driver_lock, flags);

	if (priv->surpriseremoved)
		goto free;

	if (!skb->len || (skb->len > MRVDRV_ETH_TX_PACKET_BUFFER_SIZE)) {
		lbs_deb_tx("tx err: skb length %d 0 or > %zd\n",
			   skb->len, MRVDRV_ETH_TX_PACKET_BUFFER_SIZE);
		/* We'll never manage to send this one; drop it and return 'OK' */

		dev->stats.tx_dropped++;
		dev->stats.tx_errors++;
		goto free;
	}

	/* Stop both interfaces: there is a single pending buffer shared
	 * between the ethernet and mesh devices. */
	netif_stop_queue(priv->dev);
	if (priv->mesh_dev)
		netif_stop_queue(priv->mesh_dev);

	if (priv->tx_pending_len) {
		/* This can happen if packets come in on the mesh and eth
		   device simultaneously -- there's no mutual exclusion on
		   hard_start_xmit() calls between devices. */
		lbs_deb_tx("Packet on %s while busy\n", dev->name);
		ret = NETDEV_TX_BUSY;
		goto unlock;
	}

	/* -1 marks the buffer as "being filled"; the real length is set
	 * once the copy below has completed. */
	priv->tx_pending_len = -1;
	spin_unlock_irqrestore(&priv->driver_lock, flags);

	lbs_deb_hex(LBS_DEB_TX, "TX Data", skb->data,
		    min_t(unsigned int, skb->len, 100));

	txpd = (void *)priv->tx_pending_buf;
	memset(txpd, 0, sizeof(struct txpd));

	p802x_hdr = skb->data;
	pkt_len = skb->len;

	if (priv->wdev->iftype == NL80211_IFTYPE_MONITOR) {
		struct tx_radiotap_hdr *rtap_hdr = (void *)skb->data;

		/* set txpd fields from the radiotap header */
		txpd->tx_control =
		    cpu_to_le32(convert_radiotap_rate_to_mv(rtap_hdr->rate));

		/* skip the radiotap header */
		p802x_hdr += sizeof(*rtap_hdr);
		pkt_len -= sizeof(*rtap_hdr);

		/* copy destination address from 802.11 header */
		memcpy(txpd->tx_dest_addr_high, p802x_hdr + 4, ETH_ALEN);
	} else {
		/* copy destination address from 802.3 header */
		memcpy(txpd->tx_dest_addr_high, p802x_hdr, ETH_ALEN);
	}

	txpd->tx_packet_length = cpu_to_le16(pkt_len);
	txpd->tx_packet_location = cpu_to_le32(sizeof(struct txpd));

	lbs_mesh_set_txpd(priv, dev, txpd);

	/* Fix: dump the descriptor itself, not the stack storage of the
	 * 'txpd' pointer variable (the old '&txpd' read sizeof(struct txpd)
	 * bytes starting at the pointer's own address). */
	lbs_deb_hex(LBS_DEB_TX, "txpd", (u8 *) txpd, sizeof(struct txpd));

	lbs_deb_hex(LBS_DEB_TX, "Tx Data", (u8 *) p802x_hdr,
		    le16_to_cpu(txpd->tx_packet_length));

	/* Frame payload follows the descriptor in the pending buffer. */
	memcpy(&txpd[1], p802x_hdr, le16_to_cpu(txpd->tx_packet_length));

	spin_lock_irqsave(&priv->driver_lock, flags);
	priv->tx_pending_len = pkt_len + sizeof(struct txpd);

	lbs_deb_tx("%s lined up packet\n", __func__);

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;

	if (priv->wdev->iftype == NL80211_IFTYPE_MONITOR) {
		/* Keep the skb to echo it back once Tx feedback is
		   received from FW */
		skb_orphan(skb);

		/* Keep the skb around for when we get feedback */
		priv->currenttxskb = skb;
	} else {
 free:
		dev_kfree_skb_any(skb);
	}

 unlock:
	spin_unlock_irqrestore(&priv->driver_lock, flags);
	wake_up(&priv->waitq);

	lbs_deb_leave_args(LBS_DEB_TX, "ret %d", ret);
	return ret;
}

/**
 * lbs_send_tx_feedback - sends to the host the last transmitted packet,
 * filling the radiotap headers with transmission information.
 *
 * @priv: A pointer to &struct lbs_private structure
 * @try_count: A 32-bit value containing transmission retry status.
 *
 * returns: void
 */
void lbs_send_tx_feedback(struct lbs_private *priv, u32 try_count)
{
	struct tx_radiotap_hdr *radiotap_hdr;

	/* Feedback only applies to the echoed monitor-mode skb. */
	if (priv->wdev->iftype != NL80211_IFTYPE_MONITOR ||
	    priv->currenttxskb == NULL)
		return;

	radiotap_hdr = (struct tx_radiotap_hdr *)priv->currenttxskb->data;

	/* try_count == 0 means no retry info from firmware. */
	radiotap_hdr->data_retries = try_count ?
	    (1 + priv->txretrycount - try_count) : 0;

	priv->currenttxskb->protocol = eth_type_trans(priv->currenttxskb,
						      priv->dev);
	netif_rx(priv->currenttxskb);

	priv->currenttxskb = NULL;

	if (priv->connect_status == LBS_CONNECTED)
		netif_wake_queue(priv->dev);

	if (priv->mesh_dev && netif_running(priv->mesh_dev))
		netif_wake_queue(priv->mesh_dev);
}
EXPORT_SYMBOL_GPL(lbs_send_tx_feedback);
gpl-2.0
mixtile/garage-linux
drivers/usb/misc/sisusbvga/sisusb_init.c
11746
25368
/*
 * sisusb - usb kernel driver for SiS315(E) based USB2VGA dongles
 *
 * Display mode initializing code
 *
 * Copyright (C) 2001-2005 by Thomas Winischhofer, Vienna, Austria
 *
 * If distributed as part of the Linux kernel, this code is licensed under the
 * terms of the GPL v2.
 *
 * Otherwise, the following license terms apply:
 *
 * * Redistribution and use in source and binary forms, with or without
 * * modification, are permitted provided that the following conditions
 * * are met:
 * * 1) Redistributions of source code must retain the above copyright
 * *    notice, this list of conditions and the following disclaimer.
 * * 2) Redistributions in binary form must reproduce the above copyright
 * *    notice, this list of conditions and the following disclaimer in the
 * *    documentation and/or other materials provided with the distribution.
 * * 3) The name of the author may not be used to endorse or promote products
 * *    derived from this software without specific prior written permission.
 * *
 * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Thomas Winischhofer <thomas@winischhofer.net>
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/spinlock.h>

#include "sisusb.h"

#ifdef INCL_SISUSB_CON

#include "sisusb_init.h"

/*********************************************/
/*         POINTER INITIALIZATION            */
/*********************************************/

/* Point the SiS_Private table pointers at the static USB-variant
 * mode/timing tables (declared in sisusb_init.h). */
static void SiSUSB_InitPtr(struct SiS_Private *SiS_Pr)
{
	SiS_Pr->SiS_ModeResInfo = SiSUSB_ModeResInfo;
	SiS_Pr->SiS_StandTable = SiSUSB_StandTable;

	SiS_Pr->SiS_SModeIDTable = SiSUSB_SModeIDTable;
	SiS_Pr->SiS_EModeIDTable = SiSUSB_EModeIDTable;
	SiS_Pr->SiS_RefIndex = SiSUSB_RefIndex;
	SiS_Pr->SiS_CRT1Table = SiSUSB_CRT1Table;

	SiS_Pr->SiS_VCLKData = SiSUSB_VCLKData;
}

/*********************************************/
/*          HELPER: SetReg, GetReg           */
/*********************************************/

/* Thin wrappers around the sisusb_* register accessors; all chip access
 * goes over USB through SiS_Pr->sisusb. */

/* Write 'data' to indexed register 'index' at 'port'. */
static void
SiS_SetReg(struct SiS_Private *SiS_Pr, unsigned long port,
	   unsigned short index, unsigned short data)
{
	sisusb_setidxreg(SiS_Pr->sisusb, port, index, data);
}

/* Write 'data' directly to 'port' (non-indexed). */
static void
SiS_SetRegByte(struct SiS_Private *SiS_Pr, unsigned long port,
	       unsigned short data)
{
	sisusb_setreg(SiS_Pr->sisusb, port, data);
}

/* Read indexed register 'index' at 'port'. */
static unsigned char
SiS_GetReg(struct SiS_Private *SiS_Pr, unsigned long port,
	   unsigned short index)
{
	u8 data;

	sisusb_getidxreg(SiS_Pr->sisusb, port, index, &data);
	return data;
}

/* Read directly from 'port' (non-indexed). */
static unsigned char
SiS_GetRegByte(struct SiS_Private *SiS_Pr, unsigned long port)
{
	u8 data;

	sisusb_getreg(SiS_Pr->sisusb, port, &data);
	return data;
}

/* Read-modify-write: reg = (reg & DataAND) | DataOR. */
static void
SiS_SetRegANDOR(struct SiS_Private *SiS_Pr, unsigned long port,
		unsigned short index, unsigned short DataAND,
		unsigned short DataOR)
{
	sisusb_setidxregandor(SiS_Pr->sisusb, port, index, DataAND, DataOR);
}

/* Clear bits: reg &= DataAND. */
static void
SiS_SetRegAND(struct SiS_Private *SiS_Pr, unsigned long port,
	      unsigned short index, unsigned short DataAND)
{
	sisusb_setidxregand(SiS_Pr->sisusb, port, index, DataAND);
}

/* Set bits: reg |= DataOR. */
static void
SiS_SetRegOR(struct SiS_Private *SiS_Pr, unsigned long port,
	     unsigned short index, unsigned short DataOR)
{
	sisusb_setidxregor(SiS_Pr->sisusb, port, index, DataOR);
}

/*********************************************/
/*      HELPER: DisplayOn, DisplayOff        */
/*********************************************/

/* Enable display output by clearing the screen-off bit (bit 5) in SR01. */
static void SiS_DisplayOn(struct SiS_Private *SiS_Pr)
{
	SiS_SetRegAND(SiS_Pr, SiS_Pr->SiS_P3c4, 0x01, 0xDF);
}

/*********************************************/
/*        HELPER: Init Port Addresses        */
/*********************************************/

/* Compute the relocated VGA I/O port addresses from the device base.
 * Offsets mirror the classic VGA ports (0x3c4 -> +0x14, etc.). */
static void SiSUSBRegInit(struct SiS_Private *SiS_Pr, unsigned long BaseAddr)
{
	SiS_Pr->SiS_P3c4 = BaseAddr + 0x14;
	SiS_Pr->SiS_P3d4 = BaseAddr + 0x24;
	SiS_Pr->SiS_P3c0 = BaseAddr + 0x10;
	SiS_Pr->SiS_P3ce = BaseAddr + 0x1e;
	SiS_Pr->SiS_P3c2 = BaseAddr + 0x12;
	SiS_Pr->SiS_P3ca = BaseAddr + 0x1a;
	SiS_Pr->SiS_P3c6 = BaseAddr + 0x16;
	SiS_Pr->SiS_P3c7 = BaseAddr + 0x17;
	SiS_Pr->SiS_P3c8 = BaseAddr + 0x18;
	SiS_Pr->SiS_P3c9 = BaseAddr + 0x19;
	SiS_Pr->SiS_P3cb = BaseAddr + 0x1b;
	SiS_Pr->SiS_P3cc = BaseAddr + 0x1c;
	SiS_Pr->SiS_P3cd = BaseAddr + 0x1d;
	SiS_Pr->SiS_P3da = BaseAddr + 0x2a;
	SiS_Pr->SiS_Part1Port = BaseAddr + SIS_CRT2_PORT_04;
}

/*********************************************/
/*             HELPER: GetSysFlags           */
/*********************************************/

/* Record which CRTC register is used as "CR63" on this chip. */
static void SiS_GetSysFlags(struct SiS_Private *SiS_Pr)
{
	SiS_Pr->SiS_MyCR63 = 0x63;
}

/*********************************************/
/*         HELPER: Init PCI & Engines        */
/*********************************************/

static void SiSInitPCIetc(struct SiS_Private *SiS_Pr)
{
	SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, 0x20, 0xa1);

	/*  - Enable 2D (0x40)
	 *  - Enable 3D (0x02)
	 *  - Enable 3D vertex command fetch (0x10)
	 *  - Enable 3D command parser (0x08)
	 *  - Enable 3D G/L transformation engine (0x80)
	 */
	SiS_SetRegOR(SiS_Pr, SiS_Pr->SiS_P3c4, 0x1E, 0xDA);
}

/*********************************************/
/*        HELPER: SET SEGMENT REGISTERS      */
/*********************************************/

/* Write the low byte of 'value' as two nibbles into the low nibbles of
 * the 3CB/3CD segment registers. */
static void SiS_SetSegRegLower(struct SiS_Private *SiS_Pr, unsigned short value)
{
	unsigned short temp;

	value &= 0x00ff;
	temp = SiS_GetRegByte(SiS_Pr, SiS_Pr->SiS_P3cb) & 0xf0;
	temp |= (value >> 4);
	SiS_SetRegByte(SiS_Pr, SiS_Pr->SiS_P3cb, temp);
	temp = SiS_GetRegByte(SiS_Pr, SiS_Pr->SiS_P3cd) & 0xf0;
	temp |= (value & 0x0f);
	SiS_SetRegByte(SiS_Pr, SiS_Pr->SiS_P3cd, temp);
}

/* Same as above, but into the high nibbles of 3CB/3CD. */
static void SiS_SetSegRegUpper(struct SiS_Private *SiS_Pr, unsigned short value)
{
	unsigned short temp;

	value &= 0x00ff;
	temp = SiS_GetRegByte(SiS_Pr, SiS_Pr->SiS_P3cb) & 0x0f;
	temp |= (value & 0xf0);
	SiS_SetRegByte(SiS_Pr, SiS_Pr->SiS_P3cb, temp);
	temp = SiS_GetRegByte(SiS_Pr, SiS_Pr->SiS_P3cd) & 0x0f;
	temp |= (value << 4);
	SiS_SetRegByte(SiS_Pr, SiS_Pr->SiS_P3cd, temp);
}

static void SiS_SetSegmentReg(struct SiS_Private *SiS_Pr, unsigned short value)
{
	SiS_SetSegRegLower(SiS_Pr, value);
	SiS_SetSegRegUpper(SiS_Pr, value);
}

static void SiS_ResetSegmentReg(struct SiS_Private *SiS_Pr)
{
	SiS_SetSegmentReg(SiS_Pr, 0);
}

/* Program the segment overflow bits (SR1D) from the high byte of 'value',
 * then the segment registers themselves. */
static void
SiS_SetSegmentRegOver(struct SiS_Private *SiS_Pr, unsigned short value)
{
	unsigned short temp = value >> 8;

	temp &= 0x07;
	temp |= (temp << 4);
	SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, 0x1d, temp);
	SiS_SetSegmentReg(SiS_Pr, value);
}

static void SiS_ResetSegmentRegOver(struct SiS_Private *SiS_Pr)
{
	SiS_SetSegmentRegOver(SiS_Pr, 0);
}

static void SiS_ResetSegmentRegisters(struct SiS_Private *SiS_Pr)
{
	SiS_ResetSegmentReg(SiS_Pr);
	SiS_ResetSegmentRegOver(SiS_Pr);
}

/*********************************************/
/*           HELPER: SearchModeID            */
/*********************************************/

/* Look up *ModeNo in the mode tables and return its table index in
 * *ModeIdIndex.  Returns 1 on success, 0 if the mode is unknown.
 * Of the standard (<= 0x13) modes only 0x03 is supported; extended
 * modes are searched in SiS_EModeIDTable (0xFF terminates the table). */
static int
SiS_SearchModeID(struct SiS_Private *SiS_Pr, unsigned short *ModeNo,
		 unsigned short *ModeIdIndex)
{
	if ((*ModeNo) <= 0x13) {

		if ((*ModeNo) != 0x03)
			return 0;

		(*ModeIdIndex) = 0;

	} else {

		for (*ModeIdIndex = 0;; (*ModeIdIndex)++) {

			if (SiS_Pr->SiS_EModeIDTable[*ModeIdIndex].Ext_ModeID ==
			    (*ModeNo))
				break;

			if (SiS_Pr->SiS_EModeIDTable[*ModeIdIndex].Ext_ModeID ==
			    0xFF)
				return 0;
		}

	}

	return 1;
}
/*********************************************/
/*            HELPER: ENABLE CRT1            */
/*********************************************/

static void SiS_HandleCRT1(struct SiS_Private *SiS_Pr)
{
	/* Enable CRT1 gating */
	SiS_SetRegAND(SiS_Pr, SiS_Pr->SiS_P3d4, SiS_Pr->SiS_MyCR63, 0xbf);
}

/*********************************************/
/*           HELPER: GetColorDepth           */
/*********************************************/

/* Return the color depth (in transfer units per 16 pixels, as used by
 * SiS_GetOffset below) for the given mode, derived from the mode-type
 * bits of the mode flag.  Types below EGA are clamped to index 0. */
static unsigned short
SiS_GetColorDepth(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
		  unsigned short ModeIdIndex)
{
	static const unsigned short ColorDepth[6] = { 1, 2, 4, 4, 6, 8 };
	unsigned short modeflag;
	short index;

	/* Standard modes (<= 0x13) live in SModeIDTable, extended modes
	 * in EModeIDTable. */
	if (ModeNo <= 0x13) {
		modeflag = SiS_Pr->SiS_SModeIDTable[ModeIdIndex].St_ModeFlag;
	} else {
		modeflag = SiS_Pr->SiS_EModeIDTable[ModeIdIndex].Ext_ModeFlag;
	}

	index = (modeflag & ModeTypeMask) - ModeEGA;
	if (index < 0)
		index = 0;
	return ColorDepth[index];
}

/*********************************************/
/*             HELPER: GetOffset             */
/*********************************************/

/* Compute the display offset (scanline pitch value) for the mode at
 * refresh-rate-table index 'rrti'. */
static unsigned short
SiS_GetOffset(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
	      unsigned short ModeIdIndex, unsigned short rrti)
{
	unsigned short xres, temp, colordepth, infoflag;

	infoflag = SiS_Pr->SiS_RefIndex[rrti].Ext_InfoFlag;
	xres = SiS_Pr->SiS_RefIndex[rrti].XRes;

	colordepth = SiS_GetColorDepth(SiS_Pr, ModeNo, ModeIdIndex);

	temp = xres / 16;

	/* Interlaced modes fetch two lines per frame scan. */
	if (infoflag & InterlaceMode)
		temp <<= 1;

	temp *= colordepth;

	/* Round up for widths that are not a multiple of 16 pixels. */
	if (xres % 16)
		temp += (colordepth >> 1);

	return temp;
}

/*********************************************/
/*                   SEQ                     */
/*********************************************/

/* Program sequencer registers SR01..SR04 from the standard table entry;
 * SR00 is set to 0x03 (end synchronous reset) first, and SR01 gets the
 * screen-off bit (0x20) held while the mode is being set. */
static void
SiS_SetSeqRegs(struct SiS_Private *SiS_Pr, unsigned short StandTableIndex)
{
	unsigned char SRdata;
	int i;

	SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, 0x00, 0x03);

	SRdata = SiS_Pr->SiS_StandTable[StandTableIndex].SR[0] | 0x20;
	SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, 0x01, SRdata);

	/* SR[1..3] map to sequencer indices 2..4. */
	for (i = 2; i <= 4; i++) {
		SRdata = SiS_Pr->SiS_StandTable[StandTableIndex].SR[i - 1];
		SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, i, SRdata);
	}
}

/*********************************************/
/*                  MISC                     */
/*********************************************/

/* Program the miscellaneous output register (write port 0x3c2). */
static void
SiS_SetMiscRegs(struct SiS_Private *SiS_Pr, unsigned short StandTableIndex)
{
	unsigned char Miscdata = SiS_Pr->SiS_StandTable[StandTableIndex].MISC;

	SiS_SetRegByte(SiS_Pr, SiS_Pr->SiS_P3c2, Miscdata);
}

/*********************************************/
/*                  CRTC                     */
/*********************************************/

/* Program CRTC registers CR00..CR18 from the standard table; CR11 bit 7
 * is cleared first to unprotect CR00-CR07. */
static void
SiS_SetCRTCRegs(struct SiS_Private *SiS_Pr, unsigned short StandTableIndex)
{
	unsigned char CRTCdata;
	unsigned short i;

	/* Unlock CRTC */
	SiS_SetRegAND(SiS_Pr, SiS_Pr->SiS_P3d4, 0x11, 0x7f);

	for (i = 0; i <= 0x18; i++) {
		CRTCdata = SiS_Pr->SiS_StandTable[StandTableIndex].CRTC[i];
		SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3d4, i, CRTCdata);
	}
}

/*********************************************/
/*                   ATT                     */
/*********************************************/

/* Program attribute controller registers 0x00..0x13.  Each write is a
 * read of 3DA (to reset the 3C0 flip-flop) followed by index then data
 * writes to 3C0 — the order is mandated by the VGA attribute controller
 * protocol.  Finally index 0x14 is zeroed and bit 5 (0x20) re-enables
 * display output. */
static void
SiS_SetATTRegs(struct SiS_Private *SiS_Pr, unsigned short StandTableIndex)
{
	unsigned char ARdata;
	unsigned short i;

	for (i = 0; i <= 0x13; i++) {
		ARdata = SiS_Pr->SiS_StandTable[StandTableIndex].ATTR[i];
		SiS_GetRegByte(SiS_Pr, SiS_Pr->SiS_P3da);
		SiS_SetRegByte(SiS_Pr, SiS_Pr->SiS_P3c0, i);
		SiS_SetRegByte(SiS_Pr, SiS_Pr->SiS_P3c0, ARdata);
	}
	SiS_GetRegByte(SiS_Pr, SiS_Pr->SiS_P3da);
	SiS_SetRegByte(SiS_Pr, SiS_Pr->SiS_P3c0, 0x14);
	SiS_SetRegByte(SiS_Pr, SiS_Pr->SiS_P3c0, 0x00);

	SiS_GetRegByte(SiS_Pr, SiS_Pr->SiS_P3da);
	SiS_SetRegByte(SiS_Pr, SiS_Pr->SiS_P3c0, 0x20);
	SiS_GetRegByte(SiS_Pr, SiS_Pr->SiS_P3da);
}

/*********************************************/
/*                   GRC                     */
/*********************************************/

/* Program graphics controller registers GR00..GR08 from the standard
 * table; for better-than-VGA modes the 256-color shift mode bit in GR05
 * is cleared. */
static void
SiS_SetGRCRegs(struct SiS_Private *SiS_Pr, unsigned short StandTableIndex)
{
	unsigned char GRdata;
	unsigned short i;

	for (i = 0; i <= 0x08; i++) {
		GRdata = SiS_Pr->SiS_StandTable[StandTableIndex].GRC[i];
		SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3ce, i, GRdata);
	}

	if (SiS_Pr->SiS_ModeType > ModeVGA) {
		/* 256 color disable */
		SiS_SetRegAND(SiS_Pr, SiS_Pr->SiS_P3ce, 0x05, 0xBF);
	}
}
/*********************************************/
/*          CLEAR EXTENDED REGISTERS         */
/*********************************************/

/* Zero extended sequencer registers SR0A..SR0E and clear SR37 bit 0.
 * (ModeNo is currently unused here.) */
static void SiS_ClearExt1Regs(struct SiS_Private *SiS_Pr, unsigned short ModeNo)
{
	int i;

	for (i = 0x0A; i <= 0x0E; i++) {
		SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, i, 0x00);
	}

	SiS_SetRegAND(SiS_Pr, SiS_Pr->SiS_P3c4, 0x37, 0xFE);
}

/*********************************************/
/*              Get rate index               */
/*********************************************/

/* Find the refresh-rate-table index for the given mode.  The rate
 * selection comes from CR33 (low nibble); the scan stops early when the
 * table entry no longer matches the mode or its type is below the
 * current SiS_ModeType.  Returns 0xFFFF for standard modes (<= 0x13),
 * which have no entry in the refresh-rate table. */
static unsigned short
SiS_GetRatePtr(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
	       unsigned short ModeIdIndex)
{
	unsigned short rrti, i, index, temp;

	if (ModeNo <= 0x13)
		return 0xFFFF;

	index = SiS_GetReg(SiS_Pr, SiS_Pr->SiS_P3d4, 0x33) & 0x0F;
	if (index > 0)
		index--;

	rrti = SiS_Pr->SiS_EModeIDTable[ModeIdIndex].REFindex;
	ModeNo = SiS_Pr->SiS_RefIndex[rrti].ModeID;

	i = 0;
	do {
		if (SiS_Pr->SiS_RefIndex[rrti + i].ModeID != ModeNo)
			break;

		temp = SiS_Pr->SiS_RefIndex[rrti + i].Ext_InfoFlag &
		    ModeTypeMask;
		if (temp < SiS_Pr->SiS_ModeType)
			break;

		i++;
		index--;
	} while (index != 0xFFFF);

	/* Back up to the last matching entry (the loop overshoots by one). */
	i--;

	return (rrti + i);
}

/*********************************************/
/*                   SYNC                    */
/*********************************************/

/* Program the sync polarity bits (7:6 of the info flag) into the
 * miscellaneous output register; the low bits 0x2f keep the standard
 * enable/clock-select configuration. */
static void SiS_SetCRT1Sync(struct SiS_Private *SiS_Pr, unsigned short rrti)
{
	unsigned short sync = SiS_Pr->SiS_RefIndex[rrti].Ext_InfoFlag >> 8;

	sync &= 0xC0;
	sync |= 0x2f;
	SiS_SetRegByte(SiS_Pr, SiS_Pr->SiS_P3c2, sync);
}

/*********************************************/
/*                  CRTC/2                   */
/*********************************************/

/* Program the extended CRT1 timing set from the CRT1 table: CR[0..7]
 * go to CR00..CR07, CR[8..10] to CR10..CR12, CR[11..12] to CR15..CR16,
 * CR[13..15] to SR0A..SR0C, and CR[16] is split across SR0E (overflow
 * bits) and CR09 (bit 5 plus the double-scan flag). */
static void
SiS_SetCRT1CRTC(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
		unsigned short ModeIdIndex, unsigned short rrti)
{
	unsigned char index;
	unsigned short temp, i, j, modeflag;

	/* Unlock CRTC */
	SiS_SetRegAND(SiS_Pr, SiS_Pr->SiS_P3d4, 0x11, 0x7f);

	modeflag = SiS_Pr->SiS_EModeIDTable[ModeIdIndex].Ext_ModeFlag;

	index = SiS_Pr->SiS_RefIndex[rrti].Ext_CRT1CRTC;

	/* Note: 'i' carries over between the loops below by design. */
	for (i = 0, j = 0; i <= 7; i++, j++) {
		SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3d4, j,
			   SiS_Pr->SiS_CRT1Table[index].CR[i]);
	}
	for (j = 0x10; i <= 10; i++, j++) {
		SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3d4, j,
			   SiS_Pr->SiS_CRT1Table[index].CR[i]);
	}
	for (j = 0x15; i <= 12; i++, j++) {
		SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3d4, j,
			   SiS_Pr->SiS_CRT1Table[index].CR[i]);
	}
	for (j = 0x0A; i <= 15; i++, j++) {
		SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, j,
			   SiS_Pr->SiS_CRT1Table[index].CR[i]);
	}

	temp = SiS_Pr->SiS_CRT1Table[index].CR[16] & 0xE0;
	SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, 0x0E, temp);

	temp = ((SiS_Pr->SiS_CRT1Table[index].CR[16]) & 0x01) << 5;
	if (modeflag & DoubleScanMode)
		temp |= 0x80;
	SiS_SetRegANDOR(SiS_Pr, SiS_Pr->SiS_P3d4, 0x09, 0x5F, temp);

	/* Underline location for graphics modes. */
	if (SiS_Pr->SiS_ModeType > ModeVGA)
		SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3d4, 0x14, 0x4F);
}

/*********************************************/
/*               OFFSET & PITCH              */
/*********************************************/
/*  (partly overruled by SetPitch() in XF86) */
/*********************************************/

/* Program the display pitch (CR13 + SR0E high nibble) and derive the
 * memory-fetch threshold written to SR10. */
static void
SiS_SetCRT1Offset(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
		  unsigned short ModeIdIndex, unsigned short rrti)
{
	unsigned short du = SiS_GetOffset(SiS_Pr, ModeNo, ModeIdIndex, rrti);
	unsigned short infoflag = SiS_Pr->SiS_RefIndex[rrti].Ext_InfoFlag;
	unsigned short temp;

	temp = (du >> 8) & 0x0f;
	SiS_SetRegANDOR(SiS_Pr, SiS_Pr->SiS_P3c4, 0x0E, 0xF0, temp);

	SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3d4, 0x13, (du & 0xFF));

	if (infoflag & InterlaceMode)
		du >>= 1;

	/* Threshold = ceil(du * 32 / 256) + 1. */
	du <<= 5;
	temp = (du >> 8) & 0xff;
	if (du & 0xff)
		temp++;
	temp++;
	SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, 0x10, temp);
}

/*********************************************/
/*                  VCLK                     */
/*********************************************/

/* Program the pixel clock PLL (SR2B/SR2C) from the VCLK table and
 * strobe SR2D to latch the new values; SR31 bits 4-5 are cleared to
 * select the programmable clock. */
static void
SiS_SetCRT1VCLK(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
		unsigned short rrti)
{
	unsigned short index = SiS_Pr->SiS_RefIndex[rrti].Ext_CRTVCLK;
	unsigned short clka = SiS_Pr->SiS_VCLKData[index].SR2B;
	unsigned short clkb = SiS_Pr->SiS_VCLKData[index].SR2C;

	SiS_SetRegAND(SiS_Pr, SiS_Pr->SiS_P3c4, 0x31, 0xCF);
	SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, 0x2B, clka);
	SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, 0x2C, clkb);
	SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, 0x2D, 0x01);
}

/*********************************************/
/*                  FIFO                     */
/*********************************************/

/* Set up the display FIFO thresholds (SR08/SR09/SR3D) for 315-series
 * chips; auto-threshold is re-enabled for most extended modes. */
static void
SiS_SetCRT1FIFO_310(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
		    unsigned short mi)
{
	unsigned short modeflag = SiS_Pr->SiS_EModeIDTable[mi].Ext_ModeFlag;

	/* disable auto-threshold */
	SiS_SetRegAND(SiS_Pr, SiS_Pr->SiS_P3c4, 0x3D, 0xFE);

	SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, 0x08, 0xAE);
	SiS_SetRegAND(SiS_Pr, SiS_Pr->SiS_P3c4, 0x09, 0xF0);

	if (ModeNo <= 0x13)
		return;

	if ((!(modeflag & DoubleScanMode)) || (!(modeflag & HalfDCLK))) {
		SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, 0x08, 0x34);
		SiS_SetRegOR(SiS_Pr, SiS_Pr->SiS_P3c4, 0x3D, 0x01);
	}
}

/*********************************************/
/*              MODE REGISTERS               */
/*********************************************/

/* Adjust clock-dependent bits (SR32, SR1F) and the DAC speed field in
 * SR07 based on the mode's pixel clock (in MHz from the VCLK table). */
static void
SiS_SetVCLKState(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
		 unsigned short rrti)
{
	unsigned short data = 0, VCLK = 0, index = 0;

	if (ModeNo > 0x13) {
		index = SiS_Pr->SiS_RefIndex[rrti].Ext_CRTVCLK;
		VCLK = SiS_Pr->SiS_VCLKData[index].CLOCK;
	}

	if (VCLK >= 166)
		data |= 0x0c;
	SiS_SetRegANDOR(SiS_Pr, SiS_Pr->SiS_P3c4, 0x32, 0xf3, data);

	if (VCLK >= 166)
		SiS_SetRegAND(SiS_Pr, SiS_Pr->SiS_P3c4, 0x1f, 0xe7);

	/* DAC speed */
	data = 0x03;
	if (VCLK >= 260)
		data = 0x00;
	else if (VCLK >= 160)
		data = 0x01;
	else if (VCLK >= 135)
		data = 0x02;

	SiS_SetRegANDOR(SiS_Pr, SiS_Pr->SiS_P3c4, 0x07, 0xF8, data);
}

/* Program the extended mode-control registers: graphics-mode and
 * interlace bits (SR06), interlace retrace start (CR19/CR1A), half-DCLK
 * (SR01), line-compare (SR0F), display mode select (SR21) and the
 * clock-dependent state via SiS_SetVCLKState().
 * NOTE(review): this is called from SiS_SetCRT1Group() even when rrti is
 * 0xFFFF; the RefIndex[rrti] access is then only avoided because
 * infoflag stays 0 for ModeNo <= 0x13 — verify against callers. */
static void
SiS_SetCRT1ModeRegs(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
		    unsigned short ModeIdIndex, unsigned short rrti)
{
	unsigned short data, infoflag = 0, modeflag;

	if (ModeNo <= 0x13)
		modeflag = SiS_Pr->SiS_SModeIDTable[ModeIdIndex].St_ModeFlag;
	else {
		modeflag = SiS_Pr->SiS_EModeIDTable[ModeIdIndex].Ext_ModeFlag;
		infoflag = SiS_Pr->SiS_RefIndex[rrti].Ext_InfoFlag;
	}

	/* Disable DPMS */
	SiS_SetRegAND(SiS_Pr, SiS_Pr->SiS_P3c4, 0x1F, 0x3F);

	data = 0;
	if (ModeNo > 0x13) {
		if (SiS_Pr->SiS_ModeType > ModeEGA) {
			data |= 0x02;
			data |= ((SiS_Pr->SiS_ModeType - ModeVGA) << 2);
		}
		if (infoflag & InterlaceMode)
			data |= 0x20;
	}
	SiS_SetRegANDOR(SiS_Pr, SiS_Pr->SiS_P3c4, 0x06, 0xC0, data);

	data = 0;
	if (infoflag & InterlaceMode) {
		/* data = (Hsync / 8) - ((Htotal / 8) / 2) + 3 */
		unsigned short hrs = (SiS_GetReg(SiS_Pr, SiS_Pr->SiS_P3d4, 0x04) |
				      ((SiS_GetReg(SiS_Pr, SiS_Pr->SiS_P3c4,
						   0x0b) & 0xc0) << 2)) - 3;
		unsigned short hto = (SiS_GetReg(SiS_Pr, SiS_Pr->SiS_P3d4, 0x00) |
				      ((SiS_GetReg(SiS_Pr, SiS_Pr->SiS_P3c4,
						   0x0b) & 0x03) << 8)) + 5;
		data = hrs - (hto >> 1) + 3;
	}
	SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3d4, 0x19, (data & 0xFF));
	SiS_SetRegANDOR(SiS_Pr, SiS_Pr->SiS_P3d4, 0x1a, 0xFC, (data >> 8));

	if (modeflag & HalfDCLK)
		SiS_SetRegOR(SiS_Pr, SiS_Pr->SiS_P3c4, 0x01, 0x08);

	data = 0;
	if (modeflag & LineCompareOff)
		data = 0x08;
	SiS_SetRegANDOR(SiS_Pr, SiS_Pr->SiS_P3c4, 0x0F, 0xB7, data);

	if ((SiS_Pr->SiS_ModeType == ModeEGA) && (ModeNo > 0x13))
		SiS_SetRegOR(SiS_Pr, SiS_Pr->SiS_P3c4, 0x0F, 0x40);

	SiS_SetRegAND(SiS_Pr, SiS_Pr->SiS_P3c4, 0x31, 0xfb);

	/* SR21: 0x60 text, 0x00 EGA, 0xA0 other graphics modes. */
	data = 0x60;
	if (SiS_Pr->SiS_ModeType != ModeText) {
		data ^= 0x60;
		if (SiS_Pr->SiS_ModeType != ModeEGA)
			data ^= 0xA0;
	}
	SiS_SetRegANDOR(SiS_Pr, SiS_Pr->SiS_P3c4, 0x21, 0x1F, data);

	SiS_SetVCLKState(SiS_Pr, ModeNo, rrti);

	if (SiS_GetReg(SiS_Pr, SiS_Pr->SiS_P3d4, 0x31) & 0x40)
		SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3d4, 0x52, 0x2c);
	else
		SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3d4, 0x52, 0x6c);
}

/*********************************************/
/*                 LOAD DAC                  */
/*********************************************/

/* Write one RGB triple to the DAC data port; 'dl' selects which of the
 * three table bytes goes first (rotation used by the VGA palette
 * expansion in SiS_LoadDAC). */
static void
SiS_WriteDAC(struct SiS_Private *SiS_Pr, unsigned long DACData,
	     unsigned short shiftflag, unsigned short dl, unsigned short ah,
	     unsigned short al, unsigned short dh)
{
	unsigned short d1, d2, d3;

	switch (dl) {
	case 0:
		d1 = dh;
		d2 = ah;
		d3 = al;
		break;
	case 1:
		d1 = ah;
		d2 = al;
		d3 = dh;
		break;
	default:
		d1 = al;
		d2 = dh;
		d3 = ah;
	}
	SiS_SetRegByte(SiS_Pr, DACData, (d1 << shiftflag));
	SiS_SetRegByte(SiS_Pr, DACData, (d2 << shiftflag));
	SiS_SetRegByte(SiS_Pr, DACData, (d3 << shiftflag));
}

/* Load the palette appropriate for the mode's DAC type: MDA/CGA/EGA
 * modes get their fixed 64-entry tables, VGA modes get 16 base colors,
 * 16 grays, and the generated 216-entry color cube.
 * NOTE(review): the '< 0x13' (not '<= 0x13') looks inconsistent with the
 * other mode checks in this file; harmless since only mode 0x03 of the
 * standard modes is supported, but verify against the original driver. */
static void
SiS_LoadDAC(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
	    unsigned short mi)
{
	unsigned short data, data2, time, i, j, k, m, n, o;
	unsigned short si, di, bx, sf;
	unsigned long DACAddr, DACData;
	const unsigned char *table = NULL;

	if (ModeNo < 0x13)
		data = SiS_Pr->SiS_SModeIDTable[mi].St_ModeFlag;
	else
		data = SiS_Pr->SiS_EModeIDTable[mi].Ext_ModeFlag;

	data &= DACInfoFlag;

	j = time = 64;
	if (data == 0x00)
		table = SiS_MDA_DAC;
	else if (data == 0x08)
		table = SiS_CGA_DAC;
	else if (data == 0x10)
		table = SiS_EGA_DAC;
	else {
		j = 16;
		time = 256;
		table = SiS_VGA_DAC;
	}

	DACAddr = SiS_Pr->SiS_P3c8;
	DACData = SiS_Pr->SiS_P3c9;
	sf = 0;
	/* Pixel mask: all bits significant. */
	SiS_SetRegByte(SiS_Pr, SiS_Pr->SiS_P3c6, 0xFF);

	SiS_SetRegByte(SiS_Pr, DACAddr, 0x00);

	/* First j entries: 2-bit-per-channel encoded colors
	 * (0x2A/0x15 expand the two bits to a 6-bit DAC value). */
	for (i = 0; i < j; i++) {
		data = table[i];
		for (k = 0; k < 3; k++) {
			data2 = 0;
			if (data & 0x01)
				data2 += 0x2A;
			if (data & 0x02)
				data2 += 0x15;
			SiS_SetRegByte(SiS_Pr, DACData, (data2 << sf));
			data >>= 2;
		}
	}

	if (time == 256) {
		/* 16 gray levels. */
		for (i = 16; i < 32; i++) {
			data = table[i] << sf;
			for (k = 0; k < 3; k++)
				SiS_SetRegByte(SiS_Pr, DACData, data);
		}
		/* Generate the remaining 216 entries by rotating triples
		 * from the table (the classic VGA color cube). */
		si = 32;
		for (m = 0; m < 9; m++) {
			di = si;
			bx = si + 4;
			for (n = 0; n < 3; n++) {
				for (o = 0; o < 5; o++) {
					SiS_WriteDAC(SiS_Pr, DACData, sf, n,
						     table[di], table[bx],
						     table[si]);
					si++;
				}
				si -= 2;
				for (o = 0; o < 3; o++) {
					SiS_WriteDAC(SiS_Pr, DACData, sf, n,
						     table[di], table[si],
						     table[bx]);
					si--;
				}
			}
			si += 5;
		}
	}
}

/*********************************************/
/*         SET CRT1 REGISTER GROUP           */
/*********************************************/

/* Full CRT1 mode-set sequence: standard VGA register groups first,
 * then (for extended modes with a valid rate index) the extended
 * timing, pitch and clock registers, FIFO, mode registers, palette,
 * and finally display enable. */
static void
SiS_SetCRT1Group(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
		 unsigned short ModeIdIndex)
{
	unsigned short StandTableIndex, rrti;

	SiS_Pr->SiS_CRT1Mode = ModeNo;

	/* Standard table entry 0 = text, 1 = extended modes. */
	if (ModeNo <= 0x13)
		StandTableIndex = 0;
	else
		StandTableIndex = 1;

	SiS_ResetSegmentRegisters(SiS_Pr);
	SiS_SetSeqRegs(SiS_Pr, StandTableIndex);
	SiS_SetMiscRegs(SiS_Pr, StandTableIndex);
	SiS_SetCRTCRegs(SiS_Pr, StandTableIndex);
	SiS_SetATTRegs(SiS_Pr, StandTableIndex);
	SiS_SetGRCRegs(SiS_Pr, StandTableIndex);
	SiS_ClearExt1Regs(SiS_Pr, ModeNo);

	rrti = SiS_GetRatePtr(SiS_Pr, ModeNo, ModeIdIndex);

	if (rrti != 0xFFFF) {
		SiS_SetCRT1Sync(SiS_Pr, rrti);
		SiS_SetCRT1CRTC(SiS_Pr, ModeNo, ModeIdIndex, rrti);
		SiS_SetCRT1Offset(SiS_Pr, ModeNo, ModeIdIndex, rrti);
		SiS_SetCRT1VCLK(SiS_Pr, ModeNo, rrti);
	}

	SiS_SetCRT1FIFO_310(SiS_Pr, ModeNo, ModeIdIndex);

	SiS_SetCRT1ModeRegs(SiS_Pr, ModeNo, ModeIdIndex, rrti);

	SiS_LoadDAC(SiS_Pr, ModeNo, ModeIdIndex);

	SiS_DisplayOn(SiS_Pr);
}

/*********************************************/
/*                 SiSSetMode()              */
/*********************************************/

/* Public entry point: set display mode 'ModeNo' on CRT1.
 * Returns 1 on success, 0 if the mode is unknown. */
int SiSUSBSetMode(struct SiS_Private *SiS_Pr, unsigned short ModeNo)
{
	unsigned short ModeIdIndex;
	unsigned long BaseAddr = SiS_Pr->IOAddress;

	SiSUSB_InitPtr(SiS_Pr);
	SiSUSBRegInit(SiS_Pr, BaseAddr);
	SiS_GetSysFlags(SiS_Pr);

	if (!(SiS_SearchModeID(SiS_Pr, &ModeNo, &ModeIdIndex)))
		return 0;

	/* Unlock the extended registers (SR05 password). */
	SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, 0x05, 0x86);

	SiSInitPCIetc(SiS_Pr);

	/* Strip the "don't clear memory" flag bit. */
	ModeNo &= 0x7f;

	SiS_Pr->SiS_ModeType =
	    SiS_Pr->SiS_EModeIDTable[ModeIdIndex].Ext_ModeFlag & ModeTypeMask;

	SiS_Pr->SiS_SetFlag = LowModeTests;

	/* Set mode on CRT1 */
	SiS_SetCRT1Group(SiS_Pr, ModeNo, ModeIdIndex);

	SiS_HandleCRT1(SiS_Pr);

	SiS_DisplayOn(SiS_Pr);
	SiS_SetRegByte(SiS_Pr, SiS_Pr->SiS_P3c6, 0xFF);

	/* Store mode number */
	SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3d4, 0x34, ModeNo);

	return 1;
}

/* Translate a VESA mode number to the internal mode number (via
 * Ext_VESAID in the extended mode table) and set that mode.
 * Returns 0 if the VESA mode is unknown. */
int SiSUSBSetVESAMode(struct SiS_Private *SiS_Pr, unsigned short VModeNo)
{
	unsigned short ModeNo = 0;
	int i;

	SiSUSB_InitPtr(SiS_Pr);

	if (VModeNo == 0x03) {

		ModeNo = 0x03;

	} else {

		i = 0;
		do {

			if (SiS_Pr->SiS_EModeIDTable[i].Ext_VESAID == VModeNo) {
				ModeNo = SiS_Pr->SiS_EModeIDTable[i].Ext_ModeID;
				break;
			}

		} while (SiS_Pr->SiS_EModeIDTable[i++].Ext_ModeID != 0xff);

	}

	if (!ModeNo)
		return 0;

	return SiSUSBSetMode(SiS_Pr, ModeNo);
}

#endif /* INCL_SISUSB_CON */
gpl-2.0
hyuh/villec2-kernel
drivers/usb/misc/sisusbvga/sisusb_init.c
11746
25368
/* * sisusb - usb kernel driver for SiS315(E) based USB2VGA dongles * * Display mode initializing code * * Copyright (C) 2001-2005 by Thomas Winischhofer, Vienna, Austria * * If distributed as part of the Linux kernel, this code is licensed under the * terms of the GPL v2. * * Otherwise, the following license terms apply: * * * Redistribution and use in source and binary forms, with or without * * modification, are permitted provided that the following conditions * * are met: * * 1) Redistributions of source code must retain the above copyright * * notice, this list of conditions and the following disclaimer. * * 2) Redistributions in binary form must reproduce the above copyright * * notice, this list of conditions and the following disclaimer in the * * documentation and/or other materials provided with the distribution. * * 3) The name of the author may not be used to endorse or promote products * * derived from this software without specific prior written permission. * * * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 * Author: Thomas Winischhofer <thomas@winischhofer.net>
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/spinlock.h>

#include "sisusb.h"

#ifdef INCL_SISUSB_CON

#include "sisusb_init.h"

/*********************************************/
/*         POINTER INITIALIZATION            */
/*********************************************/

/* Hook SiS_Pr up to the static mode/CRTC/clock tables used below. */
static void SiSUSB_InitPtr(struct SiS_Private *SiS_Pr)
{
	SiS_Pr->SiS_ModeResInfo = SiSUSB_ModeResInfo;
	SiS_Pr->SiS_StandTable = SiSUSB_StandTable;

	SiS_Pr->SiS_SModeIDTable = SiSUSB_SModeIDTable;
	SiS_Pr->SiS_EModeIDTable = SiSUSB_EModeIDTable;
	SiS_Pr->SiS_RefIndex = SiSUSB_RefIndex;
	SiS_Pr->SiS_CRT1Table = SiSUSB_CRT1Table;

	SiS_Pr->SiS_VCLKData = SiSUSB_VCLKData;
}

/*********************************************/
/*          HELPER: SetReg, GetReg           */
/*********************************************/

/* Write 'data' to indexed register 'index' at 'port' (over USB). */
static void SiS_SetReg(struct SiS_Private *SiS_Pr, unsigned long port,
		       unsigned short index, unsigned short data)
{
	sisusb_setidxreg(SiS_Pr->sisusb, port, index, data);
}

/* Write 'data' directly to 'port' (non-indexed register). */
static void SiS_SetRegByte(struct SiS_Private *SiS_Pr, unsigned long port,
			   unsigned short data)
{
	sisusb_setreg(SiS_Pr->sisusb, port, data);
}

/* Read indexed register 'index' at 'port'. */
static unsigned char SiS_GetReg(struct SiS_Private *SiS_Pr, unsigned long port,
				unsigned short index)
{
	u8 data;

	sisusb_getidxreg(SiS_Pr->sisusb, port, index, &data);
	return data;
}

/* Read a byte directly from 'port'. */
static unsigned char SiS_GetRegByte(struct SiS_Private *SiS_Pr,
				    unsigned long port)
{
	u8 data;

	sisusb_getreg(SiS_Pr->sisusb, port, &data);
	return data;
}

/* Read-modify-write: reg = (reg & DataAND) | DataOR. */
static void SiS_SetRegANDOR(struct SiS_Private *SiS_Pr, unsigned long port,
			    unsigned short index, unsigned short DataAND,
			    unsigned short DataOR)
{
	sisusb_setidxregandor(SiS_Pr->sisusb, port, index, DataAND, DataOR);
}

/* Clear bits: reg &= DataAND. */
static void SiS_SetRegAND(struct SiS_Private *SiS_Pr, unsigned long port,
			  unsigned short index, unsigned short DataAND)
{
	sisusb_setidxregand(SiS_Pr->sisusb, port, index, DataAND);
}

/* Set bits: reg |= DataOR. */
static void SiS_SetRegOR(struct SiS_Private *SiS_Pr, unsigned long port,
			 unsigned short index, unsigned short DataOR)
{
	sisusb_setidxregor(SiS_Pr->sisusb, port, index, DataOR);
}

/*********************************************/
/*      HELPER: DisplayOn, DisplayOff        */
/*********************************************/

/* Unblank the display: clear bit 5 (0x20) of SR01. */
static void SiS_DisplayOn(struct SiS_Private *SiS_Pr)
{
	SiS_SetRegAND(SiS_Pr, SiS_Pr->SiS_P3c4, 0x01, 0xDF);
}

/*********************************************/
/*        HELPER: Init Port Addresses        */
/*********************************************/

/*
 * Compute device-relative addresses of the standard VGA I/O ports;
 * the hardware mirrors 0x3c0..0x3da at BaseAddr + 0x10..0x2a.
 */
static void SiSUSBRegInit(struct SiS_Private *SiS_Pr, unsigned long BaseAddr)
{
	SiS_Pr->SiS_P3c4 = BaseAddr + 0x14;
	SiS_Pr->SiS_P3d4 = BaseAddr + 0x24;
	SiS_Pr->SiS_P3c0 = BaseAddr + 0x10;
	SiS_Pr->SiS_P3ce = BaseAddr + 0x1e;
	SiS_Pr->SiS_P3c2 = BaseAddr + 0x12;
	SiS_Pr->SiS_P3ca = BaseAddr + 0x1a;
	SiS_Pr->SiS_P3c6 = BaseAddr + 0x16;
	SiS_Pr->SiS_P3c7 = BaseAddr + 0x17;
	SiS_Pr->SiS_P3c8 = BaseAddr + 0x18;
	SiS_Pr->SiS_P3c9 = BaseAddr + 0x19;
	SiS_Pr->SiS_P3cb = BaseAddr + 0x1b;
	SiS_Pr->SiS_P3cc = BaseAddr + 0x1c;
	SiS_Pr->SiS_P3cd = BaseAddr + 0x1d;
	SiS_Pr->SiS_P3da = BaseAddr + 0x2a;
	SiS_Pr->SiS_Part1Port = BaseAddr + SIS_CRT2_PORT_04;
}

/*********************************************/
/*             HELPER: GetSysFlags           */
/*********************************************/

/* CR63 lives at its native index 0x63 on this chip. */
static void SiS_GetSysFlags(struct SiS_Private *SiS_Pr)
{
	SiS_Pr->SiS_MyCR63 = 0x63;
}

/*********************************************/
/*         HELPER: Init PCI & Engines        */
/*********************************************/

static void SiSInitPCIetc(struct SiS_Private *SiS_Pr)
{
	SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, 0x20, 0xa1);

	/*  - Enable 2D (0x40)
	 *  - Enable 3D (0x02)
	 *  - Enable 3D vertex command fetch (0x10)
	 *  - Enable 3D command parser (0x08)
	 *  - Enable 3D G/L transformation engine (0x80)
	 */
	SiS_SetRegOR(SiS_Pr, SiS_Pr->SiS_P3c4, 0x1E, 0xDA);
}

/*********************************************/
/*        HELPER: SET SEGMENT REGISTERS      */
/*********************************************/

/* Store the low byte of 'value' in the low nibbles of 3CB and 3CD. */
static void SiS_SetSegRegLower(struct SiS_Private *SiS_Pr, unsigned short value)
{
	unsigned short temp;

	value &= 0x00ff;
	temp = SiS_GetRegByte(SiS_Pr, SiS_Pr->SiS_P3cb) & 0xf0;
	temp |= (value >> 4);
	SiS_SetRegByte(SiS_Pr, SiS_Pr->SiS_P3cb, temp);
	temp = SiS_GetRegByte(SiS_Pr, SiS_Pr->SiS_P3cd) & 0xf0;
	temp |= (value & 0x0f);
	SiS_SetRegByte(SiS_Pr, SiS_Pr->SiS_P3cd, temp);
}

/* Store the low byte of 'value' in the high nibbles of 3CB and 3CD. */
static void SiS_SetSegRegUpper(struct SiS_Private *SiS_Pr, unsigned short value)
{
	unsigned short temp;

	value &= 0x00ff;
	temp = SiS_GetRegByte(SiS_Pr, SiS_Pr->SiS_P3cb) & 0x0f;
	temp |= (value & 0xf0);
	SiS_SetRegByte(SiS_Pr, SiS_Pr->SiS_P3cb, temp);
	temp = SiS_GetRegByte(SiS_Pr, SiS_Pr->SiS_P3cd) & 0x0f;
	temp |= (value << 4);
	SiS_SetRegByte(SiS_Pr, SiS_Pr->SiS_P3cd, temp);
}

static void SiS_SetSegmentReg(struct SiS_Private *SiS_Pr, unsigned short value)
{
	SiS_SetSegRegLower(SiS_Pr, value);
	SiS_SetSegRegUpper(SiS_Pr, value);
}

static void SiS_ResetSegmentReg(struct SiS_Private *SiS_Pr)
{
	SiS_SetSegmentReg(SiS_Pr, 0);
}

/* Program the segment override (SR1D) from bits 10:8 of 'value',
 * then set the regular segment registers. */
static void SiS_SetSegmentRegOver(struct SiS_Private *SiS_Pr,
				  unsigned short value)
{
	unsigned short temp = value >> 8;

	temp &= 0x07;
	temp |= (temp << 4);
	SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, 0x1d, temp);
	SiS_SetSegmentReg(SiS_Pr, value);
}

static void SiS_ResetSegmentRegOver(struct SiS_Private *SiS_Pr)
{
	SiS_SetSegmentRegOver(SiS_Pr, 0);
}

static void SiS_ResetSegmentRegisters(struct SiS_Private *SiS_Pr)
{
	SiS_ResetSegmentReg(SiS_Pr);
	SiS_ResetSegmentRegOver(SiS_Pr);
}

/*********************************************/
/*           HELPER: SearchModeID            */
/*********************************************/

/*
 * Look up *ModeNo in the mode tables. Standard modes (<= 0x13): only
 * 0x03 (text) is supported, at index 0. Extended modes: linear scan
 * of the Ext table until the ID or the 0xFF end marker is found.
 * Returns 1 and sets *ModeIdIndex on success, 0 if unsupported.
 */
static int SiS_SearchModeID(struct SiS_Private *SiS_Pr, unsigned short *ModeNo,
			    unsigned short *ModeIdIndex)
{
	if ((*ModeNo) <= 0x13) {

		if ((*ModeNo) != 0x03)
			return 0;

		(*ModeIdIndex) = 0;

	} else {

		for (*ModeIdIndex = 0;; (*ModeIdIndex)++) {

			if (SiS_Pr->SiS_EModeIDTable[*ModeIdIndex].Ext_ModeID ==
			    (*ModeNo))
				break;

			if (SiS_Pr->SiS_EModeIDTable[*ModeIdIndex].Ext_ModeID ==
			    0xFF)
				return 0;
		}

	}

	return 1;
}
/*********************************************/
/*           HELPER: ENABLE CRT1             */
/*********************************************/

/* Enable CRT1 gating: clear bit 6 of CR63. */
static void SiS_HandleCRT1(struct SiS_Private *SiS_Pr)
{
	/* Enable CRT1 gating */
	SiS_SetRegAND(SiS_Pr, SiS_Pr->SiS_P3d4, SiS_Pr->SiS_MyCR63, 0xbf);
}

/*********************************************/
/*           HELPER: GetColorDepth           */
/*********************************************/

/*
 * Return the color weight for the mode (bytes per 16-pixel group as
 * consumed by SiS_GetOffset), indexed by the mode-type bits of the
 * mode flags. Types below ModeEGA clamp to the first table entry.
 */
static unsigned short SiS_GetColorDepth(struct SiS_Private *SiS_Pr,
					unsigned short ModeNo,
					unsigned short ModeIdIndex)
{
	static const unsigned short ColorDepth[6] = { 1, 2, 4, 4, 6, 8 };
	unsigned short modeflag;
	short index;

	/* Standard modes keep flags in the St_ table, extended in Ext_. */
	if (ModeNo <= 0x13) {
		modeflag = SiS_Pr->SiS_SModeIDTable[ModeIdIndex].St_ModeFlag;
	} else {
		modeflag = SiS_Pr->SiS_EModeIDTable[ModeIdIndex].Ext_ModeFlag;
	}

	index = (modeflag & ModeTypeMask) - ModeEGA;
	if (index < 0)
		index = 0;
	return ColorDepth[index];
}

/*********************************************/
/*             HELPER: GetOffset             */
/*********************************************/

/*
 * Compute the scanline offset (display pitch units) for the mode:
 * (XRes / 16) * colordepth, doubled for interlace, with a half-depth
 * correction when XRes is not a multiple of 16.
 */
static unsigned short SiS_GetOffset(struct SiS_Private *SiS_Pr,
				    unsigned short ModeNo,
				    unsigned short ModeIdIndex,
				    unsigned short rrti)
{
	unsigned short xres, temp, colordepth, infoflag;

	infoflag = SiS_Pr->SiS_RefIndex[rrti].Ext_InfoFlag;
	xres = SiS_Pr->SiS_RefIndex[rrti].XRes;

	colordepth = SiS_GetColorDepth(SiS_Pr, ModeNo, ModeIdIndex);

	temp = xres / 16;

	if (infoflag & InterlaceMode)
		temp <<= 1;

	temp *= colordepth;

	if (xres % 16)
		temp += (colordepth >> 1);

	return temp;
}

/*********************************************/
/*                   SEQ                     */
/*********************************************/

/*
 * Program sequencer registers SR00..SR04 from the standard table.
 * SR01 gets bit 5 (screen off) forced on while the mode is set up;
 * SiS_DisplayOn() clears it again at the end of mode setting.
 */
static void SiS_SetSeqRegs(struct SiS_Private *SiS_Pr,
			   unsigned short StandTableIndex)
{
	unsigned char SRdata;
	int i;

	SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, 0x00, 0x03);

	SRdata = SiS_Pr->SiS_StandTable[StandTableIndex].SR[0] | 0x20;
	SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, 0x01, SRdata);

	for (i = 2; i <= 4; i++) {
		SRdata = SiS_Pr->SiS_StandTable[StandTableIndex].SR[i - 1];
		SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, i, SRdata);
	}
}

/*********************************************/
/*                  MISC                     */
/*********************************************/

/* Program the miscellaneous output register (3C2) from the table. */
static void SiS_SetMiscRegs(struct SiS_Private *SiS_Pr,
			    unsigned short StandTableIndex)
{
	unsigned char Miscdata = SiS_Pr->SiS_StandTable[StandTableIndex].MISC;

	SiS_SetRegByte(SiS_Pr, SiS_Pr->SiS_P3c2, Miscdata);
}

/*********************************************/
/*                  CRTC                     */
/*********************************************/

/* Program CRTC registers CR00..CR18; CR11 bit 7 (write protect for
 * CR00-CR07) is cleared first so the writes take effect. */
static void SiS_SetCRTCRegs(struct SiS_Private *SiS_Pr,
			    unsigned short StandTableIndex)
{
	unsigned char CRTCdata;
	unsigned short i;

	SiS_SetRegAND(SiS_Pr, SiS_Pr->SiS_P3d4, 0x11, 0x7f);

	for (i = 0; i <= 0x18; i++) {
		CRTCdata = SiS_Pr->SiS_StandTable[StandTableIndex].CRTC[i];
		SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3d4, i, CRTCdata);
	}
}

/*********************************************/
/*                   ATT                     */
/*********************************************/

/*
 * Program attribute controller registers 0x00..0x13. Each write is
 * preceded by a 3DA read to reset the 3C0 index/data flip-flop;
 * the final 0x20 write re-enables attribute palette output.
 */
static void SiS_SetATTRegs(struct SiS_Private *SiS_Pr,
			   unsigned short StandTableIndex)
{
	unsigned char ARdata;
	unsigned short i;

	for (i = 0; i <= 0x13; i++) {
		ARdata = SiS_Pr->SiS_StandTable[StandTableIndex].ATTR[i];
		SiS_GetRegByte(SiS_Pr, SiS_Pr->SiS_P3da);
		SiS_SetRegByte(SiS_Pr, SiS_Pr->SiS_P3c0, i);
		SiS_SetRegByte(SiS_Pr, SiS_Pr->SiS_P3c0, ARdata);
	}
	SiS_GetRegByte(SiS_Pr, SiS_Pr->SiS_P3da);
	SiS_SetRegByte(SiS_Pr, SiS_Pr->SiS_P3c0, 0x14);
	SiS_SetRegByte(SiS_Pr, SiS_Pr->SiS_P3c0, 0x00);

	SiS_GetRegByte(SiS_Pr, SiS_Pr->SiS_P3da);
	SiS_SetRegByte(SiS_Pr, SiS_Pr->SiS_P3c0, 0x20);
	SiS_GetRegByte(SiS_Pr, SiS_Pr->SiS_P3da);
}

/*********************************************/
/*                   GRC                     */
/*********************************************/

/* Program graphics controller registers GR00..GR08. */
static void SiS_SetGRCRegs(struct SiS_Private *SiS_Pr,
			   unsigned short StandTableIndex)
{
	unsigned char GRdata;
	unsigned short i;

	for (i = 0; i <= 0x08; i++) {
		GRdata = SiS_Pr->SiS_StandTable[StandTableIndex].GRC[i];
		SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3ce, i, GRdata);
	}

	if (SiS_Pr->SiS_ModeType > ModeVGA) {
		/* 256 color disable */
		SiS_SetRegAND(SiS_Pr, SiS_Pr->SiS_P3ce, 0x05, 0xBF);
	}
}
/*********************************************/
/*          CLEAR EXTENDED REGISTERS         */
/*********************************************/

/* Zero extended sequencer registers SR0A..SR0E and clear SR37 bit 0.
 * ModeNo is unused here; kept for a uniform call signature. */
static void SiS_ClearExt1Regs(struct SiS_Private *SiS_Pr, unsigned short ModeNo)
{
	int i;

	for (i = 0x0A; i <= 0x0E; i++) {
		SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, i, 0x00);
	}

	SiS_SetRegAND(SiS_Pr, SiS_Pr->SiS_P3c4, 0x37, 0xFE);
}

/*********************************************/
/*              Get rate index               */
/*********************************************/

/*
 * Return the SiS_RefIndex entry for the refresh rate selected in the
 * low nibble of CR33, clamped to the entries actually available for
 * this mode and mode type. Returns 0xFFFF for standard modes
 * (<= 0x13), which have no refresh-rate table.
 */
static unsigned short SiS_GetRatePtr(struct SiS_Private *SiS_Pr,
				     unsigned short ModeNo,
				     unsigned short ModeIdIndex)
{
	unsigned short rrti, i, index, temp;

	if (ModeNo <= 0x13)
		return 0xFFFF;

	index = SiS_GetReg(SiS_Pr, SiS_Pr->SiS_P3d4, 0x33) & 0x0F;
	if (index > 0)
		index--;

	rrti = SiS_Pr->SiS_EModeIDTable[ModeIdIndex].REFindex;
	ModeNo = SiS_Pr->SiS_RefIndex[rrti].ModeID;

	/* Walk consecutive entries of the same mode ID; stop when the
	 * mode type drops below the requested one or 'index' entries
	 * have been consumed (index wraps to 0xFFFF, ending the loop). */
	i = 0;
	do {
		if (SiS_Pr->SiS_RefIndex[rrti + i].ModeID != ModeNo)
			break;

		temp = SiS_Pr->SiS_RefIndex[rrti + i].Ext_InfoFlag &
		       ModeTypeMask;
		if (temp < SiS_Pr->SiS_ModeType)
			break;

		i++;
		index--;
	} while (index != 0xFFFF);

	i--;

	return (rrti + i);
}

/*********************************************/
/*                   SYNC                    */
/*********************************************/

/* Program sync polarity (misc output reg bits 7:6) from the rate table. */
static void SiS_SetCRT1Sync(struct SiS_Private *SiS_Pr, unsigned short rrti)
{
	unsigned short sync = SiS_Pr->SiS_RefIndex[rrti].Ext_InfoFlag >> 8;

	sync &= 0xC0;
	sync |= 0x2f;
	SiS_SetRegByte(SiS_Pr, SiS_Pr->SiS_P3c2, sync);
}

/*********************************************/
/*                  CRTC/2                   */
/*********************************************/

/*
 * Program the extended CRT1 timing set from SiS_CRT1Table:
 * CR[0..7] -> CR00..CR07, CR[8..10] -> CR10..CR12,
 * CR[11..12] -> CR15..CR16, CR[13..15] -> SR0A..SR0C,
 * plus overflow bits from CR[16] and the double-scan flag.
 */
static void SiS_SetCRT1CRTC(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
			    unsigned short ModeIdIndex, unsigned short rrti)
{
	unsigned char index;
	unsigned short temp, i, j, modeflag;

	/* Unprotect CR00-CR07 */
	SiS_SetRegAND(SiS_Pr, SiS_Pr->SiS_P3d4, 0x11, 0x7f);

	modeflag = SiS_Pr->SiS_EModeIDTable[ModeIdIndex].Ext_ModeFlag;

	index = SiS_Pr->SiS_RefIndex[rrti].Ext_CRT1CRTC;

	for (i = 0, j = 0; i <= 7; i++, j++) {
		SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3d4, j,
			   SiS_Pr->SiS_CRT1Table[index].CR[i]);
	}
	for (j = 0x10; i <= 10; i++, j++) {
		SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3d4, j,
			   SiS_Pr->SiS_CRT1Table[index].CR[i]);
	}
	for (j = 0x15; i <= 12; i++, j++) {
		SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3d4, j,
			   SiS_Pr->SiS_CRT1Table[index].CR[i]);
	}
	for (j = 0x0A; i <= 15; i++, j++) {
		SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, j,
			   SiS_Pr->SiS_CRT1Table[index].CR[i]);
	}

	temp = SiS_Pr->SiS_CRT1Table[index].CR[16] & 0xE0;
	SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, 0x0E, temp);

	temp = ((SiS_Pr->SiS_CRT1Table[index].CR[16]) & 0x01) << 5;
	if (modeflag & DoubleScanMode)
		temp |= 0x80;
	SiS_SetRegANDOR(SiS_Pr, SiS_Pr->SiS_P3d4, 0x09, 0x5F, temp);

	if (SiS_Pr->SiS_ModeType > ModeVGA)
		SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3d4, 0x14, 0x4F);
}

/*********************************************/
/*               OFFSET & PITCH              */
/*********************************************/
/*  (partly overruled by SetPitch() in XF86) */
/*********************************************/

/*
 * Program display pitch: low byte in CR13, high nibble in SR0E,
 * and derive the FIFO/fetch count written to SR10.
 */
static void SiS_SetCRT1Offset(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
			      unsigned short ModeIdIndex, unsigned short rrti)
{
	unsigned short du = SiS_GetOffset(SiS_Pr, ModeNo, ModeIdIndex, rrti);
	unsigned short infoflag = SiS_Pr->SiS_RefIndex[rrti].Ext_InfoFlag;
	unsigned short temp;

	temp = (du >> 8) & 0x0f;
	SiS_SetRegANDOR(SiS_Pr, SiS_Pr->SiS_P3c4, 0x0E, 0xF0, temp);

	SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3d4, 0x13, (du & 0xFF));

	if (infoflag & InterlaceMode)
		du >>= 1;

	du <<= 5;
	temp = (du >> 8) & 0xff;
	if (du & 0xff)
		temp++;
	temp++;
	SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, 0x10, temp);
}

/*********************************************/
/*                  VCLK                     */
/*********************************************/

/* Program the pixel clock PLL (SR2B/SR2C/SR2D) from the VCLK table. */
static void SiS_SetCRT1VCLK(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
			    unsigned short rrti)
{
	unsigned short index = SiS_Pr->SiS_RefIndex[rrti].Ext_CRTVCLK;
	unsigned short clka = SiS_Pr->SiS_VCLKData[index].SR2B;
	unsigned short clkb = SiS_Pr->SiS_VCLKData[index].SR2C;

	SiS_SetRegAND(SiS_Pr, SiS_Pr->SiS_P3c4, 0x31, 0xCF);
	SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, 0x2B, clka);
	SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, 0x2C, clkb);
	SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, 0x2D, 0x01);
}

/*********************************************/
/*                  FIFO                     */
/*********************************************/

/* Program FIFO thresholds (SR08/SR09/SR3D) for 315-series. */
static void SiS_SetCRT1FIFO_310(struct SiS_Private *SiS_Pr,
				unsigned short ModeNo, unsigned short mi)
{
	unsigned short modeflag = SiS_Pr->SiS_EModeIDTable[mi].Ext_ModeFlag;

	/* disable auto-threshold */
	SiS_SetRegAND(SiS_Pr, SiS_Pr->SiS_P3c4, 0x3D, 0xFE);

	SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, 0x08, 0xAE);
	SiS_SetRegAND(SiS_Pr, SiS_Pr->SiS_P3c4, 0x09, 0xF0);

	if (ModeNo <= 0x13)
		return;

	/* Re-enable auto-threshold unless double-scan AND half-dclk. */
	if ((!(modeflag & DoubleScanMode)) || (!(modeflag & HalfDCLK))) {
		SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, 0x08, 0x34);
		SiS_SetRegOR(SiS_Pr, SiS_Pr->SiS_P3c4, 0x3D, 0x01);
	}
}

/*********************************************/
/*              MODE REGISTERS               */
/*********************************************/

/* Set clock-dependent state (SR32/SR1F) and DAC speed (SR07) from
 * the mode's pixel clock in MHz. */
static void SiS_SetVCLKState(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
			     unsigned short rrti)
{
	unsigned short data = 0, VCLK = 0, index = 0;

	if (ModeNo > 0x13) {
		index = SiS_Pr->SiS_RefIndex[rrti].Ext_CRTVCLK;
		VCLK = SiS_Pr->SiS_VCLKData[index].CLOCK;
	}

	if (VCLK >= 166)
		data |= 0x0c;
	SiS_SetRegANDOR(SiS_Pr, SiS_Pr->SiS_P3c4, 0x32, 0xf3, data);

	if (VCLK >= 166)
		SiS_SetRegAND(SiS_Pr, SiS_Pr->SiS_P3c4, 0x1f, 0xe7);

	/* DAC speed */
	data = 0x03;
	if (VCLK >= 260)
		data = 0x00;
	else if (VCLK >= 160)
		data = 0x01;
	else if (VCLK >= 135)
		data = 0x02;

	SiS_SetRegANDOR(SiS_Pr, SiS_Pr->SiS_P3c4, 0x07, 0xF8, data);
}

/*
 * Program miscellaneous mode registers: color/interlace bits (SR06),
 * interlace retrace start (CR19/CR1A), half-dclk (SR01), line-compare
 * (SR0F), memory mode (SR21), clock state and threshold (CR52).
 */
static void SiS_SetCRT1ModeRegs(struct SiS_Private *SiS_Pr,
				unsigned short ModeNo,
				unsigned short ModeIdIndex,
				unsigned short rrti)
{
	unsigned short data, infoflag = 0, modeflag;

	if (ModeNo <= 0x13)
		modeflag = SiS_Pr->SiS_SModeIDTable[ModeIdIndex].St_ModeFlag;
	else {
		modeflag = SiS_Pr->SiS_EModeIDTable[ModeIdIndex].Ext_ModeFlag;
		infoflag = SiS_Pr->SiS_RefIndex[rrti].Ext_InfoFlag;
	}

	/* Disable DPMS */
	SiS_SetRegAND(SiS_Pr, SiS_Pr->SiS_P3c4, 0x1F, 0x3F);

	data = 0;
	if (ModeNo > 0x13) {
		if (SiS_Pr->SiS_ModeType > ModeEGA) {
			data |= 0x02;
			data |= ((SiS_Pr->SiS_ModeType - ModeVGA) << 2);
		}
		if (infoflag & InterlaceMode)
			data |= 0x20;
	}
	SiS_SetRegANDOR(SiS_Pr, SiS_Pr->SiS_P3c4, 0x06, 0xC0, data);

	data = 0;
	if (infoflag & InterlaceMode) {
		/* data = (Hsync / 8) - ((Htotal / 8) / 2) + 3 */
		unsigned short hrs =
		    (SiS_GetReg(SiS_Pr, SiS_Pr->SiS_P3d4, 0x04) |
		     ((SiS_GetReg(SiS_Pr, SiS_Pr->SiS_P3c4, 0x0b) & 0xc0) << 2))
		    - 3;
		unsigned short hto =
		    (SiS_GetReg(SiS_Pr, SiS_Pr->SiS_P3d4, 0x00) |
		     ((SiS_GetReg(SiS_Pr, SiS_Pr->SiS_P3c4, 0x0b) & 0x03) << 8))
		    + 5;
		data = hrs - (hto >> 1) + 3;
	}
	SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3d4, 0x19, (data & 0xFF));
	SiS_SetRegANDOR(SiS_Pr, SiS_Pr->SiS_P3d4, 0x1a, 0xFC, (data >> 8));

	if (modeflag & HalfDCLK)
		SiS_SetRegOR(SiS_Pr, SiS_Pr->SiS_P3c4, 0x01, 0x08);

	data = 0;
	if (modeflag & LineCompareOff)
		data = 0x08;
	SiS_SetRegANDOR(SiS_Pr, SiS_Pr->SiS_P3c4, 0x0F, 0xB7, data);

	if ((SiS_Pr->SiS_ModeType == ModeEGA) && (ModeNo > 0x13))
		SiS_SetRegOR(SiS_Pr, SiS_Pr->SiS_P3c4, 0x0F, 0x40);

	SiS_SetRegAND(SiS_Pr, SiS_Pr->SiS_P3c4, 0x31, 0xfb);

	data = 0x60;
	if (SiS_Pr->SiS_ModeType != ModeText) {
		data ^= 0x60;
		if (SiS_Pr->SiS_ModeType != ModeEGA)
			data ^= 0xA0;
	}
	SiS_SetRegANDOR(SiS_Pr, SiS_Pr->SiS_P3c4, 0x21, 0x1F, data);

	SiS_SetVCLKState(SiS_Pr, ModeNo, rrti);

	if (SiS_GetReg(SiS_Pr, SiS_Pr->SiS_P3d4, 0x31) & 0x40)
		SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3d4, 0x52, 0x2c);
	else
		SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3d4, 0x52, 0x6c);
}

/*********************************************/
/*                 LOAD DAC                  */
/*********************************************/

/* Emit one RGB triple to the DAC data port, rotated by 'dl' so the
 * same table values serve all three color positions. */
static void SiS_WriteDAC(struct SiS_Private *SiS_Pr, unsigned long DACData,
			 unsigned short shiftflag, unsigned short dl,
			 unsigned short ah, unsigned short al,
			 unsigned short dh)
{
	unsigned short d1, d2, d3;

	switch (dl) {
	case 0:
		d1 = dh;
		d2 = ah;
		d3 = al;
		break;
	case 1:
		d1 = ah;
		d2 = al;
		d3 = dh;
		break;
	default:
		d1 = al;
		d2 = dh;
		d3 = ah;
	}
	SiS_SetRegByte(SiS_Pr, DACData, (d1 << shiftflag));
	SiS_SetRegByte(SiS_Pr, DACData, (d2 << shiftflag));
	SiS_SetRegByte(SiS_Pr, DACData, (d3 << shiftflag));
}

/*
 * Load the DAC palette appropriate for the mode's DACInfoFlag class
 * (MDA/CGA/EGA tables: 64 entries; VGA: 256 entries, the last 216
 * generated from a 9-group pattern via SiS_WriteDAC).
 */
static void SiS_LoadDAC(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
			unsigned short mi)
{
	unsigned short data, data2, time, i, j, k, m, n, o;
	unsigned short si, di, bx, sf;
	unsigned long DACAddr, DACData;
	const unsigned char *table = NULL;

	/* NOTE(review): '<' here (the sibling helpers use '<='), so
	 * mode 0x13 takes the extended-table branch — verify intended. */
	if (ModeNo < 0x13)
		data = SiS_Pr->SiS_SModeIDTable[mi].St_ModeFlag;
	else
		data = SiS_Pr->SiS_EModeIDTable[mi].Ext_ModeFlag;

	data &= DACInfoFlag;

	j = time = 64;
	if (data == 0x00)
		table = SiS_MDA_DAC;
	else if (data == 0x08)
		table = SiS_CGA_DAC;
	else if (data == 0x10)
		table = SiS_EGA_DAC;
	else {
		j = 16;
		time = 256;
		table = SiS_VGA_DAC;
	}

	DACAddr = SiS_Pr->SiS_P3c8;
	DACData = SiS_Pr->SiS_P3c9;
	sf = 0;
	/* Pixel mask: all bits pass through */
	SiS_SetRegByte(SiS_Pr, SiS_Pr->SiS_P3c6, 0xFF);

	SiS_SetRegByte(SiS_Pr, DACAddr, 0x00);

	/* First j entries: 2-bit-per-channel packed values expanded
	 * to 6-bit DAC intensities (0x2A + 0x15 steps). */
	for (i = 0; i < j; i++) {
		data = table[i];
		for (k = 0; k < 3; k++) {
			data2 = 0;
			if (data & 0x01)
				data2 += 0x2A;
			if (data & 0x02)
				data2 += 0x15;
			SiS_SetRegByte(SiS_Pr, DACData, (data2 << sf));
			data >>= 2;
		}
	}

	if (time == 256) {
		/* Entries 16..31: 16 gray levels */
		for (i = 16; i < 32; i++) {
			data = table[i] << sf;
			for (k = 0; k < 3; k++)
				SiS_SetRegByte(SiS_Pr, DACData, data);
		}
		/* Entries 32..247: 9 groups of 24 colors, each built by
		 * rotating a 5-value ramp through the RGB positions. */
		si = 32;
		for (m = 0; m < 9; m++) {
			di = si;
			bx = si + 4;
			for (n = 0; n < 3; n++) {
				for (o = 0; o < 5; o++) {
					SiS_WriteDAC(SiS_Pr, DACData, sf, n,
						     table[di], table[bx],
						     table[si]);
					si++;
				}
				si -= 2;
				for (o = 0; o < 3; o++) {
					SiS_WriteDAC(SiS_Pr, DACData, sf, n,
						     table[di], table[si],
						     table[bx]);
					si--;
				}
			}
			si += 5;
		}
	}
}

/*********************************************/
/*         SET CRT1 REGISTER GROUP           */
/*********************************************/

/*
 * Program the complete CRT1 register set for a mode: standard VGA
 * registers from the standard table, then (for extended modes with a
 * valid rate index) sync, timing, pitch and clock, then FIFO, mode
 * registers and palette, finally unblanking the screen.
 */
static void SiS_SetCRT1Group(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
			     unsigned short ModeIdIndex)
{
	unsigned short StandTableIndex, rrti;

	SiS_Pr->SiS_CRT1Mode = ModeNo;

	/* Standard table: entry 0 for text mode, 1 for extended modes */
	if (ModeNo <= 0x13)
		StandTableIndex = 0;
	else
		StandTableIndex = 1;

	SiS_ResetSegmentRegisters(SiS_Pr);
	SiS_SetSeqRegs(SiS_Pr, StandTableIndex);
	SiS_SetMiscRegs(SiS_Pr, StandTableIndex);
	SiS_SetCRTCRegs(SiS_Pr, StandTableIndex);
	SiS_SetATTRegs(SiS_Pr, StandTableIndex);
	SiS_SetGRCRegs(SiS_Pr, StandTableIndex);
	SiS_ClearExt1Regs(SiS_Pr, ModeNo);

	rrti = SiS_GetRatePtr(SiS_Pr, ModeNo, ModeIdIndex);

	if (rrti != 0xFFFF) {
		SiS_SetCRT1Sync(SiS_Pr, rrti);
		SiS_SetCRT1CRTC(SiS_Pr, ModeNo, ModeIdIndex, rrti);
		SiS_SetCRT1Offset(SiS_Pr, ModeNo, ModeIdIndex, rrti);
		SiS_SetCRT1VCLK(SiS_Pr, ModeNo, rrti);
	}

	SiS_SetCRT1FIFO_310(SiS_Pr, ModeNo, ModeIdIndex);

	SiS_SetCRT1ModeRegs(SiS_Pr, ModeNo, ModeIdIndex, rrti);

	SiS_LoadDAC(SiS_Pr, ModeNo, ModeIdIndex);

	SiS_DisplayOn(SiS_Pr);
}

/*********************************************/
/*               SiSSetMode()                */
/*********************************************/

/*
 * Set display mode 'ModeNo' (SiS-native mode number) on CRT1.
 * Returns 1 on success, 0 if the mode is not in the tables.
 * The mode number (without bit 7) is stored in CR34 for later
 * retrieval, following the BIOS convention used elsewhere.
 */
int SiSUSBSetMode(struct SiS_Private *SiS_Pr, unsigned short ModeNo)
{
	unsigned short ModeIdIndex;
	unsigned long BaseAddr = SiS_Pr->IOAddress;

	SiSUSB_InitPtr(SiS_Pr);
	SiSUSBRegInit(SiS_Pr, BaseAddr);
	SiS_GetSysFlags(SiS_Pr);

	if (!(SiS_SearchModeID(SiS_Pr, &ModeNo, &ModeIdIndex)))
		return 0;

	/* Unlock the extended registers (SR05 = 0x86) */
	SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, 0x05, 0x86);

	SiSInitPCIetc(SiS_Pr);

	ModeNo &= 0x7f;

	SiS_Pr->SiS_ModeType =
	    SiS_Pr->SiS_EModeIDTable[ModeIdIndex].Ext_ModeFlag & ModeTypeMask;

	SiS_Pr->SiS_SetFlag = LowModeTests;

	/* Set mode on CRT1 */
	SiS_SetCRT1Group(SiS_Pr, ModeNo, ModeIdIndex);

	SiS_HandleCRT1(SiS_Pr);

	SiS_DisplayOn(SiS_Pr);
	SiS_SetRegByte(SiS_Pr, SiS_Pr->SiS_P3c6, 0xFF);

	/* Store mode number */
	SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3d4, 0x34, ModeNo);

	return 1;
}

/*
 * Set a mode given its VESA number: translate via Ext_VESAID in the
 * extended mode table (0x03 maps to itself) and delegate to
 * SiSUSBSetMode(). Returns 0 if the VESA number is unknown.
 */
int SiSUSBSetVESAMode(struct SiS_Private *SiS_Pr, unsigned short VModeNo)
{
	unsigned short ModeNo = 0;
	int i;

	SiSUSB_InitPtr(SiS_Pr);

	if (VModeNo == 0x03) {

		ModeNo = 0x03;

	} else {

		i = 0;
		do {

			if (SiS_Pr->SiS_EModeIDTable[i].Ext_VESAID == VModeNo) {
				ModeNo = SiS_Pr->SiS_EModeIDTable[i].Ext_ModeID;
				break;
			}

		} while (SiS_Pr->SiS_EModeIDTable[i++].Ext_ModeID != 0xff);

	}

	if (!ModeNo)
		return 0;

	return SiSUSBSetMode(SiS_Pr, ModeNo);
}

#endif /* INCL_SISUSB_CON */
gpl-2.0
javelinanddart/kernel_zte_draconis
arch/mips/lasat/setup.c
14050
3811
/*
 * Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 1999 MIPS Technologies, Inc.  All rights reserved.
 *
 * Thomas Horsten <thh@lasat.com>
 * Copyright (C) 2000 LASAT Networks A/S.
 *
 * Brian Murphy <brian@murphy.dk>
 *
 * This program is free software; you can distribute it and/or modify it
 * under the terms of the GNU General Public License (Version 2) as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
 *
 * Lasat specific setup.
 */
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/tty.h>

#include <asm/time.h>
#include <asm/cpu.h>
#include <asm/bootinfo.h>
#include <asm/irq.h>
#include <asm/lasat/lasat.h>
#include <asm/lasat/serial.h>

#ifdef CONFIG_PICVUE
#include <linux/notifier.h>
#endif

#include "ds1603.h"
#include <asm/lasat/ds1603.h>
#include <asm/lasat/picvue.h>
#include <asm/lasat/eeprom.h>

#include "prom.h"

int lasat_command_line;
void lasatint_init(void);

extern void lasat_reboot_setup(void);
extern void pcisetup(void);
extern void edhac_init(void *, void *, void *);
extern void addrflt_init(void);

/*
 * Per-machine-type register addresses: entry 0 = LASAT 100,
 * entry 1 = LASAT 200. The trailing bare value initializes the
 * struct member following flash_wp_reg (positional after designated).
 */
struct lasat_misc lasat_misc_info[N_MACHTYPES] = {
	{
		.reset_reg = (void *)KSEG1ADDR(0x1c840000),
		.flash_wp_reg = (void *)KSEG1ADDR(0x1c800000), 2
	},
	{
		.reset_reg = (void *)KSEG1ADDR(0x11080000),
		.flash_wp_reg = (void *)KSEG1ADDR(0x11000000), 6
	}
};

struct lasat_misc *lasat_misc;

#ifdef CONFIG_DS1603
/* DS1603 RTC bit-bang descriptions for LASAT 100 / LASAT 200 */
static struct ds_defs ds_defs[N_MACHTYPES] = {
	{ (void *)DS1603_REG_100, (void *)DS1603_REG_100,
		DS1603_RST_100, DS1603_CLK_100, DS1603_DATA_100,
		DS1603_DATA_SHIFT_100, 0, 0 },
	{ (void *)DS1603_REG_200, (void *)DS1603_DATA_REG_200,
		DS1603_RST_200, DS1603_CLK_200, DS1603_DATA_200,
		DS1603_DATA_READ_SHIFT_200, 1, 2000 }
};
#endif

#ifdef CONFIG_PICVUE
#include "picvue.h"
/* PICVUE LCD wiring descriptions for LASAT 100 / LASAT 200 */
static struct pvc_defs pvc_defs[N_MACHTYPES] = {
	{ (void *)PVC_REG_100, PVC_DATA_SHIFT_100, PVC_DATA_M_100,
		PVC_E_100, PVC_RW_100, PVC_RS_100 },
	{ (void *)PVC_REG_200, PVC_DATA_SHIFT_200, PVC_DATA_M_200,
		PVC_E_200, PVC_RW_200, PVC_RS_200 }
};
#endif

/*
 * Panic notifier: show the panic message (or a fallback string) on
 * the PICVUE LCD, if one is configured.
 */
static int lasat_panic_display(struct notifier_block *this,
			     unsigned long event, void *ptr)
{
#ifdef CONFIG_PICVUE
	unsigned char *string = ptr;

	if (string == NULL)
		/* cast: the pvc API works on unsigned char strings */
		string = (unsigned char *)"Kernel Panic";
	pvc_dump_string(string);
#endif
	return NOTIFY_DONE;
}

/* Panic notifier: drop back into the PROM monitor as a last resort. */
static int lasat_panic_prom_monitor(struct notifier_block *this,
				    unsigned long event, void *ptr)
{
	prom_monitor();
	return NOTIFY_DONE;
}

/* Display runs first (INT_MAX), the PROM monitor last (INT_MIN). */
static struct notifier_block lasat_panic_block[] = {
	{
		.notifier_call = lasat_panic_display,
		.priority = INT_MAX
	},
	{
		.notifier_call = lasat_panic_prom_monitor,
		.priority = INT_MIN
	}
};

void __init plat_time_init(void)
{
	/* The MIPS count register ticks at half the CPU clock. */
	mips_hpt_frequency = lasat_board_info.li_cpu_hz / 2;

	change_c0_status(ST0_IM, IE_IRQ0);
}

/*
 * Board setup: select the per-machine register/LCD/RTC descriptions,
 * register the panic notifiers and initialize reboot and serial.
 */
void __init plat_mem_setup(void)
{
	int i;
	int lasat_type = IS_LASAT_200() ? 1 : 0;

	lasat_misc = &lasat_misc_info[lasat_type];
#ifdef CONFIG_PICVUE
	picvue = &pvc_defs[lasat_type];
#endif

	/* Set up panic notifier */
	for (i = 0; i < ARRAY_SIZE(lasat_panic_block); i++)
		atomic_notifier_chain_register(&panic_notifier_list,
				&lasat_panic_block[i]);

	lasat_reboot_setup();

#ifdef CONFIG_DS1603
	ds1603 = &ds_defs[lasat_type];
#endif

#ifdef DYNAMIC_SERIAL_INIT
	serial_init();
#endif

	pr_info("Lasat specific initialization complete\n");
}
gpl-2.0
Shao-Feng/tinyweb
third-party/busybox-1.22.1/modutils/modutils.c
227
4742
/*
 * Common modutils related functions for busybox
 *
 * Copyright (C) 2008 by Timo Teras <timo.teras@iki.fi>
 *
 * Licensed under GPLv2 or later, see file LICENSE in this source tree.
 */
#include "modutils.h"

#ifdef __UCLIBC__
extern int init_module(void *module, unsigned long len, const char *options);
extern int delete_module(const char *module, unsigned int flags);
#else
# include <sys/syscall.h>
/* Invoke the kernel syscalls directly instead of libc wrappers. */
# define init_module(mod, len, opts) syscall(__NR_init_module, mod, len, opts)
# define delete_module(mod, flags) syscall(__NR_delete_module, mod, flags)
#endif

/* Replace every occurrence of 'what' in s with 'with', in place. */
void FAST_FUNC replace(char *s, char what, char with)
{
	while (*s) {
		if (what == *s)
			*s = with;
		++s;
	}
}

/* Canonicalize a module name: map '-' to '_'. Returns s. */
char* FAST_FUNC replace_underscores(char *s)
{
	replace(s, '-', '_');
	return s;
}

/*
 * Split 'string' (modified in place) on any character in 'delim' and
 * append each non-empty token to *llist. Returns the total length of
 * the appended tokens (delimiters not counted).
 */
int FAST_FUNC string_to_llist(char *string, llist_t **llist, const char *delim)
{
	char *tok;
	int len = 0;

	while ((tok = strsep(&string, delim)) != NULL) {
		if (tok[0] == '\0')
			continue;
		llist_add_to_end(llist, xstrdup(tok));
		len += strlen(tok);
	}
	return len;
}

/*
 * Derive a module name from a file path: last path component,
 * truncated at the first '.', with '-' mapped to '_'. If modname is
 * NULL, a MODULE_NAME_LEN buffer is allocated (caller frees).
 * The result is always NUL-terminated. Returns NULL for NULL input.
 */
char* FAST_FUNC filename2modname(const char *filename, char *modname)
{
	int i;
	char *from;

	if (filename == NULL)
		return NULL;
	if (modname == NULL)
		modname = xmalloc(MODULE_NAME_LEN);
	from = bb_get_last_path_component_nostrip(filename);
	for (i = 0; i < (MODULE_NAME_LEN-1) && from[i] != '\0' && from[i] != '.'; i++)
		modname[i] = (from[i] == '-') ? '_' : from[i];
	modname[i] = '\0';

	return modname;
}

/*
 * Join argv[1..] into one space-separated option string (heap
 * allocated; caller frees). With quote_spaces, values containing
 * spaces are emitted as var="value" for modprobe compatibility.
 */
char* FAST_FUNC parse_cmdline_module_options(char **argv, int quote_spaces)
{
	char *options;
	int optlen;

	options = xzalloc(1);
	optlen = 0;
	while (*++argv) {
		const char *fmt;
		const char *var;
		const char *val;

		var = *argv;
		/* grow by: var text + two quote chars + space + NUL */
		options = xrealloc(options, optlen + 2 + strlen(var) + 2);
		fmt = "%.*s%s ";
		val = strchrnul(var, '=');
		if (quote_spaces) {
			/*
			 * modprobe (module-init-tools version 3.11.1) compat:
			 * quote only value:
			 * var="val with spaces", not "var=val with spaces"
			 * (note: var *name* is not checked for spaces!)
			 */
			if (*val) { /* has var=val format. skip '=' */
				val++;
				if (strchr(val, ' '))
					fmt = "%.*s\"%s\" ";
			}
		}
		optlen += sprintf(options + optlen, fmt, (int)(val - var), var, val);
	}
	/* Remove trailing space. Disabled */
	/* if (optlen != 0) options[optlen-1] = '\0'; */
	return options;
}

#if ENABLE_FEATURE_INSMOD_TRY_MMAP
/*
 * Map the module file read-only if it fits in *image_size_p and looks
 * like ELF. Returns the mapping (and stores its size) or NULL; on
 * NULL the caller falls back to reading the file into memory.
 */
void* FAST_FUNC try_to_mmap_module(const char *filename, size_t *image_size_p)
{
	/* We have user reports of failure to load 3MB module
	 * on a 16MB RAM machine. Apparently even a transient
	 * memory spike to 6MB during module load
	 * is too big for that system. */
	void *image;
	struct stat st;
	int fd;

	fd = xopen(filename, O_RDONLY);
	fstat(fd, &st);
	image = NULL;
	/* st.st_size is off_t, we can't just pass it to mmap */
	if (st.st_size <= *image_size_p) {
		size_t image_size = st.st_size;
		image = mmap(NULL, image_size, PROT_READ, MAP_PRIVATE, fd, 0);
		if (image == MAP_FAILED) {
			image = NULL;
		} else if (*(uint32_t*)image != SWAP_BE32(0x7f454C46)) {
			/* No ELF signature. Compressed module? */
			munmap(image, image_size);
			image = NULL;
		} else {
			/* Success. Report the size */
			*image_size_p = image_size;
		}
	}
	close(fd);
	return image;
}
#endif

/* Return:
 * 0 on success,
 * -errno on open/read error,
 * errno on init_module() error
 */
int FAST_FUNC bb_init_module(const char *filename, const char *options)
{
	size_t image_size;
	char *image;
	int rc;
	bool mmaped;

	if (!options)
		options = "";

//TODO: audit bb_init_module_24 to match error code convention
#if ENABLE_FEATURE_2_4_MODULES
	if (get_linux_version_code() < KERNEL_VERSION(2,6,0))
		return bb_init_module_24(filename, options);
#endif

	image_size = INT_MAX - 4095;
	mmaped = 0;
	image = try_to_mmap_module(filename, &image_size);
	if (image) {
		mmaped = 1;
	} else {
		errno = ENOMEM; /* may be changed by e.g. open errors below */
		image = xmalloc_open_zipped_read_close(filename, &image_size);
		if (!image)
			return -errno;
	}

	errno = 0;
	init_module(image, image_size, options);
	rc = errno;
	if (mmaped)
		munmap(image, image_size);
	else
		free(image);
	return rc;
}

/* Returns 0 on success, errno from delete_module() on failure. */
int FAST_FUNC bb_delete_module(const char *module, unsigned int flags)
{
	errno = 0;
	delete_module(module, flags);
	return errno;
}

/* Map an init_module()/delete_module() error code to a message. */
const char* FAST_FUNC moderror(int err)
{
	switch (err) {
	case -1: /* btw: it's -EPERM */
		return "no such module";
	case ENOEXEC:
		return "invalid module format";
	case ENOENT:
		return "unknown symbol in module, or unknown parameter";
	case ESRCH:
		return "module has wrong symbol version";
	case ENOSYS:
		return "kernel does not support requested operation";
	}
	if (err < 0) /* should always be */
		err = -err;
	return strerror(err);
}
gpl-2.0
fabiocannizzo/linux
drivers/gpu/drm/udl/udl_main.c
227
8458
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2012 Red Hat * * based in parts on udlfb.c: * Copyright (C) 2009 Roberto De Ioris <roberto@unbit.it> * Copyright (C) 2009 Jaya Kumar <jayakumar.lkml@gmail.com> * Copyright (C) 2009 Bernie Thompson <bernie@plugable.com> */ #include <drm/drm.h> #include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include "udl_drv.h" /* -BULK_SIZE as per usb-skeleton. Can we get full page and avoid overhead? */ #define BULK_SIZE 512 #define NR_USB_REQUEST_CHANNEL 0x12 #define MAX_TRANSFER (PAGE_SIZE*16 - BULK_SIZE) #define WRITES_IN_FLIGHT (4) #define MAX_VENDOR_DESCRIPTOR_SIZE 256 #define GET_URB_TIMEOUT HZ #define FREE_URB_TIMEOUT (HZ*2) static int udl_parse_vendor_descriptor(struct udl_device *udl) { struct usb_device *udev = udl_to_usb_device(udl); char *desc; char *buf; char *desc_end; u8 total_len = 0; buf = kzalloc(MAX_VENDOR_DESCRIPTOR_SIZE, GFP_KERNEL); if (!buf) return false; desc = buf; total_len = usb_get_descriptor(udev, 0x5f, /* vendor specific */ 0, desc, MAX_VENDOR_DESCRIPTOR_SIZE); if (total_len > 5) { DRM_INFO("vendor descriptor length:%x data:%11ph\n", total_len, desc); if ((desc[0] != total_len) || /* descriptor length */ (desc[1] != 0x5f) || /* vendor descriptor type */ (desc[2] != 0x01) || /* version (2 bytes) */ (desc[3] != 0x00) || (desc[4] != total_len - 2)) /* length after type */ goto unrecognized; desc_end = desc + total_len; desc += 5; /* the fixed header we've already parsed */ while (desc < desc_end) { u8 length; u16 key; key = le16_to_cpu(*((u16 *) desc)); desc += sizeof(u16); length = *desc; desc++; switch (key) { case 0x0200: { /* max_area */ u32 max_area; max_area = le32_to_cpu(*((u32 *)desc)); DRM_DEBUG("DL chip limited to %d pixel modes\n", max_area); udl->sku_pixel_limit = max_area; break; } default: break; } desc += length; } } goto success; unrecognized: /* allow udlfb to load for now even if firmware unrecognized */ DRM_ERROR("Unrecognized vendor firmware descriptor\n"); 
success: kfree(buf); return true; } /* * Need to ensure a channel is selected before submitting URBs */ static int udl_select_std_channel(struct udl_device *udl) { static const u8 set_def_chn[] = {0x57, 0xCD, 0xDC, 0xA7, 0x1C, 0x88, 0x5E, 0x15, 0x60, 0xFE, 0xC6, 0x97, 0x16, 0x3D, 0x47, 0xF2}; void *sendbuf; int ret; struct usb_device *udev = udl_to_usb_device(udl); sendbuf = kmemdup(set_def_chn, sizeof(set_def_chn), GFP_KERNEL); if (!sendbuf) return -ENOMEM; ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), NR_USB_REQUEST_CHANNEL, (USB_DIR_OUT | USB_TYPE_VENDOR), 0, 0, sendbuf, sizeof(set_def_chn), USB_CTRL_SET_TIMEOUT); kfree(sendbuf); return ret < 0 ? ret : 0; } static void udl_release_urb_work(struct work_struct *work) { struct urb_node *unode = container_of(work, struct urb_node, release_urb_work.work); up(&unode->dev->urbs.limit_sem); } void udl_urb_completion(struct urb *urb) { struct urb_node *unode = urb->context; struct udl_device *udl = unode->dev; unsigned long flags; /* sync/async unlink faults aren't errors */ if (urb->status) { if (!(urb->status == -ENOENT || urb->status == -ECONNRESET || urb->status == -ESHUTDOWN)) { DRM_ERROR("%s - nonzero write bulk status received: %d\n", __func__, urb->status); } } urb->transfer_buffer_length = udl->urbs.size; /* reset to actual */ spin_lock_irqsave(&udl->urbs.lock, flags); list_add_tail(&unode->entry, &udl->urbs.list); udl->urbs.available++; spin_unlock_irqrestore(&udl->urbs.lock, flags); #if 0 /* * When using fb_defio, we deadlock if up() is called * while another is waiting. So queue to another process. 
*/ if (fb_defio) schedule_delayed_work(&unode->release_urb_work, 0); else #endif up(&udl->urbs.limit_sem); } static void udl_free_urb_list(struct drm_device *dev) { struct udl_device *udl = to_udl(dev); int count = udl->urbs.count; struct list_head *node; struct urb_node *unode; struct urb *urb; DRM_DEBUG("Waiting for completes and freeing all render urbs\n"); /* keep waiting and freeing, until we've got 'em all */ while (count--) { down(&udl->urbs.limit_sem); spin_lock_irq(&udl->urbs.lock); node = udl->urbs.list.next; /* have reserved one with sem */ list_del_init(node); spin_unlock_irq(&udl->urbs.lock); unode = list_entry(node, struct urb_node, entry); urb = unode->urb; /* Free each separately allocated piece */ usb_free_coherent(urb->dev, udl->urbs.size, urb->transfer_buffer, urb->transfer_dma); usb_free_urb(urb); kfree(node); } udl->urbs.count = 0; } static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size) { struct udl_device *udl = to_udl(dev); struct urb *urb; struct urb_node *unode; char *buf; size_t wanted_size = count * size; struct usb_device *udev = udl_to_usb_device(udl); spin_lock_init(&udl->urbs.lock); retry: udl->urbs.size = size; INIT_LIST_HEAD(&udl->urbs.list); sema_init(&udl->urbs.limit_sem, 0); udl->urbs.count = 0; udl->urbs.available = 0; while (udl->urbs.count * size < wanted_size) { unode = kzalloc(sizeof(struct urb_node), GFP_KERNEL); if (!unode) break; unode->dev = udl; INIT_DELAYED_WORK(&unode->release_urb_work, udl_release_urb_work); urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) { kfree(unode); break; } unode->urb = urb; buf = usb_alloc_coherent(udev, size, GFP_KERNEL, &urb->transfer_dma); if (!buf) { kfree(unode); usb_free_urb(urb); if (size > PAGE_SIZE) { size /= 2; udl_free_urb_list(dev); goto retry; } break; } /* urb->transfer_buffer_length set to actual before submit */ usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, 1), buf, size, udl_urb_completion, unode); urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; 
list_add_tail(&unode->entry, &udl->urbs.list); up(&udl->urbs.limit_sem); udl->urbs.count++; udl->urbs.available++; } DRM_DEBUG("allocated %d %d byte urbs\n", udl->urbs.count, (int) size); return udl->urbs.count; } struct urb *udl_get_urb(struct drm_device *dev) { struct udl_device *udl = to_udl(dev); int ret = 0; struct list_head *entry; struct urb_node *unode; struct urb *urb = NULL; /* Wait for an in-flight buffer to complete and get re-queued */ ret = down_timeout(&udl->urbs.limit_sem, GET_URB_TIMEOUT); if (ret) { DRM_INFO("wait for urb interrupted: %x available: %d\n", ret, udl->urbs.available); goto error; } spin_lock_irq(&udl->urbs.lock); BUG_ON(list_empty(&udl->urbs.list)); /* reserved one with limit_sem */ entry = udl->urbs.list.next; list_del_init(entry); udl->urbs.available--; spin_unlock_irq(&udl->urbs.lock); unode = list_entry(entry, struct urb_node, entry); urb = unode->urb; error: return urb; } int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len) { struct udl_device *udl = to_udl(dev); int ret; BUG_ON(len > udl->urbs.size); urb->transfer_buffer_length = len; /* set to actual payload len */ ret = usb_submit_urb(urb, GFP_ATOMIC); if (ret) { udl_urb_completion(urb); /* because no one else will */ DRM_ERROR("usb_submit_urb error %x\n", ret); } return ret; } int udl_init(struct udl_device *udl) { struct drm_device *dev = &udl->drm; int ret = -ENOMEM; DRM_DEBUG("\n"); udl->dmadev = usb_intf_get_dma_device(to_usb_interface(dev->dev)); if (!udl->dmadev) drm_warn(dev, "buffer sharing not supported"); /* not an error */ mutex_init(&udl->gem_lock); if (!udl_parse_vendor_descriptor(udl)) { ret = -ENODEV; DRM_ERROR("firmware not recognized. 
Assume incompatible device\n"); goto err; } if (udl_select_std_channel(udl)) DRM_ERROR("Selecting channel failed\n"); if (!udl_alloc_urb_list(dev, WRITES_IN_FLIGHT, MAX_TRANSFER)) { DRM_ERROR("udl_alloc_urb_list failed\n"); goto err; } DRM_DEBUG("\n"); ret = udl_modeset_init(dev); if (ret) goto err; drm_kms_helper_poll_init(dev); return 0; err: if (udl->urbs.count) udl_free_urb_list(dev); put_device(udl->dmadev); DRM_ERROR("%d\n", ret); return ret; } int udl_drop_usb(struct drm_device *dev) { struct udl_device *udl = to_udl(dev); udl_free_urb_list(dev); put_device(udl->dmadev); udl->dmadev = NULL; return 0; }
gpl-2.0
cattleprod/XCeLL-X69
drivers/acpi/acpica/rsmemory.c
995
7395
/*******************************************************************************
 *
 * Module Name: rsmem24 - Memory resource descriptors
 *
 ******************************************************************************/

/*
 * Copyright (C) 2000 - 2010, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 */

#include <acpi/acpi.h>
#include "accommon.h"
#include "acresrc.h"

#define _COMPONENT          ACPI_RESOURCES
ACPI_MODULE_NAME("rsmemory")

/*
 * Each table below is a data-driven conversion recipe consumed by the
 * ACPICA resource-conversion engine: the INITGET/INITSET entries declare
 * the internal resource type/size vs. the AML descriptor name/size, and
 * the following opcodes copy individual fields in each direction.
 */

/*******************************************************************************
 *
 * acpi_rs_convert_memory24 - 24-bit memory range descriptor
 *
 ******************************************************************************/
struct acpi_rsconvert_info acpi_rs_convert_memory24[4] = {
	{ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_MEMORY24,
	 ACPI_RS_SIZE(struct acpi_resource_memory24),
	 ACPI_RSC_TABLE_SIZE(acpi_rs_convert_memory24)},

	{ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_MEMORY24,
	 sizeof(struct aml_resource_memory24),
	 0},

	/* Read/Write bit */

	{ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.memory24.write_protect),
	 AML_OFFSET(memory24.flags),
	 0},
	/*
	 * These fields are contiguous in both the source and destination:
	 * Minimum Base Address
	 * Maximum Base Address
	 * Address Base Alignment
	 * Range Length
	 */
	{ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.memory24.minimum),
	 AML_OFFSET(memory24.minimum),
	 4}
};

/*******************************************************************************
 *
 * acpi_rs_convert_memory32 - 32-bit memory range descriptor
 *
 ******************************************************************************/
struct acpi_rsconvert_info acpi_rs_convert_memory32[4] = {
	{ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_MEMORY32,
	 ACPI_RS_SIZE(struct acpi_resource_memory32),
	 ACPI_RSC_TABLE_SIZE(acpi_rs_convert_memory32)},

	{ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_MEMORY32,
	 sizeof(struct aml_resource_memory32),
	 0},

	/* Read/Write bit */

	{ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.memory32.write_protect),
	 AML_OFFSET(memory32.flags),
	 0},
	/*
	 * These fields are contiguous in both the source and destination:
	 * Minimum Base Address
	 * Maximum Base Address
	 * Address Base Alignment
	 * Range Length
	 */
	{ACPI_RSC_MOVE32, ACPI_RS_OFFSET(data.memory32.minimum),
	 AML_OFFSET(memory32.minimum),
	 4}
};

/*******************************************************************************
 *
 * acpi_rs_convert_fixed_memory32 - fixed 32-bit memory range descriptor
 *
 ******************************************************************************/
struct acpi_rsconvert_info acpi_rs_convert_fixed_memory32[4] = {
	{ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_FIXED_MEMORY32,
	 ACPI_RS_SIZE(struct acpi_resource_fixed_memory32),
	 ACPI_RSC_TABLE_SIZE(acpi_rs_convert_fixed_memory32)},

	{ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_FIXED_MEMORY32,
	 sizeof(struct aml_resource_fixed_memory32),
	 0},

	/* Read/Write bit */

	{ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.fixed_memory32.write_protect),
	 AML_OFFSET(fixed_memory32.flags),
	 0},
	/*
	 * These fields are contiguous in both the source and destination:
	 * Base Address
	 * Range Length
	 */
	{ACPI_RSC_MOVE32, ACPI_RS_OFFSET(data.fixed_memory32.address),
	 AML_OFFSET(fixed_memory32.address),
	 2}
};

/*******************************************************************************
 *
 * acpi_rs_get_vendor_small - AML (small) -> internal vendor resource
 *
 ******************************************************************************/
struct acpi_rsconvert_info acpi_rs_get_vendor_small[3] = {
	{ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_VENDOR,
	 ACPI_RS_SIZE(struct acpi_resource_vendor),
	 ACPI_RSC_TABLE_SIZE(acpi_rs_get_vendor_small)},

	/* Length of the vendor data (byte count) */

	{ACPI_RSC_COUNT16, ACPI_RS_OFFSET(data.vendor.byte_length),
	 0,
	 sizeof(u8)}
	,
	/* Vendor data */

	{ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.vendor.byte_data[0]),
	 sizeof(struct aml_resource_small_header),
	 0}
};

/*******************************************************************************
 *
 * acpi_rs_get_vendor_large - AML (large) -> internal vendor resource
 *
 ******************************************************************************/
struct acpi_rsconvert_info acpi_rs_get_vendor_large[3] = {
	{ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_VENDOR,
	 ACPI_RS_SIZE(struct acpi_resource_vendor),
	 ACPI_RSC_TABLE_SIZE(acpi_rs_get_vendor_large)},

	/* Length of the vendor data (byte count) */

	{ACPI_RSC_COUNT16, ACPI_RS_OFFSET(data.vendor.byte_length),
	 0,
	 sizeof(u8)}
	,
	/* Vendor data */

	{ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.vendor.byte_data[0]),
	 sizeof(struct aml_resource_large_header),
	 0}
};

/*******************************************************************************
 *
 * acpi_rs_set_vendor - internal vendor resource -> AML; picks the small
 * descriptor form when the data fits (<= 7 bytes), else the large form.
 *
 ******************************************************************************/
struct acpi_rsconvert_info acpi_rs_set_vendor[7] = {
	/* Default is a small vendor descriptor */

	{ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_VENDOR_SMALL,
	 sizeof(struct aml_resource_small_header),
	 ACPI_RSC_TABLE_SIZE(acpi_rs_set_vendor)},

	/* Get the length and copy the data */

	{ACPI_RSC_COUNT16, ACPI_RS_OFFSET(data.vendor.byte_length),
	 0,
	 0},

	{ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.vendor.byte_data[0]),
	 sizeof(struct aml_resource_small_header),
	 0},

	/*
	 * All done if the Vendor byte length is 7 or less, meaning that it will
	 * fit within a small descriptor
	 */
	{ACPI_RSC_EXIT_LE, 0, 0, 7},

	/* Must create a large vendor descriptor */

	{ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_VENDOR_LARGE,
	 sizeof(struct aml_resource_large_header),
	 0},

	{ACPI_RSC_COUNT16, ACPI_RS_OFFSET(data.vendor.byte_length),
	 0,
	 0},

	{ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.vendor.byte_data[0]),
	 sizeof(struct aml_resource_large_header),
	 0}
};
gpl-2.0
friedrich420/S5-G900F-AEL-Kernel-LOLLIPOP
drivers/gud/MobiCoreDriver/main.c
1251
34657
/*
 * MobiCore Driver Kernel Module.
 *
 * This driver represents the command proxy on the lowest layer, from the
 * secure world to the non secure world, and vice versa.
 * This driver offers IOCTL commands, for access to the secure world, and has
 * the interface from the secure world to the normal world.
 * The access to the driver is possible with a file descriptor,
 * which has to be created by the fd = open(/dev/mobicore) command or
 * fd = open(/dev/mobicore-user)
 *
 * <-- Copyright Giesecke & Devrient GmbH 2009-2012 -->
 * <-- Copyright Trustonic Limited 2013 -->
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/miscdevice.h>
#include <linux/interrupt.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/ioctl.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/completion.h>
#include <linux/fdtable.h>
#include <linux/cdev.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <net/af_unix.h>

#include "main.h"
#include "fastcall.h"
#include "arm.h"
#include "mem.h"
#include "ops.h"
#include "pm.h"
#include "debug.h"
#include "logging.h"

/* Define a MobiCore device structure for use with dev_debug() etc */
struct device_driver mcd_debug_name = {
	.name = "MobiCore"
};

struct device mcd_debug_subname = {
	.driver = &mcd_debug_name
};

struct device *mcd = &mcd_debug_subname;

/* We need 2 devices for admin and user interface*/
#define MC_DEV_MAX 2

/* Need to discover a chrdev region for the driver */
static dev_t mc_dev_admin, mc_dev_user;
struct cdev mc_admin_cdev, mc_user_cdev;
/* Device class for the driver assigned major */
static struct class *mc_device_class;

#ifndef FMODE_PATH
 #define FMODE_PATH 0x0
#endif

/*
 * If 'filp' refers to an AF_UNIX socket, return its struct sock;
 * otherwise NULL. Caller must pass a valid (non-NULL) file pointer.
 */
static struct sock *__get_socket(struct file *filp)
{
	struct sock *u_sock = NULL;
	struct inode *inode = filp->f_path.dentry->d_inode;

	/*
	 * Socket ?
	 */
	if (S_ISSOCK(inode->i_mode) && !(filp->f_mode & FMODE_PATH)) {
		struct socket *sock = SOCKET_I(inode);
		struct sock *s = sock->sk;

		/*
		 * PF_UNIX ?
		 */
		if (s && sock->ops && sock->ops->family == PF_UNIX)
			u_sock = s;
	}
	return u_sock;
}

/* MobiCore interrupt context data */
struct mc_context ctx;

/* Get process context from file pointer */
static struct mc_instance *get_instance(struct file *file)
{
	return (struct mc_instance *)(file->private_data);
}

/* Get a unique ID */
unsigned int get_unique_id(void)
{
	return (unsigned int)atomic_inc_return(&ctx.unique_counter);
}

/* Clears the reserved bit of each page and frees the pages */
static inline void free_continguous_pages(void *addr, unsigned int order)
{
	int i;
	struct page *page = virt_to_page(addr);

	for (i = 0; i < (1<<order); i++) {
		MCDRV_DBG_VERBOSE(mcd, "free page at 0x%p", page);
		clear_bit(PG_reserved, &page->flags);
		page++;
	}

	MCDRV_DBG_VERBOSE(mcd, "freeing addr:%p, order:%x", addr, order);
	free_pages((unsigned long)addr, order);
}

/*
 * Drop one reference on a contiguous WSM buffer; when the count hits
 * zero, unlink it from ctx.cont_bufs and release its pages.
 * Caller must hold ctx.bufs_lock.
 */
static int free_buffer(struct mc_buffer *buffer, bool unlock)
{
	if (buffer->handle == 0)
		return -EINVAL;

	if (buffer->addr == 0)
		return -EINVAL;

	MCDRV_DBG_VERBOSE(mcd,
			  "handle=%u phys_addr=0x%llx, virt_addr=0x%p len=%u",
			  buffer->handle, (u64)buffer->phys, buffer->addr,
			  buffer->len);

	if (!atomic_dec_and_test(&buffer->usage)) {
		MCDRV_DBG_VERBOSE(mcd, "Could not free %u", buffer->handle);
		return 0;
	}

	list_del(&buffer->list);

	free_continguous_pages(buffer->addr, buffer->order);
	kfree(buffer);
	return 0;
}

/*
 * Look up the kernel virtual address of a contiguous WSM buffer by its
 * user-space address and length. Returns 0 and fills *addr on success,
 * -EINVAL if no matching buffer exists. (Return type is uint32_t for
 * historical reasons; callers only test it for truthiness.)
 */
static uint32_t mc_find_cont_wsm_addr(struct mc_instance *instance, void *uaddr,
	void **addr, uint32_t len)
{
	int ret = 0;
	struct mc_buffer *buffer;

	if (WARN(!instance, "No instance data available"))
		return -EFAULT;

	mutex_lock(&instance->lock);

	mutex_lock(&ctx.bufs_lock);

	/* search for the given handle in the buffers list */
	list_for_each_entry(buffer, &ctx.cont_bufs, list) {
		if (buffer->uaddr == uaddr && buffer->len == len) {
			*addr = buffer->addr;
			goto found;
		}
	}

	/* Coundn't find the buffer */
	ret = -EINVAL;

found:
	mutex_unlock(&ctx.bufs_lock);
	mutex_unlock(&instance->lock);

	return ret;
}

/*
 * Check whether the process at the far end of the AF_UNIX socket 'fd'
 * has this driver instance open (i.e. is the buffer's rightful owner).
 * The daemon instance is always trusted.
 */
bool mc_check_owner_fd(struct mc_instance *instance, int32_t fd)
{
#ifndef __ARM_VE_A9X4_STD__
	struct file *fp;
	struct sock *s;
	struct files_struct *files;
	struct task_struct *peer = NULL;
	bool ret = false;

	MCDRV_DBG_VERBOSE(mcd, "Finding wsm for fd = %d", fd);
	if (!instance)
		return false;

	if (is_daemon(instance))
		return true;

	fp = fcheck_files(current->files, fd);
	/* Fix: fcheck_files() returns NULL for an invalid fd; the original
	 * passed it straight into __get_socket(), which dereferences it.
	 * NOTE(review): fcheck_files() normally requires rcu_read_lock() or
	 * files->file_lock held - confirm against callers. */
	if (!fp)
		return false;

	s = __get_socket(fp);
	if (s) {
		peer = get_pid_task(s->sk_peer_pid, PIDTYPE_PID);
		/* Fix: don't dereference peer before checking it */
		if (peer)
			MCDRV_DBG_VERBOSE(mcd, "Found pid for fd %d",
					  peer->pid);
	}
	if (peer) {
		task_lock(peer);
		files = peer->files;
		if (!files)
			goto out;
		for (fd = 0; fd < files_fdtable(files)->max_fds; fd++) {
			fp = fcheck_files(files, fd);
			if (!fp)
				continue;
			if (fp->private_data == instance) {
				MCDRV_DBG_VERBOSE(mcd, "Found owner!");
				ret = true;
				goto out;
			}
		}
	} else {
		MCDRV_DBG(mcd, "Owner not found!");
		return false;
	}
out:
	if (peer) {
		task_unlock(peer);
		/* Fix: get_pid_task() took a task reference - drop it */
		put_task_struct(peer);
	}
	if (!ret)
		MCDRV_DBG(mcd, "Owner not found!");
	return ret;
#else
	return true;
#endif
}

/*
 * Daemon-only lookup of a contiguous WSM buffer by handle; verifies the
 * requesting fd's peer owns the buffer before exposing phys/len.
 * Returns 0 on success, -EINVAL if not found or not owned.
 */
static uint32_t mc_find_cont_wsm(struct mc_instance *instance, uint32_t handle,
	int32_t fd, phys_addr_t *phys, uint32_t *len)
{
	int ret = 0;
	struct mc_buffer *buffer;

	if (WARN(!instance, "No instance data available"))
		return -EFAULT;

	if (WARN_ON(!is_daemon(instance))) {
		MCDRV_DBG_ERROR(mcd, "caller not MobiCore Daemon");
		return -EPERM;
	}

	mutex_lock(&instance->lock);

	mutex_lock(&ctx.bufs_lock);

	/* search for the given handle in the buffers list */
	list_for_each_entry(buffer, &ctx.cont_bufs, list) {
		if (buffer->handle == handle) {
			if (mc_check_owner_fd(buffer->instance, fd)) {
				*phys = buffer->phys;
				*len = buffer->len;
				goto found;
			} else {
				break;
			}
		}
	}

	/* Couldn't find the buffer */
	ret = -EINVAL;

found:
	mutex_unlock(&ctx.bufs_lock);
	mutex_unlock(&instance->lock);

	return ret;
}
/* * __free_buffer - Free a WSM buffer allocated with mobicore_allocate_wsm * * @instance * @handle handle of the buffer * * Returns 0 if no error * */ static int __free_buffer(struct mc_instance *instance, uint32_t handle, bool unlock) { int ret = 0; struct mc_buffer *buffer; void *uaddr = NULL; size_t len = 0; #ifndef MC_VM_UNMAP struct mm_struct *mm = current->mm; #endif if (WARN(!instance, "No instance data available")) return -EFAULT; mutex_lock(&ctx.bufs_lock); /* search for the given handle in the buffers list */ list_for_each_entry(buffer, &ctx.cont_bufs, list) { if (buffer->handle == handle) { uaddr = buffer->uaddr; len = buffer->len; goto found_buffer; } } ret = -EINVAL; goto err; found_buffer: if (!is_daemon(instance) && buffer->instance != instance) { ret = -EPERM; goto err; } mutex_unlock(&ctx.bufs_lock); /* Only unmap if the request is comming from the user space and * it hasn't already been unmapped */ if (unlock == false && uaddr != NULL) { #ifndef MC_VM_UNMAP /* do_munmap must be done with mm->mmap_sem taken */ down_write(&mm->mmap_sem); ret = do_munmap(mm, (long unsigned int)uaddr, len); up_write(&mm->mmap_sem); #else ret = vm_munmap((long unsigned int)uaddr, len); #endif if (ret < 0) { /* Something is not right if we end up here, better not * clean the buffer so we just leak memory instead of * creating security issues */ MCDRV_DBG_ERROR(mcd, "Memory can't be unmapped"); return -EINVAL; } } mutex_lock(&ctx.bufs_lock); /* search for the given handle in the buffers list */ list_for_each_entry(buffer, &ctx.cont_bufs, list) { if (buffer->handle == handle) goto del_buffer; } ret = -EINVAL; goto err; del_buffer: if (is_daemon(instance) || buffer->instance == instance) ret = free_buffer(buffer, unlock); else ret = -EPERM; err: mutex_unlock(&ctx.bufs_lock); return ret; } int mc_free_buffer(struct mc_instance *instance, uint32_t handle) { int ret = 0; if (WARN(!instance, "No instance data available")) return -EFAULT; mutex_lock(&instance->lock); ret = 
__free_buffer(instance, handle, false); mutex_unlock(&instance->lock); return ret; } int mc_get_buffer(struct mc_instance *instance, struct mc_buffer **buffer, unsigned long len) { struct mc_buffer *cbuffer = NULL; void *addr = 0; phys_addr_t phys = 0; unsigned int order; unsigned long allocated_size; int ret = 0; if (WARN(!instance, "No instance data available")) return -EFAULT; if (len == 0) { MCDRV_DBG_WARN(mcd, "cannot allocate size 0"); return -ENOMEM; } order = get_order(len); if (order > MAX_ORDER) { MCDRV_DBG_WARN(mcd, "Buffer size too large"); return -ENOMEM; } allocated_size = (1 << order) * PAGE_SIZE; if (mutex_lock_interruptible(&instance->lock)) return -ERESTARTSYS; /* allocate a new buffer. */ cbuffer = kzalloc(sizeof(struct mc_buffer), GFP_KERNEL); if (cbuffer == NULL) { MCDRV_DBG_WARN(mcd, "MMAP_WSM request: could not allocate buffer"); ret = -ENOMEM; goto unlock_instance; } mutex_lock(&ctx.bufs_lock); MCDRV_DBG_VERBOSE(mcd, "size %ld -> order %d --> %ld (2^n pages)", len, order, allocated_size); addr = (void *)__get_free_pages(GFP_USER | __GFP_ZERO, order); if (addr == NULL) { MCDRV_DBG_WARN(mcd, "get_free_pages failed"); ret = -ENOMEM; goto err; } phys = virt_to_phys(addr); cbuffer->handle = get_unique_id(); cbuffer->phys = phys; cbuffer->addr = addr; cbuffer->order = order; cbuffer->len = len; cbuffer->instance = instance; cbuffer->uaddr = 0; /* Refcount +1 because the TLC is requesting it */ atomic_set(&cbuffer->usage, 1); INIT_LIST_HEAD(&cbuffer->list); list_add(&cbuffer->list, &ctx.cont_bufs); MCDRV_DBG_VERBOSE(mcd, "allocated phys=0x%llx - 0x%llx, size=%ld, kvirt=0x%p" ", h=%d", (u64)phys, (u64)(phys+allocated_size), allocated_size, addr, cbuffer->handle); *buffer = cbuffer; goto unlock; err: kfree(cbuffer); unlock: mutex_unlock(&ctx.bufs_lock); unlock_instance: mutex_unlock(&instance->lock); return ret; } /* * __lock_buffer() - Locks a contiguous buffer - +1 refcount. * Assumes the instance lock is already taken! 
*/ static int __lock_buffer(struct mc_instance *instance, uint32_t handle) { int ret = 0; struct mc_buffer *buffer; if (WARN(!instance, "No instance data available")) return -EFAULT; if (WARN_ON(!is_daemon(instance))) { MCDRV_DBG_ERROR(mcd, "caller not MobiCore Daemon"); return -EPERM; } mutex_lock(&ctx.bufs_lock); /* search for the given handle in the buffers list */ list_for_each_entry(buffer, &ctx.cont_bufs, list) { if (buffer->handle == handle) { atomic_inc(&buffer->usage); goto unlock; } } ret = -EINVAL; unlock: mutex_unlock(&ctx.bufs_lock); return ret; } static phys_addr_t get_mci_base_phys(unsigned int len) { if (ctx.mci_base.phys) { return ctx.mci_base.phys; } else { unsigned int order = get_order(len); ctx.mcp = NULL; ctx.mci_base.order = order; ctx.mci_base.addr = (void *)__get_free_pages(GFP_USER | __GFP_ZERO, order); if (ctx.mci_base.addr == NULL) { MCDRV_DBG_WARN(mcd, "get_free_pages failed"); memset(&ctx.mci_base, 0, sizeof(ctx.mci_base)); return 0; } ctx.mci_base.phys = virt_to_phys(ctx.mci_base.addr); return ctx.mci_base.phys; } } /* * Create a MMU table from a virtual memory buffer which can be vmalloc * or user space virtual memory */ int mc_register_wsm_mmu(struct mc_instance *instance, void *buffer, uint32_t len, uint32_t *handle, phys_addr_t *phys) { int ret = 0; struct mc_mmu_table *table = NULL; struct task_struct *task = current; void *kbuff = NULL; if (WARN(!instance, "No instance data available")) return -EFAULT; if (len == 0) { MCDRV_DBG_ERROR(mcd, "len=0 is not supported!"); return -EINVAL; } MCDRV_DBG_VERBOSE(mcd, "buffer: %p, len=%08x", buffer, len); if (!mc_find_cont_wsm_addr(instance, buffer, &kbuff, len)) table = mc_alloc_mmu_table(instance, NULL, kbuff, len); else table = mc_alloc_mmu_table(instance, task, buffer, len); if (IS_ERR(table)) { MCDRV_DBG_ERROR(mcd, "mc_alloc_mmu_table() failed"); return -EINVAL; } /* set response */ *handle = table->handle; /* WARNING: daemon shouldn't know this either, but live with it */ if 
(is_daemon(instance)) *phys = table->phys; else *phys = 0; MCDRV_DBG_VERBOSE(mcd, "handle: %d, phys=0x%llX", *handle, (u64)(*phys)); MCDRV_DBG_VERBOSE(mcd, "exit with %d/0x%08X", ret, ret); return ret; } int mc_unregister_wsm_mmu(struct mc_instance *instance, uint32_t handle) { int ret = 0; if (WARN(!instance, "No instance data available")) return -EFAULT; /* free table (if no further locks exist) */ mc_free_mmu_table(instance, handle); return ret; } /* Lock the object from handle, it could be a WSM MMU table or a cont buffer! */ static int mc_lock_handle(struct mc_instance *instance, uint32_t handle) { int ret = 0; if (WARN(!instance, "No instance data available")) return -EFAULT; if (WARN_ON(!is_daemon(instance))) { MCDRV_DBG_ERROR(mcd, "caller not MobiCore Daemon"); return -EPERM; } mutex_lock(&instance->lock); ret = mc_lock_mmu_table(instance, handle); /* Handle was not a MMU table but a cont buffer */ if (ret == -EINVAL) { /* Call the non locking variant! */ ret = __lock_buffer(instance, handle); } mutex_unlock(&instance->lock); return ret; } static int mc_unlock_handle(struct mc_instance *instance, uint32_t handle) { int ret = 0; if (WARN(!instance, "No instance data available")) return -EFAULT; if (WARN_ON(!is_daemon(instance))) { MCDRV_DBG_ERROR(mcd, "caller not MobiCore Daemon"); return -EPERM; } mutex_lock(&instance->lock); ret = mc_free_mmu_table(instance, handle); /* Not a MMU table, then it must be a buffer */ if (ret == -EINVAL) { /* Call the non locking variant! 
*/ ret = __free_buffer(instance, handle, true); } mutex_unlock(&instance->lock); return ret; } static phys_addr_t mc_find_wsm_mmu(struct mc_instance *instance, uint32_t handle, int32_t fd) { if (WARN(!instance, "No instance data available")) return 0; if (WARN_ON(!is_daemon(instance))) { MCDRV_DBG_ERROR(mcd, "caller not MobiCore Daemon"); return 0; } return mc_find_mmu_table(handle, fd); } static int mc_clean_wsm_mmu(struct mc_instance *instance) { if (WARN(!instance, "No instance data available")) return -EFAULT; if (WARN_ON(!is_daemon(instance))) { MCDRV_DBG_ERROR(mcd, "caller not MobiCore Daemon"); return -EPERM; } mc_clean_mmu_tables(); return 0; } static int mc_fd_mmap(struct file *file, struct vm_area_struct *vmarea) { struct mc_instance *instance = get_instance(file); unsigned long len = vmarea->vm_end - vmarea->vm_start; phys_addr_t paddr = (vmarea->vm_pgoff << PAGE_SHIFT); unsigned int pfn; struct mc_buffer *buffer = 0; int ret = 0; MCDRV_DBG_VERBOSE(mcd, "enter (vma start=0x%p, size=%ld, mci=0x%llX)", (void *)vmarea->vm_start, len, (u64)ctx.mci_base.phys); if (WARN(!instance, "No instance data available")) return -EFAULT; if (len == 0) { MCDRV_DBG_ERROR(mcd, "cannot allocate size 0"); return -ENOMEM; } if (paddr) { mutex_lock(&ctx.bufs_lock); /* search for the buffer list. */ list_for_each_entry(buffer, &ctx.cont_bufs, list) { /* Only allow mapping if the client owns it!*/ if (buffer->phys == paddr && buffer->instance == instance) { /* We shouldn't do remap with larger size */ if (buffer->len > len) break; /* We can't allow mapping the buffer twice */ if (!buffer->uaddr) goto found; else break; } } /* Nothing found return */ mutex_unlock(&ctx.bufs_lock); return -EINVAL; found: buffer->uaddr = (void *)vmarea->vm_start; vmarea->vm_flags |= VM_IO; /* * Convert kernel address to user address. Kernel address begins * at PAGE_OFFSET, user address range is below PAGE_OFFSET. * Remapping the area is always done, so multiple mappings * of one region are possible. 
Now remap kernel address * space into user space */ pfn = (unsigned int)paddr >> PAGE_SHIFT; ret = (int)remap_pfn_range(vmarea, vmarea->vm_start, pfn, buffer->len, vmarea->vm_page_prot); /* If the remap failed then don't mark this buffer as marked * since the unmaping will also fail */ if (ret) buffer->uaddr = NULL; mutex_unlock(&ctx.bufs_lock); } else { if (!is_daemon(instance)) return -EPERM; paddr = get_mci_base_phys(len); if (!paddr) return -EFAULT; vmarea->vm_flags |= VM_IO; /* * Convert kernel address to user address. Kernel address begins * at PAGE_OFFSET, user address range is below PAGE_OFFSET. * Remapping the area is always done, so multiple mappings * of one region are possible. Now remap kernel address * space into user space */ pfn = (unsigned int)paddr >> PAGE_SHIFT; ret = (int)remap_pfn_range(vmarea, vmarea->vm_start, pfn, len, vmarea->vm_page_prot); } MCDRV_DBG_VERBOSE(mcd, "exit with %d/0x%08X", ret, ret); return ret; } static inline int ioctl_check_pointer(unsigned int cmd, int __user *uarg) { int err = 0; if (_IOC_DIR(cmd) & _IOC_READ) err = !access_ok(VERIFY_WRITE, uarg, _IOC_SIZE(cmd)); else if (_IOC_DIR(cmd) & _IOC_WRITE) err = !access_ok(VERIFY_READ, uarg, _IOC_SIZE(cmd)); if (err) return -EFAULT; return 0; } /* * mc_fd_user_ioctl() - Will be called from user space as ioctl(..) 
* @file pointer to file * @cmd command * @arg arguments * * Returns 0 for OK and an errno in case of error */ static long mc_fd_user_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct mc_instance *instance = get_instance(file); int __user *uarg = (int __user *)arg; int ret = -EINVAL; if (WARN(!instance, "No instance data available")) return -EFAULT; if (ioctl_check_pointer(cmd, uarg)) return -EFAULT; switch (cmd) { case MC_IO_FREE: ret = mc_free_buffer(instance, (uint32_t)arg); break; case MC_IO_REG_WSM:{ struct mc_ioctl_reg_wsm reg; phys_addr_t phys; if (copy_from_user(&reg, uarg, sizeof(reg))) return -EFAULT; ret = mc_register_wsm_mmu(instance, (void *)reg.buffer, reg.len, &reg.handle, &phys); reg.table_phys = phys; if (!ret) { if (copy_to_user(uarg, &reg, sizeof(reg))) { ret = -EFAULT; mc_unregister_wsm_mmu(instance, reg.handle); } } break; } case MC_IO_UNREG_WSM: ret = mc_unregister_wsm_mmu(instance, (uint32_t)arg); break; case MC_IO_VERSION: ret = put_user(mc_get_version(), uarg); if (ret) MCDRV_DBG_ERROR(mcd, "IOCTL_GET_VERSION failed to put data"); break; case MC_IO_MAP_WSM:{ struct mc_ioctl_map map; struct mc_buffer *buffer = 0; if (copy_from_user(&map, uarg, sizeof(map))) return -EFAULT; /* Setup the WSM buffer structure! 
*/ if (mc_get_buffer(instance, &buffer, map.len)) return -EFAULT; map.handle = buffer->handle; map.phys_addr = buffer->phys; map.reused = 0; if (copy_to_user(uarg, &map, sizeof(map))) ret = -EFAULT; else ret = 0; break; } default: MCDRV_DBG_ERROR(mcd, "unsupported cmd=0x%x", cmd); ret = -ENOIOCTLCMD; break; } /* end switch(cmd) */ #ifdef MC_MEM_TRACES mobicore_log_read(); #endif return (int)ret; } static long mc_fd_admin_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct mc_instance *instance = get_instance(file); int __user *uarg = (int __user *)arg; int ret = -EINVAL; if (WARN(!instance, "No instance data available")) return -EFAULT; if (WARN_ON(!is_daemon(instance))) { MCDRV_DBG_ERROR(mcd, "caller not MobiCore Daemon"); return -EPERM; } if (ioctl_check_pointer(cmd, uarg)) return -EFAULT; switch (cmd) { case MC_IO_INIT: { struct mc_ioctl_init init; ctx.mcp = NULL; if (!ctx.mci_base.phys) { MCDRV_DBG_ERROR(mcd, "Cannot init MobiCore without MCI!"); return -EINVAL; } if (copy_from_user(&init, uarg, sizeof(init))) return -EFAULT; ctx.mcp = ctx.mci_base.addr + init.mcp_offset; ret = mc_init(ctx.mci_base.phys, init.nq_length, init.mcp_offset, init.mcp_length); break; } case MC_IO_INFO: { struct mc_ioctl_info info; if (copy_from_user(&info, uarg, sizeof(info))) return -EFAULT; ret = mc_info(info.ext_info_id, &info.state, &info.ext_info); if (!ret) { if (copy_to_user(uarg, &info, sizeof(info))) ret = -EFAULT; } break; } case MC_IO_YIELD: ret = mc_yield(); break; case MC_IO_NSIQ: ret = mc_nsiq(); break; case MC_IO_LOCK_WSM: { ret = mc_lock_handle(instance, (uint32_t)arg); break; } case MC_IO_UNLOCK_WSM: ret = mc_unlock_handle(instance, (uint32_t)arg); break; case MC_IO_CLEAN_WSM: ret = mc_clean_wsm_mmu(instance); break; case MC_IO_RESOLVE_WSM: { phys_addr_t phys; struct mc_ioctl_resolv_wsm wsm; if (copy_from_user(&wsm, uarg, sizeof(wsm))) return -EFAULT; phys = mc_find_wsm_mmu(instance, wsm.handle, wsm.fd); if (!phys) return -EINVAL; wsm.phys = phys; 
if (copy_to_user(uarg, &wsm, sizeof(wsm))) return -EFAULT; ret = 0; break; } case MC_IO_RESOLVE_CONT_WSM: { struct mc_ioctl_resolv_cont_wsm cont_wsm; phys_addr_t phys = 0; uint32_t len = 0; if (copy_from_user(&cont_wsm, uarg, sizeof(cont_wsm))) return -EFAULT; ret = mc_find_cont_wsm(instance, cont_wsm.handle, cont_wsm.fd, &phys, &len); if (!ret) { cont_wsm.phys = phys; cont_wsm.length = len; if (copy_to_user(uarg, &cont_wsm, sizeof(cont_wsm))) ret = -EFAULT; } break; } case MC_IO_MAP_MCI:{ struct mc_ioctl_map map; if (copy_from_user(&map, uarg, sizeof(map))) return -EFAULT; map.reused = (ctx.mci_base.phys != 0); map.phys_addr = get_mci_base_phys(map.len); if (!map.phys_addr) { MCDRV_DBG_ERROR(mcd, "Failed to setup MCI buffer!"); return -EFAULT; } if (copy_to_user(uarg, &map, sizeof(map))) ret = -EFAULT; ret = 0; break; } case MC_IO_LOG_SETUP: { #ifdef MC_MEM_TRACES ret = mobicore_log_setup(); #endif break; } /* The rest is handled commonly by user IOCTL */ default: ret = mc_fd_user_ioctl(file, cmd, arg); } /* end switch(cmd) */ #ifdef MC_MEM_TRACES mobicore_log_read(); #endif return (int)ret; } /* * mc_fd_read() - This will be called from user space as read(...) * @file: file pointer * @buffer: buffer where to copy to(userspace) * @buffer_len: number of requested data * @pos: not used * * The read function is blocking until a interrupt occurs. In that case the * event counter is copied into user space and the function is finished. 
 *
 * If OK this function returns the number of copied data otherwise it returns
 * errno
 */
static ssize_t mc_fd_read(struct file *file, char *buffer, size_t buffer_len,
			  loff_t *pos)
{
	int ret = 0, ssiq_counter;
	struct mc_instance *instance = get_instance(file);

	if (WARN(!instance, "No instance data available"))
		return -EFAULT;

	/* avoid debug output on non-error, because this is call quite often */
	MCDRV_DBG_VERBOSE(mcd, "enter");

	/* only the MobiCore Daemon is allowed to call this function */
	if (WARN_ON(!is_daemon(instance))) {
		MCDRV_DBG_ERROR(mcd, "caller not MobiCore Daemon");
		return -EPERM;
	}

	/* the daemon always reads a whole event counter */
	if (buffer_len < sizeof(unsigned int)) {
		MCDRV_DBG_ERROR(mcd, "invalid length");
		return -EINVAL;
	}

	/*
	 * Block until the ISR (mc_ssiq_isr) has bumped isr_counter past
	 * the last value we reported to the daemon (evt_counter).
	 * NOTE(review): evt_counter is read/written without a lock —
	 * presumably safe because the daemon is the single reader; confirm.
	 */
	for (;;) {
		if (wait_for_completion_interruptible(&ctx.isr_comp)) {
			MCDRV_DBG_VERBOSE(mcd, "read interrupted");
			return -ERESTARTSYS;
		}

		ssiq_counter = atomic_read(&ctx.isr_counter);
		MCDRV_DBG_VERBOSE(mcd, "ssiq_counter=%i, ctx.counter=%i",
				  ssiq_counter, ctx.evt_counter);

		if (ssiq_counter != ctx.evt_counter) {
			/* read data and exit loop without error */
			ctx.evt_counter = ssiq_counter;
			ret = 0;
			break;
		}

		/* end loop if non-blocking */
		if (file->f_flags & O_NONBLOCK) {
			MCDRV_DBG_ERROR(mcd, "non-blocking read");
			return -EAGAIN;
		}

		if (signal_pending(current)) {
			MCDRV_DBG_VERBOSE(mcd, "received signal.");
			return -ERESTARTSYS;
		}
	}

	/* read data and exit loop: report the new counter value */
	ret = copy_to_user(buffer, &ctx.evt_counter, sizeof(unsigned int));
	if (ret != 0) {
		MCDRV_DBG_ERROR(mcd, "copy_to_user failed");
		return -EFAULT;
	}
	ret = sizeof(unsigned int);
	return (ssize_t)ret;
}

/*
 * Initialize a new mobicore API instance object
 *
 * @return Instance or NULL if no allocation was possible.
 */
struct mc_instance *mc_alloc_instance(void)
{
	struct mc_instance *instance;

	instance = kzalloc(sizeof(*instance), GFP_KERNEL);
	if (instance == NULL)
		return NULL;

	/* get a unique ID for this instance (PIDs are not unique) */
	instance->handle = get_unique_id();

	mutex_init(&instance->lock);

	return instance;
}

#if defined(TBASE_CORE_SWITCHER) && defined(DEBUG)
/*
 * Debug-only write handler: 'n' triggers an NSIQ, a single digit
 * requests a switch of the active core.
 */
static ssize_t mc_fd_write(struct file *file, const char __user *buffer,
			   size_t buffer_len, loff_t *x)
{
	uint32_t cpu_new;
	/* we only consider one digit */
	char buf[2];
	struct mc_instance *instance = get_instance(file);

	if (WARN(!instance, "No instance data available"))
		return -EFAULT;

	/* Invalid data, nothing to do */
	if (buffer_len < 1)
		return -EINVAL;

	/* Invalid data, nothing to do */
	if (copy_from_user(buf, buffer, min(sizeof(buf), buffer_len)))
		return -EFAULT;

	if (buf[0] == 'n') {
		mc_nsiq();
	/* If it's a digit then switch cores */
	} else if ((buf[0] >= '0') && (buf[0] <= '9')) {
		cpu_new = buf[0] - '0';
		/*
		 * NOTE(review): '<= 8' accepts nine values (0..8) — verify
		 * against the actual core count; looks like it may be
		 * intended as '< 8'.
		 */
		if (cpu_new <= 8) {
			MCDRV_DBG_VERBOSE(mcd, "Set Active Cpu: %d\n",
					  cpu_new);
			mc_switch_core(cpu_new);
		}
	} else {
		return -EINVAL;
	}

	return buffer_len;
}
#endif

/*
 * Release a mobicore instance object and all objects related to it
 * @instance: instance
 * Returns 0 if Ok or -E ERROR
 */
int mc_release_instance(struct mc_instance *instance)
{
	struct mc_buffer *buffer, *tmp;

	if (WARN(!instance, "No instance data available"))
		return -EFAULT;

	mutex_lock(&instance->lock);
	mc_clear_mmu_tables(instance);

	mutex_lock(&ctx.bufs_lock);
	/* release all mapped data */

	/* Check if some buffers are orphaned. */
	list_for_each_entry_safe(buffer, tmp, &ctx.cont_bufs, list) {
		/* It's safe here to only call free_buffer() without
		 * unmapping because mmap() takes a refcount to the file's
		 * fd so only time we end up here is when everything has
		 * been unmaped or the process called exit() */
		if (buffer->instance == instance) {
			buffer->instance = NULL;
			free_buffer(buffer, false);
		}
	}
	mutex_unlock(&ctx.bufs_lock);

	mutex_unlock(&instance->lock);

	/* release instance context */
	kfree(instance);

	return 0;
}

/*
 * mc_fd_user_open() - Will be called from user space as fd = open(...)
 * A set of internal instance data are created and initialized.
 *
 * @inode
 * @file
 * Returns 0 if OK or -ENOMEM if no allocation was possible.
 */
static int mc_fd_user_open(struct inode *inode, struct file *file)
{
	struct mc_instance *instance;

	MCDRV_DBG_VERBOSE(mcd, "enter");

	instance = mc_alloc_instance();
	if (instance == NULL)
		return -ENOMEM;

	/* store instance data reference */
	file->private_data = instance;

	return 0;
}

/*
 * Open handler for the admin node: registers the calling process as the
 * one and only MobiCore daemon.
 */
static int mc_fd_admin_open(struct inode *inode, struct file *file)
{
	struct mc_instance *instance;

	/*
	 * The daemon is already set so we can't allow anybody else to open
	 * the admin interface.
	 * NOTE(review): this check-then-set is not done under a lock, so
	 * two concurrent opens could race — confirm open() serialization
	 * is guaranteed by the caller.
	 */
	if (ctx.daemon_inst) {
		MCDRV_DBG_ERROR(mcd, "Daemon is already connected");
		return -EPERM;
	}
	/* Setup the usual variables */
	if (mc_fd_user_open(inode, file))
		return -ENOMEM;
	instance = get_instance(file);

	MCDRV_DBG(mcd, "accept this as MobiCore Daemon");

	ctx.daemon_inst = instance;
	ctx.daemon = current;
	instance->admin = true;
	init_completion(&ctx.isr_comp);
	/* init ssiq event counter */
	ctx.evt_counter = atomic_read(&(ctx.isr_counter));

	return 0;
}

/*
 * mc_fd_release() - This function will be called from user space as close(...)
 * The instance data are freed and the associated memory pages are unreserved.
 *
 * @inode
 * @file
 *
 * Returns 0
 */
static int mc_fd_release(struct inode *inode, struct file *file)
{
	int ret = 0;
	struct mc_instance *instance = get_instance(file);

	if (WARN(!instance, "No instance data available"))
		return -EFAULT;

	/* check if daemon closes us. */
	if (is_daemon(instance)) {
		MCDRV_DBG_WARN(mcd, "MobiCore Daemon died");
		ctx.daemon_inst = NULL;
		ctx.daemon = NULL;
	}

	ret = mc_release_instance(instance);

	/*
	 * ret is quite irrelevant here as most apps don't care about the
	 * return value from close() and it's quite difficult to recover
	 */
	MCDRV_DBG_VERBOSE(mcd, "exit with %d/0x%08X", ret, ret);

	return (int)ret;
}

/*
 * This function represents the interrupt function of the mcDrvModule.
 * It signals by incrementing of an event counter and the start of the read
 * waiting queue, the read function a interrupt has occurred.
 */
static irqreturn_t mc_ssiq_isr(int intr, void *context)
{
	/* increment interrupt event counter */
	atomic_inc(&(ctx.isr_counter));

	/* signal the daemon (woken up in mc_fd_read) */
	complete(&ctx.isr_comp);

#ifdef MC_MEM_TRACES
	mobicore_log_read();
#endif
	return IRQ_HANDLED;
}

/* function table structure of this device driver. */
static const struct file_operations mc_admin_fops = {
	.owner = THIS_MODULE,
	.open = mc_fd_admin_open,
	.release = mc_fd_release,
	.unlocked_ioctl = mc_fd_admin_ioctl,
	.mmap = mc_fd_mmap,
	.read = mc_fd_read,
};

/* function table structure of this device driver.
*/ static const struct file_operations mc_user_fops = { .owner = THIS_MODULE, .open = mc_fd_user_open, .release = mc_fd_release, .unlocked_ioctl = mc_fd_user_ioctl, .mmap = mc_fd_mmap, #if defined(TBASE_CORE_SWITCHER) && defined(DEBUG) .write = mc_fd_write, #endif }; static int create_devices(void) { int ret = 0; cdev_init(&mc_admin_cdev, &mc_admin_fops); cdev_init(&mc_user_cdev, &mc_user_fops); mc_device_class = class_create(THIS_MODULE, "mobicore"); if (IS_ERR(mc_device_class)) { MCDRV_DBG_ERROR(mcd, "failed to create device class"); ret = PTR_ERR(mc_device_class); goto out; } ret = alloc_chrdev_region(&mc_dev_admin, 0, MC_DEV_MAX, "mobicore"); if (ret < 0) { MCDRV_DBG_ERROR(mcd, "failed to allocate char dev region"); goto error; } mc_dev_user = MKDEV(MAJOR(mc_dev_admin), 1); MCDRV_DBG_VERBOSE(mcd, "%s: dev %d", "mobicore", MAJOR(mc_dev_admin)); /* First the ADMIN node */ ret = cdev_add(&mc_admin_cdev, mc_dev_admin, 1); if (ret != 0) { MCDRV_DBG_ERROR(mcd, "admin device register failed"); goto error; } mc_admin_cdev.owner = THIS_MODULE; device_create(mc_device_class, NULL, mc_dev_admin, NULL, MC_ADMIN_DEVNODE); /* Then the user node */ ret = cdev_add(&mc_user_cdev, mc_dev_user, 1); if (ret != 0) { MCDRV_DBG_ERROR(mcd, "user device register failed"); goto error_unregister; } mc_user_cdev.owner = THIS_MODULE; device_create(mc_device_class, NULL, mc_dev_user, NULL, MC_USER_DEVNODE); goto out; error_unregister: device_destroy(mc_device_class, mc_dev_admin); device_destroy(mc_device_class, mc_dev_user); cdev_del(&mc_admin_cdev); cdev_del(&mc_user_cdev); unregister_chrdev_region(mc_dev_admin, MC_DEV_MAX); error: class_destroy(mc_device_class); out: return ret; } /* * This function is called the kernel during startup or by a insmod command. 
* This device is installed and registered as cdev, then interrupt and * queue handling is set up */ static int __init mobicore_init(void) { int ret = 0; dev_set_name(mcd, "mcd"); dev_info(mcd, "MobiCore Driver, Build: " __TIMESTAMP__ "\n"); dev_info(mcd, "MobiCore mcDrvModuleApi version is %i.%i\n", MCDRVMODULEAPI_VERSION_MAJOR, MCDRVMODULEAPI_VERSION_MINOR); #ifdef MOBICORE_COMPONENT_BUILD_TAG dev_info(mcd, "MobiCore %s\n", MOBICORE_COMPONENT_BUILD_TAG); #endif /* Hardware does not support ARM TrustZone -> Cannot continue! */ if (!has_security_extensions()) { MCDRV_DBG_ERROR(mcd, "Hardware doesn't support ARM TrustZone!"); return -ENODEV; } /* Running in secure mode -> Cannot load the driver! */ if (is_secure_mode()) { MCDRV_DBG_ERROR(mcd, "Running in secure MODE!"); return -ENODEV; } ret = mc_fastcall_init(&ctx); if (ret) goto error; init_completion(&ctx.isr_comp); /* initialize event counter for signaling of an IRQ to zero */ atomic_set(&ctx.isr_counter, 0); /* set up S-SIQ interrupt handler ************************/ ret = request_irq(MC_INTR_SSIQ, mc_ssiq_isr, IRQF_TRIGGER_RISING, MC_ADMIN_DEVNODE, &ctx); if (ret != 0) { MCDRV_DBG_ERROR(mcd, "interrupt request failed"); goto err_req_irq; } #ifdef MC_PM_RUNTIME ret = mc_pm_initialize(&ctx); if (ret != 0) { MCDRV_DBG_ERROR(mcd, "Power Management init failed!"); goto free_isr; } #endif ret = create_devices(); if (ret != 0) goto free_pm; ret = mc_init_mmu_tables(); #ifdef MC_CRYPTO_CLOCK_MANAGEMENT ret = mc_pm_clock_initialize(); #endif /* * initialize unique number counter which we can use for * handles. It is limited to 2^32, but this should be * enough to be roll-over safe for us. We start with 1 * instead of 0. 
*/ atomic_set(&ctx.unique_counter, 1); /* init list for contiguous buffers */ INIT_LIST_HEAD(&ctx.cont_bufs); /* init lock for the buffers list */ mutex_init(&ctx.bufs_lock); memset(&ctx.mci_base, 0, sizeof(ctx.mci_base)); MCDRV_DBG(mcd, "initialized"); return 0; free_pm: #ifdef MC_PM_RUNTIME mc_pm_free(); free_isr: free_irq(MC_INTR_SSIQ, &ctx); #endif err_req_irq: mc_fastcall_destroy(); error: return ret; } /* * This function removes this device driver from the Linux device manager . */ static void __exit mobicore_exit(void) { MCDRV_DBG_VERBOSE(mcd, "enter"); #ifdef MC_MEM_TRACES mobicore_log_free(); #endif mc_release_mmu_tables(); #ifdef MC_PM_RUNTIME mc_pm_free(); #endif device_destroy(mc_device_class, mc_dev_admin); device_destroy(mc_device_class, mc_dev_user); class_destroy(mc_device_class); unregister_chrdev_region(mc_dev_admin, MC_DEV_MAX); free_irq(MC_INTR_SSIQ, &ctx); mc_fastcall_destroy(); #ifdef MC_CRYPTO_CLOCK_MANAGEMENT mc_pm_clock_finalize(); #endif MCDRV_DBG_VERBOSE(mcd, "exit"); } bool mc_sleep_ready(void) { #ifdef MC_PM_RUNTIME return mc_pm_sleep_ready(); #else return true; #endif } /* Linux Driver Module Macros */ module_init(mobicore_init); module_exit(mobicore_exit); MODULE_AUTHOR("Giesecke & Devrient GmbH"); MODULE_AUTHOR("Trustonic Limited"); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("MobiCore driver");
gpl-2.0
someone755/android_kernel_sony_msm8974_stock
drivers/devfreq/governor_msm_cpufreq.c
1507
1903
/*
 * Copyright (c) 2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/devfreq.h>
#include <mach/cpufreq.h>
#include "governor.h"

/* Serializes access to the single governed devfreq device below. */
DEFINE_MUTEX(df_lock);
static struct devfreq *df;

/* Report the bandwidth demanded by the MSM cpufreq driver as the target. */
static int devfreq_msm_cpufreq_get_freq(struct devfreq *df,
				unsigned long *freq,
				u32 *flag)
{
	*freq = msm_cpufreq_get_bw();
	return 0;
}

/*
 * Called by the cpufreq side whenever its bandwidth vote changes;
 * re-evaluates the devfreq device if the governor is active.
 */
int devfreq_msm_cpufreq_update_bw(void)
{
	int ret = 0;

	mutex_lock(&df_lock);
	if (df) {
		mutex_lock(&df->lock);
		ret = update_devfreq(df);
		mutex_unlock(&df->lock);
	}
	mutex_unlock(&df_lock);
	return ret;
}

static int devfreq_msm_cpufreq_ev_handler(struct devfreq *devfreq,
				unsigned int event, void *data)
{
	int ret;

	switch (event) {
	case DEVFREQ_GOV_START:
		mutex_lock(&df_lock);
		df = devfreq;
		mutex_unlock(&df_lock);

		ret = devfreq_msm_cpufreq_update_bw();
		if (ret) {
			pr_err("Unable to update BW! Gov start failed!\n");
			/*
			 * Fix: don't leave the global pointing at a device
			 * whose governor failed to start — otherwise later
			 * devfreq_msm_cpufreq_update_bw() calls would drive
			 * a devfreq that was never started.
			 */
			mutex_lock(&df_lock);
			df = NULL;
			mutex_unlock(&df_lock);
			return ret;
		}

		/* This governor is event-driven; no polling needed. */
		devfreq_monitor_stop(df);
		pr_debug("Enabled MSM CPUfreq governor\n");
		break;

	case DEVFREQ_GOV_STOP:
		mutex_lock(&df_lock);
		df = NULL;
		mutex_unlock(&df_lock);
		pr_debug("Disabled MSM CPUfreq governor\n");
		break;
	}

	return 0;
}

static struct devfreq_governor devfreq_msm_cpufreq = {
	.name = "msm_cpufreq",
	.get_target_freq = devfreq_msm_cpufreq_get_freq,
	.event_handler = devfreq_msm_cpufreq_ev_handler,
};

int register_devfreq_msm_cpufreq(void)
{
	return devfreq_add_governor(&devfreq_msm_cpufreq);
}
gpl-2.0
n3ocort3x/msm_htc_helper
drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
1507
47872
/* * Copyright (C) 2003 - 2009 NetXen, Inc. * Copyright (C) 2009 - QLogic Corporation. * All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, * MA 02111-1307, USA. * * The full GNU General Public License is included in this distribution * in the file called "COPYING". * */ #include <linux/netdevice.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/if_vlan.h> #include "netxen_nic.h" #include "netxen_nic_hw.h" struct crb_addr_pair { u32 addr; u32 data; }; #define NETXEN_MAX_CRB_XFORM 60 static unsigned int crb_addr_xform[NETXEN_MAX_CRB_XFORM]; #define NETXEN_ADDR_ERROR (0xffffffff) #define crb_addr_transform(name) \ crb_addr_xform[NETXEN_HW_PX_MAP_CRB_##name] = \ NETXEN_HW_CRB_HUB_AGT_ADR_##name << 20 #define NETXEN_NIC_XDMA_RESET 0x8000ff static void netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter, struct nx_host_rds_ring *rds_ring); static int netxen_p3_has_mn(struct netxen_adapter *adapter); static void crb_addr_transform_setup(void) { crb_addr_transform(XDMA); crb_addr_transform(TIMR); crb_addr_transform(SRE); crb_addr_transform(SQN3); crb_addr_transform(SQN2); crb_addr_transform(SQN1); crb_addr_transform(SQN0); crb_addr_transform(SQS3); crb_addr_transform(SQS2); crb_addr_transform(SQS1); crb_addr_transform(SQS0); crb_addr_transform(RPMX7); crb_addr_transform(RPMX6); crb_addr_transform(RPMX5); 
crb_addr_transform(RPMX4); crb_addr_transform(RPMX3); crb_addr_transform(RPMX2); crb_addr_transform(RPMX1); crb_addr_transform(RPMX0); crb_addr_transform(ROMUSB); crb_addr_transform(SN); crb_addr_transform(QMN); crb_addr_transform(QMS); crb_addr_transform(PGNI); crb_addr_transform(PGND); crb_addr_transform(PGN3); crb_addr_transform(PGN2); crb_addr_transform(PGN1); crb_addr_transform(PGN0); crb_addr_transform(PGSI); crb_addr_transform(PGSD); crb_addr_transform(PGS3); crb_addr_transform(PGS2); crb_addr_transform(PGS1); crb_addr_transform(PGS0); crb_addr_transform(PS); crb_addr_transform(PH); crb_addr_transform(NIU); crb_addr_transform(I2Q); crb_addr_transform(EG); crb_addr_transform(MN); crb_addr_transform(MS); crb_addr_transform(CAS2); crb_addr_transform(CAS1); crb_addr_transform(CAS0); crb_addr_transform(CAM); crb_addr_transform(C2C1); crb_addr_transform(C2C0); crb_addr_transform(SMB); crb_addr_transform(OCM0); crb_addr_transform(I2C0); } void netxen_release_rx_buffers(struct netxen_adapter *adapter) { struct netxen_recv_context *recv_ctx; struct nx_host_rds_ring *rds_ring; struct netxen_rx_buffer *rx_buf; int i, ring; recv_ctx = &adapter->recv_ctx; for (ring = 0; ring < adapter->max_rds_rings; ring++) { rds_ring = &recv_ctx->rds_rings[ring]; for (i = 0; i < rds_ring->num_desc; ++i) { rx_buf = &(rds_ring->rx_buf_arr[i]); if (rx_buf->state == NETXEN_BUFFER_FREE) continue; pci_unmap_single(adapter->pdev, rx_buf->dma, rds_ring->dma_size, PCI_DMA_FROMDEVICE); if (rx_buf->skb != NULL) dev_kfree_skb_any(rx_buf->skb); } } } void netxen_release_tx_buffers(struct netxen_adapter *adapter) { struct netxen_cmd_buffer *cmd_buf; struct netxen_skb_frag *buffrag; int i, j; struct nx_host_tx_ring *tx_ring = adapter->tx_ring; cmd_buf = tx_ring->cmd_buf_arr; for (i = 0; i < tx_ring->num_desc; i++) { buffrag = cmd_buf->frag_array; if (buffrag->dma) { pci_unmap_single(adapter->pdev, buffrag->dma, buffrag->length, PCI_DMA_TODEVICE); buffrag->dma = 0ULL; } for (j = 1; j < 
cmd_buf->frag_count; j++) { buffrag++; if (buffrag->dma) { pci_unmap_page(adapter->pdev, buffrag->dma, buffrag->length, PCI_DMA_TODEVICE); buffrag->dma = 0ULL; } } if (cmd_buf->skb) { dev_kfree_skb_any(cmd_buf->skb); cmd_buf->skb = NULL; } cmd_buf++; } } void netxen_free_sw_resources(struct netxen_adapter *adapter) { struct netxen_recv_context *recv_ctx; struct nx_host_rds_ring *rds_ring; struct nx_host_tx_ring *tx_ring; int ring; recv_ctx = &adapter->recv_ctx; if (recv_ctx->rds_rings == NULL) goto skip_rds; for (ring = 0; ring < adapter->max_rds_rings; ring++) { rds_ring = &recv_ctx->rds_rings[ring]; vfree(rds_ring->rx_buf_arr); rds_ring->rx_buf_arr = NULL; } kfree(recv_ctx->rds_rings); skip_rds: if (adapter->tx_ring == NULL) return; tx_ring = adapter->tx_ring; vfree(tx_ring->cmd_buf_arr); kfree(tx_ring); adapter->tx_ring = NULL; } int netxen_alloc_sw_resources(struct netxen_adapter *adapter) { struct netxen_recv_context *recv_ctx; struct nx_host_rds_ring *rds_ring; struct nx_host_sds_ring *sds_ring; struct nx_host_tx_ring *tx_ring; struct netxen_rx_buffer *rx_buf; int ring, i, size; struct netxen_cmd_buffer *cmd_buf_arr; struct net_device *netdev = adapter->netdev; struct pci_dev *pdev = adapter->pdev; size = sizeof(struct nx_host_tx_ring); tx_ring = kzalloc(size, GFP_KERNEL); if (tx_ring == NULL) { dev_err(&pdev->dev, "%s: failed to allocate tx ring struct\n", netdev->name); return -ENOMEM; } adapter->tx_ring = tx_ring; tx_ring->num_desc = adapter->num_txd; tx_ring->txq = netdev_get_tx_queue(netdev, 0); cmd_buf_arr = vzalloc(TX_BUFF_RINGSIZE(tx_ring)); if (cmd_buf_arr == NULL) { dev_err(&pdev->dev, "%s: failed to allocate cmd buffer ring\n", netdev->name); goto err_out; } tx_ring->cmd_buf_arr = cmd_buf_arr; recv_ctx = &adapter->recv_ctx; size = adapter->max_rds_rings * sizeof (struct nx_host_rds_ring); rds_ring = kzalloc(size, GFP_KERNEL); if (rds_ring == NULL) { dev_err(&pdev->dev, "%s: failed to allocate rds ring struct\n", netdev->name); goto err_out; } 
recv_ctx->rds_rings = rds_ring; for (ring = 0; ring < adapter->max_rds_rings; ring++) { rds_ring = &recv_ctx->rds_rings[ring]; switch (ring) { case RCV_RING_NORMAL: rds_ring->num_desc = adapter->num_rxd; if (adapter->ahw.cut_through) { rds_ring->dma_size = NX_CT_DEFAULT_RX_BUF_LEN; rds_ring->skb_size = NX_CT_DEFAULT_RX_BUF_LEN; } else { if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) rds_ring->dma_size = NX_P3_RX_BUF_MAX_LEN; else rds_ring->dma_size = NX_P2_RX_BUF_MAX_LEN; rds_ring->skb_size = rds_ring->dma_size + NET_IP_ALIGN; } break; case RCV_RING_JUMBO: rds_ring->num_desc = adapter->num_jumbo_rxd; if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) rds_ring->dma_size = NX_P3_RX_JUMBO_BUF_MAX_LEN; else rds_ring->dma_size = NX_P2_RX_JUMBO_BUF_MAX_LEN; if (adapter->capabilities & NX_CAP0_HW_LRO) rds_ring->dma_size += NX_LRO_BUFFER_EXTRA; rds_ring->skb_size = rds_ring->dma_size + NET_IP_ALIGN; break; case RCV_RING_LRO: rds_ring->num_desc = adapter->num_lro_rxd; rds_ring->dma_size = NX_RX_LRO_BUFFER_LENGTH; rds_ring->skb_size = rds_ring->dma_size + NET_IP_ALIGN; break; } rds_ring->rx_buf_arr = vzalloc(RCV_BUFF_RINGSIZE(rds_ring)); if (rds_ring->rx_buf_arr == NULL) /* free whatever was already allocated */ goto err_out; INIT_LIST_HEAD(&rds_ring->free_list); /* * Now go through all of them, set reference handles * and put them in the queues. 
*/ rx_buf = rds_ring->rx_buf_arr; for (i = 0; i < rds_ring->num_desc; i++) { list_add_tail(&rx_buf->list, &rds_ring->free_list); rx_buf->ref_handle = i; rx_buf->state = NETXEN_BUFFER_FREE; rx_buf++; } spin_lock_init(&rds_ring->lock); } for (ring = 0; ring < adapter->max_sds_rings; ring++) { sds_ring = &recv_ctx->sds_rings[ring]; sds_ring->irq = adapter->msix_entries[ring].vector; sds_ring->adapter = adapter; sds_ring->num_desc = adapter->num_rxd; for (i = 0; i < NUM_RCV_DESC_RINGS; i++) INIT_LIST_HEAD(&sds_ring->free_list[i]); } return 0; err_out: netxen_free_sw_resources(adapter); return -ENOMEM; } /* * netxen_decode_crb_addr(0 - utility to translate from internal Phantom CRB * address to external PCI CRB address. */ static u32 netxen_decode_crb_addr(u32 addr) { int i; u32 base_addr, offset, pci_base; crb_addr_transform_setup(); pci_base = NETXEN_ADDR_ERROR; base_addr = addr & 0xfff00000; offset = addr & 0x000fffff; for (i = 0; i < NETXEN_MAX_CRB_XFORM; i++) { if (crb_addr_xform[i] == base_addr) { pci_base = i << 20; break; } } if (pci_base == NETXEN_ADDR_ERROR) return pci_base; else return pci_base + offset; } #define NETXEN_MAX_ROM_WAIT_USEC 100 static int netxen_wait_rom_done(struct netxen_adapter *adapter) { long timeout = 0; long done = 0; cond_resched(); while (done == 0) { done = NXRD32(adapter, NETXEN_ROMUSB_GLB_STATUS); done &= 2; if (++timeout >= NETXEN_MAX_ROM_WAIT_USEC) { dev_err(&adapter->pdev->dev, "Timeout reached waiting for rom done"); return -EIO; } udelay(1); } return 0; } static int do_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp) { NXWR32(adapter, NETXEN_ROMUSB_ROM_ADDRESS, addr); NXWR32(adapter, NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT, 0); NXWR32(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 3); NXWR32(adapter, NETXEN_ROMUSB_ROM_INSTR_OPCODE, 0xb); if (netxen_wait_rom_done(adapter)) { printk("Error waiting for rom done\n"); return -EIO; } /* reset abyte_cnt and dummy_byte_cnt */ NXWR32(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 0); 
udelay(10); NXWR32(adapter, NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT, 0); *valp = NXRD32(adapter, NETXEN_ROMUSB_ROM_RDATA); return 0; } static int do_rom_fast_read_words(struct netxen_adapter *adapter, int addr, u8 *bytes, size_t size) { int addridx; int ret = 0; for (addridx = addr; addridx < (addr + size); addridx += 4) { int v; ret = do_rom_fast_read(adapter, addridx, &v); if (ret != 0) break; *(__le32 *)bytes = cpu_to_le32(v); bytes += 4; } return ret; } int netxen_rom_fast_read_words(struct netxen_adapter *adapter, int addr, u8 *bytes, size_t size) { int ret; ret = netxen_rom_lock(adapter); if (ret < 0) return ret; ret = do_rom_fast_read_words(adapter, addr, bytes, size); netxen_rom_unlock(adapter); return ret; } int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp) { int ret; if (netxen_rom_lock(adapter) != 0) return -EIO; ret = do_rom_fast_read(adapter, addr, valp); netxen_rom_unlock(adapter); return ret; } #define NETXEN_BOARDTYPE 0x4008 #define NETXEN_BOARDNUM 0x400c #define NETXEN_CHIPNUM 0x4010 int netxen_pinit_from_rom(struct netxen_adapter *adapter) { int addr, val; int i, n, init_delay = 0; struct crb_addr_pair *buf; unsigned offset; u32 off; /* resetall */ netxen_rom_lock(adapter); NXWR32(adapter, NETXEN_ROMUSB_GLB_SW_RESET, 0xfeffffff); netxen_rom_unlock(adapter); if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { if (netxen_rom_fast_read(adapter, 0, &n) != 0 || (n != 0xcafecafe) || netxen_rom_fast_read(adapter, 4, &n) != 0) { printk(KERN_ERR "%s: ERROR Reading crb_init area: " "n: %08x\n", netxen_nic_driver_name, n); return -EIO; } offset = n & 0xffffU; n = (n >> 16) & 0xffffU; } else { if (netxen_rom_fast_read(adapter, 0, &n) != 0 || !(n & 0x80000000)) { printk(KERN_ERR "%s: ERROR Reading crb_init area: " "n: %08x\n", netxen_nic_driver_name, n); return -EIO; } offset = 1; n &= ~0x80000000; } if (n >= 1024) { printk(KERN_ERR "%s:n=0x%x Error! 
NetXen card flash not" " initialized.\n", __func__, n); return -EIO; } buf = kcalloc(n, sizeof(struct crb_addr_pair), GFP_KERNEL); if (buf == NULL) return -ENOMEM; for (i = 0; i < n; i++) { if (netxen_rom_fast_read(adapter, 8*i + 4*offset, &val) != 0 || netxen_rom_fast_read(adapter, 8*i + 4*offset + 4, &addr) != 0) { kfree(buf); return -EIO; } buf[i].addr = addr; buf[i].data = val; } for (i = 0; i < n; i++) { off = netxen_decode_crb_addr(buf[i].addr); if (off == NETXEN_ADDR_ERROR) { printk(KERN_ERR"CRB init value out of range %x\n", buf[i].addr); continue; } off += NETXEN_PCI_CRBSPACE; if (off & 1) continue; /* skipping cold reboot MAGIC */ if (off == NETXEN_CAM_RAM(0x1fc)) continue; if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { if (off == (NETXEN_CRB_I2C0 + 0x1c)) continue; /* do not reset PCI */ if (off == (ROMUSB_GLB + 0xbc)) continue; if (off == (ROMUSB_GLB + 0xa8)) continue; if (off == (ROMUSB_GLB + 0xc8)) /* core clock */ continue; if (off == (ROMUSB_GLB + 0x24)) /* MN clock */ continue; if (off == (ROMUSB_GLB + 0x1c)) /* MS clock */ continue; if ((off & 0x0ff00000) == NETXEN_CRB_DDR_NET) continue; if (off == (NETXEN_CRB_PEG_NET_1 + 0x18) && !NX_IS_REVISION_P3P(adapter->ahw.revision_id)) buf[i].data = 0x1020; /* skip the function enable register */ if (off == NETXEN_PCIE_REG(PCIE_SETUP_FUNCTION)) continue; if (off == NETXEN_PCIE_REG(PCIE_SETUP_FUNCTION2)) continue; if ((off & 0x0ff00000) == NETXEN_CRB_SMB) continue; } init_delay = 1; /* After writing this register, HW needs time for CRB */ /* to quiet down (else crb_window returns 0xffffffff) */ if (off == NETXEN_ROMUSB_GLB_SW_RESET) { init_delay = 1000; if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { /* hold xdma in reset also */ buf[i].data = NETXEN_NIC_XDMA_RESET; buf[i].data = 0x8000ff; } } NXWR32(adapter, off, buf[i].data); msleep(init_delay); } kfree(buf); /* disable_peg_cache_all */ /* unreset_net_cache */ if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { val = NXRD32(adapter, 
NETXEN_ROMUSB_GLB_SW_RESET); NXWR32(adapter, NETXEN_ROMUSB_GLB_SW_RESET, (val & 0xffffff0f)); } /* p2dn replyCount */ NXWR32(adapter, NETXEN_CRB_PEG_NET_D + 0xec, 0x1e); /* disable_peg_cache 0 */ NXWR32(adapter, NETXEN_CRB_PEG_NET_D + 0x4c, 8); /* disable_peg_cache 1 */ NXWR32(adapter, NETXEN_CRB_PEG_NET_I + 0x4c, 8); /* peg_clr_all */ /* peg_clr 0 */ NXWR32(adapter, NETXEN_CRB_PEG_NET_0 + 0x8, 0); NXWR32(adapter, NETXEN_CRB_PEG_NET_0 + 0xc, 0); /* peg_clr 1 */ NXWR32(adapter, NETXEN_CRB_PEG_NET_1 + 0x8, 0); NXWR32(adapter, NETXEN_CRB_PEG_NET_1 + 0xc, 0); /* peg_clr 2 */ NXWR32(adapter, NETXEN_CRB_PEG_NET_2 + 0x8, 0); NXWR32(adapter, NETXEN_CRB_PEG_NET_2 + 0xc, 0); /* peg_clr 3 */ NXWR32(adapter, NETXEN_CRB_PEG_NET_3 + 0x8, 0); NXWR32(adapter, NETXEN_CRB_PEG_NET_3 + 0xc, 0); return 0; } static struct uni_table_desc *nx_get_table_desc(const u8 *unirom, int section) { uint32_t i; struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0]; __le32 entries = cpu_to_le32(directory->num_entries); for (i = 0; i < entries; i++) { __le32 offs = cpu_to_le32(directory->findex) + (i * cpu_to_le32(directory->entry_size)); __le32 tab_type = cpu_to_le32(*((u32 *)&unirom[offs] + 8)); if (tab_type == section) return (struct uni_table_desc *) &unirom[offs]; } return NULL; } #define QLCNIC_FILEHEADER_SIZE (14 * 4) static int netxen_nic_validate_header(struct netxen_adapter *adapter) { const u8 *unirom = adapter->fw->data; struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0]; u32 fw_file_size = adapter->fw->size; u32 tab_size; __le32 entries; __le32 entry_size; if (fw_file_size < QLCNIC_FILEHEADER_SIZE) return -EINVAL; entries = cpu_to_le32(directory->num_entries); entry_size = cpu_to_le32(directory->entry_size); tab_size = cpu_to_le32(directory->findex) + (entries * entry_size); if (fw_file_size < tab_size) return -EINVAL; return 0; } static int netxen_nic_validate_bootld(struct netxen_adapter *adapter) { struct uni_table_desc *tab_desc; struct 
uni_data_desc *descr; const u8 *unirom = adapter->fw->data; __le32 idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] + NX_UNI_BOOTLD_IDX_OFF)); u32 offs; u32 tab_size; u32 data_size; tab_desc = nx_get_table_desc(unirom, NX_UNI_DIR_SECT_BOOTLD); if (!tab_desc) return -EINVAL; tab_size = cpu_to_le32(tab_desc->findex) + (cpu_to_le32(tab_desc->entry_size) * (idx + 1)); if (adapter->fw->size < tab_size) return -EINVAL; offs = cpu_to_le32(tab_desc->findex) + (cpu_to_le32(tab_desc->entry_size) * (idx)); descr = (struct uni_data_desc *)&unirom[offs]; data_size = cpu_to_le32(descr->findex) + cpu_to_le32(descr->size); if (adapter->fw->size < data_size) return -EINVAL; return 0; } static int netxen_nic_validate_fw(struct netxen_adapter *adapter) { struct uni_table_desc *tab_desc; struct uni_data_desc *descr; const u8 *unirom = adapter->fw->data; __le32 idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] + NX_UNI_FIRMWARE_IDX_OFF)); u32 offs; u32 tab_size; u32 data_size; tab_desc = nx_get_table_desc(unirom, NX_UNI_DIR_SECT_FW); if (!tab_desc) return -EINVAL; tab_size = cpu_to_le32(tab_desc->findex) + (cpu_to_le32(tab_desc->entry_size) * (idx + 1)); if (adapter->fw->size < tab_size) return -EINVAL; offs = cpu_to_le32(tab_desc->findex) + (cpu_to_le32(tab_desc->entry_size) * (idx)); descr = (struct uni_data_desc *)&unirom[offs]; data_size = cpu_to_le32(descr->findex) + cpu_to_le32(descr->size); if (adapter->fw->size < data_size) return -EINVAL; return 0; } static int netxen_nic_validate_product_offs(struct netxen_adapter *adapter) { struct uni_table_desc *ptab_descr; const u8 *unirom = adapter->fw->data; int mn_present = (NX_IS_REVISION_P2(adapter->ahw.revision_id)) ? 
1 : netxen_p3_has_mn(adapter); __le32 entries; __le32 entry_size; u32 tab_size; u32 i; ptab_descr = nx_get_table_desc(unirom, NX_UNI_DIR_SECT_PRODUCT_TBL); if (ptab_descr == NULL) return -EINVAL; entries = cpu_to_le32(ptab_descr->num_entries); entry_size = cpu_to_le32(ptab_descr->entry_size); tab_size = cpu_to_le32(ptab_descr->findex) + (entries * entry_size); if (adapter->fw->size < tab_size) return -EINVAL; nomn: for (i = 0; i < entries; i++) { __le32 flags, file_chiprev, offs; u8 chiprev = adapter->ahw.revision_id; uint32_t flagbit; offs = cpu_to_le32(ptab_descr->findex) + (i * cpu_to_le32(ptab_descr->entry_size)); flags = cpu_to_le32(*((int *)&unirom[offs] + NX_UNI_FLAGS_OFF)); file_chiprev = cpu_to_le32(*((int *)&unirom[offs] + NX_UNI_CHIP_REV_OFF)); flagbit = mn_present ? 1 : 2; if ((chiprev == file_chiprev) && ((1ULL << flagbit) & flags)) { adapter->file_prd_off = offs; return 0; } } if (mn_present && NX_IS_REVISION_P3(adapter->ahw.revision_id)) { mn_present = 0; goto nomn; } return -EINVAL; } static int netxen_nic_validate_unified_romimage(struct netxen_adapter *adapter) { if (netxen_nic_validate_header(adapter)) { dev_err(&adapter->pdev->dev, "unified image: header validation failed\n"); return -EINVAL; } if (netxen_nic_validate_product_offs(adapter)) { dev_err(&adapter->pdev->dev, "unified image: product validation failed\n"); return -EINVAL; } if (netxen_nic_validate_bootld(adapter)) { dev_err(&adapter->pdev->dev, "unified image: bootld validation failed\n"); return -EINVAL; } if (netxen_nic_validate_fw(adapter)) { dev_err(&adapter->pdev->dev, "unified image: firmware validation failed\n"); return -EINVAL; } return 0; } static struct uni_data_desc *nx_get_data_desc(struct netxen_adapter *adapter, u32 section, u32 idx_offset) { const u8 *unirom = adapter->fw->data; int idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] + idx_offset)); struct uni_table_desc *tab_desc; __le32 offs; tab_desc = nx_get_table_desc(unirom, section); if (tab_desc == NULL) 
return NULL; offs = cpu_to_le32(tab_desc->findex) + (cpu_to_le32(tab_desc->entry_size) * idx); return (struct uni_data_desc *)&unirom[offs]; } static u8 * nx_get_bootld_offs(struct netxen_adapter *adapter) { u32 offs = NETXEN_BOOTLD_START; if (adapter->fw_type == NX_UNIFIED_ROMIMAGE) offs = cpu_to_le32((nx_get_data_desc(adapter, NX_UNI_DIR_SECT_BOOTLD, NX_UNI_BOOTLD_IDX_OFF))->findex); return (u8 *)&adapter->fw->data[offs]; } static u8 * nx_get_fw_offs(struct netxen_adapter *adapter) { u32 offs = NETXEN_IMAGE_START; if (adapter->fw_type == NX_UNIFIED_ROMIMAGE) offs = cpu_to_le32((nx_get_data_desc(adapter, NX_UNI_DIR_SECT_FW, NX_UNI_FIRMWARE_IDX_OFF))->findex); return (u8 *)&adapter->fw->data[offs]; } static __le32 nx_get_fw_size(struct netxen_adapter *adapter) { if (adapter->fw_type == NX_UNIFIED_ROMIMAGE) return cpu_to_le32((nx_get_data_desc(adapter, NX_UNI_DIR_SECT_FW, NX_UNI_FIRMWARE_IDX_OFF))->size); else return cpu_to_le32( *(u32 *)&adapter->fw->data[NX_FW_SIZE_OFFSET]); } static __le32 nx_get_fw_version(struct netxen_adapter *adapter) { struct uni_data_desc *fw_data_desc; const struct firmware *fw = adapter->fw; __le32 major, minor, sub; const u8 *ver_str; int i, ret = 0; if (adapter->fw_type == NX_UNIFIED_ROMIMAGE) { fw_data_desc = nx_get_data_desc(adapter, NX_UNI_DIR_SECT_FW, NX_UNI_FIRMWARE_IDX_OFF); ver_str = fw->data + cpu_to_le32(fw_data_desc->findex) + cpu_to_le32(fw_data_desc->size) - 17; for (i = 0; i < 12; i++) { if (!strncmp(&ver_str[i], "REV=", 4)) { ret = sscanf(&ver_str[i+4], "%u.%u.%u ", &major, &minor, &sub); break; } } if (ret != 3) return 0; return major + (minor << 8) + (sub << 16); } else return cpu_to_le32(*(u32 *)&fw->data[NX_FW_VERSION_OFFSET]); } static __le32 nx_get_bios_version(struct netxen_adapter *adapter) { const struct firmware *fw = adapter->fw; __le32 bios_ver, prd_off = adapter->file_prd_off; if (adapter->fw_type == NX_UNIFIED_ROMIMAGE) { bios_ver = cpu_to_le32(*((u32 *) (&fw->data[prd_off]) + NX_UNI_BIOS_VERSION_OFF)); 
return (bios_ver << 16) + ((bios_ver >> 8) & 0xff00) + (bios_ver >> 24); } else return cpu_to_le32(*(u32 *)&fw->data[NX_BIOS_VERSION_OFFSET]); } int netxen_need_fw_reset(struct netxen_adapter *adapter) { u32 count, old_count; u32 val, version, major, minor, build; int i, timeout; u8 fw_type; /* NX2031 firmware doesn't support heartbit */ if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) return 1; if (adapter->need_fw_reset) return 1; /* last attempt had failed */ if (NXRD32(adapter, CRB_CMDPEG_STATE) == PHAN_INITIALIZE_FAILED) return 1; old_count = NXRD32(adapter, NETXEN_PEG_ALIVE_COUNTER); for (i = 0; i < 10; i++) { timeout = msleep_interruptible(200); if (timeout) { NXWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED); return -EINTR; } count = NXRD32(adapter, NETXEN_PEG_ALIVE_COUNTER); if (count != old_count) break; } /* firmware is dead */ if (count == old_count) return 1; /* check if we have got newer or different file firmware */ if (adapter->fw) { val = nx_get_fw_version(adapter); version = NETXEN_DECODE_VERSION(val); major = NXRD32(adapter, NETXEN_FW_VERSION_MAJOR); minor = NXRD32(adapter, NETXEN_FW_VERSION_MINOR); build = NXRD32(adapter, NETXEN_FW_VERSION_SUB); if (version > NETXEN_VERSION_CODE(major, minor, build)) return 1; if (version == NETXEN_VERSION_CODE(major, minor, build) && adapter->fw_type != NX_UNIFIED_ROMIMAGE) { val = NXRD32(adapter, NETXEN_MIU_MN_CONTROL); fw_type = (val & 0x4) ? 
NX_P3_CT_ROMIMAGE : NX_P3_MN_ROMIMAGE; if (adapter->fw_type != fw_type) return 1; } } return 0; } #define NETXEN_MIN_P3_FW_SUPP NETXEN_VERSION_CODE(4, 0, 505) int netxen_check_flash_fw_compatibility(struct netxen_adapter *adapter) { u32 flash_fw_ver, min_fw_ver; if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) return 0; if (netxen_rom_fast_read(adapter, NX_FW_VERSION_OFFSET, (int *)&flash_fw_ver)) { dev_err(&adapter->pdev->dev, "Unable to read flash fw" "version\n"); return -EIO; } flash_fw_ver = NETXEN_DECODE_VERSION(flash_fw_ver); min_fw_ver = NETXEN_MIN_P3_FW_SUPP; if (flash_fw_ver >= min_fw_ver) return 0; dev_info(&adapter->pdev->dev, "Flash fw[%d.%d.%d] is < min fw supported" "[4.0.505]. Please update firmware on flash\n", _major(flash_fw_ver), _minor(flash_fw_ver), _build(flash_fw_ver)); return -EINVAL; } static char *fw_name[] = { NX_P2_MN_ROMIMAGE_NAME, NX_P3_CT_ROMIMAGE_NAME, NX_P3_MN_ROMIMAGE_NAME, NX_UNIFIED_ROMIMAGE_NAME, NX_FLASH_ROMIMAGE_NAME, }; int netxen_load_firmware(struct netxen_adapter *adapter) { u64 *ptr64; u32 i, flashaddr, size; const struct firmware *fw = adapter->fw; struct pci_dev *pdev = adapter->pdev; dev_info(&pdev->dev, "loading firmware from %s\n", fw_name[adapter->fw_type]); if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) NXWR32(adapter, NETXEN_ROMUSB_GLB_CAS_RST, 1); if (fw) { __le64 data; size = (NETXEN_IMAGE_START - NETXEN_BOOTLD_START) / 8; ptr64 = (u64 *)nx_get_bootld_offs(adapter); flashaddr = NETXEN_BOOTLD_START; for (i = 0; i < size; i++) { data = cpu_to_le64(ptr64[i]); if (adapter->pci_mem_write(adapter, flashaddr, data)) return -EIO; flashaddr += 8; } size = (__force u32)nx_get_fw_size(adapter) / 8; ptr64 = (u64 *)nx_get_fw_offs(adapter); flashaddr = NETXEN_IMAGE_START; for (i = 0; i < size; i++) { data = cpu_to_le64(ptr64[i]); if (adapter->pci_mem_write(adapter, flashaddr, data)) return -EIO; flashaddr += 8; } size = (__force u32)nx_get_fw_size(adapter) % 8; if (size) { data = cpu_to_le64(ptr64[i]); if 
(adapter->pci_mem_write(adapter, flashaddr, data)) return -EIO; } } else { u64 data; u32 hi, lo; size = (NETXEN_IMAGE_START - NETXEN_BOOTLD_START) / 8; flashaddr = NETXEN_BOOTLD_START; for (i = 0; i < size; i++) { if (netxen_rom_fast_read(adapter, flashaddr, (int *)&lo) != 0) return -EIO; if (netxen_rom_fast_read(adapter, flashaddr + 4, (int *)&hi) != 0) return -EIO; /* hi, lo are already in host endian byteorder */ data = (((u64)hi << 32) | lo); if (adapter->pci_mem_write(adapter, flashaddr, data)) return -EIO; flashaddr += 8; } } msleep(1); if (NX_IS_REVISION_P3P(adapter->ahw.revision_id)) { NXWR32(adapter, NETXEN_CRB_PEG_NET_0 + 0x18, 0x1020); NXWR32(adapter, NETXEN_ROMUSB_GLB_SW_RESET, 0x80001e); } else if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) NXWR32(adapter, NETXEN_ROMUSB_GLB_SW_RESET, 0x80001d); else { NXWR32(adapter, NETXEN_ROMUSB_GLB_CHIP_CLK_CTRL, 0x3fff); NXWR32(adapter, NETXEN_ROMUSB_GLB_CAS_RST, 0); } return 0; } static int netxen_validate_firmware(struct netxen_adapter *adapter) { __le32 val; __le32 flash_fw_ver; u32 file_fw_ver, min_ver, bios; struct pci_dev *pdev = adapter->pdev; const struct firmware *fw = adapter->fw; u8 fw_type = adapter->fw_type; u32 crbinit_fix_fw; if (fw_type == NX_UNIFIED_ROMIMAGE) { if (netxen_nic_validate_unified_romimage(adapter)) return -EINVAL; } else { val = cpu_to_le32(*(u32 *)&fw->data[NX_FW_MAGIC_OFFSET]); if ((__force u32)val != NETXEN_BDINFO_MAGIC) return -EINVAL; if (fw->size < NX_FW_MIN_SIZE) return -EINVAL; } val = nx_get_fw_version(adapter); if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) min_ver = NETXEN_MIN_P3_FW_SUPP; else min_ver = NETXEN_VERSION_CODE(3, 4, 216); file_fw_ver = NETXEN_DECODE_VERSION(val); if ((_major(file_fw_ver) > _NETXEN_NIC_LINUX_MAJOR) || (file_fw_ver < min_ver)) { dev_err(&pdev->dev, "%s: firmware version %d.%d.%d unsupported\n", fw_name[fw_type], _major(file_fw_ver), _minor(file_fw_ver), _build(file_fw_ver)); return -EINVAL; } val = nx_get_bios_version(adapter); 
netxen_rom_fast_read(adapter, NX_BIOS_VERSION_OFFSET, (int *)&bios); if ((__force u32)val != bios) { dev_err(&pdev->dev, "%s: firmware bios is incompatible\n", fw_name[fw_type]); return -EINVAL; } if (netxen_rom_fast_read(adapter, NX_FW_VERSION_OFFSET, (int *)&flash_fw_ver)) { dev_err(&pdev->dev, "Unable to read flash fw version\n"); return -EIO; } flash_fw_ver = NETXEN_DECODE_VERSION(flash_fw_ver); /* New fw from file is not allowed, if fw on flash is < 4.0.554 */ crbinit_fix_fw = NETXEN_VERSION_CODE(4, 0, 554); if (file_fw_ver >= crbinit_fix_fw && flash_fw_ver < crbinit_fix_fw && NX_IS_REVISION_P3(adapter->ahw.revision_id)) { dev_err(&pdev->dev, "Incompatibility detected between driver " "and firmware version on flash. This configuration " "is not recommended. Please update the firmware on " "flash immediately\n"); return -EINVAL; } /* check if flashed firmware is newer only for no-mn and P2 case*/ if (!netxen_p3_has_mn(adapter) || NX_IS_REVISION_P2(adapter->ahw.revision_id)) { if (flash_fw_ver > file_fw_ver) { dev_info(&pdev->dev, "%s: firmware is older than flash\n", fw_name[fw_type]); return -EINVAL; } } NXWR32(adapter, NETXEN_CAM_RAM(0x1fc), NETXEN_BDINFO_MAGIC); return 0; } static void nx_get_next_fwtype(struct netxen_adapter *adapter) { u8 fw_type; switch (adapter->fw_type) { case NX_UNKNOWN_ROMIMAGE: fw_type = NX_UNIFIED_ROMIMAGE; break; case NX_UNIFIED_ROMIMAGE: if (NX_IS_REVISION_P3P(adapter->ahw.revision_id)) fw_type = NX_FLASH_ROMIMAGE; else if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) fw_type = NX_P2_MN_ROMIMAGE; else if (netxen_p3_has_mn(adapter)) fw_type = NX_P3_MN_ROMIMAGE; else fw_type = NX_P3_CT_ROMIMAGE; break; case NX_P3_MN_ROMIMAGE: fw_type = NX_P3_CT_ROMIMAGE; break; case NX_P2_MN_ROMIMAGE: case NX_P3_CT_ROMIMAGE: default: fw_type = NX_FLASH_ROMIMAGE; break; } adapter->fw_type = fw_type; } static int netxen_p3_has_mn(struct netxen_adapter *adapter) { u32 capability, flashed_ver; capability = 0; /* NX2031 always had MN */ if 
(NX_IS_REVISION_P2(adapter->ahw.revision_id)) return 1; netxen_rom_fast_read(adapter, NX_FW_VERSION_OFFSET, (int *)&flashed_ver); flashed_ver = NETXEN_DECODE_VERSION(flashed_ver); if (flashed_ver >= NETXEN_VERSION_CODE(4, 0, 220)) { capability = NXRD32(adapter, NX_PEG_TUNE_CAPABILITY); if (capability & NX_PEG_TUNE_MN_PRESENT) return 1; } return 0; } void netxen_request_firmware(struct netxen_adapter *adapter) { struct pci_dev *pdev = adapter->pdev; int rc = 0; adapter->fw_type = NX_UNKNOWN_ROMIMAGE; next: nx_get_next_fwtype(adapter); if (adapter->fw_type == NX_FLASH_ROMIMAGE) { adapter->fw = NULL; } else { rc = request_firmware(&adapter->fw, fw_name[adapter->fw_type], &pdev->dev); if (rc != 0) goto next; rc = netxen_validate_firmware(adapter); if (rc != 0) { release_firmware(adapter->fw); msleep(1); goto next; } } } void netxen_release_firmware(struct netxen_adapter *adapter) { if (adapter->fw) release_firmware(adapter->fw); adapter->fw = NULL; } int netxen_init_dummy_dma(struct netxen_adapter *adapter) { u64 addr; u32 hi, lo; if (!NX_IS_REVISION_P2(adapter->ahw.revision_id)) return 0; adapter->dummy_dma.addr = pci_alloc_consistent(adapter->pdev, NETXEN_HOST_DUMMY_DMA_SIZE, &adapter->dummy_dma.phys_addr); if (adapter->dummy_dma.addr == NULL) { dev_err(&adapter->pdev->dev, "ERROR: Could not allocate dummy DMA memory\n"); return -ENOMEM; } addr = (uint64_t) adapter->dummy_dma.phys_addr; hi = (addr >> 32) & 0xffffffff; lo = addr & 0xffffffff; NXWR32(adapter, CRB_HOST_DUMMY_BUF_ADDR_HI, hi); NXWR32(adapter, CRB_HOST_DUMMY_BUF_ADDR_LO, lo); return 0; } /* * NetXen DMA watchdog control: * * Bit 0 : enabled => R/O: 1 watchdog active, 0 inactive * Bit 1 : disable_request => 1 req disable dma watchdog * Bit 2 : enable_request => 1 req enable dma watchdog * Bit 3-31 : unused */ void netxen_free_dummy_dma(struct netxen_adapter *adapter) { int i = 100; u32 ctrl; if (!NX_IS_REVISION_P2(adapter->ahw.revision_id)) return; if (!adapter->dummy_dma.addr) return; ctrl = 
NXRD32(adapter, NETXEN_DMA_WATCHDOG_CTRL); if ((ctrl & 0x1) != 0) { NXWR32(adapter, NETXEN_DMA_WATCHDOG_CTRL, (ctrl | 0x2)); while ((ctrl & 0x1) != 0) { msleep(50); ctrl = NXRD32(adapter, NETXEN_DMA_WATCHDOG_CTRL); if (--i == 0) break; } } if (i) { pci_free_consistent(adapter->pdev, NETXEN_HOST_DUMMY_DMA_SIZE, adapter->dummy_dma.addr, adapter->dummy_dma.phys_addr); adapter->dummy_dma.addr = NULL; } else dev_err(&adapter->pdev->dev, "dma_watchdog_shutdown failed\n"); } int netxen_phantom_init(struct netxen_adapter *adapter, int pegtune_val) { u32 val = 0; int retries = 60; if (pegtune_val) return 0; do { val = NXRD32(adapter, CRB_CMDPEG_STATE); switch (val) { case PHAN_INITIALIZE_COMPLETE: case PHAN_INITIALIZE_ACK: return 0; case PHAN_INITIALIZE_FAILED: goto out_err; default: break; } msleep(500); } while (--retries); NXWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED); out_err: dev_warn(&adapter->pdev->dev, "firmware init failed\n"); return -EIO; } static int netxen_receive_peg_ready(struct netxen_adapter *adapter) { u32 val = 0; int retries = 2000; do { val = NXRD32(adapter, CRB_RCVPEG_STATE); if (val == PHAN_PEG_RCV_INITIALIZED) return 0; msleep(10); } while (--retries); if (!retries) { printk(KERN_ERR "Receive Peg initialization not " "complete, state: 0x%x.\n", val); return -EIO; } return 0; } int netxen_init_firmware(struct netxen_adapter *adapter) { int err; err = netxen_receive_peg_ready(adapter); if (err) return err; NXWR32(adapter, CRB_NIC_CAPABILITIES_HOST, INTR_SCHEME_PERPORT); NXWR32(adapter, CRB_MPORT_MODE, MPORT_MULTI_FUNCTION_MODE); NXWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_ACK); if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) NXWR32(adapter, CRB_NIC_MSI_MODE_HOST, MSI_MODE_MULTIFUNC); return err; } static void netxen_handle_linkevent(struct netxen_adapter *adapter, nx_fw_msg_t *msg) { u32 cable_OUI; u16 cable_len; u16 link_speed; u8 link_status, module, duplex, autoneg; struct net_device *netdev = adapter->netdev; 
adapter->has_link_events = 1; cable_OUI = msg->body[1] & 0xffffffff; cable_len = (msg->body[1] >> 32) & 0xffff; link_speed = (msg->body[1] >> 48) & 0xffff; link_status = msg->body[2] & 0xff; duplex = (msg->body[2] >> 16) & 0xff; autoneg = (msg->body[2] >> 24) & 0xff; module = (msg->body[2] >> 8) & 0xff; if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE) { printk(KERN_INFO "%s: unsupported cable: OUI 0x%x, length %d\n", netdev->name, cable_OUI, cable_len); } else if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN) { printk(KERN_INFO "%s: unsupported cable length %d\n", netdev->name, cable_len); } netxen_advert_link_change(adapter, link_status); /* update link parameters */ if (duplex == LINKEVENT_FULL_DUPLEX) adapter->link_duplex = DUPLEX_FULL; else adapter->link_duplex = DUPLEX_HALF; adapter->module_type = module; adapter->link_autoneg = autoneg; adapter->link_speed = link_speed; } static void netxen_handle_fw_message(int desc_cnt, int index, struct nx_host_sds_ring *sds_ring) { nx_fw_msg_t msg; struct status_desc *desc; int i = 0, opcode; while (desc_cnt > 0 && i < 8) { desc = &sds_ring->desc_head[index]; msg.words[i++] = le64_to_cpu(desc->status_desc_data[0]); msg.words[i++] = le64_to_cpu(desc->status_desc_data[1]); index = get_next_index(index, sds_ring->num_desc); desc_cnt--; } opcode = netxen_get_nic_msg_opcode(msg.body[0]); switch (opcode) { case NX_NIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE: netxen_handle_linkevent(sds_ring->adapter, &msg); break; default: break; } } static int netxen_alloc_rx_skb(struct netxen_adapter *adapter, struct nx_host_rds_ring *rds_ring, struct netxen_rx_buffer *buffer) { struct sk_buff *skb; dma_addr_t dma; struct pci_dev *pdev = adapter->pdev; buffer->skb = netdev_alloc_skb(adapter->netdev, rds_ring->skb_size); if (!buffer->skb) return 1; skb = buffer->skb; if (!adapter->ahw.cut_through) skb_reserve(skb, 2); dma = pci_map_single(pdev, skb->data, rds_ring->dma_size, PCI_DMA_FROMDEVICE); if (pci_dma_mapping_error(pdev, 
dma)) { dev_kfree_skb_any(skb); buffer->skb = NULL; return 1; } buffer->skb = skb; buffer->dma = dma; buffer->state = NETXEN_BUFFER_BUSY; return 0; } static struct sk_buff *netxen_process_rxbuf(struct netxen_adapter *adapter, struct nx_host_rds_ring *rds_ring, u16 index, u16 cksum) { struct netxen_rx_buffer *buffer; struct sk_buff *skb; buffer = &rds_ring->rx_buf_arr[index]; pci_unmap_single(adapter->pdev, buffer->dma, rds_ring->dma_size, PCI_DMA_FROMDEVICE); skb = buffer->skb; if (!skb) goto no_skb; if (likely((adapter->netdev->features & NETIF_F_RXCSUM) && cksum == STATUS_CKSUM_OK)) { adapter->stats.csummed++; skb->ip_summed = CHECKSUM_UNNECESSARY; } else skb->ip_summed = CHECKSUM_NONE; skb->dev = adapter->netdev; buffer->skb = NULL; no_skb: buffer->state = NETXEN_BUFFER_FREE; return skb; } static struct netxen_rx_buffer * netxen_process_rcv(struct netxen_adapter *adapter, struct nx_host_sds_ring *sds_ring, int ring, u64 sts_data0) { struct net_device *netdev = adapter->netdev; struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; struct netxen_rx_buffer *buffer; struct sk_buff *skb; struct nx_host_rds_ring *rds_ring; int index, length, cksum, pkt_offset; if (unlikely(ring >= adapter->max_rds_rings)) return NULL; rds_ring = &recv_ctx->rds_rings[ring]; index = netxen_get_sts_refhandle(sts_data0); if (unlikely(index >= rds_ring->num_desc)) return NULL; buffer = &rds_ring->rx_buf_arr[index]; length = netxen_get_sts_totallength(sts_data0); cksum = netxen_get_sts_status(sts_data0); pkt_offset = netxen_get_sts_pkt_offset(sts_data0); skb = netxen_process_rxbuf(adapter, rds_ring, index, cksum); if (!skb) return buffer; if (length > rds_ring->skb_size) skb_put(skb, rds_ring->skb_size); else skb_put(skb, length); if (pkt_offset) skb_pull(skb, pkt_offset); skb->protocol = eth_type_trans(skb, netdev); napi_gro_receive(&sds_ring->napi, skb); adapter->stats.rx_pkts++; adapter->stats.rxbytes += length; return buffer; } #define TCP_HDR_SIZE 20 #define TCP_TS_OPTION_SIZE 12 
#define TCP_TS_HDR_SIZE (TCP_HDR_SIZE + TCP_TS_OPTION_SIZE) static struct netxen_rx_buffer * netxen_process_lro(struct netxen_adapter *adapter, struct nx_host_sds_ring *sds_ring, int ring, u64 sts_data0, u64 sts_data1) { struct net_device *netdev = adapter->netdev; struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; struct netxen_rx_buffer *buffer; struct sk_buff *skb; struct nx_host_rds_ring *rds_ring; struct iphdr *iph; struct tcphdr *th; bool push, timestamp; int l2_hdr_offset, l4_hdr_offset; int index; u16 lro_length, length, data_offset; u32 seq_number; u8 vhdr_len = 0; if (unlikely(ring > adapter->max_rds_rings)) return NULL; rds_ring = &recv_ctx->rds_rings[ring]; index = netxen_get_lro_sts_refhandle(sts_data0); if (unlikely(index > rds_ring->num_desc)) return NULL; buffer = &rds_ring->rx_buf_arr[index]; timestamp = netxen_get_lro_sts_timestamp(sts_data0); lro_length = netxen_get_lro_sts_length(sts_data0); l2_hdr_offset = netxen_get_lro_sts_l2_hdr_offset(sts_data0); l4_hdr_offset = netxen_get_lro_sts_l4_hdr_offset(sts_data0); push = netxen_get_lro_sts_push_flag(sts_data0); seq_number = netxen_get_lro_sts_seq_number(sts_data1); skb = netxen_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK); if (!skb) return buffer; if (timestamp) data_offset = l4_hdr_offset + TCP_TS_HDR_SIZE; else data_offset = l4_hdr_offset + TCP_HDR_SIZE; skb_put(skb, lro_length + data_offset); skb_pull(skb, l2_hdr_offset); skb->protocol = eth_type_trans(skb, netdev); if (skb->protocol == htons(ETH_P_8021Q)) vhdr_len = VLAN_HLEN; iph = (struct iphdr *)(skb->data + vhdr_len); th = (struct tcphdr *)((skb->data + vhdr_len) + (iph->ihl << 2)); length = (iph->ihl << 2) + (th->doff << 2) + lro_length; iph->tot_len = htons(length); iph->check = 0; iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl); th->psh = push; th->seq = htonl(seq_number); length = skb->len; netif_receive_skb(skb); adapter->stats.lro_pkts++; adapter->stats.rxbytes += length; return buffer; } #define 
netxen_merge_rx_buffers(list, head) \ do { list_splice_tail_init(list, head); } while (0); int netxen_process_rcv_ring(struct nx_host_sds_ring *sds_ring, int max) { struct netxen_adapter *adapter = sds_ring->adapter; struct list_head *cur; struct status_desc *desc; struct netxen_rx_buffer *rxbuf; u32 consumer = sds_ring->consumer; int count = 0; u64 sts_data0, sts_data1; int opcode, ring = 0, desc_cnt; while (count < max) { desc = &sds_ring->desc_head[consumer]; sts_data0 = le64_to_cpu(desc->status_desc_data[0]); if (!(sts_data0 & STATUS_OWNER_HOST)) break; desc_cnt = netxen_get_sts_desc_cnt(sts_data0); opcode = netxen_get_sts_opcode(sts_data0); switch (opcode) { case NETXEN_NIC_RXPKT_DESC: case NETXEN_OLD_RXPKT_DESC: case NETXEN_NIC_SYN_OFFLOAD: ring = netxen_get_sts_type(sts_data0); rxbuf = netxen_process_rcv(adapter, sds_ring, ring, sts_data0); break; case NETXEN_NIC_LRO_DESC: ring = netxen_get_lro_sts_type(sts_data0); sts_data1 = le64_to_cpu(desc->status_desc_data[1]); rxbuf = netxen_process_lro(adapter, sds_ring, ring, sts_data0, sts_data1); break; case NETXEN_NIC_RESPONSE_DESC: netxen_handle_fw_message(desc_cnt, consumer, sds_ring); default: goto skip; } WARN_ON(desc_cnt > 1); if (rxbuf) list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]); skip: for (; desc_cnt > 0; desc_cnt--) { desc = &sds_ring->desc_head[consumer]; desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM); consumer = get_next_index(consumer, sds_ring->num_desc); } count++; } for (ring = 0; ring < adapter->max_rds_rings; ring++) { struct nx_host_rds_ring *rds_ring = &adapter->recv_ctx.rds_rings[ring]; if (!list_empty(&sds_ring->free_list[ring])) { list_for_each(cur, &sds_ring->free_list[ring]) { rxbuf = list_entry(cur, struct netxen_rx_buffer, list); netxen_alloc_rx_skb(adapter, rds_ring, rxbuf); } spin_lock(&rds_ring->lock); netxen_merge_rx_buffers(&sds_ring->free_list[ring], &rds_ring->free_list); spin_unlock(&rds_ring->lock); } netxen_post_rx_buffers_nodb(adapter, rds_ring); } if 
(count) { sds_ring->consumer = consumer; NXWRIO(adapter, sds_ring->crb_sts_consumer, consumer); } return count; } /* Process Command status ring */ int netxen_process_cmd_ring(struct netxen_adapter *adapter) { u32 sw_consumer, hw_consumer; int count = 0, i; struct netxen_cmd_buffer *buffer; struct pci_dev *pdev = adapter->pdev; struct net_device *netdev = adapter->netdev; struct netxen_skb_frag *frag; int done = 0; struct nx_host_tx_ring *tx_ring = adapter->tx_ring; if (!spin_trylock(&adapter->tx_clean_lock)) return 1; sw_consumer = tx_ring->sw_consumer; hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer)); while (sw_consumer != hw_consumer) { buffer = &tx_ring->cmd_buf_arr[sw_consumer]; if (buffer->skb) { frag = &buffer->frag_array[0]; pci_unmap_single(pdev, frag->dma, frag->length, PCI_DMA_TODEVICE); frag->dma = 0ULL; for (i = 1; i < buffer->frag_count; i++) { frag++; /* Get the next frag */ pci_unmap_page(pdev, frag->dma, frag->length, PCI_DMA_TODEVICE); frag->dma = 0ULL; } adapter->stats.xmitfinished++; dev_kfree_skb_any(buffer->skb); buffer->skb = NULL; } sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc); if (++count >= MAX_STATUS_HANDLE) break; } if (count && netif_running(netdev)) { tx_ring->sw_consumer = sw_consumer; smp_mb(); if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) if (netxen_tx_avail(tx_ring) > TX_STOP_THRESH) netif_wake_queue(netdev); adapter->tx_timeo_cnt = 0; } /* * If everything is freed up to consumer then check if the ring is full * If the ring is full then check if more needs to be freed and * schedule the call back again. * * This happens when there are 2 CPUs. One could be freeing and the * other filling it. If the ring is full when we get out of here and * the card has already interrupted the host then the host can miss the * interrupt. * * There is still a possible race condition and the host could miss an * interrupt. The card has to take care of this. 
*/ hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer)); done = (sw_consumer == hw_consumer); spin_unlock(&adapter->tx_clean_lock); return done; } void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ringid, struct nx_host_rds_ring *rds_ring) { struct rcv_desc *pdesc; struct netxen_rx_buffer *buffer; int producer, count = 0; netxen_ctx_msg msg = 0; struct list_head *head; producer = rds_ring->producer; head = &rds_ring->free_list; while (!list_empty(head)) { buffer = list_entry(head->next, struct netxen_rx_buffer, list); if (!buffer->skb) { if (netxen_alloc_rx_skb(adapter, rds_ring, buffer)) break; } count++; list_del(&buffer->list); /* make a rcv descriptor */ pdesc = &rds_ring->desc_head[producer]; pdesc->addr_buffer = cpu_to_le64(buffer->dma); pdesc->reference_handle = cpu_to_le16(buffer->ref_handle); pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size); producer = get_next_index(producer, rds_ring->num_desc); } if (count) { rds_ring->producer = producer; NXWRIO(adapter, rds_ring->crb_rcv_producer, (producer-1) & (rds_ring->num_desc-1)); if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { /* * Write a doorbell msg to tell phanmon of change in * receive ring producer * Only for firmware version < 4.0.0 */ netxen_set_msg_peg_id(msg, NETXEN_RCV_PEG_DB_ID); netxen_set_msg_privid(msg); netxen_set_msg_count(msg, ((producer - 1) & (rds_ring->num_desc - 1))); netxen_set_msg_ctxid(msg, adapter->portnum); netxen_set_msg_opcode(msg, NETXEN_RCV_PRODUCER(ringid)); NXWRIO(adapter, DB_NORMALIZE(adapter, NETXEN_RCV_PRODUCER_OFFSET), msg); } } } static void netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter, struct nx_host_rds_ring *rds_ring) { struct rcv_desc *pdesc; struct netxen_rx_buffer *buffer; int producer, count = 0; struct list_head *head; if (!spin_trylock(&rds_ring->lock)) return; producer = rds_ring->producer; head = &rds_ring->free_list; while (!list_empty(head)) { buffer = list_entry(head->next, struct netxen_rx_buffer, list); if (!buffer->skb) { 
if (netxen_alloc_rx_skb(adapter, rds_ring, buffer)) break; } count++; list_del(&buffer->list); /* make a rcv descriptor */ pdesc = &rds_ring->desc_head[producer]; pdesc->reference_handle = cpu_to_le16(buffer->ref_handle); pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size); pdesc->addr_buffer = cpu_to_le64(buffer->dma); producer = get_next_index(producer, rds_ring->num_desc); } if (count) { rds_ring->producer = producer; NXWRIO(adapter, rds_ring->crb_rcv_producer, (producer - 1) & (rds_ring->num_desc - 1)); } spin_unlock(&rds_ring->lock); } void netxen_nic_clear_stats(struct netxen_adapter *adapter) { memset(&adapter->stats, 0, sizeof(adapter->stats)); }
gpl-2.0
dsb9938/Rezound-ICS-Kernel-Old
fs/jfs/jfs_dmap.c
2531
111578
/* * Copyright (C) International Business Machines Corp., 2000-2004 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/fs.h> #include <linux/slab.h> #include "jfs_incore.h" #include "jfs_superblock.h" #include "jfs_dmap.h" #include "jfs_imap.h" #include "jfs_lock.h" #include "jfs_metapage.h" #include "jfs_debug.h" /* * SERIALIZATION of the Block Allocation Map. * * the working state of the block allocation map is accessed in * two directions: * * 1) allocation and free requests that start at the dmap * level and move up through the dmap control pages (i.e. * the vast majority of requests). * * 2) allocation requests that start at dmap control page * level and work down towards the dmaps. * * the serialization scheme used here is as follows. * * requests which start at the bottom are serialized against each * other through buffers and each requests holds onto its buffers * as it works it way up from a single dmap to the required level * of dmap control page. * requests that start at the top are serialized against each other * and request that start from the bottom by the multiple read/single * write inode lock of the bmap inode. requests starting at the top * take this lock in write mode while request starting at the bottom * take the lock in read mode. 
a single top-down request may proceed * exclusively while multiple bottoms-up requests may proceed * simultaneously (under the protection of busy buffers). * * in addition to information found in dmaps and dmap control pages, * the working state of the block allocation map also includes read/ * write information maintained in the bmap descriptor (i.e. total * free block count, allocation group level free block counts). * a single exclusive lock (BMAP_LOCK) is used to guard this information * in the face of multiple-bottoms up requests. * (lock ordering: IREAD_LOCK, BMAP_LOCK); * * accesses to the persistent state of the block allocation map (limited * to the persistent bitmaps in dmaps) is guarded by (busy) buffers. */ #define BMAP_LOCK_INIT(bmp) mutex_init(&bmp->db_bmaplock) #define BMAP_LOCK(bmp) mutex_lock(&bmp->db_bmaplock) #define BMAP_UNLOCK(bmp) mutex_unlock(&bmp->db_bmaplock) /* * forward references */ static void dbAllocBits(struct bmap * bmp, struct dmap * dp, s64 blkno, int nblocks); static void dbSplit(dmtree_t * tp, int leafno, int splitsz, int newval); static int dbBackSplit(dmtree_t * tp, int leafno); static int dbJoin(dmtree_t * tp, int leafno, int newval); static void dbAdjTree(dmtree_t * tp, int leafno, int newval); static int dbAdjCtl(struct bmap * bmp, s64 blkno, int newval, int alloc, int level); static int dbAllocAny(struct bmap * bmp, s64 nblocks, int l2nb, s64 * results); static int dbAllocNext(struct bmap * bmp, struct dmap * dp, s64 blkno, int nblocks); static int dbAllocNear(struct bmap * bmp, struct dmap * dp, s64 blkno, int nblocks, int l2nb, s64 * results); static int dbAllocDmap(struct bmap * bmp, struct dmap * dp, s64 blkno, int nblocks); static int dbAllocDmapLev(struct bmap * bmp, struct dmap * dp, int nblocks, int l2nb, s64 * results); static int dbAllocAG(struct bmap * bmp, int agno, s64 nblocks, int l2nb, s64 * results); static int dbAllocCtl(struct bmap * bmp, s64 nblocks, int l2nb, s64 blkno, s64 * results); static int 
dbExtend(struct inode *ip, s64 blkno, s64 nblocks, s64 addnblocks); static int dbFindBits(u32 word, int l2nb); static int dbFindCtl(struct bmap * bmp, int l2nb, int level, s64 * blkno); static int dbFindLeaf(dmtree_t * tp, int l2nb, int *leafidx); static int dbFreeBits(struct bmap * bmp, struct dmap * dp, s64 blkno, int nblocks); static int dbFreeDmap(struct bmap * bmp, struct dmap * dp, s64 blkno, int nblocks); static int dbMaxBud(u8 * cp); s64 dbMapFileSizeToMapSize(struct inode *ipbmap); static int blkstol2(s64 nb); static int cntlz(u32 value); static int cnttz(u32 word); static int dbAllocDmapBU(struct bmap * bmp, struct dmap * dp, s64 blkno, int nblocks); static int dbInitDmap(struct dmap * dp, s64 blkno, int nblocks); static int dbInitDmapTree(struct dmap * dp); static int dbInitTree(struct dmaptree * dtp); static int dbInitDmapCtl(struct dmapctl * dcp, int level, int i); static int dbGetL2AGSize(s64 nblocks); /* * buddy table * * table used for determining buddy sizes within characters of * dmap bitmap words. the characters themselves serve as indexes * into the table, with the table elements yielding the maximum * binary buddy of free bits within the character. 
 */
static const s8 budtab[256] = {
	3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
	2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,
	2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,
	2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,
	2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,
	2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,
	2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,
	2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,
	2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,
	2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, -1	/* 0xff: all bits allocated -> NOFREE */
};

/*
 * NAME:	dbMount()
 *
 * FUNCTION:	initialize the block allocation map.
 *
 *		memory is allocated for the in-core bmap descriptor and
 *		the in-core descriptor is initialized from disk.
 *
 * PARAMETERS:
 *	ipbmap	- pointer to in-core inode for the block map.
 *
 * RETURN VALUES:
 *	0	- success
 *	-ENOMEM	- insufficient memory
 *	-EIO	- i/o error
 */
int dbMount(struct inode *ipbmap)
{
	struct bmap *bmp;
	struct dbmap_disk *dbmp_le;	/* on-disk (little-endian) descriptor */
	struct metapage *mp;
	int i;

	/*
	 * allocate/initialize the in-memory bmap descriptor
	 */
	/* allocate memory for the in-memory bmap descriptor */
	bmp = kmalloc(sizeof(struct bmap), GFP_KERNEL);
	if (bmp == NULL)
		return -ENOMEM;

	/* read the on-disk bmap descriptor. */
	mp = read_metapage(ipbmap,
			   BMAPBLKNO << JFS_SBI(ipbmap->i_sb)->l2nbperpage,
			   PSIZE, 0);
	if (mp == NULL) {
		kfree(bmp);
		return -EIO;
	}

	/* copy the on-disk bmap descriptor to its in-memory version,
	 * converting each field from little-endian disk order.
	 *
	 * NOTE(review): none of the on-disk fields are sanity-checked
	 * here (e.g. db_l2nbperpage used as a shift count, db_maxag /
	 * db_agpref used to index db_agfree[MAXAG], db_agl2size used as
	 * a shift).  A corrupted image could presumably trigger
	 * out-of-range shifts or indexing later on — TODO confirm and
	 * consider validating before use.
	 */
	dbmp_le = (struct dbmap_disk *) mp->data;
	bmp->db_mapsize = le64_to_cpu(dbmp_le->dn_mapsize);
	bmp->db_nfree = le64_to_cpu(dbmp_le->dn_nfree);
	bmp->db_l2nbperpage = le32_to_cpu(dbmp_le->dn_l2nbperpage);
	bmp->db_numag = le32_to_cpu(dbmp_le->dn_numag);
	bmp->db_maxlevel = le32_to_cpu(dbmp_le->dn_maxlevel);
	bmp->db_maxag = le32_to_cpu(dbmp_le->dn_maxag);
	bmp->db_agpref = le32_to_cpu(dbmp_le->dn_agpref);
	bmp->db_aglevel = le32_to_cpu(dbmp_le->dn_aglevel);
	bmp->db_agheight = le32_to_cpu(dbmp_le->dn_agheight);
	bmp->db_agwidth = le32_to_cpu(dbmp_le->dn_agwidth);
	bmp->db_agstart = le32_to_cpu(dbmp_le->dn_agstart);
	bmp->db_agl2size = le32_to_cpu(dbmp_le->dn_agl2size);
	for (i = 0; i < MAXAG; i++)
		bmp->db_agfree[i] = le64_to_cpu(dbmp_le->dn_agfree[i]);
	bmp->db_agsize = le64_to_cpu(dbmp_le->dn_agsize);
	bmp->db_maxfreebud = dbmp_le->dn_maxfreebud;	/* single byte, no swap */

	/* release the buffer. */
	release_metapage(mp);

	/* bind the bmap inode and the bmap descriptor to each other. */
	bmp->db_ipbmap = ipbmap;
	JFS_SBI(ipbmap->i_sb)->bmap = bmp;

	/* no allocation group has an active (growing) file yet */
	memset(bmp->db_active, 0, sizeof(bmp->db_active));

	/*
	 * allocate/initialize the bmap lock
	 */
	BMAP_LOCK_INIT(bmp);

	return (0);
}

/*
 * NAME:	dbUnmount()
 *
 * FUNCTION:	terminate the block allocation map in preparation for
 *		file system unmount.
 *
 *		the in-core bmap descriptor is written to disk (via
 *		dbSync(), unless the mount failed or the fs is read-only)
 *		and the memory for this descriptor is freed.
 *
 * PARAMETERS:
 *	ipbmap	- pointer to in-core inode for the block map.
 *	mounterror - non-zero if the mount failed; skips the sync.
 *
 * RETURN VALUES:
 *	0	- success
 *	-EIO	- i/o error
 */
int dbUnmount(struct inode *ipbmap, int mounterror)
{
	struct bmap *bmp = JFS_SBI(ipbmap->i_sb)->bmap;

	/* only write the descriptor back when it could have changed */
	if (!(mounterror || isReadOnly(ipbmap)))
		dbSync(ipbmap);

	/*
	 * Invalidate the page cache buffers
	 */
	truncate_inode_pages(ipbmap->i_mapping, 0);

	/* free the memory for the in-memory bmap. */
	kfree(bmp);

	return (0);
}

/*
 *	dbSync()
 *
 * Write the in-memory bmap descriptor back to its on-disk control page
 * and flush all dirty bmap pages.  Returns 0 on success, -EIO on error.
 */
int dbSync(struct inode *ipbmap)
{
	struct dbmap_disk *dbmp_le;
	struct bmap *bmp = JFS_SBI(ipbmap->i_sb)->bmap;
	struct metapage *mp;
	int i;

	/*
	 * write bmap global control page
	 */
	/* get the buffer for the on-disk bmap descriptor. */
	mp = read_metapage(ipbmap,
			   BMAPBLKNO << JFS_SBI(ipbmap->i_sb)->l2nbperpage,
			   PSIZE, 0);
	if (mp == NULL) {
		jfs_err("dbSync: read_metapage failed!");
		return -EIO;
	}
	/* copy the in-memory version of the bmap to the on-disk version,
	 * converting each field to little-endian disk order.
	 */
	dbmp_le = (struct dbmap_disk *) mp->data;
	dbmp_le->dn_mapsize = cpu_to_le64(bmp->db_mapsize);
	dbmp_le->dn_nfree = cpu_to_le64(bmp->db_nfree);
	dbmp_le->dn_l2nbperpage = cpu_to_le32(bmp->db_l2nbperpage);
	dbmp_le->dn_numag = cpu_to_le32(bmp->db_numag);
	dbmp_le->dn_maxlevel = cpu_to_le32(bmp->db_maxlevel);
	dbmp_le->dn_maxag = cpu_to_le32(bmp->db_maxag);
	dbmp_le->dn_agpref = cpu_to_le32(bmp->db_agpref);
	dbmp_le->dn_aglevel = cpu_to_le32(bmp->db_aglevel);
	dbmp_le->dn_agheight = cpu_to_le32(bmp->db_agheight);
	dbmp_le->dn_agwidth = cpu_to_le32(bmp->db_agwidth);
	dbmp_le->dn_agstart = cpu_to_le32(bmp->db_agstart);
	dbmp_le->dn_agl2size = cpu_to_le32(bmp->db_agl2size);
	for (i = 0; i < MAXAG; i++)
		dbmp_le->dn_agfree[i] = cpu_to_le64(bmp->db_agfree[i]);
	dbmp_le->dn_agsize = cpu_to_le64(bmp->db_agsize);
	dbmp_le->dn_maxfreebud = bmp->db_maxfreebud;	/* single byte, no swap */

	/* write the buffer */
	write_metapage(mp);

	/*
	 * write out dirty pages of bmap
	 */
	filemap_write_and_wait(ipbmap->i_mapping);

	diWriteSpecial(ipbmap, 0);

	return (0);
}

/*
 * NAME:	dbFree()
 *
 * FUNCTION:	free the specified block range from the working block
 *		allocation map.
 *
 *		the blocks will be free from the working map one dmap
 *		at a time.
 *
 * PARAMETERS:
 *	ip	- pointer to in-core inode;
 *	blkno	- starting block number to be freed.
 *	nblocks	- number of blocks to be freed.
* * RETURN VALUES: * 0 - success * -EIO - i/o error */ int dbFree(struct inode *ip, s64 blkno, s64 nblocks) { struct metapage *mp; struct dmap *dp; int nb, rc; s64 lblkno, rem; struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap; struct bmap *bmp = JFS_SBI(ip->i_sb)->bmap; IREAD_LOCK(ipbmap, RDWRLOCK_DMAP); /* block to be freed better be within the mapsize. */ if (unlikely((blkno == 0) || (blkno + nblocks > bmp->db_mapsize))) { IREAD_UNLOCK(ipbmap); printk(KERN_ERR "blkno = %Lx, nblocks = %Lx\n", (unsigned long long) blkno, (unsigned long long) nblocks); jfs_error(ip->i_sb, "dbFree: block to be freed is outside the map"); return -EIO; } /* * free the blocks a dmap at a time. */ mp = NULL; for (rem = nblocks; rem > 0; rem -= nb, blkno += nb) { /* release previous dmap if any */ if (mp) { write_metapage(mp); } /* get the buffer for the current dmap. */ lblkno = BLKTODMAP(blkno, bmp->db_l2nbperpage); mp = read_metapage(ipbmap, lblkno, PSIZE, 0); if (mp == NULL) { IREAD_UNLOCK(ipbmap); return -EIO; } dp = (struct dmap *) mp->data; /* determine the number of blocks to be freed from * this dmap. */ nb = min(rem, BPERDMAP - (blkno & (BPERDMAP - 1))); /* free the blocks. */ if ((rc = dbFreeDmap(bmp, dp, blkno, nb))) { jfs_error(ip->i_sb, "dbFree: error in block map\n"); release_metapage(mp); IREAD_UNLOCK(ipbmap); return (rc); } } /* write the last buffer. */ write_metapage(mp); IREAD_UNLOCK(ipbmap); return (0); } /* * NAME: dbUpdatePMap() * * FUNCTION: update the allocation state (free or allocate) of the * specified block range in the persistent block allocation map. * * the blocks will be updated in the persistent map one * dmap at a time. * * PARAMETERS: * ipbmap - pointer to in-core inode for the block map. * free - 'true' if block range is to be freed from the persistent * map; 'false' if it is to be allocated. * blkno - starting block number of the range. * nblocks - number of contiguous blocks in the range. 
 *	tblk	- transaction block;
 *
 * RETURN VALUES:
 *	0	- success
 *	-EIO	- i/o error
 */
int
dbUpdatePMap(struct inode *ipbmap,
	     int free, s64 blkno, s64 nblocks, struct tblock * tblk)
{
	int nblks, dbitno, wbitno, rbits;
	int word, nbits, nwords;
	struct bmap *bmp = JFS_SBI(ipbmap->i_sb)->bmap;
	s64 lblkno, rem, lastlblkno;
	u32 mask;
	struct dmap *dp;
	struct metapage *mp;
	struct jfs_log *log;
	int lsn, difft, diffp;
	unsigned long flags;

	/* the blocks better be within the mapsize. */
	if (blkno + nblocks > bmp->db_mapsize) {
		printk(KERN_ERR "blkno = %Lx, nblocks = %Lx\n",
		       (unsigned long long) blkno,
		       (unsigned long long) nblocks);
		jfs_error(ipbmap->i_sb,
			  "dbUpdatePMap: blocks are outside the map");
		return -EIO;
	}

	/* compute delta of transaction lsn from log syncpt */
	lsn = tblk->lsn;
	log = (struct jfs_log *) JFS_SBI(tblk->sb)->log;
	logdiff(difft, lsn, log);

	/*
	 * update the block state a dmap at a time.
	 */
	mp = NULL;
	lastlblkno = 0;
	for (rem = nblocks; rem > 0; rem -= nblks, blkno += nblks) {
		/* get the buffer for the current dmap; only re-read when
		 * the range crosses into a new dmap page.
		 */
		lblkno = BLKTODMAP(blkno, bmp->db_l2nbperpage);
		if (lblkno != lastlblkno) {
			if (mp) {
				write_metapage(mp);
			}

			mp = read_metapage(bmp->db_ipbmap, lblkno, PSIZE,
					   0);
			if (mp == NULL)
				return -EIO;
			metapage_wait_for_io(mp);
		}
		dp = (struct dmap *) mp->data;

		/* determine the bit number and word within the dmap of
		 * the starting block.  also determine how many blocks
		 * are to be updated within this dmap.
		 */
		dbitno = blkno & (BPERDMAP - 1);
		word = dbitno >> L2DBWORD;
		nblks = min(rem, (s64)BPERDMAP - dbitno);

		/* update the bits of the dmap words. the first and last
		 * words may only have a subset of their bits updated. if
		 * this is the case, we'll work against that word (i.e.
		 * partial first and/or last) only in a single pass.  a
		 * single pass will also be used to update all words that
		 * are to have all their bits updated.
		 */
		for (rbits = nblks; rbits > 0;
		     rbits -= nbits, dbitno += nbits) {
			/* determine the bit number within the word and
			 * the number of bits within the word.
			 */
			wbitno = dbitno & (DBWORD - 1);
			nbits = min(rbits, DBWORD - wbitno);

			/* check if only part of the word is to be updated. */
			if (nbits < DBWORD) {
				/* update (free or allocate) the bits
				 * in this word.
				 */
				mask =
				    (ONES << (DBWORD - nbits) >> wbitno);
				if (free)
					dp->pmap[word] &=
					    cpu_to_le32(~mask);
				else
					dp->pmap[word] |=
					    cpu_to_le32(mask);

				word += 1;
			} else {
				/* one or more words are to have all
				 * their bits updated.  determine how
				 * many words and how many bits.
				 */
				nwords = rbits >> L2DBWORD;
				nbits = nwords << L2DBWORD;

				/* update (free or allocate) the bits
				 * in these words.
				 */
				if (free)
					memset(&dp->pmap[word], 0,
					       nwords * 4);
				else
					memset(&dp->pmap[word],
					       (int) ONES, nwords * 4);

				word += nwords;
			}
		}

		/*
		 * update dmap lsn: only once per dmap page, the first
		 * time we touch it in this call.
		 */
		if (lblkno == lastlblkno)
			continue;

		lastlblkno = lblkno;

		LOGSYNC_LOCK(log, flags);
		if (mp->lsn != 0) {
			/* inherit older/smaller lsn */
			logdiff(diffp, mp->lsn, log);
			if (difft < diffp) {
				mp->lsn = lsn;

				/* move bp after tblock in logsync list */
				list_move(&mp->synclist, &tblk->synclist);
			}

			/* inherit younger/larger clsn */
			logdiff(difft, tblk->clsn, log);
			logdiff(diffp, mp->clsn, log);
			if (difft > diffp)
				mp->clsn = tblk->clsn;
		} else {
			mp->log = log;
			mp->lsn = lsn;

			/* insert bp after tblock in logsync list */
			log->count++;
			list_add(&mp->synclist, &tblk->synclist);

			mp->clsn = tblk->clsn;
		}
		LOGSYNC_UNLOCK(log, flags);
	}

	/* write the last buffer. */
	if (mp) {
		write_metapage(mp);
	}

	return (0);
}

/*
 * NAME:	dbNextAG()
 *
 * FUNCTION:	find the preferred allocation group for new allocations.
 *
 *		Within the allocation groups, we maintain a preferred
 *		allocation group which consists of a group with at least
 *		average free space.  It is the preferred group that we target
 *		new inode allocation towards.
The tie-in between inode * allocation and block allocation occurs as we allocate the * first (data) block of an inode and specify the inode (block) * as the allocation hint for this block. * * We try to avoid having more than one open file growing in * an allocation group, as this will lead to fragmentation. * This differs from the old OS/2 method of trying to keep * empty ags around for large allocations. * * PARAMETERS: * ipbmap - pointer to in-core inode for the block map. * * RETURN VALUES: * the preferred allocation group number. */ int dbNextAG(struct inode *ipbmap) { s64 avgfree; int agpref; s64 hwm = 0; int i; int next_best = -1; struct bmap *bmp = JFS_SBI(ipbmap->i_sb)->bmap; BMAP_LOCK(bmp); /* determine the average number of free blocks within the ags. */ avgfree = (u32)bmp->db_nfree / bmp->db_numag; /* * if the current preferred ag does not have an active allocator * and has at least average freespace, return it */ agpref = bmp->db_agpref; if ((atomic_read(&bmp->db_active[agpref]) == 0) && (bmp->db_agfree[agpref] >= avgfree)) goto unlock; /* From the last preferred ag, find the next one with at least * average free space. */ for (i = 0 ; i < bmp->db_numag; i++, agpref++) { if (agpref == bmp->db_numag) agpref = 0; if (atomic_read(&bmp->db_active[agpref])) /* open file is currently growing in this ag */ continue; if (bmp->db_agfree[agpref] >= avgfree) { /* Return this one */ bmp->db_agpref = agpref; goto unlock; } else if (bmp->db_agfree[agpref] > hwm) { /* Less than avg. freespace, but best so far */ hwm = bmp->db_agfree[agpref]; next_best = agpref; } } /* * If no inactive ag was found with average freespace, use the * next best */ if (next_best != -1) bmp->db_agpref = next_best; /* else leave db_agpref unchanged */ unlock: BMAP_UNLOCK(bmp); /* return the preferred group. */ return (bmp->db_agpref); } /* * NAME: dbAlloc() * * FUNCTION: attempt to allocate a specified number of contiguous free * blocks from the working allocation block map. 
 *
 *		the block allocation policy uses hints and a multi-step
 *		approach.
 *
 *		for allocation requests smaller than the number of blocks
 *		per dmap, we first try to allocate the new blocks
 *		immediately following the hint.  if these blocks are not
 *		available, we try to allocate blocks near the hint.  if
 *		no blocks near the hint are available, we next try to
 *		allocate within the same dmap as contains the hint.
 *
 *		if no blocks are available in the dmap or the allocation
 *		request is larger than the dmap size, we try to allocate
 *		within the same allocation group as contains the hint.  if
 *		this does not succeed, we finally try to allocate anywhere
 *		within the aggregate.
 *
 *		we also try to allocate anywhere within the aggregate
 *		for allocation requests larger than the allocation group
 *		size or requests that specify no hint value.
 *
 * PARAMETERS:
 *	ip	- pointer to in-core inode;
 *	hint	- allocation hint.
 *	nblocks	- number of contiguous blocks in the range.
 *	results	- on successful return, set to the starting block number
 *		  of the newly allocated contiguous range.
 *
 * RETURN VALUES:
 *	0	- success
 *	-ENOSPC	- insufficient disk resources
 *	-EIO	- i/o error
 */
int dbAlloc(struct inode *ip, s64 hint, s64 nblocks, s64 * results)
{
	int rc, agno;
	struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap;
	struct bmap *bmp;
	struct metapage *mp;
	s64 lblkno, blkno;
	struct dmap *dp;
	int l2nb;
	s64 mapSize;
	int writers;

	/* assert that nblocks is valid */
	assert(nblocks > 0);

	/* get the log2 number of blocks to be allocated.
	 * if the number of blocks is not a log2 multiple,
	 * it will be rounded up to the next log2 multiple.
	 */
	l2nb = BLKSTOL2(nblocks);

	bmp = JFS_SBI(ip->i_sb)->bmap;

	mapSize = bmp->db_mapsize;

	/* the hint should be within the map */
	if (hint >= mapSize) {
		jfs_error(ip->i_sb, "dbAlloc: the hint is outside the map");
		return -EIO;
	}

	/* if the number of blocks to be allocated is greater than the
	 * allocation group size, try to allocate anywhere.
	 */
	if (l2nb > bmp->db_agl2size) {
		IWRITE_LOCK(ipbmap, RDWRLOCK_DMAP);

		rc = dbAllocAny(bmp, nblocks, l2nb, results);

		goto write_unlock;
	}

	/*
	 * If no hint, let dbNextAG recommend an allocation group
	 */
	if (hint == 0)
		goto pref_ag;

	/* we would like to allocate close to the hint.  adjust the
	 * hint to the block following the hint since the allocators
	 * will start looking for free space starting at this point.
	 */
	blkno = hint + 1;

	if (blkno >= bmp->db_mapsize)
		goto pref_ag;

	agno = blkno >> bmp->db_agl2size;

	/* check if blkno crosses over into a new allocation group.
	 * if so, check if we should allow allocations within this
	 * allocation group.
	 *
	 * NOTE(review): the nested un-braced ifs below are intentional
	 * (outer condition gates the inner one); braces would make the
	 * structure clearer.
	 */
	if ((blkno & (bmp->db_agsize - 1)) == 0)
		/* check if the AG is currently being written to.
		 * if so, call dbNextAG() to find a non-busy
		 * AG with sufficient free space.
		 */
		if (atomic_read(&bmp->db_active[agno]))
			goto pref_ag;

	/* check if the allocation request size can be satisfied from a
	 * single dmap.  if so, try to allocate from the dmap containing
	 * the hint using a tiered strategy.
	 */
	if (nblocks <= BPERDMAP) {
		IREAD_LOCK(ipbmap, RDWRLOCK_DMAP);

		/* get the buffer for the dmap containing the hint. */
		rc = -EIO;
		lblkno = BLKTODMAP(blkno, bmp->db_l2nbperpage);
		mp = read_metapage(ipbmap, lblkno, PSIZE, 0);
		if (mp == NULL)
			goto read_unlock;

		dp = (struct dmap *) mp->data;

		/* first, try to satisfy the allocation request with the
		 * blocks beginning at the hint.
		 */
		if ((rc = dbAllocNext(bmp, dp, blkno, (int) nblocks))
		    != -ENOSPC) {
			if (rc == 0) {
				*results = blkno;
				mark_metapage_dirty(mp);
			}

			release_metapage(mp);
			goto read_unlock;
		}

		writers = atomic_read(&bmp->db_active[agno]);
		if ((writers > 1) ||
		    ((writers == 1) && (JFS_IP(ip)->active_ag != agno))) {
			/*
			 * Someone else is writing in this allocation
			 * group.  To avoid fragmenting, try another ag
			 */
			release_metapage(mp);
			IREAD_UNLOCK(ipbmap);
			goto pref_ag;
		}

		/* next, try to satisfy the allocation request with blocks
		 * near the hint.
		 */
		if ((rc =
		     dbAllocNear(bmp, dp, blkno, (int) nblocks, l2nb, results))
		    != -ENOSPC) {
			if (rc == 0)
				mark_metapage_dirty(mp);

			release_metapage(mp);
			goto read_unlock;
		}

		/* try to satisfy the allocation request with blocks within
		 * the same dmap as the hint.
		 */
		if ((rc = dbAllocDmapLev(bmp, dp, (int) nblocks, l2nb, results))
		    != -ENOSPC) {
			if (rc == 0)
				mark_metapage_dirty(mp);

			release_metapage(mp);
			goto read_unlock;
		}

		release_metapage(mp);
		IREAD_UNLOCK(ipbmap);
	}

	/* try to satisfy the allocation request with blocks within
	 * the same allocation group as the hint.
	 */
	IWRITE_LOCK(ipbmap, RDWRLOCK_DMAP);
	if ((rc = dbAllocAG(bmp, agno, nblocks, l2nb, results)) != -ENOSPC)
		goto write_unlock;

	IWRITE_UNLOCK(ipbmap);

      pref_ag:
	/*
	 * Let dbNextAG recommend a preferred allocation group
	 */
	agno = dbNextAG(ipbmap);
	IWRITE_LOCK(ipbmap, RDWRLOCK_DMAP);

	/* Try to allocate within this allocation group.  if that fails, try to
	 * allocate anywhere in the map.
	 */
	if ((rc = dbAllocAG(bmp, agno, nblocks, l2nb, results)) == -ENOSPC)
		rc = dbAllocAny(bmp, nblocks, l2nb, results);

      write_unlock:
	IWRITE_UNLOCK(ipbmap);

	return (rc);

      read_unlock:
	IREAD_UNLOCK(ipbmap);

	return (rc);
}

#ifdef _NOTYET
/*
 * NAME:	dbAllocExact()
 *
 * FUNCTION:	try to allocate the requested extent;
 *
 * PARAMETERS:
 *	ip	- pointer to in-core inode;
 *	blkno	- extent address;
 *	nblocks	- extent length;
 *
 * RETURN VALUES:
 *	0	- success
 *	-ENOSPC	- insufficient disk resources
 *	-EIO	- i/o error
 */
int dbAllocExact(struct inode *ip, s64 blkno, int nblocks)
{
	int rc;
	struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap;
	struct bmap *bmp = JFS_SBI(ip->i_sb)->bmap;
	struct dmap *dp;
	s64 lblkno;
	struct metapage *mp;

	IREAD_LOCK(ipbmap, RDWRLOCK_DMAP);

	/*
	 * validate extent request:
	 *
	 * note: defragfs policy:
	 *  max 64 blocks will be moved.
	 *  allocation request size must be satisfied from a single dmap.
	 */
	if (nblocks <= 0 || nblocks > BPERDMAP || blkno >= bmp->db_mapsize) {
		IREAD_UNLOCK(ipbmap);
		return -EINVAL;
	}

	if (nblocks > ((s64) 1 << bmp->db_maxfreebud)) {
		/* the free space is no longer available */
		IREAD_UNLOCK(ipbmap);
		return -ENOSPC;
	}

	/* read in the dmap covering the extent */
	lblkno = BLKTODMAP(blkno, bmp->db_l2nbperpage);
	mp = read_metapage(ipbmap, lblkno, PSIZE, 0);
	if (mp == NULL) {
		IREAD_UNLOCK(ipbmap);
		return -EIO;
	}
	dp = (struct dmap *) mp->data;

	/* try to allocate the requested extent */
	rc = dbAllocNext(bmp, dp, blkno, nblocks);

	IREAD_UNLOCK(ipbmap);

	if (rc == 0)
		mark_metapage_dirty(mp);

	release_metapage(mp);

	return (rc);
}
#endif /* _NOTYET */

/*
 * NAME:	dbReAlloc()
 *
 * FUNCTION:	attempt to extend a current allocation by a specified
 *		number of blocks.
 *
 *		this routine attempts to satisfy the allocation request
 *		by first trying to extend the existing allocation in
 *		place by allocating the additional blocks as the blocks
 *		immediately following the current allocation.  if these
 *		blocks are not available, this routine will attempt to
 *		allocate a new set of contiguous blocks large enough
 *		to cover the existing allocation plus the additional
 *		number of blocks required.
 *
 * PARAMETERS:
 *	ip	- pointer to in-core inode requiring allocation.
 *	blkno	- starting block of the current allocation.
 *	nblocks	- number of contiguous blocks within the current
 *		  allocation.
 *	addnblocks - number of blocks to add to the allocation.
 *	results	- on successful return, set to the starting block number
 *		  of the existing allocation if the existing allocation
 *		  was extended in place or to a newly allocated contiguous
 *		  range if the existing allocation could not be extended
 *		  in place.
 *
 * RETURN VALUES:
 *	0	- success
 *	-ENOSPC	- insufficient disk resources
 *	-EIO	- i/o error
 */
int
dbReAlloc(struct inode *ip,
	  s64 blkno, s64 nblocks, s64 addnblocks, s64 * results)
{
	int rc;

	/* try to extend the allocation in place. */
	if ((rc = dbExtend(ip, blkno, nblocks, addnblocks)) == 0) {
		*results = blkno;
		return (0);
	} else {
		/* anything other than "no space right there" is fatal */
		if (rc != -ENOSPC)
			return (rc);
	}

	/* could not extend the allocation in place, so allocate a
	 * new set of blocks for the entire request (i.e. try to get
	 * a range of contiguous blocks large enough to cover the
	 * existing allocation plus the additional blocks.)
	 * the last block of the current allocation serves as the hint.
	 */
	return (dbAlloc
		(ip, blkno + nblocks - 1, addnblocks + nblocks, results));
}

/*
 * NAME:	dbExtend()
 *
 * FUNCTION:	attempt to extend a current allocation by a specified
 *		number of blocks.
 *
 *		this routine attempts to satisfy the allocation request
 *		by first trying to extend the existing allocation in
 *		place by allocating the additional blocks as the blocks
 *		immediately following the current allocation.
 *
 * PARAMETERS:
 *	ip	- pointer to in-core inode requiring allocation.
 *	blkno	- starting block of the current allocation.
 *	nblocks	- number of contiguous blocks within the current
 *		  allocation.
 *	addnblocks - number of blocks to add to the allocation.
 *
 * RETURN VALUES:
 *	0	- success
 *	-ENOSPC	- insufficient disk resources
 *	-EIO	- i/o error
 */
static int dbExtend(struct inode *ip, s64 blkno, s64 nblocks, s64 addnblocks)
{
	struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb);
	s64 lblkno, lastblkno, extblkno;
	uint rel_block;		/* block offset within its page */
	struct metapage *mp;
	struct dmap *dp;
	int rc;
	struct inode *ipbmap = sbi->ipbmap;
	struct bmap *bmp;

	/*
	 * We don't want a non-aligned extent to cross a page boundary
	 */
	if (((rel_block = blkno & (sbi->nbperpage - 1))) &&
	    (rel_block + nblocks + addnblocks > sbi->nbperpage))
		return -ENOSPC;

	/* get the last block of the current allocation */
	lastblkno = blkno + nblocks - 1;

	/* determine the block number of the block following
	 * the existing allocation.
	 */
	extblkno = lastblkno + 1;

	IREAD_LOCK(ipbmap, RDWRLOCK_DMAP);

	/* better be within the file system */
	bmp = sbi->bmap;
	if (lastblkno < 0 || lastblkno >= bmp->db_mapsize) {
		IREAD_UNLOCK(ipbmap);
		jfs_error(ip->i_sb,
			  "dbExtend: the block is outside the filesystem");
		return -EIO;
	}

	/* we'll attempt to extend the current allocation in place by
	 * allocating the additional blocks as the blocks immediately
	 * following the current allocation.  we only try to extend the
	 * current allocation in place if the number of additional blocks
	 * can fit into a dmap, the last block of the current allocation
	 * is not the last block of the file system, and the start of the
	 * inplace extension is not on an allocation group boundary.
	 */
	if (addnblocks > BPERDMAP || extblkno >= bmp->db_mapsize ||
	    (extblkno & (bmp->db_agsize - 1)) == 0) {
		IREAD_UNLOCK(ipbmap);
		return -ENOSPC;
	}

	/* get the buffer for the dmap containing the first block
	 * of the extension.
	 */
	lblkno = BLKTODMAP(extblkno, bmp->db_l2nbperpage);
	mp = read_metapage(ipbmap, lblkno, PSIZE, 0);
	if (mp == NULL) {
		IREAD_UNLOCK(ipbmap);
		return -EIO;
	}

	dp = (struct dmap *) mp->data;

	/* try to allocate the blocks immediately following the
	 * current allocation.
	 */
	rc = dbAllocNext(bmp, dp, extblkno, (int) addnblocks);

	IREAD_UNLOCK(ipbmap);

	/* were we successful ? */
	if (rc == 0)
		write_metapage(mp);
	else
		/* we were not successful */
		release_metapage(mp);

	return (rc);
}

/*
 * NAME:	dbAllocNext()
 *
 * FUNCTION:	attempt to allocate the blocks of the specified block
 *		range within a dmap.
 *
 * PARAMETERS:
 *	bmp	- pointer to bmap descriptor
 *	dp	- pointer to dmap.
 *	blkno	- starting block number of the range.
 *	nblocks	- number of contiguous free blocks of the range.
 *
 * RETURN VALUES:
 *	0	- success
 *	-ENOSPC	- insufficient disk resources
 *	-EIO	- i/o error
 *
 * serialization: IREAD_LOCK(ipbmap) held on entry/exit;
 */
static int dbAllocNext(struct bmap * bmp, struct dmap * dp, s64 blkno,
		       int nblocks)
{
	int dbitno, word, rembits, nb, nwords, wbitno, nw;
	int l2size;
	s8 *leaf;
	u32 mask;

	/* sanity: on-disk dmap page must carry the expected leaf index */
	if (dp->tree.leafidx != cpu_to_le32(LEAFIND)) {
		jfs_error(bmp->db_ipbmap->i_sb,
			  "dbAllocNext: Corrupt dmap page");
		return -EIO;
	}

	/* pick up a pointer to the leaves of the dmap tree. */
	leaf = dp->tree.stree + le32_to_cpu(dp->tree.leafidx);

	/* determine the bit number and word within the dmap of the
	 * starting block.
	 */
	dbitno = blkno & (BPERDMAP - 1);
	word = dbitno >> L2DBWORD;

	/* check if the specified block range is contained within
	 * this dmap.
	 */
	if (dbitno + nblocks > BPERDMAP)
		return -ENOSPC;

	/* check if the starting leaf indicates that anything
	 * is free.
	 */
	if (leaf[word] == NOFREE)
		return -ENOSPC;

	/* check the dmaps words corresponding to block range to see
	 * if the block range is free.  not all bits of the first and
	 * last words may be contained within the block range.  if this
	 * is the case, we'll work against those words (i.e. partial first
	 * and/or last) on an individual basis (a single pass) and examine
	 * the actual bits to determine if they are free.  a single pass
	 * will be used for all dmap words fully contained within the
	 * specified range.  within this pass, the leaves of the dmap
	 * tree will be examined to determine if the blocks are free.  a
	 * single leaf may describe the free space of multiple dmap
	 * words, so we may visit only a subset of the actual leaves
	 * corresponding to the dmap words of the block range.
	 */
	for (rembits = nblocks; rembits > 0; rembits -= nb, dbitno += nb) {
		/* determine the bit number within the word and
		 * the number of bits within the word.
		 */
		wbitno = dbitno & (DBWORD - 1);
		nb = min(rembits, DBWORD - wbitno);

		/* check if only part of the word is to be examined. */
		if (nb < DBWORD) {
			/* check if the bits are free. */
			mask = (ONES << (DBWORD - nb) >> wbitno);
			if ((mask & ~le32_to_cpu(dp->wmap[word])) != mask)
				return -ENOSPC;

			word += 1;
		} else {
			/* one or more dmap words are fully contained
			 * within the block range.  determine how many
			 * words and how many bits.
			 */
			nwords = rembits >> L2DBWORD;
			nb = nwords << L2DBWORD;

			/* now examine the appropriate leaves to determine
			 * if the blocks are free.
			 */
			while (nwords > 0) {
				/* does the leaf describe any free space ? */
				if (leaf[word] < BUDMIN)
					return -ENOSPC;

				/* determine the l2 number of bits provided
				 * by this leaf.
				 */
				l2size =
				    min((int)leaf[word], NLSTOL2BSZ(nwords));

				/* determine how many words were handled. */
				nw = BUDSIZE(l2size, BUDMIN);

				nwords -= nw;
				word += nw;
			}
		}
	}

	/* allocate the blocks. */
	return (dbAllocDmap(bmp, dp, blkno, nblocks));
}

/*
 * NAME:	dbAllocNear()
 *
 * FUNCTION:	attempt to allocate a number of contiguous free blocks near
 *		a specified block (hint) within a dmap.
 *
 *		starting with the dmap leaf that covers the hint, we'll
 *		check the next four contiguous leaves for sufficient free
 *		space.  if sufficient free space is found, we'll allocate
 *		the desired free space.
 *
 * PARAMETERS:
 *	bmp	- pointer to bmap descriptor
 *	dp	- pointer to dmap.
 *	blkno	- block number to allocate near.
 *	nblocks	- actual number of contiguous free blocks desired.
 *	l2nb	- log2 number of contiguous free blocks desired.
 *	results	- on successful return, set to the starting block number
 *		  of the newly allocated range.
* * RETURN VALUES: * 0 - success * -ENOSPC - insufficient disk resources * -EIO - i/o error * * serialization: IREAD_LOCK(ipbmap) held on entry/exit; */ static int dbAllocNear(struct bmap * bmp, struct dmap * dp, s64 blkno, int nblocks, int l2nb, s64 * results) { int word, lword, rc; s8 *leaf; if (dp->tree.leafidx != cpu_to_le32(LEAFIND)) { jfs_error(bmp->db_ipbmap->i_sb, "dbAllocNear: Corrupt dmap page"); return -EIO; } leaf = dp->tree.stree + le32_to_cpu(dp->tree.leafidx); /* determine the word within the dmap that holds the hint * (i.e. blkno). also, determine the last word in the dmap * that we'll include in our examination. */ word = (blkno & (BPERDMAP - 1)) >> L2DBWORD; lword = min(word + 4, LPERDMAP); /* examine the leaves for sufficient free space. */ for (; word < lword; word++) { /* does the leaf describe sufficient free space ? */ if (leaf[word] < l2nb) continue; /* determine the block number within the file system * of the first block described by this dmap word. */ blkno = le64_to_cpu(dp->start) + (word << L2DBWORD); /* if not all bits of the dmap word are free, get the * starting bit number within the dmap word of the required * string of free bits and adjust the block number with the * value. */ if (leaf[word] < BUDMIN) blkno += dbFindBits(le32_to_cpu(dp->wmap[word]), l2nb); /* allocate the blocks. */ if ((rc = dbAllocDmap(bmp, dp, blkno, nblocks)) == 0) *results = blkno; return (rc); } return -ENOSPC; } /* * NAME: dbAllocAG() * * FUNCTION: attempt to allocate the specified number of contiguous * free blocks within the specified allocation group. * * unless the allocation group size is equal to the number * of blocks per dmap, the dmap control pages will be used to * find the required free space, if available. we start the * search at the highest dmap control page level which * distinctly describes the allocation group's free space * (i.e. the highest level at which the allocation group's * free space is not mixed in with that of any other group). 
* in addition, we start the search within this level at a * height of the dmapctl dmtree at which the nodes distinctly * describe the allocation group's free space. at this height, * the allocation group's free space may be represented by 1 * or two sub-trees, depending on the allocation group size. * we search the top nodes of these subtrees left to right for * sufficient free space. if sufficient free space is found, * the subtree is searched to find the leftmost leaf that * has free space. once we have made it to the leaf, we * move the search to the next lower level dmap control page * corresponding to this leaf. we continue down the dmap control * pages until we find the dmap that contains or starts the * sufficient free space and we allocate at this dmap. * * if the allocation group size is equal to the dmap size, * we'll start at the dmap corresponding to the allocation * group and attempt the allocation at this level. * * the dmap control page search is also not performed if the * allocation group is completely free and we go to the first * dmap of the allocation group to do the allocation. this is * done because the allocation group may be part (not the first * part) of a larger binary buddy system, causing the dmap * control pages to indicate no free space (NOFREE) within * the allocation group. * * PARAMETERS: * bmp - pointer to bmap descriptor * agno - allocation group number. * nblocks - actual number of contiguous free blocks desired. * l2nb - log2 number of contiguous free blocks desired. * results - on successful return, set to the starting block number * of the newly allocated range. 
 *
 * RETURN VALUES:
 *	0	- success
 *	-ENOSPC	- insufficient disk resources
 *	-EIO	- i/o error
 *
 * note: IWRITE_LOCK(ipbmap) held on entry/exit;
 */
static int
dbAllocAG(struct bmap * bmp, int agno, s64 nblocks, int l2nb, s64 * results)
{
	struct metapage *mp;
	struct dmapctl *dcp;
	int rc, ti, i, k, m, n, agperlev;
	s64 blkno, lblkno;
	int budmin;

	/* allocation request should not be for more than the
	 * allocation group size.
	 */
	if (l2nb > bmp->db_agl2size) {
		jfs_error(bmp->db_ipbmap->i_sb,
			  "dbAllocAG: allocation request is larger than the "
			  "allocation group size");
		return -EIO;
	}

	/* determine the starting block number of the allocation
	 * group.
	 */
	blkno = (s64) agno << bmp->db_agl2size;

	/* check if the allocation group size is the minimum allocation
	 * group size or if the allocation group is completely free. if
	 * the allocation group size is the minimum size of BPERDMAP (i.e.
	 * 1 dmap), there is no need to search the dmap control page (below)
	 * that fully describes the allocation group since the allocation
	 * group is already fully described by a dmap.  in this case, we
	 * just call dbAllocCtl() to search the dmap tree and allocate the
	 * required space if available.
	 *
	 * if the allocation group is completely free, dbAllocCtl() is
	 * also called to allocate the required space.  this is done for
	 * two reasons.  first, it makes no sense searching the dmap control
	 * pages for free space when we know that free space exists.  second,
	 * the dmap control pages may indicate that the allocation group
	 * has no free space if the allocation group is part (not the first
	 * part) of a larger binary buddy system.
	 */
	if (bmp->db_agsize == BPERDMAP
	    || bmp->db_agfree[agno] == bmp->db_agsize) {
		rc = dbAllocCtl(bmp, nblocks, l2nb, blkno, results);
		/* a free AG must be able to satisfy any request no larger
		 * than the AG, so -ENOSPC here indicates map corruption.
		 */
		if ((rc == -ENOSPC) &&
		    (bmp->db_agfree[agno] == bmp->db_agsize)) {
			printk(KERN_ERR "blkno = %Lx, blocks = %Lx\n",
			       (unsigned long long) blkno,
			       (unsigned long long) nblocks);
			jfs_error(bmp->db_ipbmap->i_sb,
				  "dbAllocAG: dbAllocCtl failed in free AG");
		}
		return (rc);
	}

	/* the buffer for the dmap control page that fully describes the
	 * allocation group.
	 */
	lblkno = BLKTOCTL(blkno, bmp->db_l2nbperpage, bmp->db_aglevel);
	mp = read_metapage(bmp->db_ipbmap, lblkno, PSIZE, 0);
	if (mp == NULL)
		return -EIO;
	dcp = (struct dmapctl *) mp->data;
	budmin = dcp->budmin;

	if (dcp->leafidx != cpu_to_le32(CTLLEAFIND)) {
		jfs_error(bmp->db_ipbmap->i_sb,
			  "dbAllocAG: Corrupt dmapctl page");
		release_metapage(mp);
		return -EIO;
	}

	/* search the subtree(s) of the dmap control page that describes
	 * the allocation group, looking for sufficient free space.  to begin,
	 * determine how many allocation groups are represented in a dmap
	 * control page at the control page level (i.e. L0, L1, L2) that
	 * fully describes an allocation group. next, determine the starting
	 * tree index of this allocation group within the control page.
	 */
	agperlev =
	    (1 << (L2LPERCTL - (bmp->db_agheight << 1))) / bmp->db_agwidth;
	ti = bmp->db_agstart + bmp->db_agwidth * (agno & (agperlev - 1));

	/* dmap control page trees fan-out by 4 and a single allocation
	 * group may be described by 1 or 2 subtrees within the ag level
	 * dmap control page, depending upon the ag size. examine the ag's
	 * subtrees for sufficient free space, starting with the leftmost
	 * subtree.
	 */
	for (i = 0; i < bmp->db_agwidth; i++, ti++) {
		/* is there sufficient free space ?
		 */
		if (l2nb > dcp->stree[ti])
			continue;

		/* sufficient free space found in a subtree. now search down
		 * the subtree to find the leftmost leaf that describes this
		 * free space.
		 */
		for (k = bmp->db_agheight; k > 0; k--) {
			for (n = 0, m = (ti << 2) + 1; n < 4; n++) {
				if (l2nb <= dcp->stree[m + n]) {
					ti = m + n;
					break;
				}
			}
			/* parent promised free space but no child has it:
			 * the stree is inconsistent.
			 */
			if (n == 4) {
				jfs_error(bmp->db_ipbmap->i_sb,
					  "dbAllocAG: failed descending stree");
				release_metapage(mp);
				return -EIO;
			}
		}

		/* determine the block number within the file system
		 * that corresponds to this leaf.
		 */
		if (bmp->db_aglevel == 2)
			blkno = 0;
		else if (bmp->db_aglevel == 1)
			blkno &= ~(MAXL1SIZE - 1);
		else		/* bmp->db_aglevel == 0 */
			blkno &= ~(MAXL0SIZE - 1);

		blkno +=
		    ((s64) (ti - le32_to_cpu(dcp->leafidx))) << budmin;

		/* release the buffer in preparation for going down
		 * the next level of dmap control pages.
		 */
		release_metapage(mp);

		/* check if we need to continue to search down the lower
		 * level dmap control pages.  we need to if the number of
		 * blocks required is less than maximum number of blocks
		 * described at the next lower level.
		 */
		if (l2nb < budmin) {

			/* search the lower level dmap control pages to get
			 * the starting block number of the dmap that
			 * contains or starts off the free space.
			 */
			if ((rc =
			     dbFindCtl(bmp, l2nb, bmp->db_aglevel - 1,
				       &blkno))) {
				/* the upper level said the space was there,
				 * so a lower-level miss is an inconsistency.
				 */
				if (rc == -ENOSPC) {
					jfs_error(bmp->db_ipbmap->i_sb,
						  "dbAllocAG: control page "
						  "inconsistent");
					return -EIO;
				}
				return (rc);
			}
		}

		/* allocate the blocks.
		 */
		rc = dbAllocCtl(bmp, nblocks, l2nb, blkno, results);
		if (rc == -ENOSPC) {
			jfs_error(bmp->db_ipbmap->i_sb,
				  "dbAllocAG: unable to allocate blocks");
			rc = -EIO;
		}
		return (rc);
	}

	/* no space in the allocation group.  release the buffer and
	 * return -ENOSPC.
	 */
	release_metapage(mp);

	return -ENOSPC;
}


/*
 * NAME:	dbAllocAny()
 *
 * FUNCTION:	attempt to allocate the specified number of contiguous
 *		free blocks anywhere in the file system.
 *
 *		dbAllocAny() attempts to find the sufficient free space by
 *		searching down the dmap control pages, starting with the
 *		highest level (i.e. L0, L1, L2) control page.  if free space
 *		large enough to satisfy the desired free space is found, the
 *		desired free space is allocated.
* * PARAMETERS: * bmp - pointer to bmap descriptor * nblocks - actual number of contiguous free blocks desired. * l2nb - log2 number of contiguous free blocks desired. * results - on successful return, set to the starting block number * of the newly allocated range. * * RETURN VALUES: * 0 - success * -ENOSPC - insufficient disk resources * -EIO - i/o error * * serialization: IWRITE_LOCK(ipbmap) held on entry/exit; */ static int dbAllocAny(struct bmap * bmp, s64 nblocks, int l2nb, s64 * results) { int rc; s64 blkno = 0; /* starting with the top level dmap control page, search * down the dmap control levels for sufficient free space. * if free space is found, dbFindCtl() returns the starting * block number of the dmap that contains or starts off the * range of free space. */ if ((rc = dbFindCtl(bmp, l2nb, bmp->db_maxlevel, &blkno))) return (rc); /* allocate the blocks. */ rc = dbAllocCtl(bmp, nblocks, l2nb, blkno, results); if (rc == -ENOSPC) { jfs_error(bmp->db_ipbmap->i_sb, "dbAllocAny: unable to allocate blocks"); return -EIO; } return (rc); } /* * NAME: dbFindCtl() * * FUNCTION: starting at a specified dmap control page level and block * number, search down the dmap control levels for a range of * contiguous free blocks large enough to satisfy an allocation * request for the specified number of free blocks. * * if sufficient contiguous free blocks are found, this routine * returns the starting block number within a dmap page that * contains or starts a range of contiqious free blocks that * is sufficient in size. * * PARAMETERS: * bmp - pointer to bmap descriptor * level - starting dmap control page level. * l2nb - log2 number of contiguous free blocks desired. * *blkno - on entry, starting block number for conducting the search. * on successful return, the first block within a dmap page * that contains or starts a range of contiguous free blocks. 
 *
 * RETURN VALUES:
 *	0	- success
 *	-ENOSPC	- insufficient disk resources
 *	-EIO	- i/o error
 *
 * serialization: IWRITE_LOCK(ipbmap) held on entry/exit;
 */
static int dbFindCtl(struct bmap * bmp, int l2nb, int level, s64 * blkno)
{
	int rc, leafidx, lev;
	s64 b, lblkno;
	struct dmapctl *dcp;
	int budmin;
	struct metapage *mp;

	/* starting at the specified dmap control page level and block
	 * number, search down the dmap control levels for the starting
	 * block number of a dmap page that contains or starts off
	 * sufficient free blocks.
	 */
	for (lev = level, b = *blkno; lev >= 0; lev--) {
		/* get the buffer of the dmap control page for the block
		 * number and level (i.e. L0, L1, L2).
		 */
		lblkno = BLKTOCTL(b, bmp->db_l2nbperpage, lev);
		mp = read_metapage(bmp->db_ipbmap, lblkno, PSIZE, 0);
		if (mp == NULL)
			return -EIO;
		dcp = (struct dmapctl *) mp->data;
		budmin = dcp->budmin;

		if (dcp->leafidx != cpu_to_le32(CTLLEAFIND)) {
			jfs_error(bmp->db_ipbmap->i_sb,
				  "dbFindCtl: Corrupt dmapctl page");
			release_metapage(mp);
			return -EIO;
		}

		/* search the tree within the dmap control page for
		 * sufficient free space.  if sufficient free space is found,
		 * dbFindLeaf() returns the index of the leaf at which
		 * free space was found.
		 */
		rc = dbFindLeaf((dmtree_t *) dcp, l2nb, &leafidx);

		/* release the buffer.
		 */
		release_metapage(mp);

		/* space found ?
		 */
		if (rc) {
			/* a higher level already promised this space, so
			 * a miss below the starting level is corruption.
			 */
			if (lev != level) {
				jfs_error(bmp->db_ipbmap->i_sb,
					  "dbFindCtl: dmap inconsistent");
				return -EIO;
			}
			return -ENOSPC;
		}

		/* adjust the block number to reflect the location within
		 * the dmap control page (i.e. the leaf) at which free
		 * space was found.
		 */
		b += (((s64) leafidx) << budmin);

		/* we stop the search at this dmap control page level if
		 * the number of blocks required is greater than or equal
		 * to the maximum number of blocks described at the next
		 * (lower) level.
		 */
		if (l2nb >= budmin)
			break;
	}

	*blkno = b;
	return (0);
}


/*
 * NAME:	dbAllocCtl()
 *
 * FUNCTION:	attempt to allocate a specified number of contiguous
 *		blocks starting within a specific dmap.
 *
 *		this routine is called by higher level routines that search
 *		the dmap control pages above the actual dmaps for contiguous
 *		free space.  the result of successful searches by these
 *		routines are the starting block numbers within dmaps, with
 *		the dmaps themselves containing the desired contiguous free
 *		space or starting a contiguous free space of desired size
 *		that is made up of the blocks of one or more dmaps. these
 *		calls should not fail due to insufficent resources.
 *
 *		this routine is called in some cases where it is not known
 *		whether it will fail due to insufficient resources.  more
 *		specifically, this occurs when allocating from an allocation
 *		group whose size is equal to the number of blocks per dmap.
 *		in this case, the dmap control pages are not examined prior
 *		to calling this routine (to save pathlength) and the call
 *		might fail.
 *
 *		for a request size that fits within a dmap, this routine relies
 *		upon the dmap's dmtree to find the requested contiguous free
 *		space.  for request sizes that are larger than a dmap, the
 *		requested free space will start at the first block of the
 *		first dmap (i.e. blkno).
 *
 * PARAMETERS:
 *	bmp	- pointer to bmap descriptor
 *	nblocks	- actual number of contiguous free blocks to allocate.
 *	l2nb	- log2 number of contiguous free blocks to allocate.
 *	blkno	- starting block number of the dmap to start the allocation
 *		  from.
 *	results	- on successful return, set to the starting block number
 *		  of the newly allocated range.
 *
 * RETURN VALUES:
 *	0	- success
 *	-ENOSPC	- insufficient disk resources
 *	-EIO	- i/o error
 *
 * serialization: IWRITE_LOCK(ipbmap) held on entry/exit;
 */
static int
dbAllocCtl(struct bmap * bmp, s64 nblocks, int l2nb, s64 blkno, s64 * results)
{
	int rc, nb;
	s64 b, lblkno, n;
	struct metapage *mp;
	struct dmap *dp;

	/* check if the allocation request is confined to a single dmap.
	 */
	if (l2nb <= L2BPERDMAP) {
		/* get the buffer for the dmap.
		 */
		lblkno = BLKTODMAP(blkno, bmp->db_l2nbperpage);
		mp = read_metapage(bmp->db_ipbmap, lblkno, PSIZE, 0);
		if (mp == NULL)
			return -EIO;
		dp = (struct dmap *) mp->data;

		/* try to allocate the blocks.
		 */
		rc = dbAllocDmapLev(bmp, dp, (int) nblocks, l2nb, results);
		if (rc == 0)
			mark_metapage_dirty(mp);

		release_metapage(mp);

		return (rc);
	}

	/* allocation request involving multiple dmaps. it must start on
	 * a dmap boundary.
	 */
	assert((blkno & (BPERDMAP - 1)) == 0);

	/* allocate the blocks dmap by dmap.
	 */
	for (n = nblocks, b = blkno; n > 0; n -= nb, b += nb) {
		/* get the buffer for the dmap.
		 */
		lblkno = BLKTODMAP(b, bmp->db_l2nbperpage);
		mp = read_metapage(bmp->db_ipbmap, lblkno, PSIZE, 0);
		if (mp == NULL) {
			rc = -EIO;
			goto backout;
		}
		dp = (struct dmap *) mp->data;

		/* the dmap better be all free.
		 */
		if (dp->tree.stree[ROOT] != L2BPERDMAP) {
			release_metapage(mp);
			jfs_error(bmp->db_ipbmap->i_sb,
				  "dbAllocCtl: the dmap is not all free");
			rc = -EIO;
			goto backout;
		}

		/* determine how many blocks to allocate from this dmap.
		 */
		nb = min(n, (s64)BPERDMAP);

		/* allocate the blocks from the dmap.
		 */
		if ((rc = dbAllocDmap(bmp, dp, b, nb))) {
			release_metapage(mp);
			goto backout;
		}

		/* write the buffer.
		 */
		write_metapage(mp);
	}

	/* set the results (starting block number) and return.
	 */
	*results = blkno;
	return (0);

	/* something failed in handling an allocation request involving
	 * multiple dmaps.  we'll try to clean up by backing out any
	 * allocation that has already happened for this request.  if
	 * we fail in backing out the allocation, we'll mark the file
	 * system to indicate that blocks have been leaked.
	 */
      backout:

	/* try to backout the allocations dmap by dmap.
	 * note: (nblocks - n) is the number of blocks successfully
	 * allocated before the failure; those are whole dmaps.
	 */
	for (n = nblocks - n, b = blkno; n > 0;
	     n -= BPERDMAP, b += BPERDMAP) {
		/* get the buffer for this dmap.
		 */
		lblkno = BLKTODMAP(b, bmp->db_l2nbperpage);
		mp = read_metapage(bmp->db_ipbmap, lblkno, PSIZE, 0);
		if (mp == NULL) {
			/* could not back out.  mark the file system
			 * to indicate that we have leaked blocks.
			 */
			jfs_error(bmp->db_ipbmap->i_sb,
				  "dbAllocCtl: I/O Error: Block Leakage.");
			continue;
		}
		dp = (struct dmap *) mp->data;

		/* free the blocks in this dmap.
		 */
		if (dbFreeDmap(bmp, dp, b, BPERDMAP)) {
			/* could not back out.  mark the file system
			 * to indicate that we have leaked blocks.
			 */
			release_metapage(mp);
			jfs_error(bmp->db_ipbmap->i_sb,
				  "dbAllocCtl: Block Leakage.");
			continue;
		}

		/* write the buffer.
		 */
		write_metapage(mp);
	}

	return (rc);
}


/*
 * NAME:	dbAllocDmapLev()
 *
 * FUNCTION:	attempt to allocate a specified number of contiguous blocks
 *		from a specified dmap.
 *
 *		this routine checks if the contiguous blocks are available.
 *		if so, nblocks of blocks are allocated; otherwise, ENOSPC is
 *		returned.
 *
 * PARAMETERS:
 *	bmp	- pointer to bmap descriptor
 *	dp	- pointer to dmap to attempt to allocate blocks from.
 *	l2nb	- log2 number of contiguous block desired.
 *	nblocks	- actual number of contiguous block desired.
 *	results	- on successful return, set to the starting block number
 *		  of the newly allocated range.
 *
 * RETURN VALUES:
 *	0	- success
 *	-ENOSPC	- insufficient disk resources
 *	-EIO	- i/o error
 *
 * serialization: IREAD_LOCK(ipbmap), e.g., from dbAlloc(), or
 *	IWRITE_LOCK(ipbmap), e.g., dbAllocCtl(), held on entry/exit;
 */
static int
dbAllocDmapLev(struct bmap * bmp,
	       struct dmap * dp, int nblocks, int l2nb, s64 * results)
{
	s64 blkno;
	int leafidx, rc;

	/* can't be more than a dmaps worth of blocks
	 */
	assert(l2nb <= L2BPERDMAP);

	/* search the tree within the dmap page for sufficient
	 * free space.  if sufficient free space is found, dbFindLeaf()
	 * returns the index of the leaf at which free space was found.
	 */
	if (dbFindLeaf((dmtree_t *) & dp->tree, l2nb, &leafidx))
		return -ENOSPC;

	/* determine the block number within the file system corresponding
	 * to the leaf at which free space was found.
	 */
	blkno = le64_to_cpu(dp->start) + (leafidx << L2DBWORD);

	/* if not all bits of the dmap word are free, get the starting
	 * bit number within the dmap word of the required string of free
	 * bits and adjust the block number with this value.
	 */
	if (dp->tree.stree[leafidx + LEAFIND] < BUDMIN)
		blkno += dbFindBits(le32_to_cpu(dp->wmap[leafidx]), l2nb);

	/* allocate the blocks */
	if ((rc = dbAllocDmap(bmp, dp, blkno, nblocks)) == 0)
		*results = blkno;

	return (rc);
}


/*
 * NAME:	dbAllocDmap()
 *
 * FUNCTION:	adjust the disk allocation map to reflect the allocation
 *		of a specified block range within a dmap.
 *
 *		this routine allocates the specified blocks from the dmap
 *		through a call to dbAllocBits(). if the allocation of the
 *		block range causes the maximum string of free blocks within
 *		the dmap to change (i.e. the value of the root of the dmap's
 *		dmtree), this routine will cause this change to be reflected
 *		up through the appropriate levels of the dmap control pages
 *		by a call to dbAdjCtl() for the L0 dmap control page that
 *		covers this dmap.
 *
 * PARAMETERS:
 *	bmp	- pointer to bmap descriptor
 *	dp	- pointer to dmap to allocate the block range from.
* blkno - starting block number of the block to be allocated. * nblocks - number of blocks to be allocated. * * RETURN VALUES: * 0 - success * -EIO - i/o error * * serialization: IREAD_LOCK(ipbmap) or IWRITE_LOCK(ipbmap) held on entry/exit; */ static int dbAllocDmap(struct bmap * bmp, struct dmap * dp, s64 blkno, int nblocks) { s8 oldroot; int rc; /* save the current value of the root (i.e. maximum free string) * of the dmap tree. */ oldroot = dp->tree.stree[ROOT]; /* allocate the specified (blocks) bits */ dbAllocBits(bmp, dp, blkno, nblocks); /* if the root has not changed, done. */ if (dp->tree.stree[ROOT] == oldroot) return (0); /* root changed. bubble the change up to the dmap control pages. * if the adjustment of the upper level control pages fails, * backout the bit allocation (thus making everything consistent). */ if ((rc = dbAdjCtl(bmp, blkno, dp->tree.stree[ROOT], 1, 0))) dbFreeBits(bmp, dp, blkno, nblocks); return (rc); } /* * NAME: dbFreeDmap() * * FUNCTION: adjust the disk allocation map to reflect the allocation * of a specified block range within a dmap. * * this routine frees the specified blocks from the dmap through * a call to dbFreeBits(). if the deallocation of the block range * causes the maximum string of free blocks within the dmap to * change (i.e. the value of the root of the dmap's dmtree), this * routine will cause this change to be reflected up through the * appropriate levels of the dmap control pages by a call to * dbAdjCtl() for the L0 dmap control page that covers this dmap. * * PARAMETERS: * bmp - pointer to bmap descriptor * dp - pointer to dmap to free the block range from. * blkno - starting block number of the block to be freed. * nblocks - number of blocks to be freed. 
 *
 * RETURN VALUES:
 *	0	- success
 *	-EIO	- i/o error
 *
 * serialization: IREAD_LOCK(ipbmap) or IWRITE_LOCK(ipbmap) held on entry/exit;
 */
static int dbFreeDmap(struct bmap * bmp, struct dmap * dp, s64 blkno,
		      int nblocks)
{
	s8 oldroot;
	int rc = 0, word;

	/* save the current value of the root (i.e. maximum free string)
	 * of the dmap tree.
	 */
	oldroot = dp->tree.stree[ROOT];

	/* free the specified (blocks) bits */
	rc = dbFreeBits(bmp, dp, blkno, nblocks);

	/* if error or the root has not changed, done. */
	if (rc || (dp->tree.stree[ROOT] == oldroot))
		return (rc);

	/* root changed.   bubble the change up to the dmap control pages.
	 * if the adjustment of the upper level control pages fails,
	 * backout the deallocation.
	 */
	if ((rc = dbAdjCtl(bmp, blkno, dp->tree.stree[ROOT], 0, 0))) {
		word = (blkno & (BPERDMAP - 1)) >> L2DBWORD;

		/* as part of backing out the deallocation, we will have
		 * to back split the dmap tree if the deallocation caused
		 * the freed blocks to become part of a larger binary buddy
		 * system.
		 */
		if (dp->tree.stree[word] == NOFREE)
			dbBackSplit((dmtree_t *) & dp->tree, word);

		/* re-allocate the bits to restore the pre-free state. */
		dbAllocBits(bmp, dp, blkno, nblocks);
	}

	return (rc);
}


/*
 * NAME:	dbAllocBits()
 *
 * FUNCTION:	allocate a specified block range from a dmap.
 *
 *		this routine updates the dmap to reflect the working
 *		state allocation of the specified block range. it directly
 *		updates the bits of the working map and causes the adjustment
 *		of the binary buddy system described by the dmap's dmtree
 *		leaves to reflect the bits allocated.  it also causes the
 *		dmap's dmtree, as a whole, to reflect the allocated range.
 *
 * PARAMETERS:
 *	bmp	- pointer to bmap descriptor
 *	dp	- pointer to dmap to allocate bits from.
 *	blkno	- starting block number of the bits to be allocated.
 *	nblocks	- number of bits to be allocated.
 *
 * RETURN VALUES: none
 *
 * serialization: IREAD_LOCK(ipbmap) or IWRITE_LOCK(ipbmap) held on entry/exit;
 */
static void dbAllocBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
			int nblocks)
{
	int dbitno, word, rembits, nb, nwords, wbitno, nw, agno;
	dmtree_t *tp = (dmtree_t *) & dp->tree;
	int size;
	s8 *leaf;

	/* pick up a pointer to the leaves of the dmap tree */
	leaf = dp->tree.stree + LEAFIND;

	/* determine the bit number and word within the dmap of the
	 * starting block.
	 */
	dbitno = blkno & (BPERDMAP - 1);
	word = dbitno >> L2DBWORD;

	/* block range better be within the dmap */
	assert(dbitno + nblocks <= BPERDMAP);

	/* allocate the bits of the dmap's words corresponding to the block
	 * range. not all bits of the first and last words may be contained
	 * within the block range.  if this is the case, we'll work against
	 * those words (i.e. partial first and/or last) on an individual basis
	 * (a single pass), allocating the bits of interest by hand and
	 * updating the leaf corresponding to the dmap word. a single pass
	 * will be used for all dmap words fully contained within the
	 * specified range.  within this pass, the bits of all fully contained
	 * dmap words will be marked as free in a single shot and the leaves
	 * will be updated. a single leaf may describe the free space of
	 * multiple dmap words, so we may update only a subset of the actual
	 * leaves corresponding to the dmap words of the block range.
	 */
	for (rembits = nblocks; rembits > 0; rembits -= nb, dbitno += nb) {
		/* determine the bit number within the word and
		 * the number of bits within the word.
		 */
		wbitno = dbitno & (DBWORD - 1);
		nb = min(rembits, DBWORD - wbitno);

		/* check if only part of a word is to be allocated.
		 */
		if (nb < DBWORD) {
			/* allocate (set to 1) the appropriate bits within
			 * this dmap word.
			 */
			dp->wmap[word] |= cpu_to_le32(ONES << (DBWORD - nb)
						      >> wbitno);

			/* update the leaf for this dmap word. in addition
			 * to setting the leaf value to the binary buddy max
			 * of the updated dmap word, dbSplit() will split
			 * the binary system of the leaves if need be.
			 */
			dbSplit(tp, word, BUDMIN,
				dbMaxBud((u8 *) & dp->wmap[word]));

			word += 1;
		} else {
			/* one or more dmap words are fully contained
			 * within the block range.  determine how many
			 * words and allocate (set to 1) the bits of these
			 * words.
			 */
			nwords = rembits >> L2DBWORD;
			memset(&dp->wmap[word], (int) ONES, nwords * 4);

			/* determine how many bits.
			 */
			nb = nwords << L2DBWORD;

			/* now update the appropriate leaves to reflect
			 * the allocated words.
			 */
			for (; nwords > 0; nwords -= nw) {
				/* a leaf must describe at least a full
				 * word's worth of free space here; anything
				 * smaller means the leaf page is corrupt.
				 */
				if (leaf[word] < BUDMIN) {
					jfs_error(bmp->db_ipbmap->i_sb,
						  "dbAllocBits: leaf page "
						  "corrupt");
					break;
				}

				/* determine what the leaf value should be
				 * updated to as the minimum of the l2 number
				 * of bits being allocated and the l2 number
				 * of bits currently described by this leaf.
				 */
				size = min((int)leaf[word], NLSTOL2BSZ(nwords));

				/* update the leaf to reflect the allocation.
				 * in addition to setting the leaf value to
				 * NOFREE, dbSplit() will split the binary
				 * system of the leaves to reflect the current
				 * allocation (size).
				 */
				dbSplit(tp, word, size, NOFREE);

				/* get the number of dmap words handled */
				nw = BUDSIZE(size, BUDMIN);
				word += nw;
			}
		}
	}

	/* update the free count for this dmap */
	le32_add_cpu(&dp->nfree, -nblocks);

	BMAP_LOCK(bmp);

	/* if this allocation group is completely free,
	 * update the maximum allocation group number if this allocation
	 * group is the new max.
	 */
	agno = blkno >> bmp->db_agl2size;
	if (agno > bmp->db_maxag)
		bmp->db_maxag = agno;

	/* update the free count for the allocation group and map */
	bmp->db_agfree[agno] -= nblocks;
	bmp->db_nfree -= nblocks;

	BMAP_UNLOCK(bmp);
}


/*
 * NAME:	dbFreeBits()
 *
 * FUNCTION:	free a specified block range from a dmap.
 *
 *		this routine updates the dmap to reflect the working
 *		state allocation of the specified block range.
 it directly
 *		updates the bits of the working map and causes the adjustment
 *		of the binary buddy system described by the dmap's dmtree
 *		leaves to reflect the bits freed.  it also causes the dmap's
 *		dmtree, as a whole, to reflect the deallocated range.
 *
 * PARAMETERS:
 *	bmp	- pointer to bmap descriptor
 *	dp	- pointer to dmap to free bits from.
 *	blkno	- starting block number of the bits to be freed.
 *	nblocks	- number of bits to be freed.
 *
 * RETURN VALUES: 0 for success
 *
 * serialization: IREAD_LOCK(ipbmap) or IWRITE_LOCK(ipbmap) held on entry/exit;
 */
static int dbFreeBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
		       int nblocks)
{
	int dbitno, word, rembits, nb, nwords, wbitno, nw, agno;
	dmtree_t *tp = (dmtree_t *) & dp->tree;
	int rc = 0;
	int size;

	/* determine the bit number and word within the dmap of the
	 * starting block.
	 */
	dbitno = blkno & (BPERDMAP - 1);
	word = dbitno >> L2DBWORD;

	/* block range better be within the dmap.
	 */
	assert(dbitno + nblocks <= BPERDMAP);

	/* free the bits of the dmaps words corresponding to the block range.
	 * not all bits of the first and last words may be contained within
	 * the block range.  if this is the case, we'll work against those
	 * words (i.e. partial first and/or last) on an individual basis
	 * (a single pass), freeing the bits of interest by hand and updating
	 * the leaf corresponding to the dmap word. a single pass will be used
	 * for all dmap words fully contained within the specified range.
	 * within this pass, the bits of all fully contained dmap words will
	 * be marked as free in a single shot and the leaves will be updated. a
	 * single leaf may describe the free space of multiple dmap words,
	 * so we may update only a subset of the actual leaves corresponding
	 * to the dmap words of the block range.
	 *
	 * dbJoin() is used to update leaf values and will join the binary
	 * buddy system of the leaves if the new leaf values indicate this
	 * should be done.
	 */
	for (rembits = nblocks; rembits > 0; rembits -= nb, dbitno += nb) {
		/* determine the bit number within the word and
		 * the number of bits within the word.
		 */
		wbitno = dbitno & (DBWORD - 1);
		nb = min(rembits, DBWORD - wbitno);

		/* check if only part of a word is to be freed.
		 */
		if (nb < DBWORD) {
			/* free (zero) the appropriate bits within this
			 * dmap word.
			 */
			dp->wmap[word] &=
			    cpu_to_le32(~(ONES << (DBWORD - nb)
					  >> wbitno));

			/* update the leaf for this dmap word.
			 */
			rc = dbJoin(tp, word,
				    dbMaxBud((u8 *) & dp->wmap[word]));
			if (rc)
				return rc;

			word += 1;
		} else {
			/* one or more dmap words are fully contained
			 * within the block range.  determine how many
			 * words and free (zero) the bits of these words.
			 */
			nwords = rembits >> L2DBWORD;
			memset(&dp->wmap[word], 0, nwords * 4);

			/* determine how many bits.
			 */
			nb = nwords << L2DBWORD;

			/* now update the appropriate leaves to reflect
			 * the freed words.
			 */
			for (; nwords > 0; nwords -= nw) {
				/* determine what the leaf value should be
				 * updated to as the minimum of the l2 number
				 * of bits being freed and the l2 (max) number
				 * of bits that can be described by this leaf.
				 */
				size =
				    min(LITOL2BSZ
					(word, L2LPERDMAP, BUDMIN),
					NLSTOL2BSZ(nwords));

				/* update the leaf.
				 */
				rc = dbJoin(tp, word, size);
				if (rc)
					return rc;

				/* get the number of dmap words handled.
				 */
				nw = BUDSIZE(size, BUDMIN);
				word += nw;
			}
		}
	}

	/* update the free count for this dmap.
	 */
	le32_add_cpu(&dp->nfree, nblocks);

	BMAP_LOCK(bmp);

	/* update the free count for the allocation group and
	 * map.
	 */
	agno = blkno >> bmp->db_agl2size;
	bmp->db_nfree += nblocks;
	bmp->db_agfree[agno] += nblocks;

	/* check if this allocation group is not completely free and
	 * if it is currently the maximum (rightmost) allocation group.
	 * if so, establish the new maximum allocation group number by
	 * searching left for the first allocation group with allocation.
	 */
	if ((bmp->db_agfree[agno] == bmp->db_agsize &&
	     agno == bmp->db_maxag) || (agno == bmp->db_numag - 1 &&
					bmp->db_agfree[agno] ==
					(bmp->db_mapsize &
					 (BPERDMAP - 1)))) {
		while (bmp->db_maxag > 0) {
			bmp->db_maxag -= 1;
			if (bmp->db_agfree[bmp->db_maxag] !=
			    bmp->db_agsize)
				break;
		}

		/* re-establish the allocation group preference if the
		 * current preference is right of the maximum allocation
		 * group.
		 */
		if (bmp->db_agpref > bmp->db_maxag)
			bmp->db_agpref = bmp->db_maxag;
	}

	BMAP_UNLOCK(bmp);

	return 0;
}


/*
 * NAME:	dbAdjCtl()
 *
 * FUNCTION:	adjust a dmap control page at a specified level to reflect
 *		the change in a lower level dmap or dmap control page's
 *		maximum string of free blocks (i.e. a change in the root
 *		of the lower level object's dmtree) due to the allocation
 *		or deallocation of a range of blocks with a single dmap.
 *
 *		on entry, this routine is provided with the new value of
 *		the lower level dmap or dmap control page root and the
 *		starting block number of the block range whose allocation
 *		or deallocation resulted in the root change.  this range
 *		is respresented by a single leaf of the current dmapctl
 *		and the leaf will be updated with this value, possibly
 *		causing a binary buddy system within the leaves to be
 *		split or joined.  the update may also cause the dmapctl's
 *		dmtree to be updated.
 *
 *		if the adjustment of the dmap control page, itself, causes its
 *		root to change, this change will be bubbled up to the next dmap
 *		control level by a recursive call to this routine, specifying
 *		the new root value and the next dmap control page level to
 *		be adjusted.
 * PARAMETERS:
 *	bmp	- pointer to bmap descriptor
 *	blkno	- the first block of a block range within a dmap.  it is
 *		  the allocation or deallocation of this block range that
 *		  requires the dmap control page to be adjusted.
 *	newval	- the new value of the lower level dmap or dmap control
 *		  page root.
 *	alloc	- 'true' if adjustment is due to an allocation.
* level - current level of dmap control page (i.e. L0, L1, L2) to * be adjusted. * * RETURN VALUES: * 0 - success * -EIO - i/o error * * serialization: IREAD_LOCK(ipbmap) or IWRITE_LOCK(ipbmap) held on entry/exit; */ static int dbAdjCtl(struct bmap * bmp, s64 blkno, int newval, int alloc, int level) { struct metapage *mp; s8 oldroot; int oldval; s64 lblkno; struct dmapctl *dcp; int rc, leafno, ti; /* get the buffer for the dmap control page for the specified * block number and control page level. */ lblkno = BLKTOCTL(blkno, bmp->db_l2nbperpage, level); mp = read_metapage(bmp->db_ipbmap, lblkno, PSIZE, 0); if (mp == NULL) return -EIO; dcp = (struct dmapctl *) mp->data; if (dcp->leafidx != cpu_to_le32(CTLLEAFIND)) { jfs_error(bmp->db_ipbmap->i_sb, "dbAdjCtl: Corrupt dmapctl page"); release_metapage(mp); return -EIO; } /* determine the leaf number corresponding to the block and * the index within the dmap control tree. */ leafno = BLKTOCTLLEAF(blkno, dcp->budmin); ti = leafno + le32_to_cpu(dcp->leafidx); /* save the current leaf value and the current root level (i.e. * maximum l2 free string described by this dmapctl). */ oldval = dcp->stree[ti]; oldroot = dcp->stree[ROOT]; /* check if this is a control page update for an allocation. * if so, update the leaf to reflect the new leaf value using * dbSplit(); otherwise (deallocation), use dbJoin() to update * the leaf with the new value. in addition to updating the * leaf, dbSplit() will also split the binary buddy system of * the leaves, if required, and bubble new values within the * dmapctl tree, if required. similarly, dbJoin() will join * the binary buddy system of leaves and bubble new values up * the dmapctl tree as required by the new leaf value. */ if (alloc) { /* check if we are in the middle of a binary buddy * system. this happens when we are performing the * first allocation out of an allocation group that * is part (not the first part) of a larger binary * buddy system. 
if we are in the middle, back split * the system prior to calling dbSplit() which assumes * that it is at the front of a binary buddy system. */ if (oldval == NOFREE) { rc = dbBackSplit((dmtree_t *) dcp, leafno); if (rc) return rc; oldval = dcp->stree[ti]; } dbSplit((dmtree_t *) dcp, leafno, dcp->budmin, newval); } else { rc = dbJoin((dmtree_t *) dcp, leafno, newval); if (rc) return rc; } /* check if the root of the current dmap control page changed due * to the update and if the current dmap control page is not at * the current top level (i.e. L0, L1, L2) of the map. if so (i.e. * root changed and this is not the top level), call this routine * again (recursion) for the next higher level of the mapping to * reflect the change in root for the current dmap control page. */ if (dcp->stree[ROOT] != oldroot) { /* are we below the top level of the map. if so, * bubble the root up to the next higher level. */ if (level < bmp->db_maxlevel) { /* bubble up the new root of this dmap control page to * the next level. */ if ((rc = dbAdjCtl(bmp, blkno, dcp->stree[ROOT], alloc, level + 1))) { /* something went wrong in bubbling up the new * root value, so backout the changes to the * current dmap control page. */ if (alloc) { dbJoin((dmtree_t *) dcp, leafno, oldval); } else { /* the dbJoin() above might have * caused a larger binary buddy system * to form and we may now be in the * middle of it. if this is the case, * back split the buddies. */ if (dcp->stree[ti] == NOFREE) dbBackSplit((dmtree_t *) dcp, leafno); dbSplit((dmtree_t *) dcp, leafno, dcp->budmin, oldval); } /* release the buffer and return the error. */ release_metapage(mp); return (rc); } } else { /* we're at the top level of the map. update * the bmap control page to reflect the size * of the maximum free buddy system. 
*/ assert(level == bmp->db_maxlevel); if (bmp->db_maxfreebud != oldroot) { jfs_error(bmp->db_ipbmap->i_sb, "dbAdjCtl: the maximum free buddy is " "not the old root"); } bmp->db_maxfreebud = dcp->stree[ROOT]; } } /* write the buffer. */ write_metapage(mp); return (0); } /* * NAME: dbSplit() * * FUNCTION: update the leaf of a dmtree with a new value, splitting * the leaf from the binary buddy system of the dmtree's * leaves, as required. * * PARAMETERS: * tp - pointer to the tree containing the leaf. * leafno - the number of the leaf to be updated. * splitsz - the size the binary buddy system starting at the leaf * must be split to, specified as the log2 number of blocks. * newval - the new value for the leaf. * * RETURN VALUES: none * * serialization: IREAD_LOCK(ipbmap) or IWRITE_LOCK(ipbmap) held on entry/exit; */ static void dbSplit(dmtree_t * tp, int leafno, int splitsz, int newval) { int budsz; int cursz; s8 *leaf = tp->dmt_stree + le32_to_cpu(tp->dmt_leafidx); /* check if the leaf needs to be split. */ if (leaf[leafno] > tp->dmt_budmin) { /* the split occurs by cutting the buddy system in half * at the specified leaf until we reach the specified * size. pick up the starting split size (current size * - 1 in l2) and the corresponding buddy size. */ cursz = leaf[leafno] - 1; budsz = BUDSIZE(cursz, tp->dmt_budmin); /* split until we reach the specified size. */ while (cursz >= splitsz) { /* update the buddy's leaf with its new value. */ dbAdjTree(tp, leafno ^ budsz, cursz); /* on to the next size and buddy. */ cursz -= 1; budsz >>= 1; } } /* adjust the dmap tree to reflect the specified leaf's new * value. */ dbAdjTree(tp, leafno, newval); } /* * NAME: dbBackSplit() * * FUNCTION: back split the binary buddy system of dmtree leaves * that hold a specified leaf until the specified leaf * starts its own binary buddy system. 
 *
 *		the allocators typically perform allocations at the start
 *		of binary buddy systems and dbSplit() is used to accomplish
 *		any required splits.  in some cases, however, allocation
 *		may occur in the middle of a binary system and requires a
 *		back split, with the split proceeding out from the middle of
 *		the system (less efficient) rather than the start of the
 *		system (more efficient).  the cases in which a back split
 *		is required are rare and are limited to the first allocation
 *		within an allocation group which is a part (not first part)
 *		of a larger binary buddy system and a few exception cases
 *		in which a previous join operation must be backed out.
 *
 * PARAMETERS:
 *	tp	- pointer to the tree containing the leaf.
 *	leafno	- the number of the leaf to be updated.
 *
 * RETURN VALUES:
 *	0	- success
 *	-EIO	- corrupted block map detected (bad buddy size or leaf value)
 *
 * serialization: IREAD_LOCK(ipbmap) or IWRITE_LOCK(ipbmap) held on entry/exit;
 */
static int dbBackSplit(dmtree_t * tp, int leafno)
{
	int budsz, bud, w, bsz, size;
	int cursz;
	s8 *leaf = tp->dmt_stree + le32_to_cpu(tp->dmt_leafidx);

	/* leaf should be part (not first part) of a binary
	 * buddy system.
	 */
	assert(leaf[leafno] == NOFREE);

	/* the back split is accomplished by iteratively finding the leaf
	 * that starts the buddy system that contains the specified leaf and
	 * splitting that system in two.  this iteration continues until
	 * the specified leaf becomes the start of a buddy system.
	 *
	 * determine maximum possible l2 size for the specified leaf.
	 */
	size =
	    LITOL2BSZ(leafno, le32_to_cpu(tp->dmt_l2nleafs),
		      tp->dmt_budmin);

	/* determine the number of leaves covered by this size.  this
	 * is the buddy size that we will start with as we search for
	 * the buddy system that contains the specified leaf.
	 */
	budsz = BUDSIZE(size, tp->dmt_budmin);

	/* back split.
	 */
	while (leaf[leafno] == NOFREE) {
		/* find the leftmost buddy leaf.
		 * note: bud is assigned inside the loop body before the
		 * update expression first reads it (the update runs only
		 * after a full iteration).
		 */
		for (w = leafno, bsz = budsz;; bsz <<= 1,
		     w = (w < bud) ? w : bud) {
			/* bail out instead of walking off the leaf array:
			 * a buddy size at or beyond the leaf count means
			 * the on-disk map is corrupt.
			 */
			if (bsz >= le32_to_cpu(tp->dmt_nleafs)) {
				jfs_err("JFS: block map error in dbBackSplit");
				return -EIO;
			}

			/* determine the buddy.
			 */
			bud = w ^ bsz;

			/* check if this buddy is the start of the system.
			 */
			if (leaf[bud] != NOFREE) {
				/* split the leaf at the start of the
				 * system in two.
				 */
				cursz = leaf[bud] - 1;
				dbSplit(tp, bud, cursz, cursz);
				break;
			}
		}
	}

	if (leaf[leafno] != size) {
		jfs_err("JFS: wrong leaf value in dbBackSplit");
		return -EIO;
	}
	return 0;
}


/*
 * NAME:	dbJoin()
 *
 * FUNCTION:	update the leaf of a dmtree with a new value, joining
 *		the leaf with other leaves of the dmtree into a multi-leaf
 *		binary buddy system, as required.
 *
 * PARAMETERS:
 *	tp	- pointer to the tree containing the leaf.
 *	leafno	- the number of the leaf to be updated.
 *	newval	- the new value for the leaf.
 *
 * RETURN VALUES:
 *	0	- success
 *	-EIO	- inconsistent buddy leaf (corrupted map)
 */
static int dbJoin(dmtree_t * tp, int leafno, int newval)
{
	int budsz, buddy;
	s8 *leaf;

	/* can the new leaf value require a join with other leaves ?
	 */
	if (newval >= tp->dmt_budmin) {
		/* pickup a pointer to the leaves of the tree.
		 */
		leaf = tp->dmt_stree + le32_to_cpu(tp->dmt_leafidx);

		/* try to join the specified leaf into a large binary
		 * buddy system.  the join proceeds by attempting to join
		 * the specified leafno with its buddy (leaf) at new value.
		 * if the join occurs, we attempt to join the left leaf
		 * of the joined buddies with its buddy at new value + 1.
		 * we continue to join until we find a buddy that cannot be
		 * joined (does not have a value equal to the size of the
		 * last join) or until all leaves have been joined into a
		 * single system.
		 *
		 * get the buddy size (number of words covered) of
		 * the new value.
		 */
		budsz = BUDSIZE(newval, tp->dmt_budmin);

		/* try to join.
		 */
		while (budsz < le32_to_cpu(tp->dmt_nleafs)) {
			/* get the buddy leaf.
			 */
			buddy = leafno ^ budsz;

			/* if the leaf's new value is greater than its
			 * buddy's value, we join no more.
			 */
			if (newval > leaf[buddy])
				break;

			/* It shouldn't be less: equal-valued buddies are
			 * the invariant of a consistent buddy system, so a
			 * smaller buddy means the map is corrupt.
			 */
			if (newval < leaf[buddy])
				return -EIO;

			/* check which (leafno or buddy) is the left buddy.
			 * the left buddy gets to claim the blocks resulting
			 * from the join while the right gets to claim none.
			 * the left buddy is also eligible to participate in
			 * a join at the next higher level while the right
			 * is not.
			 *
			 */
			if (leafno < buddy) {
				/* leafno is the left buddy.
				 */
				dbAdjTree(tp, buddy, NOFREE);
			} else {
				/* buddy is the left buddy and becomes
				 * leafno.
				 */
				dbAdjTree(tp, leafno, NOFREE);
				leafno = buddy;
			}

			/* on to try the next join.
			 */
			newval += 1;
			budsz <<= 1;
		}
	}

	/* update the leaf value.
	 */
	dbAdjTree(tp, leafno, newval);

	return 0;
}


/*
 * NAME:	dbAdjTree()
 *
 * FUNCTION:	update a leaf of a dmtree with a new value, adjusting
 *		the dmtree, as required, to reflect the new leaf value.
 *		the combination of any buddies must already be done before
 *		this is called.
 *
 * PARAMETERS:
 *	tp	- pointer to the tree to be adjusted.
 *	leafno	- the number of the leaf to be updated.
 *	newval	- the new value for the leaf.
 *
 * RETURN VALUES: none
 */
static void dbAdjTree(dmtree_t * tp, int leafno, int newval)
{
	int lp, pp, k;
	int max;

	/* pick up the index of the leaf for this leafno.
	 */
	lp = leafno + le32_to_cpu(tp->dmt_leafidx);

	/* is the current value the same as the old value ?  if so,
	 * there is nothing to do.
	 */
	if (tp->dmt_stree[lp] == newval)
		return;

	/* set the new value.
	 */
	tp->dmt_stree[lp] = newval;

	/* bubble the new value up the tree as required.
	 * the index arithmetic below relies on the stree being stored
	 * as a flattened 4-ary tree (each parent covers 4 children).
	 */
	for (k = 0; k < le32_to_cpu(tp->dmt_height); k++) {
		/* get the index of the first leaf of the 4 leaf
		 * group containing the specified leaf (leafno).
		 */
		lp = ((lp - 1) & ~0x03) + 1;

		/* get the index of the parent of this 4 leaf group.
		 */
		pp = (lp - 1) >> 2;

		/* determine the maximum of the 4 leaves.
		 */
		max = TREEMAX(&tp->dmt_stree[lp]);

		/* if the maximum of the 4 is the same as the
		 * parent's value, we're done.
		 */
		if (tp->dmt_stree[pp] == max)
			break;

		/* parent gets new value.
		 */
		tp->dmt_stree[pp] = max;

		/* parent becomes leaf for next go-round.
		 */
		lp = pp;
	}
}


/*
 * NAME:	dbFindLeaf()
 *
 * FUNCTION:	search a dmtree_t for sufficient free blocks, returning
 *		the index of a leaf describing the free blocks if
 *		sufficient free blocks are found.
 *
 *		the search starts at the top of the dmtree_t tree and
 *		proceeds down the tree to the leftmost leaf with sufficient
 *		free space.
 *
 * PARAMETERS:
 *	tp	- pointer to the tree to be searched.
 *	l2nb	- log2 number of free blocks to search for.
 *	leafidx	- return pointer to be set to the index of the leaf
 *		  describing at least l2nb free blocks if sufficient
 *		  free blocks are found.
 *
 * RETURN VALUES:
 *	0	- success
 *	-ENOSPC	- insufficient free blocks.
 */
static int dbFindLeaf(dmtree_t * tp, int l2nb, int *leafidx)
{
	int ti, n = 0, k, x = 0;

	/* first check the root of the tree to see if there is
	 * sufficient free space.
	 */
	if (l2nb > tp->dmt_stree[ROOT])
		return -ENOSPC;

	/* sufficient free space available. now search down the tree
	 * starting at the next level for the leftmost leaf that
	 * describes sufficient free space.
	 */
	for (k = le32_to_cpu(tp->dmt_height), ti = 1;
	     k > 0; k--, ti = ((ti + n) << 2) + 1) {
		/* search the four nodes at this level, starting from
		 * the left.
		 */
		for (x = ti, n = 0; n < 4; n++) {
			/* sufficient free space found.  move to the next
			 * level (or quit if this is the last level).
			 */
			if (l2nb <= tp->dmt_stree[x + n])
				break;
		}

		/* better have found something since the higher
		 * levels of the tree said it was here.
		 */
		assert(n < 4);
	}

	/* set the return to the leftmost leaf describing sufficient
	 * free space.
	 *
	 * NOTE(review): the computed index is not range-checked; a
	 * corrupted on-disk dmtree could make it negative or out of
	 * bounds — TODO confirm callers validate before dereferencing.
	 */
	*leafidx = x + n - le32_to_cpu(tp->dmt_leafidx);

	return (0);
}


/*
 * NAME:	dbFindBits()
 *
 * FUNCTION:	find a specified number of binary buddy free bits within a
 *		dmap bitmap word value.
 *
 *		this routine searches the bitmap value for (1 << l2nb) free
 *		bits at (1 << l2nb) alignments within the value.
 *
 * PARAMETERS:
 *	word	-  dmap bitmap word value.
 *	l2nb	-  number of free bits specified as a log2 number.
 *
 * RETURN VALUES:
 *	starting bit number of free bits.
 */
static int dbFindBits(u32 word, int l2nb)
{
	int bitno, nb;
	u32 mask;

	/* get the number of bits.
	 */
	nb = 1 << l2nb;
	assert(nb <= DBWORD);

	/* complement the word so we can use a mask (i.e. 0s represent
	 * free bits) and compute the mask.
	 */
	word = ~word;
	mask = ONES << (DBWORD - nb);

	/* scan the word for nb free bits at nb alignments.
	 * the loop terminates because the mask shifts right by nb each
	 * pass and eventually becomes zero; the caller must guarantee
	 * the bits exist (asserted below).
	 */
	for (bitno = 0; mask != 0; bitno += nb, mask >>= nb) {
		if ((mask & word) == mask)
			break;
	}

	ASSERT(bitno < 32);

	/* return the bit number.
	 */
	return (bitno);
}


/*
 * NAME:	dbMaxBud(u8 *cp)
 *
 * FUNCTION:	determine the largest binary buddy string of free
 *		bits within 32-bits of the map.
 *
 *		NOTE: inspects the word as raw bytes/halves; the all-zero
 *		and half-zero tests are symmetric in byte order, so the
 *		result is independent of host endianness — presumably why
 *		no byte-swap is done here; TODO confirm.
 *
 * PARAMETERS:
 *	cp	-  pointer to the 32-bit value.
 *
 * RETURN VALUES:
 *	largest binary buddy of free bits within a dmap word.
 */
static int dbMaxBud(u8 * cp)
{
	signed char tmp1, tmp2;

	/* check if the wmap word is all free.  if so, the
	 * free buddy size is BUDMIN.
	 */
	if (*((uint *) cp) == 0)
		return (BUDMIN);

	/* check if the wmap word is half free.  if so, the
	 * free buddy size is BUDMIN-1.
	 */
	if (*((u16 *) cp) == 0 || *((u16 *) cp + 1) == 0)
		return (BUDMIN - 1);

	/* not all free or half free.  determine the free buddy
	 * size thru table lookup using quarters of the wmap word.
	 */
	tmp1 = max(budtab[cp[2]], budtab[cp[3]]);
	tmp2 = max(budtab[cp[0]], budtab[cp[1]]);
	return (max(tmp1, tmp2));
}


/*
 * NAME:	cnttz(uint word)
 *
 * FUNCTION:	determine the number of trailing zeros within a 32-bit
 *		value.
 *
 * PARAMETERS:
 *	word	- 32-bit value to be examined.
 *
 * RETURN VALUES:
 *	count of trailing zeros (32 if word is zero)
 */
static int cnttz(u32 word)
{
	int n;

	for (n = 0; n < 32; n++, word >>= 1) {
		if (word & 0x01)
			break;
	}

	return (n);
}


/*
 * NAME:	cntlz(u32 value)
 *
 * FUNCTION:	determine the number of leading zeros within a 32-bit
 *		value.
 *
 * PARAMETERS:
 *	value	- 32-bit value to be examined.
 *
 * RETURN VALUES:
 *	count of leading zeros (32 if value is zero)
 */
static int cntlz(u32 value)
{
	int n;

	for (n = 0; n < 32; n++, value <<= 1) {
		if (value & HIGHORDER)
			break;
	}
	return (n);
}


/*
 * NAME:	blkstol2(s64 nb)
 *
 * FUNCTION:	convert a block count to its log2 value. if the block
 *		count is not a l2 multiple, it is rounded up to the next
 *		larger l2 multiple.
 *
 * PARAMETERS:
 *	nb	- number of blocks
 *
 * RETURN VALUES:
 *	log2 number of blocks
 */
static int blkstol2(s64 nb)
{
	int l2nb;
	s64 mask;		/* meant to be signed: the arithmetic right
				 * shift accumulates the high bits, so once
				 * the leading one of nb is found, ~mask is
				 * exactly the bits below it (the round-up
				 * test).
				 */

	mask = (s64) 1 << (64 - 1);

	/* count the leading bits.
	 */
	for (l2nb = 0; l2nb < 64; l2nb++, mask >>= 1) {
		/* leading bit found.
		 */
		if (nb & mask) {
			/* determine the l2 value.
			 */
			l2nb = (64 - 1) - l2nb;

			/* check if we need to round up.
			 */
			if (~mask & nb)
				l2nb++;

			return (l2nb);
		}
	}
	assert(0);
	return 0;		/* fix compiler warning */
}


/*
 * NAME:	dbAllocBottomUp()
 *
 * FUNCTION:	alloc the specified block range from the working block
 *		allocation map.
 *
 *		the blocks will be alloc from the working map one dmap
 *		at a time.
 *
 * PARAMETERS:
 *	ip	- pointer to in-core inode;
 *	blkno	- starting block number to be allocated.
 *	nblocks	- number of blocks to be allocated.
 *
 * RETURN VALUES:
 *	0	- success
 *	-EIO	- i/o error
 */
int dbAllocBottomUp(struct inode *ip, s64 blkno, s64 nblocks)
{
	struct metapage *mp;
	struct dmap *dp;
	int nb, rc;
	s64 lblkno, rem;
	struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap;
	struct bmap *bmp = JFS_SBI(ip->i_sb)->bmap;

	IREAD_LOCK(ipbmap, RDWRLOCK_DMAP);

	/* block to be allocated better be within the mapsize. */
	ASSERT(nblocks <= bmp->db_mapsize - blkno);

	/*
	 * allocate the blocks a dmap at a time.
	 */
	mp = NULL;
	for (rem = nblocks; rem > 0; rem -= nb, blkno += nb) {
		/* release previous dmap if any */
		if (mp) {
			write_metapage(mp);
		}

		/* get the buffer for the current dmap.
		 */
		lblkno = BLKTODMAP(blkno, bmp->db_l2nbperpage);
		mp = read_metapage(ipbmap, lblkno, PSIZE, 0);
		if (mp == NULL) {
			IREAD_UNLOCK(ipbmap);
			return -EIO;
		}
		dp = (struct dmap *) mp->data;

		/* determine the number of blocks to be allocated from
		 * this dmap.
		 */
		nb = min(rem, BPERDMAP - (blkno & (BPERDMAP - 1)));

		/* allocate the blocks.
		 */
		if ((rc = dbAllocDmapBU(bmp, dp, blkno, nb))) {
			release_metapage(mp);
			IREAD_UNLOCK(ipbmap);
			return (rc);
		}
	}

	/* write the last buffer. */
	write_metapage(mp);

	IREAD_UNLOCK(ipbmap);

	return (0);
}


/*
 * dbAllocDmapBU(): allocate nblocks at blkno within a single dmap and
 * propagate the resulting summary-tree root change to the dmap control
 * pages, backing out the bit allocation on failure.
 */
static int dbAllocDmapBU(struct bmap * bmp, struct dmap * dp, s64 blkno,
			 int nblocks)
{
	int rc;
	int dbitno, word, rembits, nb, nwords, wbitno, agno;
	s8 oldroot, *leaf;
	struct dmaptree *tp = (struct dmaptree *) & dp->tree;

	/* save the current value of the root (i.e. maximum free string)
	 * of the dmap tree.
	 */
	oldroot = tp->stree[ROOT];

	/* pick up a pointer to the leaves of the dmap tree */
	leaf = tp->stree + LEAFIND;

	/* determine the bit number and word within the dmap of the
	 * starting block.
	 */
	dbitno = blkno & (BPERDMAP - 1);
	word = dbitno >> L2DBWORD;

	/* block range better be within the dmap */
	assert(dbitno + nblocks <= BPERDMAP);

	/* allocate the bits of the dmap's words corresponding to the block
	 * range. not all bits of the first and last words may be contained
	 * within the block range.  if this is the case, we'll work against
	 * those words (i.e. partial first and/or last) on an individual basis
	 * (a single pass), allocating the bits of interest by hand and
	 * updating the leaf corresponding to the dmap word.  a single pass
	 * will be used for all dmap words fully contained within the
	 * specified range.  within this pass, the bits of all fully contained
	 * dmap words will be marked as allocated in a single shot and the
	 * leaves will be updated.  a single leaf may describe the free space
	 * of multiple dmap words, so we may update only a subset of the
	 * actual leaves corresponding to the dmap words of the block range.
	 */
	for (rembits = nblocks; rembits > 0; rembits -= nb, dbitno += nb) {
		/* determine the bit number within the word and
		 * the number of bits within the word.
		 */
		wbitno = dbitno & (DBWORD - 1);
		nb = min(rembits, DBWORD - wbitno);

		/* check if only part of a word is to be allocated.
		 */
		if (nb < DBWORD) {
			/* allocate (set to 1) the appropriate bits within
			 * this dmap word.
			 */
			dp->wmap[word] |= cpu_to_le32(ONES << (DBWORD - nb)
						      >> wbitno);

			word++;
		} else {
			/* one or more dmap words are fully contained
			 * within the block range.  determine how many
			 * words and allocate (set to 1) the bits of these
			 * words.
			 */
			nwords = rembits >> L2DBWORD;
			memset(&dp->wmap[word], (int) ONES, nwords * 4);

			/* determine how many bits */
			nb = nwords << L2DBWORD;
			word += nwords;
		}
	}

	/* update the free count for this dmap */
	le32_add_cpu(&dp->nfree, -nblocks);

	/* reconstruct summary tree */
	dbInitDmapTree(dp);

	BMAP_LOCK(bmp);

	/* if this allocation group is completely free,
	 * update the highest active allocation group number
	 * if this allocation group is the new max.
	 */
	agno = blkno >> bmp->db_agl2size;
	if (agno > bmp->db_maxag)
		bmp->db_maxag = agno;

	/* update the free count for the allocation group and map */
	bmp->db_agfree[agno] -= nblocks;
	bmp->db_nfree -= nblocks;

	BMAP_UNLOCK(bmp);

	/* if the root has not changed, done. */
	if (tp->stree[ROOT] == oldroot)
		return (0);

	/* root changed.   bubble the change up to the dmap control pages.
	 * if the adjustment of the upper level control pages fails,
	 * backout the bit allocation (thus making everything consistent).
	 */
	if ((rc = dbAdjCtl(bmp, blkno, tp->stree[ROOT], 1, 0)))
		dbFreeBits(bmp, dp, blkno, nblocks);

	return (rc);
}


/*
 * NAME:	dbExtendFS()
 *
 * FUNCTION:	extend bmap from blkno for nblocks;
 *		dbExtendFS() updates bmap ready for dbAllocBottomUp();
 *
 * L2
 *  |
 *   L1---------------------------------L1
 *    |                                  |
 *     L0---------L0---------L0           L0---------L0---------L0
 *      |          |          |            |          |          |
 *       d0,...,dn  d0,...,dn  d0,...,dn    d0,...,dn  d0,...,dn  d0,.,dm;
 * L2L1L0d0,...,dnL0d0,...,dnL0d0,...,dnL1L0d0,...,dnL0d0,...,dnL0d0,..dm
 *
 * <---old---><----------------------------extend----------------------->
 */
int dbExtendFS(struct inode *ipbmap, s64 blkno, s64 nblocks)
{
	struct jfs_sb_info *sbi = JFS_SBI(ipbmap->i_sb);
	int nbperpage = sbi->nbperpage;
	int i, i0 = true, j, j0 = true, k, n;
	s64 newsize;
	s64 p;
	struct metapage *mp, *l2mp, *l1mp = NULL, *l0mp = NULL;
	struct dmapctl *l2dcp, *l1dcp, *l0dcp;
	struct dmap *dp;
	s8 *l0leaf, *l1leaf, *l2leaf;
	struct bmap *bmp = sbi->bmap;
	int agno, l2agsize, oldl2agsize;
	s64 ag_rem;

	newsize = blkno + nblocks;

	jfs_info("dbExtendFS: blkno:%Ld nblocks:%Ld newsize:%Ld",
		 (long long) blkno, (long long) nblocks, (long long) newsize);

	/*
	 *	initialize bmap control page.
	 *
	 * all the data in bmap control page should exclude
	 * the mkfs hidden dmap page.
	 */

	/* update mapsize */
	bmp->db_mapsize = newsize;
	bmp->db_maxlevel = BMAPSZTOLEV(bmp->db_mapsize);

	/* compute new AG size */
	l2agsize = dbGetL2AGSize(newsize);
	oldl2agsize = bmp->db_agl2size;

	bmp->db_agl2size = l2agsize;
	bmp->db_agsize = 1 << l2agsize;

	/* compute new number of AG */
	agno = bmp->db_numag;
	bmp->db_numag = newsize >> l2agsize;
	bmp->db_numag += ((u32) newsize % (u32) bmp->db_agsize) ? 1 : 0;

	/*
	 *	reconfigure db_agfree[]
	 * from old AG configuration to new AG configuration;
	 *
	 * coalesce contiguous k (newAGSize/oldAGSize) AGs;
	 * i.e., (AGi, ..., AGj) where i = k*n and j = k*(n+1) - 1 to AGn;
	 * note: new AG size = old AG size * (2**x).
	 */
	if (l2agsize == oldl2agsize)
		goto extend;
	k = 1 << (l2agsize - oldl2agsize);
	ag_rem = bmp->db_agfree[0];	/* save agfree[0] */
	for (i = 0, n = 0; i < agno; n++) {
		bmp->db_agfree[n] = 0;	/* init collection point */

		/* coalesce contiguous k AGs; */
		for (j = 0; j < k && i < agno; j++, i++) {
			/* merge AGi to AGn */
			bmp->db_agfree[n] += bmp->db_agfree[i];
		}
	}
	bmp->db_agfree[0] += ag_rem;	/* restore agfree[0] */

	for (; n < MAXAG; n++)
		bmp->db_agfree[n] = 0;

	/*
	 * update highest active ag number
	 */

	bmp->db_maxag = bmp->db_maxag / k;

	/*
	 *	extend bmap
	 *
	 * update bit maps and corresponding level control pages;
	 * global control page db_nfree, db_agfree[agno], db_maxfreebud;
	 */
      extend:
	/* get L2 page */
	p = BMAPBLKNO + nbperpage;	/* L2 page */
	l2mp = read_metapage(ipbmap, p, PSIZE, 0);
	if (!l2mp) {
		jfs_error(ipbmap->i_sb,
			  "dbExtendFS: L2 page could not be read");
		return -EIO;
	}
	l2dcp = (struct dmapctl *) l2mp->data;

	/* compute start L1 */
	k = blkno >> L2MAXL1SIZE;
	l2leaf = l2dcp->stree + CTLLEAFIND + k;
	p = BLKTOL1(blkno, sbi->l2nbperpage);	/* L1 page */

	/*
	 * extend each L1 in L2
	 */
	for (; k < LPERCTL; k++, p += nbperpage) {
		/* get L1 page: the first L1 visited (j0) straddles the old
		 * end of the map and must be read from disk; subsequent
		 * L1s are brand new and only need a buffer assigned.
		 */
		if (j0) {
			/* read in L1 page: (blkno & (MAXL1SIZE - 1)) */
			l1mp = read_metapage(ipbmap, p, PSIZE, 0);
			if (l1mp == NULL)
				goto errout;
			l1dcp = (struct dmapctl *) l1mp->data;

			/* compute start L0 */
			j = (blkno & (MAXL1SIZE - 1)) >> L2MAXL0SIZE;
			l1leaf = l1dcp->stree + CTLLEAFIND + j;
			p = BLKTOL0(blkno, sbi->l2nbperpage);
			j0 = false;
		} else {
			/* assign/init L1 page */
			l1mp = get_metapage(ipbmap, p, PSIZE, 0);
			if (l1mp == NULL)
				goto errout;

			l1dcp = (struct dmapctl *) l1mp->data;

			/* compute start L0 */
			j = 0;
			l1leaf = l1dcp->stree + CTLLEAFIND;
			p += nbperpage;	/* 1st L0 of L1.k */
		}

		/*
		 * extend each L0 in L1
		 */
		for (; j < LPERCTL; j++) {
			/* get L0 page */
			if (i0) {
				/* read in L0 page: (blkno & (MAXL0SIZE - 1)) */

				l0mp = read_metapage(ipbmap, p, PSIZE, 0);
				if (l0mp == NULL)
					goto errout;
				l0dcp = (struct dmapctl *) l0mp->data;

				/* compute start dmap */
				i = (blkno & (MAXL0SIZE - 1)) >> L2BPERDMAP;
				l0leaf = l0dcp->stree + CTLLEAFIND + i;
				p = BLKTODMAP(blkno, sbi->l2nbperpage);
				i0 = false;
			} else {
				/* assign/init L0 page */
				l0mp = get_metapage(ipbmap, p, PSIZE, 0);
				if (l0mp == NULL)
					goto errout;

				l0dcp = (struct dmapctl *) l0mp->data;

				/* compute start dmap */
				i = 0;
				l0leaf = l0dcp->stree + CTLLEAFIND;
				p += nbperpage;	/* 1st dmap of L0.j */
			}

			/*
			 * extend each dmap in L0
			 */
			for (; i < LPERCTL; i++) {
				/*
				 * reconstruct the dmap page, and
				 * initialize corresponding parent L0 leaf
				 */
				if ((n = blkno & (BPERDMAP - 1))) {
					/* read in dmap page: */
					mp = read_metapage(ipbmap, p,
							   PSIZE, 0);
					if (mp == NULL)
						goto errout;
					n = min(nblocks, (s64)BPERDMAP - n);
				} else {
					/* assign/init dmap page
					 *
					 * NOTE(review): this branch uses
					 * read_metapage() rather than
					 * get_metapage() unlike the L0/L1
					 * cases above — presumably to pick
					 * up mkfs-written content; TODO
					 * confirm this asymmetry is
					 * intentional.
					 */
					mp = read_metapage(ipbmap, p,
							   PSIZE, 0);
					if (mp == NULL)
						goto errout;

					n = min(nblocks, (s64)BPERDMAP);
				}

				dp = (struct dmap *) mp->data;
				*l0leaf = dbInitDmap(dp, blkno, n);

				bmp->db_nfree += n;
				agno = le64_to_cpu(dp->start) >> l2agsize;
				bmp->db_agfree[agno] += n;

				write_metapage(mp);

				l0leaf++;
				p += nbperpage;

				blkno += n;
				nblocks -= n;
				if (nblocks == 0)
					break;
			}	/* for each dmap in a L0 */

			/*
			 * build current L0 page from its leaves, and
			 * initialize corresponding parent L1 leaf
			 */
			*l1leaf = dbInitDmapCtl(l0dcp, 0, ++i);
			write_metapage(l0mp);
			l0mp = NULL;

			if (nblocks)
				l1leaf++;	/* continue for next L0 */
			else {
				/* more than 1 L0 ? */
				if (j > 0)
					break;	/* build L1 page */
				else {
					/* summarize in global bmap page */
					bmp->db_maxfreebud = *l1leaf;
					release_metapage(l1mp);
					release_metapage(l2mp);
					goto finalize;
				}
			}
		}		/* for each L0 in a L1 */

		/*
		 * build current L1 page from its leaves, and
		 * initialize corresponding parent L2 leaf
		 */
		*l2leaf = dbInitDmapCtl(l1dcp, 1, ++j);
		write_metapage(l1mp);
		l1mp = NULL;

		if (nblocks)
			l2leaf++;	/* continue for next L1 */
		else {
			/* more than 1 L1 ?
			 */
			if (k > 0)
				break;	/* build L2 page */
			else {
				/* summarize in global bmap page */
				bmp->db_maxfreebud = *l2leaf;
				release_metapage(l2mp);
				goto finalize;
			}
		}
	}			/* for each L1 in a L2 */

	jfs_error(ipbmap->i_sb,
		  "dbExtendFS: function has not returned as expected");
errout:
	/* drop any still-held control pages before reporting the error */
	if (l0mp)
		release_metapage(l0mp);
	if (l1mp)
		release_metapage(l1mp);
	release_metapage(l2mp);
	return -EIO;

	/*
	 *	finalize bmap control page
	 */
finalize:

	return 0;
}


/*
 *	dbFinalizeBmap()
 *
 * Recompute the bmap control page's allocation-group policy fields
 * (db_agpref, db_aglevel, db_agheight, db_agwidth, db_agstart) after the
 * map has been extended.
 */
void dbFinalizeBmap(struct inode *ipbmap)
{
	struct bmap *bmp = JFS_SBI(ipbmap->i_sb)->bmap;
	int actags, inactags, l2nl;
	s64 ag_rem, actfree, inactfree, avgfree;
	int i, n;

	/*
	 *	finalize bmap control page
	 */
//finalize:
	/*
	 * compute db_agpref: preferred ag to allocate from
	 * (the leftmost ag with average free space in it);
	 */
//agpref:
	/* get the number of active ags and inactive ags */
	actags = bmp->db_maxag + 1;
	inactags = bmp->db_numag - actags;
	ag_rem = bmp->db_mapsize & (bmp->db_agsize - 1);	/* ??? */

	/* determine how many blocks are in the inactive allocation
	 * groups. in doing this, we must account for the fact that
	 * the rightmost group might be a partial group (i.e. file
	 * system size is not a multiple of the group size).
	 */
	inactfree = (inactags && ag_rem) ?
	    ((inactags - 1) << bmp->db_agl2size) + ag_rem
	    : inactags << bmp->db_agl2size;

	/* determine how many free blocks are in the active
	 * allocation groups plus the average number of free blocks
	 * within the active ags.
	 */
	actfree = bmp->db_nfree - inactfree;
	avgfree = (u32) actfree / (u32) actags;

	/* if the preferred allocation group does not have average free
	 * space, re-establish the preferred group as the leftmost
	 * group with average free space.
	 */
	if (bmp->db_agfree[bmp->db_agpref] < avgfree) {
		for (bmp->db_agpref = 0; bmp->db_agpref < actags;
		     bmp->db_agpref++) {
			if (bmp->db_agfree[bmp->db_agpref] >= avgfree)
				break;
		}
		if (bmp->db_agpref >= bmp->db_numag) {
			jfs_error(ipbmap->i_sb,
				  "cannot find ag with average freespace");
		}
	}

	/*
	 * compute db_aglevel, db_agheight, db_width, db_agstart:
	 * an ag is covered in aglevel dmapctl summary tree,
	 * at agheight level height (from leaf) with agwidth number of nodes
	 * each, which starts at agstart index node of the summary tree node
	 * array;
	 */
	bmp->db_aglevel = BMAPSZTOLEV(bmp->db_agsize);
	l2nl =
	    bmp->db_agl2size - (L2BPERDMAP + bmp->db_aglevel * L2LPERCTL);
	bmp->db_agheight = l2nl >> 1;
	bmp->db_agwidth = 1 << (l2nl - (bmp->db_agheight << 1));
	for (i = 5 - bmp->db_agheight, bmp->db_agstart = 0, n = 1; i > 0;
	     i--) {
		bmp->db_agstart += n;
		n <<= 2;
	}
}


/*
 * NAME:	dbInitDmap()/ujfs_idmap_page()
 *
 * FUNCTION:	initialize working/persistent bitmap of the dmap page
 *		for the specified number of blocks:
 *
 *		at entry, the bitmaps had been initialized as free (ZEROS);
 *		The number of blocks will only account for the actually
 *		existing blocks.
 Blocks which don't actually exist in
 *		the aggregate will be marked as allocated (ONES);
 *
 * PARAMETERS:
 *	dp	- pointer to page of map
 *	Blkno	- starting block number covered by this page
 *	nblocks	- number of blocks this page
 *
 * RETURNS:	max free string at the root of the dmap's summary tree
 *		(via dbInitDmapTree()); the original "RETURNS: NONE" was
 *		stale.
 */
static int dbInitDmap(struct dmap * dp, s64 Blkno, int nblocks)
{
	int blkno, w, b, r, nw, nb, i;

	/* starting block number within the dmap */
	blkno = Blkno & (BPERDMAP - 1);

	if (blkno == 0) {
		dp->nblocks = dp->nfree = cpu_to_le32(nblocks);
		dp->start = cpu_to_le64(Blkno);

		if (nblocks == BPERDMAP) {
			memset(&dp->wmap[0], 0, LPERDMAP * 4);
			memset(&dp->pmap[0], 0, LPERDMAP * 4);
			goto initTree;
		}
	} else {
		le32_add_cpu(&dp->nblocks, nblocks);
		le32_add_cpu(&dp->nfree, nblocks);
	}

	/* word number containing start block number */
	w = blkno >> L2DBWORD;

	/*
	 * free the bits corresponding to the block range (ZEROS):
	 * note: not all bits of the first and last words may be contained
	 * within the block range.
	 */
	for (r = nblocks; r > 0; r -= nb, blkno += nb) {
		/* number of bits preceding range to be freed in the word */
		b = blkno & (DBWORD - 1);
		/* number of bits to free in the word */
		nb = min(r, DBWORD - b);

		/* is partial word to be freed ? */
		if (nb < DBWORD) {
			/* free (set to 0) from the bitmap word */
			dp->wmap[w] &= cpu_to_le32(~(ONES << (DBWORD - nb)
						     >> b));
			dp->pmap[w] &= cpu_to_le32(~(ONES << (DBWORD - nb)
						     >> b));

			/* skip the word freed */
			w++;
		} else {
			/* free (set to 0) contiguous bitmap words */
			nw = r >> L2DBWORD;
			memset(&dp->wmap[w], 0, nw * 4);
			memset(&dp->pmap[w], 0, nw * 4);

			/* skip the words freed */
			nb = nw << L2DBWORD;
			w += nw;
		}
	}

	/*
	 * mark bits following the range to be freed (non-existing
	 * blocks) as allocated (ONES)
	 */

	if (blkno == BPERDMAP)
		goto initTree;

	/* the first word beyond the end of existing blocks */
	w = blkno >> L2DBWORD;

	/* does nblocks fall on a 32-bit boundary ?
	 */
	b = blkno & (DBWORD - 1);
	if (b) {
		/* mark a partial word allocated */
		dp->wmap[w] = dp->pmap[w] = cpu_to_le32(ONES >> b);
		w++;
	}

	/* set the rest of the words in the page to allocated (ONES) */
	for (i = w; i < LPERDMAP; i++)
		dp->pmap[i] = dp->wmap[i] = cpu_to_le32(ONES);

	/*
	 * init tree
	 */
      initTree:
	return (dbInitDmapTree(dp));
}


/*
 * NAME:	dbInitDmapTree()/ujfs_complete_dmap()
 *
 * FUNCTION:	initialize summary tree of the specified dmap:
 *
 *		at entry, bitmap of the dmap has been initialized;
 *
 * PARAMETERS:
 *	dp	- dmap to complete
 *		  (the blkno/treemax parameters previously documented here
 *		  do not exist in this implementation)
 *
 * RETURNS:	max free string at the root of the tree
 */
static int dbInitDmapTree(struct dmap * dp)
{
	struct dmaptree *tp;
	s8 *cp;
	int i;

	/* init fixed info of tree */
	tp = &dp->tree;
	tp->nleafs = cpu_to_le32(LPERDMAP);
	tp->l2nleafs = cpu_to_le32(L2LPERDMAP);
	tp->leafidx = cpu_to_le32(LEAFIND);
	tp->height = cpu_to_le32(4);
	tp->budmin = BUDMIN;

	/* init each leaf from corresponding wmap word:
	 * note: leaf is set to NOFREE(-1) if all blocks of corresponding
	 * bitmap word are allocated.
	 */
	cp = tp->stree + le32_to_cpu(tp->leafidx);
	for (i = 0; i < LPERDMAP; i++)
		*cp++ = dbMaxBud((u8 *) & dp->wmap[i]);

	/* build the dmap's binary buddy summary tree */
	return (dbInitTree(tp));
}


/*
 * NAME:	dbInitTree()/ujfs_adjtree()
 *
 * FUNCTION:	initialize binary buddy summary tree of a dmap or dmapctl.
 *
 *		at entry, the leaves of the tree has been initialized
 *		from corresponding bitmap word or root of summary tree
 *		of the child control page;
 *		configure binary buddy system at the leaf level, then
 *		bubble up the values of the leaf nodes up the tree.
 *
 * PARAMETERS:
 *	dtp	- Pointer to the tree (dmaptree) to be initialized; its
 *		  leaf count, leaf index and budmin fields are already set.
 *		  (the cp/l2leaves/l2min parameters previously documented
 *		  here do not exist in this implementation)
 *
 * RETURNS: max free string at the root of the tree
 */
static int dbInitTree(struct dmaptree * dtp)
{
	int l2max, l2free, bsize, nextb, i;
	int child, parent, nparent;
	s8 *tp, *cp, *cp1;

	tp = dtp->stree;

	/* Determine the maximum free string possible for the leaves */
	l2max = le32_to_cpu(dtp->l2nleafs) + dtp->budmin;

	/*
	 * configure the leaf level into binary buddy system
	 *
	 * Try to combine buddies starting with a buddy size of 1
	 * (i.e. two leaves). At a buddy size of 1 two buddy leaves
	 * can be combined if both buddies have a maximum free of l2min;
	 * the combination will result in the left-most buddy leaf having
	 * a maximum free of l2min+1.
	 * After processing all buddies for a given size, process buddies
	 * at the next higher buddy size (i.e. current size * 2) and
	 * the next maximum free (current free + 1).
	 * This continues until the maximum possible buddy combination
	 * yields maximum free.
	 */
	for (l2free = dtp->budmin, bsize = 1; l2free < l2max;
	     l2free++, bsize = nextb) {
		/* get next buddy size == current buddy pair size */
		nextb = bsize << 1;

		/* scan each adjacent buddy pair at current buddy size */
		for (i = 0, cp = tp + le32_to_cpu(dtp->leafidx);
		     i < le32_to_cpu(dtp->nleafs);
		     i += nextb, cp += nextb) {
			/* coalesce if both adjacent buddies are max free */
			if (*cp == l2free && *(cp + bsize) == l2free) {
				*cp = l2free + 1;	/* left take right */
				*(cp + bsize) = -1;	/* right give left */
			}
		}
	}

	/*
	 * bubble summary information of leaves up the tree.
	 *
	 * Starting at the leaf node level, the four nodes described by
	 * the higher level parent node are compared for a maximum free and
	 * this maximum becomes the value of the parent node.
	 * when all lower level nodes are processed in this fashion then
	 * move up to the next level (parent becomes a lower level node) and
	 * continue the process for that level.
	 */
	for (child = le32_to_cpu(dtp->leafidx),
	     nparent = le32_to_cpu(dtp->nleafs) >> 2;
	     nparent > 0; nparent >>= 2, child = parent) {
		/* get index of 1st node of parent level */
		parent = (child - 1) >> 2;

		/* set the value of the parent node as the maximum
		 * of the four nodes of the current level.
		 */
		for (i = 0, cp = tp + child, cp1 = tp + parent;
		     i < nparent; i++, cp += 4, cp1++)
			*cp1 = TREEMAX(cp);
	}

	/* root of the tree == maximum free string of the whole page */
	return (*tp);
}


/*
 *	dbInitDmapCtl()
 *
 * function: initialize dmapctl page
 */
static int dbInitDmapCtl(struct dmapctl * dcp, int level, int i)
{				/* start leaf index not covered by range */
	s8 *cp;

	dcp->nleafs = cpu_to_le32(LPERCTL);
	dcp->l2nleafs = cpu_to_le32(L2LPERCTL);
	dcp->leafidx = cpu_to_le32(CTLLEAFIND);
	dcp->height = cpu_to_le32(5);
	dcp->budmin = L2BPERDMAP + L2LPERCTL * level;

	/*
	 * initialize the leaves of current level that were not covered
	 * by the specified input block range (i.e. the leaves have no
	 * low level dmapctl or dmap).
	 */
	cp = &dcp->stree[CTLLEAFIND + i];
	for (; i < LPERCTL; i++)
		*cp++ = NOFREE;

	/* build the dmap's binary buddy summary tree */
	return (dbInitTree((struct dmaptree *) dcp));
}


/*
 * NAME:	dbGetL2AGSize()/ujfs_getagl2size()
 *
 * FUNCTION:	Determine log2(allocation group size) from aggregate size
 *
 * PARAMETERS:
 *	nblocks	- Number of blocks in aggregate
 *
 * RETURNS: log2(allocation group size) in aggregate blocks
 */
static int dbGetL2AGSize(s64 nblocks)
{
	s64 sz;
	s64 m;
	int l2sz;

	if (nblocks < BPERDMAP * MAXAG)
		return (L2BPERDMAP);

	/* round up aggregate size to power of 2.
	 * note: m is signed, so m >>= 1 sign-extends and the mask
	 * accumulates high bits; since nblocks is non-negative the first
	 * match still lands on its leading one bit.
	 *
	 * NOTE(review): the loop starts at l2sz = 64 with the bit-63 mask,
	 * so the break appears to leave l2sz one above the leading-bit
	 * position; this matches the upstream code unchanged since 2.4 —
	 * do not "fix" without checking mkfs.jfs compatibility.
	 */
	m = ((u64) 1 << (64 - 1));
	for (l2sz = 64; l2sz >= 0; l2sz--, m >>= 1) {
		if (m & nblocks)
			break;
	}

	sz = (s64) 1 << l2sz;
	if (sz < nblocks)
		l2sz += 1;

	/* agsize = roundupSize/max_number_of_ag */
	return (l2sz - L2MAXAG);
}


/*
 * NAME:	dbMapFileSizeToMapSize()
 *
 * FUNCTION:	compute number of blocks the block allocation map file
 *		can cover from the map file size;
 *
 * RETURNS:	Number of blocks which can be covered by this block map file;
 */

/*
 * maximum number of map pages at each level including control pages
 */
#define MAXL0PAGES	(1 + LPERCTL)
#define MAXL1PAGES	(1 + LPERCTL * MAXL0PAGES)
#define MAXL2PAGES	(1 + LPERCTL * MAXL1PAGES)

/*
 * convert number of map pages to the zero origin top dmapctl level
 */
#define BMAPPGTOLEV(npages)	\
	(((npages) <= 3 + MAXL0PAGES) ? 0 : \
	 ((npages) <= 2 + MAXL1PAGES) ? 1 : 2)

s64 dbMapFileSizeToMapSize(struct inode * ipbmap)
{
	struct super_block *sb = ipbmap->i_sb;
	s64 nblocks;
	s64 npages, ndmaps;
	int level, i;
	int complete, factor;

	nblocks = ipbmap->i_size >> JFS_SBI(sb)->l2bsize;
	npages = nblocks >> JFS_SBI(sb)->l2nbperpage;
	level = BMAPPGTOLEV(npages);

	/* At each level, accumulate the number of dmap pages covered by
	 * the number of full child levels below it;
	 * repeat for the last incomplete child level.
	 */
	ndmaps = 0;
	npages--;		/* skip the first global control page */
	/* skip higher level control pages above top level covered by map */
	npages -= (2 - level);
	npages--;		/* skip top level's control page */
	for (i = level; i >= 0; i--) {
		factor =
		    (i == 2) ? MAXL1PAGES : ((i == 1) ? MAXL0PAGES : 1);
		complete = (u32) npages / factor;
		ndmaps += complete * ((i == 2) ? LPERCTL * LPERCTL :
				      ((i == 1) ? LPERCTL : 1));

		/* pages in last/incomplete child */
		npages = (u32) npages % factor;
		/* skip incomplete child's level control page */
		npages--;
	}

	/* convert the number of dmaps into the number of blocks
	 * which can be covered by the dmaps;
	 */
	nblocks = ndmaps << L2BPERDMAP;

	return (nblocks);
}
gpl-2.0
eyeballer/LenovoA1-3.0.8
drivers/ata/sata_vsc.c
2531
12506
/* * sata_vsc.c - Vitesse VSC7174 4 port DPA SATA * * Maintained by: Jeremy Higdon @ SGI * Please ALWAYS copy linux-ide@vger.kernel.org * on emails. * * Copyright 2004 SGI * * Bits from Jeff Garzik, Copyright RedHat, Inc. * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; see the file COPYING. If not, write to * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. * * * libata documentation is available via 'make {ps|pdf}docs', * as Documentation/DocBook/libata.* * * Vitesse hardware documentation presumably available under NDA. 
* Intel 31244 (same hardware interface) documentation presumably * available from http://developer.intel.com/ * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/dma-mapping.h> #include <linux/device.h> #include <scsi/scsi_host.h> #include <linux/libata.h> #define DRV_NAME "sata_vsc" #define DRV_VERSION "2.3" enum { VSC_MMIO_BAR = 0, /* Interrupt register offsets (from chip base address) */ VSC_SATA_INT_STAT_OFFSET = 0x00, VSC_SATA_INT_MASK_OFFSET = 0x04, /* Taskfile registers offsets */ VSC_SATA_TF_CMD_OFFSET = 0x00, VSC_SATA_TF_DATA_OFFSET = 0x00, VSC_SATA_TF_ERROR_OFFSET = 0x04, VSC_SATA_TF_FEATURE_OFFSET = 0x06, VSC_SATA_TF_NSECT_OFFSET = 0x08, VSC_SATA_TF_LBAL_OFFSET = 0x0c, VSC_SATA_TF_LBAM_OFFSET = 0x10, VSC_SATA_TF_LBAH_OFFSET = 0x14, VSC_SATA_TF_DEVICE_OFFSET = 0x18, VSC_SATA_TF_STATUS_OFFSET = 0x1c, VSC_SATA_TF_COMMAND_OFFSET = 0x1d, VSC_SATA_TF_ALTSTATUS_OFFSET = 0x28, VSC_SATA_TF_CTL_OFFSET = 0x29, /* DMA base */ VSC_SATA_UP_DESCRIPTOR_OFFSET = 0x64, VSC_SATA_UP_DATA_BUFFER_OFFSET = 0x6C, VSC_SATA_DMA_CMD_OFFSET = 0x70, /* SCRs base */ VSC_SATA_SCR_STATUS_OFFSET = 0x100, VSC_SATA_SCR_ERROR_OFFSET = 0x104, VSC_SATA_SCR_CONTROL_OFFSET = 0x108, /* Port stride */ VSC_SATA_PORT_OFFSET = 0x200, /* Error interrupt status bit offsets */ VSC_SATA_INT_ERROR_CRC = 0x40, VSC_SATA_INT_ERROR_T = 0x20, VSC_SATA_INT_ERROR_P = 0x10, VSC_SATA_INT_ERROR_R = 0x8, VSC_SATA_INT_ERROR_E = 0x4, VSC_SATA_INT_ERROR_M = 0x2, VSC_SATA_INT_PHY_CHANGE = 0x1, VSC_SATA_INT_ERROR = (VSC_SATA_INT_ERROR_CRC | VSC_SATA_INT_ERROR_T | \ VSC_SATA_INT_ERROR_P | VSC_SATA_INT_ERROR_R | \ VSC_SATA_INT_ERROR_E | VSC_SATA_INT_ERROR_M | \ VSC_SATA_INT_PHY_CHANGE), }; static int vsc_sata_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val) { if (sc_reg > SCR_CONTROL) return -EINVAL; *val = readl(link->ap->ioaddr.scr_addr + (sc_reg * 4)); return 
0; }


/*
 * Write SCR register @sc_reg of @link.  The SCRs are 32-bit registers
 * laid out at consecutive offsets from scr_addr.
 * Returns 0 on success, -EINVAL for an out-of-range register.
 */
static int vsc_sata_scr_write(struct ata_link *link,
			      unsigned int sc_reg, u32 val)
{
	if (sc_reg > SCR_CONTROL)
		return -EINVAL;
	writel(val, link->ap->ioaddr.scr_addr + (sc_reg * 4));
	return 0;
}


/* Freeze: mask every interrupt source for this port (one mask byte/port). */
static void vsc_freeze(struct ata_port *ap)
{
	void __iomem *mask_addr;

	mask_addr = ap->host->iomap[VSC_MMIO_BAR] +
		VSC_SATA_INT_MASK_OFFSET + ap->port_no;

	writeb(0, mask_addr);
}


/* Thaw: unmask every interrupt source for this port. */
static void vsc_thaw(struct ata_port *ap)
{
	void __iomem *mask_addr;

	mask_addr = ap->host->iomap[VSC_MMIO_BAR] +
		VSC_SATA_INT_MASK_OFFSET + ap->port_no;

	writeb(0xff, mask_addr);
}


/*
 * Mirror the taskfile ATA_NIEN bit into bit 7 of the per-port interrupt
 * mask byte, leaving the other mask bits untouched.
 */
static void vsc_intr_mask_update(struct ata_port *ap, u8 ctl)
{
	void __iomem *mask_addr;
	u8 mask;

	mask_addr = ap->host->iomap[VSC_MMIO_BAR] +
		VSC_SATA_INT_MASK_OFFSET + ap->port_no;

	mask = readb(mask_addr);
	if (ctl & ATA_NIEN)
		mask |= 0x80;
	else
		mask &= 0x7F;
	writeb(mask, mask_addr);
}


/* Load the taskfile using this chip's 16-bit-wide register layout. */
static void vsc_sata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;

	/*
	 * The only thing the ctl register is used for is SRST.
	 * That is not enabled or disabled via tf_load.
	 * However, if ATA_NIEN is changed, then we need to change
	 * the interrupt register.
*/ if ((tf->ctl & ATA_NIEN) != (ap->last_ctl & ATA_NIEN)) { ap->last_ctl = tf->ctl; vsc_intr_mask_update(ap, tf->ctl & ATA_NIEN); } if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) { writew(tf->feature | (((u16)tf->hob_feature) << 8), ioaddr->feature_addr); writew(tf->nsect | (((u16)tf->hob_nsect) << 8), ioaddr->nsect_addr); writew(tf->lbal | (((u16)tf->hob_lbal) << 8), ioaddr->lbal_addr); writew(tf->lbam | (((u16)tf->hob_lbam) << 8), ioaddr->lbam_addr); writew(tf->lbah | (((u16)tf->hob_lbah) << 8), ioaddr->lbah_addr); } else if (is_addr) { writew(tf->feature, ioaddr->feature_addr); writew(tf->nsect, ioaddr->nsect_addr); writew(tf->lbal, ioaddr->lbal_addr); writew(tf->lbam, ioaddr->lbam_addr); writew(tf->lbah, ioaddr->lbah_addr); } if (tf->flags & ATA_TFLAG_DEVICE) writeb(tf->device, ioaddr->device_addr); ata_wait_idle(ap); } static void vsc_sata_tf_read(struct ata_port *ap, struct ata_taskfile *tf) { struct ata_ioports *ioaddr = &ap->ioaddr; u16 nsect, lbal, lbam, lbah, feature; tf->command = ata_sff_check_status(ap); tf->device = readw(ioaddr->device_addr); feature = readw(ioaddr->error_addr); nsect = readw(ioaddr->nsect_addr); lbal = readw(ioaddr->lbal_addr); lbam = readw(ioaddr->lbam_addr); lbah = readw(ioaddr->lbah_addr); tf->feature = feature; tf->nsect = nsect; tf->lbal = lbal; tf->lbam = lbam; tf->lbah = lbah; if (tf->flags & ATA_TFLAG_LBA48) { tf->hob_feature = feature >> 8; tf->hob_nsect = nsect >> 8; tf->hob_lbal = lbal >> 8; tf->hob_lbam = lbam >> 8; tf->hob_lbah = lbah >> 8; } } static inline void vsc_error_intr(u8 port_status, struct ata_port *ap) { if (port_status & (VSC_SATA_INT_PHY_CHANGE | VSC_SATA_INT_ERROR_M)) ata_port_freeze(ap); else ata_port_abort(ap); } static void vsc_port_intr(u8 port_status, struct ata_port *ap) { struct ata_queued_cmd *qc; int handled = 0; if (unlikely(port_status & VSC_SATA_INT_ERROR)) { vsc_error_intr(port_status, ap); return; } qc = ata_qc_from_tag(ap, ap->link.active_tag); if (qc && likely(!(qc->tf.flags & 
ATA_TFLAG_POLLING))) handled = ata_bmdma_port_intr(ap, qc); /* We received an interrupt during a polled command, * or some other spurious condition. Interrupt reporting * with this hardware is fairly reliable so it is safe to * simply clear the interrupt */ if (unlikely(!handled)) ap->ops->sff_check_status(ap); } /* * vsc_sata_interrupt * * Read the interrupt register and process for the devices that have * them pending. */ static irqreturn_t vsc_sata_interrupt(int irq, void *dev_instance) { struct ata_host *host = dev_instance; unsigned int i; unsigned int handled = 0; u32 status; status = readl(host->iomap[VSC_MMIO_BAR] + VSC_SATA_INT_STAT_OFFSET); if (unlikely(status == 0xffffffff || status == 0)) { if (status) dev_printk(KERN_ERR, host->dev, ": IRQ status == 0xffffffff, " "PCI fault or device removal?\n"); goto out; } spin_lock(&host->lock); for (i = 0; i < host->n_ports; i++) { u8 port_status = (status >> (8 * i)) & 0xff; if (port_status) { vsc_port_intr(port_status, host->ports[i]); handled++; } } spin_unlock(&host->lock); out: return IRQ_RETVAL(handled); } static struct scsi_host_template vsc_sata_sht = { ATA_BMDMA_SHT(DRV_NAME), }; static struct ata_port_operations vsc_sata_ops = { .inherits = &ata_bmdma_port_ops, /* The IRQ handling is not quite standard SFF behaviour so we cannot use the default lost interrupt handler */ .lost_interrupt = ATA_OP_NULL, .sff_tf_load = vsc_sata_tf_load, .sff_tf_read = vsc_sata_tf_read, .freeze = vsc_freeze, .thaw = vsc_thaw, .scr_read = vsc_sata_scr_read, .scr_write = vsc_sata_scr_write, }; static void __devinit vsc_sata_setup_port(struct ata_ioports *port, void __iomem *base) { port->cmd_addr = base + VSC_SATA_TF_CMD_OFFSET; port->data_addr = base + VSC_SATA_TF_DATA_OFFSET; port->error_addr = base + VSC_SATA_TF_ERROR_OFFSET; port->feature_addr = base + VSC_SATA_TF_FEATURE_OFFSET; port->nsect_addr = base + VSC_SATA_TF_NSECT_OFFSET; port->lbal_addr = base + VSC_SATA_TF_LBAL_OFFSET; port->lbam_addr = base + 
VSC_SATA_TF_LBAM_OFFSET; port->lbah_addr = base + VSC_SATA_TF_LBAH_OFFSET; port->device_addr = base + VSC_SATA_TF_DEVICE_OFFSET; port->status_addr = base + VSC_SATA_TF_STATUS_OFFSET; port->command_addr = base + VSC_SATA_TF_COMMAND_OFFSET; port->altstatus_addr = base + VSC_SATA_TF_ALTSTATUS_OFFSET; port->ctl_addr = base + VSC_SATA_TF_CTL_OFFSET; port->bmdma_addr = base + VSC_SATA_DMA_CMD_OFFSET; port->scr_addr = base + VSC_SATA_SCR_STATUS_OFFSET; writel(0, base + VSC_SATA_UP_DESCRIPTOR_OFFSET); writel(0, base + VSC_SATA_UP_DATA_BUFFER_OFFSET); } static int __devinit vsc_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { static const struct ata_port_info pi = { .flags = ATA_FLAG_SATA, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA6, .port_ops = &vsc_sata_ops, }; const struct ata_port_info *ppi[] = { &pi, NULL }; static int printed_version; struct ata_host *host; void __iomem *mmio_base; int i, rc; u8 cls; if (!printed_version++) dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); /* allocate host */ host = ata_host_alloc_pinfo(&pdev->dev, ppi, 4); if (!host) return -ENOMEM; rc = pcim_enable_device(pdev); if (rc) return rc; /* check if we have needed resource mapped */ if (pci_resource_len(pdev, 0) == 0) return -ENODEV; /* map IO regions and initialize host accordingly */ rc = pcim_iomap_regions(pdev, 1 << VSC_MMIO_BAR, DRV_NAME); if (rc == -EBUSY) pcim_pin_device(pdev); if (rc) return rc; host->iomap = pcim_iomap_table(pdev); mmio_base = host->iomap[VSC_MMIO_BAR]; for (i = 0; i < host->n_ports; i++) { struct ata_port *ap = host->ports[i]; unsigned int offset = (i + 1) * VSC_SATA_PORT_OFFSET; vsc_sata_setup_port(&ap->ioaddr, mmio_base + offset); ata_port_pbar_desc(ap, VSC_MMIO_BAR, -1, "mmio"); ata_port_pbar_desc(ap, VSC_MMIO_BAR, offset, "port"); } /* * Use 32 bit DMA mask, because 64 bit address support is poor. 
*/ rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (rc) return rc; rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); if (rc) return rc; /* * Due to a bug in the chip, the default cache line size can't be * used (unless the default is non-zero). */ pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cls); if (cls == 0x00) pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x80); if (pci_enable_msi(pdev) == 0) pci_intx(pdev, 0); /* * Config offset 0x98 is "Extended Control and Status Register 0" * Default value is (1 << 28). All bits except bit 28 are reserved in * DPA mode. If bit 28 is set, LED 0 reflects all ports' activity. * If bit 28 is clear, each port has its own LED. */ pci_write_config_dword(pdev, 0x98, 0); pci_set_master(pdev); return ata_host_activate(host, pdev->irq, vsc_sata_interrupt, IRQF_SHARED, &vsc_sata_sht); } static const struct pci_device_id vsc_sata_pci_tbl[] = { { PCI_VENDOR_ID_VITESSE, 0x7174, PCI_ANY_ID, PCI_ANY_ID, 0x10600, 0xFFFFFF, 0 }, { PCI_VENDOR_ID_INTEL, 0x3200, PCI_ANY_ID, PCI_ANY_ID, 0x10600, 0xFFFFFF, 0 }, { } /* terminate list */ }; static struct pci_driver vsc_sata_pci_driver = { .name = DRV_NAME, .id_table = vsc_sata_pci_tbl, .probe = vsc_sata_init_one, .remove = ata_pci_remove_one, }; static int __init vsc_sata_init(void) { return pci_register_driver(&vsc_sata_pci_driver); } static void __exit vsc_sata_exit(void) { pci_unregister_driver(&vsc_sata_pci_driver); } MODULE_AUTHOR("Jeremy Higdon"); MODULE_DESCRIPTION("low-level driver for Vitesse VSC7174 SATA controller"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, vsc_sata_pci_tbl); MODULE_VERSION(DRV_VERSION); module_init(vsc_sata_init); module_exit(vsc_sata_exit);
gpl-2.0
TeamWin/android_kernel_samsung_goyave
drivers/media/usb/stk1160/stk1160-ac97.c
2787
4003
/* * STK1160 driver * * Copyright (C) 2012 Ezequiel Garcia * <elezegarcia--a.t--gmail.com> * * Based on Easycap driver by R.M. Thomas * Copyright (C) 2010 R.M. Thomas * <rmthomas--a.t--sciolus.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/module.h> #include <sound/core.h> #include <sound/initval.h> #include <sound/ac97_codec.h> #include "stk1160.h" #include "stk1160-reg.h" static struct snd_ac97 *stk1160_ac97; static void stk1160_write_ac97(struct snd_ac97 *ac97, u16 reg, u16 value) { struct stk1160 *dev = ac97->private_data; /* Set codec register address */ stk1160_write_reg(dev, STK1160_AC97_ADDR, reg); /* Set codec command */ stk1160_write_reg(dev, STK1160_AC97_CMD, value & 0xff); stk1160_write_reg(dev, STK1160_AC97_CMD + 1, (value & 0xff00) >> 8); /* * Set command write bit to initiate write operation. * The bit will be cleared when transfer is done. */ stk1160_write_reg(dev, STK1160_AC97CTL_0, 0x8c); } static u16 stk1160_read_ac97(struct snd_ac97 *ac97, u16 reg) { struct stk1160 *dev = ac97->private_data; u8 vall = 0; u8 valh = 0; /* Set codec register address */ stk1160_write_reg(dev, STK1160_AC97_ADDR, reg); /* * Set command read bit to initiate read operation. * The bit will be cleared when transfer is done. 
*/
	stk1160_write_reg(dev, STK1160_AC97CTL_0, 0x8b);

	/* Retrieve register value */
	stk1160_read_reg(dev, STK1160_AC97_CMD, &vall);
	stk1160_read_reg(dev, STK1160_AC97_CMD + 1, &valh);

	return (valh << 8) | vall;
}

/* Reset the AC97 link and codec, then select 16-bit L&R channel data. */
static void stk1160_reset_ac97(struct snd_ac97 *ac97)
{
	struct stk1160 *dev = ac97->private_data;

	/* Two-step reset AC97 interface and hardware codec */
	stk1160_write_reg(dev, STK1160_AC97CTL_0, 0x94);
	stk1160_write_reg(dev, STK1160_AC97CTL_0, 0x88);

	/* Set 16-bit audio data and choose L&R channel*/
	stk1160_write_reg(dev, STK1160_AC97CTL_1 + 2, 0x01);
}

static struct snd_ac97_bus_ops stk1160_ac97_ops = {
	.read = stk1160_read_ac97,
	.write = stk1160_write_ac97,
	.reset = stk1160_reset_ac97,
};

/*
 * Create a minimal ALSA card that exposes the AC97 mixer controls.
 * Returns 0 on success or a negative errno; on failure the card is
 * freed and dev->snd_card is left NULL.
 */
int stk1160_ac97_register(struct stk1160 *dev)
{
	struct snd_card *card = NULL;
	struct snd_ac97_bus *ac97_bus;
	struct snd_ac97_template ac97_template;
	int rc;

	/*
	 * Just want a card to access ac97 controls,
	 * the actual capture interface will be handled by snd-usb-audio
	 */
	rc = snd_card_create(SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1,
			     THIS_MODULE, 0, &card);
	if (rc < 0)
		return rc;

	snd_card_set_dev(card, dev->dev);

	/* TODO: I'm not sure where should I get these names :-( */
	snprintf(card->shortname, sizeof(card->shortname), "stk1160-mixer");
	snprintf(card->longname, sizeof(card->longname),
		 "stk1160 ac97 codec mixer control");
	strncpy(card->driver, dev->dev->driver->name, sizeof(card->driver));

	rc = snd_ac97_bus(card, 0, &stk1160_ac97_ops, NULL, &ac97_bus);
	if (rc)
		goto err;

	/* We must set private_data before calling snd_ac97_mixer */
	memset(&ac97_template, 0, sizeof(ac97_template));
	ac97_template.private_data = dev;
	ac97_template.scaps = AC97_SCAP_SKIP_MODEM;
	rc = snd_ac97_mixer(ac97_bus, &ac97_template, &stk1160_ac97);
	if (rc)
		goto err;

	dev->snd_card = card;
	rc = snd_card_register(card);
	if (rc)
		goto err;

	return 0;

err:
	dev->snd_card = NULL;
	snd_card_free(card);
	return rc;
}

/* Tear down the mixer card created by stk1160_ac97_register(). */
int stk1160_ac97_unregister(struct stk1160 *dev)
{
	struct snd_card *card = dev->snd_card;

	/*
	 * We need
to check usb_device, * because ac97 release attempts to communicate with codec */ if (card && dev->udev) snd_card_free(card); return 0; }
gpl-2.0
dzo/kernel_ville
drivers/input/joystick/as5011.c
3043
9361
/* * Copyright (c) 2010, 2011 Fabien Marteau <fabien.marteau@armadeus.com> * Sponsored by ARMadeus Systems * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Driver for Austria Microsystems joysticks AS5011 * * TODO: * - Power on the chip when open() and power down when close() * - Manage power mode */ #include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/input.h> #include <linux/gpio.h> #include <linux/delay.h> #include <linux/input/as5011.h> #include <linux/slab.h> #define DRIVER_DESC "Driver for Austria Microsystems AS5011 joystick" #define MODULE_DEVICE_ALIAS "as5011" MODULE_AUTHOR("Fabien Marteau <fabien.marteau@armadeus.com>"); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); /* registers */ #define AS5011_CTRL1 0x76 #define AS5011_CTRL2 0x75 #define AS5011_XP 0x43 #define AS5011_XN 0x44 #define AS5011_YP 0x53 #define AS5011_YN 0x54 #define AS5011_X_REG 0x41 #define AS5011_Y_REG 0x42 #define AS5011_X_RES_INT 0x51 #define AS5011_Y_RES_INT 0x52 /* CTRL1 bits */ #define AS5011_CTRL1_LP_PULSED 0x80 #define AS5011_CTRL1_LP_ACTIVE 0x40 #define AS5011_CTRL1_LP_CONTINUE 0x20 #define AS5011_CTRL1_INT_WUP_EN 0x10 #define AS5011_CTRL1_INT_ACT_EN 0x08 #define AS5011_CTRL1_EXT_CLK_EN 0x04 #define AS5011_CTRL1_SOFT_RST 0x02 #define AS5011_CTRL1_DATA_VALID 0x01 /* CTRL2 bits */ #define AS5011_CTRL2_EXT_SAMPLE_EN 
0x08 #define AS5011_CTRL2_RC_BIAS_ON 0x04 #define AS5011_CTRL2_INV_SPINNING 0x02 #define AS5011_MAX_AXIS 80 #define AS5011_MIN_AXIS (-80) #define AS5011_FUZZ 8 #define AS5011_FLAT 40 struct as5011_device { struct input_dev *input_dev; struct i2c_client *i2c_client; unsigned int button_gpio; unsigned int button_irq; unsigned int axis_irq; }; static int as5011_i2c_write(struct i2c_client *client, uint8_t aregaddr, uint8_t avalue) { uint8_t data[2] = { aregaddr, avalue }; struct i2c_msg msg = { client->addr, I2C_M_IGNORE_NAK, 2, (uint8_t *)data }; int error; error = i2c_transfer(client->adapter, &msg, 1); return error < 0 ? error : 0; } static int as5011_i2c_read(struct i2c_client *client, uint8_t aregaddr, signed char *value) { uint8_t data[2] = { aregaddr }; struct i2c_msg msg_set[2] = { { client->addr, I2C_M_REV_DIR_ADDR, 1, (uint8_t *)data }, { client->addr, I2C_M_RD | I2C_M_NOSTART, 1, (uint8_t *)data } }; int error; error = i2c_transfer(client->adapter, msg_set, 2); if (error < 0) return error; *value = data[0] & 0x80 ? 
-1 * (1 + ~data[0]) : data[0]; return 0; } static irqreturn_t as5011_button_interrupt(int irq, void *dev_id) { struct as5011_device *as5011 = dev_id; int val = gpio_get_value_cansleep(as5011->button_gpio); input_report_key(as5011->input_dev, BTN_JOYSTICK, !val); input_sync(as5011->input_dev); return IRQ_HANDLED; } static irqreturn_t as5011_axis_interrupt(int irq, void *dev_id) { struct as5011_device *as5011 = dev_id; int error; signed char x, y; error = as5011_i2c_read(as5011->i2c_client, AS5011_X_RES_INT, &x); if (error < 0) goto out; error = as5011_i2c_read(as5011->i2c_client, AS5011_Y_RES_INT, &y); if (error < 0) goto out; input_report_abs(as5011->input_dev, ABS_X, x); input_report_abs(as5011->input_dev, ABS_Y, y); input_sync(as5011->input_dev); out: return IRQ_HANDLED; } static int __devinit as5011_configure_chip(struct as5011_device *as5011, const struct as5011_platform_data *plat_dat) { struct i2c_client *client = as5011->i2c_client; int error; signed char value; /* chip soft reset */ error = as5011_i2c_write(client, AS5011_CTRL1, AS5011_CTRL1_SOFT_RST); if (error < 0) { dev_err(&client->dev, "Soft reset failed\n"); return error; } mdelay(10); error = as5011_i2c_write(client, AS5011_CTRL1, AS5011_CTRL1_LP_PULSED | AS5011_CTRL1_LP_ACTIVE | AS5011_CTRL1_INT_ACT_EN); if (error < 0) { dev_err(&client->dev, "Power config failed\n"); return error; } error = as5011_i2c_write(client, AS5011_CTRL2, AS5011_CTRL2_INV_SPINNING); if (error < 0) { dev_err(&client->dev, "Can't invert spinning\n"); return error; } /* write threshold */ error = as5011_i2c_write(client, AS5011_XP, plat_dat->xp); if (error < 0) { dev_err(&client->dev, "Can't write threshold\n"); return error; } error = as5011_i2c_write(client, AS5011_XN, plat_dat->xn); if (error < 0) { dev_err(&client->dev, "Can't write threshold\n"); return error; } error = as5011_i2c_write(client, AS5011_YP, plat_dat->yp); if (error < 0) { dev_err(&client->dev, "Can't write threshold\n"); return error; } error = 
as5011_i2c_write(client, AS5011_YN, plat_dat->yn); if (error < 0) { dev_err(&client->dev, "Can't write threshold\n"); return error; } /* to free irq gpio in chip */ error = as5011_i2c_read(client, AS5011_X_RES_INT, &value); if (error < 0) { dev_err(&client->dev, "Can't read i2c X resolution value\n"); return error; } return 0; } static int __devinit as5011_probe(struct i2c_client *client, const struct i2c_device_id *id) { const struct as5011_platform_data *plat_data; struct as5011_device *as5011; struct input_dev *input_dev; int irq; int error; plat_data = client->dev.platform_data; if (!plat_data) return -EINVAL; if (!plat_data->axis_irq) { dev_err(&client->dev, "No axis IRQ?\n"); return -EINVAL; } if (!i2c_check_functionality(client->adapter, I2C_FUNC_PROTOCOL_MANGLING)) { dev_err(&client->dev, "need i2c bus that supports protocol mangling\n"); return -ENODEV; } as5011 = kmalloc(sizeof(struct as5011_device), GFP_KERNEL); input_dev = input_allocate_device(); if (!as5011 || !input_dev) { dev_err(&client->dev, "Can't allocate memory for device structure\n"); error = -ENOMEM; goto err_free_mem; } as5011->i2c_client = client; as5011->input_dev = input_dev; as5011->button_gpio = plat_data->button_gpio; as5011->axis_irq = plat_data->axis_irq; input_dev->name = "Austria Microsystem as5011 joystick"; input_dev->id.bustype = BUS_I2C; input_dev->dev.parent = &client->dev; __set_bit(EV_KEY, input_dev->evbit); __set_bit(EV_ABS, input_dev->evbit); __set_bit(BTN_JOYSTICK, input_dev->keybit); input_set_abs_params(input_dev, ABS_X, AS5011_MIN_AXIS, AS5011_MAX_AXIS, AS5011_FUZZ, AS5011_FLAT); input_set_abs_params(as5011->input_dev, ABS_Y, AS5011_MIN_AXIS, AS5011_MAX_AXIS, AS5011_FUZZ, AS5011_FLAT); error = gpio_request(as5011->button_gpio, "AS5011 button"); if (error < 0) { dev_err(&client->dev, "Failed to request button gpio\n"); goto err_free_mem; } irq = gpio_to_irq(as5011->button_gpio); if (irq < 0) { dev_err(&client->dev, "Failed to get irq number for button gpio\n"); goto 
err_free_button_gpio; } as5011->button_irq = irq; error = request_threaded_irq(as5011->button_irq, NULL, as5011_button_interrupt, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, "as5011_button", as5011); if (error < 0) { dev_err(&client->dev, "Can't allocate button irq %d\n", as5011->button_irq); goto err_free_button_gpio; } error = as5011_configure_chip(as5011, plat_data); if (error) goto err_free_button_irq; error = request_threaded_irq(as5011->axis_irq, NULL, as5011_axis_interrupt, plat_data->axis_irqflags, "as5011_joystick", as5011); if (error) { dev_err(&client->dev, "Can't allocate axis irq %d\n", plat_data->axis_irq); goto err_free_button_irq; } error = input_register_device(as5011->input_dev); if (error) { dev_err(&client->dev, "Failed to register input device\n"); goto err_free_axis_irq; } i2c_set_clientdata(client, as5011); return 0; err_free_axis_irq: free_irq(as5011->axis_irq, as5011); err_free_button_irq: free_irq(as5011->button_irq, as5011); err_free_button_gpio: gpio_free(as5011->button_gpio); err_free_mem: input_free_device(input_dev); kfree(as5011); return error; } static int __devexit as5011_remove(struct i2c_client *client) { struct as5011_device *as5011 = i2c_get_clientdata(client); free_irq(as5011->axis_irq, as5011); free_irq(as5011->button_irq, as5011); gpio_free(as5011->button_gpio); input_unregister_device(as5011->input_dev); kfree(as5011); return 0; } static const struct i2c_device_id as5011_id[] = { { MODULE_DEVICE_ALIAS, 0 }, { } }; MODULE_DEVICE_TABLE(i2c, as5011_id); static struct i2c_driver as5011_driver = { .driver = { .name = "as5011", }, .probe = as5011_probe, .remove = __devexit_p(as5011_remove), .id_table = as5011_id, }; static int __init as5011_init(void) { return i2c_add_driver(&as5011_driver); } module_init(as5011_init); static void __exit as5011_exit(void) { i2c_del_driver(&as5011_driver); } module_exit(as5011_exit);
gpl-2.0
rhtu/linux
fs/hfs/mdb.c
4067
10464
/* * linux/fs/hfs/mdb.c * * Copyright (C) 1995-1997 Paul H. Hargrove * (C) 2003 Ardis Technologies <roman@ardistech.com> * This file may be distributed under the terms of the GNU General Public License. * * This file contains functions for reading/writing the MDB. */ #include <linux/cdrom.h> #include <linux/genhd.h> #include <linux/nls.h> #include <linux/slab.h> #include "hfs_fs.h" #include "btree.h" /*================ File-local data types ================*/ /* * The HFS Master Directory Block (MDB). * * Also known as the Volume Information Block (VIB), this structure is * the HFS equivalent of a superblock. * * Reference: _Inside Macintosh: Files_ pages 2-59 through 2-62 * * modified for HFS Extended */ static int hfs_get_last_session(struct super_block *sb, sector_t *start, sector_t *size) { struct cdrom_multisession ms_info; struct cdrom_tocentry te; int res; /* default values */ *start = 0; *size = sb->s_bdev->bd_inode->i_size >> 9; if (HFS_SB(sb)->session >= 0) { te.cdte_track = HFS_SB(sb)->session; te.cdte_format = CDROM_LBA; res = ioctl_by_bdev(sb->s_bdev, CDROMREADTOCENTRY, (unsigned long)&te); if (!res && (te.cdte_ctrl & CDROM_DATA_TRACK) == 4) { *start = (sector_t)te.cdte_addr.lba << 2; return 0; } pr_err("invalid session number or type of track\n"); return -EINVAL; } ms_info.addr_format = CDROM_LBA; res = ioctl_by_bdev(sb->s_bdev, CDROMMULTISESSION, (unsigned long)&ms_info); if (!res && ms_info.xa_flag) *start = (sector_t)ms_info.addr.lba << 2; return 0; } /* * hfs_mdb_get() * * Build the in-core MDB for a filesystem, including * the B-trees and the volume bitmap. 
*/ int hfs_mdb_get(struct super_block *sb) { struct buffer_head *bh; struct hfs_mdb *mdb, *mdb2; unsigned int block; char *ptr; int off2, len, size, sect; sector_t part_start, part_size; loff_t off; __be16 attrib; /* set the device driver to 512-byte blocks */ size = sb_min_blocksize(sb, HFS_SECTOR_SIZE); if (!size) return -EINVAL; if (hfs_get_last_session(sb, &part_start, &part_size)) return -EINVAL; while (1) { /* See if this is an HFS filesystem */ bh = sb_bread512(sb, part_start + HFS_MDB_BLK, mdb); if (!bh) goto out; if (mdb->drSigWord == cpu_to_be16(HFS_SUPER_MAGIC)) break; brelse(bh); /* check for a partition block * (should do this only for cdrom/loop though) */ if (hfs_part_find(sb, &part_start, &part_size)) goto out; } HFS_SB(sb)->alloc_blksz = size = be32_to_cpu(mdb->drAlBlkSiz); if (!size || (size & (HFS_SECTOR_SIZE - 1))) { pr_err("bad allocation block size %d\n", size); goto out_bh; } size = min(HFS_SB(sb)->alloc_blksz, (u32)PAGE_SIZE); /* size must be a multiple of 512 */ while (size & (size - 1)) size -= HFS_SECTOR_SIZE; sect = be16_to_cpu(mdb->drAlBlSt) + part_start; /* align block size to first sector */ while (sect & ((size - 1) >> HFS_SECTOR_SIZE_BITS)) size >>= 1; /* align block size to weird alloc size */ while (HFS_SB(sb)->alloc_blksz & (size - 1)) size >>= 1; brelse(bh); if (!sb_set_blocksize(sb, size)) { pr_err("unable to set blocksize to %u\n", size); goto out; } bh = sb_bread512(sb, part_start + HFS_MDB_BLK, mdb); if (!bh) goto out; if (mdb->drSigWord != cpu_to_be16(HFS_SUPER_MAGIC)) goto out_bh; HFS_SB(sb)->mdb_bh = bh; HFS_SB(sb)->mdb = mdb; /* These parameters are read from the MDB, and never written */ HFS_SB(sb)->part_start = part_start; HFS_SB(sb)->fs_ablocks = be16_to_cpu(mdb->drNmAlBlks); HFS_SB(sb)->fs_div = HFS_SB(sb)->alloc_blksz >> sb->s_blocksize_bits; HFS_SB(sb)->clumpablks = be32_to_cpu(mdb->drClpSiz) / HFS_SB(sb)->alloc_blksz; if (!HFS_SB(sb)->clumpablks) HFS_SB(sb)->clumpablks = 1; HFS_SB(sb)->fs_start = 
(be16_to_cpu(mdb->drAlBlSt) + part_start) >> (sb->s_blocksize_bits - HFS_SECTOR_SIZE_BITS); /* These parameters are read from and written to the MDB */ HFS_SB(sb)->free_ablocks = be16_to_cpu(mdb->drFreeBks); HFS_SB(sb)->next_id = be32_to_cpu(mdb->drNxtCNID); HFS_SB(sb)->root_files = be16_to_cpu(mdb->drNmFls); HFS_SB(sb)->root_dirs = be16_to_cpu(mdb->drNmRtDirs); HFS_SB(sb)->file_count = be32_to_cpu(mdb->drFilCnt); HFS_SB(sb)->folder_count = be32_to_cpu(mdb->drDirCnt); /* TRY to get the alternate (backup) MDB. */ sect = part_start + part_size - 2; bh = sb_bread512(sb, sect, mdb2); if (bh) { if (mdb2->drSigWord == cpu_to_be16(HFS_SUPER_MAGIC)) { HFS_SB(sb)->alt_mdb_bh = bh; HFS_SB(sb)->alt_mdb = mdb2; } else brelse(bh); } if (!HFS_SB(sb)->alt_mdb) { pr_warn("unable to locate alternate MDB\n"); pr_warn("continuing without an alternate MDB\n"); } HFS_SB(sb)->bitmap = (__be32 *)__get_free_pages(GFP_KERNEL, PAGE_SIZE < 8192 ? 1 : 0); if (!HFS_SB(sb)->bitmap) goto out; /* read in the bitmap */ block = be16_to_cpu(mdb->drVBMSt) + part_start; off = (loff_t)block << HFS_SECTOR_SIZE_BITS; size = (HFS_SB(sb)->fs_ablocks + 8) / 8; ptr = (u8 *)HFS_SB(sb)->bitmap; while (size) { bh = sb_bread(sb, off >> sb->s_blocksize_bits); if (!bh) { pr_err("unable to read volume bitmap\n"); goto out; } off2 = off & (sb->s_blocksize - 1); len = min((int)sb->s_blocksize - off2, size); memcpy(ptr, bh->b_data + off2, len); brelse(bh); ptr += len; off += len; size -= len; } HFS_SB(sb)->ext_tree = hfs_btree_open(sb, HFS_EXT_CNID, hfs_ext_keycmp); if (!HFS_SB(sb)->ext_tree) { pr_err("unable to open extent tree\n"); goto out; } HFS_SB(sb)->cat_tree = hfs_btree_open(sb, HFS_CAT_CNID, hfs_cat_keycmp); if (!HFS_SB(sb)->cat_tree) { pr_err("unable to open catalog tree\n"); goto out; } attrib = mdb->drAtrb; if (!(attrib & cpu_to_be16(HFS_SB_ATTRIB_UNMNT))) { pr_warn("filesystem was not cleanly unmounted, running fsck.hfs is recommended. 
mounting read-only.\n"); sb->s_flags |= MS_RDONLY; } if ((attrib & cpu_to_be16(HFS_SB_ATTRIB_SLOCK))) { pr_warn("filesystem is marked locked, mounting read-only.\n"); sb->s_flags |= MS_RDONLY; } if (!(sb->s_flags & MS_RDONLY)) { /* Mark the volume uncleanly unmounted in case we crash */ attrib &= cpu_to_be16(~HFS_SB_ATTRIB_UNMNT); attrib |= cpu_to_be16(HFS_SB_ATTRIB_INCNSTNT); mdb->drAtrb = attrib; be32_add_cpu(&mdb->drWrCnt, 1); mdb->drLsMod = hfs_mtime(); mark_buffer_dirty(HFS_SB(sb)->mdb_bh); sync_dirty_buffer(HFS_SB(sb)->mdb_bh); } return 0; out_bh: brelse(bh); out: hfs_mdb_put(sb); return -EIO; } /* * hfs_mdb_commit() * * Description: * This updates the MDB on disk. * It does not check, if the superblock has been modified, or * if the filesystem has been mounted read-only. It is mainly * called by hfs_sync_fs() and flush_mdb(). * Input Variable(s): * struct hfs_mdb *mdb: Pointer to the hfs MDB * int backup; * Output Variable(s): * NONE * Returns: * void * Preconditions: * 'mdb' points to a "valid" (struct hfs_mdb). * Postconditions: * The HFS MDB and on disk will be updated, by copying the possibly * modified fields from the in memory MDB (in native byte order) to * the disk block buffer. * If 'backup' is non-zero then the alternate MDB is also written * and the function doesn't return until it is actually on disk. 
*/
void hfs_mdb_commit(struct super_block *sb)
{
	struct hfs_mdb *mdb = HFS_SB(sb)->mdb;

	/* Nothing is ever written back on a read-only mount. */
	if (sb->s_flags & MS_RDONLY)
		return;

	lock_buffer(HFS_SB(sb)->mdb_bh);
	if (test_and_clear_bit(HFS_FLG_MDB_DIRTY, &HFS_SB(sb)->flags)) {
		/* These parameters may have been modified, so write them back */
		mdb->drLsMod = hfs_mtime();
		mdb->drFreeBks = cpu_to_be16(HFS_SB(sb)->free_ablocks);
		mdb->drNxtCNID = cpu_to_be32(HFS_SB(sb)->next_id);
		mdb->drNmFls = cpu_to_be16(HFS_SB(sb)->root_files);
		mdb->drNmRtDirs = cpu_to_be16(HFS_SB(sb)->root_dirs);
		mdb->drFilCnt = cpu_to_be32(HFS_SB(sb)->file_count);
		mdb->drDirCnt = cpu_to_be32(HFS_SB(sb)->folder_count);

		/* write MDB to disk */
		mark_buffer_dirty(HFS_SB(sb)->mdb_bh);
	}

	/* write the backup MDB, not returning until it is written.
	 * we only do this when either the catalog or extents overflow
	 * files grow. */
	if (test_and_clear_bit(HFS_FLG_ALT_MDB_DIRTY, &HFS_SB(sb)->flags) &&
	    HFS_SB(sb)->alt_mdb) {
		/* Refresh the extent records/sizes of the two B-tree files
		 * in the MDB before it is copied to the alternate location. */
		hfs_inode_write_fork(HFS_SB(sb)->ext_tree->inode, mdb->drXTExtRec,
				     &mdb->drXTFlSize, NULL);
		hfs_inode_write_fork(HFS_SB(sb)->cat_tree->inode, mdb->drCTExtRec,
				     &mdb->drCTFlSize, NULL);

		lock_buffer(HFS_SB(sb)->alt_mdb_bh);
		memcpy(HFS_SB(sb)->alt_mdb, HFS_SB(sb)->mdb, HFS_SECTOR_SIZE);
		/* The backup copy is always marked cleanly unmounted and
		 * consistent, regardless of the live MDB's state. */
		HFS_SB(sb)->alt_mdb->drAtrb |= cpu_to_be16(HFS_SB_ATTRIB_UNMNT);
		HFS_SB(sb)->alt_mdb->drAtrb &= cpu_to_be16(~HFS_SB_ATTRIB_INCNSTNT);
		unlock_buffer(HFS_SB(sb)->alt_mdb_bh);
		mark_buffer_dirty(HFS_SB(sb)->alt_mdb_bh);
		/* block until the alternate MDB actually hits the disk */
		sync_dirty_buffer(HFS_SB(sb)->alt_mdb_bh);
	}

	if (test_and_clear_bit(HFS_FLG_BITMAP_DIRTY, &HFS_SB(sb)->flags)) {
		struct buffer_head *bh;
		sector_t block;
		char *ptr;
		int off, size, len;

		/* Translate the bitmap's 512-byte-sector start address into
		 * an fs-blocksize block number plus a byte offset within it. */
		block = be16_to_cpu(HFS_SB(sb)->mdb->drVBMSt) + HFS_SB(sb)->part_start;
		off = (block << HFS_SECTOR_SIZE_BITS) & (sb->s_blocksize - 1);
		block >>= sb->s_blocksize_bits - HFS_SECTOR_SIZE_BITS;
		/* one bit per allocation block, rounded up to whole bytes */
		size = (HFS_SB(sb)->fs_ablocks + 7) / 8;
		ptr = (u8 *)HFS_SB(sb)->bitmap;
		/* Copy the in-core bitmap out block by block; a read failure
		 * aborts the copy but does not fail the commit. */
		while (size) {
			bh = sb_bread(sb, block);
			if (!bh) {
				pr_err("unable to read volume bitmap\n");
				break;
			}
			len = min((int)sb->s_blocksize - off, size);
			lock_buffer(bh);
			memcpy(bh->b_data + off, ptr, len);
			unlock_buffer(bh);
			mark_buffer_dirty(bh);
			brelse(bh);
			block++;
			off = 0;	/* only the first block starts mid-buffer */
			ptr += len;
			size -= len;
		}
	}
	unlock_buffer(HFS_SB(sb)->mdb_bh);
}

/*
 * hfs_mdb_close()
 *
 * Mark the volume cleanly unmounted in the primary MDB and queue the
 * buffer for writeback.  No-op on read-only mounts.
 */
void hfs_mdb_close(struct super_block *sb)
{
	/* update volume attributes */
	if (sb->s_flags & MS_RDONLY)
		return;
	HFS_SB(sb)->mdb->drAtrb |= cpu_to_be16(HFS_SB_ATTRIB_UNMNT);
	HFS_SB(sb)->mdb->drAtrb &= cpu_to_be16(~HFS_SB_ATTRIB_INCNSTNT);
	mark_buffer_dirty(HFS_SB(sb)->mdb_bh);
}

/*
 * hfs_mdb_put()
 *
 * Release the resources associated with the in-core MDB.
 * Safe to call when mounting failed part-way (checks for a NULL sb_info).
 */
void hfs_mdb_put(struct super_block *sb)
{
	if (!HFS_SB(sb))
		return;
	/* free the B-trees */
	hfs_btree_close(HFS_SB(sb)->ext_tree);
	hfs_btree_close(HFS_SB(sb)->cat_tree);

	/* free the buffers holding the primary and alternate MDBs */
	brelse(HFS_SB(sb)->mdb_bh);
	brelse(HFS_SB(sb)->alt_mdb_bh);
	unload_nls(HFS_SB(sb)->nls_io);
	unload_nls(HFS_SB(sb)->nls_disk);
	/* bitmap was allocated with __get_free_pages(); order depends on
	 * PAGE_SIZE — see the matching allocation in hfs_mdb_get() */
	free_pages((unsigned long)HFS_SB(sb)->bitmap, PAGE_SIZE < 8192 ? 1 : 0);
	kfree(HFS_SB(sb));
	sb->s_fs_info = NULL;
}
gpl-2.0
AragaoAnderson/n5x_kernel_google_msm
net/rfkill/rfkill-gpio.c
5091
6166
/* * Copyright (c) 2011, NVIDIA Corporation. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ #include <linux/gpio.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/rfkill.h> #include <linux/platform_device.h> #include <linux/clk.h> #include <linux/slab.h> #include <linux/rfkill-gpio.h> enum rfkill_gpio_clk_state { UNSPECIFIED = 0, PWR_ENABLED, PWR_DISABLED }; #define PWR_CLK_SET(_RF, _EN) \ ((_RF)->pwr_clk_enabled = (!(_EN) ? 
PWR_ENABLED : PWR_DISABLED)) #define PWR_CLK_ENABLED(_RF) ((_RF)->pwr_clk_enabled == PWR_ENABLED) #define PWR_CLK_DISABLED(_RF) ((_RF)->pwr_clk_enabled != PWR_ENABLED) struct rfkill_gpio_data { struct rfkill_gpio_platform_data *pdata; struct rfkill *rfkill_dev; char *reset_name; char *shutdown_name; enum rfkill_gpio_clk_state pwr_clk_enabled; struct clk *pwr_clk; }; static int rfkill_gpio_set_power(void *data, bool blocked) { struct rfkill_gpio_data *rfkill = data; if (blocked) { if (gpio_is_valid(rfkill->pdata->shutdown_gpio)) gpio_direction_output(rfkill->pdata->shutdown_gpio, 0); if (gpio_is_valid(rfkill->pdata->reset_gpio)) gpio_direction_output(rfkill->pdata->reset_gpio, 0); if (rfkill->pwr_clk && PWR_CLK_ENABLED(rfkill)) clk_disable(rfkill->pwr_clk); } else { if (rfkill->pwr_clk && PWR_CLK_DISABLED(rfkill)) clk_enable(rfkill->pwr_clk); if (gpio_is_valid(rfkill->pdata->reset_gpio)) gpio_direction_output(rfkill->pdata->reset_gpio, 1); if (gpio_is_valid(rfkill->pdata->shutdown_gpio)) gpio_direction_output(rfkill->pdata->shutdown_gpio, 1); } if (rfkill->pwr_clk) PWR_CLK_SET(rfkill, blocked); return 0; } static const struct rfkill_ops rfkill_gpio_ops = { .set_block = rfkill_gpio_set_power, }; static int rfkill_gpio_probe(struct platform_device *pdev) { struct rfkill_gpio_data *rfkill; struct rfkill_gpio_platform_data *pdata = pdev->dev.platform_data; int ret = 0; int len = 0; if (!pdata) { pr_warn("%s: No platform data specified\n", __func__); return -EINVAL; } /* make sure at-least one of the GPIO is defined and that * a name is specified for this instance */ if (!pdata->name || (!gpio_is_valid(pdata->reset_gpio) && !gpio_is_valid(pdata->shutdown_gpio))) { pr_warn("%s: invalid platform data\n", __func__); return -EINVAL; } rfkill = kzalloc(sizeof(*rfkill), GFP_KERNEL); if (!rfkill) return -ENOMEM; if (pdata->gpio_runtime_setup) { ret = pdata->gpio_runtime_setup(pdev); if (ret) { pr_warn("%s: can't set up gpio\n", __func__); goto fail_alloc; } } rfkill->pdata = 
pdata; len = strlen(pdata->name); rfkill->reset_name = kzalloc(len + 7, GFP_KERNEL); if (!rfkill->reset_name) { ret = -ENOMEM; goto fail_alloc; } rfkill->shutdown_name = kzalloc(len + 10, GFP_KERNEL); if (!rfkill->shutdown_name) { ret = -ENOMEM; goto fail_reset_name; } snprintf(rfkill->reset_name, len + 6 , "%s_reset", pdata->name); snprintf(rfkill->shutdown_name, len + 9, "%s_shutdown", pdata->name); if (pdata->power_clk_name) { rfkill->pwr_clk = clk_get(&pdev->dev, pdata->power_clk_name); if (IS_ERR(rfkill->pwr_clk)) { pr_warn("%s: can't find pwr_clk.\n", __func__); goto fail_shutdown_name; } } if (gpio_is_valid(pdata->reset_gpio)) { ret = gpio_request(pdata->reset_gpio, rfkill->reset_name); if (ret) { pr_warn("%s: failed to get reset gpio.\n", __func__); goto fail_clock; } } if (gpio_is_valid(pdata->shutdown_gpio)) { ret = gpio_request(pdata->shutdown_gpio, rfkill->shutdown_name); if (ret) { pr_warn("%s: failed to get shutdown gpio.\n", __func__); goto fail_reset; } } rfkill->rfkill_dev = rfkill_alloc(pdata->name, &pdev->dev, pdata->type, &rfkill_gpio_ops, rfkill); if (!rfkill->rfkill_dev) goto fail_shutdown; ret = rfkill_register(rfkill->rfkill_dev); if (ret < 0) goto fail_rfkill; platform_set_drvdata(pdev, rfkill); dev_info(&pdev->dev, "%s device registered.\n", pdata->name); return 0; fail_rfkill: rfkill_destroy(rfkill->rfkill_dev); fail_shutdown: if (gpio_is_valid(pdata->shutdown_gpio)) gpio_free(pdata->shutdown_gpio); fail_reset: if (gpio_is_valid(pdata->reset_gpio)) gpio_free(pdata->reset_gpio); fail_clock: if (rfkill->pwr_clk) clk_put(rfkill->pwr_clk); fail_shutdown_name: kfree(rfkill->shutdown_name); fail_reset_name: kfree(rfkill->reset_name); fail_alloc: kfree(rfkill); return ret; } static int rfkill_gpio_remove(struct platform_device *pdev) { struct rfkill_gpio_data *rfkill = platform_get_drvdata(pdev); struct rfkill_gpio_platform_data *pdata = pdev->dev.platform_data; if (pdata->gpio_runtime_close) pdata->gpio_runtime_close(pdev); 
rfkill_unregister(rfkill->rfkill_dev); rfkill_destroy(rfkill->rfkill_dev); if (gpio_is_valid(rfkill->pdata->shutdown_gpio)) gpio_free(rfkill->pdata->shutdown_gpio); if (gpio_is_valid(rfkill->pdata->reset_gpio)) gpio_free(rfkill->pdata->reset_gpio); if (rfkill->pwr_clk && PWR_CLK_ENABLED(rfkill)) clk_disable(rfkill->pwr_clk); if (rfkill->pwr_clk) clk_put(rfkill->pwr_clk); kfree(rfkill->shutdown_name); kfree(rfkill->reset_name); kfree(rfkill); return 0; } static struct platform_driver rfkill_gpio_driver = { .probe = rfkill_gpio_probe, .remove = __devexit_p(rfkill_gpio_remove), .driver = { .name = "rfkill_gpio", .owner = THIS_MODULE, }, }; module_platform_driver(rfkill_gpio_driver); MODULE_DESCRIPTION("gpio rfkill"); MODULE_AUTHOR("NVIDIA"); MODULE_LICENSE("GPL");
gpl-2.0
TeamWin/android_kernel_huawei_mt2l03
drivers/block/xd.c
5091
34866
/* * This file contains the driver for an XT hard disk controller * (at least the DTC 5150X) for Linux. * * Author: Pat Mackinlay, pat@it.com.au * Date: 29/09/92 * * Revised: 01/01/93, ... * * Ref: DTC 5150X Controller Specification (thanks to Kevin Fowler, * kevinf@agora.rain.com) * Also thanks to: Salvador Abreu, Dave Thaler, Risto Kankkunen and * Wim Van Dorst. * * Revised: 04/04/94 by Risto Kankkunen * Moved the detection code from xd_init() to xd_geninit() as it needed * interrupts enabled and Linus didn't want to enable them in that first * phase. xd_geninit() is the place to do these kinds of things anyway, * he says. * * Modularized: 04/10/96 by Todd Fries, tfries@umr.edu * * Revised: 13/12/97 by Andrzej Krzysztofowicz, ankry@mif.pg.gda.pl * Fixed some problems with disk initialization and module initiation. * Added support for manual geometry setting (except Seagate controllers) * in form: * xd_geo=<cyl_xda>,<head_xda>,<sec_xda>[,<cyl_xdb>,<head_xdb>,<sec_xdb>] * Recovered DMA access. Abridged messages. Added support for DTC5051CX, * WD1002-27X & XEBEC controllers. Driver uses now some jumper settings. * Extended ioctl() support. * * Bugfix: 15/02/01, Paul G. - inform queue layer of tiny xd_maxsect. * */ #include <linux/module.h> #include <linux/errno.h> #include <linux/interrupt.h> #include <linux/mm.h> #include <linux/fs.h> #include <linux/kernel.h> #include <linux/timer.h> #include <linux/genhd.h> #include <linux/hdreg.h> #include <linux/ioport.h> #include <linux/init.h> #include <linux/wait.h> #include <linux/blkdev.h> #include <linux/mutex.h> #include <linux/blkpg.h> #include <linux/delay.h> #include <linux/io.h> #include <linux/gfp.h> #include <asm/uaccess.h> #include <asm/dma.h> #include "xd.h" static DEFINE_MUTEX(xd_mutex); static void __init do_xd_setup (int *integers); #ifdef MODULE static int xd[5] = { -1,-1,-1,-1, }; #endif #define XD_DONT_USE_DMA 0 /* Initial value. 
may be overriden using "nodma" module option */ #define XD_INIT_DISK_DELAY (30) /* 30 ms delay during disk initialization */ /* Above may need to be increased if a problem with the 2nd drive detection (ST11M controller) or resetting a controller (WD) appears */ static XD_INFO xd_info[XD_MAXDRIVES]; /* If you try this driver and find that your card is not detected by the driver at bootup, you need to add your BIOS signature and details to the following list of signatures. A BIOS signature is a string embedded into the first few bytes of your controller's on-board ROM BIOS. To find out what yours is, use something like MS-DOS's DEBUG command. Run DEBUG, and then you can examine your BIOS signature with: d xxxx:0000 where xxxx is the segment of your controller (like C800 or D000 or something). On the ASCII dump at the right, you should be able to see a string mentioning the manufacturer's copyright etc. Add this string into the table below. The parameters in the table are, in order: offset ; this is the offset (in bytes) from the start of your ROM where the signature starts signature ; this is the actual text of the signature xd_?_init_controller ; this is the controller init routine used by your controller xd_?_init_drive ; this is the drive init routine used by your controller The controllers directly supported at the moment are: DTC 5150x, WD 1004A27X, ST11M/R and override. If your controller is made by the same manufacturer as one of these, try using the same init routines as they do. If that doesn't work, your best bet is to use the "override" routines. These routines use a "portable" method of getting the disk's geometry, and may work with your card. If none of these seem to work, try sending me some email and I'll see what I can do <grin>. NOTE: You can now specify your XT controller's parameters from the command line in the form xd=TYPE,IRQ,IO,DMA. The driver should be able to detect your drive's geometry from this info. (eg: xd=0,5,0x320,3 is the "standard"). 
*/

#include <asm/page.h>

/* ISA DMA bounce buffer helpers: __get_dma_pages() returns memory the
 * 8237 DMA controller can reach. */
#define xd_dma_mem_alloc(size) __get_dma_pages(GFP_KERNEL,get_order(size))
#define xd_dma_mem_free(addr, size) free_pages(addr, get_order(size))
static char *xd_dma_buffer;	/* shared bounce buffer, sized at detect time */

/* Known controller BIOS signatures; index 0 is the manual-override entry
 * and is deliberately skipped during ROM scanning (see xd_detect). */
static XD_SIGNATURE xd_sigs[] __initdata = {
	{ 0x0000,"Override geometry handler",NULL,xd_override_init_drive,"n unknown" }, /* Pat Mackinlay, pat@it.com.au */
	{ 0x0008,"[BXD06 (C) DTC 17-MAY-1985]",xd_dtc_init_controller,xd_dtc5150cx_init_drive," DTC 5150CX" }, /* Andrzej Krzysztofowicz, ankry@mif.pg.gda.pl */
	{ 0x000B,"CRD18A   Not an IBM rom. (C) Copyright Data Technology Corp. 05/31/88",xd_dtc_init_controller,xd_dtc_init_drive," DTC 5150X" }, /* Todd Fries, tfries@umr.edu */
	{ 0x000B,"CXD23A Not an IBM ROM (C)Copyright Data Technology Corp 12/03/88",xd_dtc_init_controller,xd_dtc_init_drive," DTC 5150X" }, /* Pat Mackinlay, pat@it.com.au */
	{ 0x0008,"07/15/86(C) Copyright 1986 Western Digital Corp.",xd_wd_init_controller,xd_wd_init_drive," Western Dig. 1002-27X" }, /* Andrzej Krzysztofowicz, ankry@mif.pg.gda.pl */
	{ 0x0008,"06/24/88(C) Copyright 1988 Western Digital Corp.",xd_wd_init_controller,xd_wd_init_drive," Western Dig. WDXT-GEN2" }, /* Dan Newcombe, newcombe@aa.csc.peachnet.edu */
	{ 0x0015,"SEAGATE ST11 BIOS REVISION",xd_seagate_init_controller,xd_seagate_init_drive," Seagate ST11M/R" }, /* Salvador Abreu, spa@fct.unl.pt */
	{ 0x0010,"ST11R BIOS",xd_seagate_init_controller,xd_seagate_init_drive," Seagate ST11M/R" }, /* Risto Kankkunen, risto.kankkunen@cs.helsinki.fi */
	{ 0x0010,"ST11 BIOS v1.7",xd_seagate_init_controller,xd_seagate_init_drive," Seagate ST11R" }, /* Alan Hourihane, alanh@fairlite.demon.co.uk */
	{ 0x1000,"(c)Copyright 1987 SMS",xd_omti_init_controller,xd_omti_init_drive,"n OMTI 5520" }, /* Dirk Melchers, dirk@merlin.nbg.sub.org */
	{ 0x0006,"COPYRIGHT XEBEC (C) 1984",xd_xebec_init_controller,xd_xebec_init_drive," XEBEC" }, /* Andrzej Krzysztofowicz, ankry@mif.pg.gda.pl */
	{ 0x0008,"(C) Copyright 1984 Western Digital Corp", xd_wd_init_controller, xd_wd_init_drive," Western Dig. 1002s-wx2" },
	{ 0x0008,"(C) Copyright 1986 Western Digital Corporation", xd_wd_init_controller, xd_wd_init_drive," 1986 Western Digital" }, /* jfree@sovereign.org */
};

/* Candidate ROM segments scanned for the signatures above. */
static unsigned int xd_bases[] __initdata =
{
	0xC8000, 0xCA000, 0xCC000,
	0xCE000, 0xD0000, 0xD2000,
	0xD4000, 0xD6000, 0xD8000,
	0xDA000, 0xDC000, 0xDE000,
	0xE0000
};

static DEFINE_SPINLOCK(xd_lock);

static struct gendisk *xd_gendisk[2];	/* at most two XT drives */

static int xd_getgeo(struct block_device *bdev, struct hd_geometry *geo);

static const struct block_device_operations xd_fops = {
	.owner	= THIS_MODULE,
	.ioctl	= xd_ioctl,
	.getgeo = xd_getgeo,
};
static DECLARE_WAIT_QUEUE_HEAD(xd_wait_int);
static u_char xd_drives, xd_irq = 5, xd_dma = 3, xd_maxsectors;
static u_char xd_override __initdata = 0, xd_type __initdata = 0;
static u_short xd_iobase = 0x320;	/* default I/O base; init routines may move it */
static int xd_geo[XD_MAXDRIVES*3] __initdata = { 0, };	/* manual cyl/head/sec triples */

static volatile int xdc_busy;		/* controller-busy flag polled by the request fn */
static struct timer_list xd_watchdog_int;

static volatile u_char xd_error;	/* set by the watchdog when an IRQ is missed */
static bool nodma = XD_DONT_USE_DMA;

static struct request_queue *xd_queue;

/* xd_init: register the block device number and set up pointer
tables */ static int __init xd_init(void) { u_char i,controller; unsigned int address; int err; #ifdef MODULE { u_char count = 0; for (i = 4; i > 0; i--) if (((xd[i] = xd[i-1]) >= 0) && !count) count = i; if ((xd[0] = count)) do_xd_setup(xd); } #endif init_timer (&xd_watchdog_int); xd_watchdog_int.function = xd_watchdog; err = -EBUSY; if (register_blkdev(XT_DISK_MAJOR, "xd")) goto out1; err = -ENOMEM; xd_queue = blk_init_queue(do_xd_request, &xd_lock); if (!xd_queue) goto out1a; if (xd_detect(&controller,&address)) { printk("Detected a%s controller (type %d) at address %06x\n", xd_sigs[controller].name,controller,address); if (!request_region(xd_iobase,4,"xd")) { printk("xd: Ports at 0x%x are not available\n", xd_iobase); goto out2; } if (controller) xd_sigs[controller].init_controller(address); xd_drives = xd_initdrives(xd_sigs[controller].init_drive); printk("Detected %d hard drive%s (using IRQ%d & DMA%d)\n", xd_drives,xd_drives == 1 ? "" : "s",xd_irq,xd_dma); } /* * With the drive detected, xd_maxsectors should now be known. 
* If xd_maxsectors is 0, nothing was detected and we fall through * to return -ENODEV */ if (!xd_dma_buffer && xd_maxsectors) { xd_dma_buffer = (char *)xd_dma_mem_alloc(xd_maxsectors * 0x200); if (!xd_dma_buffer) { printk(KERN_ERR "xd: Out of memory.\n"); goto out3; } } err = -ENODEV; if (!xd_drives) goto out3; for (i = 0; i < xd_drives; i++) { XD_INFO *p = &xd_info[i]; struct gendisk *disk = alloc_disk(64); if (!disk) goto Enomem; p->unit = i; disk->major = XT_DISK_MAJOR; disk->first_minor = i<<6; sprintf(disk->disk_name, "xd%c", i+'a'); disk->fops = &xd_fops; disk->private_data = p; disk->queue = xd_queue; set_capacity(disk, p->heads * p->cylinders * p->sectors); printk(" %s: CHS=%d/%d/%d\n", disk->disk_name, p->cylinders, p->heads, p->sectors); xd_gendisk[i] = disk; } err = -EBUSY; if (request_irq(xd_irq,xd_interrupt_handler, 0, "XT hard disk", NULL)) { printk("xd: unable to get IRQ%d\n",xd_irq); goto out4; } if (request_dma(xd_dma,"xd")) { printk("xd: unable to get DMA%d\n",xd_dma); goto out5; } /* xd_maxsectors depends on controller - so set after detection */ blk_queue_max_hw_sectors(xd_queue, xd_maxsectors); for (i = 0; i < xd_drives; i++) add_disk(xd_gendisk[i]); return 0; out5: free_irq(xd_irq, NULL); out4: for (i = 0; i < xd_drives; i++) put_disk(xd_gendisk[i]); out3: if (xd_maxsectors) release_region(xd_iobase,4); if (xd_dma_buffer) xd_dma_mem_free((unsigned long)xd_dma_buffer, xd_maxsectors * 0x200); out2: blk_cleanup_queue(xd_queue); out1a: unregister_blkdev(XT_DISK_MAJOR, "xd"); out1: return err; Enomem: err = -ENOMEM; while (i--) put_disk(xd_gendisk[i]); goto out3; } /* xd_detect: scan the possible BIOS ROM locations for the signature strings */ static u_char __init xd_detect (u_char *controller, unsigned int *address) { int i, j; if (xd_override) { *controller = xd_type; *address = 0; return(1); } for (i = 0; i < ARRAY_SIZE(xd_bases); i++) { void __iomem *p = ioremap(xd_bases[i], 0x2000); if (!p) continue; for (j = 1; j < ARRAY_SIZE(xd_sigs); j++) { 
const char *s = xd_sigs[j].string; if (check_signature(p + xd_sigs[j].offset, s, strlen(s))) { *controller = j; xd_type = j; *address = xd_bases[i]; iounmap(p); return 1; } } iounmap(p); } return 0; } /* do_xd_request: handle an incoming request */ static void do_xd_request (struct request_queue * q) { struct request *req; if (xdc_busy) return; req = blk_fetch_request(q); while (req) { unsigned block = blk_rq_pos(req); unsigned count = blk_rq_cur_sectors(req); XD_INFO *disk = req->rq_disk->private_data; int res = -EIO; int retry; if (req->cmd_type != REQ_TYPE_FS) goto done; if (block + count > get_capacity(req->rq_disk)) goto done; for (retry = 0; (retry < XD_RETRIES) && !res; retry++) res = xd_readwrite(rq_data_dir(req), disk, req->buffer, block, count); done: /* wrap up, 0 = success, -errno = fail */ if (!__blk_end_request_cur(req, res)) req = blk_fetch_request(q); } } static int xd_getgeo(struct block_device *bdev, struct hd_geometry *geo) { XD_INFO *p = bdev->bd_disk->private_data; geo->heads = p->heads; geo->sectors = p->sectors; geo->cylinders = p->cylinders; return 0; } /* xd_ioctl: handle device ioctl's */ static int xd_locked_ioctl(struct block_device *bdev, fmode_t mode, u_int cmd, u_long arg) { switch (cmd) { case HDIO_SET_DMA: if (!capable(CAP_SYS_ADMIN)) return -EACCES; if (xdc_busy) return -EBUSY; nodma = !arg; if (nodma && xd_dma_buffer) { xd_dma_mem_free((unsigned long)xd_dma_buffer, xd_maxsectors * 0x200); xd_dma_buffer = NULL; } else if (!nodma && !xd_dma_buffer) { xd_dma_buffer = (char *)xd_dma_mem_alloc(xd_maxsectors * 0x200); if (!xd_dma_buffer) { nodma = XD_DONT_USE_DMA; return -ENOMEM; } } return 0; case HDIO_GET_DMA: return put_user(!nodma, (long __user *) arg); case HDIO_GET_MULTCOUNT: return put_user(xd_maxsectors, (long __user *) arg); default: return -EINVAL; } } static int xd_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long param) { int ret; mutex_lock(&xd_mutex); ret = xd_locked_ioctl(bdev, mode, cmd, 
param); mutex_unlock(&xd_mutex); return ret; } /* xd_readwrite: handle a read/write request */ static int xd_readwrite (u_char operation,XD_INFO *p,char *buffer,u_int block,u_int count) { int drive = p->unit; u_char cmdblk[6],sense[4]; u_short track,cylinder; u_char head,sector,control,mode = PIO_MODE,temp; char **real_buffer; register int i; #ifdef DEBUG_READWRITE printk("xd_readwrite: operation = %s, drive = %d, buffer = 0x%X, block = %d, count = %d\n",operation == READ ? "read" : "write",drive,buffer,block,count); #endif /* DEBUG_READWRITE */ spin_unlock_irq(&xd_lock); control = p->control; if (!xd_dma_buffer) xd_dma_buffer = (char *)xd_dma_mem_alloc(xd_maxsectors * 0x200); while (count) { temp = count < xd_maxsectors ? count : xd_maxsectors; track = block / p->sectors; head = track % p->heads; cylinder = track / p->heads; sector = block % p->sectors; #ifdef DEBUG_READWRITE printk("xd_readwrite: drive = %d, head = %d, cylinder = %d, sector = %d, count = %d\n",drive,head,cylinder,sector,temp); #endif /* DEBUG_READWRITE */ if (xd_dma_buffer) { mode = xd_setup_dma(operation == READ ? DMA_MODE_READ : DMA_MODE_WRITE,(u_char *)(xd_dma_buffer),temp * 0x200); real_buffer = &xd_dma_buffer; for (i=0; i < (temp * 0x200); i++) xd_dma_buffer[i] = buffer[i]; } else real_buffer = &buffer; xd_build(cmdblk,operation == READ ? CMD_READ : CMD_WRITE,drive,head,cylinder,sector,temp & 0xFF,control); switch (xd_command(cmdblk,mode,(u_char *)(*real_buffer),(u_char *)(*real_buffer),sense,XD_TIMEOUT)) { case 1: printk("xd%c: %s timeout, recalibrating drive\n",'a'+drive,(operation == READ ? "read" : "write")); xd_recalibrate(drive); spin_lock_irq(&xd_lock); return -EIO; case 2: if (sense[0] & 0x30) { printk("xd%c: %s - ",'a'+drive,(operation == READ ? 
"reading" : "writing")); switch ((sense[0] & 0x30) >> 4) { case 0: printk("drive error, code = 0x%X",sense[0] & 0x0F); break; case 1: printk("controller error, code = 0x%X",sense[0] & 0x0F); break; case 2: printk("command error, code = 0x%X",sense[0] & 0x0F); break; case 3: printk("miscellaneous error, code = 0x%X",sense[0] & 0x0F); break; } } if (sense[0] & 0x80) printk(" - CHS = %d/%d/%d\n",((sense[2] & 0xC0) << 2) | sense[3],sense[1] & 0x1F,sense[2] & 0x3F); /* reported drive number = (sense[1] & 0xE0) >> 5 */ else printk(" - no valid disk address\n"); spin_lock_irq(&xd_lock); return -EIO; } if (xd_dma_buffer) for (i=0; i < (temp * 0x200); i++) buffer[i] = xd_dma_buffer[i]; count -= temp, buffer += temp * 0x200, block += temp; } spin_lock_irq(&xd_lock); return 0; } /* xd_recalibrate: recalibrate a given drive and reset controller if necessary */ static void xd_recalibrate (u_char drive) { u_char cmdblk[6]; xd_build(cmdblk,CMD_RECALIBRATE,drive,0,0,0,0,0); if (xd_command(cmdblk,PIO_MODE,NULL,NULL,NULL,XD_TIMEOUT * 8)) printk("xd%c: warning! 
error recalibrating, controller may be unstable\n", 'a'+drive); } /* xd_interrupt_handler: interrupt service routine */ static irqreturn_t xd_interrupt_handler(int irq, void *dev_id) { if (inb(XD_STATUS) & STAT_INTERRUPT) { /* check if it was our device */ #ifdef DEBUG_OTHER printk("xd_interrupt_handler: interrupt detected\n"); #endif /* DEBUG_OTHER */ outb(0,XD_CONTROL); /* acknowledge interrupt */ wake_up(&xd_wait_int); /* and wake up sleeping processes */ return IRQ_HANDLED; } else printk("xd: unexpected interrupt\n"); return IRQ_NONE; } /* xd_setup_dma: set up the DMA controller for a data transfer */ static u_char xd_setup_dma (u_char mode,u_char *buffer,u_int count) { unsigned long f; if (nodma) return (PIO_MODE); if (((unsigned long) buffer & 0xFFFF0000) != (((unsigned long) buffer + count) & 0xFFFF0000)) { #ifdef DEBUG_OTHER printk("xd_setup_dma: using PIO, transfer overlaps 64k boundary\n"); #endif /* DEBUG_OTHER */ return (PIO_MODE); } f=claim_dma_lock(); disable_dma(xd_dma); clear_dma_ff(xd_dma); set_dma_mode(xd_dma,mode); set_dma_addr(xd_dma, (unsigned long) buffer); set_dma_count(xd_dma,count); release_dma_lock(f); return (DMA_MODE); /* use DMA and INT */ } /* xd_build: put stuff into an array in a format suitable for the controller */ static u_char *xd_build (u_char *cmdblk,u_char command,u_char drive,u_char head,u_short cylinder,u_char sector,u_char count,u_char control) { cmdblk[0] = command; cmdblk[1] = ((drive & 0x07) << 5) | (head & 0x1F); cmdblk[2] = ((cylinder & 0x300) >> 2) | (sector & 0x3F); cmdblk[3] = cylinder & 0xFF; cmdblk[4] = count; cmdblk[5] = control; return (cmdblk); } static void xd_watchdog (unsigned long unused) { xd_error = 1; wake_up(&xd_wait_int); } /* xd_waitport: waits until port & mask == flags or a timeout occurs. 
return 1 for a timeout */
static inline u_char xd_waitport (u_short port,u_char flags,u_char mask,u_long timeout)
{
	u_long expiry = jiffies + timeout;
	int success;

	xdc_busy = 1;
	while ((success = ((inb(port) & mask) != flags)) && time_before(jiffies, expiry))
		schedule_timeout_uninterruptible(1);
	xdc_busy = 0;
	return (success);
}

/* Arm the watchdog, enable DMA and sleep until the controller interrupts
 * (or the watchdog fires).  Returns 1 if the IRQ was missed, 0 otherwise.
 * NOTE(review): sleep_on() is racy if the wakeup can occur before the
 * sleep; preserved as-is from the original driver. */
static inline u_int xd_wait_for_IRQ (void)
{
	unsigned long flags;
	xd_watchdog_int.expires = jiffies + 8 * HZ;
	add_timer(&xd_watchdog_int);

	flags=claim_dma_lock();
	enable_dma(xd_dma);
	release_dma_lock(flags);

	sleep_on(&xd_wait_int);
	del_timer(&xd_watchdog_int);
	xdc_busy = 0;

	flags=claim_dma_lock();
	disable_dma(xd_dma);
	release_dma_lock(flags);

	if (xd_error) {
		printk("xd: missed IRQ - command aborted\n");
		xd_error = 0;
		return (1);
	}
	return (0);
}

/* xd_command: handle all data transfers necessary for a single command.
 * Drives the controller state machine byte-by-byte (or via DMA), reads
 * the completion status byte, and fetches sense data on error.
 * Returns non-zero on timeout or controller-reported error. */
static u_int xd_command (u_char *command,u_char mode,u_char *indata,u_char *outdata,u_char *sense,u_long timeout)
{
	u_char cmdblk[6],csb,complete = 0;

#ifdef DEBUG_COMMAND
	printk("xd_command: command = 0x%X, mode = 0x%X, indata = 0x%X, outdata = 0x%X, sense = 0x%X\n",command,mode,indata,outdata,sense);
#endif /* DEBUG_COMMAND */

	outb(0,XD_SELECT);
	outb(mode,XD_CONTROL);

	if (xd_waitport(XD_STATUS,STAT_SELECT,STAT_SELECT,timeout))
		return (1);

	while (!complete) {
		if (xd_waitport(XD_STATUS,STAT_READY,STAT_READY,timeout))
			return (1);

		/* the COMMAND/INPUT status bits say what the controller
		 * expects next: data out, data in, command byte, or done */
		switch (inb(XD_STATUS) & (STAT_COMMAND | STAT_INPUT)) {
			case 0:
				if (mode == DMA_MODE) {
					if (xd_wait_for_IRQ())
						return (1);
				} else
					outb(outdata ? *outdata++ : 0,XD_DATA);
				break;
			case STAT_INPUT:
				if (mode == DMA_MODE) {
					if (xd_wait_for_IRQ())
						return (1);
				} else
					if (indata)
						*indata++ = inb(XD_DATA);
					else
						inb(XD_DATA);	/* discard */
				break;
			case STAT_COMMAND:
				outb(command ? *command++ : 0,XD_DATA);
				break;
			case STAT_COMMAND | STAT_INPUT:
				complete = 1;
				break;
		}
	}
	csb = inb(XD_DATA);

	if (xd_waitport(XD_STATUS,0,STAT_SELECT,timeout))	/* wait until deselected */
		return (1);

	if (csb & CSB_ERROR) {					/* read sense data if error */
		xd_build(cmdblk,CMD_SENSE,(csb & CSB_LUN) >> 5,0,0,0,0,0);
		if (xd_command(cmdblk,0,sense,NULL,NULL,XD_TIMEOUT))
			printk("xd: warning! sense command failed!\n");
	}

#ifdef DEBUG_COMMAND
	printk("xd_command: completed with csb = 0x%X\n",csb);
#endif /* DEBUG_COMMAND */

	return (csb & CSB_ERROR);
}

/* Probe each possible drive with a test-ready command and run the
 * controller-specific init routine for those that answer.
 * Returns the number of drives found. */
static u_char __init xd_initdrives (void (*init_drive)(u_char drive))
{
	u_char cmdblk[6],i,count = 0;

	for (i = 0; i < XD_MAXDRIVES; i++) {
		xd_build(cmdblk,CMD_TESTREADY,i,0,0,0,0,0);
		if (!xd_command(cmdblk,PIO_MODE,NULL,NULL,NULL,XD_TIMEOUT*8)) {
			msleep_interruptible(XD_INIT_DISK_DELAY);

			init_drive(count);
			count++;

			msleep_interruptible(XD_INIT_DISK_DELAY);
		}
	}
	return (count);
}

/* Apply a user-supplied xd_geo= cylinders/heads/sectors triple. */
static void __init xd_manual_geo_set (u_char drive)
{
	xd_info[drive].heads = (u_char)(xd_geo[3 * drive + 1]);
	xd_info[drive].cylinders = (u_short)(xd_geo[3 * drive]);
	xd_info[drive].sectors = (u_char)(xd_geo[3 * drive + 2]);
}

/* Map the DTC BIOS ROM segment to an I/O base and reset the controller. */
static void __init xd_dtc_init_controller (unsigned int address)
{
	switch (address) {
		case 0x00000:
		case 0xC8000:	break;			/*initial: 0x320 */
		case 0xCA000:	xd_iobase = 0x324;
				/* NOTE(review): no break here — 0xCA000 falls
				 * through to the 0xD0000/0xD8000 break;
				 * preserved from the original driver */
		case 0xD0000:				/*5150CX*/
		case 0xD8000:	break;			/*5150CX & 5150XL*/
		default:        printk("xd_dtc_init_controller: unsupported BIOS address %06x\n",address);
				break;
	}
	xd_maxsectors = 0x01;		/* my card seems to have trouble doing multi-block transfers? */

	outb(0,XD_RESET);		/* reset the controller */
}

static void __init xd_dtc5150cx_init_drive (u_char drive)
{
	/* values from controller's BIOS - BIOS chip may be removed */
	static u_short geometry_table[][4] = {
		{0x200,8,0x200,0x100},
		{0x267,2,0x267,0x267},
		{0x264,4,0x264,0x80},
		{0x132,4,0x132,0x0},
		{0x132,2,0x80, 0x132},
		{0x177,8,0x177,0x0},
		{0x132,8,0x84, 0x0},
		{},  /* not used */
		{0x132,6,0x80, 0x100},
		{0x200,6,0x100,0x100},
		{0x264,2,0x264,0x80},
		{0x280,4,0x280,0x100},
		{0x2B9,3,0x2B9,0x2B9},
		{0x2B9,5,0x2B9,0x2B9},
		{0x280,6,0x280,0x100},
		{0x132,4,0x132,0x0}};
	u_char n;

	/* fold the jumper byte down to a 4-bit table index for this drive */
	n = inb(XD_JUMPER);
	n = (drive ? n : (n >> 2)) & 0x33;
	n = (n | (n >> 2)) & 0x0F;
	if (xd_geo[3*drive])
		xd_manual_geo_set(drive);
	else
		if (n != 7) {
			xd_info[drive].heads = (u_char)(geometry_table[n][1]);		/* heads */
			xd_info[drive].cylinders = geometry_table[n][0];	/* cylinders */
			xd_info[drive].sectors = 17;				/* sectors */
#if 0
			xd_info[drive].rwrite = geometry_table[n][2];	/* reduced write */
			xd_info[drive].precomp = geometry_table[n][3]		/* write precomp */
			xd_info[drive].ecc = 0x0B;			/* ecc length */
#endif /* 0 */
		}
		else {
			printk("xd%c: undetermined drive geometry\n",'a'+drive);
			return;
		}
	xd_info[drive].control = 5;				/* control byte */

	xd_setparam(CMD_DTCSETPARAM,drive,xd_info[drive].heads,xd_info[drive].cylinders,geometry_table[n][2],geometry_table[n][3],0x0B);
	xd_recalibrate(drive);
}

/* Query a DTC 5150X controller for the drive geometry and program it. */
static void __init xd_dtc_init_drive (u_char drive)
{
	u_char cmdblk[6],buf[64];

	xd_build(cmdblk,CMD_DTCGETGEOM,drive,0,0,0,0,0);
	if (!xd_command(cmdblk,PIO_MODE,buf,NULL,NULL,XD_TIMEOUT * 2)) {
		xd_info[drive].heads = buf[0x0A];			/* heads */
		xd_info[drive].cylinders = ((u_short *) (buf))[0x04];	/* cylinders */
		xd_info[drive].sectors = 17;				/* sectors */
		if (xd_geo[3*drive])
			xd_manual_geo_set(drive);
#if 0
		xd_info[drive].rwrite = ((u_short *) (buf + 1))[0x05];	/* reduced write */
		xd_info[drive].precomp = ((u_short *) (buf + 1))[0x06];	/* write precomp */
		xd_info[drive].ecc = buf[0x0F];				/* ecc length */
#endif /* 0 */
		xd_info[drive].control = 0;				/* control byte */

		xd_setparam(CMD_DTCSETPARAM,drive,xd_info[drive].heads,xd_info[drive].cylinders,((u_short *) (buf + 1))[0x05],((u_short *) (buf + 1))[0x06],buf[0x0F]);
		xd_build(cmdblk,CMD_DTCSETSTEP,drive,0,0,0,0,7);
		if (xd_command(cmdblk,PIO_MODE,NULL,NULL,NULL,XD_TIMEOUT * 2))
			printk("xd_dtc_init_drive: error setting step rate for xd%c\n", 'a'+drive);
	}
	else
		printk("xd_dtc_init_drive: error reading geometry for xd%c\n", 'a'+drive);
}

/* Map the Western Digital BIOS ROM segment to an I/O base and reset. */
static void __init xd_wd_init_controller (unsigned int address)
{
	switch (address) {
		case 0x00000:
		case 0xC8000:	break;			/*initial: 0x320 */
		case 0xCA000:	xd_iobase = 0x324; break;
		case 0xCC000:   xd_iobase = 0x328; break;
		case 0xCE000:   xd_iobase = 0x32C; break;
		case 0xD0000:	xd_iobase = 0x328; break; /* ? */
		case 0xD8000:	xd_iobase = 0x32C; break; /* ? */
		default:        printk("xd_wd_init_controller: unsupported BIOS address %06x\n",address);
				break;
	}
	xd_maxsectors = 0x01;		/* this one doesn't wrap properly either... */

	outb(0,XD_RESET);		/* reset the controller */

	msleep(XD_INIT_DISK_DELAY);
}

static void __init xd_wd_init_drive (u_char drive)
{
	/* values from controller's BIOS - BIOS may be disabled */
	static u_short geometry_table[][4] = {
		{0x264,4,0x1C2,0x1C2},   /* common part */
		{0x132,4,0x099,0x0},
		{0x267,2,0x1C2,0x1C2},
		{0x267,4,0x1C2,0x1C2},

		{0x334,6,0x335,0x335},   /* 1004 series RLL */
		{0x30E,4,0x30F,0x3DC},
		{0x30E,2,0x30F,0x30F},
		{0x267,4,0x268,0x268},

		{0x3D5,5,0x3D6,0x3D6},   /* 1002 series RLL */
		{0x3DB,7,0x3DC,0x3DC},
		{0x264,4,0x265,0x265},
		{0x267,4,0x268,0x268}};

	u_char cmdblk[6],buf[0x200];
	u_char n = 0,rll,jumper_state,use_jumper_geo;
	/* distinguish 1002 vs 1004 series by a character of the ROM signature */
	u_char wd_1002 = (xd_sigs[xd_type].string[7] == '6');

	jumper_state = ~(inb(0x322));
	if (jumper_state & 0x40)
		xd_irq = 9;
	rll = (jumper_state & 0x30) ? (0x04 << wd_1002) : 0;
	/* read the drive's own parameter sector (sector 0) */
	xd_build(cmdblk,CMD_READ,drive,0,0,0,1,0);
	if (!xd_command(cmdblk,PIO_MODE,buf,NULL,NULL,XD_TIMEOUT * 2)) {
		xd_info[drive].heads = buf[0x1AF];				/* heads */
		xd_info[drive].cylinders = ((u_short *) (buf + 1))[0xD6];	/* cylinders */
		xd_info[drive].sectors = 17;					/* sectors */
		if (xd_geo[3*drive])
			xd_manual_geo_set(drive);
#if 0
		xd_info[drive].rwrite = ((u_short *) (buf))[0xD8];		/* reduced write */
		xd_info[drive].wprecomp = ((u_short *) (buf))[0xDA];		/* write precomp */
		xd_info[drive].ecc = buf[0x1B4];				/* ecc length */
#endif /* 0 */
		xd_info[drive].control = buf[0x1B5];				/* control byte */
		use_jumper_geo = !(xd_info[drive].heads) || !(xd_info[drive].cylinders);
		if (xd_geo[3*drive]) {
			xd_manual_geo_set(drive);
			xd_info[drive].control = rll ? 7 : 5;
		}
		else if (use_jumper_geo) {
			n = (((jumper_state & 0x0F) >> (drive << 1)) & 0x03) | rll;
			xd_info[drive].cylinders = geometry_table[n][0];
			xd_info[drive].heads = (u_char)(geometry_table[n][1]);
			xd_info[drive].control = rll ? 7 : 5;
#if 0
			xd_info[drive].rwrite = geometry_table[n][2];
			xd_info[drive].wprecomp = geometry_table[n][3];
			xd_info[drive].ecc = 0x0B;
#endif /* 0 */
		}
		if (!wd_1002) {
			if (use_jumper_geo)
				xd_setparam(CMD_WDSETPARAM,drive,xd_info[drive].heads,xd_info[drive].cylinders,
					geometry_table[n][2],geometry_table[n][3],0x0B);
			else
				xd_setparam(CMD_WDSETPARAM,drive,xd_info[drive].heads,xd_info[drive].cylinders,
					((u_short *) (buf))[0xD8],((u_short *) (buf))[0xDA],buf[0x1B4]);
		}
	/* 1002 based RLL controller requests converted addressing, but reports physical
	   (physical 26 sec., logical 17 sec.)
	   1004 based ???? */
		/* NOTE(review): bitwise 'rll & wd_1002' is always 0 for the
		 * values rll takes (0/0x04/0x08) vs wd_1002 (0/1) — '&&' was
		 * probably intended; preserved as upstream had it. */
		if (rll & wd_1002) {
			if ((xd_info[drive].cylinders *= 26,
			     xd_info[drive].cylinders /= 17) > 1023)
				xd_info[drive].cylinders = 1023;	/* 1024 ? */
#if 0
			xd_info[drive].rwrite *= 26;
			xd_info[drive].rwrite /= 17;
			xd_info[drive].wprecomp *= 26
			xd_info[drive].wprecomp /= 17;
#endif /* 0 */
		}
	}
	else
		printk("xd_wd_init_drive: error reading geometry for xd%c\n",'a'+drive);
}

/* Map the Seagate ST11 BIOS ROM segment to an I/O base and reset. */
static void __init xd_seagate_init_controller (unsigned int address)
{
	switch (address) {
		case 0x00000:
		case 0xC8000:	break;			/*initial: 0x320 */
		case 0xD0000:	xd_iobase = 0x324; break;
		case 0xD8000:	xd_iobase = 0x328; break;
		case 0xE0000:	xd_iobase = 0x32C; break;
		default:	printk("xd_seagate_init_controller: unsupported BIOS address %06x\n",address);
				break;
	}
	xd_maxsectors = 0x40;

	outb(0,XD_RESET);		/* reset the controller */
}

/* Ask the ST11 for the drive geometry via its dedicated command. */
static void __init xd_seagate_init_drive (u_char drive)
{
	u_char cmdblk[6],buf[0x200];

	xd_build(cmdblk,CMD_ST11GETGEOM,drive,0,0,0,1,0);
	if (!xd_command(cmdblk,PIO_MODE,buf,NULL,NULL,XD_TIMEOUT * 2)) {
		xd_info[drive].heads = buf[0x04];		/* heads */
		xd_info[drive].cylinders = (buf[0x02] << 8) | buf[0x03];	/* cylinders */
		xd_info[drive].sectors = buf[0x05];		/* sectors */
		xd_info[drive].control = 0;			/* control byte */
	}
	else
		printk("xd_seagate_init_drive: error reading geometry from xd%c\n", 'a'+drive);
}

/* Omti support courtesy Dirk Melchers */
static void __init xd_omti_init_controller (unsigned int address)
{
	switch (address) {
		case 0x00000:
		case 0xC8000:	break;			/*initial: 0x320 */
		case 0xD0000:	xd_iobase = 0x324; break;
		case 0xD8000:	xd_iobase = 0x328; break;
		case 0xE0000:	xd_iobase = 0x32C; break;
		default:	printk("xd_omti_init_controller: unsupported BIOS address %06x\n",address);
				break;
	}

	xd_maxsectors = 0x40;

	outb(0,XD_RESET);		/* reset the controller */
}

static void __init xd_omti_init_drive (u_char drive)
{
	/* gets infos from drive */
	xd_override_init_drive(drive);

	/* set other parameters, Hardcoded, not that nice :-) */
	xd_info[drive].control = 2;
}

/* Xebec support (AK) */
static void __init xd_xebec_init_controller (unsigned int address)
{
/* iobase may be set manually in range 0x300 - 0x33C
      irq may be set manually to 2(9),3,4,5,6,7
      dma may be set manually to 1,2,3
	(How to detect them ???)
BIOS address may be set manually in range 0x0 - 0xF8000
If you need non-standard settings use the xd=... command */

	switch (address) {
		case 0x00000:
		case 0xC8000:	/* initially: xd_iobase==0x320 */
		case 0xD0000:
		case 0xD2000:
		case 0xD4000:
		case 0xD6000:
		case 0xD8000:
		case 0xDA000:
		case 0xDC000:
		case 0xDE000:
		case 0xE0000:	break;
		default:	printk("xd_xebec_init_controller: unsupported BIOS address %06x\n",address);
				break;
		}

	xd_maxsectors = 0x01;
	outb(0,XD_RESET);		/* reset the controller */

	msleep(XD_INIT_DISK_DELAY);
}

static void __init xd_xebec_init_drive (u_char drive)
{
	/* values from controller's BIOS - BIOS chip may be removed */
	static u_short geometry_table[][5] = {
		{0x132,4,0x080,0x080,0x7},
		{0x132,4,0x080,0x080,0x17},
		{0x264,2,0x100,0x100,0x7},
		{0x264,2,0x100,0x100,0x17},
		{0x132,8,0x080,0x080,0x7},
		{0x132,8,0x080,0x080,0x17},
		{0x264,4,0x100,0x100,0x6},
		{0x264,4,0x100,0x100,0x17},
		{0x2BC,5,0x2BC,0x12C,0x6},
		{0x3A5,4,0x3A5,0x3A5,0x7},
		{0x26C,6,0x26C,0x26C,0x7},
		{0x200,8,0x200,0x100,0x17},
		{0x400,5,0x400,0x400,0x7},
		{0x400,6,0x400,0x400,0x7},
		{0x264,8,0x264,0x200,0x17},
		{0x33E,7,0x33E,0x200,0x7}};
	u_char n;

	n = inb(XD_JUMPER) & 0x0F; /* BIOS's drive number: same geometry is assumed for BOTH drives */
	if (xd_geo[3*drive])
		xd_manual_geo_set(drive);
	else {
		xd_info[drive].heads = (u_char)(geometry_table[n][1]);		/* heads */
		xd_info[drive].cylinders = geometry_table[n][0];	/* cylinders */
		xd_info[drive].sectors = 17;				/* sectors */
#if 0
		xd_info[drive].rwrite = geometry_table[n][2];		/* reduced write */
		xd_info[drive].precomp = geometry_table[n][3]		/* write precomp */
		xd_info[drive].ecc = 0x0B;			/* ecc length */
#endif /* 0 */
	}
	xd_info[drive].control = geometry_table[n][4];			/* control byte */
	xd_setparam(CMD_XBSETPARAM,drive,xd_info[drive].heads,xd_info[drive].cylinders,geometry_table[n][2],geometry_table[n][3],0x0B);
	xd_recalibrate(drive);
}

/* xd_override_init_drive: this finds disk
geometry in a "binary search" style, narrowing in on the "correct" number of heads etc. by trying values until it gets the highest successful value. Idea courtesy Salvador Abreu (spa@fct.unl.pt). */ static void __init xd_override_init_drive (u_char drive) { u_short min[] = { 0,0,0 },max[] = { 16,1024,64 },test[] = { 0,0,0 }; u_char cmdblk[6],i; if (xd_geo[3*drive]) xd_manual_geo_set(drive); else { for (i = 0; i < 3; i++) { while (min[i] != max[i] - 1) { test[i] = (min[i] + max[i]) / 2; xd_build(cmdblk,CMD_SEEK,drive,(u_char) test[0],(u_short) test[1],(u_char) test[2],0,0); if (!xd_command(cmdblk,PIO_MODE,NULL,NULL,NULL,XD_TIMEOUT * 2)) min[i] = test[i]; else max[i] = test[i]; } test[i] = min[i]; } xd_info[drive].heads = (u_char) min[0] + 1; xd_info[drive].cylinders = (u_short) min[1] + 1; xd_info[drive].sectors = (u_char) min[2] + 1; } xd_info[drive].control = 0; } /* xd_setup: initialise controller from command line parameters */ static void __init do_xd_setup (int *integers) { switch (integers[0]) { case 4: if (integers[4] < 0) nodma = 1; else if (integers[4] < 8) xd_dma = integers[4]; case 3: if ((integers[3] > 0) && (integers[3] <= 0x3FC)) xd_iobase = integers[3]; case 2: if ((integers[2] > 0) && (integers[2] < 16)) xd_irq = integers[2]; case 1: xd_override = 1; if ((integers[1] >= 0) && (integers[1] < ARRAY_SIZE(xd_sigs))) xd_type = integers[1]; case 0: break; default:printk("xd: too many parameters for xd\n"); } xd_maxsectors = 0x01; } /* xd_setparam: set the drive characteristics */ static void __init xd_setparam (u_char command,u_char drive,u_char heads,u_short cylinders,u_short rwrite,u_short wprecomp,u_char ecc) { u_char cmdblk[14]; xd_build(cmdblk,command,drive,0,0,0,0,0); cmdblk[6] = (u_char) (cylinders >> 8) & 0x03; cmdblk[7] = (u_char) (cylinders & 0xFF); cmdblk[8] = heads & 0x1F; cmdblk[9] = (u_char) (rwrite >> 8) & 0x03; cmdblk[10] = (u_char) (rwrite & 0xFF); cmdblk[11] = (u_char) (wprecomp >> 8) & 0x03; cmdblk[12] = (u_char) (wprecomp & 0xFF); 
cmdblk[13] = ecc; /* Some controllers require geometry info as data, not command */ if (xd_command(cmdblk,PIO_MODE,NULL,&cmdblk[6],NULL,XD_TIMEOUT * 2)) printk("xd: error setting characteristics for xd%c\n", 'a'+drive); } #ifdef MODULE module_param_array(xd, int, NULL, 0); module_param_array(xd_geo, int, NULL, 0); module_param(nodma, bool, 0); MODULE_LICENSE("GPL"); void cleanup_module(void) { int i; unregister_blkdev(XT_DISK_MAJOR, "xd"); for (i = 0; i < xd_drives; i++) { del_gendisk(xd_gendisk[i]); put_disk(xd_gendisk[i]); } blk_cleanup_queue(xd_queue); release_region(xd_iobase,4); if (xd_drives) { free_irq(xd_irq, NULL); free_dma(xd_dma); if (xd_dma_buffer) xd_dma_mem_free((unsigned long)xd_dma_buffer, xd_maxsectors * 0x200); } } #else static int __init xd_setup (char *str) { int ints[5]; get_options (str, ARRAY_SIZE (ints), ints); do_xd_setup (ints); return 1; } /* xd_manual_geo_init: initialise drive geometry from command line parameters (used only for WD drives) */ static int __init xd_manual_geo_init (char *str) { int i, integers[1 + 3*XD_MAXDRIVES]; get_options (str, ARRAY_SIZE (integers), integers); if (integers[0]%3 != 0) { printk("xd: incorrect number of parameters for xd_geo\n"); return 1; } for (i = 0; (i < integers[0]) && (i < 3*XD_MAXDRIVES); i++) xd_geo[i] = integers[i+1]; return 1; } __setup ("xd=", xd_setup); __setup ("xd_geo=", xd_manual_geo_init); #endif /* MODULE */ module_init(xd_init); MODULE_ALIAS_BLOCKDEV_MAJOR(XT_DISK_MAJOR);
gpl-2.0
Zenfone2-development/android_kernel_asus_moorefield
drivers/net/wireless/orinoco/fw.c
5091
9866
/* Firmware file reading and download helpers * * See copyright notice in main.c */ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/firmware.h> #include <linux/device.h> #include <linux/module.h> #include "hermes.h" #include "hermes_dld.h" #include "orinoco.h" #include "fw.h" /* End markers (for Symbol firmware only) */ #define TEXT_END 0x1A /* End of text header */ struct fw_info { char *pri_fw; char *sta_fw; char *ap_fw; u32 pda_addr; u16 pda_size; }; static const struct fw_info orinoco_fw[] = { { NULL, "agere_sta_fw.bin", "agere_ap_fw.bin", 0x00390000, 1000 }, { NULL, "prism_sta_fw.bin", "prism_ap_fw.bin", 0, 1024 }, { "symbol_sp24t_prim_fw", "symbol_sp24t_sec_fw", NULL, 0x00003100, 512 } }; MODULE_FIRMWARE("agere_sta_fw.bin"); MODULE_FIRMWARE("agere_ap_fw.bin"); MODULE_FIRMWARE("prism_sta_fw.bin"); MODULE_FIRMWARE("prism_ap_fw.bin"); MODULE_FIRMWARE("symbol_sp24t_prim_fw"); MODULE_FIRMWARE("symbol_sp24t_sec_fw"); /* Structure used to access fields in FW * Make sure LE decoding macros are used */ struct orinoco_fw_header { char hdr_vers[6]; /* ASCII string for header version */ __le16 headersize; /* Total length of header */ __le32 entry_point; /* NIC entry point */ __le32 blocks; /* Number of blocks to program */ __le32 block_offset; /* Offset of block data from eof header */ __le32 pdr_offset; /* Offset to PDR data from eof header */ __le32 pri_offset; /* Offset to primary plug data */ __le32 compat_offset; /* Offset to compatibility data*/ char signature[0]; /* FW signature length headersize-20 */ } __packed; /* Check the range of various header entries. Return a pointer to a * description of the problem, or NULL if everything checks out. 
*/ static const char *validate_fw(const struct orinoco_fw_header *hdr, size_t len) { u16 hdrsize; if (len < sizeof(*hdr)) return "image too small"; if (memcmp(hdr->hdr_vers, "HFW", 3) != 0) return "format not recognised"; hdrsize = le16_to_cpu(hdr->headersize); if (hdrsize > len) return "bad headersize"; if ((hdrsize + le32_to_cpu(hdr->block_offset)) > len) return "bad block offset"; if ((hdrsize + le32_to_cpu(hdr->pdr_offset)) > len) return "bad PDR offset"; if ((hdrsize + le32_to_cpu(hdr->pri_offset)) > len) return "bad PRI offset"; if ((hdrsize + le32_to_cpu(hdr->compat_offset)) > len) return "bad compat offset"; /* TODO: consider adding a checksum or CRC to the firmware format */ return NULL; } #if defined(CONFIG_HERMES_CACHE_FW_ON_INIT) || defined(CONFIG_PM_SLEEP) static inline const struct firmware * orinoco_cached_fw_get(struct orinoco_private *priv, bool primary) { if (primary) return priv->cached_pri_fw; else return priv->cached_fw; } #else #define orinoco_cached_fw_get(priv, primary) (NULL) #endif /* Download either STA or AP firmware into the card. 
*/ static int orinoco_dl_firmware(struct orinoco_private *priv, const struct fw_info *fw, int ap) { /* Plug Data Area (PDA) */ __le16 *pda; struct hermes *hw = &priv->hw; const struct firmware *fw_entry; const struct orinoco_fw_header *hdr; const unsigned char *first_block; const void *end; const char *firmware; const char *fw_err; struct device *dev = priv->dev; int err = 0; pda = kzalloc(fw->pda_size, GFP_KERNEL); if (!pda) return -ENOMEM; if (ap) firmware = fw->ap_fw; else firmware = fw->sta_fw; dev_dbg(dev, "Attempting to download firmware %s\n", firmware); /* Read current plug data */ err = hw->ops->read_pda(hw, pda, fw->pda_addr, fw->pda_size); dev_dbg(dev, "Read PDA returned %d\n", err); if (err) goto free; if (!orinoco_cached_fw_get(priv, false)) { err = request_firmware(&fw_entry, firmware, priv->dev); if (err) { dev_err(dev, "Cannot find firmware %s\n", firmware); err = -ENOENT; goto free; } } else fw_entry = orinoco_cached_fw_get(priv, false); hdr = (const struct orinoco_fw_header *) fw_entry->data; fw_err = validate_fw(hdr, fw_entry->size); if (fw_err) { dev_warn(dev, "Invalid firmware image detected (%s). 
" "Aborting download\n", fw_err); err = -EINVAL; goto abort; } /* Enable aux port to allow programming */ err = hw->ops->program_init(hw, le32_to_cpu(hdr->entry_point)); dev_dbg(dev, "Program init returned %d\n", err); if (err != 0) goto abort; /* Program data */ first_block = (fw_entry->data + le16_to_cpu(hdr->headersize) + le32_to_cpu(hdr->block_offset)); end = fw_entry->data + fw_entry->size; err = hermes_program(hw, first_block, end); dev_dbg(dev, "Program returned %d\n", err); if (err != 0) goto abort; /* Update production data */ first_block = (fw_entry->data + le16_to_cpu(hdr->headersize) + le32_to_cpu(hdr->pdr_offset)); err = hermes_apply_pda_with_defaults(hw, first_block, end, pda, &pda[fw->pda_size / sizeof(*pda)]); dev_dbg(dev, "Apply PDA returned %d\n", err); if (err) goto abort; /* Tell card we've finished */ err = hw->ops->program_end(hw); dev_dbg(dev, "Program end returned %d\n", err); if (err != 0) goto abort; /* Check if we're running */ dev_dbg(dev, "hermes_present returned %d\n", hermes_present(hw)); abort: /* If we requested the firmware, release it. */ if (!orinoco_cached_fw_get(priv, false)) release_firmware(fw_entry); free: kfree(pda); return err; } /* * Process a firmware image - stop the card, load the firmware, reset * the card and make sure it responds. For the secondary firmware take * care of the PDA - read it and then write it on top of the firmware. 
*/ static int symbol_dl_image(struct orinoco_private *priv, const struct fw_info *fw, const unsigned char *image, const void *end, int secondary) { struct hermes *hw = &priv->hw; int ret = 0; const unsigned char *ptr; const unsigned char *first_block; /* Plug Data Area (PDA) */ __le16 *pda = NULL; /* Binary block begins after the 0x1A marker */ ptr = image; while (*ptr++ != TEXT_END); first_block = ptr; /* Read the PDA from EEPROM */ if (secondary) { pda = kzalloc(fw->pda_size, GFP_KERNEL); if (!pda) return -ENOMEM; ret = hw->ops->read_pda(hw, pda, fw->pda_addr, fw->pda_size); if (ret) goto free; } /* Stop the firmware, so that it can be safely rewritten */ if (priv->stop_fw) { ret = priv->stop_fw(priv, 1); if (ret) goto free; } /* Program the adapter with new firmware */ ret = hermes_program(hw, first_block, end); if (ret) goto free; /* Write the PDA to the adapter */ if (secondary) { size_t len = hermes_blocks_length(first_block, end); ptr = first_block + len; ret = hermes_apply_pda(hw, ptr, end, pda, &pda[fw->pda_size / sizeof(*pda)]); kfree(pda); if (ret) return ret; } /* Run the firmware */ if (priv->stop_fw) { ret = priv->stop_fw(priv, 0); if (ret) return ret; } /* Reset hermes chip and make sure it responds */ ret = hw->ops->init(hw); /* hermes_reset() should return 0 with the secondary firmware */ if (secondary && ret != 0) return -ENODEV; /* And this should work with any firmware */ if (!hermes_present(hw)) return -ENODEV; return 0; free: kfree(pda); return ret; } /* * Download the firmware into the card, this also does a PCMCIA soft * reset on the card, to make sure it's in a sane state. 
*/ static int symbol_dl_firmware(struct orinoco_private *priv, const struct fw_info *fw) { struct device *dev = priv->dev; int ret; const struct firmware *fw_entry; if (!orinoco_cached_fw_get(priv, true)) { if (request_firmware(&fw_entry, fw->pri_fw, priv->dev) != 0) { dev_err(dev, "Cannot find firmware: %s\n", fw->pri_fw); return -ENOENT; } } else fw_entry = orinoco_cached_fw_get(priv, true); /* Load primary firmware */ ret = symbol_dl_image(priv, fw, fw_entry->data, fw_entry->data + fw_entry->size, 0); if (!orinoco_cached_fw_get(priv, true)) release_firmware(fw_entry); if (ret) { dev_err(dev, "Primary firmware download failed\n"); return ret; } if (!orinoco_cached_fw_get(priv, false)) { if (request_firmware(&fw_entry, fw->sta_fw, priv->dev) != 0) { dev_err(dev, "Cannot find firmware: %s\n", fw->sta_fw); return -ENOENT; } } else fw_entry = orinoco_cached_fw_get(priv, false); /* Load secondary firmware */ ret = symbol_dl_image(priv, fw, fw_entry->data, fw_entry->data + fw_entry->size, 1); if (!orinoco_cached_fw_get(priv, false)) release_firmware(fw_entry); if (ret) dev_err(dev, "Secondary firmware download failed\n"); return ret; } int orinoco_download(struct orinoco_private *priv) { int err = 0; /* Reload firmware */ switch (priv->firmware_type) { case FIRMWARE_TYPE_AGERE: /* case FIRMWARE_TYPE_INTERSIL: */ err = orinoco_dl_firmware(priv, &orinoco_fw[priv->firmware_type], 0); break; case FIRMWARE_TYPE_SYMBOL: err = symbol_dl_firmware(priv, &orinoco_fw[priv->firmware_type]); break; case FIRMWARE_TYPE_INTERSIL: break; } /* TODO: if we fail we probably need to reinitialise * the driver */ return err; } #if defined(CONFIG_HERMES_CACHE_FW_ON_INIT) || defined(CONFIG_PM_SLEEP) void orinoco_cache_fw(struct orinoco_private *priv, int ap) { const struct firmware *fw_entry = NULL; const char *pri_fw; const char *fw; pri_fw = orinoco_fw[priv->firmware_type].pri_fw; if (ap) fw = orinoco_fw[priv->firmware_type].ap_fw; else fw = orinoco_fw[priv->firmware_type].sta_fw; if 
(pri_fw) { if (request_firmware(&fw_entry, pri_fw, priv->dev) == 0) priv->cached_pri_fw = fw_entry; } if (fw) { if (request_firmware(&fw_entry, fw, priv->dev) == 0) priv->cached_fw = fw_entry; } } void orinoco_uncache_fw(struct orinoco_private *priv) { release_firmware(priv->cached_pri_fw); release_firmware(priv->cached_fw); priv->cached_pri_fw = NULL; priv->cached_fw = NULL; } #endif
gpl-2.0
Ezekeel/GLaDOS-nexus-prime
drivers/acpi/acpi_memhotplug.c
7395
14984
/* * Copyright (C) 2004 Intel Corporation <naveen.b.s@intel.com> * * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or (at * your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * * ACPI based HotPlug driver that supports Memory Hotplug * This driver fields notifications from firmware for memory add * and remove operations and alerts the VM of the affected memory * ranges. 
*/ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/types.h> #include <linux/memory_hotplug.h> #include <linux/slab.h> #include <acpi/acpi_drivers.h> #define ACPI_MEMORY_DEVICE_CLASS "memory" #define ACPI_MEMORY_DEVICE_HID "PNP0C80" #define ACPI_MEMORY_DEVICE_NAME "Hotplug Mem Device" #define _COMPONENT ACPI_MEMORY_DEVICE_COMPONENT #undef PREFIX #define PREFIX "ACPI:memory_hp:" ACPI_MODULE_NAME("acpi_memhotplug"); MODULE_AUTHOR("Naveen B S <naveen.b.s@intel.com>"); MODULE_DESCRIPTION("Hotplug Mem Driver"); MODULE_LICENSE("GPL"); /* Memory Device States */ #define MEMORY_INVALID_STATE 0 #define MEMORY_POWER_ON_STATE 1 #define MEMORY_POWER_OFF_STATE 2 static int acpi_memory_device_add(struct acpi_device *device); static int acpi_memory_device_remove(struct acpi_device *device, int type); static const struct acpi_device_id memory_device_ids[] = { {ACPI_MEMORY_DEVICE_HID, 0}, {"", 0}, }; MODULE_DEVICE_TABLE(acpi, memory_device_ids); static struct acpi_driver acpi_memory_device_driver = { .name = "acpi_memhotplug", .class = ACPI_MEMORY_DEVICE_CLASS, .ids = memory_device_ids, .ops = { .add = acpi_memory_device_add, .remove = acpi_memory_device_remove, }, }; struct acpi_memory_info { struct list_head list; u64 start_addr; /* Memory Range start physical addr */ u64 length; /* Memory Range length */ unsigned short caching; /* memory cache attribute */ unsigned short write_protect; /* memory read/write attribute */ unsigned int enabled:1; }; struct acpi_memory_device { struct acpi_device * device; unsigned int state; /* State of the memory device */ struct list_head res_list; }; static int acpi_hotmem_initialized; static acpi_status acpi_memory_get_resource(struct acpi_resource *resource, void *context) { struct acpi_memory_device *mem_device = context; struct acpi_resource_address64 address64; struct acpi_memory_info *info, *new; acpi_status status; status = acpi_resource_to_address64(resource, &address64); if (ACPI_FAILURE(status) 
|| (address64.resource_type != ACPI_MEMORY_RANGE)) return AE_OK; list_for_each_entry(info, &mem_device->res_list, list) { /* Can we combine the resource range information? */ if ((info->caching == address64.info.mem.caching) && (info->write_protect == address64.info.mem.write_protect) && (info->start_addr + info->length == address64.minimum)) { info->length += address64.address_length; return AE_OK; } } new = kzalloc(sizeof(struct acpi_memory_info), GFP_KERNEL); if (!new) return AE_ERROR; INIT_LIST_HEAD(&new->list); new->caching = address64.info.mem.caching; new->write_protect = address64.info.mem.write_protect; new->start_addr = address64.minimum; new->length = address64.address_length; list_add_tail(&new->list, &mem_device->res_list); return AE_OK; } static int acpi_memory_get_device_resources(struct acpi_memory_device *mem_device) { acpi_status status; struct acpi_memory_info *info, *n; if (!list_empty(&mem_device->res_list)) return 0; status = acpi_walk_resources(mem_device->device->handle, METHOD_NAME__CRS, acpi_memory_get_resource, mem_device); if (ACPI_FAILURE(status)) { list_for_each_entry_safe(info, n, &mem_device->res_list, list) kfree(info); INIT_LIST_HEAD(&mem_device->res_list); return -EINVAL; } return 0; } static int acpi_memory_get_device(acpi_handle handle, struct acpi_memory_device **mem_device) { acpi_status status; acpi_handle phandle; struct acpi_device *device = NULL; struct acpi_device *pdevice = NULL; int result; if (!acpi_bus_get_device(handle, &device) && device) goto end; status = acpi_get_parent(handle, &phandle); if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, status, "Cannot find acpi parent")); return -EINVAL; } /* Get the parent device */ result = acpi_bus_get_device(phandle, &pdevice); if (result) { printk(KERN_WARNING PREFIX "Cannot get acpi bus device"); return -EINVAL; } /* * Now add the notified device. 
This creates the acpi_device * and invokes .add function */ result = acpi_bus_add(&device, pdevice, handle, ACPI_BUS_TYPE_DEVICE); if (result) { printk(KERN_WARNING PREFIX "Cannot add acpi bus"); return -EINVAL; } end: *mem_device = acpi_driver_data(device); if (!(*mem_device)) { printk(KERN_ERR "\n driver data not found"); return -ENODEV; } return 0; } static int acpi_memory_check_device(struct acpi_memory_device *mem_device) { unsigned long long current_status; /* Get device present/absent information from the _STA */ if (ACPI_FAILURE(acpi_evaluate_integer(mem_device->device->handle, "_STA", NULL, &current_status))) return -ENODEV; /* * Check for device status. Device should be * present/enabled/functioning. */ if (!((current_status & ACPI_STA_DEVICE_PRESENT) && (current_status & ACPI_STA_DEVICE_ENABLED) && (current_status & ACPI_STA_DEVICE_FUNCTIONING))) return -ENODEV; return 0; } static int acpi_memory_enable_device(struct acpi_memory_device *mem_device) { int result, num_enabled = 0; struct acpi_memory_info *info; int node; /* Get the range from the _CRS */ result = acpi_memory_get_device_resources(mem_device); if (result) { printk(KERN_ERR PREFIX "get_device_resources failed\n"); mem_device->state = MEMORY_INVALID_STATE; return result; } node = acpi_get_node(mem_device->device->handle); /* * Tell the VM there is more memory here... * Note: Assume that this function returns zero on success * We don't have memory-hot-add rollback function,now. * (i.e. memory-hot-remove function) */ list_for_each_entry(info, &mem_device->res_list, list) { if (info->enabled) { /* just sanity check...*/ num_enabled++; continue; } /* * If the memory block size is zero, please ignore it. * Don't try to do the following memory hotplug flowchart. 
*/ if (!info->length) continue; if (node < 0) node = memory_add_physaddr_to_nid(info->start_addr); result = add_memory(node, info->start_addr, info->length); if (result) continue; info->enabled = 1; num_enabled++; } if (!num_enabled) { printk(KERN_ERR PREFIX "add_memory failed\n"); mem_device->state = MEMORY_INVALID_STATE; return -EINVAL; } /* * Sometimes the memory device will contain several memory blocks. * When one memory block is hot-added to the system memory, it will * be regarded as a success. * Otherwise if the last memory block can't be hot-added to the system * memory, it will be failure and the memory device can't be bound with * driver. */ return 0; } static int acpi_memory_powerdown_device(struct acpi_memory_device *mem_device) { acpi_status status; struct acpi_object_list arg_list; union acpi_object arg; unsigned long long current_status; /* Issue the _EJ0 command */ arg_list.count = 1; arg_list.pointer = &arg; arg.type = ACPI_TYPE_INTEGER; arg.integer.value = 1; status = acpi_evaluate_object(mem_device->device->handle, "_EJ0", &arg_list, NULL); /* Return on _EJ0 failure */ if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, status, "_EJ0 failed")); return -ENODEV; } /* Evalute _STA to check if the device is disabled */ status = acpi_evaluate_integer(mem_device->device->handle, "_STA", NULL, &current_status); if (ACPI_FAILURE(status)) return -ENODEV; /* Check for device status. Device should be disabled */ if (current_status & ACPI_STA_DEVICE_ENABLED) return -EINVAL; return 0; } static int acpi_memory_disable_device(struct acpi_memory_device *mem_device) { int result; struct acpi_memory_info *info, *n; /* * Ask the VM to offline this memory range. 
* Note: Assume that this function returns zero on success */ list_for_each_entry_safe(info, n, &mem_device->res_list, list) { if (info->enabled) { result = remove_memory(info->start_addr, info->length); if (result) return result; } kfree(info); } /* Power-off and eject the device */ result = acpi_memory_powerdown_device(mem_device); if (result) { /* Set the status of the device to invalid */ mem_device->state = MEMORY_INVALID_STATE; return result; } mem_device->state = MEMORY_POWER_OFF_STATE; return result; } static void acpi_memory_device_notify(acpi_handle handle, u32 event, void *data) { struct acpi_memory_device *mem_device; struct acpi_device *device; switch (event) { case ACPI_NOTIFY_BUS_CHECK: ACPI_DEBUG_PRINT((ACPI_DB_INFO, "\nReceived BUS CHECK notification for device\n")); /* Fall Through */ case ACPI_NOTIFY_DEVICE_CHECK: if (event == ACPI_NOTIFY_DEVICE_CHECK) ACPI_DEBUG_PRINT((ACPI_DB_INFO, "\nReceived DEVICE CHECK notification for device\n")); if (acpi_memory_get_device(handle, &mem_device)) { printk(KERN_ERR PREFIX "Cannot find driver data\n"); return; } if (!acpi_memory_check_device(mem_device)) { if (acpi_memory_enable_device(mem_device)) printk(KERN_ERR PREFIX "Cannot enable memory device\n"); } break; case ACPI_NOTIFY_EJECT_REQUEST: ACPI_DEBUG_PRINT((ACPI_DB_INFO, "\nReceived EJECT REQUEST notification for device\n")); if (acpi_bus_get_device(handle, &device)) { printk(KERN_ERR PREFIX "Device doesn't exist\n"); break; } mem_device = acpi_driver_data(device); if (!mem_device) { printk(KERN_ERR PREFIX "Driver Data is NULL\n"); break; } /* * Currently disabling memory device from kernel mode * TBD: Can also be disabled from user mode scripts * TBD: Can also be disabled by Callback registration * with generic sysfs driver */ if (acpi_memory_disable_device(mem_device)) printk(KERN_ERR PREFIX "Disable memory device\n"); /* * TBD: Invoke acpi_bus_remove to cleanup data structures */ break; default: ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Unsupported event 
[0x%x]\n", event)); break; } return; } static int acpi_memory_device_add(struct acpi_device *device) { int result; struct acpi_memory_device *mem_device = NULL; if (!device) return -EINVAL; mem_device = kzalloc(sizeof(struct acpi_memory_device), GFP_KERNEL); if (!mem_device) return -ENOMEM; INIT_LIST_HEAD(&mem_device->res_list); mem_device->device = device; sprintf(acpi_device_name(device), "%s", ACPI_MEMORY_DEVICE_NAME); sprintf(acpi_device_class(device), "%s", ACPI_MEMORY_DEVICE_CLASS); device->driver_data = mem_device; /* Get the range from the _CRS */ result = acpi_memory_get_device_resources(mem_device); if (result) { kfree(mem_device); return result; } /* Set the device state */ mem_device->state = MEMORY_POWER_ON_STATE; printk(KERN_DEBUG "%s \n", acpi_device_name(device)); /* * Early boot code has recognized memory area by EFI/E820. * If DSDT shows these memory devices on boot, hotplug is not necessary * for them. So, it just returns until completion of this driver's * start up. */ if (!acpi_hotmem_initialized) return 0; if (!acpi_memory_check_device(mem_device)) { /* call add_memory func */ result = acpi_memory_enable_device(mem_device); if (result) printk(KERN_ERR PREFIX "Error in acpi_memory_enable_device\n"); } return result; } static int acpi_memory_device_remove(struct acpi_device *device, int type) { struct acpi_memory_device *mem_device = NULL; if (!device || !acpi_driver_data(device)) return -EINVAL; mem_device = acpi_driver_data(device); kfree(mem_device); return 0; } /* * Helper function to check for memory device */ static acpi_status is_memory_device(acpi_handle handle) { char *hardware_id; acpi_status status; struct acpi_device_info *info; status = acpi_get_object_info(handle, &info); if (ACPI_FAILURE(status)) return status; if (!(info->valid & ACPI_VALID_HID)) { kfree(info); return AE_ERROR; } hardware_id = info->hardware_id.string; if ((hardware_id == NULL) || (strcmp(hardware_id, ACPI_MEMORY_DEVICE_HID))) status = AE_ERROR; kfree(info); 
return status; } static acpi_status acpi_memory_register_notify_handler(acpi_handle handle, u32 level, void *ctxt, void **retv) { acpi_status status; status = is_memory_device(handle); if (ACPI_FAILURE(status)) return AE_OK; /* continue */ status = acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY, acpi_memory_device_notify, NULL); /* continue */ return AE_OK; } static acpi_status acpi_memory_deregister_notify_handler(acpi_handle handle, u32 level, void *ctxt, void **retv) { acpi_status status; status = is_memory_device(handle); if (ACPI_FAILURE(status)) return AE_OK; /* continue */ status = acpi_remove_notify_handler(handle, ACPI_SYSTEM_NOTIFY, acpi_memory_device_notify); return AE_OK; /* continue */ } static int __init acpi_memory_device_init(void) { int result; acpi_status status; result = acpi_bus_register_driver(&acpi_memory_device_driver); if (result < 0) return -ENODEV; status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX, acpi_memory_register_notify_handler, NULL, NULL, NULL); if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, status, "walk_namespace failed")); acpi_bus_unregister_driver(&acpi_memory_device_driver); return -ENODEV; } acpi_hotmem_initialized = 1; return 0; } static void __exit acpi_memory_device_exit(void) { acpi_status status; /* * Adding this to un-install notification handlers for all the device * handles. */ status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX, acpi_memory_deregister_notify_handler, NULL, NULL, NULL); if (ACPI_FAILURE(status)) ACPI_EXCEPTION((AE_INFO, status, "walk_namespace failed")); acpi_bus_unregister_driver(&acpi_memory_device_driver); return; } module_init(acpi_memory_device_init); module_exit(acpi_memory_device_exit);
gpl-2.0
upndwn4par/android_kernel_lge_hammerhead
net/wireless/debugfs.c
7395
3115
/* * cfg80211 debugfs * * Copyright 2009 Luis R. Rodriguez <lrodriguez@atheros.com> * Copyright 2007 Johannes Berg <johannes@sipsolutions.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/slab.h> #include "core.h" #include "debugfs.h" #define DEBUGFS_READONLY_FILE(name, buflen, fmt, value...) \ static ssize_t name## _read(struct file *file, char __user *userbuf, \ size_t count, loff_t *ppos) \ { \ struct wiphy *wiphy= file->private_data; \ char buf[buflen]; \ int res; \ \ res = scnprintf(buf, buflen, fmt "\n", ##value); \ return simple_read_from_buffer(userbuf, count, ppos, buf, res); \ } \ \ static const struct file_operations name## _ops = { \ .read = name## _read, \ .open = simple_open, \ .llseek = generic_file_llseek, \ }; DEBUGFS_READONLY_FILE(rts_threshold, 20, "%d", wiphy->rts_threshold) DEBUGFS_READONLY_FILE(fragmentation_threshold, 20, "%d", wiphy->frag_threshold); DEBUGFS_READONLY_FILE(short_retry_limit, 20, "%d", wiphy->retry_short) DEBUGFS_READONLY_FILE(long_retry_limit, 20, "%d", wiphy->retry_long); static int ht_print_chan(struct ieee80211_channel *chan, char *buf, int buf_size, int offset) { if (WARN_ON(offset > buf_size)) return 0; if (chan->flags & IEEE80211_CHAN_DISABLED) return snprintf(buf + offset, buf_size - offset, "%d Disabled\n", chan->center_freq); return snprintf(buf + offset, buf_size - offset, "%d HT40 %c%c\n", chan->center_freq, (chan->flags & IEEE80211_CHAN_NO_HT40MINUS) ? ' ' : '-', (chan->flags & IEEE80211_CHAN_NO_HT40PLUS) ? 
' ' : '+'); } static ssize_t ht40allow_map_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct wiphy *wiphy = file->private_data; char *buf; unsigned int offset = 0, buf_size = PAGE_SIZE, i, r; enum ieee80211_band band; struct ieee80211_supported_band *sband; buf = kzalloc(buf_size, GFP_KERNEL); if (!buf) return -ENOMEM; mutex_lock(&cfg80211_mutex); for (band = 0; band < IEEE80211_NUM_BANDS; band++) { sband = wiphy->bands[band]; if (!sband) continue; for (i = 0; i < sband->n_channels; i++) offset += ht_print_chan(&sband->channels[i], buf, buf_size, offset); } mutex_unlock(&cfg80211_mutex); r = simple_read_from_buffer(user_buf, count, ppos, buf, offset); kfree(buf); return r; } static const struct file_operations ht40allow_map_ops = { .read = ht40allow_map_read, .open = simple_open, .llseek = default_llseek, }; #define DEBUGFS_ADD(name) \ debugfs_create_file(#name, S_IRUGO, phyd, &rdev->wiphy, &name## _ops); void cfg80211_debugfs_rdev_add(struct cfg80211_registered_device *rdev) { struct dentry *phyd = rdev->wiphy.debugfsdir; DEBUGFS_ADD(rts_threshold); DEBUGFS_ADD(fragmentation_threshold); DEBUGFS_ADD(short_retry_limit); DEBUGFS_ADD(long_retry_limit); DEBUGFS_ADD(ht40allow_map); }
gpl-2.0
CyanideDevices/android_kernel_samsung_smdk4412
net/sched/ematch.c
7651
14816
/* * net/sched/ematch.c Extended Match API * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Authors: Thomas Graf <tgraf@suug.ch> * * ========================================================================== * * An extended match (ematch) is a small classification tool not worth * writing a full classifier for. Ematches can be interconnected to form * a logic expression and get attached to classifiers to extend their * functionatlity. * * The userspace part transforms the logic expressions into an array * consisting of multiple sequences of interconnected ematches separated * by markers. Precedence is implemented by a special ematch kind * referencing a sequence beyond the marker of the current sequence * causing the current position in the sequence to be pushed onto a stack * to allow the current position to be overwritten by the position referenced * in the special ematch. Matching continues in the new sequence until a * marker is reached causing the position to be restored from the stack. * * Example: * A AND (B1 OR B2) AND C AND D * * ------->-PUSH------- * -->-- / -->-- \ -->-- * / \ / / \ \ / \ * +-------+-------+-------+-------+-------+--------+ * | A AND | B AND | C AND | D END | B1 OR | B2 END | * +-------+-------+-------+-------+-------+--------+ * \ / * --------<-POP--------- * * where B is a virtual ematch referencing to sequence starting with B1. * * ========================================================================== * * How to write an ematch in 60 seconds * ------------------------------------ * * 1) Provide a matcher function: * static int my_match(struct sk_buff *skb, struct tcf_ematch *m, * struct tcf_pkt_info *info) * { * struct mydata *d = (struct mydata *) m->data; * * if (...matching goes here...) 
* return 1; * else * return 0; * } * * 2) Fill out a struct tcf_ematch_ops: * static struct tcf_ematch_ops my_ops = { * .kind = unique id, * .datalen = sizeof(struct mydata), * .match = my_match, * .owner = THIS_MODULE, * }; * * 3) Register/Unregister your ematch: * static int __init init_my_ematch(void) * { * return tcf_em_register(&my_ops); * } * * static void __exit exit_my_ematch(void) * { * tcf_em_unregister(&my_ops); * } * * module_init(init_my_ematch); * module_exit(exit_my_ematch); * * 4) By now you should have two more seconds left, barely enough to * open up a beer to watch the compilation going. */ #include <linux/module.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/rtnetlink.h> #include <linux/skbuff.h> #include <net/pkt_cls.h> static LIST_HEAD(ematch_ops); static DEFINE_RWLOCK(ematch_mod_lock); static struct tcf_ematch_ops *tcf_em_lookup(u16 kind) { struct tcf_ematch_ops *e = NULL; read_lock(&ematch_mod_lock); list_for_each_entry(e, &ematch_ops, link) { if (kind == e->kind) { if (!try_module_get(e->owner)) e = NULL; read_unlock(&ematch_mod_lock); return e; } } read_unlock(&ematch_mod_lock); return NULL; } /** * tcf_em_register - register an extended match * * @ops: ematch operations lookup table * * This function must be called by ematches to announce their presence. * The given @ops must have kind set to a unique identifier and the * callback match() must be implemented. All other callbacks are optional * and a fallback implementation is used instead. * * Returns -EEXISTS if an ematch of the same kind has already registered. 
*/ int tcf_em_register(struct tcf_ematch_ops *ops) { int err = -EEXIST; struct tcf_ematch_ops *e; if (ops->match == NULL) return -EINVAL; write_lock(&ematch_mod_lock); list_for_each_entry(e, &ematch_ops, link) if (ops->kind == e->kind) goto errout; list_add_tail(&ops->link, &ematch_ops); err = 0; errout: write_unlock(&ematch_mod_lock); return err; } EXPORT_SYMBOL(tcf_em_register); /** * tcf_em_unregister - unregster and extended match * * @ops: ematch operations lookup table * * This function must be called by ematches to announce their disappearance * for examples when the module gets unloaded. The @ops parameter must be * the same as the one used for registration. * * Returns -ENOENT if no matching ematch was found. */ void tcf_em_unregister(struct tcf_ematch_ops *ops) { write_lock(&ematch_mod_lock); list_del(&ops->link); write_unlock(&ematch_mod_lock); } EXPORT_SYMBOL(tcf_em_unregister); static inline struct tcf_ematch *tcf_em_get_match(struct tcf_ematch_tree *tree, int index) { return &tree->matches[index]; } static int tcf_em_validate(struct tcf_proto *tp, struct tcf_ematch_tree_hdr *tree_hdr, struct tcf_ematch *em, struct nlattr *nla, int idx) { int err = -EINVAL; struct tcf_ematch_hdr *em_hdr = nla_data(nla); int data_len = nla_len(nla) - sizeof(*em_hdr); void *data = (void *) em_hdr + sizeof(*em_hdr); if (!TCF_EM_REL_VALID(em_hdr->flags)) goto errout; if (em_hdr->kind == TCF_EM_CONTAINER) { /* Special ematch called "container", carries an index * referencing an external ematch sequence. */ u32 ref; if (data_len < sizeof(ref)) goto errout; ref = *(u32 *) data; if (ref >= tree_hdr->nmatches) goto errout; /* We do not allow backward jumps to avoid loops and jumps * to our own position are of course illegal. */ if (ref <= idx) goto errout; em->data = ref; } else { /* Note: This lookup will increase the module refcnt * of the ematch module referenced. 
In case of a failure, * a destroy function is called by the underlying layer * which automatically releases the reference again, therefore * the module MUST not be given back under any circumstances * here. Be aware, the destroy function assumes that the * module is held if the ops field is non zero. */ em->ops = tcf_em_lookup(em_hdr->kind); if (em->ops == NULL) { err = -ENOENT; #ifdef CONFIG_MODULES __rtnl_unlock(); request_module("ematch-kind-%u", em_hdr->kind); rtnl_lock(); em->ops = tcf_em_lookup(em_hdr->kind); if (em->ops) { /* We dropped the RTNL mutex in order to * perform the module load. Tell the caller * to replay the request. */ module_put(em->ops->owner); err = -EAGAIN; } #endif goto errout; } /* ematch module provides expected length of data, so we * can do a basic sanity check. */ if (em->ops->datalen && data_len < em->ops->datalen) goto errout; if (em->ops->change) { err = em->ops->change(tp, data, data_len, em); if (err < 0) goto errout; } else if (data_len > 0) { /* ematch module doesn't provide an own change * procedure and expects us to allocate and copy * the ematch data. * * TCF_EM_SIMPLE may be specified stating that the * data only consists of a u32 integer and the module * does not expected a memory reference but rather * the value carried. 
*/ if (em_hdr->flags & TCF_EM_SIMPLE) { if (data_len < sizeof(u32)) goto errout; em->data = *(u32 *) data; } else { void *v = kmemdup(data, data_len, GFP_KERNEL); if (v == NULL) { err = -ENOBUFS; goto errout; } em->data = (unsigned long) v; } } } em->matchid = em_hdr->matchid; em->flags = em_hdr->flags; em->datalen = data_len; err = 0; errout: return err; } static const struct nla_policy em_policy[TCA_EMATCH_TREE_MAX + 1] = { [TCA_EMATCH_TREE_HDR] = { .len = sizeof(struct tcf_ematch_tree_hdr) }, [TCA_EMATCH_TREE_LIST] = { .type = NLA_NESTED }, }; /** * tcf_em_tree_validate - validate ematch config TLV and build ematch tree * * @tp: classifier kind handle * @nla: ematch tree configuration TLV * @tree: destination ematch tree variable to store the resulting * ematch tree. * * This function validates the given configuration TLV @nla and builds an * ematch tree in @tree. The resulting tree must later be copied into * the private classifier data using tcf_em_tree_change(). You MUST NOT * provide the ematch tree variable of the private classifier data directly, * the changes would not be locked properly. * * Returns a negative error code if the configuration TLV contains errors. 
*/ int tcf_em_tree_validate(struct tcf_proto *tp, struct nlattr *nla, struct tcf_ematch_tree *tree) { int idx, list_len, matches_len, err; struct nlattr *tb[TCA_EMATCH_TREE_MAX + 1]; struct nlattr *rt_match, *rt_hdr, *rt_list; struct tcf_ematch_tree_hdr *tree_hdr; struct tcf_ematch *em; memset(tree, 0, sizeof(*tree)); if (!nla) return 0; err = nla_parse_nested(tb, TCA_EMATCH_TREE_MAX, nla, em_policy); if (err < 0) goto errout; err = -EINVAL; rt_hdr = tb[TCA_EMATCH_TREE_HDR]; rt_list = tb[TCA_EMATCH_TREE_LIST]; if (rt_hdr == NULL || rt_list == NULL) goto errout; tree_hdr = nla_data(rt_hdr); memcpy(&tree->hdr, tree_hdr, sizeof(*tree_hdr)); rt_match = nla_data(rt_list); list_len = nla_len(rt_list); matches_len = tree_hdr->nmatches * sizeof(*em); tree->matches = kzalloc(matches_len, GFP_KERNEL); if (tree->matches == NULL) goto errout; /* We do not use nla_parse_nested here because the maximum * number of attributes is unknown. This saves us the allocation * for a tb buffer which would serve no purpose at all. * * The array of rt attributes is parsed in the order as they are * provided, their type must be incremental from 1 to n. Even * if it does not serve any real purpose, a failure of sticking * to this policy will result in parsing failure. */ for (idx = 0; nla_ok(rt_match, list_len); idx++) { err = -EINVAL; if (rt_match->nla_type != (idx + 1)) goto errout_abort; if (idx >= tree_hdr->nmatches) goto errout_abort; if (nla_len(rt_match) < sizeof(struct tcf_ematch_hdr)) goto errout_abort; em = tcf_em_get_match(tree, idx); err = tcf_em_validate(tp, tree_hdr, em, rt_match, idx); if (err < 0) goto errout_abort; rt_match = nla_next(rt_match, &list_len); } /* Check if the number of matches provided by userspace actually * complies with the array of matches. The number was used for * the validation of references and a mismatch could lead to * undefined references during the matching process. 
*/ if (idx != tree_hdr->nmatches) { err = -EINVAL; goto errout_abort; } err = 0; errout: return err; errout_abort: tcf_em_tree_destroy(tp, tree); return err; } EXPORT_SYMBOL(tcf_em_tree_validate); /** * tcf_em_tree_destroy - destroy an ematch tree * * @tp: classifier kind handle * @tree: ematch tree to be deleted * * This functions destroys an ematch tree previously created by * tcf_em_tree_validate()/tcf_em_tree_change(). You must ensure that * the ematch tree is not in use before calling this function. */ void tcf_em_tree_destroy(struct tcf_proto *tp, struct tcf_ematch_tree *tree) { int i; if (tree->matches == NULL) return; for (i = 0; i < tree->hdr.nmatches; i++) { struct tcf_ematch *em = tcf_em_get_match(tree, i); if (em->ops) { if (em->ops->destroy) em->ops->destroy(tp, em); else if (!tcf_em_is_simple(em)) kfree((void *) em->data); module_put(em->ops->owner); } } tree->hdr.nmatches = 0; kfree(tree->matches); tree->matches = NULL; } EXPORT_SYMBOL(tcf_em_tree_destroy); /** * tcf_em_tree_dump - dump ematch tree into a rtnl message * * @skb: skb holding the rtnl message * @t: ematch tree to be dumped * @tlv: TLV type to be used to encapsulate the tree * * This function dumps a ematch tree into a rtnl message. It is valid to * call this function while the ematch tree is in use. * * Returns -1 if the skb tailroom is insufficient. */ int tcf_em_tree_dump(struct sk_buff *skb, struct tcf_ematch_tree *tree, int tlv) { int i; u8 *tail; struct nlattr *top_start; struct nlattr *list_start; top_start = nla_nest_start(skb, tlv); if (top_start == NULL) goto nla_put_failure; NLA_PUT(skb, TCA_EMATCH_TREE_HDR, sizeof(tree->hdr), &tree->hdr); list_start = nla_nest_start(skb, TCA_EMATCH_TREE_LIST); if (list_start == NULL) goto nla_put_failure; tail = skb_tail_pointer(skb); for (i = 0; i < tree->hdr.nmatches; i++) { struct nlattr *match_start = (struct nlattr *)tail; struct tcf_ematch *em = tcf_em_get_match(tree, i); struct tcf_ematch_hdr em_hdr = { .kind = em->ops ? 
em->ops->kind : TCF_EM_CONTAINER, .matchid = em->matchid, .flags = em->flags }; NLA_PUT(skb, i + 1, sizeof(em_hdr), &em_hdr); if (em->ops && em->ops->dump) { if (em->ops->dump(skb, em) < 0) goto nla_put_failure; } else if (tcf_em_is_container(em) || tcf_em_is_simple(em)) { u32 u = em->data; nla_put_nohdr(skb, sizeof(u), &u); } else if (em->datalen > 0) nla_put_nohdr(skb, em->datalen, (void *) em->data); tail = skb_tail_pointer(skb); match_start->nla_len = tail - (u8 *)match_start; } nla_nest_end(skb, list_start); nla_nest_end(skb, top_start); return 0; nla_put_failure: return -1; } EXPORT_SYMBOL(tcf_em_tree_dump); static inline int tcf_em_match(struct sk_buff *skb, struct tcf_ematch *em, struct tcf_pkt_info *info) { int r = em->ops->match(skb, em, info); return tcf_em_is_inverted(em) ? !r : r; } /* Do not use this function directly, use tcf_em_tree_match instead */ int __tcf_em_tree_match(struct sk_buff *skb, struct tcf_ematch_tree *tree, struct tcf_pkt_info *info) { int stackp = 0, match_idx = 0, res = 0; struct tcf_ematch *cur_match; int stack[CONFIG_NET_EMATCH_STACK]; proceed: while (match_idx < tree->hdr.nmatches) { cur_match = tcf_em_get_match(tree, match_idx); if (tcf_em_is_container(cur_match)) { if (unlikely(stackp >= CONFIG_NET_EMATCH_STACK)) goto stack_overflow; stack[stackp++] = match_idx; match_idx = cur_match->data; goto proceed; } res = tcf_em_match(skb, cur_match, info); if (tcf_em_early_end(cur_match, res)) break; match_idx++; } pop_stack: if (stackp > 0) { match_idx = stack[--stackp]; cur_match = tcf_em_get_match(tree, match_idx); if (tcf_em_early_end(cur_match, res)) goto pop_stack; else { match_idx++; goto proceed; } } return res; stack_overflow: if (net_ratelimit()) pr_warning("tc ematch: local stack overflow," " increase NET_EMATCH_STACK\n"); return -1; } EXPORT_SYMBOL(__tcf_em_tree_match);
gpl-2.0
txuki2005/TaUrUs_Kernel
sound/pci/ctxfi/ctvmem.c
7907
5684
/** * Copyright (C) 2008, Creative Technology Ltd. All Rights Reserved. * * This source file is released under GPL v2 license (no other versions). * See the COPYING file included in the main directory of this source * distribution for the license terms and conditions. * * @File ctvmem.c * * @Brief * This file contains the implementation of virtual memory management object * for card device. * * @Author Liu Chun * @Date Apr 1 2008 */ #include "ctvmem.h" #include <linux/slab.h> #include <linux/mm.h> #include <linux/io.h> #include <sound/pcm.h> #define CT_PTES_PER_PAGE (CT_PAGE_SIZE / sizeof(void *)) #define CT_ADDRS_PER_PAGE (CT_PTES_PER_PAGE * CT_PAGE_SIZE) /* * * Find or create vm block based on requested @size. * @size must be page aligned. * */ static struct ct_vm_block * get_vm_block(struct ct_vm *vm, unsigned int size) { struct ct_vm_block *block = NULL, *entry; struct list_head *pos; size = CT_PAGE_ALIGN(size); if (size > vm->size) { printk(KERN_ERR "ctxfi: Fail! No sufficient device virtual " "memory space available!\n"); return NULL; } mutex_lock(&vm->lock); list_for_each(pos, &vm->unused) { entry = list_entry(pos, struct ct_vm_block, list); if (entry->size >= size) break; /* found a block that is big enough */ } if (pos == &vm->unused) goto out; if (entry->size == size) { /* Move the vm node from unused list to used list directly */ list_move(&entry->list, &vm->used); vm->size -= size; block = entry; goto out; } block = kzalloc(sizeof(*block), GFP_KERNEL); if (!block) goto out; block->addr = entry->addr; block->size = size; list_add(&block->list, &vm->used); entry->addr += size; entry->size -= size; vm->size -= size; out: mutex_unlock(&vm->lock); return block; } static void put_vm_block(struct ct_vm *vm, struct ct_vm_block *block) { struct ct_vm_block *entry, *pre_ent; struct list_head *pos, *pre; block->size = CT_PAGE_ALIGN(block->size); mutex_lock(&vm->lock); list_del(&block->list); vm->size += block->size; list_for_each(pos, &vm->unused) { entry = 
list_entry(pos, struct ct_vm_block, list); if (entry->addr >= (block->addr + block->size)) break; /* found a position */ } if (pos == &vm->unused) { list_add_tail(&block->list, &vm->unused); entry = block; } else { if ((block->addr + block->size) == entry->addr) { entry->addr = block->addr; entry->size += block->size; kfree(block); } else { __list_add(&block->list, pos->prev, pos); entry = block; } } pos = &entry->list; pre = pos->prev; while (pre != &vm->unused) { entry = list_entry(pos, struct ct_vm_block, list); pre_ent = list_entry(pre, struct ct_vm_block, list); if ((pre_ent->addr + pre_ent->size) > entry->addr) break; pre_ent->size += entry->size; list_del(pos); kfree(entry); pos = pre; pre = pos->prev; } mutex_unlock(&vm->lock); } /* Map host addr (kmalloced/vmalloced) to device logical addr. */ static struct ct_vm_block * ct_vm_map(struct ct_vm *vm, struct snd_pcm_substream *substream, int size) { struct ct_vm_block *block; unsigned int pte_start; unsigned i, pages; unsigned long *ptp; block = get_vm_block(vm, size); if (block == NULL) { printk(KERN_ERR "ctxfi: No virtual memory block that is big " "enough to allocate!\n"); return NULL; } ptp = (unsigned long *)vm->ptp[0].area; pte_start = (block->addr >> CT_PAGE_SHIFT); pages = block->size >> CT_PAGE_SHIFT; for (i = 0; i < pages; i++) { unsigned long addr; addr = snd_pcm_sgbuf_get_addr(substream, i << CT_PAGE_SHIFT); ptp[pte_start + i] = addr; } block->size = size; return block; } static void ct_vm_unmap(struct ct_vm *vm, struct ct_vm_block *block) { /* do unmapping */ put_vm_block(vm, block); } /* * * return the host physical addr of the @index-th device * page table page on success, or ~0UL on failure. * The first returned ~0UL indicates the termination. * */ static dma_addr_t ct_get_ptp_phys(struct ct_vm *vm, int index) { dma_addr_t addr; addr = (index >= CT_PTP_NUM) ? 
~0UL : vm->ptp[index].addr; return addr; } int ct_vm_create(struct ct_vm **rvm, struct pci_dev *pci) { struct ct_vm *vm; struct ct_vm_block *block; int i, err = 0; *rvm = NULL; vm = kzalloc(sizeof(*vm), GFP_KERNEL); if (!vm) return -ENOMEM; mutex_init(&vm->lock); /* Allocate page table pages */ for (i = 0; i < CT_PTP_NUM; i++) { err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci), PAGE_SIZE, &vm->ptp[i]); if (err < 0) break; } if (err < 0) { /* no page table pages are allocated */ ct_vm_destroy(vm); return -ENOMEM; } vm->size = CT_ADDRS_PER_PAGE * i; vm->map = ct_vm_map; vm->unmap = ct_vm_unmap; vm->get_ptp_phys = ct_get_ptp_phys; INIT_LIST_HEAD(&vm->unused); INIT_LIST_HEAD(&vm->used); block = kzalloc(sizeof(*block), GFP_KERNEL); if (NULL != block) { block->addr = 0; block->size = vm->size; list_add(&block->list, &vm->unused); } *rvm = vm; return 0; } /* The caller must ensure no mapping pages are being used * by hardware before calling this function */ void ct_vm_destroy(struct ct_vm *vm) { int i; struct list_head *pos; struct ct_vm_block *entry; /* free used and unused list nodes */ while (!list_empty(&vm->used)) { pos = vm->used.next; list_del(pos); entry = list_entry(pos, struct ct_vm_block, list); kfree(entry); } while (!list_empty(&vm->unused)) { pos = vm->unused.next; list_del(pos); entry = list_entry(pos, struct ct_vm_block, list); kfree(entry); } /* free allocated page table pages */ for (i = 0; i < CT_PTP_NUM; i++) snd_dma_free_pages(&vm->ptp[i]); vm->size = 0; kfree(vm); }
gpl-2.0
JoeyJiao/kernel-2.6.32-V858
arch/xtensa/kernel/xtensa_ksyms.c
9443
2236
/* * arch/xtensa/kernel/xtensa_ksyms.c * * Export Xtensa-specific functions for loadable modules. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2001 - 2005 Tensilica Inc. * * Joe Taylor <joe@tensilica.com> */ #include <linux/module.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/interrupt.h> #include <asm/irq.h> #include <linux/in6.h> #include <asm/uaccess.h> #include <asm/checksum.h> #include <asm/dma.h> #include <asm/io.h> #include <asm/page.h> #include <asm/pgalloc.h> #ifdef CONFIG_BLK_DEV_FD #include <asm/floppy.h> #endif #ifdef CONFIG_NET #include <net/checksum.h> #endif /* CONFIG_NET */ /* * String functions */ EXPORT_SYMBOL(memset); EXPORT_SYMBOL(memcpy); EXPORT_SYMBOL(memmove); EXPORT_SYMBOL(kernel_thread); /* * gcc internal math functions */ extern long long __ashrdi3(long long, int); extern long long __ashldi3(long long, int); extern long long __lshrdi3(long long, int); extern int __divsi3(int, int); extern int __modsi3(int, int); extern long long __muldi3(long long, long long); extern int __mulsi3(int, int); extern unsigned int __udivsi3(unsigned int, unsigned int); extern unsigned int __umodsi3(unsigned int, unsigned int); extern unsigned long long __umoddi3(unsigned long long, unsigned long long); extern unsigned long long __udivdi3(unsigned long long, unsigned long long); EXPORT_SYMBOL(__ashldi3); EXPORT_SYMBOL(__ashrdi3); EXPORT_SYMBOL(__lshrdi3); EXPORT_SYMBOL(__divsi3); EXPORT_SYMBOL(__modsi3); EXPORT_SYMBOL(__muldi3); EXPORT_SYMBOL(__mulsi3); EXPORT_SYMBOL(__udivsi3); EXPORT_SYMBOL(__umodsi3); EXPORT_SYMBOL(__udivdi3); EXPORT_SYMBOL(__umoddi3); #ifdef CONFIG_NET /* * Networking support */ EXPORT_SYMBOL(csum_partial_copy_generic); #endif /* CONFIG_NET */ /* * Architecture-specific symbols */ EXPORT_SYMBOL(__xtensa_copy_user); /* * Kernel hacking ... 
*/ #if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE) // FIXME EXPORT_SYMBOL(screen_info); #endif EXPORT_SYMBOL(outsb); EXPORT_SYMBOL(outsw); EXPORT_SYMBOL(outsl); EXPORT_SYMBOL(insb); EXPORT_SYMBOL(insw); EXPORT_SYMBOL(insl);
gpl-2.0
adamdmcbride/Nufront_linux_kernel
drivers/net/arcnet/arc-rawmode.c
13283
5295
/* * Linux ARCnet driver - "raw mode" packet encapsulation (no soft headers) * * Written 1994-1999 by Avery Pennarun. * Derived from skeleton.c by Donald Becker. * * Special thanks to Contemporary Controls, Inc. (www.ccontrols.com) * for sponsoring the further development of this driver. * * ********************** * * The original copyright of skeleton.c was as follows: * * skeleton.c Written 1993 by Donald Becker. * Copyright 1993 United States Government as represented by the * Director, National Security Agency. This software may only be used * and distributed according to the terms of the GNU General Public License as * modified by SRC, incorporated herein by reference. * * ********************** * * For more details, see drivers/net/arcnet.c * * ********************** */ #include <linux/module.h> #include <linux/gfp.h> #include <linux/init.h> #include <linux/if_arp.h> #include <net/arp.h> #include <linux/netdevice.h> #include <linux/skbuff.h> #include <linux/arcdevice.h> #define VERSION "arcnet: raw mode (`r') encapsulation support loaded.\n" static void rx(struct net_device *dev, int bufnum, struct archdr *pkthdr, int length); static int build_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, uint8_t daddr); static int prepare_tx(struct net_device *dev, struct archdr *pkt, int length, int bufnum); static struct ArcProto rawmode_proto = { .suffix = 'r', .mtu = XMTU, .rx = rx, .build_header = build_header, .prepare_tx = prepare_tx, .continue_tx = NULL, .ack_tx = NULL }; static int __init arcnet_raw_init(void) { int count; printk(VERSION); for (count = 0; count < 256; count++) if (arc_proto_map[count] == arc_proto_default) arc_proto_map[count] = &rawmode_proto; /* for raw mode, we only set the bcast proto if there's no better one */ if (arc_bcast_proto == arc_proto_default) arc_bcast_proto = &rawmode_proto; arc_proto_default = &rawmode_proto; return 0; } static void __exit arcnet_raw_exit(void) { arcnet_unregister_proto(&rawmode_proto); } 
module_init(arcnet_raw_init); module_exit(arcnet_raw_exit); MODULE_LICENSE("GPL"); /* packet receiver */ static void rx(struct net_device *dev, int bufnum, struct archdr *pkthdr, int length) { struct arcnet_local *lp = netdev_priv(dev); struct sk_buff *skb; struct archdr *pkt = pkthdr; int ofs; BUGMSG(D_DURING, "it's a raw packet (length=%d)\n", length); if (length > MTU) ofs = 512 - length; else ofs = 256 - length; skb = alloc_skb(length + ARC_HDR_SIZE, GFP_ATOMIC); if (skb == NULL) { BUGMSG(D_NORMAL, "Memory squeeze, dropping packet.\n"); dev->stats.rx_dropped++; return; } skb_put(skb, length + ARC_HDR_SIZE); skb->dev = dev; pkt = (struct archdr *) skb->data; skb_reset_mac_header(skb); skb_pull(skb, ARC_HDR_SIZE); /* up to sizeof(pkt->soft) has already been copied from the card */ memcpy(pkt, pkthdr, sizeof(struct archdr)); if (length > sizeof(pkt->soft)) lp->hw.copy_from_card(dev, bufnum, ofs + sizeof(pkt->soft), pkt->soft.raw + sizeof(pkt->soft), length - sizeof(pkt->soft)); BUGLVL(D_SKB) arcnet_dump_skb(dev, skb, "rx"); skb->protocol = cpu_to_be16(ETH_P_ARCNET); netif_rx(skb); } /* * Create the ARCnet hard/soft headers for raw mode. * There aren't any soft headers in raw mode - not even the protocol id. */ static int build_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, uint8_t daddr) { int hdr_size = ARC_HDR_SIZE; struct archdr *pkt = (struct archdr *) skb_push(skb, hdr_size); /* * Set the source hardware address. * * This is pretty pointless for most purposes, but it can help in * debugging. ARCnet does not allow us to change the source address in * the actual packet sent) */ pkt->hard.source = *dev->dev_addr; /* see linux/net/ethernet/eth.c to see where I got the following */ if (dev->flags & (IFF_LOOPBACK | IFF_NOARP)) { /* * FIXME: fill in the last byte of the dest ipaddr here to better * comply with RFC1051 in "noarp" mode. */ pkt->hard.dest = 0; return hdr_size; } /* otherwise, just fill it in and go! 
*/ pkt->hard.dest = daddr; return hdr_size; /* success */ } static int prepare_tx(struct net_device *dev, struct archdr *pkt, int length, int bufnum) { struct arcnet_local *lp = netdev_priv(dev); struct arc_hardware *hard = &pkt->hard; int ofs; BUGMSG(D_DURING, "prepare_tx: txbufs=%d/%d/%d\n", lp->next_tx, lp->cur_tx, bufnum); length -= ARC_HDR_SIZE; /* hard header is not included in packet length */ if (length > XMTU) { /* should never happen! other people already check for this. */ BUGMSG(D_NORMAL, "Bug! prepare_tx with size %d (> %d)\n", length, XMTU); length = XMTU; } if (length >= MinTU) { hard->offset[0] = 0; hard->offset[1] = ofs = 512 - length; } else if (length > MTU) { hard->offset[0] = 0; hard->offset[1] = ofs = 512 - length - 3; } else hard->offset[0] = ofs = 256 - length; BUGMSG(D_DURING, "prepare_tx: length=%d ofs=%d\n", length,ofs); lp->hw.copy_to_card(dev, bufnum, 0, hard, ARC_HDR_SIZE); lp->hw.copy_to_card(dev, bufnum, ofs, &pkt->soft, length); lp->lastload_dest = hard->dest; return 1; /* done */ }
gpl-2.0
OpenELEC/linux
drivers/staging/iio/meter/ade7759.c
228
11217
/* * ADE7759 Active Energy Metering IC with di/dt Sensor Interface Driver * * Copyright 2010 Analog Devices Inc. * * Licensed under the GPL-2 or later. */ #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/delay.h> #include <linux/mutex.h> #include <linux/device.h> #include <linux/kernel.h> #include <linux/spi/spi.h> #include <linux/slab.h> #include <linux/sysfs.h> #include <linux/list.h> #include <linux/module.h> #include <linux/iio/iio.h> #include <linux/iio/sysfs.h> #include "meter.h" #include "ade7759.h" static int ade7759_spi_write_reg_8(struct device *dev, u8 reg_address, u8 val) { int ret; struct iio_dev *indio_dev = dev_to_iio_dev(dev); struct ade7759_state *st = iio_priv(indio_dev); mutex_lock(&st->buf_lock); st->tx[0] = ADE7759_WRITE_REG(reg_address); st->tx[1] = val; ret = spi_write(st->us, st->tx, 2); mutex_unlock(&st->buf_lock); return ret; } static int ade7759_spi_write_reg_16(struct device *dev, u8 reg_address, u16 value) { int ret; struct iio_dev *indio_dev = dev_to_iio_dev(dev); struct ade7759_state *st = iio_priv(indio_dev); mutex_lock(&st->buf_lock); st->tx[0] = ADE7759_WRITE_REG(reg_address); st->tx[1] = (value >> 8) & 0xFF; st->tx[2] = value & 0xFF; ret = spi_write(st->us, st->tx, 3); mutex_unlock(&st->buf_lock); return ret; } static int ade7759_spi_read_reg_8(struct device *dev, u8 reg_address, u8 *val) { struct iio_dev *indio_dev = dev_to_iio_dev(dev); struct ade7759_state *st = iio_priv(indio_dev); int ret; ret = spi_w8r8(st->us, ADE7759_READ_REG(reg_address)); if (ret < 0) { dev_err(&st->us->dev, "problem when reading 8 bit register 0x%02X", reg_address); return ret; } *val = ret; return 0; } static int ade7759_spi_read_reg_16(struct device *dev, u8 reg_address, u16 *val) { struct iio_dev *indio_dev = dev_to_iio_dev(dev); struct ade7759_state *st = iio_priv(indio_dev); int ret; ret = spi_w8r16be(st->us, ADE7759_READ_REG(reg_address)); if (ret < 0) { dev_err(&st->us->dev, "problem when reading 16 bit register 0x%02X", 
reg_address); return ret; } *val = ret; return 0; } static int ade7759_spi_read_reg_40(struct device *dev, u8 reg_address, u64 *val) { struct iio_dev *indio_dev = dev_to_iio_dev(dev); struct ade7759_state *st = iio_priv(indio_dev); int ret; struct spi_transfer xfers[] = { { .tx_buf = st->tx, .rx_buf = st->rx, .bits_per_word = 8, .len = 6, }, }; mutex_lock(&st->buf_lock); st->tx[0] = ADE7759_READ_REG(reg_address); memset(&st->tx[1], 0, 5); ret = spi_sync_transfer(st->us, xfers, ARRAY_SIZE(xfers)); if (ret) { dev_err(&st->us->dev, "problem when reading 40 bit register 0x%02X", reg_address); goto error_ret; } *val = ((u64)st->rx[1] << 32) | (st->rx[2] << 24) | (st->rx[3] << 16) | (st->rx[4] << 8) | st->rx[5]; error_ret: mutex_unlock(&st->buf_lock); return ret; } static ssize_t ade7759_read_8bit(struct device *dev, struct device_attribute *attr, char *buf) { int ret; u8 val = 0; struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); ret = ade7759_spi_read_reg_8(dev, this_attr->address, &val); if (ret) return ret; return sprintf(buf, "%u\n", val); } static ssize_t ade7759_read_16bit(struct device *dev, struct device_attribute *attr, char *buf) { int ret; u16 val = 0; struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); ret = ade7759_spi_read_reg_16(dev, this_attr->address, &val); if (ret) return ret; return sprintf(buf, "%u\n", val); } static ssize_t ade7759_read_40bit(struct device *dev, struct device_attribute *attr, char *buf) { int ret; u64 val = 0; struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); ret = ade7759_spi_read_reg_40(dev, this_attr->address, &val); if (ret) return ret; return sprintf(buf, "%llu\n", val); } static ssize_t ade7759_write_8bit(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); int ret; u8 val; ret = kstrtou8(buf, 10, &val); if (ret) goto error_ret; ret = ade7759_spi_write_reg_8(dev, this_attr->address, val); error_ret: return ret ? 
ret : len; } static ssize_t ade7759_write_16bit(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); int ret; u16 val; ret = kstrtou16(buf, 10, &val); if (ret) goto error_ret; ret = ade7759_spi_write_reg_16(dev, this_attr->address, val); error_ret: return ret ? ret : len; } static int ade7759_reset(struct device *dev) { int ret; u16 val; ret = ade7759_spi_read_reg_16(dev, ADE7759_MODE, &val); if (ret < 0) return ret; val |= BIT(6); /* Software Chip Reset */ return ade7759_spi_write_reg_16(dev, ADE7759_MODE, val); } static IIO_DEV_ATTR_AENERGY(ade7759_read_40bit, ADE7759_AENERGY); static IIO_DEV_ATTR_CFDEN(S_IWUSR | S_IRUGO, ade7759_read_16bit, ade7759_write_16bit, ADE7759_CFDEN); static IIO_DEV_ATTR_CFNUM(S_IWUSR | S_IRUGO, ade7759_read_8bit, ade7759_write_8bit, ADE7759_CFNUM); static IIO_DEV_ATTR_CHKSUM(ade7759_read_8bit, ADE7759_CHKSUM); static IIO_DEV_ATTR_PHCAL(S_IWUSR | S_IRUGO, ade7759_read_16bit, ade7759_write_16bit, ADE7759_PHCAL); static IIO_DEV_ATTR_APOS(S_IWUSR | S_IRUGO, ade7759_read_16bit, ade7759_write_16bit, ADE7759_APOS); static IIO_DEV_ATTR_SAGCYC(S_IWUSR | S_IRUGO, ade7759_read_8bit, ade7759_write_8bit, ADE7759_SAGCYC); static IIO_DEV_ATTR_SAGLVL(S_IWUSR | S_IRUGO, ade7759_read_8bit, ade7759_write_8bit, ADE7759_SAGLVL); static IIO_DEV_ATTR_LINECYC(S_IWUSR | S_IRUGO, ade7759_read_8bit, ade7759_write_8bit, ADE7759_LINECYC); static IIO_DEV_ATTR_LENERGY(ade7759_read_40bit, ADE7759_LENERGY); static IIO_DEV_ATTR_PGA_GAIN(S_IWUSR | S_IRUGO, ade7759_read_8bit, ade7759_write_8bit, ADE7759_GAIN); static IIO_DEV_ATTR_ACTIVE_POWER_GAIN(S_IWUSR | S_IRUGO, ade7759_read_16bit, ade7759_write_16bit, ADE7759_APGAIN); static IIO_DEV_ATTR_CH_OFF(1, S_IWUSR | S_IRUGO, ade7759_read_8bit, ade7759_write_8bit, ADE7759_CH1OS); static IIO_DEV_ATTR_CH_OFF(2, S_IWUSR | S_IRUGO, ade7759_read_8bit, ade7759_write_8bit, ADE7759_CH2OS); static int ade7759_set_irq(struct device *dev, bool enable) { 
int ret; u8 irqen; ret = ade7759_spi_read_reg_8(dev, ADE7759_IRQEN, &irqen); if (ret) goto error_ret; if (enable) irqen |= BIT(3); /* Enables an interrupt when a data is present in the waveform register */ else irqen &= ~BIT(3); ret = ade7759_spi_write_reg_8(dev, ADE7759_IRQEN, irqen); error_ret: return ret; } /* Power down the device */ static int ade7759_stop_device(struct device *dev) { int ret; u16 val; ret = ade7759_spi_read_reg_16(dev, ADE7759_MODE, &val); if (ret < 0) { dev_err(dev, "unable to power down the device, error: %d\n", ret); return ret; } val |= BIT(4); /* AD converters can be turned off */ return ade7759_spi_write_reg_16(dev, ADE7759_MODE, val); } static int ade7759_initial_setup(struct iio_dev *indio_dev) { int ret; struct ade7759_state *st = iio_priv(indio_dev); struct device *dev = &indio_dev->dev; /* use low spi speed for init */ st->us->mode = SPI_MODE_3; spi_setup(st->us); /* Disable IRQ */ ret = ade7759_set_irq(dev, false); if (ret) { dev_err(dev, "disable irq failed"); goto err_ret; } ade7759_reset(dev); msleep(ADE7759_STARTUP_DELAY); err_ret: return ret; } static ssize_t ade7759_read_frequency(struct device *dev, struct device_attribute *attr, char *buf) { int ret; u16 t; int sps; ret = ade7759_spi_read_reg_16(dev, ADE7759_MODE, &t); if (ret) return ret; t = (t >> 3) & 0x3; sps = 27900 / (1 + t); return sprintf(buf, "%d\n", sps); } static ssize_t ade7759_write_frequency(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev *indio_dev = dev_to_iio_dev(dev); struct ade7759_state *st = iio_priv(indio_dev); u16 val; int ret; u16 reg, t; ret = kstrtou16(buf, 10, &val); if (ret) return ret; if (!val) return -EINVAL; mutex_lock(&indio_dev->mlock); t = 27900 / val; if (t > 0) t--; if (t > 1) st->us->max_speed_hz = ADE7759_SPI_SLOW; else st->us->max_speed_hz = ADE7759_SPI_FAST; ret = ade7759_spi_read_reg_16(dev, ADE7759_MODE, &reg); if (ret) goto out; reg &= ~(3 << 13); reg |= t << 13; ret = 
ade7759_spi_write_reg_16(dev, ADE7759_MODE, reg); out: mutex_unlock(&indio_dev->mlock); return ret ? ret : len; } static IIO_DEV_ATTR_TEMP_RAW(ade7759_read_8bit); static IIO_CONST_ATTR(in_temp_offset, "70 C"); static IIO_CONST_ATTR(in_temp_scale, "1 C"); static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO, ade7759_read_frequency, ade7759_write_frequency); static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("27900 14000 7000 3500"); static struct attribute *ade7759_attributes[] = { &iio_dev_attr_in_temp_raw.dev_attr.attr, &iio_const_attr_in_temp_offset.dev_attr.attr, &iio_const_attr_in_temp_scale.dev_attr.attr, &iio_dev_attr_sampling_frequency.dev_attr.attr, &iio_const_attr_sampling_frequency_available.dev_attr.attr, &iio_dev_attr_phcal.dev_attr.attr, &iio_dev_attr_cfden.dev_attr.attr, &iio_dev_attr_aenergy.dev_attr.attr, &iio_dev_attr_cfnum.dev_attr.attr, &iio_dev_attr_apos.dev_attr.attr, &iio_dev_attr_sagcyc.dev_attr.attr, &iio_dev_attr_saglvl.dev_attr.attr, &iio_dev_attr_linecyc.dev_attr.attr, &iio_dev_attr_lenergy.dev_attr.attr, &iio_dev_attr_chksum.dev_attr.attr, &iio_dev_attr_pga_gain.dev_attr.attr, &iio_dev_attr_active_power_gain.dev_attr.attr, &iio_dev_attr_choff_1.dev_attr.attr, &iio_dev_attr_choff_2.dev_attr.attr, NULL, }; static const struct attribute_group ade7759_attribute_group = { .attrs = ade7759_attributes, }; static const struct iio_info ade7759_info = { .attrs = &ade7759_attribute_group, .driver_module = THIS_MODULE, }; static int ade7759_probe(struct spi_device *spi) { int ret; struct ade7759_state *st; struct iio_dev *indio_dev; /* setup the industrialio driver allocated elements */ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st)); if (!indio_dev) return -ENOMEM; /* this is only used for removal purposes */ spi_set_drvdata(spi, indio_dev); st = iio_priv(indio_dev); st->us = spi; mutex_init(&st->buf_lock); indio_dev->name = spi->dev.driver->name; indio_dev->dev.parent = &spi->dev; indio_dev->info = &ade7759_info; indio_dev->modes = INDIO_DIRECT_MODE; /* 
Get the device into a sane initial state */ ret = ade7759_initial_setup(indio_dev); if (ret) return ret; return iio_device_register(indio_dev); } /* fixme, confirm ordering in this function */ static int ade7759_remove(struct spi_device *spi) { struct iio_dev *indio_dev = spi_get_drvdata(spi); iio_device_unregister(indio_dev); ade7759_stop_device(&indio_dev->dev); return 0; } static struct spi_driver ade7759_driver = { .driver = { .name = "ade7759", }, .probe = ade7759_probe, .remove = ade7759_remove, }; module_spi_driver(ade7759_driver); MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>"); MODULE_DESCRIPTION("Analog Devices ADE7759 Active Energy Metering IC Driver"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("spi:ad7759");
gpl-2.0
mixianghang/mptcp
sound/pci/ymfpci/ymfpci.c
484
10906
/* * The driver for the Yamaha's DS1/DS1E cards * Copyright (c) by Jaroslav Kysela <perex@perex.cz> * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/init.h> #include <linux/pci.h> #include <linux/time.h> #include <linux/module.h> #include <sound/core.h> #include "ymfpci.h" #include <sound/mpu401.h> #include <sound/opl3.h> #include <sound/initval.h> MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>"); MODULE_DESCRIPTION("Yamaha DS-1 PCI"); MODULE_LICENSE("GPL"); MODULE_SUPPORTED_DEVICE("{{Yamaha,YMF724}," "{Yamaha,YMF724F}," "{Yamaha,YMF740}," "{Yamaha,YMF740C}," "{Yamaha,YMF744}," "{Yamaha,YMF754}}"); static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */ static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */ static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP; /* Enable this card */ static long fm_port[SNDRV_CARDS]; static long mpu_port[SNDRV_CARDS]; #ifdef SUPPORT_JOYSTICK static long joystick_port[SNDRV_CARDS]; #endif static bool rear_switch[SNDRV_CARDS]; module_param_array(index, int, NULL, 0444); MODULE_PARM_DESC(index, "Index value for the Yamaha DS-1 PCI soundcard."); module_param_array(id, charp, NULL, 0444); MODULE_PARM_DESC(id, "ID string for the Yamaha DS-1 PCI soundcard."); module_param_array(enable, bool, NULL, 0444); MODULE_PARM_DESC(enable, "Enable 
Yamaha DS-1 soundcard."); module_param_array(mpu_port, long, NULL, 0444); MODULE_PARM_DESC(mpu_port, "MPU-401 Port."); module_param_array(fm_port, long, NULL, 0444); MODULE_PARM_DESC(fm_port, "FM OPL-3 Port."); #ifdef SUPPORT_JOYSTICK module_param_array(joystick_port, long, NULL, 0444); MODULE_PARM_DESC(joystick_port, "Joystick port address"); #endif module_param_array(rear_switch, bool, NULL, 0444); MODULE_PARM_DESC(rear_switch, "Enable shared rear/line-in switch"); static DEFINE_PCI_DEVICE_TABLE(snd_ymfpci_ids) = { { PCI_VDEVICE(YAMAHA, 0x0004), 0, }, /* YMF724 */ { PCI_VDEVICE(YAMAHA, 0x000d), 0, }, /* YMF724F */ { PCI_VDEVICE(YAMAHA, 0x000a), 0, }, /* YMF740 */ { PCI_VDEVICE(YAMAHA, 0x000c), 0, }, /* YMF740C */ { PCI_VDEVICE(YAMAHA, 0x0010), 0, }, /* YMF744 */ { PCI_VDEVICE(YAMAHA, 0x0012), 0, }, /* YMF754 */ { 0, } }; MODULE_DEVICE_TABLE(pci, snd_ymfpci_ids); #ifdef SUPPORT_JOYSTICK static int snd_ymfpci_create_gameport(struct snd_ymfpci *chip, int dev, int legacy_ctrl, int legacy_ctrl2) { struct gameport *gp; struct resource *r = NULL; int io_port = joystick_port[dev]; if (!io_port) return -ENODEV; if (chip->pci->device >= 0x0010) { /* YMF 744/754 */ if (io_port == 1) { /* auto-detect */ if (!(io_port = pci_resource_start(chip->pci, 2))) return -ENODEV; } } else { if (io_port == 1) { /* auto-detect */ for (io_port = 0x201; io_port <= 0x205; io_port++) { if (io_port == 0x203) continue; if ((r = request_region(io_port, 1, "YMFPCI gameport")) != NULL) break; } if (!r) { printk(KERN_ERR "ymfpci: no gameport ports available\n"); return -EBUSY; } } switch (io_port) { case 0x201: legacy_ctrl2 |= 0 << 6; break; case 0x202: legacy_ctrl2 |= 1 << 6; break; case 0x204: legacy_ctrl2 |= 2 << 6; break; case 0x205: legacy_ctrl2 |= 3 << 6; break; default: printk(KERN_ERR "ymfpci: invalid joystick port %#x", io_port); return -EINVAL; } } if (!r && !(r = request_region(io_port, 1, "YMFPCI gameport"))) { printk(KERN_ERR "ymfpci: joystick port %#x is in use.\n", io_port); return 
-EBUSY; } chip->gameport = gp = gameport_allocate_port(); if (!gp) { printk(KERN_ERR "ymfpci: cannot allocate memory for gameport\n"); release_and_free_resource(r); return -ENOMEM; } gameport_set_name(gp, "Yamaha YMF Gameport"); gameport_set_phys(gp, "pci%s/gameport0", pci_name(chip->pci)); gameport_set_dev_parent(gp, &chip->pci->dev); gp->io = io_port; gameport_set_port_data(gp, r); if (chip->pci->device >= 0x0010) /* YMF 744/754 */ pci_write_config_word(chip->pci, PCIR_DSXG_JOYBASE, io_port); pci_write_config_word(chip->pci, PCIR_DSXG_LEGACY, legacy_ctrl | YMFPCI_LEGACY_JPEN); pci_write_config_word(chip->pci, PCIR_DSXG_ELEGACY, legacy_ctrl2); gameport_register_port(chip->gameport); return 0; } void snd_ymfpci_free_gameport(struct snd_ymfpci *chip) { if (chip->gameport) { struct resource *r = gameport_get_port_data(chip->gameport); gameport_unregister_port(chip->gameport); chip->gameport = NULL; release_and_free_resource(r); } } #else static inline int snd_ymfpci_create_gameport(struct snd_ymfpci *chip, int dev, int l, int l2) { return -ENOSYS; } void snd_ymfpci_free_gameport(struct snd_ymfpci *chip) { } #endif /* SUPPORT_JOYSTICK */ static int snd_card_ymfpci_probe(struct pci_dev *pci, const struct pci_device_id *pci_id) { static int dev; struct snd_card *card; struct resource *fm_res = NULL; struct resource *mpu_res = NULL; struct snd_ymfpci *chip; struct snd_opl3 *opl3; const char *str, *model; int err; u16 legacy_ctrl, legacy_ctrl2, old_legacy_ctrl; if (dev >= SNDRV_CARDS) return -ENODEV; if (!enable[dev]) { dev++; return -ENOENT; } err = snd_card_create(index[dev], id[dev], THIS_MODULE, 0, &card); if (err < 0) return err; switch (pci_id->device) { case 0x0004: str = "YMF724"; model = "DS-1"; break; case 0x000d: str = "YMF724F"; model = "DS-1"; break; case 0x000a: str = "YMF740"; model = "DS-1L"; break; case 0x000c: str = "YMF740C"; model = "DS-1L"; break; case 0x0010: str = "YMF744"; model = "DS-1S"; break; case 0x0012: str = "YMF754"; model = "DS-1E"; break; 
default: model = str = "???"; break; } legacy_ctrl = 0; legacy_ctrl2 = 0x0800; /* SBEN = 0, SMOD = 01, LAD = 0 */ if (pci_id->device >= 0x0010) { /* YMF 744/754 */ if (fm_port[dev] == 1) { /* auto-detect */ fm_port[dev] = pci_resource_start(pci, 1); } if (fm_port[dev] > 0 && (fm_res = request_region(fm_port[dev], 4, "YMFPCI OPL3")) != NULL) { legacy_ctrl |= YMFPCI_LEGACY_FMEN; pci_write_config_word(pci, PCIR_DSXG_FMBASE, fm_port[dev]); } if (mpu_port[dev] == 1) { /* auto-detect */ mpu_port[dev] = pci_resource_start(pci, 1) + 0x20; } if (mpu_port[dev] > 0 && (mpu_res = request_region(mpu_port[dev], 2, "YMFPCI MPU401")) != NULL) { legacy_ctrl |= YMFPCI_LEGACY_MEN; pci_write_config_word(pci, PCIR_DSXG_MPU401BASE, mpu_port[dev]); } } else { switch (fm_port[dev]) { case 0x388: legacy_ctrl2 |= 0; break; case 0x398: legacy_ctrl2 |= 1; break; case 0x3a0: legacy_ctrl2 |= 2; break; case 0x3a8: legacy_ctrl2 |= 3; break; default: fm_port[dev] = 0; break; } if (fm_port[dev] > 0 && (fm_res = request_region(fm_port[dev], 4, "YMFPCI OPL3")) != NULL) { legacy_ctrl |= YMFPCI_LEGACY_FMEN; } else { legacy_ctrl2 &= ~YMFPCI_LEGACY2_FMIO; fm_port[dev] = 0; } switch (mpu_port[dev]) { case 0x330: legacy_ctrl2 |= 0 << 4; break; case 0x300: legacy_ctrl2 |= 1 << 4; break; case 0x332: legacy_ctrl2 |= 2 << 4; break; case 0x334: legacy_ctrl2 |= 3 << 4; break; default: mpu_port[dev] = 0; break; } if (mpu_port[dev] > 0 && (mpu_res = request_region(mpu_port[dev], 2, "YMFPCI MPU401")) != NULL) { legacy_ctrl |= YMFPCI_LEGACY_MEN; } else { legacy_ctrl2 &= ~YMFPCI_LEGACY2_MPUIO; mpu_port[dev] = 0; } } if (mpu_res) { legacy_ctrl |= YMFPCI_LEGACY_MIEN; legacy_ctrl2 |= YMFPCI_LEGACY2_IMOD; } pci_read_config_word(pci, PCIR_DSXG_LEGACY, &old_legacy_ctrl); pci_write_config_word(pci, PCIR_DSXG_LEGACY, legacy_ctrl); pci_write_config_word(pci, PCIR_DSXG_ELEGACY, legacy_ctrl2); if ((err = snd_ymfpci_create(card, pci, old_legacy_ctrl, &chip)) < 0) { snd_card_free(card); release_and_free_resource(mpu_res); 
release_and_free_resource(fm_res); return err; } chip->fm_res = fm_res; chip->mpu_res = mpu_res; card->private_data = chip; strcpy(card->driver, str); sprintf(card->shortname, "Yamaha %s (%s)", model, str); sprintf(card->longname, "%s at 0x%lx, irq %i", card->shortname, chip->reg_area_phys, chip->irq); if ((err = snd_ymfpci_pcm(chip, 0, NULL)) < 0) { snd_card_free(card); return err; } if ((err = snd_ymfpci_pcm_spdif(chip, 1, NULL)) < 0) { snd_card_free(card); return err; } err = snd_ymfpci_mixer(chip, rear_switch[dev]); if (err < 0) { snd_card_free(card); return err; } if (chip->ac97->ext_id & AC97_EI_SDAC) { err = snd_ymfpci_pcm_4ch(chip, 2, NULL); if (err < 0) { snd_card_free(card); return err; } err = snd_ymfpci_pcm2(chip, 3, NULL); if (err < 0) { snd_card_free(card); return err; } } if ((err = snd_ymfpci_timer(chip, 0)) < 0) { snd_card_free(card); return err; } if (chip->mpu_res) { if ((err = snd_mpu401_uart_new(card, 0, MPU401_HW_YMFPCI, mpu_port[dev], MPU401_INFO_INTEGRATED | MPU401_INFO_IRQ_HOOK, -1, &chip->rawmidi)) < 0) { printk(KERN_WARNING "ymfpci: cannot initialize MPU401 at 0x%lx, skipping...\n", mpu_port[dev]); legacy_ctrl &= ~YMFPCI_LEGACY_MIEN; /* disable MPU401 irq */ pci_write_config_word(pci, PCIR_DSXG_LEGACY, legacy_ctrl); } } if (chip->fm_res) { if ((err = snd_opl3_create(card, fm_port[dev], fm_port[dev] + 2, OPL3_HW_OPL3, 1, &opl3)) < 0) { printk(KERN_WARNING "ymfpci: cannot initialize FM OPL3 at 0x%lx, skipping...\n", fm_port[dev]); legacy_ctrl &= ~YMFPCI_LEGACY_FMEN; pci_write_config_word(pci, PCIR_DSXG_LEGACY, legacy_ctrl); } else if ((err = snd_opl3_hwdep_new(opl3, 0, 1, NULL)) < 0) { snd_card_free(card); snd_printk(KERN_ERR "cannot create opl3 hwdep\n"); return err; } } snd_ymfpci_create_gameport(chip, dev, legacy_ctrl, legacy_ctrl2); if ((err = snd_card_register(card)) < 0) { snd_card_free(card); return err; } pci_set_drvdata(pci, card); dev++; return 0; } static void snd_card_ymfpci_remove(struct pci_dev *pci) { 
snd_card_free(pci_get_drvdata(pci)); } static struct pci_driver ymfpci_driver = { .name = KBUILD_MODNAME, .id_table = snd_ymfpci_ids, .probe = snd_card_ymfpci_probe, .remove = snd_card_ymfpci_remove, #ifdef CONFIG_PM_SLEEP .driver = { .pm = &snd_ymfpci_pm, }, #endif }; module_pci_driver(ymfpci_driver);
gpl-2.0
ridon/ridon-kernel-mediatek-mt6582
arch/sparc/kernel/perf_event.c
1508
36571
/* Performance event support for sparc64. * * Copyright (C) 2009, 2010 David S. Miller <davem@davemloft.net> * * This code is based almost entirely upon the x86 perf event * code, which is: * * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de> * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar * Copyright (C) 2009 Jaswinder Singh Rajput * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> */ #include <linux/perf_event.h> #include <linux/kprobes.h> #include <linux/ftrace.h> #include <linux/kernel.h> #include <linux/kdebug.h> #include <linux/mutex.h> #include <asm/stacktrace.h> #include <asm/cpudata.h> #include <asm/uaccess.h> #include <linux/atomic.h> #include <asm/nmi.h> #include <asm/pcr.h> #include <asm/perfctr.h> #include <asm/cacheflush.h> #include "kernel.h" #include "kstack.h" /* Sparc64 chips have two performance counters, 32-bits each, with * overflow interrupts generated on transition from 0xffffffff to 0. * The counters are accessed in one go using a 64-bit register. * * Both counters are controlled using a single control register. The * only way to stop all sampling is to clear all of the context (user, * supervisor, hypervisor) sampling enable bits. But these bits apply * to both counters, thus the two counters can't be enabled/disabled * individually. * * The control register has two event fields, one for each of the two * counters. It's thus nearly impossible to have one counter going * while keeping the other one stopped. Therefore it is possible to * get overflow interrupts for counters not currently "in use" and * that condition must be checked in the overflow interrupt handler. * * So we use a hack, in that we program inactive counters with the * "sw_count0" and "sw_count1" events. These count how many times * the instruction "sethi %hi(0xfc000), %g0" is executed. It's an * unusual way to encode a NOP and therefore will not trigger in * normal code. 
*/ #define MAX_HWEVENTS 2 #define MAX_PERIOD ((1UL << 32) - 1) #define PIC_UPPER_INDEX 0 #define PIC_LOWER_INDEX 1 #define PIC_NO_INDEX -1 struct cpu_hw_events { /* Number of events currently scheduled onto this cpu. * This tells how many entries in the arrays below * are valid. */ int n_events; /* Number of new events added since the last hw_perf_disable(). * This works because the perf event layer always adds new * events inside of a perf_{disable,enable}() sequence. */ int n_added; /* Array of events current scheduled on this cpu. */ struct perf_event *event[MAX_HWEVENTS]; /* Array of encoded longs, specifying the %pcr register * encoding and the mask of PIC counters this even can * be scheduled on. See perf_event_encode() et al. */ unsigned long events[MAX_HWEVENTS]; /* The current counter index assigned to an event. When the * event hasn't been programmed into the cpu yet, this will * hold PIC_NO_INDEX. The event->hw.idx value tells us where * we ought to schedule the event. */ int current_idx[MAX_HWEVENTS]; /* Software copy of %pcr register on this cpu. */ u64 pcr; /* Enabled/disable state. */ int enabled; unsigned int group_flag; }; DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, }; /* An event map describes the characteristics of a performance * counter event. In particular it gives the encoding as well as * a mask telling which counters the event can be measured on. */ struct perf_event_map { u16 encoding; u8 pic_mask; #define PIC_NONE 0x00 #define PIC_UPPER 0x01 #define PIC_LOWER 0x02 }; /* Encode a perf_event_map entry into a long. 
*/ static unsigned long perf_event_encode(const struct perf_event_map *pmap) { return ((unsigned long) pmap->encoding << 16) | pmap->pic_mask; } static u8 perf_event_get_msk(unsigned long val) { return val & 0xff; } static u64 perf_event_get_enc(unsigned long val) { return val >> 16; } #define C(x) PERF_COUNT_HW_CACHE_##x #define CACHE_OP_UNSUPPORTED 0xfffe #define CACHE_OP_NONSENSE 0xffff typedef struct perf_event_map cache_map_t [PERF_COUNT_HW_CACHE_MAX] [PERF_COUNT_HW_CACHE_OP_MAX] [PERF_COUNT_HW_CACHE_RESULT_MAX]; struct sparc_pmu { const struct perf_event_map *(*event_map)(int); const cache_map_t *cache_map; int max_events; int upper_shift; int lower_shift; int event_mask; int hv_bit; int irq_bit; int upper_nop; int lower_nop; }; static const struct perf_event_map ultra3_perfmon_event_map[] = { [PERF_COUNT_HW_CPU_CYCLES] = { 0x0000, PIC_UPPER | PIC_LOWER }, [PERF_COUNT_HW_INSTRUCTIONS] = { 0x0001, PIC_UPPER | PIC_LOWER }, [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0009, PIC_LOWER }, [PERF_COUNT_HW_CACHE_MISSES] = { 0x0009, PIC_UPPER }, }; static const struct perf_event_map *ultra3_event_map(int event_id) { return &ultra3_perfmon_event_map[event_id]; } static const cache_map_t ultra3_cache_map = { [C(L1D)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = { 0x09, PIC_LOWER, }, [C(RESULT_MISS)] = { 0x09, PIC_UPPER, }, }, [C(OP_WRITE)] = { [C(RESULT_ACCESS)] = { 0x0a, PIC_LOWER }, [C(RESULT_MISS)] = { 0x0a, PIC_UPPER }, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, }, }, [C(L1I)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = { 0x09, PIC_LOWER, }, [C(RESULT_MISS)] = { 0x09, PIC_UPPER, }, }, [ C(OP_WRITE) ] = { [ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE }, [ C(RESULT_MISS) ] = { CACHE_OP_NONSENSE }, }, [ C(OP_PREFETCH) ] = { [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, }, }, [C(LL)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = { 0x0c, PIC_LOWER, }, [C(RESULT_MISS)] 
= { 0x0c, PIC_UPPER, }, }, [C(OP_WRITE)] = { [C(RESULT_ACCESS)] = { 0x0c, PIC_LOWER }, [C(RESULT_MISS)] = { 0x0c, PIC_UPPER }, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, }, }, [C(DTLB)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, [C(RESULT_MISS)] = { 0x12, PIC_UPPER, }, }, [ C(OP_WRITE) ] = { [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, }, [ C(OP_PREFETCH) ] = { [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, }, }, [C(ITLB)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, [C(RESULT_MISS)] = { 0x11, PIC_UPPER, }, }, [ C(OP_WRITE) ] = { [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, }, [ C(OP_PREFETCH) ] = { [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, }, }, [C(BPU)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, }, [ C(OP_WRITE) ] = { [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, }, [ C(OP_PREFETCH) ] = { [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, }, }, [C(NODE)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, [C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, }, [ C(OP_WRITE) ] = { [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, }, [ C(OP_PREFETCH) ] = { [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, }, }, }; static const struct sparc_pmu ultra3_pmu = { .event_map = ultra3_event_map, .cache_map = &ultra3_cache_map, .max_events = ARRAY_SIZE(ultra3_perfmon_event_map), .upper_shift = 11, .lower_shift = 4, .event_mask = 0x3f, .upper_nop = 0x1c, .lower_nop = 0x14, }; /* Niagara1 is 
very limited. The upper PIC is hard-locked to count * only instructions, so it is free running which creates all kinds of * problems. Some hardware designs make one wonder if the creator * even looked at how this stuff gets used by software. */ static const struct perf_event_map niagara1_perfmon_event_map[] = { [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, PIC_UPPER }, [PERF_COUNT_HW_INSTRUCTIONS] = { 0x00, PIC_UPPER }, [PERF_COUNT_HW_CACHE_REFERENCES] = { 0, PIC_NONE }, [PERF_COUNT_HW_CACHE_MISSES] = { 0x03, PIC_LOWER }, }; static const struct perf_event_map *niagara1_event_map(int event_id) { return &niagara1_perfmon_event_map[event_id]; } static const cache_map_t niagara1_cache_map = { [C(L1D)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, [C(RESULT_MISS)] = { 0x03, PIC_LOWER, }, }, [C(OP_WRITE)] = { [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, [C(RESULT_MISS)] = { 0x03, PIC_LOWER, }, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, }, }, [C(L1I)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = { 0x00, PIC_UPPER }, [C(RESULT_MISS)] = { 0x02, PIC_LOWER, }, }, [ C(OP_WRITE) ] = { [ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE }, [ C(RESULT_MISS) ] = { CACHE_OP_NONSENSE }, }, [ C(OP_PREFETCH) ] = { [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, }, }, [C(LL)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, [C(RESULT_MISS)] = { 0x07, PIC_LOWER, }, }, [C(OP_WRITE)] = { [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, [C(RESULT_MISS)] = { 0x07, PIC_LOWER, }, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, }, }, [C(DTLB)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, [C(RESULT_MISS)] = { 0x05, PIC_LOWER, }, }, [ C(OP_WRITE) ] = { [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, }, [ C(OP_PREFETCH) ] 
= { [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, }, }, [C(ITLB)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, [C(RESULT_MISS)] = { 0x04, PIC_LOWER, }, }, [ C(OP_WRITE) ] = { [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, }, [ C(OP_PREFETCH) ] = { [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, }, }, [C(BPU)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, }, [ C(OP_WRITE) ] = { [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, }, [ C(OP_PREFETCH) ] = { [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, }, }, [C(NODE)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, [C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, }, [ C(OP_WRITE) ] = { [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, }, [ C(OP_PREFETCH) ] = { [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, }, }, }; static const struct sparc_pmu niagara1_pmu = { .event_map = niagara1_event_map, .cache_map = &niagara1_cache_map, .max_events = ARRAY_SIZE(niagara1_perfmon_event_map), .upper_shift = 0, .lower_shift = 4, .event_mask = 0x7, .upper_nop = 0x0, .lower_nop = 0x0, }; static const struct perf_event_map niagara2_perfmon_event_map[] = { [PERF_COUNT_HW_CPU_CYCLES] = { 0x02ff, PIC_UPPER | PIC_LOWER }, [PERF_COUNT_HW_INSTRUCTIONS] = { 0x02ff, PIC_UPPER | PIC_LOWER }, [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0208, PIC_UPPER | PIC_LOWER }, [PERF_COUNT_HW_CACHE_MISSES] = { 0x0302, PIC_UPPER | PIC_LOWER }, [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x0201, PIC_UPPER | PIC_LOWER }, [PERF_COUNT_HW_BRANCH_MISSES] = { 0x0202, PIC_UPPER | PIC_LOWER }, }; static const struct perf_event_map 
*niagara2_event_map(int event_id) { return &niagara2_perfmon_event_map[event_id]; } static const cache_map_t niagara2_cache_map = { [C(L1D)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = { 0x0208, PIC_UPPER | PIC_LOWER, }, [C(RESULT_MISS)] = { 0x0302, PIC_UPPER | PIC_LOWER, }, }, [C(OP_WRITE)] = { [C(RESULT_ACCESS)] = { 0x0210, PIC_UPPER | PIC_LOWER, }, [C(RESULT_MISS)] = { 0x0302, PIC_UPPER | PIC_LOWER, }, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, }, }, [C(L1I)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = { 0x02ff, PIC_UPPER | PIC_LOWER, }, [C(RESULT_MISS)] = { 0x0301, PIC_UPPER | PIC_LOWER, }, }, [ C(OP_WRITE) ] = { [ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE }, [ C(RESULT_MISS) ] = { CACHE_OP_NONSENSE }, }, [ C(OP_PREFETCH) ] = { [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, }, }, [C(LL)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = { 0x0208, PIC_UPPER | PIC_LOWER, }, [C(RESULT_MISS)] = { 0x0330, PIC_UPPER | PIC_LOWER, }, }, [C(OP_WRITE)] = { [C(RESULT_ACCESS)] = { 0x0210, PIC_UPPER | PIC_LOWER, }, [C(RESULT_MISS)] = { 0x0320, PIC_UPPER | PIC_LOWER, }, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, }, }, [C(DTLB)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, [C(RESULT_MISS)] = { 0x0b08, PIC_UPPER | PIC_LOWER, }, }, [ C(OP_WRITE) ] = { [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, }, [ C(OP_PREFETCH) ] = { [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, }, }, [C(ITLB)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, [C(RESULT_MISS)] = { 0xb04, PIC_UPPER | PIC_LOWER, }, }, [ C(OP_WRITE) ] = { [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, }, [ C(OP_PREFETCH) ] = { [ C(RESULT_ACCESS) ] = { 
CACHE_OP_UNSUPPORTED }, [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, }, }, [C(BPU)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, }, [ C(OP_WRITE) ] = { [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, }, [ C(OP_PREFETCH) ] = { [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, }, }, [C(NODE)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, [C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, }, [ C(OP_WRITE) ] = { [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, }, [ C(OP_PREFETCH) ] = { [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, }, }, }; static const struct sparc_pmu niagara2_pmu = { .event_map = niagara2_event_map, .cache_map = &niagara2_cache_map, .max_events = ARRAY_SIZE(niagara2_perfmon_event_map), .upper_shift = 19, .lower_shift = 6, .event_mask = 0xfff, .hv_bit = 0x8, .irq_bit = 0x30, .upper_nop = 0x220, .lower_nop = 0x220, }; static const struct sparc_pmu *sparc_pmu __read_mostly; static u64 event_encoding(u64 event_id, int idx) { if (idx == PIC_UPPER_INDEX) event_id <<= sparc_pmu->upper_shift; else event_id <<= sparc_pmu->lower_shift; return event_id; } static u64 mask_for_index(int idx) { return event_encoding(sparc_pmu->event_mask, idx); } static u64 nop_for_index(int idx) { return event_encoding(idx == PIC_UPPER_INDEX ? 
sparc_pmu->upper_nop : sparc_pmu->lower_nop, idx); } static inline void sparc_pmu_enable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx) { u64 enc, val, mask = mask_for_index(idx); enc = perf_event_get_enc(cpuc->events[idx]); val = cpuc->pcr; val &= ~mask; val |= event_encoding(enc, idx); cpuc->pcr = val; pcr_ops->write(cpuc->pcr); } static inline void sparc_pmu_disable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx) { u64 mask = mask_for_index(idx); u64 nop = nop_for_index(idx); u64 val; val = cpuc->pcr; val &= ~mask; val |= nop; cpuc->pcr = val; pcr_ops->write(cpuc->pcr); } static u32 read_pmc(int idx) { u64 val; read_pic(val); if (idx == PIC_UPPER_INDEX) val >>= 32; return val & 0xffffffff; } static void write_pmc(int idx, u64 val) { u64 shift, mask, pic; shift = 0; if (idx == PIC_UPPER_INDEX) shift = 32; mask = ((u64) 0xffffffff) << shift; val <<= shift; read_pic(pic); pic &= ~mask; pic |= val; write_pic(pic); } static u64 sparc_perf_event_update(struct perf_event *event, struct hw_perf_event *hwc, int idx) { int shift = 64 - 32; u64 prev_raw_count, new_raw_count; s64 delta; again: prev_raw_count = local64_read(&hwc->prev_count); new_raw_count = read_pmc(idx); if (local64_cmpxchg(&hwc->prev_count, prev_raw_count, new_raw_count) != prev_raw_count) goto again; delta = (new_raw_count << shift) - (prev_raw_count << shift); delta >>= shift; local64_add(delta, &event->count); local64_sub(delta, &hwc->period_left); return new_raw_count; } static int sparc_perf_event_set_period(struct perf_event *event, struct hw_perf_event *hwc, int idx) { s64 left = local64_read(&hwc->period_left); s64 period = hwc->sample_period; int ret = 0; if (unlikely(left <= -period)) { left = period; local64_set(&hwc->period_left, left); hwc->last_period = period; ret = 1; } if (unlikely(left <= 0)) { left += period; local64_set(&hwc->period_left, left); hwc->last_period = period; ret = 1; } if (left > MAX_PERIOD) left = MAX_PERIOD; 
local64_set(&hwc->prev_count, (u64)-left); write_pmc(idx, (u64)(-left) & 0xffffffff); perf_event_update_userpage(event); return ret; } /* If performance event entries have been added, move existing * events around (if necessary) and then assign new entries to * counters. */ static u64 maybe_change_configuration(struct cpu_hw_events *cpuc, u64 pcr) { int i; if (!cpuc->n_added) goto out; /* Read in the counters which are moving. */ for (i = 0; i < cpuc->n_events; i++) { struct perf_event *cp = cpuc->event[i]; if (cpuc->current_idx[i] != PIC_NO_INDEX && cpuc->current_idx[i] != cp->hw.idx) { sparc_perf_event_update(cp, &cp->hw, cpuc->current_idx[i]); cpuc->current_idx[i] = PIC_NO_INDEX; } } /* Assign to counters all unassigned events. */ for (i = 0; i < cpuc->n_events; i++) { struct perf_event *cp = cpuc->event[i]; struct hw_perf_event *hwc = &cp->hw; int idx = hwc->idx; u64 enc; if (cpuc->current_idx[i] != PIC_NO_INDEX) continue; sparc_perf_event_set_period(cp, hwc, idx); cpuc->current_idx[i] = idx; enc = perf_event_get_enc(cpuc->events[i]); pcr &= ~mask_for_index(idx); if (hwc->state & PERF_HES_STOPPED) pcr |= nop_for_index(idx); else pcr |= event_encoding(enc, idx); } out: return pcr; } static void sparc_pmu_enable(struct pmu *pmu) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); u64 pcr; if (cpuc->enabled) return; cpuc->enabled = 1; barrier(); pcr = cpuc->pcr; if (!cpuc->n_events) { pcr = 0; } else { pcr = maybe_change_configuration(cpuc, pcr); /* We require that all of the events have the same * configuration, so just fetch the settings from the * first entry. 
*/ cpuc->pcr = pcr | cpuc->event[0]->hw.config_base; } pcr_ops->write(cpuc->pcr); } static void sparc_pmu_disable(struct pmu *pmu) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); u64 val; if (!cpuc->enabled) return; cpuc->enabled = 0; cpuc->n_added = 0; val = cpuc->pcr; val &= ~(PCR_UTRACE | PCR_STRACE | sparc_pmu->hv_bit | sparc_pmu->irq_bit); cpuc->pcr = val; pcr_ops->write(cpuc->pcr); } static int active_event_index(struct cpu_hw_events *cpuc, struct perf_event *event) { int i; for (i = 0; i < cpuc->n_events; i++) { if (cpuc->event[i] == event) break; } BUG_ON(i == cpuc->n_events); return cpuc->current_idx[i]; } static void sparc_pmu_start(struct perf_event *event, int flags) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); int idx = active_event_index(cpuc, event); if (flags & PERF_EF_RELOAD) { WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); sparc_perf_event_set_period(event, &event->hw, idx); } event->hw.state = 0; sparc_pmu_enable_event(cpuc, &event->hw, idx); } static void sparc_pmu_stop(struct perf_event *event, int flags) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); int idx = active_event_index(cpuc, event); if (!(event->hw.state & PERF_HES_STOPPED)) { sparc_pmu_disable_event(cpuc, &event->hw, idx); event->hw.state |= PERF_HES_STOPPED; } if (!(event->hw.state & PERF_HES_UPTODATE) && (flags & PERF_EF_UPDATE)) { sparc_perf_event_update(event, &event->hw, idx); event->hw.state |= PERF_HES_UPTODATE; } } static void sparc_pmu_del(struct perf_event *event, int _flags) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); unsigned long flags; int i; local_irq_save(flags); perf_pmu_disable(event->pmu); for (i = 0; i < cpuc->n_events; i++) { if (event == cpuc->event[i]) { /* Absorb the final count and turn off the * event. */ sparc_pmu_stop(event, PERF_EF_UPDATE); /* Shift remaining entries down into * the existing slot. 
*/ while (++i < cpuc->n_events) { cpuc->event[i - 1] = cpuc->event[i]; cpuc->events[i - 1] = cpuc->events[i]; cpuc->current_idx[i - 1] = cpuc->current_idx[i]; } perf_event_update_userpage(event); cpuc->n_events--; break; } } perf_pmu_enable(event->pmu); local_irq_restore(flags); } static void sparc_pmu_read(struct perf_event *event) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); int idx = active_event_index(cpuc, event); struct hw_perf_event *hwc = &event->hw; sparc_perf_event_update(event, hwc, idx); } static atomic_t active_events = ATOMIC_INIT(0); static DEFINE_MUTEX(pmc_grab_mutex); static void perf_stop_nmi_watchdog(void *unused) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); stop_nmi_watchdog(NULL); cpuc->pcr = pcr_ops->read(); } void perf_event_grab_pmc(void) { if (atomic_inc_not_zero(&active_events)) return; mutex_lock(&pmc_grab_mutex); if (atomic_read(&active_events) == 0) { if (atomic_read(&nmi_active) > 0) { on_each_cpu(perf_stop_nmi_watchdog, NULL, 1); BUG_ON(atomic_read(&nmi_active) != 0); } atomic_inc(&active_events); } mutex_unlock(&pmc_grab_mutex); } void perf_event_release_pmc(void) { if (atomic_dec_and_mutex_lock(&active_events, &pmc_grab_mutex)) { if (atomic_read(&nmi_active) == 0) on_each_cpu(start_nmi_watchdog, NULL, 1); mutex_unlock(&pmc_grab_mutex); } } static const struct perf_event_map *sparc_map_cache_event(u64 config) { unsigned int cache_type, cache_op, cache_result; const struct perf_event_map *pmap; if (!sparc_pmu->cache_map) return ERR_PTR(-ENOENT); cache_type = (config >> 0) & 0xff; if (cache_type >= PERF_COUNT_HW_CACHE_MAX) return ERR_PTR(-EINVAL); cache_op = (config >> 8) & 0xff; if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX) return ERR_PTR(-EINVAL); cache_result = (config >> 16) & 0xff; if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX) return ERR_PTR(-EINVAL); pmap = &((*sparc_pmu->cache_map)[cache_type][cache_op][cache_result]); if (pmap->encoding == CACHE_OP_UNSUPPORTED) return ERR_PTR(-ENOENT); if 
(pmap->encoding == CACHE_OP_NONSENSE) return ERR_PTR(-EINVAL); return pmap; } static void hw_perf_event_destroy(struct perf_event *event) { perf_event_release_pmc(); } /* Make sure all events can be scheduled into the hardware at * the same time. This is simplified by the fact that we only * need to support 2 simultaneous HW events. * * As a side effect, the evts[]->hw.idx values will be assigned * on success. These are pending indexes. When the events are * actually programmed into the chip, these values will propagate * to the per-cpu cpuc->current_idx[] slots, see the code in * maybe_change_configuration() for details. */ static int sparc_check_constraints(struct perf_event **evts, unsigned long *events, int n_ev) { u8 msk0 = 0, msk1 = 0; int idx0 = 0; /* This case is possible when we are invoked from * hw_perf_group_sched_in(). */ if (!n_ev) return 0; if (n_ev > MAX_HWEVENTS) return -1; msk0 = perf_event_get_msk(events[0]); if (n_ev == 1) { if (msk0 & PIC_LOWER) idx0 = 1; goto success; } BUG_ON(n_ev != 2); msk1 = perf_event_get_msk(events[1]); /* If both events can go on any counter, OK. */ if (msk0 == (PIC_UPPER | PIC_LOWER) && msk1 == (PIC_UPPER | PIC_LOWER)) goto success; /* If one event is limited to a specific counter, * and the other can go on both, OK. */ if ((msk0 == PIC_UPPER || msk0 == PIC_LOWER) && msk1 == (PIC_UPPER | PIC_LOWER)) { if (msk0 & PIC_LOWER) idx0 = 1; goto success; } if ((msk1 == PIC_UPPER || msk1 == PIC_LOWER) && msk0 == (PIC_UPPER | PIC_LOWER)) { if (msk1 & PIC_UPPER) idx0 = 1; goto success; } /* If the events are fixed to different counters, OK. */ if ((msk0 == PIC_UPPER && msk1 == PIC_LOWER) || (msk0 == PIC_LOWER && msk1 == PIC_UPPER)) { if (msk0 & PIC_LOWER) idx0 = 1; goto success; } /* Otherwise, there is a conflict. 
*/ return -1; success: evts[0]->hw.idx = idx0; if (n_ev == 2) evts[1]->hw.idx = idx0 ^ 1; return 0; } static int check_excludes(struct perf_event **evts, int n_prev, int n_new) { int eu = 0, ek = 0, eh = 0; struct perf_event *event; int i, n, first; n = n_prev + n_new; if (n <= 1) return 0; first = 1; for (i = 0; i < n; i++) { event = evts[i]; if (first) { eu = event->attr.exclude_user; ek = event->attr.exclude_kernel; eh = event->attr.exclude_hv; first = 0; } else if (event->attr.exclude_user != eu || event->attr.exclude_kernel != ek || event->attr.exclude_hv != eh) { return -EAGAIN; } } return 0; } static int collect_events(struct perf_event *group, int max_count, struct perf_event *evts[], unsigned long *events, int *current_idx) { struct perf_event *event; int n = 0; if (!is_software_event(group)) { if (n >= max_count) return -1; evts[n] = group; events[n] = group->hw.event_base; current_idx[n++] = PIC_NO_INDEX; } list_for_each_entry(event, &group->sibling_list, group_entry) { if (!is_software_event(event) && event->state != PERF_EVENT_STATE_OFF) { if (n >= max_count) return -1; evts[n] = event; events[n] = event->hw.event_base; current_idx[n++] = PIC_NO_INDEX; } } return n; } static int sparc_pmu_add(struct perf_event *event, int ef_flags) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); int n0, ret = -EAGAIN; unsigned long flags; local_irq_save(flags); perf_pmu_disable(event->pmu); n0 = cpuc->n_events; if (n0 >= MAX_HWEVENTS) goto out; cpuc->event[n0] = event; cpuc->events[n0] = event->hw.event_base; cpuc->current_idx[n0] = PIC_NO_INDEX; event->hw.state = PERF_HES_UPTODATE; if (!(ef_flags & PERF_EF_START)) event->hw.state |= PERF_HES_STOPPED; /* * If group events scheduling transaction was started, * skip the schedulability test here, it will be performed * at commit time(->commit_txn) as a whole */ if (cpuc->group_flag & PERF_EVENT_TXN) goto nocheck; if (check_excludes(cpuc->event, n0, 1)) goto out; if (sparc_check_constraints(cpuc->event, 
cpuc->events, n0 + 1)) goto out; nocheck: cpuc->n_events++; cpuc->n_added++; ret = 0; out: perf_pmu_enable(event->pmu); local_irq_restore(flags); return ret; } static int sparc_pmu_event_init(struct perf_event *event) { struct perf_event_attr *attr = &event->attr; struct perf_event *evts[MAX_HWEVENTS]; struct hw_perf_event *hwc = &event->hw; unsigned long events[MAX_HWEVENTS]; int current_idx_dmy[MAX_HWEVENTS]; const struct perf_event_map *pmap; int n; if (atomic_read(&nmi_active) < 0) return -ENODEV; /* does not support taken branch sampling */ if (has_branch_stack(event)) return -EOPNOTSUPP; switch (attr->type) { case PERF_TYPE_HARDWARE: if (attr->config >= sparc_pmu->max_events) return -EINVAL; pmap = sparc_pmu->event_map(attr->config); break; case PERF_TYPE_HW_CACHE: pmap = sparc_map_cache_event(attr->config); if (IS_ERR(pmap)) return PTR_ERR(pmap); break; case PERF_TYPE_RAW: pmap = NULL; break; default: return -ENOENT; } if (pmap) { hwc->event_base = perf_event_encode(pmap); } else { /* * User gives us "(encoding << 16) | pic_mask" for * PERF_TYPE_RAW events. */ hwc->event_base = attr->config; } /* We save the enable bits in the config_base. */ hwc->config_base = sparc_pmu->irq_bit; if (!attr->exclude_user) hwc->config_base |= PCR_UTRACE; if (!attr->exclude_kernel) hwc->config_base |= PCR_STRACE; if (!attr->exclude_hv) hwc->config_base |= sparc_pmu->hv_bit; n = 0; if (event->group_leader != event) { n = collect_events(event->group_leader, MAX_HWEVENTS - 1, evts, events, current_idx_dmy); if (n < 0) return -EINVAL; } events[n] = hwc->event_base; evts[n] = event; if (check_excludes(evts, n, 1)) return -EINVAL; if (sparc_check_constraints(evts, events, n + 1)) return -EINVAL; hwc->idx = PIC_NO_INDEX; /* Try to do all error checking before this point, as unwinding * state after grabbing the PMC is difficult. 
*/ perf_event_grab_pmc(); event->destroy = hw_perf_event_destroy; if (!hwc->sample_period) { hwc->sample_period = MAX_PERIOD; hwc->last_period = hwc->sample_period; local64_set(&hwc->period_left, hwc->sample_period); } return 0; } /* * Start group events scheduling transaction * Set the flag to make pmu::enable() not perform the * schedulability test, it will be performed at commit time */ static void sparc_pmu_start_txn(struct pmu *pmu) { struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); perf_pmu_disable(pmu); cpuhw->group_flag |= PERF_EVENT_TXN; } /* * Stop group events scheduling transaction * Clear the flag and pmu::enable() will perform the * schedulability test. */ static void sparc_pmu_cancel_txn(struct pmu *pmu) { struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); cpuhw->group_flag &= ~PERF_EVENT_TXN; perf_pmu_enable(pmu); } /* * Commit group events scheduling transaction * Perform the group schedulability test as a whole * Return 0 if success */ static int sparc_pmu_commit_txn(struct pmu *pmu) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); int n; if (!sparc_pmu) return -EINVAL; cpuc = &__get_cpu_var(cpu_hw_events); n = cpuc->n_events; if (check_excludes(cpuc->event, 0, n)) return -EINVAL; if (sparc_check_constraints(cpuc->event, cpuc->events, n)) return -EAGAIN; cpuc->group_flag &= ~PERF_EVENT_TXN; perf_pmu_enable(pmu); return 0; } static struct pmu pmu = { .pmu_enable = sparc_pmu_enable, .pmu_disable = sparc_pmu_disable, .event_init = sparc_pmu_event_init, .add = sparc_pmu_add, .del = sparc_pmu_del, .start = sparc_pmu_start, .stop = sparc_pmu_stop, .read = sparc_pmu_read, .start_txn = sparc_pmu_start_txn, .cancel_txn = sparc_pmu_cancel_txn, .commit_txn = sparc_pmu_commit_txn, }; void perf_event_print_debug(void) { unsigned long flags; u64 pcr, pic; int cpu; if (!sparc_pmu) return; local_irq_save(flags); cpu = smp_processor_id(); pcr = pcr_ops->read(); read_pic(pic); pr_info("\n"); pr_info("CPU#%d: PCR[%016llx] 
PIC[%016llx]\n", cpu, pcr, pic); local_irq_restore(flags); } static int __kprobes perf_event_nmi_handler(struct notifier_block *self, unsigned long cmd, void *__args) { struct die_args *args = __args; struct perf_sample_data data; struct cpu_hw_events *cpuc; struct pt_regs *regs; int i; if (!atomic_read(&active_events)) return NOTIFY_DONE; switch (cmd) { case DIE_NMI: break; default: return NOTIFY_DONE; } regs = args->regs; perf_sample_data_init(&data, 0); cpuc = &__get_cpu_var(cpu_hw_events); /* If the PMU has the TOE IRQ enable bits, we need to do a * dummy write to the %pcr to clear the overflow bits and thus * the interrupt. * * Do this before we peek at the counters to determine * overflow so we don't lose any events. */ if (sparc_pmu->irq_bit) pcr_ops->write(cpuc->pcr); for (i = 0; i < cpuc->n_events; i++) { struct perf_event *event = cpuc->event[i]; int idx = cpuc->current_idx[i]; struct hw_perf_event *hwc; u64 val; hwc = &event->hw; val = sparc_perf_event_update(event, hwc, idx); if (val & (1ULL << 31)) continue; data.period = event->hw.last_period; if (!sparc_perf_event_set_period(event, hwc, idx)) continue; if (perf_event_overflow(event, &data, regs)) sparc_pmu_stop(event, 0); } return NOTIFY_STOP; } static __read_mostly struct notifier_block perf_event_nmi_notifier = { .notifier_call = perf_event_nmi_handler, }; static bool __init supported_pmu(void) { if (!strcmp(sparc_pmu_type, "ultra3") || !strcmp(sparc_pmu_type, "ultra3+") || !strcmp(sparc_pmu_type, "ultra3i") || !strcmp(sparc_pmu_type, "ultra4+")) { sparc_pmu = &ultra3_pmu; return true; } if (!strcmp(sparc_pmu_type, "niagara")) { sparc_pmu = &niagara1_pmu; return true; } if (!strcmp(sparc_pmu_type, "niagara2") || !strcmp(sparc_pmu_type, "niagara3")) { sparc_pmu = &niagara2_pmu; return true; } return false; } int __init init_hw_perf_events(void) { pr_info("Performance events: "); if (!supported_pmu()) { pr_cont("No support for PMU type '%s'\n", sparc_pmu_type); return 0; } pr_cont("Supported PMU type 
is '%s'\n", sparc_pmu_type); perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW); register_die_notifier(&perf_event_nmi_notifier); return 0; } early_initcall(init_hw_perf_events); void perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs) { unsigned long ksp, fp; #ifdef CONFIG_FUNCTION_GRAPH_TRACER int graph = 0; #endif stack_trace_flush(); perf_callchain_store(entry, regs->tpc); ksp = regs->u_regs[UREG_I6]; fp = ksp + STACK_BIAS; do { struct sparc_stackf *sf; struct pt_regs *regs; unsigned long pc; if (!kstack_valid(current_thread_info(), fp)) break; sf = (struct sparc_stackf *) fp; regs = (struct pt_regs *) (sf + 1); if (kstack_is_trap_frame(current_thread_info(), regs)) { if (user_mode(regs)) break; pc = regs->tpc; fp = regs->u_regs[UREG_I6] + STACK_BIAS; } else { pc = sf->callers_pc; fp = (unsigned long)sf->fp + STACK_BIAS; } perf_callchain_store(entry, pc); #ifdef CONFIG_FUNCTION_GRAPH_TRACER if ((pc + 8UL) == (unsigned long) &return_to_handler) { int index = current->curr_ret_stack; if (current->ret_stack && index >= graph) { pc = current->ret_stack[index - graph].ret; perf_callchain_store(entry, pc); graph++; } } #endif } while (entry->nr < PERF_MAX_STACK_DEPTH); } static void perf_callchain_user_64(struct perf_callchain_entry *entry, struct pt_regs *regs) { unsigned long ufp; ufp = regs->u_regs[UREG_I6] + STACK_BIAS; do { struct sparc_stackf *usf, sf; unsigned long pc; usf = (struct sparc_stackf *) ufp; if (__copy_from_user_inatomic(&sf, usf, sizeof(sf))) break; pc = sf.callers_pc; ufp = (unsigned long)sf.fp + STACK_BIAS; perf_callchain_store(entry, pc); } while (entry->nr < PERF_MAX_STACK_DEPTH); } static void perf_callchain_user_32(struct perf_callchain_entry *entry, struct pt_regs *regs) { unsigned long ufp; ufp = regs->u_regs[UREG_I6] & 0xffffffffUL; do { struct sparc_stackf32 *usf, sf; unsigned long pc; usf = (struct sparc_stackf32 *) ufp; if (__copy_from_user_inatomic(&sf, usf, sizeof(sf))) break; pc = sf.callers_pc; ufp = 
(unsigned long)sf.fp; perf_callchain_store(entry, pc); } while (entry->nr < PERF_MAX_STACK_DEPTH); } void perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs) { perf_callchain_store(entry, regs->tpc); if (!current->mm) return; flushw_user(); if (test_thread_flag(TIF_32BIT)) perf_callchain_user_32(entry, regs); else perf_callchain_user_64(entry, regs); }
gpl-2.0
v-superuser/android_kernel_sony_msm8960
drivers/media/video/msm/imx072_reg.c
1764
3711
/* Copyright (c) 2011, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include "imx072.h"

/*
 * Register tables for the Sony IMX072 camera sensor.
 *
 * Each entry is a {register address, value} pair.  The named registers
 * (frame_length, line_length, x/y_addr_start/end, x/y_out_size,
 * x/y_even/odd_inc) follow the SMIA-style 16-bit address map; entries are
 * presumably written to the sensor in array order by the imx072 driver
 * core -- confirm against the consumer of struct imx072_reg in imx072.c.
 */

/* Preview mode: subsampled readout (x/y_odd_inc = 3) with a 0x0518 x 0x03D4
 * output size and a shorter frame length (0x03F7) than snapshot mode. */
struct imx072_i2c_reg_conf imx072_prev_settings[] = {
	{0x0340, 0x03},/*frame_length*/
	{0x0341, 0xF7},/*frame_length*/
	{0x0342, 0x0A},/*line_length*/
	{0x0343, 0xE0},/*line_length*/
	{0x0344, 0x00},/*x_addr_start*/
	{0x0345, 0x00},/*x_addr_start*/
	{0x0346, 0x00},/*y_addr_start*/
	{0x0347, 0x00},/*y_addr_start*/
	{0x0348, 0x0A},/*x_addr_end*/
	{0x0349, 0x2F},/*x_addr_end*/
	{0x034A, 0x07},/*y_addr_end*/
	{0x034B, 0xA7},/*y_addr_end*/
	{0x034C, 0x05},/*x_out_size*/
	{0x034D, 0x18},/*x_out_size*/
	{0x034E, 0x03},/*y_out_size*/
	{0x034F, 0xD4},/*y_out_size*/
	{0x0381, 0x01},/*x_even_inc*/
	{0x0383, 0x03},/*x_odd_inc*/
	{0x0385, 0x01},/*y_even_inc*/
	{0x0387, 0x03},/*y_odd_inc*/
	{0x3016, 0x06},/*VMODEADD*/
	{0x3017, 0x40},
	{0x3069, 0x24},
	{0x306A, 0x00},
	{0x306B, 0xCB},
	{0x306C, 0x07},
	{0x30E8, 0x86},
	{0x3304, 0x03},
	{0x3305, 0x02},
	{0x3306, 0x0A},
	{0x3307, 0x02},
	{0x3308, 0x11},
	{0x3309, 0x04},
	{0x330A, 0x05},
	{0x330B, 0x04},
	{0x330C, 0x05},
	{0x330D, 0x04},
	{0x330E, 0x01},
	{0x3301, 0x80},
};

/* Snapshot mode: full-resolution readout (x/y_odd_inc = 1), same capture
 * window as preview but 0x0A30 x 0x07A8 output size and a longer frame
 * length (0x07EE). */
struct imx072_i2c_reg_conf imx072_snap_settings[] = {
	{0x0340, 0x07},/*frame_length*/
	{0x0341, 0xEE},/*frame_length*/
	{0x0342, 0x0A},/*line_length*/
	{0x0343, 0xE0},/*line_length*/
	{0x0344, 0x00},/*x_addr_start*/
	{0x0345, 0x00},/*x_addr_start*/
	{0x0346, 0x00},/*y_addr_start*/
	{0x0347, 0x00},/*y_addr_start*/
	{0x0348, 0x0A},/*x_addr_end*/
	{0x0349, 0x2F},/*x_addr_end*/
	{0x034A, 0x07},/*y_addr_end*/
	{0x034B, 0xA7},/*y_addr_end*/
	{0x034C, 0x0A},/*x_out_size*/
	{0x034D, 0x30},/*x_out_size*/
	{0x034E, 0x07},/*y_out_size*/
	{0x034F, 0xA8},/*y_out_size*/
	{0x0381, 0x01},/*x_even_inc*/
	{0x0383, 0x01},/*x_odd_inc*/
	{0x0385, 0x01},/*y_even_inc*/
	{0x0387, 0x01},/*y_odd_inc*/
	{0x3016, 0x06},/*VMODEADD*/
	{0x3017, 0x40},
	{0x3069, 0x24},
	{0x306A, 0x00},
	{0x306B, 0xCB},
	{0x306C, 0x07},
	{0x30E8, 0x06},
	{0x3304, 0x05},
	{0x3305, 0x04},
	{0x3306, 0x15},
	{0x3307, 0x02},
	{0x3308, 0x11},
	{0x3309, 0x07},
	{0x330A, 0x05},
	{0x330B, 0x04},
	{0x330C, 0x05},
	{0x330D, 0x04},
	{0x330E, 0x01},
	{0x3301, 0x00},
};

/* Mode-independent recommended settings, applied once at sensor init
 * (see imx072_regs.rec_settings below). */
struct imx072_i2c_reg_conf imx072_recommend_settings[] = {
	{0x0307, 0x12},
	{0x302B, 0x4B},
	{0x0101, 0x03},
	{0x300A, 0x80},
	{0x3014, 0x08},
	{0x3015, 0x37},
	{0x3017, 0x40},
	{0x301C, 0x01},
	{0x3031, 0x28},
	{0x3040, 0x00},
	{0x3041, 0x60},
	{0x3051, 0x24},
	{0x3053, 0x34},
	{0x3055, 0x3B},
	{0x3057, 0xC0},
	{0x3060, 0x30},
	{0x3065, 0x00},
	{0x30AA, 0x88},
	{0x30AB, 0x1C},
	{0x30B0, 0x32},
	{0x30B2, 0x83},
	{0x30D3, 0x04},
	{0x310E, 0xDD},
	{0x31A4, 0xD8},
	{0x31A6, 0x17},
	{0x31AC, 0xCF},
	{0x31AE, 0xF1},
	{0x31B4, 0xD8},
	{0x31B6, 0x17},
	{0x3304, 0x05},
	{0x3305, 0x04},
	{0x3306, 0x15},
	{0x3307, 0x02},
	{0x3308, 0x11},
	{0x3309, 0x07},
	{0x330A, 0x05},
	{0x330B, 0x04},
	{0x330C, 0x05},
	{0x330D, 0x04},
	{0x330E, 0x01},
	{0x30d8, 0x20},
};

/* Per-resolution configuration array: index 0 = preview, index 1 = snapshot. */
struct imx072_i2c_conf_array imx072_confs[] = {
	{&imx072_prev_settings[0], ARRAY_SIZE(imx072_prev_settings)},
	{&imx072_snap_settings[0], ARRAY_SIZE(imx072_snap_settings)},
};

/* Top-level register descriptor exported to the imx072 driver. */
struct imx072_reg imx072_regs = {
	.rec_settings = &imx072_recommend_settings[0],
	.rec_size = ARRAY_SIZE(imx072_recommend_settings),
	.conf_array = &imx072_confs[0],
};
gpl-2.0
Lolzen/kernel_mako
drivers/media/video/msm/ov9726_reg.c
1764
3234
/* Copyright (c) 2011, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include "ov9726.h"

/*
 * One-shot initialization sequence for the OmniVision OV9726 sensor.
 *
 * Each entry is a {register address, value} pair.  The sequence begins with
 * a software reset (0x0103), programs frame timing, output size, PLL and
 * MIPI control, enables streaming via MODE_SELECT (0x0100), and finishes
 * with AEC tuning registers.  Entries are presumably written to the sensor
 * in array order over I2C -- confirm against the consumer of
 * ov9726_init_settings_array in the ov9726 driver.
 */
struct reg_struct_type ov9726_init_settings_array[] = {
	{0x0103, 0x01}, /* SOFTWARE_RESET */
	{0x3026, 0x00}, /* OUTPUT_SELECT01 */
	{0x3027, 0x00}, /* OUTPUT_SELECT02 */
	{0x3002, 0xe8}, /* IO_CTRL00 */
	{0x3004, 0x03}, /* IO_CTRL01 */
	{0x3005, 0xff}, /* IO_CTRL02 */
	{0x3703, 0x42},
	{0x3704, 0x10},
	{0x3705, 0x45},
	{0x3603, 0xaa},
	{0x3632, 0x2f},
	{0x3620, 0x66},
	{0x3621, 0xc0},
	{0x0340, 0x03}, /* FRAME_LENGTH_LINES_HI */
	{0x0341, 0xC1}, /* FRAME_LENGTH_LINES_LO */
	{0x0342, 0x06}, /* LINE_LENGTH_PCK_HI */
	{0x0343, 0x80}, /* LINE_LENGTH_PCK_LO */
	{0x0202, 0x03}, /* COARSE_INTEGRATION_TIME_HI */
	{0x0203, 0x43}, /* COARSE_INTEGRATION_TIME_LO */
	{0x3833, 0x04},
	{0x3835, 0x02},
	{0x4702, 0x04},
	{0x4704, 0x00}, /* DVP_CTRL01 */
	{0x4706, 0x08},
	{0x5052, 0x01},
	{0x3819, 0x6e},
	{0x3817, 0x94},
	{0x3a18, 0x00}, /* AEC_GAIN_CEILING_HI */
	{0x3a19, 0x7f}, /* AEC_GAIN_CEILING_LO */
	{0x404e, 0x7e},
	{0x3631, 0x52},
	{0x3633, 0x50},
	{0x3630, 0xd2},
	{0x3604, 0x08},
	{0x3601, 0x40},
	{0x3602, 0x14},
	{0x3610, 0xa0},
	{0x3612, 0x20},
	{0x034c, 0x05}, /* X_OUTPUT_SIZE_HI */
	{0x034d, 0x10}, /* X_OUTPUT_SIZE_LO */
	{0x034e, 0x03}, /* Y_OUTPUT_SIZE_HI */
	{0x034f, 0x28}, /* Y_OUTPUT_SIZE_LO */
	/* NOTE(review): frame timing and integration registers below repeat
	 * the values already programmed above; presumably re-applied after
	 * the output-size setup -- confirm against the sensor datasheet. */
	{0x0340, 0x03}, /* FRAME_LENGTH_LINES_HI */
	{0x0341, 0xC1}, /* FRAME_LENGTH_LINES_LO */
	{0x0342, 0x06}, /* LINE_LENGTH_PCK_HI */
	{0x0343, 0x80}, /* LINE_LENGTH_PCK_LO */
	{0x0202, 0x03}, /* COARSE_INTEGRATION_TIME_HI */
	{0x0203, 0x43}, /* COARSE_INTEGRATION_TIME_LO */
	{0x0303, 0x01}, /* VT_SYS_CLK_DIV_LO */
	{0x3002, 0x00}, /* IO_CTRL00 */
	{0x3004, 0x00}, /* IO_CTRL01 */
	{0x3005, 0x00}, /* IO_CTRL02 */
	{0x4801, 0x0f}, /* MIPI_CTRL01 */
	{0x4803, 0x05}, /* MIPI_CTRL03 */
	{0x4601, 0x16}, /* VFIFO_READ_CONTROL */
	{0x3014, 0x05}, /* SC_CMMN_MIPI / SC_CTRL00 */
	{0x3104, 0x80},
	{0x0305, 0x04}, /* PRE_PLL_CLK_DIV_LO */
	{0x0307, 0x64}, /* PLL_MULTIPLIER_LO */
	{0x300c, 0x02},
	{0x300d, 0x20},
	{0x300e, 0x01},
	{0x3010, 0x01},
	{0x460e, 0x81}, /* VFIFO_CONTROL00 */
	{0x0101, 0x01}, /* IMAGE_ORIENTATION */
	{0x3707, 0x14},
	{0x3622, 0x9f},
	{0x5047, 0x3D}, /* ISP_CTRL47 */
	{0x4002, 0x45}, /* BLC_CTRL02 */
	{0x5000, 0x06}, /* ISP_CTRL0 */
	{0x5001, 0x00}, /* ISP_CTRL1 */
	{0x3406, 0x00}, /* AWB_MANUAL_CTRL */
	{0x3503, 0x13}, /* AEC_ENABLE */
	{0x4005, 0x18}, /* BLC_CTRL05 */
	{0x4837, 0x21},
	{0x0100, 0x01}, /* MODE_SELECT */
	{0x3a0f, 0x64}, /* AEC_CTRL0F */
	{0x3a10, 0x54}, /* AEC_CTRL10 */
	{0x3a11, 0xc2}, /* AEC_CTRL11 */
	{0x3a1b, 0x64}, /* AEC_CTRL1B */
	{0x3a1e, 0x54}, /* AEC_CTRL1E */
	{0x3a1a, 0x05}, /* AEC_DIFF_MAX */
};

/* Number of entries in ov9726_init_settings_array, exported for the driver. */
int32_t ov9726_array_length = sizeof(ov9726_init_settings_array) /
	sizeof(ov9726_init_settings_array[0]);
gpl-2.0
rickyzhang82/dragon-linux
drivers/gpu/drm/radeon/uvd_v4_2.c
2020
2271
/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "cikd.h"

/**
 * uvd_v4_2_resume - memory controller programming
 *
 * @rdev: radeon_device pointer
 *
 * Let the UVD memory controller know its offsets.  The three VCPU cache
 * windows are laid out back to back in the UVD buffer: firmware image
 * first, then stack, then heap.  All offsets and sizes are programmed in
 * 8-byte units (hence the ">> 3" shifts).  Address bits above bit 27 go
 * into the separate LMI extension registers.
 *
 * Returns 0 (cannot fail).
 */
int uvd_v4_2_resume(struct radeon_device *rdev)
{
	uint64_t addr;
	uint32_t size;

	/* program the VCPU memory controller bits 0-27 */
	addr = rdev->uvd.gpu_addr >> 3;
	/* +4 presumably reserves room for a small firmware header -- confirm;
	 * the result is page-aligned before converting to 8-byte units */
	size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 4) >> 3;
	WREG32(UVD_VCPU_CACHE_OFFSET0, addr);
	WREG32(UVD_VCPU_CACHE_SIZE0, size);

	/* stack window follows the firmware window */
	addr += size;
	size = RADEON_UVD_STACK_SIZE >> 3;
	WREG32(UVD_VCPU_CACHE_OFFSET1, addr);
	WREG32(UVD_VCPU_CACHE_SIZE1, size);

	/* heap window follows the stack window */
	addr += size;
	size = RADEON_UVD_HEAP_SIZE >> 3;
	WREG32(UVD_VCPU_CACHE_OFFSET2, addr);
	WREG32(UVD_VCPU_CACHE_SIZE2, size);

	/* bits 28-31 */
	addr = (rdev->uvd.gpu_addr >> 28) & 0xF;
	WREG32(UVD_LMI_ADDR_EXT, (addr << 12) | (addr << 0));

	/* bits 32-39; 0x1U avoids left-shifting a signed int into the sign
	 * bit (undefined behavior in C), same bit pattern as before */
	addr = (rdev->uvd.gpu_addr >> 32) & 0xFF;
	WREG32(UVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1U << 31));

	return 0;
}
gpl-2.0
sakindia123/android_kernel_htc_pico
drivers/scsi/atari_NCR5380.c
2532
92653
/* * NCR 5380 generic driver routines. These should make it *trivial* * to implement 5380 SCSI drivers under Linux with a non-trantor * architecture. * * Note that these routines also work with NR53c400 family chips. * * Copyright 1993, Drew Eckhardt * Visionary Computing * (Unix and Linux consulting and custom programming) * drew@colorado.edu * +1 (303) 666-5836 * * DISTRIBUTION RELEASE 6. * * For more information, please consult * * NCR 5380 Family * SCSI Protocol Controller * Databook * * NCR Microelectronics * 1635 Aeroplaza Drive * Colorado Springs, CO 80916 * 1+ (719) 578-3400 * 1+ (800) 334-5454 */ /* * ++roman: To port the 5380 driver to the Atari, I had to do some changes in * this file, too: * * - Some of the debug statements were incorrect (undefined variables and the * like). I fixed that. * * - In information_transfer(), I think a #ifdef was wrong. Looking at the * possible DMA transfer size should also happen for REAL_DMA. I added this * in the #if statement. * * - When using real DMA, information_transfer() should return in a DATAOUT * phase after starting the DMA. It has nothing more to do. * * - The interrupt service routine should run main after end of DMA, too (not * only after RESELECTION interrupts). Additionally, it should _not_ test * for more interrupts after running main, since a DMA process may have * been started and interrupts are turned on now. The new int could happen * inside the execution of NCR5380_intr(), leading to recursive * calls. * * - I've added a function merge_contiguous_buffers() that tries to * merge scatter-gather buffers that are located at contiguous * physical addresses and can be processed with the same DMA setup. * Since most scatter-gather operations work on a page (4K) of * 4 buffers (1K), in more than 90% of all cases three interrupts and * DMA setup actions are saved. 
* * - I've deleted all the stuff for AUTOPROBE_IRQ, REAL_DMA_POLL, PSEUDO_DMA * and USLEEP, because these were messing up readability and will never be * needed for Atari SCSI. * * - I've revised the NCR5380_main() calling scheme (relax the 'main_running' * stuff), and 'main' is executed in a bottom half if awoken by an * interrupt. * * - The code was quite cluttered up by "#if (NDEBUG & NDEBUG_*) printk..." * constructs. In my eyes, this made the source rather unreadable, so I * finally replaced that by the *_PRINTK() macros. * */ /* * Further development / testing that should be done : * 1. Test linked command handling code after Eric is ready with * the high level code. */ #include <scsi/scsi_dbg.h> #include <scsi/scsi_transport_spi.h> #if (NDEBUG & NDEBUG_LISTS) #define LIST(x, y) \ do { \ printk("LINE:%d Adding %p to %p\n", \ __LINE__, (void*)(x), (void*)(y)); \ if ((x) == (y)) \ udelay(5); \ } while (0) #define REMOVE(w, x, y, z) \ do { \ printk("LINE:%d Removing: %p->%p %p->%p \n", \ __LINE__, (void*)(w), (void*)(x), \ (void*)(y), (void*)(z)); \ if ((x) == (y)) \ udelay(5); \ } while (0) #else #define LIST(x,y) #define REMOVE(w,x,y,z) #endif #ifndef notyet #undef LINKED #endif /* * Design * Issues : * * The other Linux SCSI drivers were written when Linux was Intel PC-only, * and specifically for each board rather than each chip. This makes their * adaptation to platforms like the Mac (Some of which use NCR5380's) * more difficult than it has to be. * * Also, many of the SCSI drivers were written before the command queuing * routines were implemented, meaning their implementations of queued * commands were hacked on rather than designed in from the start. * * When I designed the Linux SCSI drivers I figured that * while having two different SCSI boards in a system might be useful * for debugging things, two of the same type wouldn't be used. 
* Well, I was wrong and a number of users have mailed me about running * multiple high-performance SCSI boards in a server. * * Finally, when I get questions from users, I have no idea what * revision of my driver they are running. * * This driver attempts to address these problems : * This is a generic 5380 driver. To use it on a different platform, * one simply writes appropriate system specific macros (ie, data * transfer - some PC's will use the I/O bus, 68K's must use * memory mapped) and drops this file in their 'C' wrapper. * * As far as command queueing, two queues are maintained for * each 5380 in the system - commands that haven't been issued yet, * and commands that are currently executing. This means that an * unlimited number of commands may be queued, letting * more commands propagate from the higher driver levels giving higher * throughput. Note that both I_T_L and I_T_L_Q nexuses are supported, * allowing multiple commands to propagate all the way to a SCSI-II device * while a command is already executing. * * To solve the multiple-boards-in-the-same-system problem, * there is a separate instance structure for each instance * of a 5380 in the system. So, multiple NCR5380 drivers will * be able to coexist with appropriate changes to the high level * SCSI code. * * A NCR5380_PUBLIC_REVISION macro is provided, with the release * number (updated for each public release) printed by the * NCR5380_print_options command, which should be called from the * wrapper detect function, so that I know what release of the driver * users are using. * * Issues specific to the NCR5380 : * * When used in a PIO or pseudo-dma mode, the NCR5380 is a braindead * piece of hardware that requires you to sit in a loop polling for * the REQ signal as long as you are connected. Some devices are * brain dead (ie, many TEXEL CD ROM drives) and won't disconnect * while doing long seek operations. * * The workaround for this is to keep track of devices that have * disconnected. 
If the device hasn't disconnected, for commands that * should disconnect, we do something like * * while (!REQ is asserted) { sleep for N usecs; poll for M usecs } * * Some tweaking of N and M needs to be done. An algorithm based * on "time to data" would give the best results as long as short time * to datas (ie, on the same track) were considered, however these * broken devices are the exception rather than the rule and I'd rather * spend my time optimizing for the normal case. * * Architecture : * * At the heart of the design is a coroutine, NCR5380_main, * which is started when not running by the interrupt handler, * timer, and queue command function. It attempts to establish * I_T_L or I_T_L_Q nexuses by removing the commands from the * issue queue and calling NCR5380_select() if a nexus * is not established. * * Once a nexus is established, the NCR5380_information_transfer() * phase goes through the various phases as instructed by the target. * if the target goes into MSG IN and sends a DISCONNECT message, * the command structure is placed into the per instance disconnected * queue, and NCR5380_main tries to find more work. If USLEEP * was defined, and the target is idle for too long, the system * will try to sleep. * * If a command has disconnected, eventually an interrupt will trigger, * calling NCR5380_intr() which will in turn call NCR5380_reselect * to reestablish a nexus. This will run main if necessary. * * On command termination, the done function will be called as * appropriate. * * SCSI pointers are maintained in the SCp field of SCSI command * structures, being initialized after the command is connected * in NCR5380_select, and set as appropriate in NCR5380_information_transfer. * Note that in violation of the standard, an implicit SAVE POINTERS operation * is done, since some BROKEN disks fail to issue an explicit SAVE POINTERS. */ /* * Using this file : * This file a skeleton Linux SCSI driver for the NCR 5380 series * of chips. 
To use it, you write an architecture specific functions * and macros and include this file in your driver. * * These macros control options : * AUTOSENSE - if defined, REQUEST SENSE will be performed automatically * for commands that return with a CHECK CONDITION status. * * LINKED - if defined, linked commands are supported. * * REAL_DMA - if defined, REAL DMA is used during the data transfer phases. * * SUPPORT_TAGS - if defined, SCSI-2 tagged queuing is used where possible * * These macros MUST be defined : * * NCR5380_read(register) - read from the specified register * * NCR5380_write(register, value) - write to the specific register * * Either real DMA *or* pseudo DMA may be implemented * REAL functions : * NCR5380_REAL_DMA should be defined if real DMA is to be used. * Note that the DMA setup functions should return the number of bytes * that they were able to program the controller for. * * Also note that generic i386/PC versions of these macros are * available as NCR5380_i386_dma_write_setup, * NCR5380_i386_dma_read_setup, and NCR5380_i386_dma_residual. * * NCR5380_dma_write_setup(instance, src, count) - initialize * NCR5380_dma_read_setup(instance, dst, count) - initialize * NCR5380_dma_residual(instance); - residual count * * PSEUDO functions : * NCR5380_pwrite(instance, src, count) * NCR5380_pread(instance, dst, count); * * If nothing specific to this implementation needs doing (ie, with external * hardware), you must also define * * NCR5380_queue_command * NCR5380_reset * NCR5380_abort * NCR5380_proc_info * * to be the global entry points into the specific driver, ie * #define NCR5380_queue_command t128_queue_command. * * If this is not done, the routines will be defined as static functions * with the NCR5380* names and the user must provide a globally * accessible wrapper function. * * The generic driver is initialized by calling NCR5380_init(instance), * after setting the appropriate host specific fields and ID. 
If the * driver wishes to autoprobe for an IRQ line, the NCR5380_probe_irq(instance, * possible) function may be used. Before the specific driver initialization * code finishes, NCR5380_print_options should be called. */ static struct Scsi_Host *first_instance = NULL; static struct scsi_host_template *the_template = NULL; /* Macros ease life... :-) */ #define SETUP_HOSTDATA(in) \ struct NCR5380_hostdata *hostdata = \ (struct NCR5380_hostdata *)(in)->hostdata #define HOSTDATA(in) ((struct NCR5380_hostdata *)(in)->hostdata) #define NEXT(cmd) ((Scsi_Cmnd *)(cmd)->host_scribble) #define SET_NEXT(cmd,next) ((cmd)->host_scribble = (void *)(next)) #define NEXTADDR(cmd) ((Scsi_Cmnd **)&(cmd)->host_scribble) #define HOSTNO instance->host_no #define H_NO(cmd) (cmd)->device->host->host_no #ifdef SUPPORT_TAGS /* * Functions for handling tagged queuing * ===================================== * * ++roman (01/96): Now I've implemented SCSI-2 tagged queuing. Some notes: * * Using consecutive numbers for the tags is no good idea in my eyes. There * could be wrong re-usings if the counter (8 bit!) wraps and some early * command has been preempted for a long time. My solution: a bitfield for * remembering used tags. * * There's also the problem that each target has a certain queue size, but we * cannot know it in advance :-( We just see a QUEUE_FULL status being * returned. So, in this case, the driver internal queue size assumption is * reduced to the number of active tags if QUEUE_FULL is returned by the * target. The command is returned to the mid-level, but with status changed * to BUSY, since --as I've seen-- the mid-level can't handle QUEUE_FULL * correctly. * * We're also not allowed running tagged commands as long as an untagged * command is active. And REQUEST SENSE commands after a contingent allegiance * condition _must_ be untagged. 
To keep track whether an untagged command has
 * been issued, the host->busy array is still employed, as it is without
 * support for tagged queuing.
 *
 * One could suspect that there are possible race conditions between
 * is_lun_busy(), cmd_get_tag() and cmd_free_tag(). But I think this isn't the
 * case: is_lun_busy() and cmd_get_tag() are both called from NCR5380_main(),
 * which is already guaranteed to be running at most once. It is also the only
 * place where tags/LUNs are allocated. So no other allocation can slip
 * between that pair, there could only happen a reselection, which can free a
 * tag, but that doesn't hurt. Only the sequence in cmd_free_tag() becomes
 * important: the tag bit must be cleared before 'nr_allocated' is decreased.
 */

/* -1 for TAG_NONE is not possible with unsigned char cmd->tag */
#undef TAG_NONE
#define TAG_NONE 0xff

/* Per-target/LUN tag bookkeeping. */
typedef struct {
	DECLARE_BITMAP(allocated, MAX_TAGS);	/* bit set == tag in use */
	int nr_allocated;			/* number of bits set above */
	int queue_size;				/* current queue-depth estimate */
} TAG_ALLOC;

static TAG_ALLOC TagAlloc[8][8]; /* 8 targets and 8 LUNs */

/* Reset all tag bookkeeping to "nothing allocated"; called once at init.
 * No-op when tagged queuing was disabled on the command line.
 */
static void __init init_tags(void)
{
	int target, lun;
	TAG_ALLOC *ta;

	if (!setup_use_tagged_queuing)
		return;

	for (target = 0; target < 8; ++target) {
		for (lun = 0; lun < 8; ++lun) {
			ta = &TagAlloc[target][lun];
			bitmap_zero(ta->allocated, MAX_TAGS);
			ta->nr_allocated = 0;
			/* At the beginning, assume the maximum queue size we could
			 * support (MAX_TAGS). This value will be decreased if the target
			 * returns QUEUE_FULL status.
			 */
			ta->queue_size = MAX_TAGS;
		}
	}
}

/* Check if we can issue a command to this LUN: First see if the LUN is marked
 * busy by an untagged command. If the command should use tagged queuing, also
 * check that there is a free tag and the target's queue won't overflow. This
 * function should be called with interrupts disabled to avoid race
 * conditions.
 */
/* Returns non-zero if 'cmd' cannot be started on its target/LUN right now. */
static int is_lun_busy(Scsi_Cmnd *cmd, int should_be_tagged)
{
	SETUP_HOSTDATA(cmd->device->host);

	/* An untagged command still owns this LUN -> busy. */
	if (hostdata->busy[cmd->device->id] & (1 << cmd->device->lun))
		return 1;
	if (!should_be_tagged ||
	    !setup_use_tagged_queuing || !cmd->device->tagged_supported)
		return 0;
	/* Tagged path: busy when the (possibly reduced) queue depth is hit. */
	if (TagAlloc[cmd->device->id][cmd->device->lun].nr_allocated >=
	    TagAlloc[cmd->device->id][cmd->device->lun].queue_size) {
		TAG_PRINTK("scsi%d: target %d lun %d: no free tags\n",
			   H_NO(cmd), cmd->device->id, cmd->device->lun);
		return 1;
	}
	return 0;
}

/* Allocate a tag for a command (there are no checks anymore, is_lun_busy()
 * must be called before!), or reserve the LUN in 'busy' if the command is
 * untagged.
 */
static void cmd_get_tag(Scsi_Cmnd *cmd, int should_be_tagged)
{
	SETUP_HOSTDATA(cmd->device->host);

	/* If we or the target don't support tagged queuing, allocate the LUN for
	 * an untagged command.
	 */
	if (!should_be_tagged ||
	    !setup_use_tagged_queuing || !cmd->device->tagged_supported) {
		cmd->tag = TAG_NONE;
		hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun);
		TAG_PRINTK("scsi%d: target %d lun %d now allocated by untagged "
			   "command\n", H_NO(cmd), cmd->device->id, cmd->device->lun);
	} else {
		TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun];

		/* Pick the lowest free tag; caller guaranteed one exists. */
		cmd->tag = find_first_zero_bit(ta->allocated, MAX_TAGS);
		set_bit(cmd->tag, ta->allocated);
		ta->nr_allocated++;
		TAG_PRINTK("scsi%d: using tag %d for target %d lun %d "
			   "(now %d tags in use)\n",
			   H_NO(cmd), cmd->tag, cmd->device->id,
			   cmd->device->lun, ta->nr_allocated);
	}
}

/* Mark the tag of command 'cmd' as free, or in case of an untagged command,
 * unlock the LUN.
 */
static void cmd_free_tag(Scsi_Cmnd *cmd)
{
	SETUP_HOSTDATA(cmd->device->host);

	if (cmd->tag == TAG_NONE) {
		/* Untagged: release the whole LUN. */
		hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
		TAG_PRINTK("scsi%d: target %d lun %d untagged cmd finished\n",
			   H_NO(cmd), cmd->device->id, cmd->device->lun);
	} else if (cmd->tag >= MAX_TAGS) {
		printk(KERN_NOTICE "scsi%d: trying to free bad tag %d!\n",
		       H_NO(cmd), cmd->tag);
	} else {
		TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun];
		/* Order matters: clear the bit before decrementing the count
		 * (see the race discussion above). */
		clear_bit(cmd->tag, ta->allocated);
		ta->nr_allocated--;
		TAG_PRINTK("scsi%d: freed tag %d for target %d lun %d\n",
			   H_NO(cmd), cmd->tag, cmd->device->id, cmd->device->lun);
	}
}

/* Drop every allocated tag on every target/LUN, e.g. after a bus reset. */
static void free_all_tags(void)
{
	int target, lun;
	TAG_ALLOC *ta;

	if (!setup_use_tagged_queuing)
		return;

	for (target = 0; target < 8; ++target) {
		for (lun = 0; lun < 8; ++lun) {
			ta = &TagAlloc[target][lun];
			bitmap_zero(ta->allocated, MAX_TAGS);
			ta->nr_allocated = 0;
		}
	}
}

#endif /* SUPPORT_TAGS */


/*
 * Function: void merge_contiguous_buffers( Scsi_Cmnd *cmd )
 *
 * Purpose: Try to merge several scatter-gather requests into one DMA
 *    transfer. This is possible if the scatter buffers lie on
 *    physical contiguous addresses.
 *
 * Parameters: Scsi_Cmnd *cmd
 *    The command to work on. The first scatter buffer's data are
 *    assumed to be already transferred into ptr/this_residual.
 */
static void merge_contiguous_buffers(Scsi_Cmnd *cmd)
{
	unsigned long endaddr;
#if (NDEBUG & NDEBUG_MERGING)
	unsigned long oldlen = cmd->SCp.this_residual;
	int cnt = 1;
#endif

	/* Keep swallowing the following scatter element as long as it starts
	 * exactly at the physical address where the current run ends. */
	for (endaddr = virt_to_phys(cmd->SCp.ptr + cmd->SCp.this_residual - 1) + 1;
	     cmd->SCp.buffers_residual &&
	     virt_to_phys(sg_virt(&cmd->SCp.buffer[1])) == endaddr;) {
		MER_PRINTK("VTOP(%p) == %08lx -> merging\n",
			   page_address(sg_page(&cmd->SCp.buffer[1])), endaddr);
#if (NDEBUG & NDEBUG_MERGING)
		++cnt;
#endif
		++cmd->SCp.buffer;
		--cmd->SCp.buffers_residual;
		cmd->SCp.this_residual += cmd->SCp.buffer->length;
		endaddr += cmd->SCp.buffer->length;
	}
#if (NDEBUG & NDEBUG_MERGING)
	if (oldlen != cmd->SCp.this_residual)
		MER_PRINTK("merged %d buffers from %p, new length %08x\n",
			   cnt, cmd->SCp.ptr, cmd->SCp.this_residual);
#endif
}

/*
 * Function : void initialize_SCp(Scsi_Cmnd *cmd)
 *
 * Purpose : initialize the saved data pointers for cmd to point to the
 *	start of the buffer.
 *
 * Inputs : cmd - Scsi_Cmnd structure to have pointers reset.
 */
static inline void initialize_SCp(Scsi_Cmnd *cmd)
{
	/*
	 * Initialize the Scsi Pointer field so that all of the commands in the
	 * various queues are valid.
	 */

	if (scsi_bufflen(cmd)) {
		cmd->SCp.buffer = scsi_sglist(cmd);
		cmd->SCp.buffers_residual = scsi_sg_count(cmd) - 1;
		cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
		cmd->SCp.this_residual = cmd->SCp.buffer->length;
		/* ++roman: Try to merge some scatter-buffers if they are at
		 * contiguous physical addresses.
		 */
		merge_contiguous_buffers(cmd);
	} else {
		/* No data phase for this command. */
		cmd->SCp.buffer = NULL;
		cmd->SCp.buffers_residual = 0;
		cmd->SCp.ptr = NULL;
		cmd->SCp.this_residual = 0;
	}
}

#include <linux/delay.h>

#if NDEBUG
/* Lookup tables mapping register bits to human-readable names, used only
 * by the debug printers below. */
static struct {
	unsigned char mask;
	const char *name;
} signals[] = {
	{ SR_DBP, "PARITY"}, { SR_RST, "RST" }, { SR_BSY, "BSY" },
	{ SR_REQ, "REQ" }, { SR_MSG, "MSG" }, { SR_CD, "CD" }, { SR_IO, "IO" },
	{ SR_SEL, "SEL" }, {0, NULL}
}, basrs[] = {
	{BASR_ATN, "ATN"}, {BASR_ACK, "ACK"}, {0, NULL}
}, icrs[] = {
	{ICR_ASSERT_RST, "ASSERT RST"},{ICR_ASSERT_ACK, "ASSERT ACK"},
	{ICR_ASSERT_BSY, "ASSERT BSY"}, {ICR_ASSERT_SEL, "ASSERT SEL"},
	{ICR_ASSERT_ATN, "ASSERT ATN"}, {ICR_ASSERT_DATA, "ASSERT DATA"},
	{0, NULL}
}, mrs[] = {
	{MR_BLOCK_DMA_MODE, "MODE BLOCK DMA"}, {MR_TARGET, "MODE TARGET"},
	{MR_ENABLE_PAR_CHECK, "MODE PARITY CHECK"}, {MR_ENABLE_PAR_INTR,
	"MODE PARITY INTR"}, {MR_ENABLE_EOP_INTR,"MODE EOP INTR"},
	{MR_MONITOR_BSY, "MODE MONITOR BSY"}, {MR_DMA_MODE, "MODE DMA"},
	{MR_ARBITRATE, "MODE ARBITRATION"}, {0, NULL}
};

/*
 * Function : void NCR5380_print(struct Scsi_Host *instance)
 *
 * Purpose : print the SCSI bus signals for debugging purposes
 *
 * Input : instance - which NCR5380
 */
static void NCR5380_print(struct Scsi_Host *instance)
{
	unsigned char status, data, basr, mr, icr, i;
	unsigned long flags;

	/* Snapshot all registers atomically so the dump is consistent. */
	local_irq_save(flags);
	data = NCR5380_read(CURRENT_SCSI_DATA_REG);
	status = NCR5380_read(STATUS_REG);
	mr = NCR5380_read(MODE_REG);
	icr = NCR5380_read(INITIATOR_COMMAND_REG);
	basr = NCR5380_read(BUS_AND_STATUS_REG);
	local_irq_restore(flags);
	printk("STATUS_REG: %02x ", status);
	for (i = 0; signals[i].mask; ++i)
		if (status & signals[i].mask)
			printk(",%s", signals[i].name);
	printk("\nBASR: %02x ", basr);
	for (i = 0; basrs[i].mask; ++i)
		if (basr & basrs[i].mask)
			printk(",%s", basrs[i].name);
	printk("\nICR: %02x ", icr);
	for (i = 0; icrs[i].mask; ++i)
		if (icr & icrs[i].mask)
			printk(",%s", icrs[i].name);
	printk("\nMODE: %02x ", mr);
	for (i = 0; mrs[i].mask; ++i)
		if (mr & mrs[i].mask)
			printk(",%s", mrs[i].name);
	printk("\n");
}

static struct {
	unsigned char value;
	const char *name;
} phases[] = {
	{PHASE_DATAOUT, "DATAOUT"}, {PHASE_DATAIN, "DATAIN"},
	{PHASE_CMDOUT, "CMDOUT"}, {PHASE_STATIN, "STATIN"},
	{PHASE_MSGOUT, "MSGOUT"}, {PHASE_MSGIN, "MSGIN"},
	{PHASE_UNKNOWN, "UNKNOWN"}
};

/*
 * Function : void NCR5380_print_phase(struct Scsi_Host *instance)
 *
 * Purpose : print the current SCSI phase for debugging purposes
 *
 * Input : instance - which NCR5380
 */
static void NCR5380_print_phase(struct Scsi_Host *instance)
{
	unsigned char status;
	int i;

	status = NCR5380_read(STATUS_REG);
	if (!(status & SR_REQ))
		printk(KERN_DEBUG "scsi%d: REQ not asserted, phase unknown.\n", HOSTNO);
	else {
		/* Scan stops on a match or on the PHASE_UNKNOWN sentinel. */
		for (i = 0; (phases[i].value != PHASE_UNKNOWN) &&
		     (phases[i].value != (status & PHASE_MASK)); ++i)
			;
		printk(KERN_DEBUG "scsi%d: phase %s\n", HOSTNO, phases[i].name);
	}
}

#else /* !NDEBUG */

/* dummies... */
static inline void NCR5380_print(struct Scsi_Host *instance)
{
};
static inline void NCR5380_print_phase(struct Scsi_Host *instance)
{
};

#endif

/*
 * ++roman: New scheme of calling NCR5380_main()
 *
 * If we're not in an interrupt, we can call our main directly, it cannot be
 * already running. Else, we queue it on a task queue, if not 'main_running'
 * tells us that a lower level is already executing it. This way,
 * 'main_running' needs not be protected in a special way.
 *
 * queue_main() is a utility function for putting our main onto the task
 * queue, if main_running is false. It should be called only from a
 * interrupt or bottom half.
 */

#include <linux/gfp.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>

static volatile int main_running;
static DECLARE_WORK(NCR5380_tqueue, NCR5380_main);

static inline void queue_main(void)
{
	if (!main_running) {
		/* If in interrupt and NCR5380_main() not already running,
		   queue it on the 'immediate' task queue, to be processed
		   immediately after the current interrupt processing has
		   finished.
		 */
		schedule_work(&NCR5380_tqueue);
	}
	/* else: nothing to do: the running NCR5380_main() will pick up
	   any newly queued command. */
}

/* One-time global initialization hook; currently only logs on first call. */
static inline void NCR5380_all_init(void)
{
	static int done = 0;
	if (!done) {
		INI_PRINTK("scsi : NCR5380_all_init()\n");
		done = 1;
	}
}

/*
 * Function : void NCR5380_print_options (struct Scsi_Host *instance)
 *
 * Purpose : called by probe code indicating the NCR5380 driver
 *	options that were selected.
 *
 * Inputs : instance, pointer to this instance. Unused.
 */
static void __init NCR5380_print_options(struct Scsi_Host *instance)
{
	printk(" generic options"
#ifdef AUTOSENSE
	       " AUTOSENSE"
#endif
#ifdef REAL_DMA
	       " REAL DMA"
#endif
#ifdef PARITY
	       " PARITY"
#endif
#ifdef SUPPORT_TAGS
	       " SCSI-2 TAGGED QUEUING"
#endif
	       );
	printk(" generic release=%d", NCR5380_PUBLIC_RELEASE);
}

/*
 * Function : void NCR5380_print_status (struct Scsi_Host *instance)
 *
 * Purpose : print commands in the various queues, called from
 *	NCR5380_abort and NCR5380_debug to aid debugging.
 *
 * Inputs : instance, pointer to this instance.
 */
static void NCR5380_print_status(struct Scsi_Host *instance)
{
	char *pr_bfr;
	char *start;
	int len;

	NCR_PRINT(NDEBUG_ANY);
	NCR_PRINT_PHASE(NDEBUG_ANY);

	/* GFP_ATOMIC: may be called from interrupt/error-handling context. */
	pr_bfr = (char *)__get_free_page(GFP_ATOMIC);
	if (!pr_bfr) {
		printk("NCR5380_print_status: no memory for print buffer\n");
		return;
	}
	len = NCR5380_proc_info(instance, pr_bfr, &start, 0, PAGE_SIZE, 0);
	pr_bfr[len] = 0;
	printk("\n%s\n", pr_bfr);
	free_page((unsigned long)pr_bfr);
}

/******************************************/
/*
 * /proc/scsi/[dtc pas16 t128 generic]/[0-ASC_NUM_BOARD_SUPPORTED]
 *
 * *buffer: I/O buffer
 * **start: if inout == FALSE pointer into buffer where user read should start
 * offset: current offset
 * length: length of buffer
 * hostno: Scsi_Host host_no
 * inout: TRUE - user is writing; FALSE - user is reading
 *
 * Return the number of bytes read from or written
 */

#undef SPRINTF
#define SPRINTF(fmt,args...) \
	do { \
		if (pos + strlen(fmt) + 20 /* slop */ < buffer + length) \
			pos += sprintf(pos, fmt , ## args); \
	} while(0)
static char *lprint_Scsi_Cmnd(Scsi_Cmnd *cmd, char *pos, char *buffer, int length);

static int NCR5380_proc_info(struct Scsi_Host *instance, char *buffer,
			     char **start, off_t offset, int length, int inout)
{
	char *pos = buffer;
	struct NCR5380_hostdata *hostdata;
	Scsi_Cmnd *ptr;
	unsigned long flags;
	off_t begin = 0;
/* Rewind 'pos' to the buffer start once 'offset' bytes have been skipped. */
#define check_offset()				\
	do {					\
		if (pos - buffer < offset - begin) {	\
			begin += pos - buffer;		\
			pos = buffer;			\
		}					\
	} while (0)

	hostdata = (struct NCR5380_hostdata *)instance->hostdata;

	if (inout)			/* Has data been written to the file ? */
		return -ENOSYS;		/* Currently this is a no-op */
	SPRINTF("NCR5380 core release=%d.\n", NCR5380_PUBLIC_RELEASE);
	check_offset();
	/* Walk the queues with interrupts off so the lists can't change
	 * underneath us. */
	local_irq_save(flags);
	SPRINTF("NCR5380: coroutine is%s running.\n",
		main_running ? "" : "n't");
	check_offset();
	if (!hostdata->connected)
		SPRINTF("scsi%d: no currently connected command\n", HOSTNO);
	else
		pos = lprint_Scsi_Cmnd((Scsi_Cmnd *) hostdata->connected,
				       pos, buffer, length);
	SPRINTF("scsi%d: issue_queue\n", HOSTNO);
	check_offset();
	for (ptr = (Scsi_Cmnd *)hostdata->issue_queue; ptr; ptr = NEXT(ptr)) {
		pos = lprint_Scsi_Cmnd(ptr, pos, buffer, length);
		check_offset();
	}

	SPRINTF("scsi%d: disconnected_queue\n", HOSTNO);
	check_offset();
	for (ptr = (Scsi_Cmnd *) hostdata->disconnected_queue; ptr;
	     ptr = NEXT(ptr)) {
		pos = lprint_Scsi_Cmnd(ptr, pos, buffer, length);
		check_offset();
	}

	local_irq_restore(flags);
	*start = buffer + (offset - begin);
	if (pos - buffer < offset - begin)
		return 0;
	else if (pos - buffer - (offset - begin) < length)
		return pos - buffer - (offset - begin);
	return length;
}

/* Append a one-command description (target/lun + CDB bytes) at 'pos'. */
static char *lprint_Scsi_Cmnd(Scsi_Cmnd *cmd, char *pos, char *buffer, int length)
{
	int i, s;
	unsigned char *command;
	SPRINTF("scsi%d: destination target %d, lun %d\n",
		H_NO(cmd), cmd->device->id, cmd->device->lun);
	SPRINTF(" command = ");
	command = cmd->cmnd;
	SPRINTF("%2d (0x%02x)", command[0], command[0]);
	for (i = 1, s = COMMAND_SIZE(command[0]); i < s; ++i)
		SPRINTF(" %02x", command[i]);
	SPRINTF("\n");
	return pos;
}

/*
 * Function : void NCR5380_init (struct Scsi_Host *instance)
 *
 * Purpose : initializes *instance and corresponding 5380 chip.
 *
 * Inputs : instance - instantiation of the 5380 driver.
 *
 * Notes : I assume that the host, hostno, and id bits have been
 *	set correctly. I don't care about the irq and other fields.
 *	The 'flags' parameter is currently unused.
 *
 */
static int __init NCR5380_init(struct Scsi_Host *instance, int flags)
{
	int i;
	SETUP_HOSTDATA(instance);

	NCR5380_all_init();

	hostdata->aborted = 0;
	hostdata->id_mask = 1 << instance->this_id;
	/* Mask of all SCSI IDs with higher arbitration priority than ours. */
	hostdata->id_higher_mask = 0;
	for (i = hostdata->id_mask; i <= 0x80; i <<= 1)
		if (i > hostdata->id_mask)
			hostdata->id_higher_mask |= i;
	for (i = 0; i < 8; ++i)
		hostdata->busy[i] = 0;
#ifdef SUPPORT_TAGS
	init_tags();
#endif
#if defined (REAL_DMA)
	hostdata->dma_len = 0;
#endif
	hostdata->targets_present = 0;
	hostdata->connected = NULL;
	hostdata->issue_queue = NULL;
	hostdata->disconnected_queue = NULL;
	hostdata->flags = FLAG_CHECK_LAST_BYTE_SENT;

	if (!the_template) {
		the_template = instance->hostt;
		first_instance = instance;
	}

#ifndef AUTOSENSE
	if ((instance->cmd_per_lun > 1) || (instance->can_queue > 1))
		printk("scsi%d: WARNING : support for multiple outstanding commands enabled\n"
		       " without AUTOSENSE option, contingent allegiance conditions may\n"
		       " be incorrectly cleared.\n", HOSTNO);
#endif /* ndef AUTOSENSE */

	/* Put the chip into a known quiescent state. */
	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
	NCR5380_write(MODE_REG, MR_BASE);
	NCR5380_write(TARGET_COMMAND_REG, 0);
	NCR5380_write(SELECT_ENABLE_REG, 0);

	return 0;
}

/*
 * Function : int NCR5380_queue_command (Scsi_Cmnd *cmd,
 *	void (*done)(Scsi_Cmnd *))
 *
 * Purpose : enqueues a SCSI command
 *
 * Inputs : cmd - SCSI command, done - function called on completion, with
 *	a pointer to the command descriptor.
* * Returns : 0 * * Side effects : * cmd is added to the per instance issue_queue, with minor * twiddling done to the host specific fields of cmd. If the * main coroutine is not running, it is restarted. * */ static int NCR5380_queue_command_lck(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *)) { SETUP_HOSTDATA(cmd->device->host); Scsi_Cmnd *tmp; int oldto; unsigned long flags; #if (NDEBUG & NDEBUG_NO_WRITE) switch (cmd->cmnd[0]) { case WRITE_6: case WRITE_10: printk(KERN_NOTICE "scsi%d: WRITE attempted with NO_WRITE debugging flag set\n", H_NO(cmd)); cmd->result = (DID_ERROR << 16); done(cmd); return 0; } #endif /* (NDEBUG & NDEBUG_NO_WRITE) */ #ifdef NCR5380_STATS # if 0 if (!hostdata->connected && !hostdata->issue_queue && !hostdata->disconnected_queue) { hostdata->timebase = jiffies; } # endif # ifdef NCR5380_STAT_LIMIT if (scsi_bufflen(cmd) > NCR5380_STAT_LIMIT) # endif switch (cmd->cmnd[0]) { case WRITE: case WRITE_6: case WRITE_10: hostdata->time_write[cmd->device->id] -= (jiffies - hostdata->timebase); hostdata->bytes_write[cmd->device->id] += scsi_bufflen(cmd); hostdata->pendingw++; break; case READ: case READ_6: case READ_10: hostdata->time_read[cmd->device->id] -= (jiffies - hostdata->timebase); hostdata->bytes_read[cmd->device->id] += scsi_bufflen(cmd); hostdata->pendingr++; break; } #endif /* * We use the host_scribble field as a pointer to the next command * in a queue */ SET_NEXT(cmd, NULL); cmd->scsi_done = done; cmd->result = 0; /* * Insert the cmd into the issue queue. Note that REQUEST SENSE * commands are added to the head of the queue since any command will * clear the contingent allegiance condition that exists and the * sense data is only guaranteed to be valid while the condition exists. */ local_irq_save(flags); /* ++guenther: now that the issue queue is being set up, we can lock ST-DMA. * Otherwise a running NCR5380_main may steal the lock. * Lock before actually inserting due to fairness reasons explained in * atari_scsi.c. 
If we insert first, then it's impossible for this driver * to release the lock. * Stop timer for this command while waiting for the lock, or timeouts * may happen (and they really do), and it's no good if the command doesn't * appear in any of the queues. * ++roman: Just disabling the NCR interrupt isn't sufficient here, * because also a timer int can trigger an abort or reset, which would * alter queues and touch the lock. */ if (!IS_A_TT()) { /* perhaps stop command timer here */ falcon_get_lock(); /* perhaps restart command timer here */ } if (!(hostdata->issue_queue) || (cmd->cmnd[0] == REQUEST_SENSE)) { LIST(cmd, hostdata->issue_queue); SET_NEXT(cmd, hostdata->issue_queue); hostdata->issue_queue = cmd; } else { for (tmp = (Scsi_Cmnd *)hostdata->issue_queue; NEXT(tmp); tmp = NEXT(tmp)) ; LIST(cmd, tmp); SET_NEXT(tmp, cmd); } local_irq_restore(flags); QU_PRINTK("scsi%d: command added to %s of queue\n", H_NO(cmd), (cmd->cmnd[0] == REQUEST_SENSE) ? "head" : "tail"); /* If queue_command() is called from an interrupt (real one or bottom * half), we let queue_main() do the job of taking care about main. If it * is already running, this is a no-op, else main will be queued. * * If we're not in an interrupt, we can call NCR5380_main() * unconditionally, because it cannot be already running. */ if (in_interrupt() || ((flags >> 8) & 7) >= 6) queue_main(); else NCR5380_main(NULL); return 0; } static DEF_SCSI_QCMD(NCR5380_queue_command) /* * Function : NCR5380_main (void) * * Purpose : NCR5380_main is a coroutine that runs as long as more work can * be done on the NCR5380 host adapters in a system. Both * NCR5380_queue_command() and NCR5380_intr() will try to start it * in case it is not running. * * NOTE : NCR5380_main exits with interrupts *disabled*, the caller should * reenable them. This prevents reentrancy and kernel stack overflow. 
 */
static void NCR5380_main(struct work_struct *work)
{
	Scsi_Cmnd *tmp, *prev;
	struct Scsi_Host *instance = first_instance;
	struct NCR5380_hostdata *hostdata = HOSTDATA(instance);
	int done;
	unsigned long flags;

	/*
	 * We run (with interrupts disabled) until we're sure that none of
	 * the host adapters have anything that can be done, at which point
	 * we set main_running to 0 and exit.
	 *
	 * Interrupts are enabled before doing various other internal
	 * instructions, after we've decided that we need to run through
	 * the loop again.
	 *
	 * this should prevent any race conditions.
	 *
	 * ++roman: Just disabling the NCR interrupt isn't sufficient here,
	 * because also a timer int can trigger an abort or reset, which can
	 * alter queues and touch the Falcon lock.
	 */

	/* Tell int handlers main() is now already executing. Note that
	   no races are possible here. If an int comes in before
	   'main_running' is set here, and queues/executes main via the
	   task queue, it doesn't do any harm, just this instance of main
	   won't find any work left to do. */
	if (main_running)
		return;
	main_running = 1;

	local_save_flags(flags);
	do {
		local_irq_disable();	/* Freeze request queues */
		done = 1;
		if (!hostdata->connected) {
			MAIN_PRINTK("scsi%d: not connected\n", HOSTNO);
			/*
			 * Search through the issue_queue for a command destined
			 * for a target that's not busy.
			 */
#if (NDEBUG & NDEBUG_LISTS)
			/* Debug-only loop detector for a corrupted issue_queue. */
			for (tmp = (Scsi_Cmnd *) hostdata->issue_queue, prev = NULL;
			     tmp && (tmp != prev); prev = tmp, tmp = NEXT(tmp))
				;
			/*printk("%p ", tmp);*/
			if ((tmp == prev) && tmp)
				printk(" LOOP\n");
			/* else printk("\n"); */
#endif
			for (tmp = (Scsi_Cmnd *) hostdata->issue_queue,
			     prev = NULL; tmp; prev = tmp, tmp = NEXT(tmp)) {
#if (NDEBUG & NDEBUG_LISTS)
				if (prev != tmp)
					printk("MAIN tmp=%p target=%d busy=%d lun=%d\n",
					       tmp, tmp->device->id, hostdata->busy[tmp->device->id],
					       tmp->device->lun);
#endif
				/* When we find one, remove it from the issue queue. */
				/* ++guenther: possible race with Falcon locking */
				if (
#ifdef SUPPORT_TAGS
				    !is_lun_busy( tmp, tmp->cmnd[0] != REQUEST_SENSE)
#else
				    !(hostdata->busy[tmp->device->id] & (1 << tmp->device->lun))
#endif
				    ) {
					/* ++guenther: just to be sure, this must be atomic */
					local_irq_disable();
					if (prev) {
						REMOVE(prev, NEXT(prev), tmp, NEXT(tmp));
						SET_NEXT(prev, NEXT(tmp));
					} else {
						REMOVE(-1, hostdata->issue_queue, tmp, NEXT(tmp));
						hostdata->issue_queue = NEXT(tmp);
					}
					SET_NEXT(tmp, NULL);
					falcon_dont_release++;

					/* reenable interrupts after finding one */
					local_irq_restore(flags);

					/*
					 * Attempt to establish an I_T_L nexus here.
					 * On success, instance->hostdata->connected is set.
					 * On failure, we must add the command back to the
					 *   issue queue so we can keep trying.
					 */
					MAIN_PRINTK("scsi%d: main(): command for target %d "
						    "lun %d removed from issue_queue\n",
						    HOSTNO, tmp->device->id, tmp->device->lun);
					/*
					 * REQUEST SENSE commands are issued without tagged
					 * queueing, even on SCSI-II devices because the
					 * contingent allegiance condition exists for the
					 * entire unit.
					 */
					/* ++roman: ...and the standard also requires that
					 * REQUEST SENSE command are untagged.
					 */

#ifdef SUPPORT_TAGS
					cmd_get_tag(tmp, tmp->cmnd[0] != REQUEST_SENSE);
#endif
					if (!NCR5380_select(instance, tmp,
					    (tmp->cmnd[0] == REQUEST_SENSE) ? TAG_NONE :
					    TAG_NEXT)) {
						falcon_dont_release--;
						/* release if target did not response! */
						falcon_release_lock_if_possible(hostdata);
						break;
					} else {
						/* Selection failed: requeue at the head and
						 * undo the tag/LUN reservation. */
						local_irq_disable();
						LIST(tmp, hostdata->issue_queue);
						SET_NEXT(tmp, hostdata->issue_queue);
						hostdata->issue_queue = tmp;
#ifdef SUPPORT_TAGS
						cmd_free_tag(tmp);
#endif
						falcon_dont_release--;
						local_irq_restore(flags);
						MAIN_PRINTK("scsi%d: main(): select() failed, "
							    "returned to issue_queue\n", HOSTNO);
						if (hostdata->connected)
							break;
					}
				} /* if target/lun/target queue is not busy */
			} /* for issue_queue */
		} /* if (!hostdata->connected) */

		if (hostdata->connected
#ifdef REAL_DMA
		    && !hostdata->dma_len
#endif
		    ) {
			local_irq_restore(flags);
			MAIN_PRINTK("scsi%d: main: performing information transfer\n",
				    HOSTNO);
			NCR5380_information_transfer(instance);
			MAIN_PRINTK("scsi%d: main: done set false\n", HOSTNO);
			done = 0;
		}
	} while (!done);

	/* Better allow ints _after_ 'main_running' has been cleared, else
	   an interrupt could believe we'll pick up the work it left for
	   us, but we won't see it anymore here... */
	main_running = 0;
	local_irq_restore(flags);
}


#ifdef REAL_DMA
/*
 * Function : void NCR5380_dma_complete (struct Scsi_Host *instance)
 *
 * Purpose : Called by interrupt handler when DMA finishes or a phase
 *	mismatch occurs (which would finish the DMA transfer).
 *
 * Inputs : instance - this instance of the NCR5380.
 *
 */
static void NCR5380_dma_complete(struct Scsi_Host *instance)
{
	SETUP_HOSTDATA(instance);
	int transfered, saved_data = 0, overrun = 0, cnt, toPIO;
	unsigned char **data, p;
	volatile int *count;

	if (!hostdata->connected) {
		printk(KERN_WARNING "scsi%d: received end of DMA interrupt with "
		       "no connected cmd\n", HOSTNO);
		return;
	}

	/* Read-overrun workaround: the chip may have latched one extra byte
	 * past the DMA count; rescue it before the mode is reset. */
	if (atari_read_overruns) {
		p = hostdata->connected->SCp.phase;
		if (p & SR_IO) {
			udelay(10);
			if ((NCR5380_read(BUS_AND_STATUS_REG) &
			     (BASR_PHASE_MATCH|BASR_ACK)) ==
			    (BASR_PHASE_MATCH|BASR_ACK)) {
				saved_data = NCR5380_read(INPUT_DATA_REG);
				overrun = 1;
				DMA_PRINTK("scsi%d: read overrun handled\n", HOSTNO);
			}
		}
	}

	DMA_PRINTK("scsi%d: real DMA transfer complete, basr 0x%X, sr 0x%X\n",
		   HOSTNO, NCR5380_read(BUS_AND_STATUS_REG),
		   NCR5380_read(STATUS_REG));

	/* Ack any pending parity interrupt and take the chip out of DMA mode. */
	(void)NCR5380_read(RESET_PARITY_INTERRUPT_REG);
	NCR5380_write(MODE_REG, MR_BASE);
	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);

	transfered = hostdata->dma_len - NCR5380_dma_residual(instance);
	hostdata->dma_len = 0;

	/* Advance the saved data pointer past what DMA actually moved. */
	data = (unsigned char **)&hostdata->connected->SCp.ptr;
	count = &hostdata->connected->SCp.this_residual;
	*data += transfered;
	*count -= transfered;

	if (atari_read_overruns) {
		if ((NCR5380_read(STATUS_REG) & PHASE_MASK) == p && (p & SR_IO)) {
			cnt = toPIO = atari_read_overruns;
			if (overrun) {
				DMA_PRINTK("Got an input overrun, using saved byte\n");
				*(*data)++ = saved_data;
				(*count)--;
				cnt--;
				toPIO--;
			}
			/* Fetch the remaining overrun bytes by PIO. */
			DMA_PRINTK("Doing %d-byte PIO to 0x%08lx\n", cnt, (long)*data);
			NCR5380_transfer_pio(instance, &p, &cnt, data);
			*count -= toPIO - cnt;
		}
	}
}
#endif /* REAL_DMA */


/*
 * Function : void NCR5380_intr (int irq)
 *
 * Purpose : handle interrupts, reestablishing I_T_L or I_T_L_Q nexuses
 *	from the disconnected queue, and restarting NCR5380_main()
 *	as required.
 *
 * Inputs : int irq, irq that caused this interrupt.
*/
static irqreturn_t NCR5380_intr(int irq, void *dev_id)
{
	struct Scsi_Host *instance = first_instance;
	int done = 1, handled = 0;
	unsigned char basr;

	INT_PRINTK("scsi%d: NCR5380 irq triggered\n", HOSTNO);

	/* Look for pending interrupts */
	basr = NCR5380_read(BUS_AND_STATUS_REG);
	INT_PRINTK("scsi%d: BASR=%02x\n", HOSTNO, basr);
	/* dispatch to appropriate routine if found and done=0 */
	if (basr & BASR_IRQ) {
		NCR_PRINT(NDEBUG_INTR);
		/* SEL+IO together mean a target is reselecting us. */
		if ((NCR5380_read(STATUS_REG) & (SR_SEL|SR_IO)) == (SR_SEL|SR_IO)) {
			done = 0;
			ENABLE_IRQ();
			INT_PRINTK("scsi%d: SEL interrupt\n", HOSTNO);
			NCR5380_reselect(instance);
			(void)NCR5380_read(RESET_PARITY_INTERRUPT_REG);
		} else if (basr & BASR_PARITY_ERROR) {
			INT_PRINTK("scsi%d: PARITY interrupt\n", HOSTNO);
			(void)NCR5380_read(RESET_PARITY_INTERRUPT_REG);
		} else if ((NCR5380_read(STATUS_REG) & SR_RST) == SR_RST) {
			INT_PRINTK("scsi%d: RESET interrupt\n", HOSTNO);
			(void)NCR5380_read(RESET_PARITY_INTERRUPT_REG);
		} else {
			/*
			 * The rest of the interrupt conditions can occur only during a
			 * DMA transfer
			 */
#if defined(REAL_DMA)
			/*
			 * We should only get PHASE MISMATCH and EOP interrupts if we have
			 * DMA enabled, so do a sanity check based on the current setting
			 * of the MODE register.
			 */
			if ((NCR5380_read(MODE_REG) & MR_DMA_MODE) &&
			    ((basr & BASR_END_DMA_TRANSFER) ||
			     !(basr & BASR_PHASE_MATCH))) {
				INT_PRINTK("scsi%d: PHASE MISM or EOP interrupt\n", HOSTNO);
				NCR5380_dma_complete( instance );
				done = 0;
				ENABLE_IRQ();
			} else
#endif /* REAL_DMA */
			{
				/* MS: Ignore unknown phase mismatch interrupts (caused by EOP interrupt) */
				if (basr & BASR_PHASE_MATCH)
					printk(KERN_NOTICE "scsi%d: unknown interrupt, "
					       "BASR 0x%x, MR 0x%x, SR 0x%x\n",
					       HOSTNO, basr, NCR5380_read(MODE_REG),
					       NCR5380_read(STATUS_REG));
				(void)NCR5380_read(RESET_PARITY_INTERRUPT_REG);
			}
		} /* if !(SELECTION || PARITY) */
		handled = 1;
	} /* BASR & IRQ */ else {
		printk(KERN_NOTICE "scsi%d: interrupt without IRQ bit set in BASR, "
		       "BASR 0x%X, MR 0x%X, SR 0x%x\n", HOSTNO, basr,
		       NCR5380_read(MODE_REG), NCR5380_read(STATUS_REG));
		(void)NCR5380_read(RESET_PARITY_INTERRUPT_REG);
	}

	if (!done) {
		INT_PRINTK("scsi%d: in int routine, calling main\n", HOSTNO);
		/* Put a call to NCR5380_main() on the queue... */
		queue_main();
	}
	return IRQ_RETVAL(handled);
}

#ifdef NCR5380_STATS
/* Accumulate per-target read/write timing statistics for a completed
 * command and decrement the matching pending counter.  Only commands
 * larger than NCR5380_STAT_LIMIT are counted when that limit is defined.
 */
static void collect_stats(struct NCR5380_hostdata* hostdata, Scsi_Cmnd *cmd)
{
# ifdef NCR5380_STAT_LIMIT
	if (scsi_bufflen(cmd) > NCR5380_STAT_LIMIT)
# endif
		switch (cmd->cmnd[0]) {
		case WRITE:
		case WRITE_6:
		case WRITE_10:
			hostdata->time_write[cmd->device->id] +=
				(jiffies - hostdata->timebase);
			/*hostdata->bytes_write[cmd->device->id] += scsi_bufflen(cmd);*/
			hostdata->pendingw--;
			break;
		case READ:
		case READ_6:
		case READ_10:
			hostdata->time_read[cmd->device->id] +=
				(jiffies - hostdata->timebase);
			/*hostdata->bytes_read[cmd->device->id] += scsi_bufflen(cmd);*/
			hostdata->pendingr--;
			break;
		}
}
#endif

/*
 * Function : int NCR5380_select (struct Scsi_Host *instance, Scsi_Cmnd *cmd,
 *	int tag);
 *
 * Purpose : establishes I_T_L or I_T_L_Q nexus for new or existing command,
 *	including ARBITRATION, SELECTION, and initial message out for
 *	IDENTIFY and queue messages.
* * Inputs : instance - instantiation of the 5380 driver on which this * target lives, cmd - SCSI command to execute, tag - set to TAG_NEXT for * new tag, TAG_NONE for untagged queueing, otherwise set to the tag for * the command that is presently connected. * * Returns : -1 if selection could not execute for some reason, * 0 if selection succeeded or failed because the target * did not respond. * * Side effects : * If bus busy, arbitration failed, etc, NCR5380_select() will exit * with registers as they should have been on entry - ie * SELECT_ENABLE will be set appropriately, the NCR5380 * will cease to drive any SCSI bus signals. * * If successful : I_T_L or I_T_L_Q nexus will be established, * instance->connected will be set to cmd. * SELECT interrupt will be disabled. * * If failed (no target) : cmd->scsi_done() will be called, and the * cmd->result host byte set to DID_BAD_TARGET. */ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag) { SETUP_HOSTDATA(instance); unsigned char tmp[3], phase; unsigned char *data; int len; unsigned long timeout; unsigned long flags; hostdata->restart_select = 0; NCR_PRINT(NDEBUG_ARBITRATION); ARB_PRINTK("scsi%d: starting arbitration, id = %d\n", HOSTNO, instance->this_id); /* * Set the phase bits to 0, otherwise the NCR5380 won't drive the * data bus during SELECTION. */ local_irq_save(flags); if (hostdata->connected) { local_irq_restore(flags); return -1; } NCR5380_write(TARGET_COMMAND_REG, 0); /* * Start arbitration. 
*/ NCR5380_write(OUTPUT_DATA_REG, hostdata->id_mask); NCR5380_write(MODE_REG, MR_ARBITRATE); local_irq_restore(flags); /* Wait for arbitration logic to complete */ #if defined(NCR_TIMEOUT) { unsigned long timeout = jiffies + 2*NCR_TIMEOUT; while (!(NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_PROGRESS) && time_before(jiffies, timeout) && !hostdata->connected) ; if (time_after_eq(jiffies, timeout)) { printk("scsi : arbitration timeout at %d\n", __LINE__); NCR5380_write(MODE_REG, MR_BASE); NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); return -1; } } #else /* NCR_TIMEOUT */ while (!(NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_PROGRESS) && !hostdata->connected) ; #endif ARB_PRINTK("scsi%d: arbitration complete\n", HOSTNO); if (hostdata->connected) { NCR5380_write(MODE_REG, MR_BASE); return -1; } /* * The arbitration delay is 2.2us, but this is a minimum and there is * no maximum so we can safely sleep for ceil(2.2) usecs to accommodate * the integral nature of udelay(). * */ udelay(3); /* Check for lost arbitration */ if ((NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) || (NCR5380_read(CURRENT_SCSI_DATA_REG) & hostdata->id_higher_mask) || (NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) || hostdata->connected) { NCR5380_write(MODE_REG, MR_BASE); ARB_PRINTK("scsi%d: lost arbitration, deasserting MR_ARBITRATE\n", HOSTNO); return -1; } /* after/during arbitration, BSY should be asserted. IBM DPES-31080 Version S31Q works now */ /* Tnx to Thomas_Roesch@m2.maus.de for finding this! 
(Roman) */ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_SEL | ICR_ASSERT_BSY); if ((NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) || hostdata->connected) { NCR5380_write(MODE_REG, MR_BASE); NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); ARB_PRINTK("scsi%d: lost arbitration, deasserting ICR_ASSERT_SEL\n", HOSTNO); return -1; } /* * Again, bus clear + bus settle time is 1.2us, however, this is * a minimum so we'll udelay ceil(1.2) */ #ifdef CONFIG_ATARI_SCSI_TOSHIBA_DELAY /* ++roman: But some targets (see above :-) seem to need a bit more... */ udelay(15); #else udelay(2); #endif if (hostdata->connected) { NCR5380_write(MODE_REG, MR_BASE); NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); return -1; } ARB_PRINTK("scsi%d: won arbitration\n", HOSTNO); /* * Now that we have won arbitration, start Selection process, asserting * the host and target ID's on the SCSI bus. */ NCR5380_write(OUTPUT_DATA_REG, (hostdata->id_mask | (1 << cmd->device->id))); /* * Raise ATN while SEL is true before BSY goes false from arbitration, * since this is the only way to guarantee that we'll get a MESSAGE OUT * phase immediately after selection. */ NCR5380_write(INITIATOR_COMMAND_REG, (ICR_BASE | ICR_ASSERT_BSY | ICR_ASSERT_DATA | ICR_ASSERT_ATN | ICR_ASSERT_SEL )); NCR5380_write(MODE_REG, MR_BASE); /* * Reselect interrupts must be turned off prior to the dropping of BSY, * otherwise we will trigger an interrupt. */ if (hostdata->connected) { NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); return -1; } NCR5380_write(SELECT_ENABLE_REG, 0); /* * The initiator shall then wait at least two deskew delays and release * the BSY signal. 
*/ udelay(1); /* wingel -- wait two bus deskew delay >2*45ns */ /* Reset BSY */ NCR5380_write(INITIATOR_COMMAND_REG, (ICR_BASE | ICR_ASSERT_DATA | ICR_ASSERT_ATN | ICR_ASSERT_SEL)); /* * Something weird happens when we cease to drive BSY - looks * like the board/chip is letting us do another read before the * appropriate propagation delay has expired, and we're confusing * a BSY signal from ourselves as the target's response to SELECTION. * * A small delay (the 'C++' frontend breaks the pipeline with an * unnecessary jump, making it work on my 386-33/Trantor T128, the * tighter 'C' code breaks and requires this) solves the problem - * the 1 us delay is arbitrary, and only used because this delay will * be the same on other platforms and since it works here, it should * work there. * * wingel suggests that this could be due to failing to wait * one deskew delay. */ udelay(1); SEL_PRINTK("scsi%d: selecting target %d\n", HOSTNO, cmd->device->id); /* * The SCSI specification calls for a 250 ms timeout for the actual * selection. */ timeout = jiffies + 25; /* * XXX very interesting - we're seeing a bounce where the BSY we * asserted is being reflected / still asserted (propagation delay?) * and it's detecting as true. Sigh. */ #if 0 /* ++roman: If a target conformed to the SCSI standard, it wouldn't assert * IO while SEL is true. But again, there are some disks out the in the * world that do that nevertheless. (Somebody claimed that this announces * reselection capability of the target.) So we better skip that test and * only wait for BSY... 
(Famous german words: Der Klügere gibt nach :-) */ while (time_before(jiffies, timeout) && !(NCR5380_read(STATUS_REG) & (SR_BSY | SR_IO))) ; if ((NCR5380_read(STATUS_REG) & (SR_SEL | SR_IO)) == (SR_SEL | SR_IO)) { NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); NCR5380_reselect(instance); printk(KERN_ERR "scsi%d: reselection after won arbitration?\n", HOSTNO); NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); return -1; } #else while (time_before(jiffies, timeout) && !(NCR5380_read(STATUS_REG) & SR_BSY)) ; #endif /* * No less than two deskew delays after the initiator detects the * BSY signal is true, it shall release the SEL signal and may * change the DATA BUS. -wingel */ udelay(1); NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN); if (!(NCR5380_read(STATUS_REG) & SR_BSY)) { NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); if (hostdata->targets_present & (1 << cmd->device->id)) { printk(KERN_ERR "scsi%d: weirdness\n", HOSTNO); if (hostdata->restart_select) printk(KERN_NOTICE "\trestart select\n"); NCR_PRINT(NDEBUG_ANY); NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); return -1; } cmd->result = DID_BAD_TARGET << 16; #ifdef NCR5380_STATS collect_stats(hostdata, cmd); #endif #ifdef SUPPORT_TAGS cmd_free_tag(cmd); #endif cmd->scsi_done(cmd); NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); SEL_PRINTK("scsi%d: target did not respond within 250ms\n", HOSTNO); NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); return 0; } hostdata->targets_present |= (1 << cmd->device->id); /* * Since we followed the SCSI spec, and raised ATN while SEL * was true but before BSY was false during selection, the information * transfer phase should be a MESSAGE OUT phase so that we can send the * IDENTIFY message. * * If SCSI-II tagged queuing is enabled, we also send a SIMPLE_QUEUE_TAG * message (2 bytes) with a tag ID that we increment with every command * until it wraps back to 0. 
* * XXX - it turns out that there are some broken SCSI-II devices, * which claim to support tagged queuing but fail when more than * some number of commands are issued at once. */ /* Wait for start of REQ/ACK handshake */ while (!(NCR5380_read(STATUS_REG) & SR_REQ)) ; SEL_PRINTK("scsi%d: target %d selected, going into MESSAGE OUT phase.\n", HOSTNO, cmd->device->id); tmp[0] = IDENTIFY(1, cmd->device->lun); #ifdef SUPPORT_TAGS if (cmd->tag != TAG_NONE) { tmp[1] = hostdata->last_message = SIMPLE_QUEUE_TAG; tmp[2] = cmd->tag; len = 3; } else len = 1; #else len = 1; cmd->tag = 0; #endif /* SUPPORT_TAGS */ /* Send message(s) */ data = tmp; phase = PHASE_MSGOUT; NCR5380_transfer_pio(instance, &phase, &len, &data); SEL_PRINTK("scsi%d: nexus established.\n", HOSTNO); /* XXX need to handle errors here */ hostdata->connected = cmd; #ifndef SUPPORT_TAGS hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun); #endif initialize_SCp(cmd); return 0; } /* * Function : int NCR5380_transfer_pio (struct Scsi_Host *instance, * unsigned char *phase, int *count, unsigned char **data) * * Purpose : transfers data in given phase using polled I/O * * Inputs : instance - instance of driver, *phase - pointer to * what phase is expected, *count - pointer to number of * bytes to transfer, **data - pointer to data pointer. * * Returns : -1 when different phase is entered without transferring * maximum number of bytes, 0 if all bytes are transferred or exit * is in same phase. * * Also, *phase, *count, *data are modified in place. * * XXX Note : handling for bus free may be useful. */ /* * Note : this code is not as quick as it could be, however it * IS 100% reliable, and for the actual data transfer where speed * counts, we will always do a pseudo DMA or DMA transfer. 
*/
static int NCR5380_transfer_pio(struct Scsi_Host *instance,
				unsigned char *phase, int *count,
				unsigned char **data)
{
	register unsigned char p = *phase, tmp;
	register int c = *count;
	register unsigned char *d = *data;

	/*
	 * The NCR5380 chip will only drive the SCSI bus when the
	 * phase specified in the appropriate bits of the TARGET COMMAND
	 * REGISTER match the STATUS REGISTER
	 */

	NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(p));

	do {
		/*
		 * Wait for assertion of REQ, after which the phase bits will be
		 * valid
		 */
		while (!((tmp = NCR5380_read(STATUS_REG)) & SR_REQ))
			;

		HSH_PRINTK("scsi%d: REQ detected\n", HOSTNO);

		/* Check for phase mismatch */
		if ((tmp & PHASE_MASK) != p) {
			PIO_PRINTK("scsi%d: phase mismatch\n", HOSTNO);
			NCR_PRINT_PHASE(NDEBUG_PIO);
			break;
		}

		/* Do actual transfer from SCSI bus to / from memory */
		if (!(p & SR_IO))
			NCR5380_write(OUTPUT_DATA_REG, *d);
		else
			*d = NCR5380_read(CURRENT_SCSI_DATA_REG);

		++d;

		/*
		 * The SCSI standard suggests that in MSGOUT phase, the initiator
		 * should drop ATN on the last byte of the message phase
		 * after REQ has been asserted for the handshake but before
		 * the initiator raises ACK.
		 */

		if (!(p & SR_IO)) {
			if (!((p & SR_MSG) && c > 1)) {
				/* Last (or only) byte: assert data then ACK,
				 * without ATN. */
				NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA);
				NCR_PRINT(NDEBUG_PIO);
				NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
					      ICR_ASSERT_DATA | ICR_ASSERT_ACK);
			} else {
				/* More message bytes follow: keep ATN asserted. */
				NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
					      ICR_ASSERT_DATA | ICR_ASSERT_ATN);
				NCR_PRINT(NDEBUG_PIO);
				NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
					      ICR_ASSERT_DATA | ICR_ASSERT_ATN | ICR_ASSERT_ACK);
			}
		} else {
			NCR_PRINT(NDEBUG_PIO);
			NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ACK);
		}

		/* Wait for the target to drop REQ, completing the handshake. */
		while (NCR5380_read(STATUS_REG) & SR_REQ)
			;

		HSH_PRINTK("scsi%d: req false, handshake complete\n", HOSTNO);

		/*
		 * We have several special cases to consider during REQ/ACK handshaking :
		 * 1.  We were in MSGOUT phase, and we are on the last byte of the
		 *	message.  ATN must be dropped as ACK is dropped.
		 *
		 * 2.  We are in a MSGIN phase, and we are on the last byte of the
		 *	message.  We must exit with ACK asserted, so that the calling
		 *	code may raise ATN before dropping ACK to reject the message.
		 *
		 * 3.  ACK and ATN are clear and the target may proceed as normal.
		 */
		if (!(p == PHASE_MSGIN && c == 1)) {
			if (p == PHASE_MSGOUT && c > 1)
				NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);
			else
				NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
		}
	} while (--c);

	PIO_PRINTK("scsi%d: residual %d\n", HOSTNO, c);

	*count = c;
	*data = d;
	tmp = NCR5380_read(STATUS_REG);
	/* The phase read from the bus is valid if either REQ is (already)
	 * asserted or if ACK hasn't been released yet. The latter is the case if
	 * we're in MSGIN and all wanted bytes have been received.
	 */
	if ((tmp & SR_REQ) || (p == PHASE_MSGIN && c == 0))
		*phase = tmp & PHASE_MASK;
	else
		*phase = PHASE_UNKNOWN;

	if (!c || (*phase == p))
		return 0;
	else
		return -1;
}

/*
 * Function : do_abort (Scsi_Host *host)
 *
 * Purpose : abort the currently established nexus.  Should only be
 *	called from a routine which can drop into a
 *
 * Returns : 0 on success, -1 on failure.
 */

static int do_abort(struct Scsi_Host *host)
{
	unsigned char tmp, *msgptr, phase;
	int len;

	/* Request message out phase */
	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);

	/*
	 * Wait for the target to indicate a valid phase by asserting
	 * REQ.  Once this happens, we'll have either a MSGOUT phase
	 * and can immediately send the ABORT message, or we'll have some
	 * other phase and will have to source/sink data.
	 *
	 * We really don't care what value was on the bus or what value
	 * the target sees, so we just handshake.
	 */

	while (!((tmp = NCR5380_read(STATUS_REG)) & SR_REQ))
		;

	NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(tmp));

	if ((tmp & PHASE_MASK) != PHASE_MSGOUT) {
		/* Not MSGOUT yet: handshake one byte in whatever phase the
		 * target is in, keeping ATN up so MSGOUT comes next. */
		NCR5380_write(INITIATOR_COMMAND_REG,
			      ICR_BASE | ICR_ASSERT_ATN | ICR_ASSERT_ACK);
		while (NCR5380_read(STATUS_REG) & SR_REQ)
			;
		NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);
	}

	tmp = ABORT;
	msgptr = &tmp;
	len = 1;
	phase = PHASE_MSGOUT;
	NCR5380_transfer_pio(host, &phase, &len, &msgptr);

	/*
	 * If we got here, and the command completed successfully,
	 * we're about to go into bus free state.
	 */

	return len ? -1 : 0;
}

#if defined(REAL_DMA)
/*
 * Function : int NCR5380_transfer_dma (struct Scsi_Host *instance,
 *	unsigned char *phase, int *count, unsigned char **data)
 *
 * Purpose : transfers data in given phase using either real
 *	or pseudo DMA.
 *
 * Inputs : instance - instance of driver, *phase - pointer to
 *	what phase is expected, *count - pointer to number of
 *	bytes to transfer, **data - pointer to data pointer.
 *
 * Returns : -1 when different phase is entered without transferring
 *	maximum number of bytes, 0 if all bytes or transferred or exit
 *	is in same phase.
 *
 * Also, *phase, *count, *data are modified in place.
 *
 */

static int NCR5380_transfer_dma(struct Scsi_Host *instance,
				unsigned char *phase, int *count,
				unsigned char **data)
{
	SETUP_HOSTDATA(instance);
	register int c = *count;
	register unsigned char p = *phase;
	register unsigned char *d = *data;
	unsigned char tmp;
	unsigned long flags;

	if ((tmp = (NCR5380_read(STATUS_REG) & PHASE_MASK)) != p) {
		*phase = tmp;
		return -1;
	}

	/* Reads are shortened so the trailing bytes (which the chip may
	 * over-read) are fetched by PIO in NCR5380_dma_complete(). */
	if (atari_read_overruns && (p & SR_IO))
		c -= atari_read_overruns;

	DMA_PRINTK("scsi%d: initializing DMA for %s, %d bytes %s %p\n",
		   HOSTNO, (p & SR_IO) ? "reading" : "writing",
		   c, (p & SR_IO) ? "to" : "from", d);

	NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(p));

#ifdef REAL_DMA
	NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE | MR_ENABLE_EOP_INTR | MR_MONITOR_BSY);
#endif /* def REAL_DMA */

	if (IS_A_TT()) {
		/* On the Medusa, it is a must to initialize the DMA before
		 * starting the NCR. This is also the cleaner way for the TT.
		 */
		local_irq_save(flags);
		hostdata->dma_len = (p & SR_IO) ?
			NCR5380_dma_read_setup(instance, d, c) :
			NCR5380_dma_write_setup(instance, d, c);
		local_irq_restore(flags);
	}

	if (p & SR_IO)
		NCR5380_write(START_DMA_INITIATOR_RECEIVE_REG, 0);
	else {
		NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA);
		NCR5380_write(START_DMA_SEND_REG, 0);
	}

	if (!IS_A_TT()) {
		/* On the Falcon, the DMA setup must be done after the last */
		/* NCR access, else the DMA setup gets trashed!
		 */
		local_irq_save(flags);
		hostdata->dma_len = (p & SR_IO) ?
			NCR5380_dma_read_setup(instance, d, c) :
			NCR5380_dma_write_setup(instance, d, c);
		local_irq_restore(flags);
	}
	return 0;
}
#endif /* defined(REAL_DMA) */

/*
 * Function : NCR5380_information_transfer (struct Scsi_Host *instance)
 *
 * Purpose : run through the various SCSI phases and do as the target
 *	directs us to.  Operates on the currently connected command,
 *	instance->connected.
 *
 * Inputs : instance, instance for which we are doing commands
 *
 * Side effects : SCSI things happen, the disconnected queue will be
 *	modified if a command disconnects, *instance->connected will
 *	change.
 *
 * XXX Note : we need to watch for bus free or a reset condition here
 *	to recover from an unexpected bus free condition.
*/ static void NCR5380_information_transfer(struct Scsi_Host *instance) { SETUP_HOSTDATA(instance); unsigned long flags; unsigned char msgout = NOP; int sink = 0; int len; #if defined(REAL_DMA) int transfersize; #endif unsigned char *data; unsigned char phase, tmp, extended_msg[10], old_phase = 0xff; Scsi_Cmnd *cmd = (Scsi_Cmnd *) hostdata->connected; while (1) { tmp = NCR5380_read(STATUS_REG); /* We only have a valid SCSI phase when REQ is asserted */ if (tmp & SR_REQ) { phase = (tmp & PHASE_MASK); if (phase != old_phase) { old_phase = phase; NCR_PRINT_PHASE(NDEBUG_INFORMATION); } if (sink && (phase != PHASE_MSGOUT)) { NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(tmp)); NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN | ICR_ASSERT_ACK); while (NCR5380_read(STATUS_REG) & SR_REQ) ; NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN); sink = 0; continue; } switch (phase) { case PHASE_DATAOUT: #if (NDEBUG & NDEBUG_NO_DATAOUT) printk("scsi%d: NDEBUG_NO_DATAOUT set, attempted DATAOUT " "aborted\n", HOSTNO); sink = 1; do_abort(instance); cmd->result = DID_ERROR << 16; cmd->scsi_done(cmd); return; #endif case PHASE_DATAIN: /* * If there is no room left in the current buffer in the * scatter-gather list, move onto the next one. */ if (!cmd->SCp.this_residual && cmd->SCp.buffers_residual) { ++cmd->SCp.buffer; --cmd->SCp.buffers_residual; cmd->SCp.this_residual = cmd->SCp.buffer->length; cmd->SCp.ptr = sg_virt(cmd->SCp.buffer); /* ++roman: Try to merge some scatter-buffers if * they are at contiguous physical addresses. */ merge_contiguous_buffers(cmd); INF_PRINTK("scsi%d: %d bytes and %d buffers left\n", HOSTNO, cmd->SCp.this_residual, cmd->SCp.buffers_residual); } /* * The preferred transfer method is going to be * PSEUDO-DMA for systems that are strictly PIO, * since we can let the hardware do the handshaking. 
* * For this to work, we need to know the transfersize * ahead of time, since the pseudo-DMA code will sit * in an unconditional loop. */ /* ++roman: I suggest, this should be * #if def(REAL_DMA) * instead of leaving REAL_DMA out. */ #if defined(REAL_DMA) if (!cmd->device->borken && (transfersize = NCR5380_dma_xfer_len(instance,cmd,phase)) > 31) { len = transfersize; cmd->SCp.phase = phase; if (NCR5380_transfer_dma(instance, &phase, &len, (unsigned char **)&cmd->SCp.ptr)) { /* * If the watchdog timer fires, all future * accesses to this device will use the * polled-IO. */ printk(KERN_NOTICE "scsi%d: switching target %d " "lun %d to slow handshake\n", HOSTNO, cmd->device->id, cmd->device->lun); cmd->device->borken = 1; NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN); sink = 1; do_abort(instance); cmd->result = DID_ERROR << 16; cmd->scsi_done(cmd); /* XXX - need to source or sink data here, as appropriate */ } else { #ifdef REAL_DMA /* ++roman: When using real DMA, * information_transfer() should return after * starting DMA since it has nothing more to * do. */ return; #else cmd->SCp.this_residual -= transfersize - len; #endif } } else #endif /* defined(REAL_DMA) */ NCR5380_transfer_pio(instance, &phase, (int *)&cmd->SCp.this_residual, (unsigned char **)&cmd->SCp.ptr); break; case PHASE_MSGIN: len = 1; data = &tmp; NCR5380_write(SELECT_ENABLE_REG, 0); /* disable reselects */ NCR5380_transfer_pio(instance, &phase, &len, &data); cmd->SCp.Message = tmp; switch (tmp) { /* * Linking lets us reduce the time required to get the * next command out to the device, hopefully this will * mean we don't waste another revolution due to the delays * required by ARBITRATION and another SELECTION. * * In the current implementation proposal, low level drivers * merely have to start the next command, pointed to by * next_link, done() is called as with unlinked commands. 
*/ #ifdef LINKED case LINKED_CMD_COMPLETE: case LINKED_FLG_CMD_COMPLETE: /* Accept message by clearing ACK */ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); LNK_PRINTK("scsi%d: target %d lun %d linked command " "complete.\n", HOSTNO, cmd->device->id, cmd->device->lun); /* Enable reselect interrupts */ NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); /* * Sanity check : A linked command should only terminate * with one of these messages if there are more linked * commands available. */ if (!cmd->next_link) { printk(KERN_NOTICE "scsi%d: target %d lun %d " "linked command complete, no next_link\n", HOSTNO, cmd->device->id, cmd->device->lun); sink = 1; do_abort(instance); return; } initialize_SCp(cmd->next_link); /* The next command is still part of this process; copy it * and don't free it! */ cmd->next_link->tag = cmd->tag; cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8); LNK_PRINTK("scsi%d: target %d lun %d linked request " "done, calling scsi_done().\n", HOSTNO, cmd->device->id, cmd->device->lun); #ifdef NCR5380_STATS collect_stats(hostdata, cmd); #endif cmd->scsi_done(cmd); cmd = hostdata->connected; break; #endif /* def LINKED */ case ABORT: case COMMAND_COMPLETE: /* Accept message by clearing ACK */ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); /* ++guenther: possible race with Falcon locking */ falcon_dont_release++; hostdata->connected = NULL; QU_PRINTK("scsi%d: command for target %d, lun %d " "completed\n", HOSTNO, cmd->device->id, cmd->device->lun); #ifdef SUPPORT_TAGS cmd_free_tag(cmd); if (status_byte(cmd->SCp.Status) == QUEUE_FULL) { /* Turn a QUEUE FULL status into BUSY, I think the * mid level cannot handle QUEUE FULL :-( (The * command is retried after BUSY). Also update our * queue size to the number of currently issued * commands now. */ /* ++Andreas: the mid level code knows about QUEUE_FULL now. 
*/ TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun]; TAG_PRINTK("scsi%d: target %d lun %d returned " "QUEUE_FULL after %d commands\n", HOSTNO, cmd->device->id, cmd->device->lun, ta->nr_allocated); if (ta->queue_size > ta->nr_allocated) ta->nr_allocated = ta->queue_size; } #else hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun); #endif /* Enable reselect interrupts */ NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); /* * I'm not sure what the correct thing to do here is : * * If the command that just executed is NOT a request * sense, the obvious thing to do is to set the result * code to the values of the stored parameters. * * If it was a REQUEST SENSE command, we need some way to * differentiate between the failure code of the original * and the failure code of the REQUEST sense - the obvious * case is success, where we fall through and leave the * result code unchanged. * * The non-obvious place is where the REQUEST SENSE failed */ if (cmd->cmnd[0] != REQUEST_SENSE) cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8); else if (status_byte(cmd->SCp.Status) != GOOD) cmd->result = (cmd->result & 0x00ffff) | (DID_ERROR << 16); #ifdef AUTOSENSE if ((cmd->cmnd[0] == REQUEST_SENSE) && hostdata->ses.cmd_len) { scsi_eh_restore_cmnd(cmd, &hostdata->ses); hostdata->ses.cmd_len = 0 ; } if ((cmd->cmnd[0] != REQUEST_SENSE) && (status_byte(cmd->SCp.Status) == CHECK_CONDITION)) { scsi_eh_prep_cmnd(cmd, &hostdata->ses, NULL, 0, ~0); ASEN_PRINTK("scsi%d: performing request sense\n", HOSTNO); local_irq_save(flags); LIST(cmd,hostdata->issue_queue); SET_NEXT(cmd, hostdata->issue_queue); hostdata->issue_queue = (Scsi_Cmnd *) cmd; local_irq_restore(flags); QU_PRINTK("scsi%d: REQUEST SENSE added to head of " "issue queue\n", H_NO(cmd)); } else #endif /* def AUTOSENSE */ { #ifdef NCR5380_STATS collect_stats(hostdata, cmd); #endif cmd->scsi_done(cmd); } NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); /* * Restore phase bits to 0 so an interrupted 
selection, * arbitration can resume. */ NCR5380_write(TARGET_COMMAND_REG, 0); while ((NCR5380_read(STATUS_REG) & SR_BSY) && !hostdata->connected) barrier(); falcon_dont_release--; /* ++roman: For Falcon SCSI, release the lock on the * ST-DMA here if no other commands are waiting on the * disconnected queue. */ falcon_release_lock_if_possible(hostdata); return; case MESSAGE_REJECT: /* Accept message by clearing ACK */ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); /* Enable reselect interrupts */ NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); switch (hostdata->last_message) { case HEAD_OF_QUEUE_TAG: case ORDERED_QUEUE_TAG: case SIMPLE_QUEUE_TAG: /* The target obviously doesn't support tagged * queuing, even though it announced this ability in * its INQUIRY data ?!? (maybe only this LUN?) Ok, * clear 'tagged_supported' and lock the LUN, since * the command is treated as untagged further on. */ cmd->device->tagged_supported = 0; hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun); cmd->tag = TAG_NONE; TAG_PRINTK("scsi%d: target %d lun %d rejected " "QUEUE_TAG message; tagged queuing " "disabled\n", HOSTNO, cmd->device->id, cmd->device->lun); break; } break; case DISCONNECT: /* Accept message by clearing ACK */ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); local_irq_save(flags); cmd->device->disconnect = 1; LIST(cmd,hostdata->disconnected_queue); SET_NEXT(cmd, hostdata->disconnected_queue); hostdata->connected = NULL; hostdata->disconnected_queue = cmd; local_irq_restore(flags); QU_PRINTK("scsi%d: command for target %d lun %d was " "moved from connected to the " "disconnected_queue\n", HOSTNO, cmd->device->id, cmd->device->lun); /* * Restore phase bits to 0 so an interrupted selection, * arbitration can resume. 
*/ NCR5380_write(TARGET_COMMAND_REG, 0); /* Enable reselect interrupts */ NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); /* Wait for bus free to avoid nasty timeouts */ while ((NCR5380_read(STATUS_REG) & SR_BSY) && !hostdata->connected) barrier(); return; /* * The SCSI data pointer is *IMPLICITLY* saved on a disconnect * operation, in violation of the SCSI spec so we can safely * ignore SAVE/RESTORE pointers calls. * * Unfortunately, some disks violate the SCSI spec and * don't issue the required SAVE_POINTERS message before * disconnecting, and we have to break spec to remain * compatible. */ case SAVE_POINTERS: case RESTORE_POINTERS: /* Accept message by clearing ACK */ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); /* Enable reselect interrupts */ NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); break; case EXTENDED_MESSAGE: /* * Extended messages are sent in the following format : * Byte * 0 EXTENDED_MESSAGE == 1 * 1 length (includes one byte for code, doesn't * include first two bytes) * 2 code * 3..length+1 arguments * * Start the extended message buffer with the EXTENDED_MESSAGE * byte, since spi_print_msg() wants the whole thing. 
*/ extended_msg[0] = EXTENDED_MESSAGE; /* Accept first byte by clearing ACK */ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); EXT_PRINTK("scsi%d: receiving extended message\n", HOSTNO); len = 2; data = extended_msg + 1; phase = PHASE_MSGIN; NCR5380_transfer_pio(instance, &phase, &len, &data); EXT_PRINTK("scsi%d: length=%d, code=0x%02x\n", HOSTNO, (int)extended_msg[1], (int)extended_msg[2]); if (!len && extended_msg[1] <= (sizeof(extended_msg) - 1)) { /* Accept third byte by clearing ACK */ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); len = extended_msg[1] - 1; data = extended_msg + 3; phase = PHASE_MSGIN; NCR5380_transfer_pio(instance, &phase, &len, &data); EXT_PRINTK("scsi%d: message received, residual %d\n", HOSTNO, len); switch (extended_msg[2]) { case EXTENDED_SDTR: case EXTENDED_WDTR: case EXTENDED_MODIFY_DATA_POINTER: case EXTENDED_EXTENDED_IDENTIFY: tmp = 0; } } else if (len) { printk(KERN_NOTICE "scsi%d: error receiving " "extended message\n", HOSTNO); tmp = 0; } else { printk(KERN_NOTICE "scsi%d: extended message " "code %02x length %d is too long\n", HOSTNO, extended_msg[2], extended_msg[1]); tmp = 0; } /* Fall through to reject message */ /* * If we get something weird that we aren't expecting, * reject it. 
*/ default: if (!tmp) { printk(KERN_DEBUG "scsi%d: rejecting message ", HOSTNO); spi_print_msg(extended_msg); printk("\n"); } else if (tmp != EXTENDED_MESSAGE) printk(KERN_DEBUG "scsi%d: rejecting unknown " "message %02x from target %d, lun %d\n", HOSTNO, tmp, cmd->device->id, cmd->device->lun); else printk(KERN_DEBUG "scsi%d: rejecting unknown " "extended message " "code %02x, length %d from target %d, lun %d\n", HOSTNO, extended_msg[1], extended_msg[0], cmd->device->id, cmd->device->lun); msgout = MESSAGE_REJECT; NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN); break; } /* switch (tmp) */ break; case PHASE_MSGOUT: len = 1; data = &msgout; hostdata->last_message = msgout; NCR5380_transfer_pio(instance, &phase, &len, &data); if (msgout == ABORT) { #ifdef SUPPORT_TAGS cmd_free_tag(cmd); #else hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun); #endif hostdata->connected = NULL; cmd->result = DID_ERROR << 16; #ifdef NCR5380_STATS collect_stats(hostdata, cmd); #endif cmd->scsi_done(cmd); NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); falcon_release_lock_if_possible(hostdata); return; } msgout = NOP; break; case PHASE_CMDOUT: len = cmd->cmd_len; data = cmd->cmnd; /* * XXX for performance reasons, on machines with a * PSEUDO-DMA architecture we should probably * use the dma transfer function. */ NCR5380_transfer_pio(instance, &phase, &len, &data); break; case PHASE_STATIN: len = 1; data = &tmp; NCR5380_transfer_pio(instance, &phase, &len, &data); cmd->SCp.Status = tmp; break; default: printk("scsi%d: unknown phase\n", HOSTNO); NCR_PRINT(NDEBUG_ANY); } /* switch(phase) */ } /* if (tmp * SR_REQ) */ } /* while (1) */ } /* * Function : void NCR5380_reselect (struct Scsi_Host *instance) * * Purpose : does reselection, initializing the instance->connected * field to point to the Scsi_Cmnd for which the I_T_L or I_T_L_Q * nexus has been reestablished, * * Inputs : instance - this instance of the NCR5380. 
* */ static void NCR5380_reselect(struct Scsi_Host *instance) { SETUP_HOSTDATA(instance); unsigned char target_mask; unsigned char lun, phase; int len; #ifdef SUPPORT_TAGS unsigned char tag; #endif unsigned char msg[3]; unsigned char *data; Scsi_Cmnd *tmp = NULL, *prev; /* unsigned long flags; */ /* * Disable arbitration, etc. since the host adapter obviously * lost, and tell an interrupted NCR5380_select() to restart. */ NCR5380_write(MODE_REG, MR_BASE); hostdata->restart_select = 1; target_mask = NCR5380_read(CURRENT_SCSI_DATA_REG) & ~(hostdata->id_mask); RSL_PRINTK("scsi%d: reselect\n", HOSTNO); /* * At this point, we have detected that our SCSI ID is on the bus, * SEL is true and BSY was false for at least one bus settle delay * (400 ns). * * We must assert BSY ourselves, until the target drops the SEL * signal. */ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_BSY); while (NCR5380_read(STATUS_REG) & SR_SEL) ; NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); /* * Wait for target to go into MSGIN. */ while (!(NCR5380_read(STATUS_REG) & SR_REQ)) ; len = 1; data = msg; phase = PHASE_MSGIN; NCR5380_transfer_pio(instance, &phase, &len, &data); if (!(msg[0] & 0x80)) { printk(KERN_DEBUG "scsi%d: expecting IDENTIFY message, got ", HOSTNO); spi_print_msg(msg); do_abort(instance); return; } lun = (msg[0] & 0x07); #ifdef SUPPORT_TAGS /* If the phase is still MSGIN, the target wants to send some more * messages. In case it supports tagged queuing, this is probably a * SIMPLE_QUEUE_TAG for the I_T_L_Q nexus. 
*/ tag = TAG_NONE; if (phase == PHASE_MSGIN && setup_use_tagged_queuing) { /* Accept previous IDENTIFY message by clearing ACK */ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); len = 2; data = msg + 1; if (!NCR5380_transfer_pio(instance, &phase, &len, &data) && msg[1] == SIMPLE_QUEUE_TAG) tag = msg[2]; TAG_PRINTK("scsi%d: target mask %02x, lun %d sent tag %d at " "reselection\n", HOSTNO, target_mask, lun, tag); } #endif /* * Find the command corresponding to the I_T_L or I_T_L_Q nexus we * just reestablished, and remove it from the disconnected queue. */ for (tmp = (Scsi_Cmnd *) hostdata->disconnected_queue, prev = NULL; tmp; prev = tmp, tmp = NEXT(tmp)) { if ((target_mask == (1 << tmp->device->id)) && (lun == tmp->device->lun) #ifdef SUPPORT_TAGS && (tag == tmp->tag) #endif ) { /* ++guenther: prevent race with falcon_release_lock */ falcon_dont_release++; if (prev) { REMOVE(prev, NEXT(prev), tmp, NEXT(tmp)); SET_NEXT(prev, NEXT(tmp)); } else { REMOVE(-1, hostdata->disconnected_queue, tmp, NEXT(tmp)); hostdata->disconnected_queue = NEXT(tmp); } SET_NEXT(tmp, NULL); break; } } if (!tmp) { printk(KERN_WARNING "scsi%d: warning: target bitmask %02x lun %d " #ifdef SUPPORT_TAGS "tag %d " #endif "not in disconnected_queue.\n", HOSTNO, target_mask, lun #ifdef SUPPORT_TAGS , tag #endif ); /* * Since we have an established nexus that we can't do anything * with, we must abort it. */ do_abort(instance); return; } /* Accept message by clearing ACK */ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); hostdata->connected = tmp; RSL_PRINTK("scsi%d: nexus established, target = %d, lun = %d, tag = %d\n", HOSTNO, tmp->device->id, tmp->device->lun, tmp->tag); falcon_dont_release--; } /* * Function : int NCR5380_abort (Scsi_Cmnd *cmd) * * Purpose : abort a command * * Inputs : cmd - the Scsi_Cmnd to abort, code - code to set the * host byte of the result field to, if zero DID_ABORTED is * used. * * Returns : 0 - success, -1 on failure. 
* * XXX - there is no way to abort the command that is currently * connected, you have to wait for it to complete. If this is * a problem, we could implement longjmp() / setjmp(), setjmp() * called where the loop started in NCR5380_main(). */ static int NCR5380_abort(Scsi_Cmnd *cmd) { struct Scsi_Host *instance = cmd->device->host; SETUP_HOSTDATA(instance); Scsi_Cmnd *tmp, **prev; unsigned long flags; printk(KERN_NOTICE "scsi%d: aborting command\n", HOSTNO); scsi_print_command(cmd); NCR5380_print_status(instance); local_irq_save(flags); if (!IS_A_TT() && !falcon_got_lock) printk(KERN_ERR "scsi%d: !!BINGO!! Falcon has no lock in NCR5380_abort\n", HOSTNO); ABRT_PRINTK("scsi%d: abort called basr 0x%02x, sr 0x%02x\n", HOSTNO, NCR5380_read(BUS_AND_STATUS_REG), NCR5380_read(STATUS_REG)); #if 1 /* * Case 1 : If the command is the currently executing command, * we'll set the aborted flag and return control so that * information transfer routine can exit cleanly. */ if (hostdata->connected == cmd) { ABRT_PRINTK("scsi%d: aborting connected command\n", HOSTNO); /* * We should perform BSY checking, and make sure we haven't slipped * into BUS FREE. */ /* NCR5380_write(INITIATOR_COMMAND_REG, ICR_ASSERT_ATN); */ /* * Since we can't change phases until we've completed the current * handshake, we have to source or sink a byte of data if the current * phase is not MSGOUT. */ /* * Return control to the executing NCR drive so we can clear the * aborted flag and get back into our main loop. 
*/ if (do_abort(instance) == 0) { hostdata->aborted = 1; hostdata->connected = NULL; cmd->result = DID_ABORT << 16; #ifdef SUPPORT_TAGS cmd_free_tag(cmd); #else hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun); #endif local_irq_restore(flags); cmd->scsi_done(cmd); falcon_release_lock_if_possible(hostdata); return SCSI_ABORT_SUCCESS; } else { /* local_irq_restore(flags); */ printk("scsi%d: abort of connected command failed!\n", HOSTNO); return SCSI_ABORT_ERROR; } } #endif /* * Case 2 : If the command hasn't been issued yet, we simply remove it * from the issue queue. */ for (prev = (Scsi_Cmnd **)&(hostdata->issue_queue), tmp = (Scsi_Cmnd *)hostdata->issue_queue; tmp; prev = NEXTADDR(tmp), tmp = NEXT(tmp)) { if (cmd == tmp) { REMOVE(5, *prev, tmp, NEXT(tmp)); (*prev) = NEXT(tmp); SET_NEXT(tmp, NULL); tmp->result = DID_ABORT << 16; local_irq_restore(flags); ABRT_PRINTK("scsi%d: abort removed command from issue queue.\n", HOSTNO); /* Tagged queuing note: no tag to free here, hasn't been assigned * yet... */ tmp->scsi_done(tmp); falcon_release_lock_if_possible(hostdata); return SCSI_ABORT_SUCCESS; } } /* * Case 3 : If any commands are connected, we're going to fail the abort * and let the high level SCSI driver retry at a later time or * issue a reset. * * Timeouts, and therefore aborted commands, will be highly unlikely * and handling them cleanly in this situation would make the common * case of noresets less efficient, and would pollute our code. So, * we fail. */ if (hostdata->connected) { local_irq_restore(flags); ABRT_PRINTK("scsi%d: abort failed, command connected.\n", HOSTNO); return SCSI_ABORT_SNOOZE; } /* * Case 4: If the command is currently disconnected from the bus, and * there are no connected commands, we reconnect the I_T_L or * I_T_L_Q nexus associated with it, go into message out, and send * an abort message. * * This case is especially ugly. In order to reestablish the nexus, we * need to call NCR5380_select(). 
The easiest way to implement this * function was to abort if the bus was busy, and let the interrupt * handler triggered on the SEL for reselect take care of lost arbitrations * where necessary, meaning interrupts need to be enabled. * * When interrupts are enabled, the queues may change - so we * can't remove it from the disconnected queue before selecting it * because that could cause a failure in hashing the nexus if that * device reselected. * * Since the queues may change, we can't use the pointers from when we * first locate it. * * So, we must first locate the command, and if NCR5380_select() * succeeds, then issue the abort, relocate the command and remove * it from the disconnected queue. */ for (tmp = (Scsi_Cmnd *) hostdata->disconnected_queue; tmp; tmp = NEXT(tmp)) { if (cmd == tmp) { local_irq_restore(flags); ABRT_PRINTK("scsi%d: aborting disconnected command.\n", HOSTNO); if (NCR5380_select(instance, cmd, (int)cmd->tag)) return SCSI_ABORT_BUSY; ABRT_PRINTK("scsi%d: nexus reestablished.\n", HOSTNO); do_abort(instance); local_irq_save(flags); for (prev = (Scsi_Cmnd **)&(hostdata->disconnected_queue), tmp = (Scsi_Cmnd *)hostdata->disconnected_queue; tmp; prev = NEXTADDR(tmp), tmp = NEXT(tmp)) { if (cmd == tmp) { REMOVE(5, *prev, tmp, NEXT(tmp)); *prev = NEXT(tmp); SET_NEXT(tmp, NULL); tmp->result = DID_ABORT << 16; /* We must unlock the tag/LUN immediately here, since the * target goes to BUS FREE and doesn't send us another * message (COMMAND_COMPLETE or the like) */ #ifdef SUPPORT_TAGS cmd_free_tag(tmp); #else hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun); #endif local_irq_restore(flags); tmp->scsi_done(tmp); falcon_release_lock_if_possible(hostdata); return SCSI_ABORT_SUCCESS; } } } } /* * Case 5 : If we reached this point, the command was not found in any of * the queues. 
* * We probably reached this point because of an unlikely race condition * between the command completing successfully and the abortion code, * so we won't panic, but we will notify the user in case something really * broke. */ local_irq_restore(flags); printk(KERN_INFO "scsi%d: warning : SCSI command probably completed successfully before abortion\n", HOSTNO); /* Maybe it is sufficient just to release the ST-DMA lock... (if * possible at all) At least, we should check if the lock could be * released after the abort, in case it is kept due to some bug. */ falcon_release_lock_if_possible(hostdata); return SCSI_ABORT_NOT_RUNNING; } /* * Function : int NCR5380_reset (Scsi_Cmnd *cmd) * * Purpose : reset the SCSI bus. * * Returns : SCSI_RESET_WAKEUP * */ static int NCR5380_bus_reset(Scsi_Cmnd *cmd) { SETUP_HOSTDATA(cmd->device->host); int i; unsigned long flags; #if 1 Scsi_Cmnd *connected, *disconnected_queue; #endif if (!IS_A_TT() && !falcon_got_lock) printk(KERN_ERR "scsi%d: !!BINGO!! Falcon has no lock in NCR5380_reset\n", H_NO(cmd)); NCR5380_print_status(cmd->device->host); /* get in phase */ NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(NCR5380_read(STATUS_REG))); /* assert RST */ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_RST); udelay(40); /* reset NCR registers */ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); NCR5380_write(MODE_REG, MR_BASE); NCR5380_write(TARGET_COMMAND_REG, 0); NCR5380_write(SELECT_ENABLE_REG, 0); /* ++roman: reset interrupt condition! otherwise no interrupts don't get * through anymore ... 
*/ (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG); #if 1 /* XXX Should now be done by midlevel code, but it's broken XXX */ /* XXX see below XXX */ /* MSch: old-style reset: actually abort all command processing here */ /* After the reset, there are no more connected or disconnected commands * and no busy units; to avoid problems with re-inserting the commands * into the issue_queue (via scsi_done()), the aborted commands are * remembered in local variables first. */ local_irq_save(flags); connected = (Scsi_Cmnd *)hostdata->connected; hostdata->connected = NULL; disconnected_queue = (Scsi_Cmnd *)hostdata->disconnected_queue; hostdata->disconnected_queue = NULL; #ifdef SUPPORT_TAGS free_all_tags(); #endif for (i = 0; i < 8; ++i) hostdata->busy[i] = 0; #ifdef REAL_DMA hostdata->dma_len = 0; #endif local_irq_restore(flags); /* In order to tell the mid-level code which commands were aborted, * set the command status to DID_RESET and call scsi_done() !!! * This ultimately aborts processing of these commands in the mid-level. */ if ((cmd = connected)) { ABRT_PRINTK("scsi%d: reset aborted a connected command\n", H_NO(cmd)); cmd->result = (cmd->result & 0xffff) | (DID_RESET << 16); cmd->scsi_done(cmd); } for (i = 0; (cmd = disconnected_queue); ++i) { disconnected_queue = NEXT(cmd); SET_NEXT(cmd, NULL); cmd->result = (cmd->result & 0xffff) | (DID_RESET << 16); cmd->scsi_done(cmd); } if (i > 0) ABRT_PRINTK("scsi: reset aborted %d disconnected command(s)\n", i); /* The Falcon lock should be released after a reset... */ /* ++guenther: moved to atari_scsi_reset(), to prevent a race between * unlocking and enabling dma interrupt. 
*/ /* falcon_release_lock_if_possible( hostdata );*/ /* since all commands have been explicitly terminated, we need to tell * the midlevel code that the reset was SUCCESSFUL, and there is no * need to 'wake up' the commands by a request_sense */ return SCSI_RESET_SUCCESS | SCSI_RESET_BUS_RESET; #else /* 1 */ /* MSch: new-style reset handling: let the mid-level do what it can */ /* ++guenther: MID-LEVEL IS STILL BROKEN. * Mid-level is supposed to requeue all commands that were active on the * various low-level queues. In fact it does this, but that's not enough * because all these commands are subject to timeout. And if a timeout * happens for any removed command, *_abort() is called but all queues * are now empty. Abort then gives up the falcon lock, which is fatal, * since the mid-level will queue more commands and must have the lock * (it's all happening inside timer interrupt handler!!). * Even worse, abort will return NOT_RUNNING for all those commands not * on any queue, so they won't be retried ... * * Conclusion: either scsi.c disables timeout for all resetted commands * immediately, or we lose! As of linux-2.0.20 it doesn't. */ /* After the reset, there are no more connected or disconnected commands * and no busy units; so clear the low-level status here to avoid * conflicts when the mid-level code tries to wake up the affected * commands! 
*/ if (hostdata->issue_queue) ABRT_PRINTK("scsi%d: reset aborted issued command(s)\n", H_NO(cmd)); if (hostdata->connected) ABRT_PRINTK("scsi%d: reset aborted a connected command\n", H_NO(cmd)); if (hostdata->disconnected_queue) ABRT_PRINTK("scsi%d: reset aborted disconnected command(s)\n", H_NO(cmd)); local_irq_save(flags); hostdata->issue_queue = NULL; hostdata->connected = NULL; hostdata->disconnected_queue = NULL; #ifdef SUPPORT_TAGS free_all_tags(); #endif for (i = 0; i < 8; ++i) hostdata->busy[i] = 0; #ifdef REAL_DMA hostdata->dma_len = 0; #endif local_irq_restore(flags); /* we did no complete reset of all commands, so a wakeup is required */ return SCSI_RESET_WAKEUP | SCSI_RESET_BUS_RESET; #endif /* 1 */ }
gpl-2.0
EmericanX/android_kernel_motorola_msm8960-common
arch/arm/mach-davinci/psc.c
2532
2932
/* * TI DaVinci Power and Sleep Controller (PSC) * * Copyright (C) 2006 Texas Instruments. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/io.h> #include <mach/cputype.h> #include <mach/psc.h> /* Return nonzero iff the domain's clock is active */ int __init davinci_psc_is_clk_active(unsigned int ctlr, unsigned int id) { void __iomem *psc_base; u32 mdstat; struct davinci_soc_info *soc_info = &davinci_soc_info; if (!soc_info->psc_bases || (ctlr >= soc_info->psc_bases_num)) { pr_warning("PSC: Bad psc data: 0x%x[%d]\n", (int)soc_info->psc_bases, ctlr); return 0; } psc_base = ioremap(soc_info->psc_bases[ctlr], SZ_4K); mdstat = __raw_readl(psc_base + MDSTAT + 4 * id); iounmap(psc_base); /* if clocked, state can be "Enable" or "SyncReset" */ return mdstat & BIT(12); } /* Enable or disable a PSC domain */ void davinci_psc_config(unsigned int domain, unsigned int ctlr, unsigned int id, u32 next_state) { u32 epcpr, ptcmd, ptstat, pdstat, pdctl1, mdstat, mdctl; void __iomem *psc_base; struct davinci_soc_info *soc_info = &davinci_soc_info; if (!soc_info->psc_bases || (ctlr >= soc_info->psc_bases_num)) { pr_warning("PSC: Bad psc data: 0x%x[%d]\n", (int)soc_info->psc_bases, ctlr); return; } psc_base = ioremap(soc_info->psc_bases[ctlr], SZ_4K); mdctl = __raw_readl(psc_base + MDCTL + 4 
* id); mdctl &= ~MDSTAT_STATE_MASK; mdctl |= next_state; __raw_writel(mdctl, psc_base + MDCTL + 4 * id); pdstat = __raw_readl(psc_base + PDSTAT); if ((pdstat & 0x00000001) == 0) { pdctl1 = __raw_readl(psc_base + PDCTL1); pdctl1 |= 0x1; __raw_writel(pdctl1, psc_base + PDCTL1); ptcmd = 1 << domain; __raw_writel(ptcmd, psc_base + PTCMD); do { epcpr = __raw_readl(psc_base + EPCPR); } while ((((epcpr >> domain) & 1) == 0)); pdctl1 = __raw_readl(psc_base + PDCTL1); pdctl1 |= 0x100; __raw_writel(pdctl1, psc_base + PDCTL1); } else { ptcmd = 1 << domain; __raw_writel(ptcmd, psc_base + PTCMD); } do { ptstat = __raw_readl(psc_base + PTSTAT); } while (!(((ptstat >> domain) & 1) == 0)); do { mdstat = __raw_readl(psc_base + MDSTAT + 4 * id); } while (!((mdstat & MDSTAT_STATE_MASK) == next_state)); iounmap(psc_base); }
gpl-2.0
speef/linux
drivers/media/dvb-frontends/it913x-fe.c
2788
24826
/* * Driver for it913x-fe Frontend * * with support for on chip it9137 integral tuner * * Copyright (C) 2011 Malcolm Priestley (tvboxspy@gmail.com) * IT9137 Copyright (C) ITE Tech Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.= */ #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/types.h> #include "dvb_frontend.h" #include "it913x-fe.h" #include "it913x-fe-priv.h" static int it913x_debug; module_param_named(debug, it913x_debug, int, 0644); MODULE_PARM_DESC(debug, "set debugging level (1=info (or-able))."); #define dprintk(level, args...) do { \ if (level & it913x_debug) \ printk(KERN_DEBUG "it913x-fe: " args); \ } while (0) #define deb_info(args...) dprintk(0x01, args) #define debug_data_snipet(level, name, p) \ dprintk(level, name" (%02x%02x%02x%02x%02x%02x%02x%02x)", \ *p, *(p+1), *(p+2), *(p+3), *(p+4), \ *(p+5), *(p+6), *(p+7)); #define info(format, arg...) 
\ printk(KERN_INFO "it913x-fe: " format "\n" , ## arg) struct it913x_fe_state { struct dvb_frontend frontend; struct i2c_adapter *i2c_adap; struct ite_config *config; u8 i2c_addr; u32 frequency; fe_modulation_t constellation; fe_transmit_mode_t transmission_mode; u8 priority; u32 crystalFrequency; u32 adcFrequency; u8 tuner_type; struct adctable *table; fe_status_t it913x_status; u16 tun_xtal; u8 tun_fdiv; u8 tun_clk_mode; u32 tun_fn_min; u32 ucblocks; }; static int it913x_read_reg(struct it913x_fe_state *state, u32 reg, u8 *data, u8 count) { int ret; u8 pro = PRO_DMOD; /* All reads from demodulator */ u8 b[4]; struct i2c_msg msg[2] = { { .addr = state->i2c_addr + (pro << 1), .flags = 0, .buf = b, .len = sizeof(b) }, { .addr = state->i2c_addr + (pro << 1), .flags = I2C_M_RD, .buf = data, .len = count } }; b[0] = (u8) reg >> 24; b[1] = (u8)(reg >> 16) & 0xff; b[2] = (u8)(reg >> 8) & 0xff; b[3] = (u8) reg & 0xff; ret = i2c_transfer(state->i2c_adap, msg, 2); return ret; } static int it913x_read_reg_u8(struct it913x_fe_state *state, u32 reg) { int ret; u8 b[1]; ret = it913x_read_reg(state, reg, &b[0], sizeof(b)); return (ret < 0) ? 
-ENODEV : b[0]; } static int it913x_write(struct it913x_fe_state *state, u8 pro, u32 reg, u8 buf[], u8 count) { u8 b[256]; struct i2c_msg msg[1] = { { .addr = state->i2c_addr + (pro << 1), .flags = 0, .buf = b, .len = count + 4 } }; int ret; b[0] = (u8) reg >> 24; b[1] = (u8)(reg >> 16) & 0xff; b[2] = (u8)(reg >> 8) & 0xff; b[3] = (u8) reg & 0xff; memcpy(&b[4], buf, count); ret = i2c_transfer(state->i2c_adap, msg, 1); if (ret < 0) return -EIO; return 0; } static int it913x_write_reg(struct it913x_fe_state *state, u8 pro, u32 reg, u32 data) { int ret; u8 b[4]; u8 s; b[0] = data >> 24; b[1] = (data >> 16) & 0xff; b[2] = (data >> 8) & 0xff; b[3] = data & 0xff; /* expand write as needed */ if (data < 0x100) s = 3; else if (data < 0x1000) s = 2; else if (data < 0x100000) s = 1; else s = 0; ret = it913x_write(state, pro, reg, &b[s], sizeof(b) - s); return ret; } static int it913x_fe_script_loader(struct it913x_fe_state *state, struct it913xset *loadscript) { int ret, i; if (loadscript == NULL) return -EINVAL; for (i = 0; i < 1000; ++i) { if (loadscript[i].pro == 0xff) break; ret = it913x_write(state, loadscript[i].pro, loadscript[i].address, loadscript[i].reg, loadscript[i].count); if (ret < 0) return -ENODEV; } return 0; } static int it913x_init_tuner(struct it913x_fe_state *state) { int ret, i, reg; u8 val, nv_val; u8 nv[] = {48, 32, 24, 16, 12, 8, 6, 4, 2}; u8 b[2]; reg = it913x_read_reg_u8(state, 0xec86); switch (reg) { case 0: state->tun_clk_mode = reg; state->tun_xtal = 2000; state->tun_fdiv = 3; val = 16; break; case -ENODEV: return -ENODEV; case 1: default: state->tun_clk_mode = reg; state->tun_xtal = 640; state->tun_fdiv = 1; val = 6; break; } reg = it913x_read_reg_u8(state, 0xed03); if (reg < 0) return -ENODEV; else if (reg < ARRAY_SIZE(nv)) nv_val = nv[reg]; else nv_val = 2; for (i = 0; i < 50; i++) { ret = it913x_read_reg(state, 0xed23, &b[0], sizeof(b)); reg = (b[1] << 8) + b[0]; if (reg > 0) break; if (ret < 0) return -ENODEV; udelay(2000); } 
state->tun_fn_min = state->tun_xtal * reg; state->tun_fn_min /= (state->tun_fdiv * nv_val); deb_info("Tuner fn_min %d", state->tun_fn_min); if (state->config->chip_ver > 1) msleep(50); else { for (i = 0; i < 50; i++) { reg = it913x_read_reg_u8(state, 0xec82); if (reg > 0) break; if (reg < 0) return -ENODEV; udelay(2000); } } return it913x_write_reg(state, PRO_DMOD, 0xed81, val); } static int it9137_set_tuner(struct it913x_fe_state *state, u32 bandwidth, u32 frequency_m) { struct it913xset *set_tuner = set_it9137_template; int ret, reg; u32 frequency = frequency_m / 1000; u32 freq, temp_f, tmp; u16 iqik_m_cal; u16 n_div; u8 n; u8 l_band; u8 lna_band; u8 bw; if (state->config->firmware_ver == 1) set_tuner = set_it9135_template; else set_tuner = set_it9137_template; deb_info("Tuner Frequency %d Bandwidth %d", frequency, bandwidth); if (frequency >= 51000 && frequency <= 440000) { l_band = 0; lna_band = 0; } else if (frequency > 440000 && frequency <= 484000) { l_band = 1; lna_band = 1; } else if (frequency > 484000 && frequency <= 533000) { l_band = 1; lna_band = 2; } else if (frequency > 533000 && frequency <= 587000) { l_band = 1; lna_band = 3; } else if (frequency > 587000 && frequency <= 645000) { l_band = 1; lna_band = 4; } else if (frequency > 645000 && frequency <= 710000) { l_band = 1; lna_band = 5; } else if (frequency > 710000 && frequency <= 782000) { l_band = 1; lna_band = 6; } else if (frequency > 782000 && frequency <= 860000) { l_band = 1; lna_band = 7; } else if (frequency > 1450000 && frequency <= 1492000) { l_band = 1; lna_band = 0; } else if (frequency > 1660000 && frequency <= 1685000) { l_band = 1; lna_band = 1; } else return -EINVAL; set_tuner[0].reg[0] = lna_band; switch (bandwidth) { case 5000000: bw = 0; break; case 6000000: bw = 2; break; case 7000000: bw = 4; break; default: case 8000000: bw = 6; break; } set_tuner[1].reg[0] = bw; set_tuner[2].reg[0] = 0xa0 | (l_band << 3); if (frequency > 53000 && frequency <= 74000) { n_div = 48; n = 0; } 
else if (frequency > 74000 && frequency <= 111000) { n_div = 32; n = 1; } else if (frequency > 111000 && frequency <= 148000) { n_div = 24; n = 2; } else if (frequency > 148000 && frequency <= 222000) { n_div = 16; n = 3; } else if (frequency > 222000 && frequency <= 296000) { n_div = 12; n = 4; } else if (frequency > 296000 && frequency <= 445000) { n_div = 8; n = 5; } else if (frequency > 445000 && frequency <= state->tun_fn_min) { n_div = 6; n = 6; } else if (frequency > state->tun_fn_min && frequency <= 950000) { n_div = 4; n = 7; } else if (frequency > 1450000 && frequency <= 1680000) { n_div = 2; n = 0; } else return -EINVAL; reg = it913x_read_reg_u8(state, 0xed81); iqik_m_cal = (u16)reg * n_div; if (reg < 0x20) { if (state->tun_clk_mode == 0) iqik_m_cal = (iqik_m_cal * 9) >> 5; else iqik_m_cal >>= 1; } else { iqik_m_cal = 0x40 - iqik_m_cal; if (state->tun_clk_mode == 0) iqik_m_cal = ~((iqik_m_cal * 9) >> 5); else iqik_m_cal = ~(iqik_m_cal >> 1); } temp_f = frequency * (u32)n_div * (u32)state->tun_fdiv; freq = temp_f / state->tun_xtal; tmp = freq * state->tun_xtal; if ((temp_f - tmp) >= (state->tun_xtal >> 1)) freq++; freq += (u32) n << 13; /* Frequency OMEGA_IQIK_M_CAL_MID*/ temp_f = freq + (u32)iqik_m_cal; set_tuner[3].reg[0] = temp_f & 0xff; set_tuner[4].reg[0] = (temp_f >> 8) & 0xff; deb_info("High Frequency = %04x", temp_f); /* Lower frequency */ set_tuner[5].reg[0] = freq & 0xff; set_tuner[6].reg[0] = (freq >> 8) & 0xff; deb_info("low Frequency = %04x", freq); ret = it913x_fe_script_loader(state, set_tuner); return (ret < 0) ? 
-ENODEV : 0; } static int it913x_fe_select_bw(struct it913x_fe_state *state, u32 bandwidth, u32 adcFrequency) { int ret, i; u8 buffer[256]; u32 coeff[8]; u16 bfsfcw_fftinx_ratio; u16 fftinx_bfsfcw_ratio; u8 count; u8 bw; u8 adcmultiplier; deb_info("Bandwidth %d Adc %d", bandwidth, adcFrequency); switch (bandwidth) { case 5000000: bw = 3; break; case 6000000: bw = 0; break; case 7000000: bw = 1; break; default: case 8000000: bw = 2; break; } ret = it913x_write_reg(state, PRO_DMOD, REG_BW, bw); if (state->table == NULL) return -EINVAL; /* In write order */ coeff[0] = state->table[bw].coeff_1_2048; coeff[1] = state->table[bw].coeff_2_2k; coeff[2] = state->table[bw].coeff_1_8191; coeff[3] = state->table[bw].coeff_1_8192; coeff[4] = state->table[bw].coeff_1_8193; coeff[5] = state->table[bw].coeff_2_8k; coeff[6] = state->table[bw].coeff_1_4096; coeff[7] = state->table[bw].coeff_2_4k; bfsfcw_fftinx_ratio = state->table[bw].bfsfcw_fftinx_ratio; fftinx_bfsfcw_ratio = state->table[bw].fftinx_bfsfcw_ratio; /* ADC multiplier */ ret = it913x_read_reg_u8(state, ADC_X_2); if (ret < 0) return -EINVAL; adcmultiplier = ret; count = 0; /* Build Buffer for COEFF Registers */ for (i = 0; i < 8; i++) { if (adcmultiplier == 1) coeff[i] /= 2; buffer[count++] = (coeff[i] >> 24) & 0x3; buffer[count++] = (coeff[i] >> 16) & 0xff; buffer[count++] = (coeff[i] >> 8) & 0xff; buffer[count++] = coeff[i] & 0xff; } /* bfsfcw_fftinx_ratio register 0x21-0x22 */ buffer[count++] = bfsfcw_fftinx_ratio & 0xff; buffer[count++] = (bfsfcw_fftinx_ratio >> 8) & 0xff; /* fftinx_bfsfcw_ratio register 0x23-0x24 */ buffer[count++] = fftinx_bfsfcw_ratio & 0xff; buffer[count++] = (fftinx_bfsfcw_ratio >> 8) & 0xff; /* start at COEFF_1_2048 and write through to fftinx_bfsfcw_ratio*/ ret = it913x_write(state, PRO_DMOD, COEFF_1_2048, buffer, count); for (i = 0; i < 42; i += 8) debug_data_snipet(0x1, "Buffer", &buffer[i]); return ret; } static int it913x_fe_read_status(struct dvb_frontend *fe, fe_status_t *status) { 
/* it913x_fe_read_status() body: two-phase lock detection. Phase 1 (no cached status): probe EMPTY_CHANNEL_STATUS, then poll MP2IF_SYNC_LK up to 40 times (25 ms apart) for carrier/viterbi/sync; the result is cached in state->it913x_status. Phase 2 (sync cached): check TPSD_LOCK for full lock, and mirror the lock state onto GPIOH3 (LED) when it changes. Always returns 0; register-read errors are treated as "no lock". */ struct it913x_fe_state *state = fe->demodulator_priv; int ret, i; fe_status_t old_status = state->it913x_status; *status = 0; if (state->it913x_status == 0) { ret = it913x_read_reg_u8(state, EMPTY_CHANNEL_STATUS); if (ret == 0x1) { *status |= FE_HAS_SIGNAL; for (i = 0; i < 40; i++) { ret = it913x_read_reg_u8(state, MP2IF_SYNC_LK); if (ret == 0x1) break; msleep(25); } if (ret == 0x1) *status |= FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC; state->it913x_status = *status; } } if (state->it913x_status & FE_HAS_SYNC) { ret = it913x_read_reg_u8(state, TPSD_LOCK); if (ret == 0x1) *status |= FE_HAS_LOCK | state->it913x_status; else state->it913x_status = 0; if (old_status != state->it913x_status) ret = it913x_write_reg(state, PRO_LINK, GPIOH3_O, ret); } return 0; } /* FEC values based on fe_code_rate_t non supported values 0*/ /* Per-constellation FEC offset tables, indexed by fe_code_rate_t (index 0 and 4 unused, hence the 0 entries). Units appear to be dB-scale offsets — TODO confirm against the IT913x datasheet. */ int it913x_qpsk_pval[] = {0, -93, -91, -90, 0, -89, -88}; int it913x_16qam_pval[] = {0, -87, -85, -84, 0, -83, -82}; int it913x_64qam_pval[] = {0, -82, -80, -78, 0, -77, -76}; /* Derive a 0..100 signal-strength figure from the VAR_P_INBAND register, corrected by a band-dependent LNA gain offset and the FEC/modulation tables above. Returns the strength value, or a negative errno on read failure / unsupported modulation or code rate. */ static int it913x_get_signal_strength(struct dvb_frontend *fe) { struct dtv_frontend_properties *p = &fe->dtv_property_cache; struct it913x_fe_state *state = fe->demodulator_priv; u8 code_rate; int ret, temp; u8 lna_gain_os; ret = it913x_read_reg_u8(state, VAR_P_INBAND); if (ret < 0) return ret; /* VHF/UHF gain offset */ if (state->frequency < 300000000) lna_gain_os = 7; else lna_gain_os = 14; temp = (ret - 100) - lna_gain_os; if (state->priority == PRIORITY_HIGH) code_rate = p->code_rate_HP; else code_rate = p->code_rate_LP; if (code_rate >= ARRAY_SIZE(it913x_qpsk_pval)) return -EINVAL; deb_info("Reg VAR_P_INBAND:%d Calc Offset Value:%d", ret, temp); /* Apply FEC offset values*/ switch (p->modulation) { case QPSK: temp -= it913x_qpsk_pval[code_rate]; break; case QAM_16: temp -= it913x_16qam_pval[code_rate]; break; case QAM_64: temp -= it913x_64qam_pval[code_rate]; break; default: return -EINVAL; } /* Piecewise-linear map of the corrected reading onto 0..100. */ if (temp < -15) ret = 0; else if ((-15 <= temp) && (temp < 0)) ret = (2 * 
(temp + 15)) / 3; else if ((0 <= temp) && (temp < 20)) ret = 4 * temp + 10; else if ((20 <= temp) && (temp < 35)) ret = (2 * (temp - 20)) / 3 + 90; else if (temp >= 35) ret = 100; deb_info("Signal Strength :%d", ret); return ret; } static int it913x_fe_read_signal_strength(struct dvb_frontend *fe, u16 *strength) { struct it913x_fe_state *state = fe->demodulator_priv; int ret = 0; if (state->config->read_slevel) { if (state->it913x_status & FE_HAS_SIGNAL) ret = it913x_read_reg_u8(state, SIGNAL_LEVEL); } else ret = it913x_get_signal_strength(fe); if (ret >= 0) *strength = (u16)((u32)ret * 0xffff / 0x64); return (ret < 0) ? -ENODEV : 0; } static int it913x_fe_read_snr(struct dvb_frontend *fe, u16 *snr) { struct it913x_fe_state *state = fe->demodulator_priv; int ret; u8 reg[3]; u32 snr_val, snr_min, snr_max; u32 temp; ret = it913x_read_reg(state, 0x2c, reg, sizeof(reg)); snr_val = (u32)(reg[2] << 16) | (reg[1] << 8) | reg[0]; ret |= it913x_read_reg(state, 0xf78b, reg, 1); if (reg[0]) snr_val /= reg[0]; if (state->transmission_mode == TRANSMISSION_MODE_2K) snr_val *= 4; else if (state->transmission_mode == TRANSMISSION_MODE_4K) snr_val *= 2; if (state->constellation == QPSK) { snr_min = 0xb4711; snr_max = 0x191451; } else if (state->constellation == QAM_16) { snr_min = 0x4f0d5; snr_max = 0xc7925; } else if (state->constellation == QAM_64) { snr_min = 0x256d0; snr_max = 0x626be; } else return -EINVAL; if (snr_val < snr_min) *snr = 0; else if (snr_val < snr_max) { temp = (snr_val - snr_min) >> 5; temp *= 0xffff; temp /= (snr_max - snr_min) >> 5; *snr = (u16)temp; } else *snr = 0xffff; return (ret < 0) ? 
-ENODEV : 0; } /* Report the pre-Viterbi bit error rate. Side effect: the aborted-packet count (bytes 0-1 of the 5-byte read) is also folded into the running state->ucblocks total. NOTE(review): the it913x_read_reg() return value is ignored here, so an I/O failure silently yields a BER built from stale/garbage bytes — consider propagating it like read_ucblocks does. */ static int it913x_fe_read_ber(struct dvb_frontend *fe, u32 *ber) { struct it913x_fe_state *state = fe->demodulator_priv; u8 reg[5]; /* Read Aborted Packets and Pre-Viterbi error rate 5 bytes */ it913x_read_reg(state, RSD_ABORT_PKT_LSB, reg, sizeof(reg)); state->ucblocks += (u32)(reg[1] << 8) | reg[0]; *ber = (u32)(reg[4] << 16) | (reg[3] << 8) | reg[2]; return 0; } /* Accumulate the 16-bit aborted-packet counter into state->ucblocks and report the running total. Returns the underlying register-read status. */ static int it913x_fe_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks) { struct it913x_fe_state *state = fe->demodulator_priv; int ret; u8 reg[2]; /* Aborted Packets */ ret = it913x_read_reg(state, RSD_ABORT_PKT_LSB, reg, sizeof(reg)); state->ucblocks += (u32)(reg[1] << 8) | reg[0]; *ucblocks = state->ucblocks; return ret; } /* Populate the property cache from the autodetected TPS parameters (8 bytes starting at REG_TPSD_TX_MODE). Out-of-range register values leave the corresponding property untouched; the fe_con/fe_mode/fe_gi/fe_hi/fe_code lookup tables are defined elsewhere in this file. */ static int it913x_fe_get_frontend(struct dvb_frontend *fe) { struct dtv_frontend_properties *p = &fe->dtv_property_cache; struct it913x_fe_state *state = fe->demodulator_priv; u8 reg[8]; it913x_read_reg(state, REG_TPSD_TX_MODE, reg, sizeof(reg)); if (reg[3] < 3) p->modulation = fe_con[reg[3]]; if (reg[0] < 3) p->transmission_mode = fe_mode[reg[0]]; if (reg[1] < 4) p->guard_interval = fe_gi[reg[1]]; if (reg[2] < 4) p->hierarchy = fe_hi[reg[2]]; state->priority = reg[5]; p->code_rate_HP = (reg[6] < 6) ? fe_code[reg[6]] : FEC_NONE; p->code_rate_LP = (reg[7] < 6) ? 
fe_code[reg[7]] : FEC_NONE; /* Update internal state to reflect the autodetected props */ state->constellation = p->modulation; state->transmission_mode = p->transmission_mode; return 0; } static int it913x_fe_set_frontend(struct dvb_frontend *fe) { struct dtv_frontend_properties *p = &fe->dtv_property_cache; struct it913x_fe_state *state = fe->demodulator_priv; int i; u8 empty_ch, last_ch; state->it913x_status = 0; /* Set bw*/ it913x_fe_select_bw(state, p->bandwidth_hz, state->adcFrequency); /* Training Mode Off */ it913x_write_reg(state, PRO_LINK, TRAINING_MODE, 0x0); /* Clear Empty Channel */ it913x_write_reg(state, PRO_DMOD, EMPTY_CHANNEL_STATUS, 0x0); /* Clear bits */ it913x_write_reg(state, PRO_DMOD, MP2IF_SYNC_LK, 0x0); /* LED on */ it913x_write_reg(state, PRO_LINK, GPIOH3_O, 0x1); /* Select Band*/ if ((p->frequency >= 51000000) && (p->frequency <= 230000000)) i = 0; else if ((p->frequency >= 350000000) && (p->frequency <= 900000000)) i = 1; else if ((p->frequency >= 1450000000) && (p->frequency <= 1680000000)) i = 2; else return -EOPNOTSUPP; it913x_write_reg(state, PRO_DMOD, FREE_BAND, i); deb_info("Frontend Set Tuner Type %02x", state->tuner_type); switch (state->tuner_type) { case IT9135_38: case IT9135_51: case IT9135_52: case IT9135_60: case IT9135_61: case IT9135_62: it9137_set_tuner(state, p->bandwidth_hz, p->frequency); break; default: if (fe->ops.tuner_ops.set_params) { fe->ops.tuner_ops.set_params(fe); if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0); } break; } /* LED off */ it913x_write_reg(state, PRO_LINK, GPIOH3_O, 0x0); /* Trigger ofsm */ it913x_write_reg(state, PRO_DMOD, TRIGGER_OFSM, 0x0); last_ch = 2; for (i = 0; i < 40; ++i) { empty_ch = it913x_read_reg_u8(state, EMPTY_CHANNEL_STATUS); if (last_ch == 1 && empty_ch == 1) break; if (last_ch == 2 && empty_ch == 2) return 0; last_ch = empty_ch; msleep(25); } for (i = 0; i < 40; ++i) { if (it913x_read_reg_u8(state, D_TPSD_LOCK) == 1) break; msleep(25); } state->frequency = p->frequency; 
return 0; } /* Suspend the demod: raise SUSPEND_FLAG, poll (up to 128 reads) until the flag reads back 0, then power down the AFE, turn the LED off and run the tuner-off script. Returns 0 on success, -ENODEV on any I/O failure. */ static int it913x_fe_suspend(struct it913x_fe_state *state) { int ret, i; u8 b; ret = it913x_write_reg(state, PRO_DMOD, SUSPEND_FLAG, 0x1); ret |= it913x_write_reg(state, PRO_DMOD, TRIGGER_OFSM, 0x0); for (i = 0; i < 128; i++) { ret = it913x_read_reg(state, SUSPEND_FLAG, &b, 1); if (ret < 0) return -ENODEV; if (b == 0) break; } ret |= it913x_write_reg(state, PRO_DMOD, AFE_MEM0, 0x8); /* Turn LED off */ ret |= it913x_write_reg(state, PRO_LINK, GPIOH3_O, 0x0); ret |= it913x_fe_script_loader(state, it9137_tuner_off); return (ret < 0) ? -ENODEV : 0; } /* Power sequence */ /* Power Up Tuner on -> Frontend suspend off -> Tuner clk on */ /* Power Down Frontend suspend on -> Tuner clk off -> Tuner off */ static int it913x_fe_sleep(struct dvb_frontend *fe) { struct it913x_fe_state *state = fe->demodulator_priv; return it913x_fe_suspend(state); } /* Fixed-point long division: returns approximately (a / b) scaled by 2^x, i.e. the quotient with x fractional bits. Assumes b != 0 and that the result fits in 32 bits — TODO confirm callers guarantee this (here b is 1000000 and x is 19). */ static u32 compute_div(u32 a, u32 b, u32 x) { u32 res = 0; u32 c = 0; u32 i = 0; if (a > b) { c = a / b; a = a - c * b; } for (i = 0; i < x; i++) { if (a >= b) { res += 1; a -= b; } a <<= 1; res <<= 1; } res = (c << x) + res; return res; } /* Bring the frontend up: select crystal/ADC clocks from the ADF strap (fe_clockTable), program clock registers, run the version- and tuner- specific init scripts, then leave the chip suspended until first use. NOTE(review): the it913x_init_tuner() result in the chip_ver == 1 branch is discarded (ret is unconditionally reassigned just below). */ static int it913x_fe_start(struct it913x_fe_state *state) { struct it913xset *set_lna; struct it913xset *set_mode; int ret; u8 adf = (state->config->adf & 0xf); u32 adc, xtal; u8 b[4]; if (state->config->chip_ver == 1) ret = it913x_init_tuner(state); info("ADF table value :%02x", adf); if (adf < 10) { state->crystalFrequency = fe_clockTable[adf].xtal ; state->table = fe_clockTable[adf].table; state->adcFrequency = state->table->adcFrequency; adc = compute_div(state->adcFrequency, 1000000ul, 19ul); xtal = compute_div(state->crystalFrequency, 1000000ul, 19ul); } else return -EINVAL; /* Set LED indicator on GPIOH3 */ ret = it913x_write_reg(state, PRO_LINK, GPIOH3_EN, 0x1); ret |= it913x_write_reg(state, PRO_LINK, GPIOH3_ON, 0x1); ret |= it913x_write_reg(state, PRO_LINK, GPIOH3_O, 0x1); ret |= it913x_write_reg(state, PRO_LINK, 0xf641, state->tuner_type); ret |= it913x_write_reg(state, 
PRO_DMOD, 0xf5ca, 0x01); ret |= it913x_write_reg(state, PRO_DMOD, 0xf715, 0x01); b[0] = xtal & 0xff; b[1] = (xtal >> 8) & 0xff; b[2] = (xtal >> 16) & 0xff; b[3] = (xtal >> 24); ret |= it913x_write(state, PRO_DMOD, XTAL_CLK, b , 4); b[0] = adc & 0xff; b[1] = (adc >> 8) & 0xff; b[2] = (adc >> 16) & 0xff; ret |= it913x_write(state, PRO_DMOD, ADC_FREQ, b, 3); if (state->config->adc_x2) ret |= it913x_write_reg(state, PRO_DMOD, ADC_X_2, 0x01); b[0] = 0; b[1] = 0; b[2] = 0; ret |= it913x_write(state, PRO_DMOD, 0x0029, b, 3); info("Crystal Frequency :%d Adc Frequency :%d ADC X2: %02x", state->crystalFrequency, state->adcFrequency, state->config->adc_x2); deb_info("Xtal value :%04x Adc value :%04x", xtal, adc); if (ret < 0) return -ENODEV; /* v1 or v2 tuner script */ if (state->config->chip_ver > 1) ret = it913x_fe_script_loader(state, it9135_v2); else ret = it913x_fe_script_loader(state, it9135_v1); if (ret < 0) return ret; /* LNA Scripts */ switch (state->tuner_type) { case IT9135_51: set_lna = it9135_51; break; case IT9135_52: set_lna = it9135_52; break; case IT9135_60: set_lna = it9135_60; break; case IT9135_61: set_lna = it9135_61; break; case IT9135_62: set_lna = it9135_62; break; case IT9135_38: default: set_lna = it9135_38; } info("Tuner LNA type :%02x", state->tuner_type); ret = it913x_fe_script_loader(state, set_lna); if (ret < 0) return ret; if (state->config->chip_ver == 2) { ret = it913x_write_reg(state, PRO_DMOD, TRIGGER_OFSM, 0x1); ret |= it913x_write_reg(state, PRO_LINK, PADODPU, 0x0); ret |= it913x_write_reg(state, PRO_LINK, AGC_O_D, 0x0); ret |= it913x_init_tuner(state); } if (ret < 0) return -ENODEV; /* Always solo frontend */ set_mode = set_solo_fe; ret |= it913x_fe_script_loader(state, set_mode); ret |= it913x_fe_suspend(state); return (ret < 0) ? 
-ENODEV : 0; } static int it913x_fe_init(struct dvb_frontend *fe) { struct it913x_fe_state *state = fe->demodulator_priv; int ret = 0; /* Power Up Tuner - common all versions */ ret = it913x_write_reg(state, PRO_DMOD, 0xec40, 0x1); ret |= it913x_fe_script_loader(state, init_1); ret |= it913x_write_reg(state, PRO_DMOD, AFE_MEM0, 0x0); ret |= it913x_write_reg(state, PRO_DMOD, 0xfba8, 0x0); return (ret < 0) ? -ENODEV : 0; } static void it913x_fe_release(struct dvb_frontend *fe) { struct it913x_fe_state *state = fe->demodulator_priv; kfree(state); } static struct dvb_frontend_ops it913x_fe_ofdm_ops; struct dvb_frontend *it913x_fe_attach(struct i2c_adapter *i2c_adap, u8 i2c_addr, struct ite_config *config) { struct it913x_fe_state *state = NULL; int ret; /* allocate memory for the internal state */ state = kzalloc(sizeof(struct it913x_fe_state), GFP_KERNEL); if (state == NULL) return NULL; if (config == NULL) goto error; state->i2c_adap = i2c_adap; state->i2c_addr = i2c_addr; state->config = config; switch (state->config->tuner_id_0) { case IT9135_51: case IT9135_52: case IT9135_60: case IT9135_61: case IT9135_62: state->tuner_type = state->config->tuner_id_0; break; default: case IT9135_38: state->tuner_type = IT9135_38; } ret = it913x_fe_start(state); if (ret < 0) goto error; /* create dvb_frontend */ memcpy(&state->frontend.ops, &it913x_fe_ofdm_ops, sizeof(struct dvb_frontend_ops)); state->frontend.demodulator_priv = state; return &state->frontend; error: kfree(state); return NULL; } EXPORT_SYMBOL(it913x_fe_attach); static struct dvb_frontend_ops it913x_fe_ofdm_ops = { .delsys = { SYS_DVBT }, .info = { .name = "it913x-fe DVB-T", .frequency_min = 51000000, .frequency_max = 1680000000, .frequency_stepsize = 62500, .caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 | FE_CAN_FEC_4_5 | FE_CAN_FEC_5_6 | FE_CAN_FEC_6_7 | FE_CAN_FEC_7_8 | FE_CAN_FEC_8_9 | FE_CAN_FEC_AUTO | FE_CAN_QAM_16 | FE_CAN_QAM_64 | FE_CAN_QAM_AUTO | FE_CAN_TRANSMISSION_MODE_AUTO | 
FE_CAN_GUARD_INTERVAL_AUTO | FE_CAN_HIERARCHY_AUTO, }, .release = it913x_fe_release, .init = it913x_fe_init, .sleep = it913x_fe_sleep, .set_frontend = it913x_fe_set_frontend, .get_frontend = it913x_fe_get_frontend, .read_status = it913x_fe_read_status, .read_signal_strength = it913x_fe_read_signal_strength, .read_snr = it913x_fe_read_snr, .read_ber = it913x_fe_read_ber, .read_ucblocks = it913x_fe_read_ucblocks, }; MODULE_DESCRIPTION("it913x Frontend and it9137 tuner"); MODULE_AUTHOR("Malcolm Priestley tvboxspy@gmail.com"); MODULE_VERSION("1.15"); MODULE_LICENSE("GPL");
gpl-2.0
mohankr/android_kernel_samsung_msm8x60
tools/perf/util/ui/browsers/annotate.c
2788
7340
#include "../browser.h" #include "../helpline.h" #include "../libslang.h" #include "../../annotate.h" #include "../../hist.h" #include "../../sort.h" #include "../../symbol.h" #include <pthread.h> static void ui__error_window(const char *fmt, ...) { va_list ap; va_start(ap, fmt); newtWinMessagev((char *)"Error", (char *)"Ok", (char *)fmt, ap); va_end(ap); } struct annotate_browser { struct ui_browser b; struct rb_root entries; struct rb_node *curr_hot; }; struct objdump_line_rb_node { struct rb_node rb_node; double percent; u32 idx; }; static inline struct objdump_line_rb_node *objdump_line__rb(struct objdump_line *self) { return (struct objdump_line_rb_node *)(self + 1); } static void annotate_browser__write(struct ui_browser *self, void *entry, int row) { struct objdump_line *ol = rb_entry(entry, struct objdump_line, node); bool current_entry = ui_browser__is_current_entry(self, row); int width = self->width; if (ol->offset != -1) { struct objdump_line_rb_node *olrb = objdump_line__rb(ol); ui_browser__set_percent_color(self, olrb->percent, current_entry); slsmg_printf(" %7.2f ", olrb->percent); } else { ui_browser__set_percent_color(self, 0, current_entry); slsmg_write_nstring(" ", 9); } SLsmg_write_char(':'); slsmg_write_nstring(" ", 8); if (!*ol->line) slsmg_write_nstring(" ", width - 18); else slsmg_write_nstring(ol->line, width - 18); if (!current_entry) ui_browser__set_color(self, HE_COLORSET_CODE); } static double objdump_line__calc_percent(struct objdump_line *self, struct symbol *sym, int evidx) { double percent = 0.0; if (self->offset != -1) { int len = sym->end - sym->start; unsigned int hits = 0; struct annotation *notes = symbol__annotation(sym); struct source_line *src_line = notes->src->lines; struct sym_hist *h = annotation__histogram(notes, evidx); s64 offset = self->offset; struct objdump_line *next; next = objdump__get_next_ip_line(&notes->src->source, self); while (offset < (s64)len && (next == NULL || offset < next->offset)) { if (src_line) { 
percent += src_line[offset].percent; } else hits += h->addr[offset]; ++offset; } /* * If the percentage wasn't already calculated in * symbol__get_source_line, do it now: */ if (src_line == NULL && h->sum) percent = 100.0 * hits / h->sum; } return percent; } static void objdump__insert_line(struct rb_root *self, struct objdump_line_rb_node *line) { struct rb_node **p = &self->rb_node; struct rb_node *parent = NULL; struct objdump_line_rb_node *l; while (*p != NULL) { parent = *p; l = rb_entry(parent, struct objdump_line_rb_node, rb_node); if (line->percent < l->percent) p = &(*p)->rb_left; else p = &(*p)->rb_right; } rb_link_node(&line->rb_node, parent, p); rb_insert_color(&line->rb_node, self); } static void annotate_browser__set_top(struct annotate_browser *self, struct rb_node *nd) { struct objdump_line_rb_node *rbpos; struct objdump_line *pos; unsigned back; ui_browser__refresh_dimensions(&self->b); back = self->b.height / 2; rbpos = rb_entry(nd, struct objdump_line_rb_node, rb_node); pos = ((struct objdump_line *)rbpos) - 1; self->b.top_idx = self->b.index = rbpos->idx; while (self->b.top_idx != 0 && back != 0) { pos = list_entry(pos->node.prev, struct objdump_line, node); --self->b.top_idx; --back; } self->b.top = pos; self->curr_hot = nd; } static void annotate_browser__calc_percent(struct annotate_browser *browser, int evidx) { struct symbol *sym = browser->b.priv; struct annotation *notes = symbol__annotation(sym); struct objdump_line *pos; browser->entries = RB_ROOT; pthread_mutex_lock(&notes->lock); list_for_each_entry(pos, &notes->src->source, node) { struct objdump_line_rb_node *rbpos = objdump_line__rb(pos); rbpos->percent = objdump_line__calc_percent(pos, sym, evidx); if (rbpos->percent < 0.01) { RB_CLEAR_NODE(&rbpos->rb_node); continue; } objdump__insert_line(&browser->entries, rbpos); } pthread_mutex_unlock(&notes->lock); browser->curr_hot = rb_last(&browser->entries); } static int annotate_browser__run(struct annotate_browser *self, int evidx, int 
refresh) { struct rb_node *nd = NULL; struct symbol *sym = self->b.priv; /* * RIGHT To allow builtin-annotate to cycle thru multiple symbols by * examining the exit key for this function. */ int exit_keys[] = { 'H', NEWT_KEY_TAB, NEWT_KEY_UNTAB, NEWT_KEY_RIGHT, 0 }; int key; if (ui_browser__show(&self->b, sym->name, "<-, -> or ESC: exit, TAB/shift+TAB: " "cycle hottest lines, H: Hottest") < 0) return -1; ui_browser__add_exit_keys(&self->b, exit_keys); annotate_browser__calc_percent(self, evidx); if (self->curr_hot) annotate_browser__set_top(self, self->curr_hot); nd = self->curr_hot; if (refresh != 0) newtFormSetTimer(self->b.form, refresh); while (1) { key = ui_browser__run(&self->b); if (refresh != 0) { annotate_browser__calc_percent(self, evidx); /* * Current line focus got out of the list of most active * lines, NULL it so that if TAB|UNTAB is pressed, we * move to curr_hot (current hottest line). */ if (nd != NULL && RB_EMPTY_NODE(nd)) nd = NULL; } switch (key) { case -1: /* * FIXME we need to check if it was * es.reason == NEWT_EXIT_TIMER */ if (refresh != 0) symbol__annotate_decay_histogram(sym, evidx); continue; case NEWT_KEY_TAB: if (nd != NULL) { nd = rb_prev(nd); if (nd == NULL) nd = rb_last(&self->entries); } else nd = self->curr_hot; break; case NEWT_KEY_UNTAB: if (nd != NULL) nd = rb_next(nd); if (nd == NULL) nd = rb_first(&self->entries); else nd = self->curr_hot; break; case 'H': nd = self->curr_hot; break; default: goto out; } if (nd != NULL) annotate_browser__set_top(self, nd); } out: ui_browser__hide(&self->b); return key; } int hist_entry__tui_annotate(struct hist_entry *he, int evidx) { return symbol__tui_annotate(he->ms.sym, he->ms.map, evidx, 0); } int symbol__tui_annotate(struct symbol *sym, struct map *map, int evidx, int refresh) { struct objdump_line *pos, *n; struct annotation *notes; struct annotate_browser browser = { .b = { .refresh = ui_browser__list_head_refresh, .seek = ui_browser__list_head_seek, .write = annotate_browser__write, 
.priv = sym, }, }; int ret; if (sym == NULL) return -1; if (map->dso->annotate_warned) return -1; if (symbol__annotate(sym, map, sizeof(struct objdump_line_rb_node)) < 0) { ui__error_window(ui_helpline__last_msg); return -1; } ui_helpline__push("Press <- or ESC to exit"); notes = symbol__annotation(sym); /* Size the browser: widest disassembly line wins, and number each line so the rbtree of hot lines can map back to a browser index. */ list_for_each_entry(pos, &notes->src->source, node) { struct objdump_line_rb_node *rbpos; size_t line_len = strlen(pos->line); if (browser.b.width < line_len) browser.b.width = line_len; rbpos = objdump_line__rb(pos); rbpos->idx = browser.b.nr_entries++; } browser.b.entries = &notes->src->source, /* NOTE(review): comma operator — harmless here, but a semicolon was almost certainly intended */ browser.b.width += 18; /* Percentage */ ret = annotate_browser__run(&browser, evidx, refresh); /* Tear down the annotation lines produced by symbol__annotate(). */ list_for_each_entry_safe(pos, n, &notes->src->source, node) { list_del(&pos->node); objdump_line__free(pos); } return ret; }
gpl-2.0
andip71/boeffla-kernel-oos-oneplus2
drivers/hwmon/sch56xx-common.c
4068
16386
/*************************************************************************** * Copyright (C) 2010-2012 Hans de Goede <hdegoede@redhat.com> * * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * * This program is distributed in the hope that it will be useful, * * but WITHOUT ANY WARRANTY; without even the implied warranty of * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * * * You should have received a copy of the GNU General Public License * * along with this program; if not, write to the * * Free Software Foundation, Inc., * * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * ***************************************************************************/ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/err.h> #include <linux/io.h> #include <linux/acpi.h> #include <linux/delay.h> #include <linux/fs.h> #include <linux/watchdog.h> #include <linux/miscdevice.h> #include <linux/uaccess.h> #include <linux/kref.h> #include <linux/slab.h> #include "sch56xx-common.h" /* Insmod parameters */ static int nowayout = WATCHDOG_NOWAYOUT; module_param(nowayout, int, 0); MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); #define SIO_SCH56XX_LD_EM 0x0C /* Embedded uController Logical Dev */ #define SIO_UNLOCK_KEY 0x55 /* Key to enable Super-I/O */ #define SIO_LOCK_KEY 0xAA /* Key to disable Super-I/O */ #define SIO_REG_LDSEL 0x07 /* Logical device select */ #define SIO_REG_DEVID 0x20 /* Device ID */ #define SIO_REG_ENABLE 0x30 /* Logical device enable */ #define SIO_REG_ADDR 0x66 /* Logical device address (2 bytes) */ #define 
SIO_SCH5627_ID 0xC6 /* Chipset ID */ #define SIO_SCH5636_ID 0xC7 /* Chipset ID */ #define REGION_LENGTH 10 #define SCH56XX_CMD_READ 0x02 #define SCH56XX_CMD_WRITE 0x03 /* Watchdog registers */ #define SCH56XX_REG_WDOG_PRESET 0x58B #define SCH56XX_REG_WDOG_CONTROL 0x58C #define SCH56XX_WDOG_TIME_BASE_SEC 0x01 #define SCH56XX_REG_WDOG_OUTPUT_ENABLE 0x58E #define SCH56XX_WDOG_OUTPUT_ENABLE 0x02 struct sch56xx_watchdog_data { u16 addr; struct mutex *io_lock; struct kref kref; struct watchdog_info wdinfo; struct watchdog_device wddev; u8 watchdog_preset; u8 watchdog_control; u8 watchdog_output_enable; }; static struct platform_device *sch56xx_pdev; /* Super I/O functions */ static inline int superio_inb(int base, int reg) { outb(reg, base); return inb(base + 1); } static inline int superio_enter(int base) { /* Don't step on other drivers' I/O space by accident */ if (!request_muxed_region(base, 2, "sch56xx")) { pr_err("I/O address 0x%04x already in use\n", base); return -EBUSY; } outb(SIO_UNLOCK_KEY, base); return 0; } static inline void superio_select(int base, int ld) { outb(SIO_REG_LDSEL, base); outb(ld, base + 1); } static inline void superio_exit(int base) { outb(SIO_LOCK_KEY, base); release_region(base, 2); } static int sch56xx_send_cmd(u16 addr, u8 cmd, u16 reg, u8 v) { u8 val; int i; /* * According to SMSC for the commands we use the maximum time for * the EM to respond is 15 ms, but testing shows in practice it * responds within 15-32 reads, so we first busy poll, and if * that fails sleep a bit and try again until we are way past * the 15 ms maximum response time. 
*/ const int max_busy_polls = 64; const int max_lazy_polls = 32; /* (Optional) Write-Clear the EC to Host Mailbox Register */ val = inb(addr + 1); outb(val, addr + 1); /* Set Mailbox Address Pointer to first location in Region 1 */ outb(0x00, addr + 2); outb(0x80, addr + 3); /* Write Request Packet Header */ outb(cmd, addr + 4); /* VREG Access Type read:0x02 write:0x03 */ outb(0x01, addr + 5); /* # of Entries: 1 Byte (8-bit) */ outb(0x04, addr + 2); /* Mailbox AP to first data entry loc. */ /* Write Value field */ if (cmd == SCH56XX_CMD_WRITE) outb(v, addr + 4); /* Write Address field */ outb(reg & 0xff, addr + 6); outb(reg >> 8, addr + 7); /* Execute the Random Access Command */ outb(0x01, addr); /* Write 01h to the Host-to-EC register */ /* EM Interface Polling "Algorithm" */ for (i = 0; i < max_busy_polls + max_lazy_polls; i++) { if (i >= max_busy_polls) msleep(1); /* Read Interrupt source Register */ val = inb(addr + 8); /* Write Clear the interrupt source bits */ if (val) outb(val, addr + 8); /* Command Completed ? */ if (val & 0x01) break; } if (i == max_busy_polls + max_lazy_polls) { pr_err("Max retries exceeded reading virtual register 0x%04hx (%d)\n", reg, 1); return -EIO; } /* * According to SMSC we may need to retry this, but sofar I've always * seen this succeed in 1 try. */ for (i = 0; i < max_busy_polls; i++) { /* Read EC-to-Host Register */ val = inb(addr + 1); /* Command Completed ? */ if (val == 0x01) break; if (i == 0) pr_warn("EC reports: 0x%02x reading virtual register 0x%04hx\n", (unsigned int)val, reg); } if (i == max_busy_polls) { pr_err("Max retries exceeded reading virtual register 0x%04hx (%d)\n", reg, 2); return -EIO; } /* * According to the SMSC app note we should now do: * * Set Mailbox Address Pointer to first location in Region 1 * * outb(0x00, addr + 2); * outb(0x80, addr + 3); * * But if we do that things don't work, so let's not. 
*/ /* Read Value field */ if (cmd == SCH56XX_CMD_READ) return inb(addr + 4); return 0; } int sch56xx_read_virtual_reg(u16 addr, u16 reg) { return sch56xx_send_cmd(addr, SCH56XX_CMD_READ, reg, 0); } EXPORT_SYMBOL(sch56xx_read_virtual_reg); int sch56xx_write_virtual_reg(u16 addr, u16 reg, u8 val) { return sch56xx_send_cmd(addr, SCH56XX_CMD_WRITE, reg, val); } EXPORT_SYMBOL(sch56xx_write_virtual_reg); int sch56xx_read_virtual_reg16(u16 addr, u16 reg) { int lsb, msb; /* Read LSB first, this will cause the matching MSB to be latched */ lsb = sch56xx_read_virtual_reg(addr, reg); if (lsb < 0) return lsb; msb = sch56xx_read_virtual_reg(addr, reg + 1); if (msb < 0) return msb; return lsb | (msb << 8); } EXPORT_SYMBOL(sch56xx_read_virtual_reg16); int sch56xx_read_virtual_reg12(u16 addr, u16 msb_reg, u16 lsn_reg, int high_nibble) { int msb, lsn; /* Read MSB first, this will cause the matching LSN to be latched */ msb = sch56xx_read_virtual_reg(addr, msb_reg); if (msb < 0) return msb; lsn = sch56xx_read_virtual_reg(addr, lsn_reg); if (lsn < 0) return lsn; if (high_nibble) return (msb << 4) | (lsn >> 4); else return (msb << 4) | (lsn & 0x0f); } EXPORT_SYMBOL(sch56xx_read_virtual_reg12); /* * Watchdog routines */ /* Release our data struct when we're unregistered *and* all references to our watchdog device are released */ static void watchdog_release_resources(struct kref *r) { struct sch56xx_watchdog_data *data = container_of(r, struct sch56xx_watchdog_data, kref); kfree(data); } static int watchdog_set_timeout(struct watchdog_device *wddev, unsigned int timeout) { struct sch56xx_watchdog_data *data = watchdog_get_drvdata(wddev); unsigned int resolution; u8 control; int ret; /* 1 second or 60 second resolution? 
*/ if (timeout <= 255) resolution = 1; else resolution = 60; if (timeout < resolution || timeout > (resolution * 255)) return -EINVAL; /* Only touch the control register when the time base actually changes, under the shared io_lock. */ if (resolution == 1) control = data->watchdog_control | SCH56XX_WDOG_TIME_BASE_SEC; else control = data->watchdog_control & ~SCH56XX_WDOG_TIME_BASE_SEC; if (data->watchdog_control != control) { mutex_lock(data->io_lock); ret = sch56xx_write_virtual_reg(data->addr, SCH56XX_REG_WDOG_CONTROL, control); mutex_unlock(data->io_lock); if (ret) return ret; data->watchdog_control = control; } /* * Remember new timeout value, but do not write as that (re)starts * the watchdog countdown. */ data->watchdog_preset = DIV_ROUND_UP(timeout, resolution); wddev->timeout = data->watchdog_preset * resolution; return 0; } /* Arm the watchdog: write the cached preset (restarting the countdown), set the output-enable bit so expiry resets the system, and clear any stale watchdog event bit. Runs under io_lock; returns 0 or a negative errno from the virtual-register writes. */ static int watchdog_start(struct watchdog_device *wddev) { struct sch56xx_watchdog_data *data = watchdog_get_drvdata(wddev); int ret; u8 val; /* * The sch56xx's watchdog cannot really be started / stopped * it is always running, but we can avoid the timer expiring * from causing a system reset by clearing the output enable bit. * * The sch56xx's watchdog will set the watchdog event bit, bit 0 * of the second interrupt source register (at base-address + 9), * when the timer expires. * * This will only cause a system reset if the 0-1 flank happens when * output enable is true. Setting output enable after the flank will * not cause a reset, nor will the timer expiring a second time. * This means we must clear the watchdog event bit in case it is set. * * The timer may still be running (after a recent watchdog_stop) and * mere milliseconds away from expiring, so the timer must be reset * first! */ mutex_lock(data->io_lock); /* 1. Reset the watchdog countdown counter */ ret = sch56xx_write_virtual_reg(data->addr, SCH56XX_REG_WDOG_PRESET, data->watchdog_preset); if (ret) goto leave; /* 2. 
Enable output */ val = data->watchdog_output_enable | SCH56XX_WDOG_OUTPUT_ENABLE; ret = sch56xx_write_virtual_reg(data->addr, SCH56XX_REG_WDOG_OUTPUT_ENABLE, val); if (ret) goto leave; data->watchdog_output_enable = val; /* 3. Clear the watchdog event bit if set */ val = inb(data->addr + 9); if (val & 0x01) outb(0x01, data->addr + 9); leave: mutex_unlock(data->io_lock); return ret; } static int watchdog_trigger(struct watchdog_device *wddev) { struct sch56xx_watchdog_data *data = watchdog_get_drvdata(wddev); int ret; /* Reset the watchdog countdown counter */ mutex_lock(data->io_lock); ret = sch56xx_write_virtual_reg(data->addr, SCH56XX_REG_WDOG_PRESET, data->watchdog_preset); mutex_unlock(data->io_lock); return ret; } static int watchdog_stop(struct watchdog_device *wddev) { struct sch56xx_watchdog_data *data = watchdog_get_drvdata(wddev); int ret = 0; u8 val; val = data->watchdog_output_enable & ~SCH56XX_WDOG_OUTPUT_ENABLE; mutex_lock(data->io_lock); ret = sch56xx_write_virtual_reg(data->addr, SCH56XX_REG_WDOG_OUTPUT_ENABLE, val); mutex_unlock(data->io_lock); if (ret) return ret; data->watchdog_output_enable = val; return 0; } static void watchdog_ref(struct watchdog_device *wddev) { struct sch56xx_watchdog_data *data = watchdog_get_drvdata(wddev); kref_get(&data->kref); } static void watchdog_unref(struct watchdog_device *wddev) { struct sch56xx_watchdog_data *data = watchdog_get_drvdata(wddev); kref_put(&data->kref, watchdog_release_resources); } static const struct watchdog_ops watchdog_ops = { .owner = THIS_MODULE, .start = watchdog_start, .stop = watchdog_stop, .ping = watchdog_trigger, .set_timeout = watchdog_set_timeout, .ref = watchdog_ref, .unref = watchdog_unref, }; struct sch56xx_watchdog_data *sch56xx_watchdog_register(struct device *parent, u16 addr, u32 revision, struct mutex *io_lock, int check_enabled) { struct sch56xx_watchdog_data *data; int err, control, output_enable; /* Cache the watchdog registers */ mutex_lock(io_lock); control = 
sch56xx_read_virtual_reg(addr, SCH56XX_REG_WDOG_CONTROL); output_enable = sch56xx_read_virtual_reg(addr, SCH56XX_REG_WDOG_OUTPUT_ENABLE); mutex_unlock(io_lock); if (control < 0) return NULL; if (output_enable < 0) return NULL; if (check_enabled && !(output_enable & SCH56XX_WDOG_OUTPUT_ENABLE)) { pr_warn("Watchdog not enabled by BIOS, not registering\n"); return NULL; } data = kzalloc(sizeof(struct sch56xx_watchdog_data), GFP_KERNEL); if (!data) return NULL; data->addr = addr; data->io_lock = io_lock; kref_init(&data->kref); strlcpy(data->wdinfo.identity, "sch56xx watchdog", sizeof(data->wdinfo.identity)); data->wdinfo.firmware_version = revision; data->wdinfo.options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT; if (!nowayout) data->wdinfo.options |= WDIOF_MAGICCLOSE; data->wddev.info = &data->wdinfo; data->wddev.ops = &watchdog_ops; data->wddev.parent = parent; data->wddev.timeout = 60; data->wddev.min_timeout = 1; data->wddev.max_timeout = 255 * 60; if (nowayout) set_bit(WDOG_NO_WAY_OUT, &data->wddev.status); if (output_enable & SCH56XX_WDOG_OUTPUT_ENABLE) set_bit(WDOG_ACTIVE, &data->wddev.status); /* Since the watchdog uses a downcounter there is no register to read the BIOS set timeout from (if any was set at all) -> Choose a preset which will give us a 1 minute timeout */ if (control & SCH56XX_WDOG_TIME_BASE_SEC) data->watchdog_preset = 60; /* seconds */ else data->watchdog_preset = 1; /* minute */ data->watchdog_control = control; data->watchdog_output_enable = output_enable; watchdog_set_drvdata(&data->wddev, data); err = watchdog_register_device(&data->wddev); if (err) { pr_err("Registering watchdog chardev: %d\n", err); kfree(data); return NULL; } return data; } EXPORT_SYMBOL(sch56xx_watchdog_register); void sch56xx_watchdog_unregister(struct sch56xx_watchdog_data *data) { watchdog_unregister_device(&data->wddev); kref_put(&data->kref, watchdog_release_resources); /* Don't touch data after this it may have been free-ed! 
*/ } EXPORT_SYMBOL(sch56xx_watchdog_unregister); /* * platform dev find, add and remove functions */ static int __init sch56xx_find(int sioaddr, const char **name) { u8 devid; unsigned short address; int err; err = superio_enter(sioaddr); if (err) return err; devid = superio_inb(sioaddr, SIO_REG_DEVID); switch (devid) { case SIO_SCH5627_ID: *name = "sch5627"; break; case SIO_SCH5636_ID: *name = "sch5636"; break; default: pr_debug("Unsupported device id: 0x%02x\n", (unsigned int)devid); err = -ENODEV; goto exit; } superio_select(sioaddr, SIO_SCH56XX_LD_EM); if (!(superio_inb(sioaddr, SIO_REG_ENABLE) & 0x01)) { pr_warn("Device not activated\n"); err = -ENODEV; goto exit; } /* * Warning the order of the low / high byte is the other way around * as on most other superio devices!! */ address = superio_inb(sioaddr, SIO_REG_ADDR) | superio_inb(sioaddr, SIO_REG_ADDR + 1) << 8; if (address == 0) { pr_warn("Base address not set\n"); err = -ENODEV; goto exit; } err = address; exit: superio_exit(sioaddr); return err; } static int __init sch56xx_device_add(int address, const char *name) { struct resource res = { .start = address, .end = address + REGION_LENGTH - 1, .flags = IORESOURCE_IO, }; int err; sch56xx_pdev = platform_device_alloc(name, address); if (!sch56xx_pdev) return -ENOMEM; res.name = sch56xx_pdev->name; err = acpi_check_resource_conflict(&res); if (err) goto exit_device_put; err = platform_device_add_resources(sch56xx_pdev, &res, 1); if (err) { pr_err("Device resource addition failed\n"); goto exit_device_put; } err = platform_device_add(sch56xx_pdev); if (err) { pr_err("Device addition failed\n"); goto exit_device_put; } return 0; exit_device_put: platform_device_put(sch56xx_pdev); return err; } static int __init sch56xx_init(void) { int address; const char *name = NULL; address = sch56xx_find(0x4e, &name); if (address < 0) address = sch56xx_find(0x2e, &name); if (address < 0) return address; return sch56xx_device_add(address, name); } static void __exit 
sch56xx_exit(void) { platform_device_unregister(sch56xx_pdev); } MODULE_DESCRIPTION("SMSC SCH56xx Hardware Monitoring Common Code"); MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>"); MODULE_LICENSE("GPL"); module_init(sch56xx_init); module_exit(sch56xx_exit);
gpl-2.0
ParanoidNote/void-kernel
drivers/base/transport_class.c
4836
9551
/* * transport_class.c - implementation of generic transport classes * using attribute_containers * * Copyright (c) 2005 - James Bottomley <James.Bottomley@steeleye.com> * * This file is licensed under GPLv2 * * The basic idea here is to allow any "device controller" (which * would most often be a Host Bus Adapter to use the services of one * or more tranport classes for performing transport specific * services. Transport specific services are things that the generic * command layer doesn't want to know about (speed settings, line * condidtioning, etc), but which the user might be interested in. * Thus, the HBA's use the routines exported by the transport classes * to perform these functions. The transport classes export certain * values to the user via sysfs using attribute containers. * * Note: because not every HBA will care about every transport * attribute, there's a many to one relationship that goes like this: * * transport class<-----attribute container<----class device * * Usually the attribute container is per-HBA, but the design doesn't * mandate that. Although most of the services will be specific to * the actual external storage connection used by the HBA, the generic * transport class is framed entirely in terms of generic devices to * allow it to be used by any physical HBA in the system. */ #include <linux/attribute_container.h> #include <linux/transport_class.h> /** * transport_class_register - register an initial transport class * * @tclass: a pointer to the transport class structure to be initialised * * The transport class contains an embedded class which is used to * identify it. The caller should initialise this structure with * zeros and then generic class must have been initialised with the * actual transport class unique name. There's a macro * DECLARE_TRANSPORT_CLASS() to do this (declared classes still must * be registered). * * Returns 0 on success or error on failure. 
*/ int transport_class_register(struct transport_class *tclass) { return class_register(&tclass->class); } EXPORT_SYMBOL_GPL(transport_class_register); /** * transport_class_unregister - unregister a previously registered class * * @tclass: The transport class to unregister * * Must be called prior to deallocating the memory for the transport * class. */ void transport_class_unregister(struct transport_class *tclass) { class_unregister(&tclass->class); } EXPORT_SYMBOL_GPL(transport_class_unregister); static int anon_transport_dummy_function(struct transport_container *tc, struct device *dev, struct device *cdev) { /* do nothing */ return 0; } /** * anon_transport_class_register - register an anonymous class * * @atc: The anon transport class to register * * The anonymous transport class contains both a transport class and a * container. The idea of an anonymous class is that it never * actually has any device attributes associated with it (and thus * saves on container storage). So it can only be used for triggering * events. Use prezero and then use DECLARE_ANON_TRANSPORT_CLASS() to * initialise the anon transport class storage. */ int anon_transport_class_register(struct anon_transport_class *atc) { int error; atc->container.class = &atc->tclass.class; attribute_container_set_no_classdevs(&atc->container); error = attribute_container_register(&atc->container); if (error) return error; atc->tclass.setup = anon_transport_dummy_function; atc->tclass.remove = anon_transport_dummy_function; return 0; } EXPORT_SYMBOL_GPL(anon_transport_class_register); /** * anon_transport_class_unregister - unregister an anon class * * @atc: Pointer to the anon transport class to unregister * * Must be called prior to deallocating the memory for the anon * transport class. 
*/ void anon_transport_class_unregister(struct anon_transport_class *atc) { if (unlikely(attribute_container_unregister(&atc->container))) BUG(); } EXPORT_SYMBOL_GPL(anon_transport_class_unregister); static int transport_setup_classdev(struct attribute_container *cont, struct device *dev, struct device *classdev) { struct transport_class *tclass = class_to_transport_class(cont->class); struct transport_container *tcont = attribute_container_to_transport_container(cont); if (tclass->setup) tclass->setup(tcont, dev, classdev); return 0; } /** * transport_setup_device - declare a new dev for transport class association but don't make it visible yet. * @dev: the generic device representing the entity being added * * Usually, dev represents some component in the HBA system (either * the HBA itself or a device remote across the HBA bus). This * routine is simply a trigger point to see if any set of transport * classes wishes to associate with the added device. This allocates * storage for the class device and initialises it, but does not yet * add it to the system or add attributes to it (you do this with * transport_add_device). If you have no need for a separate setup * and add operations, use transport_register_device (see * transport_class.h). 
*/ void transport_setup_device(struct device *dev) { attribute_container_add_device(dev, transport_setup_classdev); } EXPORT_SYMBOL_GPL(transport_setup_device); static int transport_add_class_device(struct attribute_container *cont, struct device *dev, struct device *classdev) { int error = attribute_container_add_class_device(classdev); struct transport_container *tcont = attribute_container_to_transport_container(cont); if (!error && tcont->statistics) error = sysfs_create_group(&classdev->kobj, tcont->statistics); return error; } /** * transport_add_device - declare a new dev for transport class association * * @dev: the generic device representing the entity being added * * Usually, dev represents some component in the HBA system (either * the HBA itself or a device remote across the HBA bus). This * routine is simply a trigger point used to add the device to the * system and register attributes for it. */ void transport_add_device(struct device *dev) { attribute_container_device_trigger(dev, transport_add_class_device); } EXPORT_SYMBOL_GPL(transport_add_device); static int transport_configure(struct attribute_container *cont, struct device *dev, struct device *cdev) { struct transport_class *tclass = class_to_transport_class(cont->class); struct transport_container *tcont = attribute_container_to_transport_container(cont); if (tclass->configure) tclass->configure(tcont, dev, cdev); return 0; } /** * transport_configure_device - configure an already set up device * * @dev: generic device representing device to be configured * * The idea of configure is simply to provide a point within the setup * process to allow the transport class to extract information from a * device after it has been setup. This is used in SCSI because we * have to have a setup device to begin using the HBA, but after we * send the initial inquiry, we use configure to extract the device * parameters. The device need not have been added to be configured. 
*/ void transport_configure_device(struct device *dev) { attribute_container_device_trigger(dev, transport_configure); } EXPORT_SYMBOL_GPL(transport_configure_device); static int transport_remove_classdev(struct attribute_container *cont, struct device *dev, struct device *classdev) { struct transport_container *tcont = attribute_container_to_transport_container(cont); struct transport_class *tclass = class_to_transport_class(cont->class); if (tclass->remove) tclass->remove(tcont, dev, classdev); if (tclass->remove != anon_transport_dummy_function) { if (tcont->statistics) sysfs_remove_group(&classdev->kobj, tcont->statistics); attribute_container_class_device_del(classdev); } return 0; } /** * transport_remove_device - remove the visibility of a device * * @dev: generic device to remove * * This call removes the visibility of the device (to the user from * sysfs), but does not destroy it. To eliminate a device entirely * you must also call transport_destroy_device. If you don't need to * do remove and destroy as separate operations, use * transport_unregister_device() (see transport_class.h) which will * perform both calls for you. */ void transport_remove_device(struct device *dev) { attribute_container_device_trigger(dev, transport_remove_classdev); } EXPORT_SYMBOL_GPL(transport_remove_device); static void transport_destroy_classdev(struct attribute_container *cont, struct device *dev, struct device *classdev) { struct transport_class *tclass = class_to_transport_class(cont->class); if (tclass->remove != anon_transport_dummy_function) put_device(classdev); } /** * transport_destroy_device - destroy a removed device * * @dev: device to eliminate from the transport class. * * This call triggers the elimination of storage associated with the * transport classdev. Note: all it really does is relinquish a * reference to the classdev. The memory will not be freed until the * last reference goes to zero. 
Note also that the classdev retains a * reference count on dev, so dev too will remain for as long as the * transport class device remains around. */ void transport_destroy_device(struct device *dev) { attribute_container_remove_device(dev, transport_destroy_classdev); } EXPORT_SYMBOL_GPL(transport_destroy_device);
gpl-2.0
javelinanddart/Canuck
drivers/net/ethernet/ti/davinci_mdio.c
4836
11280
/* * DaVinci MDIO Module driver * * Copyright (C) 2010 Texas Instruments. * * Shamelessly ripped out of davinci_emac.c, original copyrights follow: * * Copyright (C) 2009 Texas Instruments. * * --------------------------------------------------------------------------- * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * --------------------------------------------------------------------------- */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/delay.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/phy.h> #include <linux/clk.h> #include <linux/err.h> #include <linux/io.h> #include <linux/davinci_emac.h> /* * This timeout definition is a worst-case ultra defensive measure against * unexpected controller lock ups. Ideally, we should never ever hit this * scenario in practice. 
*/ #define MDIO_TIMEOUT 100 /* msecs */ #define PHY_REG_MASK 0x1f #define PHY_ID_MASK 0x1f #define DEF_OUT_FREQ 2200000 /* 2.2 MHz */ struct davinci_mdio_regs { u32 version; u32 control; #define CONTROL_IDLE BIT(31) #define CONTROL_ENABLE BIT(30) #define CONTROL_MAX_DIV (0xffff) u32 alive; u32 link; u32 linkintraw; u32 linkintmasked; u32 __reserved_0[2]; u32 userintraw; u32 userintmasked; u32 userintmaskset; u32 userintmaskclr; u32 __reserved_1[20]; struct { u32 access; #define USERACCESS_GO BIT(31) #define USERACCESS_WRITE BIT(30) #define USERACCESS_ACK BIT(29) #define USERACCESS_READ (0) #define USERACCESS_DATA (0xffff) u32 physel; } user[0]; }; struct mdio_platform_data default_pdata = { .bus_freq = DEF_OUT_FREQ, }; struct davinci_mdio_data { struct mdio_platform_data pdata; struct davinci_mdio_regs __iomem *regs; spinlock_t lock; struct clk *clk; struct device *dev; struct mii_bus *bus; bool suspended; unsigned long access_time; /* jiffies */ }; static void __davinci_mdio_reset(struct davinci_mdio_data *data) { u32 mdio_in, div, mdio_out_khz, access_time; mdio_in = clk_get_rate(data->clk); div = (mdio_in / data->pdata.bus_freq) - 1; if (div > CONTROL_MAX_DIV) div = CONTROL_MAX_DIV; /* set enable and clock divider */ __raw_writel(div | CONTROL_ENABLE, &data->regs->control); /* * One mdio transaction consists of: * 32 bits of preamble * 32 bits of transferred data * 24 bits of bus yield (not needed unless shared?) */ mdio_out_khz = mdio_in / (1000 * (div + 1)); access_time = (88 * 1000) / mdio_out_khz; /* * In the worst case, we could be kicking off a user-access immediately * after the mdio bus scan state-machine triggered its own read. If * so, our request could get deferred by one access cycle. We * defensively allow for 4 access cycles. 
*/ data->access_time = usecs_to_jiffies(access_time * 4); if (!data->access_time) data->access_time = 1; } static int davinci_mdio_reset(struct mii_bus *bus) { struct davinci_mdio_data *data = bus->priv; u32 phy_mask, ver; __davinci_mdio_reset(data); /* wait for scan logic to settle */ msleep(PHY_MAX_ADDR * data->access_time); /* dump hardware version info */ ver = __raw_readl(&data->regs->version); dev_info(data->dev, "davinci mdio revision %d.%d\n", (ver >> 8) & 0xff, ver & 0xff); /* get phy mask from the alive register */ phy_mask = __raw_readl(&data->regs->alive); if (phy_mask) { /* restrict mdio bus to live phys only */ dev_info(data->dev, "detected phy mask %x\n", ~phy_mask); phy_mask = ~phy_mask; } else { /* desperately scan all phys */ dev_warn(data->dev, "no live phy, scanning all\n"); phy_mask = 0; } data->bus->phy_mask = phy_mask; return 0; } /* wait until hardware is ready for another user access */ static inline int wait_for_user_access(struct davinci_mdio_data *data) { struct davinci_mdio_regs __iomem *regs = data->regs; unsigned long timeout = jiffies + msecs_to_jiffies(MDIO_TIMEOUT); u32 reg; while (time_after(timeout, jiffies)) { reg = __raw_readl(&regs->user[0].access); if ((reg & USERACCESS_GO) == 0) return 0; reg = __raw_readl(&regs->control); if ((reg & CONTROL_IDLE) == 0) continue; /* * An emac soft_reset may have clobbered the mdio controller's * state machine. 
We need to reset and retry the current * operation */ dev_warn(data->dev, "resetting idled controller\n"); __davinci_mdio_reset(data); return -EAGAIN; } reg = __raw_readl(&regs->user[0].access); if ((reg & USERACCESS_GO) == 0) return 0; dev_err(data->dev, "timed out waiting for user access\n"); return -ETIMEDOUT; } /* wait until hardware state machine is idle */ static inline int wait_for_idle(struct davinci_mdio_data *data) { struct davinci_mdio_regs __iomem *regs = data->regs; unsigned long timeout = jiffies + msecs_to_jiffies(MDIO_TIMEOUT); while (time_after(timeout, jiffies)) { if (__raw_readl(&regs->control) & CONTROL_IDLE) return 0; } dev_err(data->dev, "timed out waiting for idle\n"); return -ETIMEDOUT; } static int davinci_mdio_read(struct mii_bus *bus, int phy_id, int phy_reg) { struct davinci_mdio_data *data = bus->priv; u32 reg; int ret; if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK) return -EINVAL; spin_lock(&data->lock); if (data->suspended) { spin_unlock(&data->lock); return -ENODEV; } reg = (USERACCESS_GO | USERACCESS_READ | (phy_reg << 21) | (phy_id << 16)); while (1) { ret = wait_for_user_access(data); if (ret == -EAGAIN) continue; if (ret < 0) break; __raw_writel(reg, &data->regs->user[0].access); ret = wait_for_user_access(data); if (ret == -EAGAIN) continue; if (ret < 0) break; reg = __raw_readl(&data->regs->user[0].access); ret = (reg & USERACCESS_ACK) ? 
(reg & USERACCESS_DATA) : -EIO; break; } spin_unlock(&data->lock); return ret; } static int davinci_mdio_write(struct mii_bus *bus, int phy_id, int phy_reg, u16 phy_data) { struct davinci_mdio_data *data = bus->priv; u32 reg; int ret; if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK) return -EINVAL; spin_lock(&data->lock); if (data->suspended) { spin_unlock(&data->lock); return -ENODEV; } reg = (USERACCESS_GO | USERACCESS_WRITE | (phy_reg << 21) | (phy_id << 16) | (phy_data & USERACCESS_DATA)); while (1) { ret = wait_for_user_access(data); if (ret == -EAGAIN) continue; if (ret < 0) break; __raw_writel(reg, &data->regs->user[0].access); ret = wait_for_user_access(data); if (ret == -EAGAIN) continue; break; } spin_unlock(&data->lock); return 0; } static int __devinit davinci_mdio_probe(struct platform_device *pdev) { struct mdio_platform_data *pdata = pdev->dev.platform_data; struct device *dev = &pdev->dev; struct davinci_mdio_data *data; struct resource *res; struct phy_device *phy; int ret, addr; data = kzalloc(sizeof(*data), GFP_KERNEL); if (!data) { dev_err(dev, "failed to alloc device data\n"); return -ENOMEM; } data->pdata = pdata ? 
(*pdata) : default_pdata; data->bus = mdiobus_alloc(); if (!data->bus) { dev_err(dev, "failed to alloc mii bus\n"); ret = -ENOMEM; goto bail_out; } data->bus->name = dev_name(dev); data->bus->read = davinci_mdio_read, data->bus->write = davinci_mdio_write, data->bus->reset = davinci_mdio_reset, data->bus->parent = dev; data->bus->priv = data; snprintf(data->bus->id, MII_BUS_ID_SIZE, "%s-%x", pdev->name, pdev->id); data->clk = clk_get(dev, NULL); if (IS_ERR(data->clk)) { dev_err(dev, "failed to get device clock\n"); ret = PTR_ERR(data->clk); data->clk = NULL; goto bail_out; } clk_enable(data->clk); dev_set_drvdata(dev, data); data->dev = dev; spin_lock_init(&data->lock); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { dev_err(dev, "could not find register map resource\n"); ret = -ENOENT; goto bail_out; } res = devm_request_mem_region(dev, res->start, resource_size(res), dev_name(dev)); if (!res) { dev_err(dev, "could not allocate register map resource\n"); ret = -ENXIO; goto bail_out; } data->regs = devm_ioremap_nocache(dev, res->start, resource_size(res)); if (!data->regs) { dev_err(dev, "could not map mdio registers\n"); ret = -ENOMEM; goto bail_out; } /* register the mii bus */ ret = mdiobus_register(data->bus); if (ret) goto bail_out; /* scan and dump the bus */ for (addr = 0; addr < PHY_MAX_ADDR; addr++) { phy = data->bus->phy_map[addr]; if (phy) { dev_info(dev, "phy[%d]: device %s, driver %s\n", phy->addr, dev_name(&phy->dev), phy->drv ? 
phy->drv->name : "unknown"); } } return 0; bail_out: if (data->bus) mdiobus_free(data->bus); if (data->clk) { clk_disable(data->clk); clk_put(data->clk); } kfree(data); return ret; } static int __devexit davinci_mdio_remove(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct davinci_mdio_data *data = dev_get_drvdata(dev); if (data->bus) mdiobus_free(data->bus); if (data->clk) { clk_disable(data->clk); clk_put(data->clk); } dev_set_drvdata(dev, NULL); kfree(data); return 0; } static int davinci_mdio_suspend(struct device *dev) { struct davinci_mdio_data *data = dev_get_drvdata(dev); u32 ctrl; spin_lock(&data->lock); /* shutdown the scan state machine */ ctrl = __raw_readl(&data->regs->control); ctrl &= ~CONTROL_ENABLE; __raw_writel(ctrl, &data->regs->control); wait_for_idle(data); if (data->clk) clk_disable(data->clk); data->suspended = true; spin_unlock(&data->lock); return 0; } static int davinci_mdio_resume(struct device *dev) { struct davinci_mdio_data *data = dev_get_drvdata(dev); u32 ctrl; spin_lock(&data->lock); if (data->clk) clk_enable(data->clk); /* restart the scan state machine */ ctrl = __raw_readl(&data->regs->control); ctrl |= CONTROL_ENABLE; __raw_writel(ctrl, &data->regs->control); data->suspended = false; spin_unlock(&data->lock); return 0; } static const struct dev_pm_ops davinci_mdio_pm_ops = { .suspend = davinci_mdio_suspend, .resume = davinci_mdio_resume, }; static struct platform_driver davinci_mdio_driver = { .driver = { .name = "davinci_mdio", .owner = THIS_MODULE, .pm = &davinci_mdio_pm_ops, }, .probe = davinci_mdio_probe, .remove = __devexit_p(davinci_mdio_remove), }; static int __init davinci_mdio_init(void) { return platform_driver_register(&davinci_mdio_driver); } device_initcall(davinci_mdio_init); static void __exit davinci_mdio_exit(void) { platform_driver_unregister(&davinci_mdio_driver); } module_exit(davinci_mdio_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("DaVinci MDIO driver");
gpl-2.0
davidmueller13/flo-1
drivers/mfd/tc3589x.c
5092
9531
/* * Copyright (C) ST-Ericsson SA 2010 * * License Terms: GNU General Public License, version 2 * Author: Hanumath Prasad <hanumath.prasad@stericsson.com> for ST-Ericsson * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson */ #include <linux/module.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/slab.h> #include <linux/i2c.h> #include <linux/mfd/core.h> #include <linux/mfd/tc3589x.h> #define TC3589x_CLKMODE_MODCTL_SLEEP 0x0 #define TC3589x_CLKMODE_MODCTL_OPERATION (1 << 0) /** * tc3589x_reg_read() - read a single TC3589x register * @tc3589x: Device to read from * @reg: Register to read */ int tc3589x_reg_read(struct tc3589x *tc3589x, u8 reg) { int ret; ret = i2c_smbus_read_byte_data(tc3589x->i2c, reg); if (ret < 0) dev_err(tc3589x->dev, "failed to read reg %#x: %d\n", reg, ret); return ret; } EXPORT_SYMBOL_GPL(tc3589x_reg_read); /** * tc3589x_reg_read() - write a single TC3589x register * @tc3589x: Device to write to * @reg: Register to read * @data: Value to write */ int tc3589x_reg_write(struct tc3589x *tc3589x, u8 reg, u8 data) { int ret; ret = i2c_smbus_write_byte_data(tc3589x->i2c, reg, data); if (ret < 0) dev_err(tc3589x->dev, "failed to write reg %#x: %d\n", reg, ret); return ret; } EXPORT_SYMBOL_GPL(tc3589x_reg_write); /** * tc3589x_block_read() - read multiple TC3589x registers * @tc3589x: Device to read from * @reg: First register * @length: Number of registers * @values: Buffer to write to */ int tc3589x_block_read(struct tc3589x *tc3589x, u8 reg, u8 length, u8 *values) { int ret; ret = i2c_smbus_read_i2c_block_data(tc3589x->i2c, reg, length, values); if (ret < 0) dev_err(tc3589x->dev, "failed to read regs %#x: %d\n", reg, ret); return ret; } EXPORT_SYMBOL_GPL(tc3589x_block_read); /** * tc3589x_block_write() - write multiple TC3589x registers * @tc3589x: Device to write to * @reg: First register * @length: Number of registers * @values: Values to write */ int tc3589x_block_write(struct tc3589x *tc3589x, u8 reg, 
u8 length, const u8 *values) { int ret; ret = i2c_smbus_write_i2c_block_data(tc3589x->i2c, reg, length, values); if (ret < 0) dev_err(tc3589x->dev, "failed to write regs %#x: %d\n", reg, ret); return ret; } EXPORT_SYMBOL_GPL(tc3589x_block_write); /** * tc3589x_set_bits() - set the value of a bitfield in a TC3589x register * @tc3589x: Device to write to * @reg: Register to write * @mask: Mask of bits to set * @values: Value to set */ int tc3589x_set_bits(struct tc3589x *tc3589x, u8 reg, u8 mask, u8 val) { int ret; mutex_lock(&tc3589x->lock); ret = tc3589x_reg_read(tc3589x, reg); if (ret < 0) goto out; ret &= ~mask; ret |= val; ret = tc3589x_reg_write(tc3589x, reg, ret); out: mutex_unlock(&tc3589x->lock); return ret; } EXPORT_SYMBOL_GPL(tc3589x_set_bits); static struct resource gpio_resources[] = { { .start = TC3589x_INT_GPIIRQ, .end = TC3589x_INT_GPIIRQ, .flags = IORESOURCE_IRQ, }, }; static struct resource keypad_resources[] = { { .start = TC3589x_INT_KBDIRQ, .end = TC3589x_INT_KBDIRQ, .flags = IORESOURCE_IRQ, }, }; static struct mfd_cell tc3589x_dev_gpio[] = { { .name = "tc3589x-gpio", .num_resources = ARRAY_SIZE(gpio_resources), .resources = &gpio_resources[0], }, }; static struct mfd_cell tc3589x_dev_keypad[] = { { .name = "tc3589x-keypad", .num_resources = ARRAY_SIZE(keypad_resources), .resources = &keypad_resources[0], }, }; static irqreturn_t tc3589x_irq(int irq, void *data) { struct tc3589x *tc3589x = data; int status; again: status = tc3589x_reg_read(tc3589x, TC3589x_IRQST); if (status < 0) return IRQ_NONE; while (status) { int bit = __ffs(status); handle_nested_irq(tc3589x->irq_base + bit); status &= ~(1 << bit); } /* * A dummy read or write (to any register) appears to be necessary to * have the last interrupt clear (for example, GPIO IC write) take * effect. In such a case, recheck for any interrupt which is still * pending. 
*/ status = tc3589x_reg_read(tc3589x, TC3589x_IRQST); if (status) goto again; return IRQ_HANDLED; } static int tc3589x_irq_init(struct tc3589x *tc3589x) { int base = tc3589x->irq_base; int irq; for (irq = base; irq < base + TC3589x_NR_INTERNAL_IRQS; irq++) { irq_set_chip_data(irq, tc3589x); irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_edge_irq); irq_set_nested_thread(irq, 1); #ifdef CONFIG_ARM set_irq_flags(irq, IRQF_VALID); #else irq_set_noprobe(irq); #endif } return 0; } static void tc3589x_irq_remove(struct tc3589x *tc3589x) { int base = tc3589x->irq_base; int irq; for (irq = base; irq < base + TC3589x_NR_INTERNAL_IRQS; irq++) { #ifdef CONFIG_ARM set_irq_flags(irq, 0); #endif irq_set_chip_and_handler(irq, NULL, NULL); irq_set_chip_data(irq, NULL); } } static int tc3589x_chip_init(struct tc3589x *tc3589x) { int manf, ver, ret; manf = tc3589x_reg_read(tc3589x, TC3589x_MANFCODE); if (manf < 0) return manf; ver = tc3589x_reg_read(tc3589x, TC3589x_VERSION); if (ver < 0) return ver; if (manf != TC3589x_MANFCODE_MAGIC) { dev_err(tc3589x->dev, "unknown manufacturer: %#x\n", manf); return -EINVAL; } dev_info(tc3589x->dev, "manufacturer: %#x, version: %#x\n", manf, ver); /* * Put everything except the IRQ module into reset; * also spare the GPIO module for any pin initialization * done during pre-kernel boot */ ret = tc3589x_reg_write(tc3589x, TC3589x_RSTCTRL, TC3589x_RSTCTRL_TIMRST | TC3589x_RSTCTRL_ROTRST | TC3589x_RSTCTRL_KBDRST); if (ret < 0) return ret; /* Clear the reset interrupt. 
*/ return tc3589x_reg_write(tc3589x, TC3589x_RSTINTCLR, 0x1); } static int __devinit tc3589x_device_init(struct tc3589x *tc3589x) { int ret = 0; unsigned int blocks = tc3589x->pdata->block; if (blocks & TC3589x_BLOCK_GPIO) { ret = mfd_add_devices(tc3589x->dev, -1, tc3589x_dev_gpio, ARRAY_SIZE(tc3589x_dev_gpio), NULL, tc3589x->irq_base); if (ret) { dev_err(tc3589x->dev, "failed to add gpio child\n"); return ret; } dev_info(tc3589x->dev, "added gpio block\n"); } if (blocks & TC3589x_BLOCK_KEYPAD) { ret = mfd_add_devices(tc3589x->dev, -1, tc3589x_dev_keypad, ARRAY_SIZE(tc3589x_dev_keypad), NULL, tc3589x->irq_base); if (ret) { dev_err(tc3589x->dev, "failed to keypad child\n"); return ret; } dev_info(tc3589x->dev, "added keypad block\n"); } return ret; } static int __devinit tc3589x_probe(struct i2c_client *i2c, const struct i2c_device_id *id) { struct tc3589x_platform_data *pdata = i2c->dev.platform_data; struct tc3589x *tc3589x; int ret; if (!i2c_check_functionality(i2c->adapter, I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_I2C_BLOCK)) return -EIO; tc3589x = kzalloc(sizeof(struct tc3589x), GFP_KERNEL); if (!tc3589x) return -ENOMEM; mutex_init(&tc3589x->lock); tc3589x->dev = &i2c->dev; tc3589x->i2c = i2c; tc3589x->pdata = pdata; tc3589x->irq_base = pdata->irq_base; tc3589x->num_gpio = id->driver_data; i2c_set_clientdata(i2c, tc3589x); ret = tc3589x_chip_init(tc3589x); if (ret) goto out_free; ret = tc3589x_irq_init(tc3589x); if (ret) goto out_free; ret = request_threaded_irq(tc3589x->i2c->irq, NULL, tc3589x_irq, IRQF_TRIGGER_FALLING | IRQF_ONESHOT, "tc3589x", tc3589x); if (ret) { dev_err(tc3589x->dev, "failed to request IRQ: %d\n", ret); goto out_removeirq; } ret = tc3589x_device_init(tc3589x); if (ret) { dev_err(tc3589x->dev, "failed to add child devices\n"); goto out_freeirq; } return 0; out_freeirq: free_irq(tc3589x->i2c->irq, tc3589x); out_removeirq: tc3589x_irq_remove(tc3589x); out_free: kfree(tc3589x); return ret; } static int __devexit tc3589x_remove(struct 
i2c_client *client) { struct tc3589x *tc3589x = i2c_get_clientdata(client); mfd_remove_devices(tc3589x->dev); free_irq(tc3589x->i2c->irq, tc3589x); tc3589x_irq_remove(tc3589x); kfree(tc3589x); return 0; } #ifdef CONFIG_PM static int tc3589x_suspend(struct device *dev) { struct tc3589x *tc3589x = dev_get_drvdata(dev); struct i2c_client *client = tc3589x->i2c; int ret = 0; /* put the system to sleep mode */ if (!device_may_wakeup(&client->dev)) ret = tc3589x_reg_write(tc3589x, TC3589x_CLKMODE, TC3589x_CLKMODE_MODCTL_SLEEP); return ret; } static int tc3589x_resume(struct device *dev) { struct tc3589x *tc3589x = dev_get_drvdata(dev); struct i2c_client *client = tc3589x->i2c; int ret = 0; /* enable the system into operation */ if (!device_may_wakeup(&client->dev)) ret = tc3589x_reg_write(tc3589x, TC3589x_CLKMODE, TC3589x_CLKMODE_MODCTL_OPERATION); return ret; } static const SIMPLE_DEV_PM_OPS(tc3589x_dev_pm_ops, tc3589x_suspend, tc3589x_resume); #endif static const struct i2c_device_id tc3589x_id[] = { { "tc3589x", 24 }, { } }; MODULE_DEVICE_TABLE(i2c, tc3589x_id); static struct i2c_driver tc3589x_driver = { .driver.name = "tc3589x", .driver.owner = THIS_MODULE, #ifdef CONFIG_PM .driver.pm = &tc3589x_dev_pm_ops, #endif .probe = tc3589x_probe, .remove = __devexit_p(tc3589x_remove), .id_table = tc3589x_id, }; static int __init tc3589x_init(void) { return i2c_add_driver(&tc3589x_driver); } subsys_initcall(tc3589x_init); static void __exit tc3589x_exit(void) { i2c_del_driver(&tc3589x_driver); } module_exit(tc3589x_exit); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("TC3589x MFD core driver"); MODULE_AUTHOR("Hanumath Prasad, Rabin Vincent");
gpl-2.0
junkyde/vikinger
arch/arm/mach-vt8500/wm8505_7in.c
5092
2232
/* * arch/arm/mach-vt8500/wm8505_7in.c * * Copyright (C) 2010 Alexey Charkov <alchark@gmail.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/io.h> #include <linux/pm.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include "devices.h" static void __iomem *pmc_hiber; static struct platform_device *devices[] __initdata = { &vt8500_device_uart0, &vt8500_device_ehci, &vt8500_device_wm8505_fb, &vt8500_device_ge_rops, &vt8500_device_pwm, &vt8500_device_pwmbl, &vt8500_device_rtc, }; static void vt8500_power_off(void) { local_irq_disable(); writew(5, pmc_hiber); asm("mcr%? 
p15, 0, %0, c7, c0, 4" : : "r" (0)); } void __init wm8505_7in_init(void) { #ifdef CONFIG_FB_WM8505 void __iomem *gpio_mux_reg = ioremap(wmt_gpio_base + 0x200, 4); if (gpio_mux_reg) { writel(readl(gpio_mux_reg) | 0x80000000, gpio_mux_reg); iounmap(gpio_mux_reg); } else { printk(KERN_ERR "Could not remap the GPIO mux register, display may not work properly!\n"); } #endif pmc_hiber = ioremap(wmt_pmc_base + 0x12, 2); if (pmc_hiber) pm_power_off = &vt8500_power_off; else printk(KERN_ERR "PMC Hibernation register could not be remapped, not enabling power off!\n"); wm8505_set_resources(); platform_add_devices(devices, ARRAY_SIZE(devices)); vt8500_gpio_init(); } MACHINE_START(WM8505_7IN_NETBOOK, "WM8505 7-inch generic netbook") .atag_offset = 0x100, .reserve = wm8505_reserve_mem, .map_io = wm8505_map_io, .init_irq = wm8505_init_irq, .timer = &vt8500_timer, .init_machine = wm8505_7in_init, MACHINE_END
gpl-2.0
ultrasystem/linux
drivers/mfd/tc3589x.c
5092
9531
/* * Copyright (C) ST-Ericsson SA 2010 * * License Terms: GNU General Public License, version 2 * Author: Hanumath Prasad <hanumath.prasad@stericsson.com> for ST-Ericsson * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson */ #include <linux/module.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/slab.h> #include <linux/i2c.h> #include <linux/mfd/core.h> #include <linux/mfd/tc3589x.h> #define TC3589x_CLKMODE_MODCTL_SLEEP 0x0 #define TC3589x_CLKMODE_MODCTL_OPERATION (1 << 0) /** * tc3589x_reg_read() - read a single TC3589x register * @tc3589x: Device to read from * @reg: Register to read */ int tc3589x_reg_read(struct tc3589x *tc3589x, u8 reg) { int ret; ret = i2c_smbus_read_byte_data(tc3589x->i2c, reg); if (ret < 0) dev_err(tc3589x->dev, "failed to read reg %#x: %d\n", reg, ret); return ret; } EXPORT_SYMBOL_GPL(tc3589x_reg_read); /** * tc3589x_reg_read() - write a single TC3589x register * @tc3589x: Device to write to * @reg: Register to read * @data: Value to write */ int tc3589x_reg_write(struct tc3589x *tc3589x, u8 reg, u8 data) { int ret; ret = i2c_smbus_write_byte_data(tc3589x->i2c, reg, data); if (ret < 0) dev_err(tc3589x->dev, "failed to write reg %#x: %d\n", reg, ret); return ret; } EXPORT_SYMBOL_GPL(tc3589x_reg_write); /** * tc3589x_block_read() - read multiple TC3589x registers * @tc3589x: Device to read from * @reg: First register * @length: Number of registers * @values: Buffer to write to */ int tc3589x_block_read(struct tc3589x *tc3589x, u8 reg, u8 length, u8 *values) { int ret; ret = i2c_smbus_read_i2c_block_data(tc3589x->i2c, reg, length, values); if (ret < 0) dev_err(tc3589x->dev, "failed to read regs %#x: %d\n", reg, ret); return ret; } EXPORT_SYMBOL_GPL(tc3589x_block_read); /** * tc3589x_block_write() - write multiple TC3589x registers * @tc3589x: Device to write to * @reg: First register * @length: Number of registers * @values: Values to write */ int tc3589x_block_write(struct tc3589x *tc3589x, u8 reg, 
u8 length, const u8 *values) { int ret; ret = i2c_smbus_write_i2c_block_data(tc3589x->i2c, reg, length, values); if (ret < 0) dev_err(tc3589x->dev, "failed to write regs %#x: %d\n", reg, ret); return ret; } EXPORT_SYMBOL_GPL(tc3589x_block_write); /** * tc3589x_set_bits() - set the value of a bitfield in a TC3589x register * @tc3589x: Device to write to * @reg: Register to write * @mask: Mask of bits to set * @values: Value to set */ int tc3589x_set_bits(struct tc3589x *tc3589x, u8 reg, u8 mask, u8 val) { int ret; mutex_lock(&tc3589x->lock); ret = tc3589x_reg_read(tc3589x, reg); if (ret < 0) goto out; ret &= ~mask; ret |= val; ret = tc3589x_reg_write(tc3589x, reg, ret); out: mutex_unlock(&tc3589x->lock); return ret; } EXPORT_SYMBOL_GPL(tc3589x_set_bits); static struct resource gpio_resources[] = { { .start = TC3589x_INT_GPIIRQ, .end = TC3589x_INT_GPIIRQ, .flags = IORESOURCE_IRQ, }, }; static struct resource keypad_resources[] = { { .start = TC3589x_INT_KBDIRQ, .end = TC3589x_INT_KBDIRQ, .flags = IORESOURCE_IRQ, }, }; static struct mfd_cell tc3589x_dev_gpio[] = { { .name = "tc3589x-gpio", .num_resources = ARRAY_SIZE(gpio_resources), .resources = &gpio_resources[0], }, }; static struct mfd_cell tc3589x_dev_keypad[] = { { .name = "tc3589x-keypad", .num_resources = ARRAY_SIZE(keypad_resources), .resources = &keypad_resources[0], }, }; static irqreturn_t tc3589x_irq(int irq, void *data) { struct tc3589x *tc3589x = data; int status; again: status = tc3589x_reg_read(tc3589x, TC3589x_IRQST); if (status < 0) return IRQ_NONE; while (status) { int bit = __ffs(status); handle_nested_irq(tc3589x->irq_base + bit); status &= ~(1 << bit); } /* * A dummy read or write (to any register) appears to be necessary to * have the last interrupt clear (for example, GPIO IC write) take * effect. In such a case, recheck for any interrupt which is still * pending. 
*/ status = tc3589x_reg_read(tc3589x, TC3589x_IRQST); if (status) goto again; return IRQ_HANDLED; } static int tc3589x_irq_init(struct tc3589x *tc3589x) { int base = tc3589x->irq_base; int irq; for (irq = base; irq < base + TC3589x_NR_INTERNAL_IRQS; irq++) { irq_set_chip_data(irq, tc3589x); irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_edge_irq); irq_set_nested_thread(irq, 1); #ifdef CONFIG_ARM set_irq_flags(irq, IRQF_VALID); #else irq_set_noprobe(irq); #endif } return 0; } static void tc3589x_irq_remove(struct tc3589x *tc3589x) { int base = tc3589x->irq_base; int irq; for (irq = base; irq < base + TC3589x_NR_INTERNAL_IRQS; irq++) { #ifdef CONFIG_ARM set_irq_flags(irq, 0); #endif irq_set_chip_and_handler(irq, NULL, NULL); irq_set_chip_data(irq, NULL); } } static int tc3589x_chip_init(struct tc3589x *tc3589x) { int manf, ver, ret; manf = tc3589x_reg_read(tc3589x, TC3589x_MANFCODE); if (manf < 0) return manf; ver = tc3589x_reg_read(tc3589x, TC3589x_VERSION); if (ver < 0) return ver; if (manf != TC3589x_MANFCODE_MAGIC) { dev_err(tc3589x->dev, "unknown manufacturer: %#x\n", manf); return -EINVAL; } dev_info(tc3589x->dev, "manufacturer: %#x, version: %#x\n", manf, ver); /* * Put everything except the IRQ module into reset; * also spare the GPIO module for any pin initialization * done during pre-kernel boot */ ret = tc3589x_reg_write(tc3589x, TC3589x_RSTCTRL, TC3589x_RSTCTRL_TIMRST | TC3589x_RSTCTRL_ROTRST | TC3589x_RSTCTRL_KBDRST); if (ret < 0) return ret; /* Clear the reset interrupt. 
*/ return tc3589x_reg_write(tc3589x, TC3589x_RSTINTCLR, 0x1); } static int __devinit tc3589x_device_init(struct tc3589x *tc3589x) { int ret = 0; unsigned int blocks = tc3589x->pdata->block; if (blocks & TC3589x_BLOCK_GPIO) { ret = mfd_add_devices(tc3589x->dev, -1, tc3589x_dev_gpio, ARRAY_SIZE(tc3589x_dev_gpio), NULL, tc3589x->irq_base); if (ret) { dev_err(tc3589x->dev, "failed to add gpio child\n"); return ret; } dev_info(tc3589x->dev, "added gpio block\n"); } if (blocks & TC3589x_BLOCK_KEYPAD) { ret = mfd_add_devices(tc3589x->dev, -1, tc3589x_dev_keypad, ARRAY_SIZE(tc3589x_dev_keypad), NULL, tc3589x->irq_base); if (ret) { dev_err(tc3589x->dev, "failed to keypad child\n"); return ret; } dev_info(tc3589x->dev, "added keypad block\n"); } return ret; } static int __devinit tc3589x_probe(struct i2c_client *i2c, const struct i2c_device_id *id) { struct tc3589x_platform_data *pdata = i2c->dev.platform_data; struct tc3589x *tc3589x; int ret; if (!i2c_check_functionality(i2c->adapter, I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_I2C_BLOCK)) return -EIO; tc3589x = kzalloc(sizeof(struct tc3589x), GFP_KERNEL); if (!tc3589x) return -ENOMEM; mutex_init(&tc3589x->lock); tc3589x->dev = &i2c->dev; tc3589x->i2c = i2c; tc3589x->pdata = pdata; tc3589x->irq_base = pdata->irq_base; tc3589x->num_gpio = id->driver_data; i2c_set_clientdata(i2c, tc3589x); ret = tc3589x_chip_init(tc3589x); if (ret) goto out_free; ret = tc3589x_irq_init(tc3589x); if (ret) goto out_free; ret = request_threaded_irq(tc3589x->i2c->irq, NULL, tc3589x_irq, IRQF_TRIGGER_FALLING | IRQF_ONESHOT, "tc3589x", tc3589x); if (ret) { dev_err(tc3589x->dev, "failed to request IRQ: %d\n", ret); goto out_removeirq; } ret = tc3589x_device_init(tc3589x); if (ret) { dev_err(tc3589x->dev, "failed to add child devices\n"); goto out_freeirq; } return 0; out_freeirq: free_irq(tc3589x->i2c->irq, tc3589x); out_removeirq: tc3589x_irq_remove(tc3589x); out_free: kfree(tc3589x); return ret; } static int __devexit tc3589x_remove(struct 
i2c_client *client) { struct tc3589x *tc3589x = i2c_get_clientdata(client); mfd_remove_devices(tc3589x->dev); free_irq(tc3589x->i2c->irq, tc3589x); tc3589x_irq_remove(tc3589x); kfree(tc3589x); return 0; } #ifdef CONFIG_PM static int tc3589x_suspend(struct device *dev) { struct tc3589x *tc3589x = dev_get_drvdata(dev); struct i2c_client *client = tc3589x->i2c; int ret = 0; /* put the system to sleep mode */ if (!device_may_wakeup(&client->dev)) ret = tc3589x_reg_write(tc3589x, TC3589x_CLKMODE, TC3589x_CLKMODE_MODCTL_SLEEP); return ret; } static int tc3589x_resume(struct device *dev) { struct tc3589x *tc3589x = dev_get_drvdata(dev); struct i2c_client *client = tc3589x->i2c; int ret = 0; /* enable the system into operation */ if (!device_may_wakeup(&client->dev)) ret = tc3589x_reg_write(tc3589x, TC3589x_CLKMODE, TC3589x_CLKMODE_MODCTL_OPERATION); return ret; } static const SIMPLE_DEV_PM_OPS(tc3589x_dev_pm_ops, tc3589x_suspend, tc3589x_resume); #endif static const struct i2c_device_id tc3589x_id[] = { { "tc3589x", 24 }, { } }; MODULE_DEVICE_TABLE(i2c, tc3589x_id); static struct i2c_driver tc3589x_driver = { .driver.name = "tc3589x", .driver.owner = THIS_MODULE, #ifdef CONFIG_PM .driver.pm = &tc3589x_dev_pm_ops, #endif .probe = tc3589x_probe, .remove = __devexit_p(tc3589x_remove), .id_table = tc3589x_id, }; static int __init tc3589x_init(void) { return i2c_add_driver(&tc3589x_driver); } subsys_initcall(tc3589x_init); static void __exit tc3589x_exit(void) { i2c_del_driver(&tc3589x_driver); } module_exit(tc3589x_exit); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("TC3589x MFD core driver"); MODULE_AUTHOR("Hanumath Prasad, Rabin Vincent");
gpl-2.0
heaven001/android_kernel_sony_msm8974
net/rds/ib_sysctl.c
8164
4213
/* * Copyright (c) 2006 Oracle. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
* */ #include <linux/kernel.h> #include <linux/sysctl.h> #include <linux/proc_fs.h> #include "ib.h" static struct ctl_table_header *rds_ib_sysctl_hdr; unsigned long rds_ib_sysctl_max_send_wr = RDS_IB_DEFAULT_SEND_WR; unsigned long rds_ib_sysctl_max_recv_wr = RDS_IB_DEFAULT_RECV_WR; unsigned long rds_ib_sysctl_max_recv_allocation = (128 * 1024 * 1024) / RDS_FRAG_SIZE; static unsigned long rds_ib_sysctl_max_wr_min = 1; /* hardware will fail CQ creation long before this */ static unsigned long rds_ib_sysctl_max_wr_max = (u32)~0; unsigned long rds_ib_sysctl_max_unsig_wrs = 16; static unsigned long rds_ib_sysctl_max_unsig_wr_min = 1; static unsigned long rds_ib_sysctl_max_unsig_wr_max = 64; /* * This sysctl does nothing. * * Backwards compatibility with RDS 3.0 wire protocol * disables initial FC credit exchange. * If it's ever possible to drop 3.0 support, * setting this to 1 and moving init/refill of send/recv * rings from ib_cm_connect_complete() back into ib_setup_qp() * will cause credits to be added before protocol negotiation. 
*/ unsigned int rds_ib_sysctl_flow_control = 0; static ctl_table rds_ib_sysctl_table[] = { { .procname = "max_send_wr", .data = &rds_ib_sysctl_max_send_wr, .maxlen = sizeof(unsigned long), .mode = 0644, .proc_handler = proc_doulongvec_minmax, .extra1 = &rds_ib_sysctl_max_wr_min, .extra2 = &rds_ib_sysctl_max_wr_max, }, { .procname = "max_recv_wr", .data = &rds_ib_sysctl_max_recv_wr, .maxlen = sizeof(unsigned long), .mode = 0644, .proc_handler = proc_doulongvec_minmax, .extra1 = &rds_ib_sysctl_max_wr_min, .extra2 = &rds_ib_sysctl_max_wr_max, }, { .procname = "max_unsignaled_wr", .data = &rds_ib_sysctl_max_unsig_wrs, .maxlen = sizeof(unsigned long), .mode = 0644, .proc_handler = proc_doulongvec_minmax, .extra1 = &rds_ib_sysctl_max_unsig_wr_min, .extra2 = &rds_ib_sysctl_max_unsig_wr_max, }, { .procname = "max_recv_allocation", .data = &rds_ib_sysctl_max_recv_allocation, .maxlen = sizeof(unsigned long), .mode = 0644, .proc_handler = proc_doulongvec_minmax, }, { .procname = "flow_control", .data = &rds_ib_sysctl_flow_control, .maxlen = sizeof(rds_ib_sysctl_flow_control), .mode = 0644, .proc_handler = proc_dointvec, }, { } }; static struct ctl_path rds_ib_sysctl_path[] = { { .procname = "net", }, { .procname = "rds", }, { .procname = "ib", }, { } }; void rds_ib_sysctl_exit(void) { if (rds_ib_sysctl_hdr) unregister_sysctl_table(rds_ib_sysctl_hdr); } int rds_ib_sysctl_init(void) { rds_ib_sysctl_hdr = register_sysctl_paths(rds_ib_sysctl_path, rds_ib_sysctl_table); if (!rds_ib_sysctl_hdr) return -ENOMEM; return 0; }
gpl-2.0
batman38102/android_kernel_samsung_mint
drivers/gpu/drm/nouveau/nv17_tv_modes.c
8164
21909
/* * Copyright (C) 2009 Francisco Jerez. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining * a copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sublicense, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial * portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
* */ #include "drmP.h" #include "drm_crtc_helper.h" #include "nouveau_drv.h" #include "nouveau_encoder.h" #include "nouveau_crtc.h" #include "nouveau_hw.h" #include "nv17_tv.h" char *nv17_tv_norm_names[NUM_TV_NORMS] = { [TV_NORM_PAL] = "PAL", [TV_NORM_PAL_M] = "PAL-M", [TV_NORM_PAL_N] = "PAL-N", [TV_NORM_PAL_NC] = "PAL-Nc", [TV_NORM_NTSC_M] = "NTSC-M", [TV_NORM_NTSC_J] = "NTSC-J", [TV_NORM_HD480I] = "hd480i", [TV_NORM_HD480P] = "hd480p", [TV_NORM_HD576I] = "hd576i", [TV_NORM_HD576P] = "hd576p", [TV_NORM_HD720P] = "hd720p", [TV_NORM_HD1080I] = "hd1080i" }; /* TV standard specific parameters */ struct nv17_tv_norm_params nv17_tv_norms[NUM_TV_NORMS] = { [TV_NORM_PAL] = { TV_ENC_MODE, { .tv_enc_mode = { 720, 576, 50000, { 0x2a, 0x9, 0x8a, 0xcb, 0x0, 0x0, 0xb, 0x18, 0x7e, 0x40, 0x8a, 0x35, 0x27, 0x0, 0x34, 0x3, 0x3e, 0x3, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x9c, 0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x3, 0xd3, 0x4, 0xd4, 0x1, 0x2, 0x0, 0xa, 0x5, 0x0, 0x1a, 0xff, 0x3, 0x18, 0xf, 0x78, 0x0, 0x0, 0xb4, 0x0, 0x15, 0x49, 0x10, 0x0, 0x9b, 0xbd, 0x15, 0x5, 0x15, 0x3e, 0x3, 0x0, 0x0 } } } }, [TV_NORM_PAL_M] = { TV_ENC_MODE, { .tv_enc_mode = { 720, 480, 59940, { 0x21, 0xe6, 0xef, 0xe3, 0x0, 0x0, 0xb, 0x18, 0x7e, 0x44, 0x76, 0x32, 0x25, 0x0, 0x3c, 0x0, 0x3c, 0x0, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x83, 0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x1, 0xc5, 0x4, 0xc5, 0x1, 0x2, 0x0, 0xa, 0x5, 0x0, 0x18, 0xff, 0x3, 0x20, 0xf, 0x78, 0x0, 0x0, 0xb4, 0x0, 0x15, 0x40, 0x10, 0x0, 0x9c, 0xc8, 0x15, 0x5, 0x15, 0x3c, 0x0, 0x0, 0x0 } } } }, [TV_NORM_PAL_N] = { TV_ENC_MODE, { .tv_enc_mode = { 720, 576, 50000, { 0x2a, 0x9, 0x8a, 0xcb, 0x0, 0x0, 0xb, 0x18, 0x7e, 0x40, 0x8a, 0x32, 0x25, 0x0, 0x3c, 0x0, 0x3c, 0x0, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x9c, 0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x1, 0xc5, 0x4, 0xc5, 0x1, 0x2, 0x0, 0xa, 0x5, 0x0, 0x1a, 0xff, 0x3, 0x18, 0xf, 0x78, 0x0, 0x0, 0xb4, 0x0, 0x15, 0x49, 0x10, 0x0, 0x9b, 0xbd, 0x15, 0x5, 0x15, 0x3c, 0x0, 0x0, 0x0 } } } }, [TV_NORM_PAL_NC] = { TV_ENC_MODE, { 
.tv_enc_mode = { 720, 576, 50000, { 0x21, 0xf6, 0x94, 0x46, 0x0, 0x0, 0xb, 0x18, 0x7e, 0x44, 0x8a, 0x35, 0x27, 0x0, 0x34, 0x3, 0x3e, 0x3, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x9c, 0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x3, 0xd3, 0x4, 0xd4, 0x1, 0x2, 0x0, 0xa, 0x5, 0x0, 0x1a, 0xff, 0x3, 0x18, 0xf, 0x78, 0x0, 0x0, 0xb4, 0x0, 0x15, 0x49, 0x10, 0x0, 0x9b, 0xbd, 0x15, 0x5, 0x15, 0x3e, 0x3, 0x0, 0x0 } } } }, [TV_NORM_NTSC_M] = { TV_ENC_MODE, { .tv_enc_mode = { 720, 480, 59940, { 0x21, 0xf0, 0x7c, 0x1f, 0x0, 0x0, 0xb, 0x18, 0x7e, 0x44, 0x76, 0x48, 0x0, 0x0, 0x3c, 0x0, 0x3c, 0x0, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x83, 0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x1, 0xc5, 0x4, 0xc5, 0x1, 0x2, 0x0, 0xa, 0x5, 0x0, 0x16, 0xff, 0x3, 0x20, 0xf, 0x78, 0x0, 0x0, 0xb4, 0x0, 0x15, 0x4, 0x10, 0x0, 0x9c, 0xc8, 0x15, 0x5, 0x15, 0x3c, 0x0, 0x0, 0x0 } } } }, [TV_NORM_NTSC_J] = { TV_ENC_MODE, { .tv_enc_mode = { 720, 480, 59940, { 0x21, 0xf0, 0x7c, 0x1f, 0x0, 0x0, 0xb, 0x18, 0x7e, 0x44, 0x76, 0x48, 0x0, 0x0, 0x32, 0x0, 0x3c, 0x0, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x83, 0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x1, 0xcf, 0x4, 0xcf, 0x1, 0x2, 0x0, 0xa, 0x5, 0x0, 0x16, 0xff, 0x3, 0x20, 0xf, 0x78, 0x0, 0x0, 0xb4, 0x0, 0x15, 0x4, 0x10, 0x0, 0xa4, 0xc8, 0x15, 0x5, 0x15, 0x3c, 0x0, 0x0, 0x0 } } } }, [TV_NORM_HD480I] = { TV_ENC_MODE, { .tv_enc_mode = { 720, 480, 59940, { 0x21, 0xf0, 0x7c, 0x1f, 0x0, 0x0, 0xb, 0x18, 0x7e, 0x44, 0x76, 0x48, 0x0, 0x0, 0x32, 0x0, 0x3c, 0x0, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x83, 0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x1, 0xcf, 0x4, 0xcf, 0x1, 0x2, 0x0, 0xa, 0x5, 0x0, 0x16, 0xff, 0x3, 0x20, 0xf, 0x78, 0x0, 0x0, 0xb4, 0x0, 0x15, 0x4, 0x10, 0x0, 0xa4, 0xc8, 0x15, 0x5, 0x15, 0x3c, 0x0, 0x0, 0x0 } } } }, [TV_NORM_HD576I] = { TV_ENC_MODE, { .tv_enc_mode = { 720, 576, 50000, { 0x2a, 0x9, 0x8a, 0xcb, 0x0, 0x0, 0xb, 0x18, 0x7e, 0x40, 0x8a, 0x35, 0x27, 0x0, 0x34, 0x3, 0x3e, 0x3, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x9c, 0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x3, 0xd3, 0x4, 0xd4, 0x1, 0x2, 0x0, 0xa, 0x5, 0x0, 
0x1a, 0xff, 0x3, 0x18, 0xf, 0x78, 0x0, 0x0, 0xb4, 0x0, 0x15, 0x49, 0x10, 0x0, 0x9b, 0xbd, 0x15, 0x5, 0x15, 0x3e, 0x3, 0x0, 0x0 } } } }, [TV_NORM_HD480P] = { CTV_ENC_MODE, { .ctv_enc_mode = { .mode = { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 735, 743, 858, 0, 480, 490, 494, 525, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, .ctv_regs = { 0x3540000, 0x0, 0x0, 0x314, 0x354003a, 0x40000, 0x6f0344, 0x18100000, 0x10160004, 0x10060005, 0x1006000c, 0x10060020, 0x10060021, 0x140e0022, 0x10060202, 0x1802020a, 0x1810020b, 0x10000fff, 0x10000fff, 0x10000fff, 0x10000fff, 0x10000fff, 0x10000fff, 0x70, 0x3ff0000, 0x57, 0x2e001e, 0x258012c, 0xa0aa04ec, 0x30, 0x80960019, 0x12c0300, 0x2019, 0x600, 0x32060019, 0x0, 0x0, 0x400 } } } }, [TV_NORM_HD576P] = { CTV_ENC_MODE, { .ctv_enc_mode = { .mode = { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 730, 738, 864, 0, 576, 581, 585, 625, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, .ctv_regs = { 0x3540000, 0x0, 0x0, 0x314, 0x354003a, 0x40000, 0x6f0344, 0x18100000, 0x10060001, 0x10060009, 0x10060026, 0x10060027, 0x140e0028, 0x10060268, 0x1810026d, 0x10000fff, 0x10000fff, 0x10000fff, 0x10000fff, 0x10000fff, 0x10000fff, 0x10000fff, 0x10000fff, 0x69, 0x3ff0000, 0x57, 0x2e001e, 0x258012c, 0xa0aa04ec, 0x30, 0x80960019, 0x12c0300, 0x2019, 0x600, 0x32060019, 0x0, 0x0, 0x400 } } } }, [TV_NORM_HD720P] = { CTV_ENC_MODE, { .ctv_enc_mode = { .mode = { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1349, 1357, 1650, 0, 720, 725, 730, 750, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, .ctv_regs = { 0x1260394, 0x0, 0x0, 0x622, 0x66b0021, 0x6004a, 0x1210626, 0x8170000, 0x70004, 0x70016, 0x70017, 0x40f0018, 0x702e8, 0x81702ed, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x0, 0x2e40001, 0x58, 0x2e001e, 0x258012c, 0xa0aa04ec, 0x30, 0x810c0039, 0x12c0300, 0xc0002039, 0x600, 0x32060039, 0x0, 0x0, 0x0 } } } }, [TV_NORM_HD1080I] = { CTV_ENC_MODE, { .ctv_enc_mode = { .mode = { DRM_MODE("1920x1080", 
DRM_MODE_TYPE_DRIVER, 74250, 1920, 1961, 2049, 2200, 0, 1080, 1084, 1088, 1125, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_INTERLACE) }, .ctv_regs = { 0xac0420, 0x44c0478, 0x4a4, 0x4fc0868, 0x8940028, 0x60054, 0xe80870, 0xbf70000, 0xbc70004, 0x70005, 0x70012, 0x70013, 0x40f0014, 0x70230, 0xbf70232, 0xbf70233, 0x1c70237, 0x70238, 0x70244, 0x70245, 0x40f0246, 0x70462, 0x1f70464, 0x0, 0x2e40001, 0x58, 0x2e001e, 0x258012c, 0xa0aa04ec, 0x30, 0x815f004c, 0x12c0300, 0xc000204c, 0x600, 0x3206004c, 0x0, 0x0, 0x0 } } } } }; /* * The following is some guesswork on how the TV encoder flicker * filter/rescaler works: * * It seems to use some sort of resampling filter, it is controlled * through the registers at NV_PTV_HFILTER and NV_PTV_VFILTER, they * control the horizontal and vertical stage respectively, there is * also NV_PTV_HFILTER2 the blob fills identically to NV_PTV_HFILTER, * but they seem to do nothing. A rough guess might be that they could * be used to independently control the filtering of each interlaced * field, but I don't know how they are enabled. The whole filtering * process seems to be disabled with bits 26:27 of PTV_200, but we * aren't doing that. * * The layout of both register sets is the same: * * A: [BASE+0x18]...[BASE+0x0] [BASE+0x58]..[BASE+0x40] * B: [BASE+0x34]...[BASE+0x1c] [BASE+0x74]..[BASE+0x5c] * * Each coefficient is stored in bits [31],[15:9] in two's complement * format. They seem to be some kind of weights used in a low-pass * filter. Both A and B coefficients are applied to the 14 nearest * samples on each side (Listed from nearest to furthermost. They * roughly cover 2 framebuffer pixels on each side). They are * probably multiplied with some more hardwired weights before being * used: B-coefficients are applied the same on both sides, * A-coefficients are inverted before being applied to the opposite * side. * * After all the hassle, I got the following formula by empirical * means... 
*/ #define calc_overscan(o) interpolate(0x100, 0xe1, 0xc1, o) #define id1 (1LL << 8) #define id2 (1LL << 16) #define id3 (1LL << 24) #define id4 (1LL << 32) #define id5 (1LL << 48) static struct filter_params{ int64_t k1; int64_t ki; int64_t ki2; int64_t ki3; int64_t kr; int64_t kir; int64_t ki2r; int64_t ki3r; int64_t kf; int64_t kif; int64_t ki2f; int64_t ki3f; int64_t krf; int64_t kirf; int64_t ki2rf; int64_t ki3rf; } fparams[2][4] = { /* Horizontal filter parameters */ { {64.311690 * id5, -39.516924 * id5, 6.586143 * id5, 0.000002 * id5, 0.051285 * id4, 26.168746 * id4, -4.361449 * id4, -0.000001 * id4, 9.308169 * id3, 78.180965 * id3, -13.030158 * id3, -0.000001 * id3, -8.801540 * id1, -46.572890 * id1, 7.762145 * id1, -0.000000 * id1}, {-44.565569 * id5, -68.081246 * id5, 39.812074 * id5, -4.009316 * id5, 29.832207 * id4, 50.047322 * id4, -25.380017 * id4, 2.546422 * id4, 104.605622 * id3, 141.908641 * id3, -74.322319 * id3, 7.484316 * id3, -37.081621 * id1, -90.397510 * id1, 42.784229 * id1, -4.289952 * id1}, {-56.793244 * id5, 31.153584 * id5, -5.192247 * id5, -0.000003 * id5, 33.541131 * id4, -34.149302 * id4, 5.691537 * id4, 0.000002 * id4, 87.196610 * id3, -88.995169 * id3, 14.832456 * id3, 0.000012 * id3, 17.288138 * id1, 71.864786 * id1, -11.977408 * id1, -0.000009 * id1}, {51.787796 * id5, 21.211771 * id5, -18.993730 * id5, 1.853310 * id5, -41.470726 * id4, -17.775823 * id4, 13.057821 * id4, -1.15823 * id4, -154.235673 * id3, -44.878641 * id3, 40.656077 * id3, -3.695595 * id3, 112.201065 * id1, 39.992155 * id1, -25.155714 * id1, 2.113984 * id1}, }, /* Vertical filter parameters */ { {67.601979 * id5, 0.428319 * id5, -0.071318 * id5, -0.000012 * id5, -3.402339 * id4, 0.000209 * id4, -0.000092 * id4, 0.000010 * id4, -9.180996 * id3, 6.111270 * id3, -1.024457 * id3, 0.001043 * id3, 6.060315 * id1, -0.017425 * id1, 0.007830 * id1, -0.000869 * id1}, {6.755647 * id5, 5.841348 * id5, 1.469734 * id5, -0.149656 * id5, 8.293120 * id4, -1.192888 * id4, -0.947652 
* id4, 0.094507 * id4, 37.526655 * id3, 10.257875 * id3, -10.823275 * id3, 1.081497 * id3, -2.361928 * id1, -2.059432 * id1, 1.840671 * id1, -0.168100 * id1}, {-14.780391 * id5, -16.042148 * id5, 2.673692 * id5, -0.000000 * id5, 39.541978 * id4, 5.680053 * id4, -0.946676 * id4, 0.000000 * id4, 152.994486 * id3, 12.625439 * id3, -2.119579 * id3, 0.002708 * id3, -38.125089 * id1, -0.855880 * id1, 0.155359 * id1, -0.002245 * id1}, {-27.476193 * id5, -1.454976 * id5, 1.286557 * id5, 0.025346 * id5, 20.687300 * id4, 3.014003 * id4, -0.557786 * id4, -0.01311 * id4, 60.008737 * id3, -0.738273 * id3, 5.408217 * id3, -0.796798 * id3, -17.296835 * id1, 4.438577 * id1, -2.809420 * id1, 0.385491 * id1}, } }; static void tv_setup_filter(struct drm_encoder *encoder) { struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder); struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder); struct drm_display_mode *mode = &encoder->crtc->mode; uint32_t (*filters[])[4][7] = {&tv_enc->state.hfilter, &tv_enc->state.vfilter}; int i, j, k; int32_t overscan = calc_overscan(tv_enc->overscan); int64_t flicker = (tv_enc->flicker - 50) * (id3 / 100); uint64_t rs[] = {mode->hdisplay * id3, mode->vdisplay * id3}; do_div(rs[0], overscan * tv_norm->tv_enc_mode.hdisplay); do_div(rs[1], overscan * tv_norm->tv_enc_mode.vdisplay); for (k = 0; k < 2; k++) { rs[k] = max((int64_t)rs[k], id2); for (j = 0; j < 4; j++) { struct filter_params *p = &fparams[k][j]; for (i = 0; i < 7; i++) { int64_t c = (p->k1 + p->ki*i + p->ki2*i*i + p->ki3*i*i*i) + (p->kr + p->kir*i + p->ki2r*i*i + p->ki3r*i*i*i) * rs[k] + (p->kf + p->kif*i + p->ki2f*i*i + p->ki3f*i*i*i) * flicker + (p->krf + p->kirf*i + p->ki2rf*i*i + p->ki3rf*i*i*i) * flicker * rs[k]; (*filters[k])[j][i] = (c + id5/2) >> 39 & (0x1 << 31 | 0x7f << 9); } } } } /* Hardware state saving/restoring */ static void tv_save_filter(struct drm_device *dev, uint32_t base, uint32_t regs[4][7]) { int i, j; uint32_t offsets[] = { base, base + 0x1c, base + 0x40, base + 0x5c }; 
for (i = 0; i < 4; i++) { for (j = 0; j < 7; j++) regs[i][j] = nv_read_ptv(dev, offsets[i]+4*j); } } static void tv_load_filter(struct drm_device *dev, uint32_t base, uint32_t regs[4][7]) { int i, j; uint32_t offsets[] = { base, base + 0x1c, base + 0x40, base + 0x5c }; for (i = 0; i < 4; i++) { for (j = 0; j < 7; j++) nv_write_ptv(dev, offsets[i]+4*j, regs[i][j]); } } void nv17_tv_state_save(struct drm_device *dev, struct nv17_tv_state *state) { int i; for (i = 0; i < 0x40; i++) state->tv_enc[i] = nv_read_tv_enc(dev, i); tv_save_filter(dev, NV_PTV_HFILTER, state->hfilter); tv_save_filter(dev, NV_PTV_HFILTER2, state->hfilter2); tv_save_filter(dev, NV_PTV_VFILTER, state->vfilter); nv_save_ptv(dev, state, 200); nv_save_ptv(dev, state, 204); nv_save_ptv(dev, state, 208); nv_save_ptv(dev, state, 20c); nv_save_ptv(dev, state, 304); nv_save_ptv(dev, state, 500); nv_save_ptv(dev, state, 504); nv_save_ptv(dev, state, 508); nv_save_ptv(dev, state, 600); nv_save_ptv(dev, state, 604); nv_save_ptv(dev, state, 608); nv_save_ptv(dev, state, 60c); nv_save_ptv(dev, state, 610); nv_save_ptv(dev, state, 614); } void nv17_tv_state_load(struct drm_device *dev, struct nv17_tv_state *state) { int i; for (i = 0; i < 0x40; i++) nv_write_tv_enc(dev, i, state->tv_enc[i]); tv_load_filter(dev, NV_PTV_HFILTER, state->hfilter); tv_load_filter(dev, NV_PTV_HFILTER2, state->hfilter2); tv_load_filter(dev, NV_PTV_VFILTER, state->vfilter); nv_load_ptv(dev, state, 200); nv_load_ptv(dev, state, 204); nv_load_ptv(dev, state, 208); nv_load_ptv(dev, state, 20c); nv_load_ptv(dev, state, 304); nv_load_ptv(dev, state, 500); nv_load_ptv(dev, state, 504); nv_load_ptv(dev, state, 508); nv_load_ptv(dev, state, 600); nv_load_ptv(dev, state, 604); nv_load_ptv(dev, state, 608); nv_load_ptv(dev, state, 60c); nv_load_ptv(dev, state, 610); nv_load_ptv(dev, state, 614); /* This is required for some settings to kick in. 
*/ nv_write_tv_enc(dev, 0x3e, 1); nv_write_tv_enc(dev, 0x3e, 0); } /* Timings similar to the ones the blob sets */ const struct drm_display_mode nv17_tv_modes[] = { { DRM_MODE("320x200", DRM_MODE_TYPE_DRIVER, 0, 320, 344, 392, 560, 0, 200, 200, 202, 220, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_CLKDIV2) }, { DRM_MODE("320x240", DRM_MODE_TYPE_DRIVER, 0, 320, 344, 392, 560, 0, 240, 240, 246, 263, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_CLKDIV2) }, { DRM_MODE("400x300", DRM_MODE_TYPE_DRIVER, 0, 400, 432, 496, 640, 0, 300, 300, 303, 314, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_CLKDIV2) }, { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 0, 640, 672, 768, 880, 0, 480, 480, 492, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 0, 720, 752, 872, 960, 0, 480, 480, 493, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 0, 720, 776, 856, 960, 0, 576, 576, 588, 597, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 0, 800, 840, 920, 1040, 0, 600, 600, 604, 618, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 0, 1024, 1064, 1200, 1344, 0, 768, 768, 777, 806, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, {} }; void nv17_tv_update_properties(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder); struct nv17_tv_state *regs = &tv_enc->state; struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder); int subconnector = tv_enc->select_subconnector ? tv_enc->select_subconnector : tv_enc->subconnector; switch (subconnector) { case DRM_MODE_SUBCONNECTOR_Composite: { regs->ptv_204 = 0x2; /* The composite connector may be found on either pin. 
*/ if (tv_enc->pin_mask & 0x4) regs->ptv_204 |= 0x010000; else if (tv_enc->pin_mask & 0x2) regs->ptv_204 |= 0x100000; else regs->ptv_204 |= 0x110000; regs->tv_enc[0x7] = 0x10; break; } case DRM_MODE_SUBCONNECTOR_SVIDEO: regs->ptv_204 = 0x11012; regs->tv_enc[0x7] = 0x18; break; case DRM_MODE_SUBCONNECTOR_Component: regs->ptv_204 = 0x111333; regs->tv_enc[0x7] = 0x14; break; case DRM_MODE_SUBCONNECTOR_SCART: regs->ptv_204 = 0x111012; regs->tv_enc[0x7] = 0x18; break; } regs->tv_enc[0x20] = interpolate(0, tv_norm->tv_enc_mode.tv_enc[0x20], 255, tv_enc->saturation); regs->tv_enc[0x22] = interpolate(0, tv_norm->tv_enc_mode.tv_enc[0x22], 255, tv_enc->saturation); regs->tv_enc[0x25] = tv_enc->hue * 255 / 100; nv_load_ptv(dev, regs, 204); nv_load_tv_enc(dev, regs, 7); nv_load_tv_enc(dev, regs, 20); nv_load_tv_enc(dev, regs, 22); nv_load_tv_enc(dev, regs, 25); } void nv17_tv_update_rescaler(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder); struct nv17_tv_state *regs = &tv_enc->state; regs->ptv_208 = 0x40 | (calc_overscan(tv_enc->overscan) << 8); tv_setup_filter(encoder); nv_load_ptv(dev, regs, 208); tv_load_filter(dev, NV_PTV_HFILTER, regs->hfilter); tv_load_filter(dev, NV_PTV_HFILTER2, regs->hfilter2); tv_load_filter(dev, NV_PTV_VFILTER, regs->vfilter); } void nv17_ctv_update_rescaler(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder); int head = nouveau_crtc(encoder->crtc)->index; struct nv04_crtc_reg *regs = &dev_priv->mode_reg.crtc_reg[head]; struct drm_display_mode *crtc_mode = &encoder->crtc->mode; struct drm_display_mode *output_mode = &get_tv_norm(encoder)->ctv_enc_mode.mode; int overscan, hmargin, vmargin, hratio, vratio; /* The rescaler doesn't do the right thing for interlaced modes. 
*/ if (output_mode->flags & DRM_MODE_FLAG_INTERLACE) overscan = 100; else overscan = tv_enc->overscan; hmargin = (output_mode->hdisplay - crtc_mode->hdisplay) / 2; vmargin = (output_mode->vdisplay - crtc_mode->vdisplay) / 2; hmargin = interpolate(0, min(hmargin, output_mode->hdisplay/20), hmargin, overscan); vmargin = interpolate(0, min(vmargin, output_mode->vdisplay/20), vmargin, overscan); hratio = crtc_mode->hdisplay * 0x800 / (output_mode->hdisplay - 2*hmargin); vratio = crtc_mode->vdisplay * 0x800 / (output_mode->vdisplay - 2*vmargin) & ~3; regs->fp_horiz_regs[FP_VALID_START] = hmargin; regs->fp_horiz_regs[FP_VALID_END] = output_mode->hdisplay - hmargin - 1; regs->fp_vert_regs[FP_VALID_START] = vmargin; regs->fp_vert_regs[FP_VALID_END] = output_mode->vdisplay - vmargin - 1; regs->fp_debug_1 = NV_PRAMDAC_FP_DEBUG_1_YSCALE_TESTMODE_ENABLE | XLATE(vratio, 0, NV_PRAMDAC_FP_DEBUG_1_YSCALE_VALUE) | NV_PRAMDAC_FP_DEBUG_1_XSCALE_TESTMODE_ENABLE | XLATE(hratio, 0, NV_PRAMDAC_FP_DEBUG_1_XSCALE_VALUE); NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HVALID_START, regs->fp_horiz_regs[FP_VALID_START]); NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HVALID_END, regs->fp_horiz_regs[FP_VALID_END]); NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_VVALID_START, regs->fp_vert_regs[FP_VALID_START]); NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_VVALID_END, regs->fp_vert_regs[FP_VALID_END]); NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_1, regs->fp_debug_1); }
gpl-2.0
TeamGlade-Devices/android_kernel_htc_pico
drivers/gpu/drm/nouveau/nv17_tv_modes.c
8164
21909
/* * Copyright (C) 2009 Francisco Jerez. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining * a copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sublicense, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial * portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
* */ #include "drmP.h" #include "drm_crtc_helper.h" #include "nouveau_drv.h" #include "nouveau_encoder.h" #include "nouveau_crtc.h" #include "nouveau_hw.h" #include "nv17_tv.h" char *nv17_tv_norm_names[NUM_TV_NORMS] = { [TV_NORM_PAL] = "PAL", [TV_NORM_PAL_M] = "PAL-M", [TV_NORM_PAL_N] = "PAL-N", [TV_NORM_PAL_NC] = "PAL-Nc", [TV_NORM_NTSC_M] = "NTSC-M", [TV_NORM_NTSC_J] = "NTSC-J", [TV_NORM_HD480I] = "hd480i", [TV_NORM_HD480P] = "hd480p", [TV_NORM_HD576I] = "hd576i", [TV_NORM_HD576P] = "hd576p", [TV_NORM_HD720P] = "hd720p", [TV_NORM_HD1080I] = "hd1080i" }; /* TV standard specific parameters */ struct nv17_tv_norm_params nv17_tv_norms[NUM_TV_NORMS] = { [TV_NORM_PAL] = { TV_ENC_MODE, { .tv_enc_mode = { 720, 576, 50000, { 0x2a, 0x9, 0x8a, 0xcb, 0x0, 0x0, 0xb, 0x18, 0x7e, 0x40, 0x8a, 0x35, 0x27, 0x0, 0x34, 0x3, 0x3e, 0x3, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x9c, 0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x3, 0xd3, 0x4, 0xd4, 0x1, 0x2, 0x0, 0xa, 0x5, 0x0, 0x1a, 0xff, 0x3, 0x18, 0xf, 0x78, 0x0, 0x0, 0xb4, 0x0, 0x15, 0x49, 0x10, 0x0, 0x9b, 0xbd, 0x15, 0x5, 0x15, 0x3e, 0x3, 0x0, 0x0 } } } }, [TV_NORM_PAL_M] = { TV_ENC_MODE, { .tv_enc_mode = { 720, 480, 59940, { 0x21, 0xe6, 0xef, 0xe3, 0x0, 0x0, 0xb, 0x18, 0x7e, 0x44, 0x76, 0x32, 0x25, 0x0, 0x3c, 0x0, 0x3c, 0x0, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x83, 0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x1, 0xc5, 0x4, 0xc5, 0x1, 0x2, 0x0, 0xa, 0x5, 0x0, 0x18, 0xff, 0x3, 0x20, 0xf, 0x78, 0x0, 0x0, 0xb4, 0x0, 0x15, 0x40, 0x10, 0x0, 0x9c, 0xc8, 0x15, 0x5, 0x15, 0x3c, 0x0, 0x0, 0x0 } } } }, [TV_NORM_PAL_N] = { TV_ENC_MODE, { .tv_enc_mode = { 720, 576, 50000, { 0x2a, 0x9, 0x8a, 0xcb, 0x0, 0x0, 0xb, 0x18, 0x7e, 0x40, 0x8a, 0x32, 0x25, 0x0, 0x3c, 0x0, 0x3c, 0x0, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x9c, 0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x1, 0xc5, 0x4, 0xc5, 0x1, 0x2, 0x0, 0xa, 0x5, 0x0, 0x1a, 0xff, 0x3, 0x18, 0xf, 0x78, 0x0, 0x0, 0xb4, 0x0, 0x15, 0x49, 0x10, 0x0, 0x9b, 0xbd, 0x15, 0x5, 0x15, 0x3c, 0x0, 0x0, 0x0 } } } }, [TV_NORM_PAL_NC] = { TV_ENC_MODE, { 
.tv_enc_mode = { 720, 576, 50000, { 0x21, 0xf6, 0x94, 0x46, 0x0, 0x0, 0xb, 0x18, 0x7e, 0x44, 0x8a, 0x35, 0x27, 0x0, 0x34, 0x3, 0x3e, 0x3, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x9c, 0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x3, 0xd3, 0x4, 0xd4, 0x1, 0x2, 0x0, 0xa, 0x5, 0x0, 0x1a, 0xff, 0x3, 0x18, 0xf, 0x78, 0x0, 0x0, 0xb4, 0x0, 0x15, 0x49, 0x10, 0x0, 0x9b, 0xbd, 0x15, 0x5, 0x15, 0x3e, 0x3, 0x0, 0x0 } } } }, [TV_NORM_NTSC_M] = { TV_ENC_MODE, { .tv_enc_mode = { 720, 480, 59940, { 0x21, 0xf0, 0x7c, 0x1f, 0x0, 0x0, 0xb, 0x18, 0x7e, 0x44, 0x76, 0x48, 0x0, 0x0, 0x3c, 0x0, 0x3c, 0x0, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x83, 0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x1, 0xc5, 0x4, 0xc5, 0x1, 0x2, 0x0, 0xa, 0x5, 0x0, 0x16, 0xff, 0x3, 0x20, 0xf, 0x78, 0x0, 0x0, 0xb4, 0x0, 0x15, 0x4, 0x10, 0x0, 0x9c, 0xc8, 0x15, 0x5, 0x15, 0x3c, 0x0, 0x0, 0x0 } } } }, [TV_NORM_NTSC_J] = { TV_ENC_MODE, { .tv_enc_mode = { 720, 480, 59940, { 0x21, 0xf0, 0x7c, 0x1f, 0x0, 0x0, 0xb, 0x18, 0x7e, 0x44, 0x76, 0x48, 0x0, 0x0, 0x32, 0x0, 0x3c, 0x0, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x83, 0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x1, 0xcf, 0x4, 0xcf, 0x1, 0x2, 0x0, 0xa, 0x5, 0x0, 0x16, 0xff, 0x3, 0x20, 0xf, 0x78, 0x0, 0x0, 0xb4, 0x0, 0x15, 0x4, 0x10, 0x0, 0xa4, 0xc8, 0x15, 0x5, 0x15, 0x3c, 0x0, 0x0, 0x0 } } } }, [TV_NORM_HD480I] = { TV_ENC_MODE, { .tv_enc_mode = { 720, 480, 59940, { 0x21, 0xf0, 0x7c, 0x1f, 0x0, 0x0, 0xb, 0x18, 0x7e, 0x44, 0x76, 0x48, 0x0, 0x0, 0x32, 0x0, 0x3c, 0x0, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x83, 0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x1, 0xcf, 0x4, 0xcf, 0x1, 0x2, 0x0, 0xa, 0x5, 0x0, 0x16, 0xff, 0x3, 0x20, 0xf, 0x78, 0x0, 0x0, 0xb4, 0x0, 0x15, 0x4, 0x10, 0x0, 0xa4, 0xc8, 0x15, 0x5, 0x15, 0x3c, 0x0, 0x0, 0x0 } } } }, [TV_NORM_HD576I] = { TV_ENC_MODE, { .tv_enc_mode = { 720, 576, 50000, { 0x2a, 0x9, 0x8a, 0xcb, 0x0, 0x0, 0xb, 0x18, 0x7e, 0x40, 0x8a, 0x35, 0x27, 0x0, 0x34, 0x3, 0x3e, 0x3, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x9c, 0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x3, 0xd3, 0x4, 0xd4, 0x1, 0x2, 0x0, 0xa, 0x5, 0x0, 
0x1a, 0xff, 0x3, 0x18, 0xf, 0x78, 0x0, 0x0, 0xb4, 0x0, 0x15, 0x49, 0x10, 0x0, 0x9b, 0xbd, 0x15, 0x5, 0x15, 0x3e, 0x3, 0x0, 0x0 } } } }, [TV_NORM_HD480P] = { CTV_ENC_MODE, { .ctv_enc_mode = { .mode = { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 735, 743, 858, 0, 480, 490, 494, 525, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, .ctv_regs = { 0x3540000, 0x0, 0x0, 0x314, 0x354003a, 0x40000, 0x6f0344, 0x18100000, 0x10160004, 0x10060005, 0x1006000c, 0x10060020, 0x10060021, 0x140e0022, 0x10060202, 0x1802020a, 0x1810020b, 0x10000fff, 0x10000fff, 0x10000fff, 0x10000fff, 0x10000fff, 0x10000fff, 0x70, 0x3ff0000, 0x57, 0x2e001e, 0x258012c, 0xa0aa04ec, 0x30, 0x80960019, 0x12c0300, 0x2019, 0x600, 0x32060019, 0x0, 0x0, 0x400 } } } }, [TV_NORM_HD576P] = { CTV_ENC_MODE, { .ctv_enc_mode = { .mode = { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 730, 738, 864, 0, 576, 581, 585, 625, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, .ctv_regs = { 0x3540000, 0x0, 0x0, 0x314, 0x354003a, 0x40000, 0x6f0344, 0x18100000, 0x10060001, 0x10060009, 0x10060026, 0x10060027, 0x140e0028, 0x10060268, 0x1810026d, 0x10000fff, 0x10000fff, 0x10000fff, 0x10000fff, 0x10000fff, 0x10000fff, 0x10000fff, 0x10000fff, 0x69, 0x3ff0000, 0x57, 0x2e001e, 0x258012c, 0xa0aa04ec, 0x30, 0x80960019, 0x12c0300, 0x2019, 0x600, 0x32060019, 0x0, 0x0, 0x400 } } } }, [TV_NORM_HD720P] = { CTV_ENC_MODE, { .ctv_enc_mode = { .mode = { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1349, 1357, 1650, 0, 720, 725, 730, 750, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, .ctv_regs = { 0x1260394, 0x0, 0x0, 0x622, 0x66b0021, 0x6004a, 0x1210626, 0x8170000, 0x70004, 0x70016, 0x70017, 0x40f0018, 0x702e8, 0x81702ed, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x0, 0x2e40001, 0x58, 0x2e001e, 0x258012c, 0xa0aa04ec, 0x30, 0x810c0039, 0x12c0300, 0xc0002039, 0x600, 0x32060039, 0x0, 0x0, 0x0 } } } }, [TV_NORM_HD1080I] = { CTV_ENC_MODE, { .ctv_enc_mode = { .mode = { DRM_MODE("1920x1080", 
DRM_MODE_TYPE_DRIVER, 74250, 1920, 1961, 2049, 2200, 0, 1080, 1084, 1088, 1125, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_INTERLACE) }, .ctv_regs = { 0xac0420, 0x44c0478, 0x4a4, 0x4fc0868, 0x8940028, 0x60054, 0xe80870, 0xbf70000, 0xbc70004, 0x70005, 0x70012, 0x70013, 0x40f0014, 0x70230, 0xbf70232, 0xbf70233, 0x1c70237, 0x70238, 0x70244, 0x70245, 0x40f0246, 0x70462, 0x1f70464, 0x0, 0x2e40001, 0x58, 0x2e001e, 0x258012c, 0xa0aa04ec, 0x30, 0x815f004c, 0x12c0300, 0xc000204c, 0x600, 0x3206004c, 0x0, 0x0, 0x0 } } } } }; /* * The following is some guesswork on how the TV encoder flicker * filter/rescaler works: * * It seems to use some sort of resampling filter, it is controlled * through the registers at NV_PTV_HFILTER and NV_PTV_VFILTER, they * control the horizontal and vertical stage respectively, there is * also NV_PTV_HFILTER2 the blob fills identically to NV_PTV_HFILTER, * but they seem to do nothing. A rough guess might be that they could * be used to independently control the filtering of each interlaced * field, but I don't know how they are enabled. The whole filtering * process seems to be disabled with bits 26:27 of PTV_200, but we * aren't doing that. * * The layout of both register sets is the same: * * A: [BASE+0x18]...[BASE+0x0] [BASE+0x58]..[BASE+0x40] * B: [BASE+0x34]...[BASE+0x1c] [BASE+0x74]..[BASE+0x5c] * * Each coefficient is stored in bits [31],[15:9] in two's complement * format. They seem to be some kind of weights used in a low-pass * filter. Both A and B coefficients are applied to the 14 nearest * samples on each side (Listed from nearest to furthermost. They * roughly cover 2 framebuffer pixels on each side). They are * probably multiplied with some more hardwired weights before being * used: B-coefficients are applied the same on both sides, * A-coefficients are inverted before being applied to the opposite * side. * * After all the hassle, I got the following formula by empirical * means... 
*/ #define calc_overscan(o) interpolate(0x100, 0xe1, 0xc1, o) #define id1 (1LL << 8) #define id2 (1LL << 16) #define id3 (1LL << 24) #define id4 (1LL << 32) #define id5 (1LL << 48) static struct filter_params{ int64_t k1; int64_t ki; int64_t ki2; int64_t ki3; int64_t kr; int64_t kir; int64_t ki2r; int64_t ki3r; int64_t kf; int64_t kif; int64_t ki2f; int64_t ki3f; int64_t krf; int64_t kirf; int64_t ki2rf; int64_t ki3rf; } fparams[2][4] = { /* Horizontal filter parameters */ { {64.311690 * id5, -39.516924 * id5, 6.586143 * id5, 0.000002 * id5, 0.051285 * id4, 26.168746 * id4, -4.361449 * id4, -0.000001 * id4, 9.308169 * id3, 78.180965 * id3, -13.030158 * id3, -0.000001 * id3, -8.801540 * id1, -46.572890 * id1, 7.762145 * id1, -0.000000 * id1}, {-44.565569 * id5, -68.081246 * id5, 39.812074 * id5, -4.009316 * id5, 29.832207 * id4, 50.047322 * id4, -25.380017 * id4, 2.546422 * id4, 104.605622 * id3, 141.908641 * id3, -74.322319 * id3, 7.484316 * id3, -37.081621 * id1, -90.397510 * id1, 42.784229 * id1, -4.289952 * id1}, {-56.793244 * id5, 31.153584 * id5, -5.192247 * id5, -0.000003 * id5, 33.541131 * id4, -34.149302 * id4, 5.691537 * id4, 0.000002 * id4, 87.196610 * id3, -88.995169 * id3, 14.832456 * id3, 0.000012 * id3, 17.288138 * id1, 71.864786 * id1, -11.977408 * id1, -0.000009 * id1}, {51.787796 * id5, 21.211771 * id5, -18.993730 * id5, 1.853310 * id5, -41.470726 * id4, -17.775823 * id4, 13.057821 * id4, -1.15823 * id4, -154.235673 * id3, -44.878641 * id3, 40.656077 * id3, -3.695595 * id3, 112.201065 * id1, 39.992155 * id1, -25.155714 * id1, 2.113984 * id1}, }, /* Vertical filter parameters */ { {67.601979 * id5, 0.428319 * id5, -0.071318 * id5, -0.000012 * id5, -3.402339 * id4, 0.000209 * id4, -0.000092 * id4, 0.000010 * id4, -9.180996 * id3, 6.111270 * id3, -1.024457 * id3, 0.001043 * id3, 6.060315 * id1, -0.017425 * id1, 0.007830 * id1, -0.000869 * id1}, {6.755647 * id5, 5.841348 * id5, 1.469734 * id5, -0.149656 * id5, 8.293120 * id4, -1.192888 * id4, -0.947652 
* id4, 0.094507 * id4, 37.526655 * id3, 10.257875 * id3, -10.823275 * id3, 1.081497 * id3, -2.361928 * id1, -2.059432 * id1, 1.840671 * id1, -0.168100 * id1}, {-14.780391 * id5, -16.042148 * id5, 2.673692 * id5, -0.000000 * id5, 39.541978 * id4, 5.680053 * id4, -0.946676 * id4, 0.000000 * id4, 152.994486 * id3, 12.625439 * id3, -2.119579 * id3, 0.002708 * id3, -38.125089 * id1, -0.855880 * id1, 0.155359 * id1, -0.002245 * id1}, {-27.476193 * id5, -1.454976 * id5, 1.286557 * id5, 0.025346 * id5, 20.687300 * id4, 3.014003 * id4, -0.557786 * id4, -0.01311 * id4, 60.008737 * id3, -0.738273 * id3, 5.408217 * id3, -0.796798 * id3, -17.296835 * id1, 4.438577 * id1, -2.809420 * id1, 0.385491 * id1}, } }; static void tv_setup_filter(struct drm_encoder *encoder) { struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder); struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder); struct drm_display_mode *mode = &encoder->crtc->mode; uint32_t (*filters[])[4][7] = {&tv_enc->state.hfilter, &tv_enc->state.vfilter}; int i, j, k; int32_t overscan = calc_overscan(tv_enc->overscan); int64_t flicker = (tv_enc->flicker - 50) * (id3 / 100); uint64_t rs[] = {mode->hdisplay * id3, mode->vdisplay * id3}; do_div(rs[0], overscan * tv_norm->tv_enc_mode.hdisplay); do_div(rs[1], overscan * tv_norm->tv_enc_mode.vdisplay); for (k = 0; k < 2; k++) { rs[k] = max((int64_t)rs[k], id2); for (j = 0; j < 4; j++) { struct filter_params *p = &fparams[k][j]; for (i = 0; i < 7; i++) { int64_t c = (p->k1 + p->ki*i + p->ki2*i*i + p->ki3*i*i*i) + (p->kr + p->kir*i + p->ki2r*i*i + p->ki3r*i*i*i) * rs[k] + (p->kf + p->kif*i + p->ki2f*i*i + p->ki3f*i*i*i) * flicker + (p->krf + p->kirf*i + p->ki2rf*i*i + p->ki3rf*i*i*i) * flicker * rs[k]; (*filters[k])[j][i] = (c + id5/2) >> 39 & (0x1 << 31 | 0x7f << 9); } } } } /* Hardware state saving/restoring */ static void tv_save_filter(struct drm_device *dev, uint32_t base, uint32_t regs[4][7]) { int i, j; uint32_t offsets[] = { base, base + 0x1c, base + 0x40, base + 0x5c }; 
for (i = 0; i < 4; i++) { for (j = 0; j < 7; j++) regs[i][j] = nv_read_ptv(dev, offsets[i]+4*j); } } static void tv_load_filter(struct drm_device *dev, uint32_t base, uint32_t regs[4][7]) { int i, j; uint32_t offsets[] = { base, base + 0x1c, base + 0x40, base + 0x5c }; for (i = 0; i < 4; i++) { for (j = 0; j < 7; j++) nv_write_ptv(dev, offsets[i]+4*j, regs[i][j]); } } void nv17_tv_state_save(struct drm_device *dev, struct nv17_tv_state *state) { int i; for (i = 0; i < 0x40; i++) state->tv_enc[i] = nv_read_tv_enc(dev, i); tv_save_filter(dev, NV_PTV_HFILTER, state->hfilter); tv_save_filter(dev, NV_PTV_HFILTER2, state->hfilter2); tv_save_filter(dev, NV_PTV_VFILTER, state->vfilter); nv_save_ptv(dev, state, 200); nv_save_ptv(dev, state, 204); nv_save_ptv(dev, state, 208); nv_save_ptv(dev, state, 20c); nv_save_ptv(dev, state, 304); nv_save_ptv(dev, state, 500); nv_save_ptv(dev, state, 504); nv_save_ptv(dev, state, 508); nv_save_ptv(dev, state, 600); nv_save_ptv(dev, state, 604); nv_save_ptv(dev, state, 608); nv_save_ptv(dev, state, 60c); nv_save_ptv(dev, state, 610); nv_save_ptv(dev, state, 614); } void nv17_tv_state_load(struct drm_device *dev, struct nv17_tv_state *state) { int i; for (i = 0; i < 0x40; i++) nv_write_tv_enc(dev, i, state->tv_enc[i]); tv_load_filter(dev, NV_PTV_HFILTER, state->hfilter); tv_load_filter(dev, NV_PTV_HFILTER2, state->hfilter2); tv_load_filter(dev, NV_PTV_VFILTER, state->vfilter); nv_load_ptv(dev, state, 200); nv_load_ptv(dev, state, 204); nv_load_ptv(dev, state, 208); nv_load_ptv(dev, state, 20c); nv_load_ptv(dev, state, 304); nv_load_ptv(dev, state, 500); nv_load_ptv(dev, state, 504); nv_load_ptv(dev, state, 508); nv_load_ptv(dev, state, 600); nv_load_ptv(dev, state, 604); nv_load_ptv(dev, state, 608); nv_load_ptv(dev, state, 60c); nv_load_ptv(dev, state, 610); nv_load_ptv(dev, state, 614); /* This is required for some settings to kick in. 
*/ nv_write_tv_enc(dev, 0x3e, 1); nv_write_tv_enc(dev, 0x3e, 0); } /* Timings similar to the ones the blob sets */ const struct drm_display_mode nv17_tv_modes[] = { { DRM_MODE("320x200", DRM_MODE_TYPE_DRIVER, 0, 320, 344, 392, 560, 0, 200, 200, 202, 220, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_CLKDIV2) }, { DRM_MODE("320x240", DRM_MODE_TYPE_DRIVER, 0, 320, 344, 392, 560, 0, 240, 240, 246, 263, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_CLKDIV2) }, { DRM_MODE("400x300", DRM_MODE_TYPE_DRIVER, 0, 400, 432, 496, 640, 0, 300, 300, 303, 314, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_CLKDIV2) }, { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 0, 640, 672, 768, 880, 0, 480, 480, 492, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 0, 720, 752, 872, 960, 0, 480, 480, 493, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 0, 720, 776, 856, 960, 0, 576, 576, 588, 597, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 0, 800, 840, 920, 1040, 0, 600, 600, 604, 618, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 0, 1024, 1064, 1200, 1344, 0, 768, 768, 777, 806, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, {} }; void nv17_tv_update_properties(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder); struct nv17_tv_state *regs = &tv_enc->state; struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder); int subconnector = tv_enc->select_subconnector ? tv_enc->select_subconnector : tv_enc->subconnector; switch (subconnector) { case DRM_MODE_SUBCONNECTOR_Composite: { regs->ptv_204 = 0x2; /* The composite connector may be found on either pin. 
*/ if (tv_enc->pin_mask & 0x4) regs->ptv_204 |= 0x010000; else if (tv_enc->pin_mask & 0x2) regs->ptv_204 |= 0x100000; else regs->ptv_204 |= 0x110000; regs->tv_enc[0x7] = 0x10; break; } case DRM_MODE_SUBCONNECTOR_SVIDEO: regs->ptv_204 = 0x11012; regs->tv_enc[0x7] = 0x18; break; case DRM_MODE_SUBCONNECTOR_Component: regs->ptv_204 = 0x111333; regs->tv_enc[0x7] = 0x14; break; case DRM_MODE_SUBCONNECTOR_SCART: regs->ptv_204 = 0x111012; regs->tv_enc[0x7] = 0x18; break; } regs->tv_enc[0x20] = interpolate(0, tv_norm->tv_enc_mode.tv_enc[0x20], 255, tv_enc->saturation); regs->tv_enc[0x22] = interpolate(0, tv_norm->tv_enc_mode.tv_enc[0x22], 255, tv_enc->saturation); regs->tv_enc[0x25] = tv_enc->hue * 255 / 100; nv_load_ptv(dev, regs, 204); nv_load_tv_enc(dev, regs, 7); nv_load_tv_enc(dev, regs, 20); nv_load_tv_enc(dev, regs, 22); nv_load_tv_enc(dev, regs, 25); } void nv17_tv_update_rescaler(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder); struct nv17_tv_state *regs = &tv_enc->state; regs->ptv_208 = 0x40 | (calc_overscan(tv_enc->overscan) << 8); tv_setup_filter(encoder); nv_load_ptv(dev, regs, 208); tv_load_filter(dev, NV_PTV_HFILTER, regs->hfilter); tv_load_filter(dev, NV_PTV_HFILTER2, regs->hfilter2); tv_load_filter(dev, NV_PTV_VFILTER, regs->vfilter); } void nv17_ctv_update_rescaler(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder); int head = nouveau_crtc(encoder->crtc)->index; struct nv04_crtc_reg *regs = &dev_priv->mode_reg.crtc_reg[head]; struct drm_display_mode *crtc_mode = &encoder->crtc->mode; struct drm_display_mode *output_mode = &get_tv_norm(encoder)->ctv_enc_mode.mode; int overscan, hmargin, vmargin, hratio, vratio; /* The rescaler doesn't do the right thing for interlaced modes. 
*/ if (output_mode->flags & DRM_MODE_FLAG_INTERLACE) overscan = 100; else overscan = tv_enc->overscan; hmargin = (output_mode->hdisplay - crtc_mode->hdisplay) / 2; vmargin = (output_mode->vdisplay - crtc_mode->vdisplay) / 2; hmargin = interpolate(0, min(hmargin, output_mode->hdisplay/20), hmargin, overscan); vmargin = interpolate(0, min(vmargin, output_mode->vdisplay/20), vmargin, overscan); hratio = crtc_mode->hdisplay * 0x800 / (output_mode->hdisplay - 2*hmargin); vratio = crtc_mode->vdisplay * 0x800 / (output_mode->vdisplay - 2*vmargin) & ~3; regs->fp_horiz_regs[FP_VALID_START] = hmargin; regs->fp_horiz_regs[FP_VALID_END] = output_mode->hdisplay - hmargin - 1; regs->fp_vert_regs[FP_VALID_START] = vmargin; regs->fp_vert_regs[FP_VALID_END] = output_mode->vdisplay - vmargin - 1; regs->fp_debug_1 = NV_PRAMDAC_FP_DEBUG_1_YSCALE_TESTMODE_ENABLE | XLATE(vratio, 0, NV_PRAMDAC_FP_DEBUG_1_YSCALE_VALUE) | NV_PRAMDAC_FP_DEBUG_1_XSCALE_TESTMODE_ENABLE | XLATE(hratio, 0, NV_PRAMDAC_FP_DEBUG_1_XSCALE_VALUE); NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HVALID_START, regs->fp_horiz_regs[FP_VALID_START]); NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HVALID_END, regs->fp_horiz_regs[FP_VALID_END]); NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_VVALID_START, regs->fp_vert_regs[FP_VALID_START]); NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_VVALID_END, regs->fp_vert_regs[FP_VALID_END]); NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_1, regs->fp_debug_1); }
gpl-2.0
nobooya/e975-kk-kernel
arch/parisc/lib/memcpy.c
8676
15736
/* * Optimized memory copy routines. * * Copyright (C) 2004 Randolph Chung <tausq@debian.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * Portions derived from the GNU C Library * Copyright (C) 1991, 1997, 2003 Free Software Foundation, Inc. * * Several strategies are tried to try to get the best performance for various * conditions. In the optimal case, we copy 64-bytes in an unrolled loop using * fp regs. This is followed by loops that copy 32- or 16-bytes at a time using * general registers. Unaligned copies are handled either by aligning the * destination and then using shift-and-write method, or in a few cases by * falling back to a byte-at-a-time copy. * * I chose to implement this in C because it is easier to maintain and debug, * and in my experiments it appears that the C code generated by gcc (3.3/3.4 * at the time of writing) is fairly optimal. Unfortunately some of the * semantics of the copy routine (exception handling) is difficult to express * in C, so we have to play some tricks to get it to work. * * All the loads and stores are done via explicit asm() code in order to use * the right space registers. * * Testing with various alignments and buffer sizes shows that this code is * often >10x faster than a simple byte-at-a-time copy, even for strangely * aligned operands. 
It is interesting to note that the glibc version * of memcpy (written in C) is actually quite fast already. This routine is * able to beat it by 30-40% for aligned copies because of the loop unrolling, * but in some cases the glibc version is still slightly faster. This lends * more credibility that gcc can generate very good code as long as we are * careful. * * TODO: * - cache prefetching needs more experimentation to get optimal settings * - try not to use the post-increment address modifiers; they create additional * interlocks * - replace byte-copy loops with stybs sequences */ #ifdef __KERNEL__ #include <linux/module.h> #include <linux/compiler.h> #include <asm/uaccess.h> #define s_space "%%sr1" #define d_space "%%sr2" #else #include "memcpy.h" #define s_space "%%sr0" #define d_space "%%sr0" #define pa_memcpy new2_copy #endif DECLARE_PER_CPU(struct exception_data, exception_data); #define preserve_branch(label) do { \ volatile int dummy; \ /* The following branch is never taken, it's just here to */ \ /* prevent gcc from optimizing away our exception code. */ \ if (unlikely(dummy != dummy)) \ goto label; \ } while (0) #define get_user_space() (segment_eq(get_fs(), KERNEL_DS) ? 0 : mfsp(3)) #define get_kernel_space() (0) #define MERGE(w0, sh_1, w1, sh_2) ({ \ unsigned int _r; \ asm volatile ( \ "mtsar %3\n" \ "shrpw %1, %2, %%sar, %0\n" \ : "=r"(_r) \ : "r"(w0), "r"(w1), "r"(sh_2) \ ); \ _r; \ }) #define THRESHOLD 16 #ifdef DEBUG_MEMCPY #define DPRINTF(fmt, args...) do { printk(KERN_DEBUG "%s:%d:%s ", __FILE__, __LINE__, __func__ ); printk(KERN_DEBUG fmt, ##args ); } while (0) #else #define DPRINTF(fmt, args...) 
#endif #define def_load_ai_insn(_insn,_sz,_tt,_s,_a,_t,_e) \ __asm__ __volatile__ ( \ "1:\t" #_insn ",ma " #_sz "(" _s ",%1), %0\n\t" \ ASM_EXCEPTIONTABLE_ENTRY(1b,_e) \ : _tt(_t), "+r"(_a) \ : \ : "r8") #define def_store_ai_insn(_insn,_sz,_tt,_s,_a,_t,_e) \ __asm__ __volatile__ ( \ "1:\t" #_insn ",ma %1, " #_sz "(" _s ",%0)\n\t" \ ASM_EXCEPTIONTABLE_ENTRY(1b,_e) \ : "+r"(_a) \ : _tt(_t) \ : "r8") #define ldbma(_s, _a, _t, _e) def_load_ai_insn(ldbs,1,"=r",_s,_a,_t,_e) #define stbma(_s, _t, _a, _e) def_store_ai_insn(stbs,1,"r",_s,_a,_t,_e) #define ldwma(_s, _a, _t, _e) def_load_ai_insn(ldw,4,"=r",_s,_a,_t,_e) #define stwma(_s, _t, _a, _e) def_store_ai_insn(stw,4,"r",_s,_a,_t,_e) #define flddma(_s, _a, _t, _e) def_load_ai_insn(fldd,8,"=f",_s,_a,_t,_e) #define fstdma(_s, _t, _a, _e) def_store_ai_insn(fstd,8,"f",_s,_a,_t,_e) #define def_load_insn(_insn,_tt,_s,_o,_a,_t,_e) \ __asm__ __volatile__ ( \ "1:\t" #_insn " " #_o "(" _s ",%1), %0\n\t" \ ASM_EXCEPTIONTABLE_ENTRY(1b,_e) \ : _tt(_t) \ : "r"(_a) \ : "r8") #define def_store_insn(_insn,_tt,_s,_t,_o,_a,_e) \ __asm__ __volatile__ ( \ "1:\t" #_insn " %0, " #_o "(" _s ",%1)\n\t" \ ASM_EXCEPTIONTABLE_ENTRY(1b,_e) \ : \ : _tt(_t), "r"(_a) \ : "r8") #define ldw(_s,_o,_a,_t,_e) def_load_insn(ldw,"=r",_s,_o,_a,_t,_e) #define stw(_s,_t,_o,_a,_e) def_store_insn(stw,"r",_s,_t,_o,_a,_e) #ifdef CONFIG_PREFETCH static inline void prefetch_src(const void *addr) { __asm__("ldw 0(" s_space ",%0), %%r0" : : "r" (addr)); } static inline void prefetch_dst(const void *addr) { __asm__("ldd 0(" d_space ",%0), %%r0" : : "r" (addr)); } #else #define prefetch_src(addr) do { } while(0) #define prefetch_dst(addr) do { } while(0) #endif /* Copy from a not-aligned src to an aligned dst, using shifts. Handles 4 words * per loop. This code is derived from glibc. 
*/ static inline unsigned long copy_dstaligned(unsigned long dst, unsigned long src, unsigned long len, unsigned long o_dst, unsigned long o_src, unsigned long o_len) { /* gcc complains that a2 and a3 may be uninitialized, but actually * they cannot be. Initialize a2/a3 to shut gcc up. */ register unsigned int a0, a1, a2 = 0, a3 = 0; int sh_1, sh_2; struct exception_data *d; /* prefetch_src((const void *)src); */ /* Calculate how to shift a word read at the memory operation aligned srcp to make it aligned for copy. */ sh_1 = 8 * (src % sizeof(unsigned int)); sh_2 = 8 * sizeof(unsigned int) - sh_1; /* Make src aligned by rounding it down. */ src &= -sizeof(unsigned int); switch (len % 4) { case 2: /* a1 = ((unsigned int *) src)[0]; a2 = ((unsigned int *) src)[1]; */ ldw(s_space, 0, src, a1, cda_ldw_exc); ldw(s_space, 4, src, a2, cda_ldw_exc); src -= 1 * sizeof(unsigned int); dst -= 3 * sizeof(unsigned int); len += 2; goto do1; case 3: /* a0 = ((unsigned int *) src)[0]; a1 = ((unsigned int *) src)[1]; */ ldw(s_space, 0, src, a0, cda_ldw_exc); ldw(s_space, 4, src, a1, cda_ldw_exc); src -= 0 * sizeof(unsigned int); dst -= 2 * sizeof(unsigned int); len += 1; goto do2; case 0: if (len == 0) return 0; /* a3 = ((unsigned int *) src)[0]; a0 = ((unsigned int *) src)[1]; */ ldw(s_space, 0, src, a3, cda_ldw_exc); ldw(s_space, 4, src, a0, cda_ldw_exc); src -=-1 * sizeof(unsigned int); dst -= 1 * sizeof(unsigned int); len += 0; goto do3; case 1: /* a2 = ((unsigned int *) src)[0]; a3 = ((unsigned int *) src)[1]; */ ldw(s_space, 0, src, a2, cda_ldw_exc); ldw(s_space, 4, src, a3, cda_ldw_exc); src -=-2 * sizeof(unsigned int); dst -= 0 * sizeof(unsigned int); len -= 1; if (len == 0) goto do0; goto do4; /* No-op. 
*/ } do { /* prefetch_src((const void *)(src + 4 * sizeof(unsigned int))); */ do4: /* a0 = ((unsigned int *) src)[0]; */ ldw(s_space, 0, src, a0, cda_ldw_exc); /* ((unsigned int *) dst)[0] = MERGE (a2, sh_1, a3, sh_2); */ stw(d_space, MERGE (a2, sh_1, a3, sh_2), 0, dst, cda_stw_exc); do3: /* a1 = ((unsigned int *) src)[1]; */ ldw(s_space, 4, src, a1, cda_ldw_exc); /* ((unsigned int *) dst)[1] = MERGE (a3, sh_1, a0, sh_2); */ stw(d_space, MERGE (a3, sh_1, a0, sh_2), 4, dst, cda_stw_exc); do2: /* a2 = ((unsigned int *) src)[2]; */ ldw(s_space, 8, src, a2, cda_ldw_exc); /* ((unsigned int *) dst)[2] = MERGE (a0, sh_1, a1, sh_2); */ stw(d_space, MERGE (a0, sh_1, a1, sh_2), 8, dst, cda_stw_exc); do1: /* a3 = ((unsigned int *) src)[3]; */ ldw(s_space, 12, src, a3, cda_ldw_exc); /* ((unsigned int *) dst)[3] = MERGE (a1, sh_1, a2, sh_2); */ stw(d_space, MERGE (a1, sh_1, a2, sh_2), 12, dst, cda_stw_exc); src += 4 * sizeof(unsigned int); dst += 4 * sizeof(unsigned int); len -= 4; } while (len != 0); do0: /* ((unsigned int *) dst)[0] = MERGE (a2, sh_1, a3, sh_2); */ stw(d_space, MERGE (a2, sh_1, a3, sh_2), 0, dst, cda_stw_exc); preserve_branch(handle_load_error); preserve_branch(handle_store_error); return 0; handle_load_error: __asm__ __volatile__ ("cda_ldw_exc:\n"); d = &__get_cpu_var(exception_data); DPRINTF("cda_ldw_exc: o_len=%lu fault_addr=%lu o_src=%lu ret=%lu\n", o_len, d->fault_addr, o_src, o_len - d->fault_addr + o_src); return o_len * 4 - d->fault_addr + o_src; handle_store_error: __asm__ __volatile__ ("cda_stw_exc:\n"); d = &__get_cpu_var(exception_data); DPRINTF("cda_stw_exc: o_len=%lu fault_addr=%lu o_dst=%lu ret=%lu\n", o_len, d->fault_addr, o_dst, o_len - d->fault_addr + o_dst); return o_len * 4 - d->fault_addr + o_dst; } /* Returns 0 for success, otherwise, returns number of bytes not transferred. 
*/ static unsigned long pa_memcpy(void *dstp, const void *srcp, unsigned long len) { register unsigned long src, dst, t1, t2, t3; register unsigned char *pcs, *pcd; register unsigned int *pws, *pwd; register double *pds, *pdd; unsigned long ret = 0; unsigned long o_dst, o_src, o_len; struct exception_data *d; src = (unsigned long)srcp; dst = (unsigned long)dstp; pcs = (unsigned char *)srcp; pcd = (unsigned char *)dstp; o_dst = dst; o_src = src; o_len = len; /* prefetch_src((const void *)srcp); */ if (len < THRESHOLD) goto byte_copy; /* Check alignment */ t1 = (src ^ dst); if (unlikely(t1 & (sizeof(double)-1))) goto unaligned_copy; /* src and dst have same alignment. */ /* Copy bytes till we are double-aligned. */ t2 = src & (sizeof(double) - 1); if (unlikely(t2 != 0)) { t2 = sizeof(double) - t2; while (t2 && len) { /* *pcd++ = *pcs++; */ ldbma(s_space, pcs, t3, pmc_load_exc); len--; stbma(d_space, t3, pcd, pmc_store_exc); t2--; } } pds = (double *)pcs; pdd = (double *)pcd; #if 0 /* Copy 8 doubles at a time */ while (len >= 8*sizeof(double)) { register double r1, r2, r3, r4, r5, r6, r7, r8; /* prefetch_src((char *)pds + L1_CACHE_BYTES); */ flddma(s_space, pds, r1, pmc_load_exc); flddma(s_space, pds, r2, pmc_load_exc); flddma(s_space, pds, r3, pmc_load_exc); flddma(s_space, pds, r4, pmc_load_exc); fstdma(d_space, r1, pdd, pmc_store_exc); fstdma(d_space, r2, pdd, pmc_store_exc); fstdma(d_space, r3, pdd, pmc_store_exc); fstdma(d_space, r4, pdd, pmc_store_exc); #if 0 if (L1_CACHE_BYTES <= 32) prefetch_src((char *)pds + L1_CACHE_BYTES); #endif flddma(s_space, pds, r5, pmc_load_exc); flddma(s_space, pds, r6, pmc_load_exc); flddma(s_space, pds, r7, pmc_load_exc); flddma(s_space, pds, r8, pmc_load_exc); fstdma(d_space, r5, pdd, pmc_store_exc); fstdma(d_space, r6, pdd, pmc_store_exc); fstdma(d_space, r7, pdd, pmc_store_exc); fstdma(d_space, r8, pdd, pmc_store_exc); len -= 8*sizeof(double); } #endif pws = (unsigned int *)pds; pwd = (unsigned int *)pdd; word_copy: while (len 
>= 8*sizeof(unsigned int)) { register unsigned int r1,r2,r3,r4,r5,r6,r7,r8; /* prefetch_src((char *)pws + L1_CACHE_BYTES); */ ldwma(s_space, pws, r1, pmc_load_exc); ldwma(s_space, pws, r2, pmc_load_exc); ldwma(s_space, pws, r3, pmc_load_exc); ldwma(s_space, pws, r4, pmc_load_exc); stwma(d_space, r1, pwd, pmc_store_exc); stwma(d_space, r2, pwd, pmc_store_exc); stwma(d_space, r3, pwd, pmc_store_exc); stwma(d_space, r4, pwd, pmc_store_exc); ldwma(s_space, pws, r5, pmc_load_exc); ldwma(s_space, pws, r6, pmc_load_exc); ldwma(s_space, pws, r7, pmc_load_exc); ldwma(s_space, pws, r8, pmc_load_exc); stwma(d_space, r5, pwd, pmc_store_exc); stwma(d_space, r6, pwd, pmc_store_exc); stwma(d_space, r7, pwd, pmc_store_exc); stwma(d_space, r8, pwd, pmc_store_exc); len -= 8*sizeof(unsigned int); } while (len >= 4*sizeof(unsigned int)) { register unsigned int r1,r2,r3,r4; ldwma(s_space, pws, r1, pmc_load_exc); ldwma(s_space, pws, r2, pmc_load_exc); ldwma(s_space, pws, r3, pmc_load_exc); ldwma(s_space, pws, r4, pmc_load_exc); stwma(d_space, r1, pwd, pmc_store_exc); stwma(d_space, r2, pwd, pmc_store_exc); stwma(d_space, r3, pwd, pmc_store_exc); stwma(d_space, r4, pwd, pmc_store_exc); len -= 4*sizeof(unsigned int); } pcs = (unsigned char *)pws; pcd = (unsigned char *)pwd; byte_copy: while (len) { /* *pcd++ = *pcs++; */ ldbma(s_space, pcs, t3, pmc_load_exc); stbma(d_space, t3, pcd, pmc_store_exc); len--; } return 0; unaligned_copy: /* possibly we are aligned on a word, but not on a double... */ if (likely((t1 & (sizeof(unsigned int)-1)) == 0)) { t2 = src & (sizeof(unsigned int) - 1); if (unlikely(t2 != 0)) { t2 = sizeof(unsigned int) - t2; while (t2) { /* *pcd++ = *pcs++; */ ldbma(s_space, pcs, t3, pmc_load_exc); stbma(d_space, t3, pcd, pmc_store_exc); len--; t2--; } } pws = (unsigned int *)pcs; pwd = (unsigned int *)pcd; goto word_copy; } /* Align the destination. 
*/ if (unlikely((dst & (sizeof(unsigned int) - 1)) != 0)) { t2 = sizeof(unsigned int) - (dst & (sizeof(unsigned int) - 1)); while (t2) { /* *pcd++ = *pcs++; */ ldbma(s_space, pcs, t3, pmc_load_exc); stbma(d_space, t3, pcd, pmc_store_exc); len--; t2--; } dst = (unsigned long)pcd; src = (unsigned long)pcs; } ret = copy_dstaligned(dst, src, len / sizeof(unsigned int), o_dst, o_src, o_len); if (ret) return ret; pcs += (len & -sizeof(unsigned int)); pcd += (len & -sizeof(unsigned int)); len %= sizeof(unsigned int); preserve_branch(handle_load_error); preserve_branch(handle_store_error); goto byte_copy; handle_load_error: __asm__ __volatile__ ("pmc_load_exc:\n"); d = &__get_cpu_var(exception_data); DPRINTF("pmc_load_exc: o_len=%lu fault_addr=%lu o_src=%lu ret=%lu\n", o_len, d->fault_addr, o_src, o_len - d->fault_addr + o_src); return o_len - d->fault_addr + o_src; handle_store_error: __asm__ __volatile__ ("pmc_store_exc:\n"); d = &__get_cpu_var(exception_data); DPRINTF("pmc_store_exc: o_len=%lu fault_addr=%lu o_dst=%lu ret=%lu\n", o_len, d->fault_addr, o_dst, o_len - d->fault_addr + o_dst); return o_len - d->fault_addr + o_dst; } #ifdef __KERNEL__ unsigned long copy_to_user(void __user *dst, const void *src, unsigned long len) { mtsp(get_kernel_space(), 1); mtsp(get_user_space(), 2); return pa_memcpy((void __force *)dst, src, len); } EXPORT_SYMBOL(__copy_from_user); unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long len) { mtsp(get_user_space(), 1); mtsp(get_kernel_space(), 2); return pa_memcpy(dst, (void __force *)src, len); } unsigned long copy_in_user(void __user *dst, const void __user *src, unsigned long len) { mtsp(get_user_space(), 1); mtsp(get_user_space(), 2); return pa_memcpy((void __force *)dst, (void __force *)src, len); } void * memcpy(void * dst,const void *src, size_t count) { mtsp(get_kernel_space(), 1); mtsp(get_kernel_space(), 2); pa_memcpy(dst, src, count); return dst; } EXPORT_SYMBOL(copy_to_user); 
EXPORT_SYMBOL(copy_from_user); EXPORT_SYMBOL(copy_in_user); EXPORT_SYMBOL(memcpy); #endif
gpl-2.0
garwynn/L900_MA7_Kernel
fs/afs/cache.c
12772
11040
/* AFS caching stuff * * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/sched.h> #include "internal.h" static uint16_t afs_cell_cache_get_key(const void *cookie_netfs_data, void *buffer, uint16_t buflen); static uint16_t afs_cell_cache_get_aux(const void *cookie_netfs_data, void *buffer, uint16_t buflen); static enum fscache_checkaux afs_cell_cache_check_aux(void *cookie_netfs_data, const void *buffer, uint16_t buflen); static uint16_t afs_vlocation_cache_get_key(const void *cookie_netfs_data, void *buffer, uint16_t buflen); static uint16_t afs_vlocation_cache_get_aux(const void *cookie_netfs_data, void *buffer, uint16_t buflen); static enum fscache_checkaux afs_vlocation_cache_check_aux( void *cookie_netfs_data, const void *buffer, uint16_t buflen); static uint16_t afs_volume_cache_get_key(const void *cookie_netfs_data, void *buffer, uint16_t buflen); static uint16_t afs_vnode_cache_get_key(const void *cookie_netfs_data, void *buffer, uint16_t buflen); static void afs_vnode_cache_get_attr(const void *cookie_netfs_data, uint64_t *size); static uint16_t afs_vnode_cache_get_aux(const void *cookie_netfs_data, void *buffer, uint16_t buflen); static enum fscache_checkaux afs_vnode_cache_check_aux(void *cookie_netfs_data, const void *buffer, uint16_t buflen); static void afs_vnode_cache_now_uncached(void *cookie_netfs_data); struct fscache_netfs afs_cache_netfs = { .name = "afs", .version = 0, }; struct fscache_cookie_def afs_cell_cache_index_def = { .name = "AFS.cell", .type = FSCACHE_COOKIE_TYPE_INDEX, .get_key = afs_cell_cache_get_key, .get_aux = afs_cell_cache_get_aux, .check_aux = afs_cell_cache_check_aux, }; struct fscache_cookie_def 
afs_vlocation_cache_index_def = { .name = "AFS.vldb", .type = FSCACHE_COOKIE_TYPE_INDEX, .get_key = afs_vlocation_cache_get_key, .get_aux = afs_vlocation_cache_get_aux, .check_aux = afs_vlocation_cache_check_aux, }; struct fscache_cookie_def afs_volume_cache_index_def = { .name = "AFS.volume", .type = FSCACHE_COOKIE_TYPE_INDEX, .get_key = afs_volume_cache_get_key, }; struct fscache_cookie_def afs_vnode_cache_index_def = { .name = "AFS.vnode", .type = FSCACHE_COOKIE_TYPE_DATAFILE, .get_key = afs_vnode_cache_get_key, .get_attr = afs_vnode_cache_get_attr, .get_aux = afs_vnode_cache_get_aux, .check_aux = afs_vnode_cache_check_aux, .now_uncached = afs_vnode_cache_now_uncached, }; /* * set the key for the index entry */ static uint16_t afs_cell_cache_get_key(const void *cookie_netfs_data, void *buffer, uint16_t bufmax) { const struct afs_cell *cell = cookie_netfs_data; uint16_t klen; _enter("%p,%p,%u", cell, buffer, bufmax); klen = strlen(cell->name); if (klen > bufmax) return 0; memcpy(buffer, cell->name, klen); return klen; } /* * provide new auxiliary cache data */ static uint16_t afs_cell_cache_get_aux(const void *cookie_netfs_data, void *buffer, uint16_t bufmax) { const struct afs_cell *cell = cookie_netfs_data; uint16_t dlen; _enter("%p,%p,%u", cell, buffer, bufmax); dlen = cell->vl_naddrs * sizeof(cell->vl_addrs[0]); dlen = min(dlen, bufmax); dlen &= ~(sizeof(cell->vl_addrs[0]) - 1); memcpy(buffer, cell->vl_addrs, dlen); return dlen; } /* * check that the auxiliary data indicates that the entry is still valid */ static enum fscache_checkaux afs_cell_cache_check_aux(void *cookie_netfs_data, const void *buffer, uint16_t buflen) { _leave(" = OKAY"); return FSCACHE_CHECKAUX_OKAY; } /*****************************************************************************/ /* * set the key for the index entry */ static uint16_t afs_vlocation_cache_get_key(const void *cookie_netfs_data, void *buffer, uint16_t bufmax) { const struct afs_vlocation *vlocation = cookie_netfs_data; 
uint16_t klen; _enter("{%s},%p,%u", vlocation->vldb.name, buffer, bufmax); klen = strnlen(vlocation->vldb.name, sizeof(vlocation->vldb.name)); if (klen > bufmax) return 0; memcpy(buffer, vlocation->vldb.name, klen); _leave(" = %u", klen); return klen; } /* * provide new auxiliary cache data */ static uint16_t afs_vlocation_cache_get_aux(const void *cookie_netfs_data, void *buffer, uint16_t bufmax) { const struct afs_vlocation *vlocation = cookie_netfs_data; uint16_t dlen; _enter("{%s},%p,%u", vlocation->vldb.name, buffer, bufmax); dlen = sizeof(struct afs_cache_vlocation); dlen -= offsetof(struct afs_cache_vlocation, nservers); if (dlen > bufmax) return 0; memcpy(buffer, (uint8_t *)&vlocation->vldb.nservers, dlen); _leave(" = %u", dlen); return dlen; } /* * check that the auxiliary data indicates that the entry is still valid */ static enum fscache_checkaux afs_vlocation_cache_check_aux(void *cookie_netfs_data, const void *buffer, uint16_t buflen) { const struct afs_cache_vlocation *cvldb; struct afs_vlocation *vlocation = cookie_netfs_data; uint16_t dlen; _enter("{%s},%p,%u", vlocation->vldb.name, buffer, buflen); /* check the size of the data is what we're expecting */ dlen = sizeof(struct afs_cache_vlocation); dlen -= offsetof(struct afs_cache_vlocation, nservers); if (dlen != buflen) return FSCACHE_CHECKAUX_OBSOLETE; cvldb = container_of(buffer, struct afs_cache_vlocation, nservers); /* if what's on disk is more valid than what's in memory, then use the * VL record from the cache */ if (!vlocation->valid || vlocation->vldb.rtime == cvldb->rtime) { memcpy((uint8_t *)&vlocation->vldb.nservers, buffer, dlen); vlocation->valid = 1; _leave(" = SUCCESS [c->m]"); return FSCACHE_CHECKAUX_OKAY; } /* need to update the cache if the cached info differs */ if (memcmp(&vlocation->vldb, buffer, dlen) != 0) { /* delete if the volume IDs for this name differ */ if (memcmp(&vlocation->vldb.vid, &cvldb->vid, sizeof(cvldb->vid)) != 0 ) { _leave(" = OBSOLETE"); return 
FSCACHE_CHECKAUX_OBSOLETE; } _leave(" = UPDATE"); return FSCACHE_CHECKAUX_NEEDS_UPDATE; } _leave(" = OKAY"); return FSCACHE_CHECKAUX_OKAY; } /*****************************************************************************/ /* * set the key for the volume index entry */ static uint16_t afs_volume_cache_get_key(const void *cookie_netfs_data, void *buffer, uint16_t bufmax) { const struct afs_volume *volume = cookie_netfs_data; uint16_t klen; _enter("{%u},%p,%u", volume->type, buffer, bufmax); klen = sizeof(volume->type); if (klen > bufmax) return 0; memcpy(buffer, &volume->type, sizeof(volume->type)); _leave(" = %u", klen); return klen; } /*****************************************************************************/ /* * set the key for the index entry */ static uint16_t afs_vnode_cache_get_key(const void *cookie_netfs_data, void *buffer, uint16_t bufmax) { const struct afs_vnode *vnode = cookie_netfs_data; uint16_t klen; _enter("{%x,%x,%llx},%p,%u", vnode->fid.vnode, vnode->fid.unique, vnode->status.data_version, buffer, bufmax); klen = sizeof(vnode->fid.vnode); if (klen > bufmax) return 0; memcpy(buffer, &vnode->fid.vnode, sizeof(vnode->fid.vnode)); _leave(" = %u", klen); return klen; } /* * provide updated file attributes */ static void afs_vnode_cache_get_attr(const void *cookie_netfs_data, uint64_t *size) { const struct afs_vnode *vnode = cookie_netfs_data; _enter("{%x,%x,%llx},", vnode->fid.vnode, vnode->fid.unique, vnode->status.data_version); *size = vnode->status.size; } /* * provide new auxiliary cache data */ static uint16_t afs_vnode_cache_get_aux(const void *cookie_netfs_data, void *buffer, uint16_t bufmax) { const struct afs_vnode *vnode = cookie_netfs_data; uint16_t dlen; _enter("{%x,%x,%Lx},%p,%u", vnode->fid.vnode, vnode->fid.unique, vnode->status.data_version, buffer, bufmax); dlen = sizeof(vnode->fid.unique) + sizeof(vnode->status.data_version); if (dlen > bufmax) return 0; memcpy(buffer, &vnode->fid.unique, sizeof(vnode->fid.unique)); buffer += 
sizeof(vnode->fid.unique); memcpy(buffer, &vnode->status.data_version, sizeof(vnode->status.data_version)); _leave(" = %u", dlen); return dlen; } /* * check that the auxiliary data indicates that the entry is still valid */ static enum fscache_checkaux afs_vnode_cache_check_aux(void *cookie_netfs_data, const void *buffer, uint16_t buflen) { struct afs_vnode *vnode = cookie_netfs_data; uint16_t dlen; _enter("{%x,%x,%llx},%p,%u", vnode->fid.vnode, vnode->fid.unique, vnode->status.data_version, buffer, buflen); /* check the size of the data is what we're expecting */ dlen = sizeof(vnode->fid.unique) + sizeof(vnode->status.data_version); if (dlen != buflen) { _leave(" = OBSOLETE [len %hx != %hx]", dlen, buflen); return FSCACHE_CHECKAUX_OBSOLETE; } if (memcmp(buffer, &vnode->fid.unique, sizeof(vnode->fid.unique) ) != 0) { unsigned unique; memcpy(&unique, buffer, sizeof(unique)); _leave(" = OBSOLETE [uniq %x != %x]", unique, vnode->fid.unique); return FSCACHE_CHECKAUX_OBSOLETE; } if (memcmp(buffer + sizeof(vnode->fid.unique), &vnode->status.data_version, sizeof(vnode->status.data_version) ) != 0) { afs_dataversion_t version; memcpy(&version, buffer + sizeof(vnode->fid.unique), sizeof(version)); _leave(" = OBSOLETE [vers %llx != %llx]", version, vnode->status.data_version); return FSCACHE_CHECKAUX_OBSOLETE; } _leave(" = SUCCESS"); return FSCACHE_CHECKAUX_OKAY; } /* * indication the cookie is no longer uncached * - this function is called when the backing store currently caching a cookie * is removed * - the netfs should use this to clean up any markers indicating cached pages * - this is mandatory for any object that may have data */ static void afs_vnode_cache_now_uncached(void *cookie_netfs_data) { struct afs_vnode *vnode = cookie_netfs_data; struct pagevec pvec; pgoff_t first; int loop, nr_pages; _enter("{%x,%x,%Lx}", vnode->fid.vnode, vnode->fid.unique, vnode->status.data_version); pagevec_init(&pvec, 0); first = 0; for (;;) { /* grab a bunch of pages to clean */ 
nr_pages = pagevec_lookup(&pvec, vnode->vfs_inode.i_mapping, first, PAGEVEC_SIZE - pagevec_count(&pvec)); if (!nr_pages) break; for (loop = 0; loop < nr_pages; loop++) ClearPageFsCache(pvec.pages[loop]); first = pvec.pages[nr_pages - 1]->index + 1; pvec.nr = nr_pages; pagevec_release(&pvec); cond_resched(); } _leave(""); }
gpl-2.0
greguu/linux-3.11.3-borzoi
drivers/gpu/drm/savage/savage_state.c
2533
30904
/* savage_state.c -- State and drawing support for Savage * * Copyright 2004 Felix Kuehling * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sub license, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial portions * of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ #include <drm/drmP.h> #include <drm/savage_drm.h> #include "savage_drv.h" void savage_emit_clip_rect_s3d(drm_savage_private_t * dev_priv, const struct drm_clip_rect * pbox) { uint32_t scstart = dev_priv->state.s3d.new_scstart; uint32_t scend = dev_priv->state.s3d.new_scend; scstart = (scstart & ~SAVAGE_SCISSOR_MASK_S3D) | ((uint32_t) pbox->x1 & 0x000007ff) | (((uint32_t) pbox->y1 << 16) & 0x07ff0000); scend = (scend & ~SAVAGE_SCISSOR_MASK_S3D) | (((uint32_t) pbox->x2 - 1) & 0x000007ff) | ((((uint32_t) pbox->y2 - 1) << 16) & 0x07ff0000); if (scstart != dev_priv->state.s3d.scstart || scend != dev_priv->state.s3d.scend) { DMA_LOCALS; BEGIN_DMA(4); DMA_WRITE(BCI_CMD_WAIT | BCI_CMD_WAIT_3D); DMA_SET_REGISTERS(SAVAGE_SCSTART_S3D, 2); DMA_WRITE(scstart); DMA_WRITE(scend); dev_priv->state.s3d.scstart = scstart; dev_priv->state.s3d.scend = scend; dev_priv->waiting = 1; DMA_COMMIT(); } } void savage_emit_clip_rect_s4(drm_savage_private_t * dev_priv, const struct drm_clip_rect * pbox) { uint32_t drawctrl0 = dev_priv->state.s4.new_drawctrl0; uint32_t drawctrl1 = dev_priv->state.s4.new_drawctrl1; drawctrl0 = (drawctrl0 & ~SAVAGE_SCISSOR_MASK_S4) | ((uint32_t) pbox->x1 & 0x000007ff) | (((uint32_t) pbox->y1 << 12) & 0x00fff000); drawctrl1 = (drawctrl1 & ~SAVAGE_SCISSOR_MASK_S4) | (((uint32_t) pbox->x2 - 1) & 0x000007ff) | ((((uint32_t) pbox->y2 - 1) << 12) & 0x00fff000); if (drawctrl0 != dev_priv->state.s4.drawctrl0 || drawctrl1 != dev_priv->state.s4.drawctrl1) { DMA_LOCALS; BEGIN_DMA(4); DMA_WRITE(BCI_CMD_WAIT | BCI_CMD_WAIT_3D); DMA_SET_REGISTERS(SAVAGE_DRAWCTRL0_S4, 2); DMA_WRITE(drawctrl0); DMA_WRITE(drawctrl1); dev_priv->state.s4.drawctrl0 = drawctrl0; dev_priv->state.s4.drawctrl1 = drawctrl1; dev_priv->waiting = 1; DMA_COMMIT(); } } static int savage_verify_texaddr(drm_savage_private_t * dev_priv, int unit, uint32_t addr) { if ((addr & 6) != 2) { /* reserved bits */ DRM_ERROR("bad texAddr%d %08x (reserved bits)\n", unit, addr); return -EINVAL; } if (!(addr & 1)) { /* 
local */ addr &= ~7; if (addr < dev_priv->texture_offset || addr >= dev_priv->texture_offset + dev_priv->texture_size) { DRM_ERROR ("bad texAddr%d %08x (local addr out of range)\n", unit, addr); return -EINVAL; } } else { /* AGP */ if (!dev_priv->agp_textures) { DRM_ERROR("bad texAddr%d %08x (AGP not available)\n", unit, addr); return -EINVAL; } addr &= ~7; if (addr < dev_priv->agp_textures->offset || addr >= (dev_priv->agp_textures->offset + dev_priv->agp_textures->size)) { DRM_ERROR ("bad texAddr%d %08x (AGP addr out of range)\n", unit, addr); return -EINVAL; } } return 0; } #define SAVE_STATE(reg,where) \ if(start <= reg && start+count > reg) \ dev_priv->state.where = regs[reg - start] #define SAVE_STATE_MASK(reg,where,mask) do { \ if(start <= reg && start+count > reg) { \ uint32_t tmp; \ tmp = regs[reg - start]; \ dev_priv->state.where = (tmp & (mask)) | \ (dev_priv->state.where & ~(mask)); \ } \ } while (0) static int savage_verify_state_s3d(drm_savage_private_t * dev_priv, unsigned int start, unsigned int count, const uint32_t *regs) { if (start < SAVAGE_TEXPALADDR_S3D || start + count - 1 > SAVAGE_DESTTEXRWWATERMARK_S3D) { DRM_ERROR("invalid register range (0x%04x-0x%04x)\n", start, start + count - 1); return -EINVAL; } SAVE_STATE_MASK(SAVAGE_SCSTART_S3D, s3d.new_scstart, ~SAVAGE_SCISSOR_MASK_S3D); SAVE_STATE_MASK(SAVAGE_SCEND_S3D, s3d.new_scend, ~SAVAGE_SCISSOR_MASK_S3D); /* if any texture regs were changed ... */ if (start <= SAVAGE_TEXCTRL_S3D && start + count > SAVAGE_TEXPALADDR_S3D) { /* ... 
check texture state */ SAVE_STATE(SAVAGE_TEXCTRL_S3D, s3d.texctrl); SAVE_STATE(SAVAGE_TEXADDR_S3D, s3d.texaddr); if (dev_priv->state.s3d.texctrl & SAVAGE_TEXCTRL_TEXEN_MASK) return savage_verify_texaddr(dev_priv, 0, dev_priv->state.s3d.texaddr); } return 0; } static int savage_verify_state_s4(drm_savage_private_t * dev_priv, unsigned int start, unsigned int count, const uint32_t *regs) { int ret = 0; if (start < SAVAGE_DRAWLOCALCTRL_S4 || start + count - 1 > SAVAGE_TEXBLENDCOLOR_S4) { DRM_ERROR("invalid register range (0x%04x-0x%04x)\n", start, start + count - 1); return -EINVAL; } SAVE_STATE_MASK(SAVAGE_DRAWCTRL0_S4, s4.new_drawctrl0, ~SAVAGE_SCISSOR_MASK_S4); SAVE_STATE_MASK(SAVAGE_DRAWCTRL1_S4, s4.new_drawctrl1, ~SAVAGE_SCISSOR_MASK_S4); /* if any texture regs were changed ... */ if (start <= SAVAGE_TEXDESCR_S4 && start + count > SAVAGE_TEXPALADDR_S4) { /* ... check texture state */ SAVE_STATE(SAVAGE_TEXDESCR_S4, s4.texdescr); SAVE_STATE(SAVAGE_TEXADDR0_S4, s4.texaddr0); SAVE_STATE(SAVAGE_TEXADDR1_S4, s4.texaddr1); if (dev_priv->state.s4.texdescr & SAVAGE_TEXDESCR_TEX0EN_MASK) ret |= savage_verify_texaddr(dev_priv, 0, dev_priv->state.s4.texaddr0); if (dev_priv->state.s4.texdescr & SAVAGE_TEXDESCR_TEX1EN_MASK) ret |= savage_verify_texaddr(dev_priv, 1, dev_priv->state.s4.texaddr1); } return ret; } #undef SAVE_STATE #undef SAVE_STATE_MASK static int savage_dispatch_state(drm_savage_private_t * dev_priv, const drm_savage_cmd_header_t * cmd_header, const uint32_t *regs) { unsigned int count = cmd_header->state.count; unsigned int start = cmd_header->state.start; unsigned int count2 = 0; unsigned int bci_size; int ret; DMA_LOCALS; if (!count) return 0; if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { ret = savage_verify_state_s3d(dev_priv, start, count, regs); if (ret != 0) return ret; /* scissor regs are emitted in savage_dispatch_draw */ if (start < SAVAGE_SCSTART_S3D) { if (start + count > SAVAGE_SCEND_S3D + 1) count2 = count - (SAVAGE_SCEND_S3D + 1 - start); if (start 
+ count > SAVAGE_SCSTART_S3D) count = SAVAGE_SCSTART_S3D - start; } else if (start <= SAVAGE_SCEND_S3D) { if (start + count > SAVAGE_SCEND_S3D + 1) { count -= SAVAGE_SCEND_S3D + 1 - start; start = SAVAGE_SCEND_S3D + 1; } else return 0; } } else { ret = savage_verify_state_s4(dev_priv, start, count, regs); if (ret != 0) return ret; /* scissor regs are emitted in savage_dispatch_draw */ if (start < SAVAGE_DRAWCTRL0_S4) { if (start + count > SAVAGE_DRAWCTRL1_S4 + 1) count2 = count - (SAVAGE_DRAWCTRL1_S4 + 1 - start); if (start + count > SAVAGE_DRAWCTRL0_S4) count = SAVAGE_DRAWCTRL0_S4 - start; } else if (start <= SAVAGE_DRAWCTRL1_S4) { if (start + count > SAVAGE_DRAWCTRL1_S4 + 1) { count -= SAVAGE_DRAWCTRL1_S4 + 1 - start; start = SAVAGE_DRAWCTRL1_S4 + 1; } else return 0; } } bci_size = count + (count + 254) / 255 + count2 + (count2 + 254) / 255; if (cmd_header->state.global) { BEGIN_DMA(bci_size + 1); DMA_WRITE(BCI_CMD_WAIT | BCI_CMD_WAIT_3D); dev_priv->waiting = 1; } else { BEGIN_DMA(bci_size); } do { while (count > 0) { unsigned int n = count < 255 ? 
count : 255; DMA_SET_REGISTERS(start, n); DMA_COPY(regs, n); count -= n; start += n; regs += n; } start += 2; regs += 2; count = count2; count2 = 0; } while (count); DMA_COMMIT(); return 0; } static int savage_dispatch_dma_prim(drm_savage_private_t * dev_priv, const drm_savage_cmd_header_t * cmd_header, const struct drm_buf * dmabuf) { unsigned char reorder = 0; unsigned int prim = cmd_header->prim.prim; unsigned int skip = cmd_header->prim.skip; unsigned int n = cmd_header->prim.count; unsigned int start = cmd_header->prim.start; unsigned int i; BCI_LOCALS; if (!dmabuf) { DRM_ERROR("called without dma buffers!\n"); return -EINVAL; } if (!n) return 0; switch (prim) { case SAVAGE_PRIM_TRILIST_201: reorder = 1; prim = SAVAGE_PRIM_TRILIST; case SAVAGE_PRIM_TRILIST: if (n % 3 != 0) { DRM_ERROR("wrong number of vertices %u in TRILIST\n", n); return -EINVAL; } break; case SAVAGE_PRIM_TRISTRIP: case SAVAGE_PRIM_TRIFAN: if (n < 3) { DRM_ERROR ("wrong number of vertices %u in TRIFAN/STRIP\n", n); return -EINVAL; } break; default: DRM_ERROR("invalid primitive type %u\n", prim); return -EINVAL; } if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { if (skip != 0) { DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip); return -EINVAL; } } else { unsigned int size = 10 - (skip & 1) - (skip >> 1 & 1) - (skip >> 2 & 1) - (skip >> 3 & 1) - (skip >> 4 & 1) - (skip >> 5 & 1) - (skip >> 6 & 1) - (skip >> 7 & 1); if (skip > SAVAGE_SKIP_ALL_S4 || size != 8) { DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip); return -EINVAL; } if (reorder) { DRM_ERROR("TRILIST_201 used on Savage4 hardware\n"); return -EINVAL; } } if (start + n > dmabuf->total / 32) { DRM_ERROR("vertex indices (%u-%u) out of range (0-%u)\n", start, start + n - 1, dmabuf->total / 32); return -EINVAL; } /* Vertex DMA doesn't work with command DMA at the same time, * so we use BCI_... to submit commands here. Flush buffered * faked DMA first. 
*/ DMA_FLUSH(); if (dmabuf->bus_address != dev_priv->state.common.vbaddr) { BEGIN_BCI(2); BCI_SET_REGISTERS(SAVAGE_VERTBUFADDR, 1); BCI_WRITE(dmabuf->bus_address | dev_priv->dma_type); dev_priv->state.common.vbaddr = dmabuf->bus_address; } if (S3_SAVAGE3D_SERIES(dev_priv->chipset) && dev_priv->waiting) { /* Workaround for what looks like a hardware bug. If a * WAIT_3D_IDLE was emitted some time before the * indexed drawing command then the engine will lock * up. There are two known workarounds: * WAIT_IDLE_EMPTY or emit at least 63 NOPs. */ BEGIN_BCI(63); for (i = 0; i < 63; ++i) BCI_WRITE(BCI_CMD_WAIT); dev_priv->waiting = 0; } prim <<= 25; while (n != 0) { /* Can emit up to 255 indices (85 triangles) at once. */ unsigned int count = n > 255 ? 255 : n; if (reorder) { /* Need to reorder indices for correct flat * shading while preserving the clock sense * for correct culling. Only on Savage3D. */ int reorder[3] = { -1, -1, -1 }; reorder[start % 3] = 2; BEGIN_BCI((count + 1 + 1) / 2); BCI_DRAW_INDICES_S3D(count, prim, start + 2); for (i = start + 1; i + 1 < start + count; i += 2) BCI_WRITE((i + reorder[i % 3]) | ((i + 1 + reorder[(i + 1) % 3]) << 16)); if (i < start + count) BCI_WRITE(i + reorder[i % 3]); } else if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { BEGIN_BCI((count + 1 + 1) / 2); BCI_DRAW_INDICES_S3D(count, prim, start); for (i = start + 1; i + 1 < start + count; i += 2) BCI_WRITE(i | ((i + 1) << 16)); if (i < start + count) BCI_WRITE(i); } else { BEGIN_BCI((count + 2 + 1) / 2); BCI_DRAW_INDICES_S4(count, prim, skip); for (i = start; i + 1 < start + count; i += 2) BCI_WRITE(i | ((i + 1) << 16)); if (i < start + count) BCI_WRITE(i); } start += count; n -= count; prim |= BCI_CMD_DRAW_CONT; } return 0; } static int savage_dispatch_vb_prim(drm_savage_private_t * dev_priv, const drm_savage_cmd_header_t * cmd_header, const uint32_t *vtxbuf, unsigned int vb_size, unsigned int vb_stride) { unsigned char reorder = 0; unsigned int prim = cmd_header->prim.prim; 
unsigned int skip = cmd_header->prim.skip; unsigned int n = cmd_header->prim.count; unsigned int start = cmd_header->prim.start; unsigned int vtx_size; unsigned int i; DMA_LOCALS; if (!n) return 0; switch (prim) { case SAVAGE_PRIM_TRILIST_201: reorder = 1; prim = SAVAGE_PRIM_TRILIST; case SAVAGE_PRIM_TRILIST: if (n % 3 != 0) { DRM_ERROR("wrong number of vertices %u in TRILIST\n", n); return -EINVAL; } break; case SAVAGE_PRIM_TRISTRIP: case SAVAGE_PRIM_TRIFAN: if (n < 3) { DRM_ERROR ("wrong number of vertices %u in TRIFAN/STRIP\n", n); return -EINVAL; } break; default: DRM_ERROR("invalid primitive type %u\n", prim); return -EINVAL; } if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { if (skip > SAVAGE_SKIP_ALL_S3D) { DRM_ERROR("invalid skip flags 0x%04x\n", skip); return -EINVAL; } vtx_size = 8; /* full vertex */ } else { if (skip > SAVAGE_SKIP_ALL_S4) { DRM_ERROR("invalid skip flags 0x%04x\n", skip); return -EINVAL; } vtx_size = 10; /* full vertex */ } vtx_size -= (skip & 1) + (skip >> 1 & 1) + (skip >> 2 & 1) + (skip >> 3 & 1) + (skip >> 4 & 1) + (skip >> 5 & 1) + (skip >> 6 & 1) + (skip >> 7 & 1); if (vtx_size > vb_stride) { DRM_ERROR("vertex size greater than vb stride (%u > %u)\n", vtx_size, vb_stride); return -EINVAL; } if (start + n > vb_size / (vb_stride * 4)) { DRM_ERROR("vertex indices (%u-%u) out of range (0-%u)\n", start, start + n - 1, vb_size / (vb_stride * 4)); return -EINVAL; } prim <<= 25; while (n != 0) { /* Can emit up to 255 vertices (85 triangles) at once. */ unsigned int count = n > 255 ? 255 : n; if (reorder) { /* Need to reorder vertices for correct flat * shading while preserving the clock sense * for correct culling. Only on Savage3D. 
*/ int reorder[3] = { -1, -1, -1 }; reorder[start % 3] = 2; BEGIN_DMA(count * vtx_size + 1); DMA_DRAW_PRIMITIVE(count, prim, skip); for (i = start; i < start + count; ++i) { unsigned int j = i + reorder[i % 3]; DMA_COPY(&vtxbuf[vb_stride * j], vtx_size); } DMA_COMMIT(); } else { BEGIN_DMA(count * vtx_size + 1); DMA_DRAW_PRIMITIVE(count, prim, skip); if (vb_stride == vtx_size) { DMA_COPY(&vtxbuf[vb_stride * start], vtx_size * count); } else { for (i = start; i < start + count; ++i) { DMA_COPY(&vtxbuf [vb_stride * i], vtx_size); } } DMA_COMMIT(); } start += count; n -= count; prim |= BCI_CMD_DRAW_CONT; } return 0; } static int savage_dispatch_dma_idx(drm_savage_private_t * dev_priv, const drm_savage_cmd_header_t * cmd_header, const uint16_t *idx, const struct drm_buf * dmabuf) { unsigned char reorder = 0; unsigned int prim = cmd_header->idx.prim; unsigned int skip = cmd_header->idx.skip; unsigned int n = cmd_header->idx.count; unsigned int i; BCI_LOCALS; if (!dmabuf) { DRM_ERROR("called without dma buffers!\n"); return -EINVAL; } if (!n) return 0; switch (prim) { case SAVAGE_PRIM_TRILIST_201: reorder = 1; prim = SAVAGE_PRIM_TRILIST; case SAVAGE_PRIM_TRILIST: if (n % 3 != 0) { DRM_ERROR("wrong number of indices %u in TRILIST\n", n); return -EINVAL; } break; case SAVAGE_PRIM_TRISTRIP: case SAVAGE_PRIM_TRIFAN: if (n < 3) { DRM_ERROR ("wrong number of indices %u in TRIFAN/STRIP\n", n); return -EINVAL; } break; default: DRM_ERROR("invalid primitive type %u\n", prim); return -EINVAL; } if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { if (skip != 0) { DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip); return -EINVAL; } } else { unsigned int size = 10 - (skip & 1) - (skip >> 1 & 1) - (skip >> 2 & 1) - (skip >> 3 & 1) - (skip >> 4 & 1) - (skip >> 5 & 1) - (skip >> 6 & 1) - (skip >> 7 & 1); if (skip > SAVAGE_SKIP_ALL_S4 || size != 8) { DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip); return -EINVAL; } if (reorder) { DRM_ERROR("TRILIST_201 used on Savage4 hardware\n"); 
return -EINVAL; } } /* Vertex DMA doesn't work with command DMA at the same time, * so we use BCI_... to submit commands here. Flush buffered * faked DMA first. */ DMA_FLUSH(); if (dmabuf->bus_address != dev_priv->state.common.vbaddr) { BEGIN_BCI(2); BCI_SET_REGISTERS(SAVAGE_VERTBUFADDR, 1); BCI_WRITE(dmabuf->bus_address | dev_priv->dma_type); dev_priv->state.common.vbaddr = dmabuf->bus_address; } if (S3_SAVAGE3D_SERIES(dev_priv->chipset) && dev_priv->waiting) { /* Workaround for what looks like a hardware bug. If a * WAIT_3D_IDLE was emitted some time before the * indexed drawing command then the engine will lock * up. There are two known workarounds: * WAIT_IDLE_EMPTY or emit at least 63 NOPs. */ BEGIN_BCI(63); for (i = 0; i < 63; ++i) BCI_WRITE(BCI_CMD_WAIT); dev_priv->waiting = 0; } prim <<= 25; while (n != 0) { /* Can emit up to 255 indices (85 triangles) at once. */ unsigned int count = n > 255 ? 255 : n; /* check indices */ for (i = 0; i < count; ++i) { if (idx[i] > dmabuf->total / 32) { DRM_ERROR("idx[%u]=%u out of range (0-%u)\n", i, idx[i], dmabuf->total / 32); return -EINVAL; } } if (reorder) { /* Need to reorder indices for correct flat * shading while preserving the clock sense * for correct culling. Only on Savage3D. 
*/ int reorder[3] = { 2, -1, -1 }; BEGIN_BCI((count + 1 + 1) / 2); BCI_DRAW_INDICES_S3D(count, prim, idx[2]); for (i = 1; i + 1 < count; i += 2) BCI_WRITE(idx[i + reorder[i % 3]] | (idx[i + 1 + reorder[(i + 1) % 3]] << 16)); if (i < count) BCI_WRITE(idx[i + reorder[i % 3]]); } else if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { BEGIN_BCI((count + 1 + 1) / 2); BCI_DRAW_INDICES_S3D(count, prim, idx[0]); for (i = 1; i + 1 < count; i += 2) BCI_WRITE(idx[i] | (idx[i + 1] << 16)); if (i < count) BCI_WRITE(idx[i]); } else { BEGIN_BCI((count + 2 + 1) / 2); BCI_DRAW_INDICES_S4(count, prim, skip); for (i = 0; i + 1 < count; i += 2) BCI_WRITE(idx[i] | (idx[i + 1] << 16)); if (i < count) BCI_WRITE(idx[i]); } idx += count; n -= count; prim |= BCI_CMD_DRAW_CONT; } return 0; } static int savage_dispatch_vb_idx(drm_savage_private_t * dev_priv, const drm_savage_cmd_header_t * cmd_header, const uint16_t *idx, const uint32_t *vtxbuf, unsigned int vb_size, unsigned int vb_stride) { unsigned char reorder = 0; unsigned int prim = cmd_header->idx.prim; unsigned int skip = cmd_header->idx.skip; unsigned int n = cmd_header->idx.count; unsigned int vtx_size; unsigned int i; DMA_LOCALS; if (!n) return 0; switch (prim) { case SAVAGE_PRIM_TRILIST_201: reorder = 1; prim = SAVAGE_PRIM_TRILIST; case SAVAGE_PRIM_TRILIST: if (n % 3 != 0) { DRM_ERROR("wrong number of indices %u in TRILIST\n", n); return -EINVAL; } break; case SAVAGE_PRIM_TRISTRIP: case SAVAGE_PRIM_TRIFAN: if (n < 3) { DRM_ERROR ("wrong number of indices %u in TRIFAN/STRIP\n", n); return -EINVAL; } break; default: DRM_ERROR("invalid primitive type %u\n", prim); return -EINVAL; } if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { if (skip > SAVAGE_SKIP_ALL_S3D) { DRM_ERROR("invalid skip flags 0x%04x\n", skip); return -EINVAL; } vtx_size = 8; /* full vertex */ } else { if (skip > SAVAGE_SKIP_ALL_S4) { DRM_ERROR("invalid skip flags 0x%04x\n", skip); return -EINVAL; } vtx_size = 10; /* full vertex */ } vtx_size -= (skip & 1) + (skip >> 1 & 1) + 
(skip >> 2 & 1) + (skip >> 3 & 1) + (skip >> 4 & 1) + (skip >> 5 & 1) + (skip >> 6 & 1) + (skip >> 7 & 1); if (vtx_size > vb_stride) { DRM_ERROR("vertex size greater than vb stride (%u > %u)\n", vtx_size, vb_stride); return -EINVAL; } prim <<= 25; while (n != 0) { /* Can emit up to 255 vertices (85 triangles) at once. */ unsigned int count = n > 255 ? 255 : n; /* Check indices */ for (i = 0; i < count; ++i) { if (idx[i] > vb_size / (vb_stride * 4)) { DRM_ERROR("idx[%u]=%u out of range (0-%u)\n", i, idx[i], vb_size / (vb_stride * 4)); return -EINVAL; } } if (reorder) { /* Need to reorder vertices for correct flat * shading while preserving the clock sense * for correct culling. Only on Savage3D. */ int reorder[3] = { 2, -1, -1 }; BEGIN_DMA(count * vtx_size + 1); DMA_DRAW_PRIMITIVE(count, prim, skip); for (i = 0; i < count; ++i) { unsigned int j = idx[i + reorder[i % 3]]; DMA_COPY(&vtxbuf[vb_stride * j], vtx_size); } DMA_COMMIT(); } else { BEGIN_DMA(count * vtx_size + 1); DMA_DRAW_PRIMITIVE(count, prim, skip); for (i = 0; i < count; ++i) { unsigned int j = idx[i]; DMA_COPY(&vtxbuf[vb_stride * j], vtx_size); } DMA_COMMIT(); } idx += count; n -= count; prim |= BCI_CMD_DRAW_CONT; } return 0; } static int savage_dispatch_clear(drm_savage_private_t * dev_priv, const drm_savage_cmd_header_t * cmd_header, const drm_savage_cmd_header_t *data, unsigned int nbox, const struct drm_clip_rect *boxes) { unsigned int flags = cmd_header->clear0.flags; unsigned int clear_cmd; unsigned int i, nbufs; DMA_LOCALS; if (nbox == 0) return 0; clear_cmd = BCI_CMD_RECT | BCI_CMD_RECT_XP | BCI_CMD_RECT_YP | BCI_CMD_SEND_COLOR | BCI_CMD_DEST_PBD_NEW; BCI_CMD_SET_ROP(clear_cmd, 0xCC); nbufs = ((flags & SAVAGE_FRONT) ? 1 : 0) + ((flags & SAVAGE_BACK) ? 1 : 0) + ((flags & SAVAGE_DEPTH) ? 
1 : 0); if (nbufs == 0) return 0; if (data->clear1.mask != 0xffffffff) { /* set mask */ BEGIN_DMA(2); DMA_SET_REGISTERS(SAVAGE_BITPLANEWTMASK, 1); DMA_WRITE(data->clear1.mask); DMA_COMMIT(); } for (i = 0; i < nbox; ++i) { unsigned int x, y, w, h; unsigned int buf; x = boxes[i].x1, y = boxes[i].y1; w = boxes[i].x2 - boxes[i].x1; h = boxes[i].y2 - boxes[i].y1; BEGIN_DMA(nbufs * 6); for (buf = SAVAGE_FRONT; buf <= SAVAGE_DEPTH; buf <<= 1) { if (!(flags & buf)) continue; DMA_WRITE(clear_cmd); switch (buf) { case SAVAGE_FRONT: DMA_WRITE(dev_priv->front_offset); DMA_WRITE(dev_priv->front_bd); break; case SAVAGE_BACK: DMA_WRITE(dev_priv->back_offset); DMA_WRITE(dev_priv->back_bd); break; case SAVAGE_DEPTH: DMA_WRITE(dev_priv->depth_offset); DMA_WRITE(dev_priv->depth_bd); break; } DMA_WRITE(data->clear1.value); DMA_WRITE(BCI_X_Y(x, y)); DMA_WRITE(BCI_W_H(w, h)); } DMA_COMMIT(); } if (data->clear1.mask != 0xffffffff) { /* reset mask */ BEGIN_DMA(2); DMA_SET_REGISTERS(SAVAGE_BITPLANEWTMASK, 1); DMA_WRITE(0xffffffff); DMA_COMMIT(); } return 0; } static int savage_dispatch_swap(drm_savage_private_t * dev_priv, unsigned int nbox, const struct drm_clip_rect *boxes) { unsigned int swap_cmd; unsigned int i; DMA_LOCALS; if (nbox == 0) return 0; swap_cmd = BCI_CMD_RECT | BCI_CMD_RECT_XP | BCI_CMD_RECT_YP | BCI_CMD_SRC_PBD_COLOR_NEW | BCI_CMD_DEST_GBD; BCI_CMD_SET_ROP(swap_cmd, 0xCC); for (i = 0; i < nbox; ++i) { BEGIN_DMA(6); DMA_WRITE(swap_cmd); DMA_WRITE(dev_priv->back_offset); DMA_WRITE(dev_priv->back_bd); DMA_WRITE(BCI_X_Y(boxes[i].x1, boxes[i].y1)); DMA_WRITE(BCI_X_Y(boxes[i].x1, boxes[i].y1)); DMA_WRITE(BCI_W_H(boxes[i].x2 - boxes[i].x1, boxes[i].y2 - boxes[i].y1)); DMA_COMMIT(); } return 0; } static int savage_dispatch_draw(drm_savage_private_t * dev_priv, const drm_savage_cmd_header_t *start, const drm_savage_cmd_header_t *end, const struct drm_buf * dmabuf, const unsigned int *vtxbuf, unsigned int vb_size, unsigned int vb_stride, unsigned int nbox, const struct 
drm_clip_rect *boxes) { unsigned int i, j; int ret; for (i = 0; i < nbox; ++i) { const drm_savage_cmd_header_t *cmdbuf; dev_priv->emit_clip_rect(dev_priv, &boxes[i]); cmdbuf = start; while (cmdbuf < end) { drm_savage_cmd_header_t cmd_header; cmd_header = *cmdbuf; cmdbuf++; switch (cmd_header.cmd.cmd) { case SAVAGE_CMD_DMA_PRIM: ret = savage_dispatch_dma_prim( dev_priv, &cmd_header, dmabuf); break; case SAVAGE_CMD_VB_PRIM: ret = savage_dispatch_vb_prim( dev_priv, &cmd_header, vtxbuf, vb_size, vb_stride); break; case SAVAGE_CMD_DMA_IDX: j = (cmd_header.idx.count + 3) / 4; /* j was check in savage_bci_cmdbuf */ ret = savage_dispatch_dma_idx(dev_priv, &cmd_header, (const uint16_t *)cmdbuf, dmabuf); cmdbuf += j; break; case SAVAGE_CMD_VB_IDX: j = (cmd_header.idx.count + 3) / 4; /* j was check in savage_bci_cmdbuf */ ret = savage_dispatch_vb_idx(dev_priv, &cmd_header, (const uint16_t *)cmdbuf, (const uint32_t *)vtxbuf, vb_size, vb_stride); cmdbuf += j; break; default: /* What's the best return code? EFAULT? */ DRM_ERROR("IMPLEMENTATION ERROR: " "non-drawing-command %d\n", cmd_header.cmd.cmd); return -EINVAL; } if (ret != 0) return ret; } } return 0; } int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_savage_private_t *dev_priv = dev->dev_private; struct drm_device_dma *dma = dev->dma; struct drm_buf *dmabuf; drm_savage_cmdbuf_t *cmdbuf = data; drm_savage_cmd_header_t *kcmd_addr = NULL; drm_savage_cmd_header_t *first_draw_cmd; unsigned int *kvb_addr = NULL; struct drm_clip_rect *kbox_addr = NULL; unsigned int i, j; int ret = 0; DRM_DEBUG("\n"); LOCK_TEST_WITH_RETURN(dev, file_priv); if (dma && dma->buflist) { if (cmdbuf->dma_idx > dma->buf_count) { DRM_ERROR ("vertex buffer index %u out of range (0-%u)\n", cmdbuf->dma_idx, dma->buf_count - 1); return -EINVAL; } dmabuf = dma->buflist[cmdbuf->dma_idx]; } else { dmabuf = NULL; } /* Copy the user buffers into kernel temporary areas. 
This hasn't been * a performance loss compared to VERIFYAREA_READ/ * COPY_FROM_USER_UNCHECKED when done in other drivers, and is correct * for locking on FreeBSD. */ if (cmdbuf->size) { kcmd_addr = kmalloc_array(cmdbuf->size, 8, GFP_KERNEL); if (kcmd_addr == NULL) return -ENOMEM; if (DRM_COPY_FROM_USER(kcmd_addr, cmdbuf->cmd_addr, cmdbuf->size * 8)) { kfree(kcmd_addr); return -EFAULT; } cmdbuf->cmd_addr = kcmd_addr; } if (cmdbuf->vb_size) { kvb_addr = kmalloc(cmdbuf->vb_size, GFP_KERNEL); if (kvb_addr == NULL) { ret = -ENOMEM; goto done; } if (DRM_COPY_FROM_USER(kvb_addr, cmdbuf->vb_addr, cmdbuf->vb_size)) { ret = -EFAULT; goto done; } cmdbuf->vb_addr = kvb_addr; } if (cmdbuf->nbox) { kbox_addr = kmalloc_array(cmdbuf->nbox, sizeof(struct drm_clip_rect), GFP_KERNEL); if (kbox_addr == NULL) { ret = -ENOMEM; goto done; } if (DRM_COPY_FROM_USER(kbox_addr, cmdbuf->box_addr, cmdbuf->nbox * sizeof(struct drm_clip_rect))) { ret = -EFAULT; goto done; } cmdbuf->box_addr = kbox_addr; } /* Make sure writes to DMA buffers are finished before sending * DMA commands to the graphics hardware. */ DRM_MEMORYBARRIER(); /* Coming from user space. Don't know if the Xserver has * emitted wait commands. Assuming the worst. */ dev_priv->waiting = 1; i = 0; first_draw_cmd = NULL; while (i < cmdbuf->size) { drm_savage_cmd_header_t cmd_header; cmd_header = *(drm_savage_cmd_header_t *)cmdbuf->cmd_addr; cmdbuf->cmd_addr++; i++; /* Group drawing commands with same state to minimize * iterations over clip rects. 
*/ j = 0; switch (cmd_header.cmd.cmd) { case SAVAGE_CMD_DMA_IDX: case SAVAGE_CMD_VB_IDX: j = (cmd_header.idx.count + 3) / 4; if (i + j > cmdbuf->size) { DRM_ERROR("indexed drawing command extends " "beyond end of command buffer\n"); DMA_FLUSH(); ret = -EINVAL; goto done; } /* fall through */ case SAVAGE_CMD_DMA_PRIM: case SAVAGE_CMD_VB_PRIM: if (!first_draw_cmd) first_draw_cmd = cmdbuf->cmd_addr - 1; cmdbuf->cmd_addr += j; i += j; break; default: if (first_draw_cmd) { ret = savage_dispatch_draw( dev_priv, first_draw_cmd, cmdbuf->cmd_addr - 1, dmabuf, cmdbuf->vb_addr, cmdbuf->vb_size, cmdbuf->vb_stride, cmdbuf->nbox, cmdbuf->box_addr); if (ret != 0) goto done; first_draw_cmd = NULL; } } if (first_draw_cmd) continue; switch (cmd_header.cmd.cmd) { case SAVAGE_CMD_STATE: j = (cmd_header.state.count + 1) / 2; if (i + j > cmdbuf->size) { DRM_ERROR("command SAVAGE_CMD_STATE extends " "beyond end of command buffer\n"); DMA_FLUSH(); ret = -EINVAL; goto done; } ret = savage_dispatch_state(dev_priv, &cmd_header, (const uint32_t *)cmdbuf->cmd_addr); cmdbuf->cmd_addr += j; i += j; break; case SAVAGE_CMD_CLEAR: if (i + 1 > cmdbuf->size) { DRM_ERROR("command SAVAGE_CMD_CLEAR extends " "beyond end of command buffer\n"); DMA_FLUSH(); ret = -EINVAL; goto done; } ret = savage_dispatch_clear(dev_priv, &cmd_header, cmdbuf->cmd_addr, cmdbuf->nbox, cmdbuf->box_addr); cmdbuf->cmd_addr++; i++; break; case SAVAGE_CMD_SWAP: ret = savage_dispatch_swap(dev_priv, cmdbuf->nbox, cmdbuf->box_addr); break; default: DRM_ERROR("invalid command 0x%x\n", cmd_header.cmd.cmd); DMA_FLUSH(); ret = -EINVAL; goto done; } if (ret != 0) { DMA_FLUSH(); goto done; } } if (first_draw_cmd) { ret = savage_dispatch_draw ( dev_priv, first_draw_cmd, cmdbuf->cmd_addr, dmabuf, cmdbuf->vb_addr, cmdbuf->vb_size, cmdbuf->vb_stride, cmdbuf->nbox, cmdbuf->box_addr); if (ret != 0) { DMA_FLUSH(); goto done; } } DMA_FLUSH(); if (dmabuf && cmdbuf->discard) { drm_savage_buf_priv_t *buf_priv = dmabuf->dev_private; uint16_t event; 
event = savage_bci_emit_event(dev_priv, SAVAGE_WAIT_3D); SET_AGE(&buf_priv->age, event, dev_priv->event_wrap); savage_freelist_put(dev, dmabuf); } done: /* If we didn't need to allocate them, these'll be NULL */ kfree(kcmd_addr); kfree(kvb_addr); kfree(kbox_addr); return ret; }
gpl-2.0
andrewevans01/T889_Kernel_Recharged
arch/arm/mach-footbridge/netwinder-hw.c
2533
11981
/* * linux/arch/arm/mach-footbridge/netwinder-hw.c * * Netwinder machine fixup * * Copyright (C) 1998, 1999 Russell King, Phil Blundell */ #include <linux/module.h> #include <linux/ioport.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/io.h> #include <linux/spinlock.h> #include <asm/hardware/dec21285.h> #include <asm/leds.h> #include <asm/mach-types.h> #include <asm/setup.h> #include <asm/mach/arch.h> #include "common.h" #define IRDA_IO_BASE 0x180 #define GP1_IO_BASE 0x338 #define GP2_IO_BASE 0x33a #ifdef CONFIG_LEDS #define DEFAULT_LEDS 0 #else #define DEFAULT_LEDS GPIO_GREEN_LED #endif /* * Winbond WB83977F accessibility stuff */ static inline void wb977_open(void) { outb(0x87, 0x370); outb(0x87, 0x370); } static inline void wb977_close(void) { outb(0xaa, 0x370); } static inline void wb977_wb(int reg, int val) { outb(reg, 0x370); outb(val, 0x371); } static inline void wb977_ww(int reg, int val) { outb(reg, 0x370); outb(val >> 8, 0x371); outb(reg + 1, 0x370); outb(val & 255, 0x371); } #define wb977_device_select(dev) wb977_wb(0x07, dev) #define wb977_device_disable() wb977_wb(0x30, 0x00) #define wb977_device_enable() wb977_wb(0x30, 0x01) /* * This is a lock for accessing ports GP1_IO_BASE and GP2_IO_BASE */ DEFINE_SPINLOCK(nw_gpio_lock); EXPORT_SYMBOL(nw_gpio_lock); static unsigned int current_gpio_op; static unsigned int current_gpio_io; static unsigned int current_cpld; void nw_gpio_modify_op(unsigned int mask, unsigned int set) { unsigned int new_gpio, changed; new_gpio = (current_gpio_op & ~mask) | set; changed = new_gpio ^ current_gpio_op; current_gpio_op = new_gpio; if (changed & 0xff) outb(new_gpio, GP1_IO_BASE); if (changed & 0xff00) outb(new_gpio >> 8, GP2_IO_BASE); } EXPORT_SYMBOL(nw_gpio_modify_op); static inline void __gpio_modify_io(int mask, int in) { unsigned int new_gpio, changed; int port; new_gpio = (current_gpio_io & ~mask) | in; changed = new_gpio ^ current_gpio_io; current_gpio_io = new_gpio; 
changed >>= 1; new_gpio >>= 1; wb977_device_select(7); for (port = 0xe1; changed && port < 0xe8; changed >>= 1) { wb977_wb(port, new_gpio & 1); port += 1; new_gpio >>= 1; } wb977_device_select(8); for (port = 0xe8; changed && port < 0xec; changed >>= 1) { wb977_wb(port, new_gpio & 1); port += 1; new_gpio >>= 1; } } void nw_gpio_modify_io(unsigned int mask, unsigned int in) { /* Open up the SuperIO chip */ wb977_open(); __gpio_modify_io(mask, in); /* Close up the EFER gate */ wb977_close(); } EXPORT_SYMBOL(nw_gpio_modify_io); unsigned int nw_gpio_read(void) { return inb(GP1_IO_BASE) | inb(GP2_IO_BASE) << 8; } EXPORT_SYMBOL(nw_gpio_read); /* * Initialise the Winbond W83977F global registers */ static inline void wb977_init_global(void) { /* * Enable R/W config registers */ wb977_wb(0x26, 0x40); /* * Power down FDC (not used) */ wb977_wb(0x22, 0xfe); /* * GP12, GP11, CIRRX, IRRXH, GP10 */ wb977_wb(0x2a, 0xc1); /* * GP23, GP22, GP21, GP20, GP13 */ wb977_wb(0x2b, 0x6b); /* * GP17, GP16, GP15, GP14 */ wb977_wb(0x2c, 0x55); } /* * Initialise the Winbond W83977F printer port */ static inline void wb977_init_printer(void) { wb977_device_select(1); /* * mode 1 == EPP */ wb977_wb(0xf0, 0x01); } /* * Initialise the Winbond W83977F keyboard controller */ static inline void wb977_init_keyboard(void) { wb977_device_select(5); /* * Keyboard controller address */ wb977_ww(0x60, 0x0060); wb977_ww(0x62, 0x0064); /* * Keyboard IRQ 1, active high, edge trigger */ wb977_wb(0x70, 1); wb977_wb(0x71, 0x02); /* * Mouse IRQ 5, active high, edge trigger */ wb977_wb(0x72, 5); wb977_wb(0x73, 0x02); /* * KBC 8MHz */ wb977_wb(0xf0, 0x40); /* * Enable device */ wb977_device_enable(); } /* * Initialise the Winbond W83977F Infra-Red device */ static inline void wb977_init_irda(void) { wb977_device_select(6); /* * IR base address */ wb977_ww(0x60, IRDA_IO_BASE); /* * IRDA IRQ 6, active high, edge trigger */ wb977_wb(0x70, 6); wb977_wb(0x71, 0x02); /* * RX DMA - ISA DMA 0 */ wb977_wb(0x74, 0x00); /* * 
TX DMA - Disable Tx DMA */ wb977_wb(0x75, 0x04); /* * Append CRC, Enable bank selection */ wb977_wb(0xf0, 0x03); /* * Enable device */ wb977_device_enable(); } /* * Initialise Winbond W83977F general purpose IO */ static inline void wb977_init_gpio(void) { unsigned long flags; /* * Set up initial I/O definitions */ current_gpio_io = -1; __gpio_modify_io(-1, GPIO_DONE | GPIO_WDTIMER); wb977_device_select(7); /* * Group1 base address */ wb977_ww(0x60, GP1_IO_BASE); wb977_ww(0x62, 0); wb977_ww(0x64, 0); /* * GP10 (Orage button) IRQ 10, active high, edge trigger */ wb977_wb(0x70, 10); wb977_wb(0x71, 0x02); /* * GP10: Debounce filter enabled, IRQ, input */ wb977_wb(0xe0, 0x19); /* * Enable Group1 */ wb977_device_enable(); wb977_device_select(8); /* * Group2 base address */ wb977_ww(0x60, GP2_IO_BASE); /* * Clear watchdog timer regs * - timer disable */ wb977_wb(0xf2, 0x00); /* * - disable LED, no mouse nor keyboard IRQ */ wb977_wb(0xf3, 0x00); /* * - timer counting, disable power LED, disable timeouot */ wb977_wb(0xf4, 0x00); /* * Enable group2 */ wb977_device_enable(); /* * Set Group1/Group2 outputs */ spin_lock_irqsave(&nw_gpio_lock, flags); nw_gpio_modify_op(-1, GPIO_RED_LED | GPIO_FAN); spin_unlock_irqrestore(&nw_gpio_lock, flags); } /* * Initialise the Winbond W83977F chip. */ static void __init wb977_init(void) { request_region(0x370, 2, "W83977AF configuration"); /* * Open up the SuperIO chip */ wb977_open(); /* * Initialise the global registers */ wb977_init_global(); /* * Initialise the various devices in * the multi-IO chip. 
*/ wb977_init_printer(); wb977_init_keyboard(); wb977_init_irda(); wb977_init_gpio(); /* * Close up the EFER gate */ wb977_close(); } void nw_cpld_modify(unsigned int mask, unsigned int set) { int msk; current_cpld = (current_cpld & ~mask) | set; nw_gpio_modify_io(GPIO_DATA | GPIO_IOCLK | GPIO_IOLOAD, 0); nw_gpio_modify_op(GPIO_IOLOAD, 0); for (msk = 8; msk; msk >>= 1) { int bit = current_cpld & msk; nw_gpio_modify_op(GPIO_DATA | GPIO_IOCLK, bit ? GPIO_DATA : 0); nw_gpio_modify_op(GPIO_IOCLK, GPIO_IOCLK); } nw_gpio_modify_op(GPIO_IOCLK|GPIO_DATA, 0); nw_gpio_modify_op(GPIO_IOLOAD|GPIO_DSCLK, GPIO_IOLOAD|GPIO_DSCLK); nw_gpio_modify_op(GPIO_IOLOAD, 0); } EXPORT_SYMBOL(nw_cpld_modify); static void __init cpld_init(void) { unsigned long flags; spin_lock_irqsave(&nw_gpio_lock, flags); nw_cpld_modify(-1, CPLD_UNMUTE | CPLD_7111_DISABLE); spin_unlock_irqrestore(&nw_gpio_lock, flags); } static unsigned char rwa_unlock[] __initdata = { 0x00, 0x00, 0x6a, 0xb5, 0xda, 0xed, 0xf6, 0xfb, 0x7d, 0xbe, 0xdf, 0x6f, 0x37, 0x1b, 0x0d, 0x86, 0xc3, 0x61, 0xb0, 0x58, 0x2c, 0x16, 0x8b, 0x45, 0xa2, 0xd1, 0xe8, 0x74, 0x3a, 0x9d, 0xce, 0xe7, 0x73, 0x39 }; #ifndef DEBUG #define dprintk(x...) #else #define dprintk(x...) printk(x) #endif #define WRITE_RWA(r,v) do { outb((r), 0x279); udelay(10); outb((v), 0xa79); } while (0) static inline void rwa010_unlock(void) { int i; WRITE_RWA(2, 2); mdelay(10); for (i = 0; i < sizeof(rwa_unlock); i++) { outb(rwa_unlock[i], 0x279); udelay(10); } } static inline void rwa010_read_ident(void) { unsigned char si[9]; int i, j; WRITE_RWA(3, 0); WRITE_RWA(0, 128); outb(1, 0x279); mdelay(1); dprintk("Identifier: "); for (i = 0; i < 9; i++) { si[i] = 0; for (j = 0; j < 8; j++) { int bit; udelay(250); inb(0x203); udelay(250); bit = inb(0x203); dprintk("%02X ", bit); bit = (bit == 0xaa) ? 
1 : 0; si[i] |= bit << j; } dprintk("(%02X) ", si[i]); } dprintk("\n"); } static inline void rwa010_global_init(void) { WRITE_RWA(6, 2); // Assign a card no = 2 dprintk("Card no = %d\n", inb(0x203)); /* disable the modem section of the chip */ WRITE_RWA(7, 3); WRITE_RWA(0x30, 0); /* disable the cdrom section of the chip */ WRITE_RWA(7, 4); WRITE_RWA(0x30, 0); /* disable the MPU-401 section of the chip */ WRITE_RWA(7, 2); WRITE_RWA(0x30, 0); } static inline void rwa010_game_port_init(void) { int i; WRITE_RWA(7, 5); dprintk("Slider base: "); WRITE_RWA(0x61, 1); i = inb(0x203); WRITE_RWA(0x60, 2); dprintk("%02X%02X (201)\n", inb(0x203), i); WRITE_RWA(0x30, 1); } static inline void rwa010_waveartist_init(int base, int irq, int dma) { int i; WRITE_RWA(7, 0); dprintk("WaveArtist base: "); WRITE_RWA(0x61, base & 255); i = inb(0x203); WRITE_RWA(0x60, base >> 8); dprintk("%02X%02X (%X),", inb(0x203), i, base); WRITE_RWA(0x70, irq); dprintk(" irq: %d (%d),", inb(0x203), irq); WRITE_RWA(0x74, dma); dprintk(" dma: %d (%d)\n", inb(0x203), dma); WRITE_RWA(0x30, 1); } static inline void rwa010_soundblaster_init(int sb_base, int al_base, int irq, int dma) { int i; WRITE_RWA(7, 1); dprintk("SoundBlaster base: "); WRITE_RWA(0x61, sb_base & 255); i = inb(0x203); WRITE_RWA(0x60, sb_base >> 8); dprintk("%02X%02X (%X),", inb(0x203), i, sb_base); dprintk(" irq: "); WRITE_RWA(0x70, irq); dprintk("%d (%d),", inb(0x203), irq); dprintk(" 8-bit DMA: "); WRITE_RWA(0x74, dma); dprintk("%d (%d)\n", inb(0x203), dma); dprintk("AdLib base: "); WRITE_RWA(0x63, al_base & 255); i = inb(0x203); WRITE_RWA(0x62, al_base >> 8); dprintk("%02X%02X (%X)\n", inb(0x203), i, al_base); WRITE_RWA(0x30, 1); } static void rwa010_soundblaster_reset(void) { int i; outb(1, 0x226); udelay(3); outb(0, 0x226); for (i = 0; i < 5; i++) { if (inb(0x22e) & 0x80) break; mdelay(1); } if (i == 5) printk("SoundBlaster: DSP reset failed\n"); dprintk("SoundBlaster DSP reset: %02X (AA)\n", inb(0x22a)); for (i = 0; i < 5; i++) { if 
((inb(0x22c) & 0x80) == 0) break; mdelay(1); } if (i == 5) printk("SoundBlaster: DSP not ready\n"); else { outb(0xe1, 0x22c); dprintk("SoundBlaster DSP id: "); i = inb(0x22a); udelay(1); i |= inb(0x22a) << 8; dprintk("%04X\n", i); for (i = 0; i < 5; i++) { if ((inb(0x22c) & 0x80) == 0) break; mdelay(1); } if (i == 5) printk("SoundBlaster: could not turn speaker off\n"); outb(0xd3, 0x22c); } /* turn on OPL3 */ outb(5, 0x38a); outb(1, 0x38b); } static void __init rwa010_init(void) { rwa010_unlock(); rwa010_read_ident(); rwa010_global_init(); rwa010_game_port_init(); rwa010_waveartist_init(0x250, 3, 7); rwa010_soundblaster_init(0x220, 0x388, 3, 1); rwa010_soundblaster_reset(); } /* * Initialise any other hardware after we've got the PCI bus * initialised. We may need the PCI bus to talk to this other * hardware. */ static int __init nw_hw_init(void) { if (machine_is_netwinder()) { unsigned long flags; wb977_init(); cpld_init(); rwa010_init(); spin_lock_irqsave(&nw_gpio_lock, flags); nw_gpio_modify_op(GPIO_RED_LED|GPIO_GREEN_LED, DEFAULT_LEDS); spin_unlock_irqrestore(&nw_gpio_lock, flags); } return 0; } __initcall(nw_hw_init); /* * Older NeTTroms either do not provide a parameters * page, or they don't supply correct information in * the parameter page. */ static void __init fixup_netwinder(struct machine_desc *desc, struct tag *tags, char **cmdline, struct meminfo *mi) { #ifdef CONFIG_ISAPNP extern int isapnp_disable; /* * We must not use the kernels ISAPnP code * on the NetWinder - it will reset the settings * for the WaveArtist chip and render it inoperable. */ isapnp_disable = 1; #endif } MACHINE_START(NETWINDER, "Rebel-NetWinder") /* Maintainer: Russell King/Rebel.com */ .boot_params = 0x00000100, .video_start = 0x000a0000, .video_end = 0x000bffff, .reserve_lp0 = 1, .reserve_lp2 = 1, .fixup = fixup_netwinder, .map_io = footbridge_map_io, .init_irq = footbridge_init_irq, .timer = &isa_timer, MACHINE_END
gpl-2.0
tiny4579/android_kernel_msm
drivers/tty/serial/sa1100.c
3045
23052
/* * Driver for SA11x0 serial ports * * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o. * * Copyright (C) 2000 Deep Blue Solutions Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #if defined(CONFIG_SERIAL_SA1100_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) #define SUPPORT_SYSRQ #endif #include <linux/module.h> #include <linux/ioport.h> #include <linux/init.h> #include <linux/console.h> #include <linux/sysrq.h> #include <linux/platform_device.h> #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/serial_core.h> #include <linux/serial.h> #include <linux/io.h> #include <asm/irq.h> #include <mach/hardware.h> #include <asm/mach/serial_sa1100.h> /* We've been assigned a range on the "Low-density serial ports" major */ #define SERIAL_SA1100_MAJOR 204 #define MINOR_START 5 #define NR_PORTS 3 #define SA1100_ISR_PASS_LIMIT 256 /* * Convert from ignore_status_mask or read_status_mask to UTSR[01] */ #define SM_TO_UTSR0(x) ((x) & 0xff) #define SM_TO_UTSR1(x) ((x) >> 8) #define UTSR0_TO_SM(x) ((x)) #define UTSR1_TO_SM(x) ((x) << 8) #define UART_GET_UTCR0(sport) __raw_readl((sport)->port.membase + UTCR0) #define UART_GET_UTCR1(sport) __raw_readl((sport)->port.membase + UTCR1) #define UART_GET_UTCR2(sport) __raw_readl((sport)->port.membase + UTCR2) #define UART_GET_UTCR3(sport) 
__raw_readl((sport)->port.membase + UTCR3) #define UART_GET_UTSR0(sport) __raw_readl((sport)->port.membase + UTSR0) #define UART_GET_UTSR1(sport) __raw_readl((sport)->port.membase + UTSR1) #define UART_GET_CHAR(sport) __raw_readl((sport)->port.membase + UTDR) #define UART_PUT_UTCR0(sport,v) __raw_writel((v),(sport)->port.membase + UTCR0) #define UART_PUT_UTCR1(sport,v) __raw_writel((v),(sport)->port.membase + UTCR1) #define UART_PUT_UTCR2(sport,v) __raw_writel((v),(sport)->port.membase + UTCR2) #define UART_PUT_UTCR3(sport,v) __raw_writel((v),(sport)->port.membase + UTCR3) #define UART_PUT_UTSR0(sport,v) __raw_writel((v),(sport)->port.membase + UTSR0) #define UART_PUT_UTSR1(sport,v) __raw_writel((v),(sport)->port.membase + UTSR1) #define UART_PUT_CHAR(sport,v) __raw_writel((v),(sport)->port.membase + UTDR) /* * This is the size of our serial port register set. */ #define UART_PORT_SIZE 0x24 /* * This determines how often we check the modem status signals * for any change. They generally aren't connected to an IRQ * so we have to poll them. We also check immediately before * filling the TX fifo incase CTS has been dropped. */ #define MCTRL_TIMEOUT (250*HZ/1000) struct sa1100_port { struct uart_port port; struct timer_list timer; unsigned int old_status; }; /* * Handle any change of modem status signal since we were last called. 
*/ static void sa1100_mctrl_check(struct sa1100_port *sport) { unsigned int status, changed; status = sport->port.ops->get_mctrl(&sport->port); changed = status ^ sport->old_status; if (changed == 0) return; sport->old_status = status; if (changed & TIOCM_RI) sport->port.icount.rng++; if (changed & TIOCM_DSR) sport->port.icount.dsr++; if (changed & TIOCM_CAR) uart_handle_dcd_change(&sport->port, status & TIOCM_CAR); if (changed & TIOCM_CTS) uart_handle_cts_change(&sport->port, status & TIOCM_CTS); wake_up_interruptible(&sport->port.state->port.delta_msr_wait); } /* * This is our per-port timeout handler, for checking the * modem status signals. */ static void sa1100_timeout(unsigned long data) { struct sa1100_port *sport = (struct sa1100_port *)data; unsigned long flags; if (sport->port.state) { spin_lock_irqsave(&sport->port.lock, flags); sa1100_mctrl_check(sport); spin_unlock_irqrestore(&sport->port.lock, flags); mod_timer(&sport->timer, jiffies + MCTRL_TIMEOUT); } } /* * interrupts disabled on entry */ static void sa1100_stop_tx(struct uart_port *port) { struct sa1100_port *sport = (struct sa1100_port *)port; u32 utcr3; utcr3 = UART_GET_UTCR3(sport); UART_PUT_UTCR3(sport, utcr3 & ~UTCR3_TIE); sport->port.read_status_mask &= ~UTSR0_TO_SM(UTSR0_TFS); } /* * port locked and interrupts disabled */ static void sa1100_start_tx(struct uart_port *port) { struct sa1100_port *sport = (struct sa1100_port *)port; u32 utcr3; utcr3 = UART_GET_UTCR3(sport); sport->port.read_status_mask |= UTSR0_TO_SM(UTSR0_TFS); UART_PUT_UTCR3(sport, utcr3 | UTCR3_TIE); } /* * Interrupts enabled */ static void sa1100_stop_rx(struct uart_port *port) { struct sa1100_port *sport = (struct sa1100_port *)port; u32 utcr3; utcr3 = UART_GET_UTCR3(sport); UART_PUT_UTCR3(sport, utcr3 & ~UTCR3_RIE); } /* * Set the modem control timer to fire immediately. 
*/
static void sa1100_enable_ms(struct uart_port *port)
{
	struct sa1100_port *sport = (struct sa1100_port *)port;

	mod_timer(&sport->timer, jiffies);
}

/*
 * Drain the receive FIFO into the TTY layer, accounting parity,
 * framing and overrun errors.  Called from sa1100_int() with the
 * port lock held.
 */
static void
sa1100_rx_chars(struct sa1100_port *sport)
{
	struct tty_struct *tty = sport->port.state->port.tty;
	unsigned int status, ch, flg;

	status = UTSR1_TO_SM(UART_GET_UTSR1(sport)) |
		 UTSR0_TO_SM(UART_GET_UTSR0(sport));
	while (status & UTSR1_TO_SM(UTSR1_RNE)) {
		ch = UART_GET_CHAR(sport);

		sport->port.icount.rx++;

		flg = TTY_NORMAL;

		/*
		 * note that the error handling code is
		 * out of the main execution path
		 */
		if (status & UTSR1_TO_SM(UTSR1_PRE | UTSR1_FRE | UTSR1_ROR)) {
			if (status & UTSR1_TO_SM(UTSR1_PRE))
				sport->port.icount.parity++;
			else if (status & UTSR1_TO_SM(UTSR1_FRE))
				sport->port.icount.frame++;
			if (status & UTSR1_TO_SM(UTSR1_ROR))
				sport->port.icount.overrun++;

			/* Keep only the error bits the termios settings care about. */
			status &= sport->port.read_status_mask;

			if (status & UTSR1_TO_SM(UTSR1_PRE))
				flg = TTY_PARITY;
			else if (status & UTSR1_TO_SM(UTSR1_FRE))
				flg = TTY_FRAME;

#ifdef SUPPORT_SYSRQ
			sport->port.sysrq = 0;
#endif
		}

		if (uart_handle_sysrq_char(&sport->port, ch))
			goto ignore_char;

		uart_insert_char(&sport->port, status, UTSR1_TO_SM(UTSR1_ROR), ch, flg);

	ignore_char:
		/* Re-read status: more characters may have arrived meanwhile. */
		status = UTSR1_TO_SM(UART_GET_UTSR1(sport)) |
			 UTSR0_TO_SM(UART_GET_UTSR0(sport));
	}
	tty_flip_buffer_push(tty);
}

/*
 * Feed the transmit FIFO from the circular buffer.  Sends a pending
 * x_char first, then fills the FIFO while TNF (transmit-FIFO-not-full)
 * is set.  Called from sa1100_int() with the port lock held.
 */
static void sa1100_tx_chars(struct sa1100_port *sport)
{
	struct circ_buf *xmit = &sport->port.state->xmit;

	if (sport->port.x_char) {
		UART_PUT_CHAR(sport, sport->port.x_char);
		sport->port.icount.tx++;
		sport->port.x_char = 0;
		return;
	}

	/*
	 * Check the modem control lines before
	 * transmitting anything.
	 */
	sa1100_mctrl_check(sport);

	if (uart_circ_empty(xmit) || uart_tx_stopped(&sport->port)) {
		sa1100_stop_tx(&sport->port);
		return;
	}

	/*
	 * Tried using FIFO (not checking TNF) for fifo fill:
	 * still had the '4 bytes repeated' problem.
	 */
	while (UART_GET_UTSR1(sport) & UTSR1_TNF) {
		UART_PUT_CHAR(sport, xmit->buf[xmit->tail]);
		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
		sport->port.icount.tx++;
		if (uart_circ_empty(xmit))
			break;
	}

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&sport->port);

	if (uart_circ_empty(xmit))
		sa1100_stop_tx(&sport->port);
}

/*
 * Main interrupt handler: loops over UTSR0 events (RX fifo service,
 * receiver idle, break begin/end, TX fifo service) until no more are
 * pending or SA1100_ISR_PASS_LIMIT iterations have been made.
 */
static irqreturn_t sa1100_int(int irq, void *dev_id)
{
	struct sa1100_port *sport = dev_id;
	unsigned int status, pass_counter = 0;

	spin_lock(&sport->port.lock);
	status = UART_GET_UTSR0(sport);
	status &= SM_TO_UTSR0(sport->port.read_status_mask) | ~UTSR0_TFS;
	do {
		if (status & (UTSR0_RFS | UTSR0_RID)) {
			/* Clear the receiver idle bit, if set */
			if (status & UTSR0_RID)
				UART_PUT_UTSR0(sport, UTSR0_RID);
			sa1100_rx_chars(sport);
		}

		/* Clear the relevant break bits */
		if (status & (UTSR0_RBB | UTSR0_REB))
			UART_PUT_UTSR0(sport, status & (UTSR0_RBB | UTSR0_REB));

		if (status & UTSR0_RBB)
			sport->port.icount.brk++;

		if (status & UTSR0_REB)
			uart_handle_break(&sport->port);

		if (status & UTSR0_TFS)
			sa1100_tx_chars(sport);
		if (pass_counter++ > SA1100_ISR_PASS_LIMIT)
			break;
		status = UART_GET_UTSR0(sport);
		status &= SM_TO_UTSR0(sport->port.read_status_mask) |
			  ~UTSR0_TFS;
	} while (status & (UTSR0_TFS | UTSR0_RFS | UTSR0_RID));
	spin_unlock(&sport->port.lock);

	return IRQ_HANDLED;
}

/*
 * Return TIOCSER_TEMT when transmitter is not busy.
 */
static unsigned int sa1100_tx_empty(struct uart_port *port)
{
	struct sa1100_port *sport = (struct sa1100_port *)port;

	return UART_GET_UTSR1(sport) & UTSR1_TBY ? 0 : TIOCSER_TEMT;
}

/*
 * No modem-control inputs are read here; report CTS/DSR/CD always
 * asserted (board-specific hooks may override via sa1100_register_uart_fns).
 */
static unsigned int sa1100_get_mctrl(struct uart_port *port)
{
	return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR;
}

/* No modem-control outputs to drive in the generic driver. */
static void sa1100_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
}

/*
 * Interrupts always disabled.
*/
static void sa1100_break_ctl(struct uart_port *port, int break_state)
{
	struct sa1100_port *sport = (struct sa1100_port *)port;
	unsigned long flags;
	unsigned int utcr3;

	/* -1 asserts the break condition, anything else clears it. */
	spin_lock_irqsave(&sport->port.lock, flags);
	utcr3 = UART_GET_UTCR3(sport);
	if (break_state == -1)
		utcr3 |= UTCR3_BRK;
	else
		utcr3 &= ~UTCR3_BRK;
	UART_PUT_UTCR3(sport, utcr3);
	spin_unlock_irqrestore(&sport->port.lock, flags);
}

/*
 * Open-time initialisation: claim the IRQ, enable receiver/transmitter
 * plus the receive interrupt, and start the modem-status poll timer.
 */
static int sa1100_startup(struct uart_port *port)
{
	struct sa1100_port *sport = (struct sa1100_port *)port;
	int retval;

	/*
	 * Allocate the IRQ
	 */
	retval = request_irq(sport->port.irq, sa1100_int, 0,
			     "sa11x0-uart", sport);
	if (retval)
		return retval;

	/*
	 * Finally, clear and enable interrupts
	 */
	UART_PUT_UTSR0(sport, -1);
	UART_PUT_UTCR3(sport, UTCR3_RXE | UTCR3_TXE | UTCR3_RIE);

	/*
	 * Enable modem status interrupts
	 */
	spin_lock_irq(&sport->port.lock);
	sa1100_enable_ms(&sport->port);
	spin_unlock_irq(&sport->port.lock);

	return 0;
}

/* Close-time teardown: stop polling, release the IRQ, quiesce the port. */
static void sa1100_shutdown(struct uart_port *port)
{
	struct sa1100_port *sport = (struct sa1100_port *)port;

	/*
	 * Stop our timer.
	 */
	del_timer_sync(&sport->timer);

	/*
	 * Free the interrupt
	 */
	free_irq(sport->port.irq, sport);

	/*
	 * Disable all interrupts, port and break condition.
	 */
	UART_PUT_UTCR3(sport, 0);
}

/*
 * Apply new termios settings: word size, stop bits, parity, baud rate
 * and the read/ignore status masks.  The transmitter is drained before
 * the control registers are rewritten.
 */
static void
sa1100_set_termios(struct uart_port *port, struct ktermios *termios,
		   struct ktermios *old)
{
	struct sa1100_port *sport = (struct sa1100_port *)port;
	unsigned long flags;
	unsigned int utcr0, old_utcr3, baud, quot;
	unsigned int old_csize = old ? old->c_cflag & CSIZE : CS8;

	/*
	 * We only support CS7 and CS8.
	 */
	while ((termios->c_cflag & CSIZE) != CS7 &&
	       (termios->c_cflag & CSIZE) != CS8) {
		termios->c_cflag &= ~CSIZE;
		termios->c_cflag |= old_csize;
		old_csize = CS8;
	}

	if ((termios->c_cflag & CSIZE) == CS8)
		utcr0 = UTCR0_DSS;
	else
		utcr0 = 0;

	if (termios->c_cflag & CSTOPB)
		utcr0 |= UTCR0_SBS;
	if (termios->c_cflag & PARENB) {
		utcr0 |= UTCR0_PE;
		if (!(termios->c_cflag & PARODD))
			utcr0 |= UTCR0_OES;
	}

	/*
	 * Ask the core to calculate the divisor for us.
	 */
	baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16);
	quot = uart_get_divisor(port, baud);

	spin_lock_irqsave(&sport->port.lock, flags);

	sport->port.read_status_mask &= UTSR0_TO_SM(UTSR0_TFS);
	sport->port.read_status_mask |= UTSR1_TO_SM(UTSR1_ROR);
	if (termios->c_iflag & INPCK)
		sport->port.read_status_mask |=
				UTSR1_TO_SM(UTSR1_FRE | UTSR1_PRE);
	if (termios->c_iflag & (BRKINT | PARMRK))
		sport->port.read_status_mask |=
				UTSR0_TO_SM(UTSR0_RBB | UTSR0_REB);

	/*
	 * Characters to ignore
	 */
	sport->port.ignore_status_mask = 0;
	if (termios->c_iflag & IGNPAR)
		sport->port.ignore_status_mask |=
				UTSR1_TO_SM(UTSR1_FRE | UTSR1_PRE);
	if (termios->c_iflag & IGNBRK) {
		sport->port.ignore_status_mask |=
				UTSR0_TO_SM(UTSR0_RBB | UTSR0_REB);
		/*
		 * If we're ignoring parity and break indicators,
		 * ignore overruns too (for real raw support).
		 */
		if (termios->c_iflag & IGNPAR)
			sport->port.ignore_status_mask |=
				UTSR1_TO_SM(UTSR1_ROR);
	}

	del_timer_sync(&sport->timer);

	/*
	 * Update the per-port timeout.
	 */
	uart_update_timeout(port, termios->c_cflag, baud);

	/*
	 * disable interrupts and drain transmitter
	 */
	old_utcr3 = UART_GET_UTCR3(sport);
	UART_PUT_UTCR3(sport, old_utcr3 & ~(UTCR3_RIE | UTCR3_TIE));

	while (UART_GET_UTSR1(sport) & UTSR1_TBY)
		barrier();

	/* then, disable everything */
	UART_PUT_UTCR3(sport, 0);

	/* set the parity, stop bits and data size */
	UART_PUT_UTCR0(sport, utcr0);

	/* set the baud rate */
	quot -= 1;
	UART_PUT_UTCR1(sport, ((quot & 0xf00) >> 8));
	UART_PUT_UTCR2(sport, (quot & 0xff));

	UART_PUT_UTSR0(sport, -1);

	UART_PUT_UTCR3(sport, old_utcr3);

	if (UART_ENABLE_MS(&sport->port, termios->c_cflag))
		sa1100_enable_ms(&sport->port);

	spin_unlock_irqrestore(&sport->port.lock, flags);
}

/* Human-readable port type for /proc and TIOCGSERIAL. */
static const char *sa1100_type(struct uart_port *port)
{
	struct sa1100_port *sport = (struct sa1100_port *)port;

	return sport->port.type == PORT_SA1100 ? "SA1100" : NULL;
}

/*
 * Release the memory region(s) being used by 'port'.
 */
static void sa1100_release_port(struct uart_port *port)
{
	struct sa1100_port *sport = (struct sa1100_port *)port;

	release_mem_region(sport->port.mapbase, UART_PORT_SIZE);
}

/*
 * Request the memory region(s) being used by 'port'.
 */
static int sa1100_request_port(struct uart_port *port)
{
	struct sa1100_port *sport = (struct sa1100_port *)port;

	return request_mem_region(sport->port.mapbase, UART_PORT_SIZE,
			"sa11x0-uart") != NULL ? 0 : -EBUSY;
}

/*
 * Configure/autoconfigure the port.
 */
static void sa1100_config_port(struct uart_port *port, int flags)
{
	struct sa1100_port *sport = (struct sa1100_port *)port;

	if (flags & UART_CONFIG_TYPE &&
	    sa1100_request_port(&sport->port) == 0)
		sport->port.type = PORT_SA1100;
}

/*
 * Verify the new serial_struct (for TIOCSSERIAL).
* The only change we allow are to the flags and type, and
 * even then only between PORT_SA1100 and PORT_UNKNOWN
 */
static int
sa1100_verify_port(struct uart_port *port, struct serial_struct *ser)
{
	struct sa1100_port *sport = (struct sa1100_port *)port;
	int ret = 0;

	if (ser->type != PORT_UNKNOWN && ser->type != PORT_SA1100)
		ret = -EINVAL;
	if (sport->port.irq != ser->irq)
		ret = -EINVAL;
	if (ser->io_type != SERIAL_IO_MEM)
		ret = -EINVAL;
	if (sport->port.uartclk / 16 != ser->baud_base)
		ret = -EINVAL;
	if ((void *)sport->port.mapbase != ser->iomem_base)
		ret = -EINVAL;
	if (sport->port.iobase != ser->port)
		ret = -EINVAL;
	if (ser->hub6 != 0)
		ret = -EINVAL;
	return ret;
}

/* uart_ops vtable handed to serial_core for every SA1100 port. */
static struct uart_ops sa1100_pops = {
	.tx_empty	= sa1100_tx_empty,
	.set_mctrl	= sa1100_set_mctrl,
	.get_mctrl	= sa1100_get_mctrl,
	.stop_tx	= sa1100_stop_tx,
	.start_tx	= sa1100_start_tx,
	.stop_rx	= sa1100_stop_rx,
	.enable_ms	= sa1100_enable_ms,
	.break_ctl	= sa1100_break_ctl,
	.startup	= sa1100_startup,
	.shutdown	= sa1100_shutdown,
	.set_termios	= sa1100_set_termios,
	.type		= sa1100_type,
	.release_port	= sa1100_release_port,
	.request_port	= sa1100_request_port,
	.config_port	= sa1100_config_port,
	.verify_port	= sa1100_verify_port,
};

static struct sa1100_port sa1100_ports[NR_PORTS];

/*
 * Setup the SA1100 serial ports.  Note that we don't include the IrDA
 * port here since we have our own SIR/FIR driver (see drivers/net/irda)
 *
 * Note also that we support "console=ttySAx" where "x" is either 0 or 1.
 * Which serial port this ends up being depends on the machine you're
 * running this kernel on.  I'm not convinced that this is a good idea,
 * but that's the way it traditionally works.
 *
 * Note that NanoEngine UART3 becomes UART2, and UART2 is no longer
 * used here.
 */
static void __init sa1100_init_ports(void)
{
	static int first = 1;	/* one-shot guard: ports are set up once */
	int i;

	if (!first)
		return;
	first = 0;

	for (i = 0; i < NR_PORTS; i++) {
		sa1100_ports[i].port.uartclk   = 3686400;
		sa1100_ports[i].port.ops       = &sa1100_pops;
		sa1100_ports[i].port.fifosize  = 8;
		sa1100_ports[i].port.line      = i;
		sa1100_ports[i].port.iotype    = UPIO_MEM;
		init_timer(&sa1100_ports[i].timer);
		sa1100_ports[i].timer.function = sa1100_timeout;
		sa1100_ports[i].timer.data     = (unsigned long)&sa1100_ports[i];
	}

	/*
	 * make transmit lines outputs, so that when the port
	 * is closed, the output is in the MARK state.
	 */
	PPDR |= PPC_TXD1 | PPC_TXD3;
	PPSR |= PPC_TXD1 | PPC_TXD3;
}

/*
 * Allow board code to override the modem-control / power-management
 * hooks before the ports are registered.
 */
void __devinit sa1100_register_uart_fns(struct sa1100_port_fns *fns)
{
	if (fns->get_mctrl)
		sa1100_pops.get_mctrl = fns->get_mctrl;
	if (fns->set_mctrl)
		sa1100_pops.set_mctrl = fns->set_mctrl;

	sa1100_pops.pm       = fns->pm;
	sa1100_pops.set_wake = fns->set_wake;
}

/* Map logical port index 'idx' onto hardware UART 'port' (1, 2 or 3). */
void __init sa1100_register_uart(int idx, int port)
{
	if (idx >= NR_PORTS) {
		printk(KERN_ERR "%s: bad index number %d\n", __func__, idx);
		return;
	}

	switch (port) {
	case 1:
		sa1100_ports[idx].port.membase = (void __iomem *)&Ser1UTCR0;
		sa1100_ports[idx].port.mapbase = _Ser1UTCR0;
		sa1100_ports[idx].port.irq     = IRQ_Ser1UART;
		sa1100_ports[idx].port.flags   = UPF_BOOT_AUTOCONF;
		break;

	case 2:
		sa1100_ports[idx].port.membase = (void __iomem *)&Ser2UTCR0;
		sa1100_ports[idx].port.mapbase = _Ser2UTCR0;
		sa1100_ports[idx].port.irq     = IRQ_Ser2ICP;
		sa1100_ports[idx].port.flags   = UPF_BOOT_AUTOCONF;
		break;

	case 3:
		sa1100_ports[idx].port.membase = (void __iomem *)&Ser3UTCR0;
		sa1100_ports[idx].port.mapbase = _Ser3UTCR0;
		sa1100_ports[idx].port.irq     = IRQ_Ser3UART;
		sa1100_ports[idx].port.flags   = UPF_BOOT_AUTOCONF;
		break;

	default:
		printk(KERN_ERR "%s: bad port number %d\n", __func__, port);
	}
}


#ifdef CONFIG_SERIAL_SA1100_CONSOLE
/* Busy-wait for FIFO space, then emit one console character. */
static void sa1100_console_putchar(struct uart_port *port, int ch)
{
	struct sa1100_port *sport = (struct sa1100_port *)port;

	while (!(UART_GET_UTSR1(sport) & UTSR1_TNF))
		barrier();
UART_PUT_CHAR(sport, ch);
}

/*
 * Interrupts are disabled on entering
 */
static void
sa1100_console_write(struct console *co, const char *s, unsigned int count)
{
	struct sa1100_port *sport = &sa1100_ports[co->index];
	unsigned int old_utcr3, status;

	/*
	 * First, save UTCR3 and then disable interrupts
	 */
	old_utcr3 = UART_GET_UTCR3(sport);
	UART_PUT_UTCR3(sport, (old_utcr3 & ~(UTCR3_RIE | UTCR3_TIE)) |
				UTCR3_TXE);

	uart_console_write(&sport->port, s, count, sa1100_console_putchar);

	/*
	 * Finally, wait for transmitter to become empty
	 * and restore UTCR3
	 */
	do {
		status = UART_GET_UTSR1(sport);
	} while (status & UTSR1_TBY);
	UART_PUT_UTCR3(sport, old_utcr3);
}

/*
 * If the port was already initialised (eg, by a boot loader),
 * try to determine the current setup.
 */
static void __init
sa1100_console_get_options(struct sa1100_port *sport, int *baud,
			   int *parity, int *bits)
{
	unsigned int utcr3;

	utcr3 = UART_GET_UTCR3(sport) & (UTCR3_RXE | UTCR3_TXE);
	if (utcr3 == (UTCR3_RXE | UTCR3_TXE)) {
		/* ok, the port was enabled */
		unsigned int utcr0, quot;

		utcr0 = UART_GET_UTCR0(sport);

		*parity = 'n';
		if (utcr0 & UTCR0_PE) {
			if (utcr0 & UTCR0_OES)
				*parity = 'e';
			else
				*parity = 'o';
		}

		if (utcr0 & UTCR0_DSS)
			*bits = 8;
		else
			*bits = 7;

		/* Reconstruct the baud rate from the 12-bit divisor. */
		quot = UART_GET_UTCR2(sport) | UART_GET_UTCR1(sport) << 8;
		quot &= 0xfff;
		*baud = sport->port.uartclk / (16 * (quot + 1));
	}
}

static int __init
sa1100_console_setup(struct console *co, char *options)
{
	struct sa1100_port *sport;
	int baud = 9600;
	int bits = 8;
	int parity = 'n';
	int flow = 'n';

	/*
	 * Check whether an invalid uart number has been specified, and
	 * if so, search for the first available port that does have
	 * console support.
	 */
	if (co->index == -1 || co->index >= NR_PORTS)
		co->index = 0;
	sport = &sa1100_ports[co->index];

	if (options)
		uart_parse_options(options, &baud, &parity, &bits, &flow);
	else
		sa1100_console_get_options(sport, &baud, &parity, &bits);

	return uart_set_options(&sport->port, co, baud, parity, bits, flow);
}

static struct uart_driver sa1100_reg;
static struct console sa1100_console = {
	.name		= "ttySA",
	.write		= sa1100_console_write,
	.device		= uart_console_device,
	.setup		= sa1100_console_setup,
	.flags		= CON_PRINTBUFFER,
	.index		= -1,
	.data		= &sa1100_reg,
};

static int __init sa1100_rs_console_init(void)
{
	sa1100_init_ports();
	register_console(&sa1100_console);
	return 0;
}
console_initcall(sa1100_rs_console_init);

#define SA1100_CONSOLE	&sa1100_console
#else
#define SA1100_CONSOLE	NULL
#endif

static struct uart_driver sa1100_reg = {
	.owner			= THIS_MODULE,
	.driver_name		= "ttySA",
	.dev_name		= "ttySA",
	.major			= SERIAL_SA1100_MAJOR,
	.minor			= MINOR_START,
	.nr			= NR_PORTS,
	.cons			= SA1100_CONSOLE,
};

/* Platform PM hook: suspend the uart attached to this platform device. */
static int sa1100_serial_suspend(struct platform_device *dev, pm_message_t state)
{
	struct sa1100_port *sport = platform_get_drvdata(dev);

	if (sport)
		uart_suspend_port(&sa1100_reg, &sport->port);

	return 0;
}

/* Platform PM hook: resume the uart attached to this platform device. */
static int sa1100_serial_resume(struct platform_device *dev)
{
	struct sa1100_port *sport = platform_get_drvdata(dev);

	if (sport)
		uart_resume_port(&sa1100_reg, &sport->port);

	return 0;
}

/*
 * Match the platform device's MEM resource against the statically
 * initialised ports and register the one whose mapbase matches.
 */
static int sa1100_serial_probe(struct platform_device *dev)
{
	struct resource *res = dev->resource;
	int i;

	for (i = 0; i < dev->num_resources; i++, res++)
		if (res->flags & IORESOURCE_MEM)
			break;

	if (i < dev->num_resources) {
		for (i = 0; i < NR_PORTS; i++) {
			if (sa1100_ports[i].port.mapbase != res->start)
				continue;

			sa1100_ports[i].port.dev = &dev->dev;
			uart_add_one_port(&sa1100_reg, &sa1100_ports[i].port);
			platform_set_drvdata(dev, &sa1100_ports[i]);
			break;
		}
	}

	return 0;
}

static int sa1100_serial_remove(struct platform_device *pdev)
{
	struct sa1100_port *sport = platform_get_drvdata(pdev);

	platform_set_drvdata(pdev, NULL);

	if (sport)
		uart_remove_one_port(&sa1100_reg, &sport->port);

	return 0;
}

static struct platform_driver sa11x0_serial_driver = {
	.probe		= sa1100_serial_probe,
	.remove		= sa1100_serial_remove,
	.suspend	= sa1100_serial_suspend,
	.resume		= sa1100_serial_resume,
	.driver		= {
		.name	= "sa11x0-uart",
		.owner	= THIS_MODULE,
	},
};

static int __init sa1100_serial_init(void)
{
	int ret;

	printk(KERN_INFO "Serial: SA11x0 driver\n");

	sa1100_init_ports();

	ret = uart_register_driver(&sa1100_reg);
	if (ret == 0) {
		ret = platform_driver_register(&sa11x0_serial_driver);
		if (ret)
			uart_unregister_driver(&sa1100_reg);
	}
	return ret;
}

static void __exit sa1100_serial_exit(void)
{
	platform_driver_unregister(&sa11x0_serial_driver);
	uart_unregister_driver(&sa1100_reg);
}

module_init(sa1100_serial_init);
module_exit(sa1100_serial_exit);

MODULE_AUTHOR("Deep Blue Solutions Ltd");
MODULE_DESCRIPTION("SA1100 generic serial port driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CHARDEV_MAJOR(SERIAL_SA1100_MAJOR);
MODULE_ALIAS("platform:sa11x0-uart");
gpl-2.0
MoKee/android_kernel_lge_gproj
fs/quota/quota.c
4837
9809
/*
 * Quota code necessary even when VFS quota support is not compiled
 * into the kernel.  The interesting stuff is over in dquot.c, here
 * we have symbols for initial quotactl(2) handling, the sysctl(2)
 * variables, etc - things needed even when quota support disabled.
 */

#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/slab.h>
#include <asm/current.h>
#include <asm/uaccess.h>
#include <linux/kernel.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/capability.h>
#include <linux/quotaops.h>
#include <linux/types.h>
#include <linux/writeback.h>

/*
 * Decide whether the caller may run quotactl command 'cmd' of quota
 * 'type' for id 'id' on 'sb'.  Read-only status queries are free,
 * queries about the caller's own id are allowed, everything else
 * needs CAP_SYS_ADMIN; the LSM gets the final say.
 */
static int check_quotactl_permission(struct super_block *sb, int type, int cmd,
				     qid_t id)
{
	switch (cmd) {
	/* these commands do not require any special privilegues */
	case Q_GETFMT:
	case Q_SYNC:
	case Q_GETINFO:
	case Q_XGETQSTAT:
	case Q_XQUOTASYNC:
		break;
	/* allow to query information for dquots we "own" */
	case Q_GETQUOTA:
	case Q_XGETQUOTA:
		if ((type == USRQUOTA && current_euid() == id) ||
		    (type == GRPQUOTA && in_egroup_p(id)))
			break;
		/*FALLTHROUGH*/
	default:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
	}

	return security_quotactl(cmd, type, id, sb);
}

/* iterate_supers() callback: sync one filesystem's quota, if supported. */
static void quota_sync_one(struct super_block *sb, void *arg)
{
	if (sb->s_qcop && sb->s_qcop->quota_sync)
		sb->s_qcop->quota_sync(sb, *(int *)arg, 1);
}

/* Q_SYNC without a device: sync quota on every superblock. */
static int quota_sync_all(int type)
{
	int ret;

	if (type >= MAXQUOTAS)
		return -EINVAL;
	ret = security_quotactl(Q_SYNC, type, 0, NULL);
	if (!ret)
		iterate_supers(quota_sync_one, &type);
	return ret;
}

/*
 * Turn quota accounting on.  Filesystems with quota metadata hidden
 * from the namespace use ->quota_on_meta and ignore 'path'.
 */
static int quota_quotaon(struct super_block *sb, int type, int cmd, qid_t id,
		         struct path *path)
{
	if (!sb->s_qcop->quota_on && !sb->s_qcop->quota_on_meta)
		return -ENOSYS;
	if (sb->s_qcop->quota_on_meta)
		return sb->s_qcop->quota_on_meta(sb, type, id);
	if (IS_ERR(path))
		return PTR_ERR(path);
	return sb->s_qcop->quota_on(sb, type, id, path);
}

/* Copy the active quota format id for 'type' to userspace. */
static int quota_getfmt(struct super_block *sb, int type, void __user *addr)
{
	__u32 fmt;

	down_read(&sb_dqopt(sb)->dqptr_sem);
	if (!sb_has_quota_active(sb, type)) {
		up_read(&sb_dqopt(sb)->dqptr_sem);
		return -ESRCH;
	}
	fmt = sb_dqopt(sb)->info[type].dqi_format->qf_fmt_id;
	up_read(&sb_dqopt(sb)->dqptr_sem);
	if (copy_to_user(addr, &fmt, sizeof(fmt)))
		return -EFAULT;
	return 0;
}

/* Copy quota info (grace times, flags) for 'type' to userspace. */
static int quota_getinfo(struct super_block *sb, int type, void __user *addr)
{
	struct if_dqinfo info;
	int ret;

	if (!sb->s_qcop->get_info)
		return -ENOSYS;
	ret = sb->s_qcop->get_info(sb, type, &info);
	if (!ret && copy_to_user(addr, &info, sizeof(info)))
		return -EFAULT;
	return ret;
}

/* Set quota info for 'type' from a userspace if_dqinfo. */
static int quota_setinfo(struct super_block *sb, int type, void __user *addr)
{
	struct if_dqinfo info;

	if (copy_from_user(&info, addr, sizeof(info)))
		return -EFAULT;
	if (!sb->s_qcop->set_info)
		return -ENOSYS;
	return sb->s_qcop->set_info(sb, type, &info);
}

/* Translate the filesystem-internal fs_disk_quota into the VFS if_dqblk. */
static void copy_to_if_dqblk(struct if_dqblk *dst, struct fs_disk_quota *src)
{
	dst->dqb_bhardlimit = src->d_blk_hardlimit;
	dst->dqb_bsoftlimit = src->d_blk_softlimit;
	dst->dqb_curspace = src->d_bcount;
	dst->dqb_ihardlimit = src->d_ino_hardlimit;
	dst->dqb_isoftlimit = src->d_ino_softlimit;
	dst->dqb_curinodes = src->d_icount;
	dst->dqb_btime = src->d_btimer;
	dst->dqb_itime = src->d_itimer;
	dst->dqb_valid = QIF_ALL;
}

/* Q_GETQUOTA: fetch per-id usage/limits and copy them to userspace. */
static int quota_getquota(struct super_block *sb, int type, qid_t id,
			  void __user *addr)
{
	struct fs_disk_quota fdq;
	struct if_dqblk idq;
	int ret;

	if (!sb->s_qcop->get_dqblk)
		return -ENOSYS;
	ret = sb->s_qcop->get_dqblk(sb, type, id, &fdq);
	if (ret)
		return ret;
	copy_to_if_dqblk(&idq, &fdq);
	if (copy_to_user(addr, &idq, sizeof(idq)))
		return -EFAULT;
	return 0;
}

/*
 * Translate a VFS if_dqblk into fs_disk_quota, mapping the QIF_* valid
 * bits onto the corresponding FS_DQ_* fieldmask bits.
 */
static void copy_from_if_dqblk(struct fs_disk_quota *dst, struct if_dqblk *src)
{
	dst->d_blk_hardlimit = src->dqb_bhardlimit;
	dst->d_blk_softlimit  = src->dqb_bsoftlimit;
	dst->d_bcount = src->dqb_curspace;
	dst->d_ino_hardlimit = src->dqb_ihardlimit;
	dst->d_ino_softlimit = src->dqb_isoftlimit;
	dst->d_icount = src->dqb_curinodes;
	dst->d_btimer = src->dqb_btime;
	dst->d_itimer = src->dqb_itime;

	dst->d_fieldmask = 0;
	if (src->dqb_valid & QIF_BLIMITS)
		dst->d_fieldmask |= FS_DQ_BSOFT | FS_DQ_BHARD;
	if (src->dqb_valid & QIF_SPACE)
		dst->d_fieldmask |= FS_DQ_BCOUNT;
	if (src->dqb_valid & QIF_ILIMITS)
		dst->d_fieldmask |= FS_DQ_ISOFT | FS_DQ_IHARD;
	if (src->dqb_valid & QIF_INODES)
		dst->d_fieldmask |= FS_DQ_ICOUNT;
	if (src->dqb_valid & QIF_BTIME)
		dst->d_fieldmask |= FS_DQ_BTIMER;
	if (src->dqb_valid & QIF_ITIME)
		dst->d_fieldmask |= FS_DQ_ITIMER;
}

/* Q_SETQUOTA: set per-id usage/limits from a userspace if_dqblk. */
static int quota_setquota(struct super_block *sb, int type, qid_t id,
			  void __user *addr)
{
	struct fs_disk_quota fdq;
	struct if_dqblk idq;

	if (copy_from_user(&idq, addr, sizeof(idq)))
		return -EFAULT;
	if (!sb->s_qcop->set_dqblk)
		return -ENOSYS;
	copy_from_if_dqblk(&fdq, &idq);
	return sb->s_qcop->set_dqblk(sb, type, id, &fdq);
}

/* Q_XQUOTAON/OFF/RM: pass the XFS state flags down to the filesystem. */
static int quota_setxstate(struct super_block *sb, int cmd, void __user *addr)
{
	__u32 flags;

	if (copy_from_user(&flags, addr, sizeof(flags)))
		return -EFAULT;
	if (!sb->s_qcop->set_xstate)
		return -ENOSYS;
	return sb->s_qcop->set_xstate(sb, flags, cmd);
}

/* Q_XGETQSTAT: copy the XFS-style quota state to userspace. */
static int quota_getxstate(struct super_block *sb, void __user *addr)
{
	struct fs_quota_stat fqs;
	int ret;

	if (!sb->s_qcop->get_xstate)
		return -ENOSYS;
	ret = sb->s_qcop->get_xstate(sb, &fqs);
	if (!ret && copy_to_user(addr, &fqs, sizeof(fqs)))
		return -EFAULT;
	return ret;
}

/* Q_XSETQLIM: set per-id limits from a userspace fs_disk_quota. */
static int quota_setxquota(struct super_block *sb, int type, qid_t id,
			   void __user *addr)
{
	struct fs_disk_quota fdq;

	if (copy_from_user(&fdq, addr, sizeof(fdq)))
		return -EFAULT;
	if (!sb->s_qcop->set_dqblk)
		return -ENOSYS;
	return sb->s_qcop->set_dqblk(sb, type, id, &fdq);
}

/* Q_XGETQUOTA: copy per-id usage/limits as fs_disk_quota to userspace. */
static int quota_getxquota(struct super_block *sb, int type, qid_t id,
			   void __user *addr)
{
	struct fs_disk_quota fdq;
	int ret;

	if (!sb->s_qcop->get_dqblk)
		return -ENOSYS;
	ret = sb->s_qcop->get_dqblk(sb, type, id, &fdq);
	if (!ret && copy_to_user(addr, &fdq, sizeof(fdq)))
		return -EFAULT;
	return ret;
}

/* Copy parameters and call proper function */
static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id,
		       void __user *addr, struct path *path)
{
	int ret;

	/* XFS (XQM) commands allow more quota types than the VFS ones. */
	if (type >= (XQM_COMMAND(cmd) ? XQM_MAXQUOTAS : MAXQUOTAS))
		return -EINVAL;
	if (!sb->s_qcop)
		return -ENOSYS;

	ret = check_quotactl_permission(sb, type, cmd, id);
	if (ret < 0)
		return ret;

	switch (cmd) {
	case Q_QUOTAON:
		return quota_quotaon(sb, type, cmd, id, path);
	case Q_QUOTAOFF:
		if (!sb->s_qcop->quota_off)
			return -ENOSYS;
		return sb->s_qcop->quota_off(sb, type);
	case Q_GETFMT:
		return quota_getfmt(sb, type, addr);
	case Q_GETINFO:
		return quota_getinfo(sb, type, addr);
	case Q_SETINFO:
		return quota_setinfo(sb, type, addr);
	case Q_GETQUOTA:
		return quota_getquota(sb, type, id, addr);
	case Q_SETQUOTA:
		return quota_setquota(sb, type, id, addr);
	case Q_SYNC:
		if (!sb->s_qcop->quota_sync)
			return -ENOSYS;
		return sb->s_qcop->quota_sync(sb, type, 1);
	case Q_XQUOTAON:
	case Q_XQUOTAOFF:
	case Q_XQUOTARM:
		return quota_setxstate(sb, cmd, addr);
	case Q_XGETQSTAT:
		return quota_getxstate(sb, addr);
	case Q_XSETQLIM:
		return quota_setxquota(sb, type, id, addr);
	case Q_XGETQUOTA:
		return quota_getxquota(sb, type, id, addr);
	case Q_XQUOTASYNC:
		if (sb->s_flags & MS_RDONLY)
			return -EROFS;
		/* XFS quotas are fully coherent now, making this call a noop */
		return 0;
	default:
		return -EINVAL;
	}
}

/* Return 1 if 'cmd' will block on frozen filesystem */
static int quotactl_cmd_write(int cmd)
{
	switch (cmd) {
	case Q_GETFMT:
	case Q_GETINFO:
	case Q_SYNC:
	case Q_XGETQSTAT:
	case Q_XGETQUOTA:
	case Q_XQUOTASYNC:
		return 0;
	}
	return 1;
}

/*
 * look up a superblock on which quota ops will be performed
 * - use the name of a block device to find the superblock thereon
 */
static struct super_block *quotactl_block(const char __user *special, int cmd)
{
#ifdef CONFIG_BLOCK
	struct block_device *bdev;
	struct super_block *sb;
	char *tmp = getname(special);

	if (IS_ERR(tmp))
		return ERR_CAST(tmp);
	bdev = lookup_bdev(tmp);
	putname(tmp);
	if (IS_ERR(bdev))
		return ERR_CAST(bdev);
	/* Writing commands must wait for a frozen fs to thaw first. */
	if (quotactl_cmd_write(cmd))
		sb = get_super_thawed(bdev);
	else
		sb = get_super(bdev);
	bdput(bdev);
	if (!sb)
		return ERR_PTR(-ENODEV);

	return sb;
#else
	return ERR_PTR(-ENODEV);
#endif
}

/*
 * This is the system call interface. This communicates with
 * the user-level programs. Currently this only supports diskquota
 * calls. Maybe we need to add the process quotas etc. in the future,
 * but we probably should use rlimits for that.
 */
SYSCALL_DEFINE4(quotactl, unsigned int, cmd, const char __user *, special,
		qid_t, id, void __user *, addr)
{
	uint cmds, type;
	struct super_block *sb = NULL;
	struct path path, *pathp = NULL;
	int ret;

	cmds = cmd >> SUBCMDSHIFT;
	type = cmd & SUBCMDMASK;

	/*
	 * As a special case Q_SYNC can be called without a specific device.
	 * It will iterate all superblocks that have quota enabled and call
	 * the sync action on each of them.
	 */
	if (!special) {
		if (cmds == Q_SYNC)
			return quota_sync_all(type);
		return -ENODEV;
	}

	/*
	 * Path for quotaon has to be resolved before grabbing superblock
	 * because that gets s_umount sem which is also possibly needed by path
	 * resolution (think about autofs) and thus deadlocks could arise.
	 */
	if (cmds == Q_QUOTAON) {
		ret = user_path_at(AT_FDCWD, addr, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &path);
		if (ret)
			pathp = ERR_PTR(ret);
		else
			pathp = &path;
	}

	sb = quotactl_block(special, cmds);
	if (IS_ERR(sb)) {
		ret = PTR_ERR(sb);
		goto out;
	}

	ret = do_quotactl(sb, type, cmds, id, addr, pathp);

	drop_super(sb);
out:
	if (pathp && !IS_ERR(pathp))
		path_put(pathp);
	return ret;
}
gpl-2.0
obsolete-ra/kernel_motorola_msm8226
arch/powerpc/kernel/io-workarounds.c
4837
4251
/* * Support PCI IO workaround * * Copyright (C) 2006 Benjamin Herrenschmidt <benh@kernel.crashing.org> * IBM, Corp. * (C) Copyright 2007-2008 TOSHIBA CORPORATION * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #undef DEBUG #include <linux/kernel.h> #include <linux/sched.h> /* for init_mm */ #include <asm/io.h> #include <asm/machdep.h> #include <asm/pgtable.h> #include <asm/ppc-pci.h> #include <asm/io-workarounds.h> #define IOWA_MAX_BUS 8 static struct iowa_bus iowa_busses[IOWA_MAX_BUS]; static unsigned int iowa_bus_count; static struct iowa_bus *iowa_pci_find(unsigned long vaddr, unsigned long paddr) { int i, j; struct resource *res; unsigned long vstart, vend; for (i = 0; i < iowa_bus_count; i++) { struct iowa_bus *bus = &iowa_busses[i]; struct pci_controller *phb = bus->phb; if (vaddr) { vstart = (unsigned long)phb->io_base_virt; vend = vstart + phb->pci_io_size - 1; if ((vaddr >= vstart) && (vaddr <= vend)) return bus; } if (paddr) for (j = 0; j < 3; j++) { res = &phb->mem_resources[j]; if (paddr >= res->start && paddr <= res->end) return bus; } } return NULL; } struct iowa_bus *iowa_mem_find_bus(const PCI_IO_ADDR addr) { struct iowa_bus *bus; int token; token = PCI_GET_ADDR_TOKEN(addr); if (token && token <= iowa_bus_count) bus = &iowa_busses[token - 1]; else { unsigned long vaddr, paddr; pte_t *ptep; vaddr = (unsigned long)PCI_FIX_ADDR(addr); if (vaddr < PHB_IO_BASE || vaddr >= PHB_IO_END) return NULL; ptep = find_linux_pte(init_mm.pgd, vaddr); if (ptep == NULL) paddr = 0; else paddr = pte_pfn(*ptep) << PAGE_SHIFT; bus = iowa_pci_find(vaddr, paddr); if (bus == NULL) return NULL; } return bus; } struct iowa_bus *iowa_pio_find_bus(unsigned long port) { unsigned long vaddr = (unsigned long)pci_io_base + port; return iowa_pci_find(vaddr, 0); } #define DEF_PCI_AC_RET(name, ret, at, al, space, aa) \ static ret iowa_##name at \ 
{ \ struct iowa_bus *bus; \ bus = iowa_##space##_find_bus(aa); \ if (bus && bus->ops && bus->ops->name) \ return bus->ops->name al; \ return __do_##name al; \ } #define DEF_PCI_AC_NORET(name, at, al, space, aa) \ static void iowa_##name at \ { \ struct iowa_bus *bus; \ bus = iowa_##space##_find_bus(aa); \ if (bus && bus->ops && bus->ops->name) { \ bus->ops->name al; \ return; \ } \ __do_##name al; \ } #include <asm/io-defs.h> #undef DEF_PCI_AC_RET #undef DEF_PCI_AC_NORET static const struct ppc_pci_io __devinitconst iowa_pci_io = { #define DEF_PCI_AC_RET(name, ret, at, al, space, aa) .name = iowa_##name, #define DEF_PCI_AC_NORET(name, at, al, space, aa) .name = iowa_##name, #include <asm/io-defs.h> #undef DEF_PCI_AC_RET #undef DEF_PCI_AC_NORET }; static void __iomem *iowa_ioremap(phys_addr_t addr, unsigned long size, unsigned long flags, void *caller) { struct iowa_bus *bus; void __iomem *res = __ioremap_caller(addr, size, flags, caller); int busno; bus = iowa_pci_find(0, (unsigned long)addr); if (bus != NULL) { busno = bus - iowa_busses; PCI_SET_ADDR_TOKEN(res, busno + 1); } return res; } /* Enable IO workaround */ static void __devinit io_workaround_init(void) { static int io_workaround_inited; if (io_workaround_inited) return; ppc_pci_io = iowa_pci_io; ppc_md.ioremap = iowa_ioremap; io_workaround_inited = 1; } /* Register new bus to support workaround */ void __devinit iowa_register_bus(struct pci_controller *phb, struct ppc_pci_io *ops, int (*initfunc)(struct iowa_bus *, void *), void *data) { struct iowa_bus *bus; struct device_node *np = phb->dn; io_workaround_init(); if (iowa_bus_count >= IOWA_MAX_BUS) { pr_err("IOWA:Too many pci bridges, " "workarounds disabled for %s\n", np->full_name); return; } bus = &iowa_busses[iowa_bus_count]; bus->phb = phb; bus->ops = ops; bus->private = data; if (initfunc) if ((*initfunc)(bus, data)) return; iowa_bus_count++; pr_debug("IOWA:[%d]Add bus, %s.\n", iowa_bus_count-1, np->full_name); }
gpl-2.0
MWisBest/omap
net/mac80211/rc80211_minstrel_ht.c
4837
23838
/* * Copyright (C) 2010 Felix Fietkau <nbd@openwrt.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/netdevice.h> #include <linux/types.h> #include <linux/skbuff.h> #include <linux/debugfs.h> #include <linux/random.h> #include <linux/ieee80211.h> #include <net/mac80211.h> #include "rate.h" #include "rc80211_minstrel.h" #include "rc80211_minstrel_ht.h" #define AVG_PKT_SIZE 1200 #define SAMPLE_COLUMNS 10 #define EWMA_LEVEL 75 /* Number of bits for an average sized packet */ #define MCS_NBITS (AVG_PKT_SIZE << 3) /* Number of symbols for a packet with (bps) bits per symbol */ #define MCS_NSYMS(bps) ((MCS_NBITS + (bps) - 1) / (bps)) /* Transmission time for a packet containing (syms) symbols */ #define MCS_SYMBOL_TIME(sgi, syms) \ (sgi ? \ ((syms) * 18 + 4) / 5 : /* syms * 3.6 us */ \ (syms) << 2 /* syms * 4 us */ \ ) /* Transmit duration for the raw data part of an average sized packet */ #define MCS_DURATION(streams, sgi, bps) MCS_SYMBOL_TIME(sgi, MCS_NSYMS((streams) * (bps))) /* * Define group sort order: HT40 -> SGI -> #streams */ #define GROUP_IDX(_streams, _sgi, _ht40) \ MINSTREL_MAX_STREAMS * 2 * _ht40 + \ MINSTREL_MAX_STREAMS * _sgi + \ _streams - 1 /* MCS rate information for an MCS group */ #define MCS_GROUP(_streams, _sgi, _ht40) \ [GROUP_IDX(_streams, _sgi, _ht40)] = { \ .streams = _streams, \ .flags = \ (_sgi ? IEEE80211_TX_RC_SHORT_GI : 0) | \ (_ht40 ? IEEE80211_TX_RC_40_MHZ_WIDTH : 0), \ .duration = { \ MCS_DURATION(_streams, _sgi, _ht40 ? 54 : 26), \ MCS_DURATION(_streams, _sgi, _ht40 ? 108 : 52), \ MCS_DURATION(_streams, _sgi, _ht40 ? 162 : 78), \ MCS_DURATION(_streams, _sgi, _ht40 ? 216 : 104), \ MCS_DURATION(_streams, _sgi, _ht40 ? 324 : 156), \ MCS_DURATION(_streams, _sgi, _ht40 ? 432 : 208), \ MCS_DURATION(_streams, _sgi, _ht40 ? 486 : 234), \ MCS_DURATION(_streams, _sgi, _ht40 ? 
540 : 260) \ } \ } /* * To enable sufficiently targeted rate sampling, MCS rates are divided into * groups, based on the number of streams and flags (HT40, SGI) that they * use. * * Sortorder has to be fixed for GROUP_IDX macro to be applicable: * HT40 -> SGI -> #streams */ const struct mcs_group minstrel_mcs_groups[] = { MCS_GROUP(1, 0, 0), MCS_GROUP(2, 0, 0), #if MINSTREL_MAX_STREAMS >= 3 MCS_GROUP(3, 0, 0), #endif MCS_GROUP(1, 1, 0), MCS_GROUP(2, 1, 0), #if MINSTREL_MAX_STREAMS >= 3 MCS_GROUP(3, 1, 0), #endif MCS_GROUP(1, 0, 1), MCS_GROUP(2, 0, 1), #if MINSTREL_MAX_STREAMS >= 3 MCS_GROUP(3, 0, 1), #endif MCS_GROUP(1, 1, 1), MCS_GROUP(2, 1, 1), #if MINSTREL_MAX_STREAMS >= 3 MCS_GROUP(3, 1, 1), #endif }; static u8 sample_table[SAMPLE_COLUMNS][MCS_GROUP_RATES]; /* * Perform EWMA (Exponentially Weighted Moving Average) calculation */ static int minstrel_ewma(int old, int new, int weight) { return (new * (100 - weight) + old * weight) / 100; } /* * Look up an MCS group index based on mac80211 rate information */ static int minstrel_ht_get_group_idx(struct ieee80211_tx_rate *rate) { return GROUP_IDX((rate->idx / MCS_GROUP_RATES) + 1, !!(rate->flags & IEEE80211_TX_RC_SHORT_GI), !!(rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)); } static inline struct minstrel_rate_stats * minstrel_get_ratestats(struct minstrel_ht_sta *mi, int index) { return &mi->groups[index / MCS_GROUP_RATES].rates[index % MCS_GROUP_RATES]; } /* * Recalculate success probabilities and counters for a rate using EWMA */ static void minstrel_calc_rate_ewma(struct minstrel_rate_stats *mr) { if (unlikely(mr->attempts > 0)) { mr->sample_skipped = 0; mr->cur_prob = MINSTREL_FRAC(mr->success, mr->attempts); if (!mr->att_hist) mr->probability = mr->cur_prob; else mr->probability = minstrel_ewma(mr->probability, mr->cur_prob, EWMA_LEVEL); mr->att_hist += mr->attempts; mr->succ_hist += mr->success; } else { mr->sample_skipped++; } mr->last_success = mr->success; mr->last_attempts = mr->attempts; mr->success = 0; 
mr->attempts = 0; } /* * Calculate throughput based on the average A-MPDU length, taking into account * the expected number of retransmissions and their expected length */ static void minstrel_ht_calc_tp(struct minstrel_ht_sta *mi, int group, int rate) { struct minstrel_rate_stats *mr; unsigned int usecs; mr = &mi->groups[group].rates[rate]; if (mr->probability < MINSTREL_FRAC(1, 10)) { mr->cur_tp = 0; return; } usecs = mi->overhead / MINSTREL_TRUNC(mi->avg_ampdu_len); usecs += minstrel_mcs_groups[group].duration[rate]; mr->cur_tp = MINSTREL_TRUNC((1000000 / usecs) * mr->probability); } /* * Update rate statistics and select new primary rates * * Rules for rate selection: * - max_prob_rate must use only one stream, as a tradeoff between delivery * probability and throughput during strong fluctuations * - as long as the max prob rate has a probability of more than 3/4, pick * higher throughput rates, even if the probablity is a bit lower */ static void minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi) { struct minstrel_mcs_group_data *mg; struct minstrel_rate_stats *mr; int cur_prob, cur_prob_tp, cur_tp, cur_tp2; int group, i, index; if (mi->ampdu_packets > 0) { mi->avg_ampdu_len = minstrel_ewma(mi->avg_ampdu_len, MINSTREL_FRAC(mi->ampdu_len, mi->ampdu_packets), EWMA_LEVEL); mi->ampdu_len = 0; mi->ampdu_packets = 0; } mi->sample_slow = 0; mi->sample_count = 0; mi->max_tp_rate = 0; mi->max_tp_rate2 = 0; mi->max_prob_rate = 0; for (group = 0; group < ARRAY_SIZE(minstrel_mcs_groups); group++) { cur_prob = 0; cur_prob_tp = 0; cur_tp = 0; cur_tp2 = 0; mg = &mi->groups[group]; if (!mg->supported) continue; mg->max_tp_rate = 0; mg->max_tp_rate2 = 0; mg->max_prob_rate = 0; mi->sample_count++; for (i = 0; i < MCS_GROUP_RATES; i++) { if (!(mg->supported & BIT(i))) continue; mr = &mg->rates[i]; mr->retry_updated = false; index = MCS_GROUP_RATES * group + i; minstrel_calc_rate_ewma(mr); minstrel_ht_calc_tp(mi, group, i); if (!mr->cur_tp) continue; 
/* ignore the lowest rate of each single-stream group */ if (!i && minstrel_mcs_groups[group].streams == 1) continue; if ((mr->cur_tp > cur_prob_tp && mr->probability > MINSTREL_FRAC(3, 4)) || mr->probability > cur_prob) { mg->max_prob_rate = index; cur_prob = mr->probability; cur_prob_tp = mr->cur_tp; } if (mr->cur_tp > cur_tp) { swap(index, mg->max_tp_rate); cur_tp = mr->cur_tp; mr = minstrel_get_ratestats(mi, index); } if (index >= mg->max_tp_rate) continue; if (mr->cur_tp > cur_tp2) { mg->max_tp_rate2 = index; cur_tp2 = mr->cur_tp; } } } /* try to sample up to half of the available rates during each interval */ mi->sample_count *= 4; cur_prob = 0; cur_prob_tp = 0; cur_tp = 0; cur_tp2 = 0; for (group = 0; group < ARRAY_SIZE(minstrel_mcs_groups); group++) { mg = &mi->groups[group]; if (!mg->supported) continue; mr = minstrel_get_ratestats(mi, mg->max_prob_rate); if (cur_prob_tp < mr->cur_tp && minstrel_mcs_groups[group].streams == 1) { mi->max_prob_rate = mg->max_prob_rate; cur_prob = mr->cur_prob; cur_prob_tp = mr->cur_tp; } mr = minstrel_get_ratestats(mi, mg->max_tp_rate); if (cur_tp < mr->cur_tp) { mi->max_tp_rate2 = mi->max_tp_rate; cur_tp2 = cur_tp; mi->max_tp_rate = mg->max_tp_rate; cur_tp = mr->cur_tp; } mr = minstrel_get_ratestats(mi, mg->max_tp_rate2); if (cur_tp2 < mr->cur_tp) { mi->max_tp_rate2 = mg->max_tp_rate2; cur_tp2 = mr->cur_tp; } } mi->stats_update = jiffies; } static bool minstrel_ht_txstat_valid(struct ieee80211_tx_rate *rate) { if (rate->idx < 0) return false; if (!rate->count) return false; return !!(rate->flags & IEEE80211_TX_RC_MCS); } static void minstrel_next_sample_idx(struct minstrel_ht_sta *mi) { struct minstrel_mcs_group_data *mg; for (;;) { mi->sample_group++; mi->sample_group %= ARRAY_SIZE(minstrel_mcs_groups); mg = &mi->groups[mi->sample_group]; if (!mg->supported) continue; if (++mg->index >= MCS_GROUP_RATES) { mg->index = 0; if (++mg->column >= ARRAY_SIZE(sample_table)) mg->column = 0; } break; } } static void 
minstrel_downgrade_rate(struct minstrel_ht_sta *mi, unsigned int *idx, bool primary) { int group, orig_group; orig_group = group = *idx / MCS_GROUP_RATES; while (group > 0) { group--; if (!mi->groups[group].supported) continue; if (minstrel_mcs_groups[group].streams > minstrel_mcs_groups[orig_group].streams) continue; if (primary) *idx = mi->groups[group].max_tp_rate; else *idx = mi->groups[group].max_tp_rate2; break; } } static void minstrel_aggr_check(struct ieee80211_sta *pubsta, struct sk_buff *skb) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; struct sta_info *sta = container_of(pubsta, struct sta_info, sta); u16 tid; if (unlikely(!ieee80211_is_data_qos(hdr->frame_control))) return; if (unlikely(skb->protocol == cpu_to_be16(ETH_P_PAE))) return; tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK; if (likely(sta->ampdu_mlme.tid_tx[tid])) return; if (skb_get_queue_mapping(skb) == IEEE80211_AC_VO) return; ieee80211_start_tx_ba_session(pubsta, tid, 5000); } static void minstrel_ht_tx_status(void *priv, struct ieee80211_supported_band *sband, struct ieee80211_sta *sta, void *priv_sta, struct sk_buff *skb) { struct minstrel_ht_sta_priv *msp = priv_sta; struct minstrel_ht_sta *mi = &msp->ht; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_tx_rate *ar = info->status.rates; struct minstrel_rate_stats *rate, *rate2; struct minstrel_priv *mp = priv; bool last = false; int group; int i = 0; if (!msp->is_ht) return mac80211_minstrel.tx_status(priv, sband, sta, &msp->legacy, skb); /* This packet was aggregated but doesn't carry status info */ if ((info->flags & IEEE80211_TX_CTL_AMPDU) && !(info->flags & IEEE80211_TX_STAT_AMPDU)) return; if (!(info->flags & IEEE80211_TX_STAT_AMPDU)) { info->status.ampdu_ack_len = (info->flags & IEEE80211_TX_STAT_ACK ? 
1 : 0); info->status.ampdu_len = 1; } mi->ampdu_packets++; mi->ampdu_len += info->status.ampdu_len; if (!mi->sample_wait && !mi->sample_tries && mi->sample_count > 0) { mi->sample_wait = 16 + 2 * MINSTREL_TRUNC(mi->avg_ampdu_len); mi->sample_tries = 2; mi->sample_count--; } if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) mi->sample_packets += info->status.ampdu_len; for (i = 0; !last; i++) { last = (i == IEEE80211_TX_MAX_RATES - 1) || !minstrel_ht_txstat_valid(&ar[i + 1]); if (!minstrel_ht_txstat_valid(&ar[i])) break; group = minstrel_ht_get_group_idx(&ar[i]); rate = &mi->groups[group].rates[ar[i].idx % 8]; if (last) rate->success += info->status.ampdu_ack_len; rate->attempts += ar[i].count * info->status.ampdu_len; } /* * check for sudden death of spatial multiplexing, * downgrade to a lower number of streams if necessary. */ rate = minstrel_get_ratestats(mi, mi->max_tp_rate); if (rate->attempts > 30 && MINSTREL_FRAC(rate->success, rate->attempts) < MINSTREL_FRAC(20, 100)) minstrel_downgrade_rate(mi, &mi->max_tp_rate, true); rate2 = minstrel_get_ratestats(mi, mi->max_tp_rate2); if (rate2->attempts > 30 && MINSTREL_FRAC(rate2->success, rate2->attempts) < MINSTREL_FRAC(20, 100)) minstrel_downgrade_rate(mi, &mi->max_tp_rate2, false); if (time_after(jiffies, mi->stats_update + (mp->update_interval / 2 * HZ) / 1000)) { minstrel_ht_update_stats(mp, mi); if (!(info->flags & IEEE80211_TX_CTL_AMPDU)) minstrel_aggr_check(sta, skb); } } static void minstrel_calc_retransmit(struct minstrel_priv *mp, struct minstrel_ht_sta *mi, int index) { struct minstrel_rate_stats *mr; const struct mcs_group *group; unsigned int tx_time, tx_time_rtscts, tx_time_data; unsigned int cw = mp->cw_min; unsigned int ctime = 0; unsigned int t_slot = 9; /* FIXME */ unsigned int ampdu_len = MINSTREL_TRUNC(mi->avg_ampdu_len); mr = minstrel_get_ratestats(mi, index); if (mr->probability < MINSTREL_FRAC(1, 10)) { mr->retry_count = 1; mr->retry_count_rtscts = 1; return; } mr->retry_count = 2; 
mr->retry_count_rtscts = 2; mr->retry_updated = true; group = &minstrel_mcs_groups[index / MCS_GROUP_RATES]; tx_time_data = group->duration[index % MCS_GROUP_RATES] * ampdu_len; /* Contention time for first 2 tries */ ctime = (t_slot * cw) >> 1; cw = min((cw << 1) | 1, mp->cw_max); ctime += (t_slot * cw) >> 1; cw = min((cw << 1) | 1, mp->cw_max); /* Total TX time for data and Contention after first 2 tries */ tx_time = ctime + 2 * (mi->overhead + tx_time_data); tx_time_rtscts = ctime + 2 * (mi->overhead_rtscts + tx_time_data); /* See how many more tries we can fit inside segment size */ do { /* Contention time for this try */ ctime = (t_slot * cw) >> 1; cw = min((cw << 1) | 1, mp->cw_max); /* Total TX time after this try */ tx_time += ctime + mi->overhead + tx_time_data; tx_time_rtscts += ctime + mi->overhead_rtscts + tx_time_data; if (tx_time_rtscts < mp->segment_size) mr->retry_count_rtscts++; } while ((tx_time < mp->segment_size) && (++mr->retry_count < mp->max_retry)); } static void minstrel_ht_set_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi, struct ieee80211_tx_rate *rate, int index, bool sample, bool rtscts) { const struct mcs_group *group = &minstrel_mcs_groups[index / MCS_GROUP_RATES]; struct minstrel_rate_stats *mr; mr = minstrel_get_ratestats(mi, index); if (!mr->retry_updated) minstrel_calc_retransmit(mp, mi, index); if (sample) rate->count = 1; else if (mr->probability < MINSTREL_FRAC(20, 100)) rate->count = 2; else if (rtscts) rate->count = mr->retry_count_rtscts; else rate->count = mr->retry_count; rate->flags = IEEE80211_TX_RC_MCS | group->flags; if (rtscts) rate->flags |= IEEE80211_TX_RC_USE_RTS_CTS; rate->idx = index % MCS_GROUP_RATES + (group->streams - 1) * MCS_GROUP_RATES; } static inline int minstrel_get_duration(int index) { const struct mcs_group *group = &minstrel_mcs_groups[index / MCS_GROUP_RATES]; return group->duration[index % MCS_GROUP_RATES]; } static int minstrel_get_sample_rate(struct minstrel_priv *mp, struct 
minstrel_ht_sta *mi) { struct minstrel_rate_stats *mr; struct minstrel_mcs_group_data *mg; int sample_idx = 0; if (mi->sample_wait > 0) { mi->sample_wait--; return -1; } if (!mi->sample_tries) return -1; mi->sample_tries--; mg = &mi->groups[mi->sample_group]; sample_idx = sample_table[mg->column][mg->index]; mr = &mg->rates[sample_idx]; sample_idx += mi->sample_group * MCS_GROUP_RATES; minstrel_next_sample_idx(mi); /* * Sampling might add some overhead (RTS, no aggregation) * to the frame. Hence, don't use sampling for the currently * used max TP rate. */ if (sample_idx == mi->max_tp_rate) return -1; /* * When not using MRR, do not sample if the probability is already * higher than 95% to avoid wasting airtime */ if (!mp->has_mrr && (mr->probability > MINSTREL_FRAC(95, 100))) return -1; /* * Make sure that lower rates get sampled only occasionally, * if the link is working perfectly. */ if (minstrel_get_duration(sample_idx) > minstrel_get_duration(mi->max_tp_rate)) { if (mr->sample_skipped < 20) return -1; if (mi->sample_slow++ > 2) return -1; } return sample_idx; } static void minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta, struct ieee80211_tx_rate_control *txrc) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(txrc->skb); struct ieee80211_tx_rate *ar = info->status.rates; struct minstrel_ht_sta_priv *msp = priv_sta; struct minstrel_ht_sta *mi = &msp->ht; struct minstrel_priv *mp = priv; int sample_idx; bool sample = false; if (rate_control_send_low(sta, priv_sta, txrc)) return; if (!msp->is_ht) return mac80211_minstrel.get_rate(priv, sta, &msp->legacy, txrc); info->flags |= mi->tx_flags; /* Don't use EAPOL frames for sampling on non-mrr hw */ if (mp->hw->max_rates == 1 && txrc->skb->protocol == cpu_to_be16(ETH_P_PAE)) sample_idx = -1; else sample_idx = minstrel_get_sample_rate(mp, mi); #ifdef CONFIG_MAC80211_DEBUGFS /* use fixed index if set */ if (mp->fixed_rate_idx != -1) sample_idx = mp->fixed_rate_idx; #endif if (sample_idx >= 
0) { sample = true; minstrel_ht_set_rate(mp, mi, &ar[0], sample_idx, true, false); info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE; } else { minstrel_ht_set_rate(mp, mi, &ar[0], mi->max_tp_rate, false, false); } if (mp->hw->max_rates >= 3) { /* * At least 3 tx rates supported, use * sample_rate -> max_tp_rate -> max_prob_rate for sampling and * max_tp_rate -> max_tp_rate2 -> max_prob_rate by default. */ if (sample_idx >= 0) minstrel_ht_set_rate(mp, mi, &ar[1], mi->max_tp_rate, false, false); else minstrel_ht_set_rate(mp, mi, &ar[1], mi->max_tp_rate2, false, true); minstrel_ht_set_rate(mp, mi, &ar[2], mi->max_prob_rate, false, !sample); ar[3].count = 0; ar[3].idx = -1; } else if (mp->hw->max_rates == 2) { /* * Only 2 tx rates supported, use * sample_rate -> max_prob_rate for sampling and * max_tp_rate -> max_prob_rate by default. */ minstrel_ht_set_rate(mp, mi, &ar[1], mi->max_prob_rate, false, !sample); ar[2].count = 0; ar[2].idx = -1; } else { /* Not using MRR, only use the first rate */ ar[1].count = 0; ar[1].idx = -1; } mi->total_packets++; /* wraparound */ if (mi->total_packets == ~0) { mi->total_packets = 0; mi->sample_packets = 0; } } static void minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband, struct ieee80211_sta *sta, void *priv_sta, enum nl80211_channel_type oper_chan_type) { struct minstrel_priv *mp = priv; struct minstrel_ht_sta_priv *msp = priv_sta; struct minstrel_ht_sta *mi = &msp->ht; struct ieee80211_mcs_info *mcs = &sta->ht_cap.mcs; struct ieee80211_local *local = hw_to_local(mp->hw); u16 sta_cap = sta->ht_cap.cap; int n_supported = 0; int ack_dur; int stbc; int i; unsigned int smps; /* fall back to the old minstrel for legacy stations */ if (!sta->ht_cap.ht_supported) goto use_legacy; BUILD_BUG_ON(ARRAY_SIZE(minstrel_mcs_groups) != MINSTREL_MAX_STREAMS * MINSTREL_STREAM_GROUPS); msp->is_ht = true; memset(mi, 0, sizeof(*mi)); mi->stats_update = jiffies; ack_dur = ieee80211_frame_duration(local, 10, 60, 1, 1); 
mi->overhead = ieee80211_frame_duration(local, 0, 60, 1, 1) + ack_dur; mi->overhead_rtscts = mi->overhead + 2 * ack_dur; mi->avg_ampdu_len = MINSTREL_FRAC(1, 1); /* When using MRR, sample more on the first attempt, without delay */ if (mp->has_mrr) { mi->sample_count = 16; mi->sample_wait = 0; } else { mi->sample_count = 8; mi->sample_wait = 8; } mi->sample_tries = 4; stbc = (sta_cap & IEEE80211_HT_CAP_RX_STBC) >> IEEE80211_HT_CAP_RX_STBC_SHIFT; mi->tx_flags |= stbc << IEEE80211_TX_CTL_STBC_SHIFT; if (sta_cap & IEEE80211_HT_CAP_LDPC_CODING) mi->tx_flags |= IEEE80211_TX_CTL_LDPC; if (oper_chan_type != NL80211_CHAN_HT40MINUS && oper_chan_type != NL80211_CHAN_HT40PLUS) sta_cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40; smps = (sta_cap & IEEE80211_HT_CAP_SM_PS) >> IEEE80211_HT_CAP_SM_PS_SHIFT; for (i = 0; i < ARRAY_SIZE(mi->groups); i++) { u16 req = 0; mi->groups[i].supported = 0; if (minstrel_mcs_groups[i].flags & IEEE80211_TX_RC_SHORT_GI) { if (minstrel_mcs_groups[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH) req |= IEEE80211_HT_CAP_SGI_40; else req |= IEEE80211_HT_CAP_SGI_20; } if (minstrel_mcs_groups[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH) req |= IEEE80211_HT_CAP_SUP_WIDTH_20_40; if ((sta_cap & req) != req) continue; /* Mark MCS > 7 as unsupported if STA is in static SMPS mode */ if (smps == WLAN_HT_CAP_SM_PS_STATIC && minstrel_mcs_groups[i].streams > 1) continue; mi->groups[i].supported = mcs->rx_mask[minstrel_mcs_groups[i].streams - 1]; if (mi->groups[i].supported) n_supported++; } if (!n_supported) goto use_legacy; return; use_legacy: msp->is_ht = false; memset(&msp->legacy, 0, sizeof(msp->legacy)); msp->legacy.r = msp->ratelist; msp->legacy.sample_table = msp->sample_table; return mac80211_minstrel.rate_init(priv, sband, sta, &msp->legacy); } static void minstrel_ht_rate_init(void *priv, struct ieee80211_supported_band *sband, struct ieee80211_sta *sta, void *priv_sta) { struct minstrel_priv *mp = priv; minstrel_ht_update_caps(priv, sband, sta, priv_sta, 
mp->hw->conf.channel_type); } static void minstrel_ht_rate_update(void *priv, struct ieee80211_supported_band *sband, struct ieee80211_sta *sta, void *priv_sta, u32 changed, enum nl80211_channel_type oper_chan_type) { minstrel_ht_update_caps(priv, sband, sta, priv_sta, oper_chan_type); } static void * minstrel_ht_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp) { struct ieee80211_supported_band *sband; struct minstrel_ht_sta_priv *msp; struct minstrel_priv *mp = priv; struct ieee80211_hw *hw = mp->hw; int max_rates = 0; int i; for (i = 0; i < IEEE80211_NUM_BANDS; i++) { sband = hw->wiphy->bands[i]; if (sband && sband->n_bitrates > max_rates) max_rates = sband->n_bitrates; } msp = kzalloc(sizeof(struct minstrel_ht_sta), gfp); if (!msp) return NULL; msp->ratelist = kzalloc(sizeof(struct minstrel_rate) * max_rates, gfp); if (!msp->ratelist) goto error; msp->sample_table = kmalloc(SAMPLE_COLUMNS * max_rates, gfp); if (!msp->sample_table) goto error1; return msp; error1: kfree(msp->ratelist); error: kfree(msp); return NULL; } static void minstrel_ht_free_sta(void *priv, struct ieee80211_sta *sta, void *priv_sta) { struct minstrel_ht_sta_priv *msp = priv_sta; kfree(msp->sample_table); kfree(msp->ratelist); kfree(msp); } static void * minstrel_ht_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir) { return mac80211_minstrel.alloc(hw, debugfsdir); } static void minstrel_ht_free(void *priv) { mac80211_minstrel.free(priv); } static struct rate_control_ops mac80211_minstrel_ht = { .name = "minstrel_ht", .tx_status = minstrel_ht_tx_status, .get_rate = minstrel_ht_get_rate, .rate_init = minstrel_ht_rate_init, .rate_update = minstrel_ht_rate_update, .alloc_sta = minstrel_ht_alloc_sta, .free_sta = minstrel_ht_free_sta, .alloc = minstrel_ht_alloc, .free = minstrel_ht_free, #ifdef CONFIG_MAC80211_DEBUGFS .add_sta_debugfs = minstrel_ht_add_sta_debugfs, .remove_sta_debugfs = minstrel_ht_remove_sta_debugfs, #endif }; static void init_sample_table(void) { int col, i, 
new_idx; u8 rnd[MCS_GROUP_RATES]; memset(sample_table, 0xff, sizeof(sample_table)); for (col = 0; col < SAMPLE_COLUMNS; col++) { for (i = 0; i < MCS_GROUP_RATES; i++) { get_random_bytes(rnd, sizeof(rnd)); new_idx = (i + rnd[i]) % MCS_GROUP_RATES; while (sample_table[col][new_idx] != 0xff) new_idx = (new_idx + 1) % MCS_GROUP_RATES; sample_table[col][new_idx] = i; } } } int __init rc80211_minstrel_ht_init(void) { init_sample_table(); return ieee80211_rate_control_register(&mac80211_minstrel_ht); } void rc80211_minstrel_ht_exit(void) { ieee80211_rate_control_unregister(&mac80211_minstrel_ht); }
gpl-2.0
krystianpe/massive-ninja
drivers/isdn/pcbit/layer2.c
4837
14489
/* * PCBIT-D low-layer interface * * Copyright (C) 1996 Universidade de Lisboa * * Written by Pedro Roque Marques (roque@di.fc.ul.pt) * * This software may be used and distributed according to the terms of * the GNU General Public License, incorporated herein by reference. */ /* * 19991203 - Fernando Carvalho - takion@superbofh.org * Hacked to compile with egcs and run with current version of isdn modules */ /* * Based on documentation provided by Inesc: * - "Interface com bus do PC para o PCBIT e PCBIT-D", Inesc, Jan 93 */ /* * TODO: better handling of errors * re-write/remove debug printks */ #include <linux/string.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/workqueue.h> #include <linux/mm.h> #include <linux/skbuff.h> #include <linux/isdnif.h> #include <asm/system.h> #include <asm/io.h> #include "pcbit.h" #include "layer2.h" #include "edss1.h" #undef DEBUG_FRAG /* * Prototypes */ static void pcbit_transmit(struct pcbit_dev *dev); static void pcbit_recv_ack(struct pcbit_dev *dev, unsigned char ack); static void pcbit_l2_error(struct pcbit_dev *dev); static void pcbit_l2_active_conf(struct pcbit_dev *dev, u_char info); static void pcbit_l2_err_recover(unsigned long data); static void pcbit_firmware_bug(struct pcbit_dev *dev); static __inline__ void pcbit_sched_delivery(struct pcbit_dev *dev) { schedule_work(&dev->qdelivery); } /* * Called from layer3 */ int pcbit_l2_write(struct pcbit_dev *dev, ulong msg, ushort refnum, struct sk_buff *skb, unsigned short hdr_len) { struct frame_buf *frame, *ptr; unsigned long flags; if (dev->l2_state != L2_RUNNING && dev->l2_state != L2_LOADING) { dev_kfree_skb(skb); return -1; } if ((frame = kmalloc(sizeof(struct frame_buf), GFP_ATOMIC)) == NULL) { printk(KERN_WARNING "pcbit_2_write: kmalloc failed\n"); dev_kfree_skb(skb); return -1; } frame->msg = msg; frame->refnum = refnum; frame->copied = 0; frame->hdr_len = hdr_len; if 
(skb) frame->dt_len = skb->len - hdr_len; else frame->dt_len = 0; frame->skb = skb; frame->next = NULL; spin_lock_irqsave(&dev->lock, flags); if (dev->write_queue == NULL) { dev->write_queue = frame; spin_unlock_irqrestore(&dev->lock, flags); pcbit_transmit(dev); } else { for (ptr = dev->write_queue; ptr->next; ptr = ptr->next); ptr->next = frame; spin_unlock_irqrestore(&dev->lock, flags); } return 0; } static __inline__ void pcbit_tx_update(struct pcbit_dev *dev, ushort len) { u_char info; dev->send_seq = (dev->send_seq + 1) % 8; dev->fsize[dev->send_seq] = len; info = 0; info |= dev->rcv_seq << 3; info |= dev->send_seq; writeb(info, dev->sh_mem + BANK4); } /* * called by interrupt service routine or by write_2 */ static void pcbit_transmit(struct pcbit_dev *dev) { struct frame_buf *frame = NULL; unsigned char unacked; int flen; /* fragment frame length including all headers */ int free; int count, cp_len; unsigned long flags; unsigned short tt; if (dev->l2_state != L2_RUNNING && dev->l2_state != L2_LOADING) return; unacked = (dev->send_seq + (8 - dev->unack_seq)) & 0x07; spin_lock_irqsave(&dev->lock, flags); if (dev->free > 16 && dev->write_queue && unacked < 7) { if (!dev->w_busy) dev->w_busy = 1; else { spin_unlock_irqrestore(&dev->lock, flags); return; } frame = dev->write_queue; free = dev->free; spin_unlock_irqrestore(&dev->lock, flags); if (frame->copied == 0) { /* Type 0 frame */ ulong msg; if (frame->skb) flen = FRAME_HDR_LEN + PREHDR_LEN + frame->skb->len; else flen = FRAME_HDR_LEN + PREHDR_LEN; if (flen > free) flen = free; msg = frame->msg; /* * Board level 2 header */ pcbit_writew(dev, flen - FRAME_HDR_LEN); pcbit_writeb(dev, GET_MSG_CPU(msg)); pcbit_writeb(dev, GET_MSG_PROC(msg)); /* TH */ pcbit_writew(dev, frame->hdr_len + PREHDR_LEN); /* TD */ pcbit_writew(dev, frame->dt_len); /* * Board level 3 fixed-header */ /* LEN = TH */ pcbit_writew(dev, frame->hdr_len + PREHDR_LEN); /* XX */ pcbit_writew(dev, 0); /* C + S */ pcbit_writeb(dev, 
GET_MSG_CMD(msg)); pcbit_writeb(dev, GET_MSG_SCMD(msg)); /* NUM */ pcbit_writew(dev, frame->refnum); count = FRAME_HDR_LEN + PREHDR_LEN; } else { /* Type 1 frame */ flen = 2 + (frame->skb->len - frame->copied); if (flen > free) flen = free; /* TT */ tt = ((ushort) (flen - 2)) | 0x8000U; /* Type 1 */ pcbit_writew(dev, tt); count = 2; } if (frame->skb) { cp_len = frame->skb->len - frame->copied; if (cp_len > flen - count) cp_len = flen - count; memcpy_topcbit(dev, frame->skb->data + frame->copied, cp_len); frame->copied += cp_len; } /* bookkeeping */ dev->free -= flen; pcbit_tx_update(dev, flen); spin_lock_irqsave(&dev->lock, flags); if (frame->skb == NULL || frame->copied == frame->skb->len) { dev->write_queue = frame->next; if (frame->skb != NULL) { /* free frame */ dev_kfree_skb(frame->skb); } kfree(frame); } dev->w_busy = 0; spin_unlock_irqrestore(&dev->lock, flags); } else { spin_unlock_irqrestore(&dev->lock, flags); #ifdef DEBUG printk(KERN_DEBUG "unacked %d free %d write_queue %s\n", unacked, dev->free, dev->write_queue ? 
"not empty" : "empty"); #endif } } /* * deliver a queued frame to the upper layer */ void pcbit_deliver(struct work_struct *work) { struct frame_buf *frame; unsigned long flags, msg; struct pcbit_dev *dev = container_of(work, struct pcbit_dev, qdelivery); spin_lock_irqsave(&dev->lock, flags); while ((frame = dev->read_queue)) { dev->read_queue = frame->next; spin_unlock_irqrestore(&dev->lock, flags); msg = 0; SET_MSG_CPU(msg, 0); SET_MSG_PROC(msg, 0); SET_MSG_CMD(msg, frame->skb->data[2]); SET_MSG_SCMD(msg, frame->skb->data[3]); frame->refnum = *((ushort *) frame->skb->data + 4); frame->msg = *((ulong *) & msg); skb_pull(frame->skb, 6); pcbit_l3_receive(dev, frame->msg, frame->skb, frame->hdr_len, frame->refnum); kfree(frame); spin_lock_irqsave(&dev->lock, flags); } spin_unlock_irqrestore(&dev->lock, flags); } /* * Reads BANK 2 & Reassembles */ static void pcbit_receive(struct pcbit_dev *dev) { unsigned short tt; u_char cpu, proc; struct frame_buf *frame = NULL; unsigned long flags; u_char type1; if (dev->l2_state != L2_RUNNING && dev->l2_state != L2_LOADING) return; tt = pcbit_readw(dev); if ((tt & 0x7fffU) > 511) { printk(KERN_INFO "pcbit: invalid frame length -> TT=%04x\n", tt); pcbit_l2_error(dev); return; } if (!(tt & 0x8000U)) { /* Type 0 */ type1 = 0; if (dev->read_frame) { printk(KERN_DEBUG "pcbit_receive: Type 0 frame and read_frame != NULL\n"); /* discard previous queued frame */ kfree_skb(dev->read_frame->skb); kfree(dev->read_frame); dev->read_frame = NULL; } frame = kzalloc(sizeof(struct frame_buf), GFP_ATOMIC); if (frame == NULL) { printk(KERN_WARNING "kmalloc failed\n"); return; } cpu = pcbit_readb(dev); proc = pcbit_readb(dev); if (cpu != 0x06 && cpu != 0x02) { printk(KERN_DEBUG "pcbit: invalid cpu value\n"); kfree(frame); pcbit_l2_error(dev); return; } /* * we discard cpu & proc on receiving * but we read it to update the pointer */ frame->hdr_len = pcbit_readw(dev); frame->dt_len = pcbit_readw(dev); /* * 0 sized packet * I don't know if they are 
an error or not... * But they are very frequent * Not documented */ if (frame->hdr_len == 0) { kfree(frame); #ifdef DEBUG printk(KERN_DEBUG "0 sized frame\n"); #endif pcbit_firmware_bug(dev); return; } /* sanity check the length values */ if (frame->hdr_len > 1024 || frame->dt_len > 2048) { #ifdef DEBUG printk(KERN_DEBUG "length problem: "); printk(KERN_DEBUG "TH=%04x TD=%04x\n", frame->hdr_len, frame->dt_len); #endif pcbit_l2_error(dev); kfree(frame); return; } /* minimum frame read */ frame->skb = dev_alloc_skb(frame->hdr_len + frame->dt_len + ((frame->hdr_len + 15) & ~15)); if (!frame->skb) { printk(KERN_DEBUG "pcbit_receive: out of memory\n"); kfree(frame); return; } /* 16 byte alignment for IP */ if (frame->dt_len) skb_reserve(frame->skb, (frame->hdr_len + 15) & ~15); } else { /* Type 1 */ type1 = 1; tt &= 0x7fffU; if (!(frame = dev->read_frame)) { printk("Type 1 frame and no frame queued\n"); /* usually after an error: toss frame */ dev->readptr += tt; if (dev->readptr > dev->sh_mem + BANK2 + BANKLEN) dev->readptr -= BANKLEN; return; } } memcpy_frompcbit(dev, skb_put(frame->skb, tt), tt); frame->copied += tt; spin_lock_irqsave(&dev->lock, flags); if (frame->copied == frame->hdr_len + frame->dt_len) { if (type1) { dev->read_frame = NULL; } if (dev->read_queue) { struct frame_buf *ptr; for (ptr = dev->read_queue; ptr->next; ptr = ptr->next); ptr->next = frame; } else dev->read_queue = frame; } else { dev->read_frame = frame; } spin_unlock_irqrestore(&dev->lock, flags); } /* * The board sends 0 sized frames * They are TDATA_CONFs that get messed up somehow * gotta send a fake acknowledgment to the upper layer somehow */ static __inline__ void pcbit_fake_conf(struct pcbit_dev *dev, struct pcbit_chan *chan) { isdn_ctrl ictl; if (chan->queued) { chan->queued--; ictl.driver = dev->id; ictl.command = ISDN_STAT_BSENT; ictl.arg = chan->id; dev->dev_if->statcallb(&ictl); } } static void pcbit_firmware_bug(struct pcbit_dev *dev) { struct pcbit_chan *chan; chan = dev->b1; 
if (chan->fsm_state == ST_ACTIVE) { pcbit_fake_conf(dev, chan); } chan = dev->b2; if (chan->fsm_state == ST_ACTIVE) { pcbit_fake_conf(dev, chan); } } irqreturn_t pcbit_irq_handler(int interrupt, void *devptr) { struct pcbit_dev *dev; u_char info, ack_seq, read_seq; dev = (struct pcbit_dev *) devptr; if (!dev) { printk(KERN_WARNING "pcbit_irq_handler: wrong device\n"); return IRQ_NONE; } if (dev->interrupt) { printk(KERN_DEBUG "pcbit: reentering interrupt hander\n"); return IRQ_HANDLED; } dev->interrupt = 1; info = readb(dev->sh_mem + BANK3); if (dev->l2_state == L2_STARTING || dev->l2_state == L2_ERROR) { pcbit_l2_active_conf(dev, info); dev->interrupt = 0; return IRQ_HANDLED; } if (info & 0x40U) { /* E bit set */ #ifdef DEBUG printk(KERN_DEBUG "pcbit_irq_handler: E bit on\n"); #endif pcbit_l2_error(dev); dev->interrupt = 0; return IRQ_HANDLED; } if (dev->l2_state != L2_RUNNING && dev->l2_state != L2_LOADING) { dev->interrupt = 0; return IRQ_HANDLED; } ack_seq = (info >> 3) & 0x07U; read_seq = (info & 0x07U); dev->interrupt = 0; if (read_seq != dev->rcv_seq) { while (read_seq != dev->rcv_seq) { pcbit_receive(dev); dev->rcv_seq = (dev->rcv_seq + 1) % 8; } pcbit_sched_delivery(dev); } if (ack_seq != dev->unack_seq) { pcbit_recv_ack(dev, ack_seq); } info = dev->rcv_seq << 3; info |= dev->send_seq; writeb(info, dev->sh_mem + BANK4); return IRQ_HANDLED; } static void pcbit_l2_active_conf(struct pcbit_dev *dev, u_char info) { u_char state; state = dev->l2_state; #ifdef DEBUG printk(KERN_DEBUG "layer2_active_confirm\n"); #endif if (info & 0x80U) { dev->rcv_seq = info & 0x07U; dev->l2_state = L2_RUNNING; } else dev->l2_state = L2_DOWN; if (state == L2_STARTING) wake_up_interruptible(&dev->set_running_wq); if (state == L2_ERROR && dev->l2_state == L2_RUNNING) { pcbit_transmit(dev); } } static void pcbit_l2_err_recover(unsigned long data) { struct pcbit_dev *dev; struct frame_buf *frame; dev = (struct pcbit_dev *) data; del_timer(&dev->error_recover_timer); if (dev->w_busy 
|| dev->r_busy) { init_timer(&dev->error_recover_timer); dev->error_recover_timer.expires = jiffies + ERRTIME; add_timer(&dev->error_recover_timer); return; } dev->w_busy = dev->r_busy = 1; if (dev->read_frame) { kfree_skb(dev->read_frame->skb); kfree(dev->read_frame); dev->read_frame = NULL; } if (dev->write_queue) { frame = dev->write_queue; #ifdef FREE_ON_ERROR dev->write_queue = dev->write_queue->next; if (frame->skb) { dev_kfree_skb(frame->skb); } kfree(frame); #else frame->copied = 0; #endif } dev->rcv_seq = dev->send_seq = dev->unack_seq = 0; dev->free = 511; dev->l2_state = L2_ERROR; /* this is an hack... */ pcbit_firmware_bug(dev); dev->writeptr = dev->sh_mem; dev->readptr = dev->sh_mem + BANK2; writeb((0x80U | ((dev->rcv_seq & 0x07) << 3) | (dev->send_seq & 0x07)), dev->sh_mem + BANK4); dev->w_busy = dev->r_busy = 0; } static void pcbit_l2_error(struct pcbit_dev *dev) { if (dev->l2_state == L2_RUNNING) { printk(KERN_INFO "pcbit: layer 2 error\n"); #ifdef DEBUG log_state(dev); #endif dev->l2_state = L2_DOWN; init_timer(&dev->error_recover_timer); dev->error_recover_timer.function = &pcbit_l2_err_recover; dev->error_recover_timer.data = (ulong) dev; dev->error_recover_timer.expires = jiffies + ERRTIME; add_timer(&dev->error_recover_timer); } } /* * Description: * if board acks frames * update dev->free * call pcbit_transmit to write possible queued frames */ static void pcbit_recv_ack(struct pcbit_dev *dev, unsigned char ack) { int i, count; int unacked; unacked = (dev->send_seq + (8 - dev->unack_seq)) & 0x07; /* dev->unack_seq < ack <= dev->send_seq; */ if (unacked) { if (dev->send_seq > dev->unack_seq) { if (ack <= dev->unack_seq || ack > dev->send_seq) { printk(KERN_DEBUG "layer 2 ack unacceptable - dev %d", dev->id); pcbit_l2_error(dev); } else if (ack > dev->send_seq && ack <= dev->unack_seq) { printk(KERN_DEBUG "layer 2 ack unacceptable - dev %d", dev->id); pcbit_l2_error(dev); } } /* ack is acceptable */ i = dev->unack_seq; do { dev->unack_seq = i = 
(i + 1) % 8; dev->free += dev->fsize[i]; } while (i != ack); count = 0; while (count < 7 && dev->write_queue) { u8 lsend_seq = dev->send_seq; pcbit_transmit(dev); if (dev->send_seq == lsend_seq) break; count++; } } else printk(KERN_DEBUG "recv_ack: unacked = 0\n"); }
gpl-2.0
yun3195/android_kernel_ZTE_Z5S
net/netfilter/xt_mac.c
8677
1791
/* Kernel module to match MAC address parameters. */ /* (C) 1999-2001 Paul `Rusty' Russell * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/skbuff.h> #include <linux/if_arp.h> #include <linux/if_ether.h> #include <linux/etherdevice.h> #include <linux/netfilter_ipv4.h> #include <linux/netfilter_ipv6.h> #include <linux/netfilter/xt_mac.h> #include <linux/netfilter/x_tables.h> MODULE_LICENSE("GPL"); MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>"); MODULE_DESCRIPTION("Xtables: MAC address match"); MODULE_ALIAS("ipt_mac"); MODULE_ALIAS("ip6t_mac"); static bool mac_mt(const struct sk_buff *skb, struct xt_action_param *par) { const struct xt_mac_info *info = par->matchinfo; bool ret; if (skb->dev == NULL || skb->dev->type != ARPHRD_ETHER) return false; if (skb_mac_header(skb) < skb->head) return false; if (skb_mac_header(skb) + ETH_HLEN > skb->data) return false; ret = compare_ether_addr(eth_hdr(skb)->h_source, info->srcaddr) == 0; ret ^= info->invert; return ret; } static struct xt_match mac_mt_reg __read_mostly = { .name = "mac", .revision = 0, .family = NFPROTO_UNSPEC, .match = mac_mt, .matchsize = sizeof(struct xt_mac_info), .hooks = (1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_IN) | (1 << NF_INET_FORWARD), .me = THIS_MODULE, }; static int __init mac_mt_init(void) { return xt_register_match(&mac_mt_reg); } static void __exit mac_mt_exit(void) { xt_unregister_match(&mac_mt_reg); } module_init(mac_mt_init); module_exit(mac_mt_exit);
gpl-2.0
gabrielleLQX/arm-none-eabi_install
net/netfilter/xt_mac.c
8677
1791
/* Kernel module to match MAC address parameters. */ /* (C) 1999-2001 Paul `Rusty' Russell * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/skbuff.h> #include <linux/if_arp.h> #include <linux/if_ether.h> #include <linux/etherdevice.h> #include <linux/netfilter_ipv4.h> #include <linux/netfilter_ipv6.h> #include <linux/netfilter/xt_mac.h> #include <linux/netfilter/x_tables.h> MODULE_LICENSE("GPL"); MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>"); MODULE_DESCRIPTION("Xtables: MAC address match"); MODULE_ALIAS("ipt_mac"); MODULE_ALIAS("ip6t_mac"); static bool mac_mt(const struct sk_buff *skb, struct xt_action_param *par) { const struct xt_mac_info *info = par->matchinfo; bool ret; if (skb->dev == NULL || skb->dev->type != ARPHRD_ETHER) return false; if (skb_mac_header(skb) < skb->head) return false; if (skb_mac_header(skb) + ETH_HLEN > skb->data) return false; ret = compare_ether_addr(eth_hdr(skb)->h_source, info->srcaddr) == 0; ret ^= info->invert; return ret; } static struct xt_match mac_mt_reg __read_mostly = { .name = "mac", .revision = 0, .family = NFPROTO_UNSPEC, .match = mac_mt, .matchsize = sizeof(struct xt_mac_info), .hooks = (1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_IN) | (1 << NF_INET_FORWARD), .me = THIS_MODULE, }; static int __init mac_mt_init(void) { return xt_register_match(&mac_mt_reg); } static void __exit mac_mt_exit(void) { xt_unregister_match(&mac_mt_reg); } module_init(mac_mt_init); module_exit(mac_mt_exit);
gpl-2.0
crpalmer/android_kernel_huawei_kiwi
drivers/net/arcnet/com90io.c
9701
11127
/*
 * Linux ARCnet driver - COM90xx chipset (IO-mapped buffers)
 *
 * Written 1997 by David Woodhouse.
 * Written 1994-1999 by Avery Pennarun.
 * Written 1999-2000 by Martin Mares <mj@ucw.cz>.
 * Derived from skeleton.c by Donald Becker.
 *
 * Special thanks to Contemporary Controls, Inc. (www.ccontrols.com)
 * for sponsoring the further development of this driver.
 *
 * **********************
 *
 * The original copyright of skeleton.c was as follows:
 *
 * skeleton.c Written 1993 by Donald Becker.
 * Copyright 1993 United States Government as represented by the
 * Director, National Security Agency. This software may only be used
 * and distributed according to the terms of the GNU General Public License as
 * modified by SRC, incorporated herein by reference.
 *
 * **********************
 *
 * For more details, see drivers/net/arcnet.c
 *
 * **********************
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <asm/io.h>
#include <linux/arcdevice.h>

#define VERSION "arcnet: COM90xx IO-mapped mode support (by David Woodhouse et el.)\n"

/* Internal function declarations */
static int com90io_found(struct net_device *dev);
static void com90io_command(struct net_device *dev, int command);
static int com90io_status(struct net_device *dev);
static void com90io_setmask(struct net_device *dev, int mask);
static int com90io_reset(struct net_device *dev, int really_reset);
static void com90io_copy_to_card(struct net_device *dev, int bufnum, int offset,
				 void *buf, int count);
static void com90io_copy_from_card(struct net_device *dev, int bufnum, int offset,
				   void *buf, int count);

/* Handy defines for ARCnet specific stuff */

/* The number of low I/O ports used by the card. */
#define ARCNET_TOTAL_SIZE 16

/* COM 9026 controller chip --> ARCnet register addresses.
 * All macros expand relative to a local variable named `ioaddr`,
 * which every function below is expected to declare. */
#define _INTMASK (ioaddr+0)	/* writable */
#define _STATUS (ioaddr+0)	/* readable */
#define _COMMAND (ioaddr+1)	/* writable, returns random vals on read (?) */
#define _RESET (ioaddr+8)	/* software reset (on read) */
#define _MEMDATA (ioaddr+12)	/* Data port for IO-mapped memory */
#define _ADDR_HI (ioaddr+15)	/* Control registers for said */
#define _ADDR_LO (ioaddr+14)
#define _CONFIG (ioaddr+2)	/* Configuration register */

#undef ASTATUS
#undef ACOMMAND
#undef AINTMASK

#define ASTATUS()	inb(_STATUS)
#define ACOMMAND(cmd)	outb((cmd),_COMMAND)
#define AINTMASK(msk)	outb((msk),_INTMASK)
#define SETCONF()	outb((lp->config),_CONFIG)	/* needs local `lp` in scope */

/****************************************************************************
 *                                                                          *
 * IO-mapped operation routines                                             *
 *                                                                          *
 ****************************************************************************/

#undef ONE_AT_A_TIME_TX
#undef ONE_AT_A_TIME_RX

/* Read one byte of card buffer memory at `offset`: latch the address
 * into the HI/LO address registers, then read the data port.
 * NOTE(review): the HI-then-LO write order appears to matter on this
 * hardware (all accessors use it) — preserve it. */
static u_char get_buffer_byte(struct net_device *dev, unsigned offset)
{
	int ioaddr = dev->base_addr;

	outb(offset >> 8, _ADDR_HI);
	outb(offset & 0xff, _ADDR_LO);

	return inb(_MEMDATA);
}

#ifdef ONE_AT_A_TIME_TX
/* Write one byte of card buffer memory at `offset` (debug/slow path,
 * compiled out by the #undef above). */
static void put_buffer_byte(struct net_device *dev, unsigned offset, u_char datum)
{
	int ioaddr = dev->base_addr;

	outb(offset >> 8, _ADDR_HI);
	outb(offset & 0xff, _ADDR_LO);

	outb(datum, _MEMDATA);
}
#endif

/* Bulk-read `length` bytes of card buffer memory starting at `offset`
 * into `dest`, using the card's auto-increment mode so the address
 * registers only need to be set once. */
static void get_whole_buffer(struct net_device *dev, unsigned offset,
			     unsigned length, char *dest)
{
	int ioaddr = dev->base_addr;

	outb((offset >> 8) | AUTOINCflag, _ADDR_HI);
	outb(offset & 0xff, _ADDR_LO);

	while (length--)
#ifdef ONE_AT_A_TIME_RX
		*(dest++) = get_buffer_byte(dev, offset++);
#else
		*(dest++) = inb(_MEMDATA);
#endif
}

/* Bulk-write `length` bytes from `dest` into card buffer memory starting
 * at `offset`, again relying on auto-increment.  (Despite the name,
 * `dest` is the source here.) */
static void put_whole_buffer(struct net_device *dev, unsigned offset,
			     unsigned length, char *dest)
{
	int ioaddr = dev->base_addr;

	outb((offset >> 8) | AUTOINCflag, _ADDR_HI);
	outb(offset & 0xff, _ADDR_LO);

	while (length--)
#ifdef ONE_AT_A_TIME_TX
		put_buffer_byte(dev, offset++, *(dest++));
#else
		outb(*(dest++), _MEMDATA);
#endif
}

/*
 * We cannot probe for an IO mapped card either, although we can check that
 * it's where we were told it was, and even autoirq
 */
/* Verify the card at dev->base_addr responds like a COM90xx in IO-mapped
 * mode (reset, status flags, 0xd1 signature byte), optionally autoprobe
 * the IRQ, then hand off to com90io_found().  Returns 0 on success or a
 * negative errno. */
static int __init com90io_probe(struct net_device *dev)
{
	int ioaddr = dev->base_addr, status;
	unsigned long airqmask;

	BUGLVL(D_NORMAL) printk(VERSION);
	BUGLVL(D_NORMAL) printk("E-mail me if you actually test this driver, please!\n");

	if (!ioaddr) {
		BUGMSG(D_NORMAL, "No autoprobe for IO mapped cards; you "
		       "must specify the base address!\n");
		return -ENODEV;
	}
	if (!request_region(ioaddr, ARCNET_TOTAL_SIZE, "com90io probe")) {
		BUGMSG(D_INIT_REASONS, "IO request_region %x-%x failed.\n",
		       ioaddr, ioaddr + ARCNET_TOTAL_SIZE - 1);
		return -ENXIO;
	}
	if (ASTATUS() == 0xFF) {
		/* All-ones status usually means nothing is decoding the port. */
		BUGMSG(D_INIT_REASONS, "IO address %x empty\n", ioaddr);
		goto err_out;
	}
	inb(_RESET);		/* reading _RESET triggers a software reset */
	mdelay(RESETtime);

	status = ASTATUS();

	if ((status & 0x9D)
	    != (NORXflag | RECONflag | TXFREEflag | RESETflag)) {
		BUGMSG(D_INIT_REASONS, "Status invalid (%Xh).\n", status);
		goto err_out;
	}
	BUGMSG(D_INIT_REASONS, "Status after reset: %X\n", status);

	ACOMMAND(CFLAGScmd | RESETclear | CONFIGclear);

	BUGMSG(D_INIT_REASONS, "Status after reset acknowledged: %X\n",
	       status);

	status = ASTATUS();

	if (status & RESETflag) {
		BUGMSG(D_INIT_REASONS, "Eternal reset (status=%Xh)\n", status);
		goto err_out;
	}
	/* Switch to IO-mapped, 8-bit mode for the signature check. */
	outb((0x16 | IOMAPflag) & ~ENABLE16flag, _CONFIG);

	/* Read first loc'n of memory */

	outb(AUTOINCflag, _ADDR_HI);
	outb(0, _ADDR_LO);

	if ((status = inb(_MEMDATA)) != 0xd1) {
		BUGMSG(D_INIT_REASONS, "Signature byte not found"
		       " (%Xh instead).\n", status);
		goto err_out;
	}
	if (!dev->irq) {
		/*
		 * if we do this, we're sure to get an IRQ since the
		 * card has just reset and the NORXflag is on until
		 * we tell it to start receiving.
		 */

		airqmask = probe_irq_on();
		outb(NORXflag, _INTMASK);
		udelay(1);
		outb(0, _INTMASK);
		dev->irq = probe_irq_off(airqmask);

		if ((int)dev->irq <= 0) {
			BUGMSG(D_INIT_REASONS, "Autoprobe IRQ failed\n");
			goto err_out;
		}
	}
	release_region(ioaddr, ARCNET_TOTAL_SIZE); /* end of probing */
	return com90io_found(dev);

err_out:
	release_region(ioaddr, ARCNET_TOTAL_SIZE);
	return -ENODEV;
}

/* Set up the struct net_device associated with this card.  Called after
 * probing succeeds.
 */
/* Claims the IRQ and I/O region for real, wires the arcnet_local hw ops
 * to the IO-mapped accessors above, reads the station ID from card memory,
 * and registers the netdevice.  Unwinds everything on failure. */
static int __init com90io_found(struct net_device *dev)
{
	struct arcnet_local *lp;
	int ioaddr = dev->base_addr;
	int err;

	/* Reserve the irq */
	if (request_irq(dev->irq, arcnet_interrupt, 0, "arcnet (COM90xx-IO)", dev)) {
		BUGMSG(D_NORMAL, "Can't get IRQ %d!\n", dev->irq);
		return -ENODEV;
	}
	/* Reserve the I/O region */
	if (!request_region(dev->base_addr, ARCNET_TOTAL_SIZE, "arcnet (COM90xx-IO)")) {
		free_irq(dev->irq, dev);
		return -EBUSY;
	}

	lp = netdev_priv(dev);
	lp->card_name = "COM90xx I/O";
	lp->hw.command = com90io_command;
	lp->hw.status = com90io_status;
	lp->hw.intmask = com90io_setmask;
	lp->hw.reset = com90io_reset;
	lp->hw.owner = THIS_MODULE;
	lp->hw.copy_to_card = com90io_copy_to_card;
	lp->hw.copy_from_card = com90io_copy_from_card;

	lp->config = (0x16 | IOMAPflag) & ~ENABLE16flag;
	SETCONF();

	/* get and check the station ID from offset 1 in shmem */

	dev->dev_addr[0] = get_buffer_byte(dev, 1);

	err = register_netdev(dev);
	if (err) {
		/* Drop back out of IO-mapped mode before bailing. */
		outb((inb(_CONFIG) & ~IOMAPflag), _CONFIG);
		free_irq(dev->irq, dev);
		release_region(dev->base_addr, ARCNET_TOTAL_SIZE);
		return err;
	}

	BUGMSG(D_NORMAL, "COM90IO: station %02Xh found at %03lXh, IRQ %d.\n",
	       dev->dev_addr[0], dev->base_addr, dev->irq);
	return 0;
}

/*
 * Do a hardware reset on the card, and set up necessary registers.
 *
 * This should be called as little as possible, because it disrupts the
 * token on the network (causes a RECON) and requires a significant delay.
 *
 * However, it does make sure the card is in a defined state.
 */
/* Returns 0 on success, 1 if the TESTvalue signature is missing after
 * reset (i.e. the card did not come back). */
static int com90io_reset(struct net_device *dev, int really_reset)
{
	struct arcnet_local *lp = netdev_priv(dev);
	short ioaddr = dev->base_addr;

	BUGMSG(D_INIT, "Resetting %s (status=%02Xh)\n", dev->name, ASTATUS());

	if (really_reset) {
		/* reset the card */
		inb(_RESET);
		mdelay(RESETtime);
	}
	/* Set the thing to IO-mapped, 8-bit mode */
	lp->config = (0x1C | IOMAPflag) & ~ENABLE16flag;
	SETCONF();

	ACOMMAND(CFLAGScmd | RESETclear);	/* clear flags & end reset */
	ACOMMAND(CFLAGScmd | CONFIGclear);

	/* verify that the ARCnet signature byte is present */
	if (get_buffer_byte(dev, 0) != TESTvalue) {
		BUGMSG(D_NORMAL, "reset failed: TESTvalue not present.\n");
		return 1;
	}
	/* enable extended (512-byte) packets */
	ACOMMAND(CONFIGcmd | EXTconf);

	/* done!  return success. */
	return 0;
}

/* hw.command hook: write a command byte to the controller. */
static void com90io_command(struct net_device *dev, int cmd)
{
	short ioaddr = dev->base_addr;

	ACOMMAND(cmd);
}

/* hw.status hook: read the controller status register. */
static int com90io_status(struct net_device *dev)
{
	short ioaddr = dev->base_addr;

	return ASTATUS();
}

/* hw.intmask hook: set the interrupt mask register. */
static void com90io_setmask(struct net_device *dev, int mask)
{
	short ioaddr = dev->base_addr;

	AINTMASK(mask);
}

/* hw.copy_to_card hook: copy `count` bytes into the 512-byte buffer
 * `bufnum` at `offset` (TIME is an arcnet debug-timing wrapper). */
static void com90io_copy_to_card(struct net_device *dev, int bufnum, int offset,
				 void *buf, int count)
{
	TIME("put_whole_buffer", count, put_whole_buffer(dev, bufnum * 512 + offset, count, buf));
}

/* hw.copy_from_card hook: copy `count` bytes out of buffer `bufnum`. */
static void com90io_copy_from_card(struct net_device *dev, int bufnum, int offset,
				   void *buf, int count)
{
	TIME("get_whole_buffer", count, get_whole_buffer(dev, bufnum * 512 + offset, count, buf));
}

static int io;			/* use the insmod io= irq= shmem= options */
static int irq;
static char device[9];		/* use eg. device=arc1 to change name */

module_param(io, int, 0);
module_param(irq, int, 0);
module_param_string(device, device, sizeof(device), 0);
MODULE_LICENSE("GPL");

#ifndef MODULE
/* Parse the "com90io=io[,irq][,name]" kernel command-line option.
 * Fallthrough in the switch is intentional: each arity sets its own
 * field and everything below it. */
static int __init com90io_setup(char *s)
{
	int ints[4];
	s = get_options(s, 4, ints);
	if (!ints[0])
		return 0;
	switch (ints[0]) {
	default:		/* ERROR */
		printk("com90io: Too many arguments.\n");
	case 2:		/* IRQ */
		irq = ints[2];
	case 1:		/* IO address */
		io = ints[1];
	}
	if (*s)
		snprintf(device, sizeof(device), "%s", s);
	return 1;
}
__setup("com90io=", com90io_setup);
#endif

/* Single-instance driver: the one probed device, kept for module exit. */
static struct net_device *my_dev;

/* Module entry point: allocate the arcnet device, apply the io=/irq=
 * parameters (mapping legacy IRQ 2 to 9), and probe. */
static int __init com90io_init(void)
{
	struct net_device *dev;
	int err;

	dev = alloc_arcdev(device);
	if (!dev)
		return -ENOMEM;

	dev->base_addr = io;
	dev->irq = irq;
	if (dev->irq == 2)
		dev->irq = 9;

	err = com90io_probe(dev);

	if (err) {
		free_netdev(dev);
		return err;
	}

	my_dev = dev;
	return 0;
}

/* Module exit: unregister, restore the card to memory-mapped mode, and
 * release all resources claimed in com90io_found(). */
static void __exit com90io_exit(void)
{
	struct net_device *dev = my_dev;
	int ioaddr = dev->base_addr;

	unregister_netdev(dev);

	/* Set the thing back to MMAP mode, in case the old driver is loaded later */
	outb((inb(_CONFIG) & ~IOMAPflag), _CONFIG);

	free_irq(dev->irq, dev);
	release_region(dev->base_addr, ARCNET_TOTAL_SIZE);
	free_netdev(dev);
}

module_init(com90io_init)
module_exit(com90io_exit)
gpl-2.0
lyapota/s7e_marshmallow
fs/ceph/addr.c
230
36484
#include <linux/ceph/ceph_debug.h> #include <linux/backing-dev.h> #include <linux/fs.h> #include <linux/mm.h> #include <linux/pagemap.h> #include <linux/writeback.h> /* generic_writepages */ #include <linux/slab.h> #include <linux/pagevec.h> #include <linux/task_io_accounting_ops.h> #include "super.h" #include "mds_client.h" #include "cache.h" #include <linux/ceph/osd_client.h> /* * Ceph address space ops. * * There are a few funny things going on here. * * The page->private field is used to reference a struct * ceph_snap_context for _every_ dirty page. This indicates which * snapshot the page was logically dirtied in, and thus which snap * context needs to be associated with the osd write during writeback. * * Similarly, struct ceph_inode_info maintains a set of counters to * count dirty pages on the inode. In the absence of snapshots, * i_wrbuffer_ref == i_wrbuffer_ref_head == the dirty page count. * * When a snapshot is taken (that is, when the client receives * notification that a snapshot was taken), each inode with caps and * with dirty pages (dirty pages implies there is a cap) gets a new * ceph_cap_snap in the i_cap_snaps list (which is sorted in ascending * order, new snaps go to the tail). The i_wrbuffer_ref_head count is * moved to capsnap->dirty. (Unless a sync write is currently in * progress. In that case, the capsnap is said to be "pending", new * writes cannot start, and the capsnap isn't "finalized" until the * write completes (or fails) and a final size/mtime for the inode for * that snap can be settled upon.) i_wrbuffer_ref_head is reset to 0. * * On writeback, we must submit writes to the osd IN SNAP ORDER. So, * we look for the first capsnap in i_cap_snaps and write out pages in * that snap context _only_. Then we move on to the next capsnap, * eventually reaching the "live" or "head" context (i.e., pages that * are not yet snapped) and are writing the most recently dirtied * pages. 
* * Invalidate and so forth must take care to ensure the dirty page * accounting is preserved. */ #define CONGESTION_ON_THRESH(congestion_kb) (congestion_kb >> (PAGE_SHIFT-10)) #define CONGESTION_OFF_THRESH(congestion_kb) \ (CONGESTION_ON_THRESH(congestion_kb) - \ (CONGESTION_ON_THRESH(congestion_kb) >> 2)) static inline struct ceph_snap_context *page_snap_context(struct page *page) { if (PagePrivate(page)) return (void *)page->private; return NULL; } /* * Dirty a page. Optimistically adjust accounting, on the assumption * that we won't race with invalidate. If we do, readjust. */ static int ceph_set_page_dirty(struct page *page) { struct address_space *mapping = page->mapping; struct inode *inode; struct ceph_inode_info *ci; struct ceph_snap_context *snapc; int ret; if (unlikely(!mapping)) return !TestSetPageDirty(page); if (PageDirty(page)) { dout("%p set_page_dirty %p idx %lu -- already dirty\n", mapping->host, page, page->index); BUG_ON(!PagePrivate(page)); return 0; } inode = mapping->host; ci = ceph_inode(inode); /* * Note that we're grabbing a snapc ref here without holding * any locks! */ snapc = ceph_get_snap_context(ci->i_snap_realm->cached_context); /* dirty the head */ spin_lock(&ci->i_ceph_lock); if (ci->i_head_snapc == NULL) ci->i_head_snapc = ceph_get_snap_context(snapc); ++ci->i_wrbuffer_ref_head; if (ci->i_wrbuffer_ref == 0) ihold(inode); ++ci->i_wrbuffer_ref; dout("%p set_page_dirty %p idx %lu head %d/%d -> %d/%d " "snapc %p seq %lld (%d snaps)\n", mapping->host, page, page->index, ci->i_wrbuffer_ref-1, ci->i_wrbuffer_ref_head-1, ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head, snapc, snapc->seq, snapc->num_snaps); spin_unlock(&ci->i_ceph_lock); /* * Reference snap context in page->private. Also set * PagePrivate so that we get invalidatepage callback. 
*/ BUG_ON(PagePrivate(page)); page->private = (unsigned long)snapc; SetPagePrivate(page); ret = __set_page_dirty_nobuffers(page); WARN_ON(!PageLocked(page)); WARN_ON(!page->mapping); return ret; } /* * If we are truncating the full page (i.e. offset == 0), adjust the * dirty page counters appropriately. Only called if there is private * data on the page. */ static void ceph_invalidatepage(struct page *page, unsigned int offset, unsigned int length) { struct inode *inode; struct ceph_inode_info *ci; struct ceph_snap_context *snapc = page_snap_context(page); inode = page->mapping->host; ci = ceph_inode(inode); if (offset != 0 || length != PAGE_CACHE_SIZE) { dout("%p invalidatepage %p idx %lu partial dirty page %u~%u\n", inode, page, page->index, offset, length); return; } ceph_invalidate_fscache_page(inode, page); if (!PagePrivate(page)) return; /* * We can get non-dirty pages here due to races between * set_page_dirty and truncate_complete_page; just spit out a * warning, in case we end up with accounting problems later. */ if (!PageDirty(page)) pr_err("%p invalidatepage %p page not dirty\n", inode, page); ClearPageChecked(page); dout("%p invalidatepage %p idx %lu full dirty page\n", inode, page, page->index); ceph_put_wrbuffer_cap_refs(ci, 1, snapc); ceph_put_snap_context(snapc); page->private = 0; ClearPagePrivate(page); } static int ceph_releasepage(struct page *page, gfp_t g) { struct inode *inode = page->mapping ? page->mapping->host : NULL; dout("%p releasepage %p idx %lu\n", inode, page, page->index); WARN_ON(PageDirty(page)); /* Can we release the page from the cache? */ if (!ceph_release_fscache_page(page, g)) return 0; return !PagePrivate(page); } /* * read a single page, without unlocking it. 
*/ static int readpage_nounlock(struct file *filp, struct page *page) { struct inode *inode = file_inode(filp); struct ceph_inode_info *ci = ceph_inode(inode); struct ceph_osd_client *osdc = &ceph_inode_to_client(inode)->client->osdc; int err = 0; u64 len = PAGE_CACHE_SIZE; err = ceph_readpage_from_fscache(inode, page); if (err == 0) goto out; dout("readpage inode %p file %p page %p index %lu\n", inode, filp, page, page->index); err = ceph_osdc_readpages(osdc, ceph_vino(inode), &ci->i_layout, (u64) page_offset(page), &len, ci->i_truncate_seq, ci->i_truncate_size, &page, 1, 0); if (err == -ENOENT) err = 0; if (err < 0) { SetPageError(page); ceph_fscache_readpage_cancel(inode, page); goto out; } if (err < PAGE_CACHE_SIZE) /* zero fill remainder of page */ zero_user_segment(page, err, PAGE_CACHE_SIZE); else flush_dcache_page(page); SetPageUptodate(page); ceph_readpage_to_fscache(inode, page); out: return err < 0 ? err : 0; } static int ceph_readpage(struct file *filp, struct page *page) { int r = readpage_nounlock(filp, page); unlock_page(page); return r; } /* * Finish an async read(ahead) op. */ static void finish_read(struct ceph_osd_request *req, struct ceph_msg *msg) { struct inode *inode = req->r_inode; struct ceph_osd_data *osd_data; int rc = req->r_result; int bytes = le32_to_cpu(msg->hdr.data_len); int num_pages; int i; dout("finish_read %p req %p rc %d bytes %d\n", inode, req, rc, bytes); /* unlock all pages, zeroing any data we didn't read */ osd_data = osd_req_op_extent_osd_data(req, 0); BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_PAGES); num_pages = calc_pages_for((u64)osd_data->alignment, (u64)osd_data->length); for (i = 0; i < num_pages; i++) { struct page *page = osd_data->pages[i]; if (rc < 0) goto unlock; if (bytes < (int)PAGE_CACHE_SIZE) { /* zero (remainder of) page */ int s = bytes < 0 ? 
0 : bytes; zero_user_segment(page, s, PAGE_CACHE_SIZE); } dout("finish_read %p uptodate %p idx %lu\n", inode, page, page->index); flush_dcache_page(page); SetPageUptodate(page); ceph_readpage_to_fscache(inode, page); unlock: unlock_page(page); page_cache_release(page); bytes -= PAGE_CACHE_SIZE; } kfree(osd_data->pages); } static void ceph_unlock_page_vector(struct page **pages, int num_pages) { int i; for (i = 0; i < num_pages; i++) unlock_page(pages[i]); } /* * start an async read(ahead) operation. return nr_pages we submitted * a read for on success, or negative error code. */ static int start_read(struct inode *inode, struct list_head *page_list, int max) { struct ceph_osd_client *osdc = &ceph_inode_to_client(inode)->client->osdc; struct ceph_inode_info *ci = ceph_inode(inode); struct page *page = list_entry(page_list->prev, struct page, lru); struct ceph_vino vino; struct ceph_osd_request *req; u64 off; u64 len; int i; struct page **pages; pgoff_t next_index; int nr_pages = 0; int ret; off = (u64) page_offset(page); /* count pages */ next_index = page->index; list_for_each_entry_reverse(page, page_list, lru) { if (page->index != next_index) break; nr_pages++; next_index++; if (max && nr_pages == max) break; } len = nr_pages << PAGE_CACHE_SHIFT; dout("start_read %p nr_pages %d is %lld~%lld\n", inode, nr_pages, off, len); vino = ceph_vino(inode); req = ceph_osdc_new_request(osdc, &ci->i_layout, vino, off, &len, 1, CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ, NULL, ci->i_truncate_seq, ci->i_truncate_size, false); if (IS_ERR(req)) return PTR_ERR(req); /* build page vector */ nr_pages = calc_pages_for(0, len); pages = kmalloc(sizeof(*pages) * nr_pages, GFP_NOFS); ret = -ENOMEM; if (!pages) goto out; for (i = 0; i < nr_pages; ++i) { page = list_entry(page_list->prev, struct page, lru); BUG_ON(PageLocked(page)); list_del(&page->lru); dout("start_read %p adding %p idx %lu\n", inode, page, page->index); if (add_to_page_cache_lru(page, &inode->i_data, page->index, GFP_NOFS)) { 
ceph_fscache_uncache_page(inode, page); page_cache_release(page); dout("start_read %p add_to_page_cache failed %p\n", inode, page); nr_pages = i; goto out_pages; } pages[i] = page; } osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, false, false); req->r_callback = finish_read; req->r_inode = inode; ceph_osdc_build_request(req, off, NULL, vino.snap, NULL); dout("start_read %p starting %p %lld~%lld\n", inode, req, off, len); ret = ceph_osdc_start_request(osdc, req, false); if (ret < 0) goto out_pages; ceph_osdc_put_request(req); return nr_pages; out_pages: ceph_unlock_page_vector(pages, nr_pages); ceph_release_page_vector(pages, nr_pages); out: ceph_osdc_put_request(req); return ret; } /* * Read multiple pages. Leave pages we don't read + unlock in page_list; * the caller (VM) cleans them up. */ static int ceph_readpages(struct file *file, struct address_space *mapping, struct list_head *page_list, unsigned nr_pages) { struct inode *inode = file_inode(file); struct ceph_fs_client *fsc = ceph_inode_to_client(inode); int rc = 0; int max = 0; rc = ceph_readpages_from_fscache(mapping->host, mapping, page_list, &nr_pages); if (rc == 0) goto out; if (fsc->mount_options->rsize >= PAGE_CACHE_SIZE) max = (fsc->mount_options->rsize + PAGE_CACHE_SIZE - 1) >> PAGE_SHIFT; dout("readpages %p file %p nr_pages %d max %d\n", inode, file, nr_pages, max); while (!list_empty(page_list)) { rc = start_read(inode, page_list, max); if (rc < 0) goto out; BUG_ON(rc == 0); } out: ceph_fscache_readpages_cancel(inode, page_list); dout("readpages %p file %p ret %d\n", inode, file, rc); return rc; } /* * Get ref for the oldest snapc for an inode with dirty data... that is, the * only snap context we are allowed to write back. 
*/ static struct ceph_snap_context *get_oldest_context(struct inode *inode, u64 *snap_size) { struct ceph_inode_info *ci = ceph_inode(inode); struct ceph_snap_context *snapc = NULL; struct ceph_cap_snap *capsnap = NULL; spin_lock(&ci->i_ceph_lock); list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) { dout(" cap_snap %p snapc %p has %d dirty pages\n", capsnap, capsnap->context, capsnap->dirty_pages); if (capsnap->dirty_pages) { snapc = ceph_get_snap_context(capsnap->context); if (snap_size) *snap_size = capsnap->size; break; } } if (!snapc && ci->i_wrbuffer_ref_head) { snapc = ceph_get_snap_context(ci->i_head_snapc); dout(" head snapc %p has %d dirty pages\n", snapc, ci->i_wrbuffer_ref_head); } spin_unlock(&ci->i_ceph_lock); return snapc; } /* * Write a single page, but leave the page locked. * * If we get a write error, set the page error bit, but still adjust the * dirty page accounting (i.e., page is no longer dirty). */ static int writepage_nounlock(struct page *page, struct writeback_control *wbc) { struct inode *inode; struct ceph_inode_info *ci; struct ceph_fs_client *fsc; struct ceph_osd_client *osdc; struct ceph_snap_context *snapc, *oldest; loff_t page_off = page_offset(page); long writeback_stat; u64 truncate_size, snap_size = 0; u32 truncate_seq; int err = 0, len = PAGE_CACHE_SIZE; dout("writepage %p idx %lu\n", page, page->index); if (!page->mapping || !page->mapping->host) { dout("writepage %p - no mapping\n", page); return -EFAULT; } inode = page->mapping->host; ci = ceph_inode(inode); fsc = ceph_inode_to_client(inode); osdc = &fsc->client->osdc; /* verify this is a writeable snap context */ snapc = page_snap_context(page); if (snapc == NULL) { dout("writepage %p page %p not dirty?\n", inode, page); goto out; } oldest = get_oldest_context(inode, &snap_size); if (snapc->seq > oldest->seq) { dout("writepage %p page %p snapc %p not writeable - noop\n", inode, page, snapc); /* we should only noop if called by kswapd */ WARN_ON((current->flags & 
PF_MEMALLOC) == 0); ceph_put_snap_context(oldest); goto out; } ceph_put_snap_context(oldest); spin_lock(&ci->i_ceph_lock); truncate_seq = ci->i_truncate_seq; truncate_size = ci->i_truncate_size; if (!snap_size) snap_size = i_size_read(inode); spin_unlock(&ci->i_ceph_lock); /* is this a partial page at end of file? */ if (page_off >= snap_size) { dout("%p page eof %llu\n", page, snap_size); goto out; } if (snap_size < page_off + len) len = snap_size - page_off; dout("writepage %p page %p index %lu on %llu~%u snapc %p\n", inode, page, page->index, page_off, len, snapc); writeback_stat = atomic_long_inc_return(&fsc->writeback_count); if (writeback_stat > CONGESTION_ON_THRESH(fsc->mount_options->congestion_kb)) set_bdi_congested(&fsc->backing_dev_info, BLK_RW_ASYNC); ceph_readpage_to_fscache(inode, page); set_page_writeback(page); err = ceph_osdc_writepages(osdc, ceph_vino(inode), &ci->i_layout, snapc, page_off, len, truncate_seq, truncate_size, &inode->i_mtime, &page, 1); if (err < 0) { dout("writepage setting page/mapping error %d %p\n", err, page); SetPageError(page); mapping_set_error(&inode->i_data, err); if (wbc) wbc->pages_skipped++; } else { dout("writepage cleaned page %p\n", page); err = 0; /* vfs expects us to return 0 */ } page->private = 0; ClearPagePrivate(page); end_page_writeback(page); ceph_put_wrbuffer_cap_refs(ci, 1, snapc); ceph_put_snap_context(snapc); /* page's reference */ out: return err; } static int ceph_writepage(struct page *page, struct writeback_control *wbc) { int err; struct inode *inode = page->mapping->host; BUG_ON(!inode); ihold(inode); err = writepage_nounlock(page, wbc); unlock_page(page); iput(inode); return err; } /* * lame release_pages helper. release_pages() isn't exported to * modules. 
*/ static void ceph_release_pages(struct page **pages, int num) { struct pagevec pvec; int i; pagevec_init(&pvec, 0); for (i = 0; i < num; i++) { if (pagevec_add(&pvec, pages[i]) == 0) pagevec_release(&pvec); } pagevec_release(&pvec); } /* * async writeback completion handler. * * If we get an error, set the mapping error bit, but not the individual * page error bits. */ static void writepages_finish(struct ceph_osd_request *req, struct ceph_msg *msg) { struct inode *inode = req->r_inode; struct ceph_inode_info *ci = ceph_inode(inode); struct ceph_osd_data *osd_data; unsigned wrote; struct page *page; int num_pages; int i; struct ceph_snap_context *snapc = req->r_snapc; struct address_space *mapping = inode->i_mapping; int rc = req->r_result; u64 bytes = req->r_ops[0].extent.length; struct ceph_fs_client *fsc = ceph_inode_to_client(inode); long writeback_stat; unsigned issued = ceph_caps_issued(ci); osd_data = osd_req_op_extent_osd_data(req, 0); BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_PAGES); num_pages = calc_pages_for((u64)osd_data->alignment, (u64)osd_data->length); if (rc >= 0) { /* * Assume we wrote the pages we originally sent. The * osd might reply with fewer pages if our writeback * raced with a truncation and was adjusted at the osd, * so don't believe the reply. 
*/ wrote = num_pages; } else { wrote = 0; mapping_set_error(mapping, rc); } dout("writepages_finish %p rc %d bytes %llu wrote %d (pages)\n", inode, rc, bytes, wrote); /* clean all pages */ for (i = 0; i < num_pages; i++) { page = osd_data->pages[i]; BUG_ON(!page); WARN_ON(!PageUptodate(page)); writeback_stat = atomic_long_dec_return(&fsc->writeback_count); if (writeback_stat < CONGESTION_OFF_THRESH(fsc->mount_options->congestion_kb)) clear_bdi_congested(&fsc->backing_dev_info, BLK_RW_ASYNC); ceph_put_snap_context(page_snap_context(page)); page->private = 0; ClearPagePrivate(page); dout("unlocking %d %p\n", i, page); end_page_writeback(page); /* * We lost the cache cap, need to truncate the page before * it is unlocked, otherwise we'd truncate it later in the * page truncation thread, possibly losing some data that * raced its way in */ if ((issued & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0) generic_error_remove_page(inode->i_mapping, page); unlock_page(page); } dout("%p wrote+cleaned %d pages\n", inode, wrote); ceph_put_wrbuffer_cap_refs(ci, num_pages, snapc); ceph_release_pages(osd_data->pages, num_pages); if (osd_data->pages_from_pool) mempool_free(osd_data->pages, ceph_sb_to_client(inode->i_sb)->wb_pagevec_pool); else kfree(osd_data->pages); ceph_osdc_put_request(req); } /* * initiate async writeback */ static int ceph_writepages_start(struct address_space *mapping, struct writeback_control *wbc) { struct inode *inode = mapping->host; struct ceph_inode_info *ci = ceph_inode(inode); struct ceph_fs_client *fsc = ceph_inode_to_client(inode); struct ceph_vino vino = ceph_vino(inode); pgoff_t index, start, end; int range_whole = 0; int should_loop = 1; pgoff_t max_pages = 0, max_pages_ever = 0; struct ceph_snap_context *snapc = NULL, *last_snapc = NULL, *pgsnapc; struct pagevec pvec; int done = 0; int rc = 0; unsigned wsize = 1 << inode->i_blkbits; struct ceph_osd_request *req = NULL; int do_sync = 0; u64 truncate_size, snap_size; u32 truncate_seq; /* * 
Include a 'sync' in the OSD request if this is a data * integrity write (e.g., O_SYNC write or fsync()), or if our * cap is being revoked. */ if ((wbc->sync_mode == WB_SYNC_ALL) || ceph_caps_revoking(ci, CEPH_CAP_FILE_BUFFER)) do_sync = 1; dout("writepages_start %p dosync=%d (mode=%s)\n", inode, do_sync, wbc->sync_mode == WB_SYNC_NONE ? "NONE" : (wbc->sync_mode == WB_SYNC_ALL ? "ALL" : "HOLD")); if (fsc->mount_state == CEPH_MOUNT_SHUTDOWN) { pr_warn("writepage_start %p on forced umount\n", inode); return -EIO; /* we're in a forced umount, don't write! */ } if (fsc->mount_options->wsize && fsc->mount_options->wsize < wsize) wsize = fsc->mount_options->wsize; if (wsize < PAGE_CACHE_SIZE) wsize = PAGE_CACHE_SIZE; max_pages_ever = wsize >> PAGE_CACHE_SHIFT; pagevec_init(&pvec, 0); /* where to start/end? */ if (wbc->range_cyclic) { start = mapping->writeback_index; /* Start from prev offset */ end = -1; dout(" cyclic, start at %lu\n", start); } else { start = wbc->range_start >> PAGE_CACHE_SHIFT; end = wbc->range_end >> PAGE_CACHE_SHIFT; if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) range_whole = 1; should_loop = 0; dout(" not cyclic, %lu to %lu\n", start, end); } index = start; retry: /* find oldest snap context with dirty data */ ceph_put_snap_context(snapc); snap_size = 0; snapc = get_oldest_context(inode, &snap_size); if (!snapc) { /* hmm, why does writepages get called when there is no dirty data? */ dout(" no snap context with dirty data?\n"); goto out; } if (snap_size == 0) snap_size = i_size_read(inode); dout(" oldest snapc is %p seq %lld (%d snaps)\n", snapc, snapc->seq, snapc->num_snaps); spin_lock(&ci->i_ceph_lock); truncate_seq = ci->i_truncate_seq; truncate_size = ci->i_truncate_size; if (!snap_size) snap_size = i_size_read(inode); spin_unlock(&ci->i_ceph_lock); if (last_snapc && snapc != last_snapc) { /* if we switched to a newer snapc, restart our scan at the * start of the original file range. 
*/ dout(" snapc differs from last pass, restarting at %lu\n", index); index = start; } last_snapc = snapc; while (!done && index <= end) { int num_ops = do_sync ? 2 : 1; unsigned i; int first; pgoff_t next; int pvec_pages, locked_pages; struct page **pages = NULL; mempool_t *pool = NULL; /* Becomes non-null if mempool used */ struct page *page; int want; u64 offset, len; long writeback_stat; next = 0; locked_pages = 0; max_pages = max_pages_ever; get_more_pages: first = -1; want = min(end - index, min((pgoff_t)PAGEVEC_SIZE, max_pages - (pgoff_t)locked_pages) - 1) + 1; pvec_pages = pagevec_lookup_tag(&pvec, mapping, &index, PAGECACHE_TAG_DIRTY, want); dout("pagevec_lookup_tag got %d\n", pvec_pages); if (!pvec_pages && !locked_pages) break; for (i = 0; i < pvec_pages && locked_pages < max_pages; i++) { page = pvec.pages[i]; dout("? %p idx %lu\n", page, page->index); if (locked_pages == 0) lock_page(page); /* first page */ else if (!trylock_page(page)) break; /* only dirty pages, or our accounting breaks */ if (unlikely(!PageDirty(page)) || unlikely(page->mapping != mapping)) { dout("!dirty or !mapping %p\n", page); unlock_page(page); break; } if (!wbc->range_cyclic && page->index > end) { dout("end of range %p\n", page); done = 1; unlock_page(page); break; } if (next && (page->index != next)) { dout("not consecutive %p\n", page); unlock_page(page); break; } if (wbc->sync_mode != WB_SYNC_NONE) { dout("waiting on writeback %p\n", page); wait_on_page_writeback(page); } if (page_offset(page) >= snap_size) { dout("%p page eof %llu\n", page, snap_size); done = 1; unlock_page(page); break; } if (PageWriteback(page)) { dout("%p under writeback\n", page); unlock_page(page); break; } /* only if matching snap context */ pgsnapc = page_snap_context(page); if (pgsnapc->seq > snapc->seq) { dout("page snapc %p %lld > oldest %p %lld\n", pgsnapc, pgsnapc->seq, snapc, snapc->seq); unlock_page(page); if (!locked_pages) continue; /* keep looking for snap */ break; } if 
(!clear_page_dirty_for_io(page)) { dout("%p !clear_page_dirty_for_io\n", page); unlock_page(page); break; } /* * We have something to write. If this is * the first locked page this time through, * allocate an osd request and a page array * that it will use. */ if (locked_pages == 0) { BUG_ON(pages); /* prepare async write request */ offset = (u64)page_offset(page); len = wsize; req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout, vino, offset, &len, num_ops, CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK, snapc, truncate_seq, truncate_size, true); if (IS_ERR(req)) { rc = PTR_ERR(req); unlock_page(page); break; } req->r_callback = writepages_finish; req->r_inode = inode; max_pages = calc_pages_for(0, (u64)len); pages = kmalloc(max_pages * sizeof (*pages), GFP_NOFS); if (!pages) { pool = fsc->wb_pagevec_pool; pages = mempool_alloc(pool, GFP_NOFS); BUG_ON(!pages); } } /* note position of first page in pvec */ if (first < 0) first = i; dout("%p will write page %p idx %lu\n", inode, page, page->index); writeback_stat = atomic_long_inc_return(&fsc->writeback_count); if (writeback_stat > CONGESTION_ON_THRESH( fsc->mount_options->congestion_kb)) { set_bdi_congested(&fsc->backing_dev_info, BLK_RW_ASYNC); } set_page_writeback(page); pages[locked_pages] = page; locked_pages++; next = page->index + 1; } /* did we get anything? */ if (!locked_pages) goto release_pvec_pages; if (i) { int j; BUG_ON(!locked_pages || first < 0); if (pvec_pages && i == pvec_pages && locked_pages < max_pages) { dout("reached end pvec, trying for more\n"); pagevec_reinit(&pvec); goto get_more_pages; } /* shift unused pages over in the pvec... we * will need to release them below. 
*/ for (j = i; j < pvec_pages; j++) { dout(" pvec leftover page %p\n", pvec.pages[j]); pvec.pages[j-i+first] = pvec.pages[j]; } pvec.nr -= i-first; } /* Format the osd request message and submit the write */ offset = page_offset(pages[0]); len = min(snap_size - offset, (u64)locked_pages << PAGE_CACHE_SHIFT); dout("writepages got %d pages at %llu~%llu\n", locked_pages, offset, len); osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, !!pool, false); pages = NULL; /* request message now owns the pages array */ pool = NULL; /* Update the write op length in case we changed it */ osd_req_op_extent_update(req, 0, len); vino = ceph_vino(inode); ceph_osdc_build_request(req, offset, snapc, vino.snap, &inode->i_mtime); rc = ceph_osdc_start_request(&fsc->client->osdc, req, true); BUG_ON(rc); req = NULL; /* continue? */ index = next; wbc->nr_to_write -= locked_pages; if (wbc->nr_to_write <= 0) done = 1; release_pvec_pages: dout("pagevec_release on %d pages (%p)\n", (int)pvec.nr, pvec.nr ? pvec.pages[0] : NULL); pagevec_release(&pvec); if (locked_pages && !done) goto retry; } if (should_loop && !done) { /* more to do; loop back to beginning of file */ dout("writepages looping back to beginning of file\n"); should_loop = 0; index = 0; goto retry; } if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) mapping->writeback_index = index; out: if (req) ceph_osdc_put_request(req); ceph_put_snap_context(snapc); dout("writepages done, rc = %d\n", rc); return rc; } /* * See if a given @snapc is either writeable, or already written. */ static int context_is_writeable_or_written(struct inode *inode, struct ceph_snap_context *snapc) { struct ceph_snap_context *oldest = get_oldest_context(inode, NULL); int ret = !oldest || snapc->seq <= oldest->seq; ceph_put_snap_context(oldest); return ret; } /* * We are only allowed to write into/dirty the page if the page is * clean, or already dirty within the same snap context. * * called with page locked. 
* return success with page locked, * or any failure (incl -EAGAIN) with page unlocked. */ static int ceph_update_writeable_page(struct file *file, loff_t pos, unsigned len, struct page *page) { struct inode *inode = file_inode(file); struct ceph_inode_info *ci = ceph_inode(inode); struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc; loff_t page_off = pos & PAGE_CACHE_MASK; int pos_in_page = pos & ~PAGE_CACHE_MASK; int end_in_page = pos_in_page + len; loff_t i_size; int r; struct ceph_snap_context *snapc, *oldest; retry_locked: /* writepages currently holds page lock, but if we change that later, */ wait_on_page_writeback(page); /* check snap context */ BUG_ON(!ci->i_snap_realm); down_read(&mdsc->snap_rwsem); BUG_ON(!ci->i_snap_realm->cached_context); snapc = page_snap_context(page); if (snapc && snapc != ci->i_head_snapc) { /* * this page is already dirty in another (older) snap * context! is it writeable now? */ oldest = get_oldest_context(inode, NULL); up_read(&mdsc->snap_rwsem); if (snapc->seq > oldest->seq) { ceph_put_snap_context(oldest); dout(" page %p snapc %p not current or oldest\n", page, snapc); /* * queue for writeback, and wait for snapc to * be writeable or written */ snapc = ceph_get_snap_context(snapc); unlock_page(page); ceph_queue_writeback(inode); r = wait_event_interruptible(ci->i_cap_wq, context_is_writeable_or_written(inode, snapc)); ceph_put_snap_context(snapc); if (r == -ERESTARTSYS) return r; return -EAGAIN; } ceph_put_snap_context(oldest); /* yay, writeable, do it now (without dropping page lock) */ dout(" page %p snapc %p not current, but oldest\n", page, snapc); if (!clear_page_dirty_for_io(page)) goto retry_locked; r = writepage_nounlock(page, NULL); if (r < 0) goto fail_nosnap; goto retry_locked; } if (PageUptodate(page)) { dout(" page %p already uptodate\n", page); return 0; } /* full page? */ if (pos_in_page == 0 && len == PAGE_CACHE_SIZE) return 0; /* past end of file? 
*/ i_size = inode->i_size; /* caller holds i_mutex */ if (page_off >= i_size || (pos_in_page == 0 && (pos+len) >= i_size && end_in_page - pos_in_page != PAGE_CACHE_SIZE)) { dout(" zeroing %p 0 - %d and %d - %d\n", page, pos_in_page, end_in_page, (int)PAGE_CACHE_SIZE); zero_user_segments(page, 0, pos_in_page, end_in_page, PAGE_CACHE_SIZE); return 0; } /* we need to read it. */ up_read(&mdsc->snap_rwsem); r = readpage_nounlock(file, page); if (r < 0) goto fail_nosnap; goto retry_locked; fail_nosnap: unlock_page(page); return r; } /* * We are only allowed to write into/dirty the page if the page is * clean, or already dirty within the same snap context. */ static int ceph_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata) { struct inode *inode = file_inode(file); struct page *page; pgoff_t index = pos >> PAGE_CACHE_SHIFT; int r; do { /* get a page */ page = grab_cache_page_write_begin(mapping, index, 0); if (!page) return -ENOMEM; *pagep = page; dout("write_begin file %p inode %p page %p %d~%d\n", file, inode, page, (int)pos, (int)len); r = ceph_update_writeable_page(file, pos, len, page); } while (r == -EAGAIN); return r; } /* * we don't do anything in here that simple_write_end doesn't do * except adjust dirty page accounting and drop read lock on * mdsc->snap_rwsem. */ static int ceph_write_end(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata) { struct inode *inode = file_inode(file); struct ceph_fs_client *fsc = ceph_inode_to_client(inode); struct ceph_mds_client *mdsc = fsc->mdsc; unsigned from = pos & (PAGE_CACHE_SIZE - 1); int check_cap = 0; dout("write_end file %p inode %p page %p %d~%d (%d)\n", file, inode, page, (int)pos, (int)copied, (int)len); /* zero the stale part of the page if we did a short copy */ if (copied < len) zero_user_segment(page, from+copied, len); /* did file size increase? 
*/ /* (no need for i_size_read(); we caller holds i_mutex */ if (pos+copied > inode->i_size) check_cap = ceph_inode_set_size(inode, pos+copied); if (!PageUptodate(page)) SetPageUptodate(page); set_page_dirty(page); unlock_page(page); up_read(&mdsc->snap_rwsem); page_cache_release(page); if (check_cap) ceph_check_caps(ceph_inode(inode), CHECK_CAPS_AUTHONLY, NULL); return copied; } /* * we set .direct_IO to indicate direct io is supported, but since we * intercept O_DIRECT reads and writes early, this function should * never get called. */ static ssize_t ceph_direct_io(int rw, struct kiocb *iocb, struct iov_iter *iter, loff_t pos) { WARN_ON(1); return -EINVAL; } const struct address_space_operations ceph_aops = { .readpage = ceph_readpage, .readpages = ceph_readpages, .writepage = ceph_writepage, .writepages = ceph_writepages_start, .write_begin = ceph_write_begin, .write_end = ceph_write_end, .set_page_dirty = ceph_set_page_dirty, .invalidatepage = ceph_invalidatepage, .releasepage = ceph_releasepage, .direct_IO = ceph_direct_io, }; /* * vm ops */ static int ceph_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { struct inode *inode = file_inode(vma->vm_file); struct ceph_inode_info *ci = ceph_inode(inode); struct ceph_file_info *fi = vma->vm_file->private_data; loff_t off = vmf->pgoff << PAGE_CACHE_SHIFT; int want, got, ret; dout("filemap_fault %p %llx.%llx %llu~%zd trying to get caps\n", inode, ceph_vinop(inode), off, (size_t)PAGE_CACHE_SIZE); if (fi->fmode & CEPH_FILE_MODE_LAZY) want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO; else want = CEPH_CAP_FILE_CACHE; while (1) { got = 0; ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, want, &got, -1); if (ret == 0) break; if (ret != -ERESTARTSYS) { WARN_ON(1); return VM_FAULT_SIGBUS; } } dout("filemap_fault %p %llu~%zd got cap refs on %s\n", inode, off, (size_t)PAGE_CACHE_SIZE, ceph_cap_string(got)); ret = filemap_fault(vma, vmf); dout("filemap_fault %p %llu~%zd dropping cap refs on %s ret %d\n", inode, off, 
(size_t)PAGE_CACHE_SIZE, ceph_cap_string(got), ret); ceph_put_cap_refs(ci, got); return ret; } /* * Reuse write_begin here for simplicity. */ static int ceph_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) { struct inode *inode = file_inode(vma->vm_file); struct ceph_inode_info *ci = ceph_inode(inode); struct ceph_file_info *fi = vma->vm_file->private_data; struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc; struct page *page = vmf->page; loff_t off = page_offset(page); loff_t size = i_size_read(inode); size_t len; int want, got, ret; if (off + PAGE_CACHE_SIZE <= size) len = PAGE_CACHE_SIZE; else len = size & ~PAGE_CACHE_MASK; dout("page_mkwrite %p %llx.%llx %llu~%zd getting caps i_size %llu\n", inode, ceph_vinop(inode), off, len, size); if (fi->fmode & CEPH_FILE_MODE_LAZY) want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO; else want = CEPH_CAP_FILE_BUFFER; while (1) { got = 0; ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, &got, off + len); if (ret == 0) break; if (ret != -ERESTARTSYS) { WARN_ON(1); return VM_FAULT_SIGBUS; } } dout("page_mkwrite %p %llu~%zd got cap refs on %s\n", inode, off, len, ceph_cap_string(got)); /* Update time before taking page lock */ file_update_time(vma->vm_file); lock_page(page); ret = VM_FAULT_NOPAGE; if ((off > size) || (page->mapping != inode->i_mapping)) goto out; ret = ceph_update_writeable_page(vma->vm_file, off, len, page); if (ret == 0) { /* success. we'll keep the page locked. 
*/ set_page_dirty(page); up_read(&mdsc->snap_rwsem); ret = VM_FAULT_LOCKED; } else { if (ret == -ENOMEM) ret = VM_FAULT_OOM; else ret = VM_FAULT_SIGBUS; } out: if (ret != VM_FAULT_LOCKED) { unlock_page(page); } else { int dirty; spin_lock(&ci->i_ceph_lock); dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR); spin_unlock(&ci->i_ceph_lock); if (dirty) __mark_inode_dirty(inode, dirty); } dout("page_mkwrite %p %llu~%zd dropping cap refs on %s ret %d\n", inode, off, len, ceph_cap_string(got), ret); ceph_put_cap_refs(ci, got); return ret; } static struct vm_operations_struct ceph_vmops = { .fault = ceph_filemap_fault, .page_mkwrite = ceph_page_mkwrite, .remap_pages = generic_file_remap_pages, }; int ceph_mmap(struct file *file, struct vm_area_struct *vma) { struct address_space *mapping = file->f_mapping; if (!mapping->a_ops->readpage) return -ENOEXEC; file_accessed(file); vma->vm_ops = &ceph_vmops; return 0; }
gpl-2.0
Asure/Dropad-kernel-2.6.32.9
drivers/hwmon/ads7828.c
742
8084
/* ads7828.c - lm_sensors driver for ads7828 12-bit 8-channel ADC (C) 2007 EADS Astrium This driver is based on the lm75 and other lm_sensors/hwmon drivers Written by Steve Hardy <steve@linuxrealtime.co.uk> Datasheet available at: http://focus.ti.com/lit/ds/symlink/ads7828.pdf This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/jiffies.h> #include <linux/i2c.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> #include <linux/err.h> #include <linux/mutex.h> /* The ADS7828 registers */ #define ADS7828_NCH 8 /* 8 channels of 12-bit A-D supported */ #define ADS7828_CMD_SD_SE 0x80 /* Single ended inputs */ #define ADS7828_CMD_SD_DIFF 0x00 /* Differential inputs */ #define ADS7828_CMD_PD0 0x0 /* Power Down between A-D conversions */ #define ADS7828_CMD_PD1 0x04 /* Internal ref OFF && A-D ON */ #define ADS7828_CMD_PD2 0x08 /* Internal ref ON && A-D OFF */ #define ADS7828_CMD_PD3 0x0C /* Internal ref ON && A-D ON */ #define ADS7828_INT_VREF_MV 2500 /* Internal vref is 2.5V, 2500mV */ /* Addresses to scan */ static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b, I2C_CLIENT_END }; /* Insmod parameters */ I2C_CLIENT_INSMOD_1(ads7828); /* Other module parameters */ static int se_input = 1; /* Default is SE, 0 == diff */ static int int_vref = 1; /* Default is internal ref 
ON */ static int vref_mv = ADS7828_INT_VREF_MV; /* set if vref != 2.5V */ module_param(se_input, bool, S_IRUGO); module_param(int_vref, bool, S_IRUGO); module_param(vref_mv, int, S_IRUGO); /* Global Variables */ static u8 ads7828_cmd_byte; /* cmd byte without channel bits */ static unsigned int ads7828_lsb_resol; /* resolution of the ADC sample lsb */ /* Each client has this additional data */ struct ads7828_data { struct device *hwmon_dev; struct mutex update_lock; /* mutex protect updates */ char valid; /* !=0 if following fields are valid */ unsigned long last_updated; /* In jiffies */ u16 adc_input[ADS7828_NCH]; /* ADS7828_NCH 12-bit samples */ }; /* Function declaration - necessary due to function dependencies */ static int ads7828_detect(struct i2c_client *client, int kind, struct i2c_board_info *info); static int ads7828_probe(struct i2c_client *client, const struct i2c_device_id *id); /* The ADS7828 returns the 12-bit sample in two bytes, these are read as a word then byte-swapped */ static u16 ads7828_read_value(struct i2c_client *client, u8 reg) { return swab16(i2c_smbus_read_word_data(client, reg)); } static inline u8 channel_cmd_byte(int ch) { /* cmd byte C2,C1,C0 - see datasheet */ u8 cmd = (((ch>>1) | (ch&0x01)<<2)<<4); cmd |= ads7828_cmd_byte; return cmd; } /* Update data for the device (all 8 channels) */ static struct ads7828_data *ads7828_update_device(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct ads7828_data *data = i2c_get_clientdata(client); mutex_lock(&data->update_lock); if (time_after(jiffies, data->last_updated + HZ + HZ / 2) || !data->valid) { unsigned int ch; dev_dbg(&client->dev, "Starting ads7828 update\n"); for (ch = 0; ch < ADS7828_NCH; ch++) { u8 cmd = channel_cmd_byte(ch); data->adc_input[ch] = ads7828_read_value(client, cmd); } data->last_updated = jiffies; data->valid = 1; } mutex_unlock(&data->update_lock); return data; } /* sysfs callback function */ static ssize_t show_in(struct device *dev, 
struct device_attribute *da, char *buf) { struct sensor_device_attribute *attr = to_sensor_dev_attr(da); struct ads7828_data *data = ads7828_update_device(dev); /* Print value (in mV as specified in sysfs-interface documentation) */ return sprintf(buf, "%d\n", (data->adc_input[attr->index] * ads7828_lsb_resol)/1000); } #define in_reg(offset)\ static SENSOR_DEVICE_ATTR(in##offset##_input, S_IRUGO, show_in,\ NULL, offset) in_reg(0); in_reg(1); in_reg(2); in_reg(3); in_reg(4); in_reg(5); in_reg(6); in_reg(7); static struct attribute *ads7828_attributes[] = { &sensor_dev_attr_in0_input.dev_attr.attr, &sensor_dev_attr_in1_input.dev_attr.attr, &sensor_dev_attr_in2_input.dev_attr.attr, &sensor_dev_attr_in3_input.dev_attr.attr, &sensor_dev_attr_in4_input.dev_attr.attr, &sensor_dev_attr_in5_input.dev_attr.attr, &sensor_dev_attr_in6_input.dev_attr.attr, &sensor_dev_attr_in7_input.dev_attr.attr, NULL }; static const struct attribute_group ads7828_group = { .attrs = ads7828_attributes, }; static int ads7828_remove(struct i2c_client *client) { struct ads7828_data *data = i2c_get_clientdata(client); hwmon_device_unregister(data->hwmon_dev); sysfs_remove_group(&client->dev.kobj, &ads7828_group); kfree(i2c_get_clientdata(client)); return 0; } static const struct i2c_device_id ads7828_id[] = { { "ads7828", ads7828 }, { } }; MODULE_DEVICE_TABLE(i2c, ads7828_id); /* This is the driver that will be inserted */ static struct i2c_driver ads7828_driver = { .class = I2C_CLASS_HWMON, .driver = { .name = "ads7828", }, .probe = ads7828_probe, .remove = ads7828_remove, .id_table = ads7828_id, .detect = ads7828_detect, .address_data = &addr_data, }; /* Return 0 if detection is successful, -ENODEV otherwise */ static int ads7828_detect(struct i2c_client *client, int kind, struct i2c_board_info *info) { struct i2c_adapter *adapter = client->adapter; /* Check we have a valid client */ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_READ_WORD_DATA)) return -ENODEV; /* Now, we do the remaining 
detection. There is no identification dedicated register so attempt to sanity check using knowledge of the chip - Read from the 8 channel addresses - Check the top 4 bits of each result are not set (12 data bits) */ if (kind < 0) { int ch; for (ch = 0; ch < ADS7828_NCH; ch++) { u16 in_data; u8 cmd = channel_cmd_byte(ch); in_data = ads7828_read_value(client, cmd); if (in_data & 0xF000) { printk(KERN_DEBUG "%s : Doesn't look like an ads7828 device\n", __func__); return -ENODEV; } } } strlcpy(info->type, "ads7828", I2C_NAME_SIZE); return 0; } static int ads7828_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct ads7828_data *data; int err; data = kzalloc(sizeof(struct ads7828_data), GFP_KERNEL); if (!data) { err = -ENOMEM; goto exit; } i2c_set_clientdata(client, data); mutex_init(&data->update_lock); /* Register sysfs hooks */ err = sysfs_create_group(&client->dev.kobj, &ads7828_group); if (err) goto exit_free; data->hwmon_dev = hwmon_device_register(&client->dev); if (IS_ERR(data->hwmon_dev)) { err = PTR_ERR(data->hwmon_dev); goto exit_remove; } return 0; exit_remove: sysfs_remove_group(&client->dev.kobj, &ads7828_group); exit_free: kfree(data); exit: return err; } static int __init sensors_ads7828_init(void) { /* Initialize the command byte according to module parameters */ ads7828_cmd_byte = se_input ? ADS7828_CMD_SD_SE : ADS7828_CMD_SD_DIFF; ads7828_cmd_byte |= int_vref ? ADS7828_CMD_PD3 : ADS7828_CMD_PD1; /* Calculate the LSB resolution */ ads7828_lsb_resol = (vref_mv*1000)/4096; return i2c_add_driver(&ads7828_driver); } static void __exit sensors_ads7828_exit(void) { i2c_del_driver(&ads7828_driver); } MODULE_AUTHOR("Steve Hardy <steve@linuxrealtime.co.uk>"); MODULE_DESCRIPTION("ADS7828 driver"); MODULE_LICENSE("GPL"); module_init(sensors_ads7828_init); module_exit(sensors_ads7828_exit);
gpl-2.0
InfinitusROM/android_kernel_oppo_n1-old
drivers/misc/tsif.c
998
50293
/* * TSIF Driver * * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/module.h> /* Needed by all modules */ #include <linux/kernel.h> /* Needed for KERN_INFO */ #include <linux/init.h> /* Needed for the macros */ #include <linux/err.h> /* IS_ERR etc. */ #include <linux/platform_device.h> #include <linux/ioport.h> /* XXX_mem_region */ #include <linux/debugfs.h> #include <linux/dma-mapping.h> /* dma_XXX */ #include <linux/delay.h> /* msleep */ #include <linux/io.h> /* ioXXX */ #include <linux/uaccess.h> /* copy_from_user */ #include <linux/clk.h> #include <linux/wakelock.h> #include <linux/tsif_api.h> #include <linux/pm_runtime.h> #include <linux/slab.h> /* kfree, kzalloc */ #include <linux/gpio.h> #include <mach/dma.h> #include <mach/msm_tsif.h> /* * TSIF register offsets */ #define TSIF_STS_CTL_OFF (0x0) #define TSIF_TIME_LIMIT_OFF (0x4) #define TSIF_CLK_REF_OFF (0x8) #define TSIF_LPBK_FLAGS_OFF (0xc) #define TSIF_LPBK_DATA_OFF (0x10) #define TSIF_TEST_CTL_OFF (0x14) #define TSIF_TEST_MODE_OFF (0x18) #define TSIF_TEST_RESET_OFF (0x1c) #define TSIF_TEST_EXPORT_OFF (0x20) #define TSIF_TEST_CURRENT_OFF (0x24) #define TSIF_DATA_PORT_OFF (0x100) /* bits for TSIF_STS_CTL register */ #define TSIF_STS_CTL_EN_IRQ (1 << 28) #define TSIF_STS_CTL_PACK_AVAIL (1 << 27) #define TSIF_STS_CTL_1ST_PACKET (1 << 26) #define TSIF_STS_CTL_OVERFLOW (1 << 25) #define TSIF_STS_CTL_LOST_SYNC (1 << 24) #define TSIF_STS_CTL_TIMEOUT (1 << 23) #define TSIF_STS_CTL_INV_SYNC (1 << 21) #define TSIF_STS_CTL_INV_NULL (1 
<< 20) #define TSIF_STS_CTL_INV_ERROR (1 << 19) #define TSIF_STS_CTL_INV_ENABLE (1 << 18) #define TSIF_STS_CTL_INV_DATA (1 << 17) #define TSIF_STS_CTL_INV_CLOCK (1 << 16) #define TSIF_STS_CTL_SPARE (1 << 15) #define TSIF_STS_CTL_EN_NULL (1 << 11) #define TSIF_STS_CTL_EN_ERROR (1 << 10) #define TSIF_STS_CTL_LAST_BIT (1 << 9) #define TSIF_STS_CTL_EN_TIME_LIM (1 << 8) #define TSIF_STS_CTL_EN_TCR (1 << 7) #define TSIF_STS_CTL_TEST_MODE (3 << 5) #define TSIF_STS_CTL_EN_DM (1 << 4) #define TSIF_STS_CTL_STOP (1 << 3) #define TSIF_STS_CTL_START (1 << 0) /* * Data buffering parameters * * Data stored in cyclic buffer; * * Data organized in chunks of packets. * One chunk processed at a time by the data mover * */ #define TSIF_PKTS_IN_CHUNK_DEFAULT (16) /**< packets in one DM chunk */ #define TSIF_CHUNKS_IN_BUF_DEFAULT (8) #define TSIF_PKTS_IN_CHUNK (tsif_device->pkts_per_chunk) #define TSIF_CHUNKS_IN_BUF (tsif_device->chunks_per_buf) #define TSIF_PKTS_IN_BUF (TSIF_PKTS_IN_CHUNK * TSIF_CHUNKS_IN_BUF) #define TSIF_BUF_SIZE (TSIF_PKTS_IN_BUF * TSIF_PKT_SIZE) #define TSIF_MAX_ID 1 #define ROW_RESET (MSM_CLK_CTL_BASE + 0x214) #define GLBL_CLK_ENA (MSM_CLK_CTL_BASE + 0x000) #define CLK_HALT_STATEB (MSM_CLK_CTL_BASE + 0x104) #define TSIF_NS_REG (MSM_CLK_CTL_BASE + 0x0b4) #define TV_NS_REG (MSM_CLK_CTL_BASE + 0x0bc) /* used to create debugfs entries */ static const struct { const char *name; mode_t mode; int offset; } debugfs_tsif_regs[] = { {"sts_ctl", S_IRUGO | S_IWUSR, TSIF_STS_CTL_OFF}, {"time_limit", S_IRUGO | S_IWUSR, TSIF_TIME_LIMIT_OFF}, {"clk_ref", S_IRUGO | S_IWUSR, TSIF_CLK_REF_OFF}, {"lpbk_flags", S_IRUGO | S_IWUSR, TSIF_LPBK_FLAGS_OFF}, {"lpbk_data", S_IRUGO | S_IWUSR, TSIF_LPBK_DATA_OFF}, {"test_ctl", S_IRUGO | S_IWUSR, TSIF_TEST_CTL_OFF}, {"test_mode", S_IRUGO | S_IWUSR, TSIF_TEST_MODE_OFF}, {"test_reset", S_IWUSR, TSIF_TEST_RESET_OFF}, {"test_export", S_IRUGO | S_IWUSR, TSIF_TEST_EXPORT_OFF}, {"test_current", S_IRUGO, TSIF_TEST_CURRENT_OFF}, {"data_port", S_IRUSR, 
TSIF_DATA_PORT_OFF}, }; /* structures for Data Mover */ struct tsif_dmov_cmd { dmov_box box; dma_addr_t box_ptr; }; struct msm_tsif_device; struct tsif_xfer { struct msm_dmov_cmd hdr; struct msm_tsif_device *tsif_device; int busy; int wi; /**< set devices's write index after xfer */ }; struct msm_tsif_device { struct list_head devlist; struct platform_device *pdev; struct resource *memres; void __iomem *base; unsigned int irq; int mode; u32 time_limit; int clock_inverse; int data_inverse; int sync_inverse; int enable_inverse; enum tsif_state state; struct wake_lock wake_lock; /* clocks */ struct clk *tsif_clk; struct clk *tsif_pclk; struct clk *tsif_ref_clk; /* debugfs */ struct dentry *dent_tsif; struct dentry *debugfs_tsif_regs[ARRAY_SIZE(debugfs_tsif_regs)]; struct dentry *debugfs_gpio; struct dentry *debugfs_action; struct dentry *debugfs_dma; struct dentry *debugfs_databuf; struct debugfs_blob_wrapper blob_wrapper_databuf; /* DMA related */ int dma; int crci; void *data_buffer; dma_addr_t data_buffer_dma; u32 pkts_per_chunk; u32 chunks_per_buf; int ri; int wi; int dmwi; /**< DataMover write index */ struct tsif_dmov_cmd *dmov_cmd[2]; dma_addr_t dmov_cmd_dma[2]; struct tsif_xfer xfer[2]; struct tasklet_struct dma_refill; struct tasklet_struct clocks_off; /* statistics */ u32 stat_rx; u32 stat_overflow; u32 stat_lost_sync; u32 stat_timeout; u32 stat_dmov_err; u32 stat_soft_drop; int stat_ifi; /* inter frame interval */ u32 stat0, stat1; /* client */ void *client_data; void (*client_notify)(void *client_data); }; /* ===clocks begin=== */ static void tsif_put_clocks(struct msm_tsif_device *tsif_device) { if (tsif_device->tsif_clk) { clk_put(tsif_device->tsif_clk); tsif_device->tsif_clk = NULL; } if (tsif_device->tsif_pclk) { clk_put(tsif_device->tsif_pclk); tsif_device->tsif_pclk = NULL; } if (tsif_device->tsif_ref_clk) { clk_put(tsif_device->tsif_ref_clk); tsif_device->tsif_ref_clk = NULL; } } static int tsif_get_clocks(struct msm_tsif_device *tsif_device) { 
struct msm_tsif_platform_data *pdata = tsif_device->pdev->dev.platform_data; int rc = 0; if (pdata->tsif_clk) { tsif_device->tsif_clk = clk_get(&tsif_device->pdev->dev, pdata->tsif_clk); if (IS_ERR(tsif_device->tsif_clk)) { dev_err(&tsif_device->pdev->dev, "failed to get %s\n", pdata->tsif_clk); rc = PTR_ERR(tsif_device->tsif_clk); tsif_device->tsif_clk = NULL; goto ret; } } if (pdata->tsif_pclk) { tsif_device->tsif_pclk = clk_get(&tsif_device->pdev->dev, pdata->tsif_pclk); if (IS_ERR(tsif_device->tsif_pclk)) { dev_err(&tsif_device->pdev->dev, "failed to get %s\n", pdata->tsif_pclk); rc = PTR_ERR(tsif_device->tsif_pclk); tsif_device->tsif_pclk = NULL; goto ret; } } if (pdata->tsif_ref_clk) { tsif_device->tsif_ref_clk = clk_get(&tsif_device->pdev->dev, pdata->tsif_ref_clk); if (IS_ERR(tsif_device->tsif_ref_clk)) { dev_err(&tsif_device->pdev->dev, "failed to get %s\n", pdata->tsif_ref_clk); rc = PTR_ERR(tsif_device->tsif_ref_clk); tsif_device->tsif_ref_clk = NULL; goto ret; } } return 0; ret: tsif_put_clocks(tsif_device); return rc; } static void tsif_clock(struct msm_tsif_device *tsif_device, int on) { if (on) { if (tsif_device->tsif_clk) clk_prepare_enable(tsif_device->tsif_clk); if (tsif_device->tsif_pclk) clk_prepare_enable(tsif_device->tsif_pclk); clk_prepare_enable(tsif_device->tsif_ref_clk); } else { if (tsif_device->tsif_clk) clk_disable_unprepare(tsif_device->tsif_clk); if (tsif_device->tsif_pclk) clk_disable_unprepare(tsif_device->tsif_pclk); clk_disable_unprepare(tsif_device->tsif_ref_clk); } } static void tsif_clocks_off(unsigned long data) { struct msm_tsif_device *tsif_device = (struct msm_tsif_device *) data; tsif_clock(tsif_device, 0); } /* ===clocks end=== */ /* ===gpio begin=== */ static int tsif_gpios_disable(const struct msm_gpio *table, int size) { int rc = 0; int i; const struct msm_gpio *g; for (i = size-1; i >= 0; i--) { int tmp; g = table + i; tmp = gpio_tlmm_config(GPIO_CFG(GPIO_PIN(g->gpio_cfg), 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, 
GPIO_CFG_2MA), GPIO_CFG_DISABLE); if (tmp) { pr_err("gpio_tlmm_config(0x%08x, GPIO_CFG_DISABLE)" " <%s> failed: %d\n", g->gpio_cfg, g->label ?: "?", rc); pr_err("pin %d func %d dir %d pull %d drvstr %d\n", GPIO_PIN(g->gpio_cfg), GPIO_FUNC(g->gpio_cfg), GPIO_DIR(g->gpio_cfg), GPIO_PULL(g->gpio_cfg), GPIO_DRVSTR(g->gpio_cfg)); if (!rc) rc = tmp; } } return rc; } static int tsif_gpios_enable(const struct msm_gpio *table, int size) { int rc; int i; const struct msm_gpio *g; for (i = 0; i < size; i++) { g = table + i; rc = gpio_tlmm_config(g->gpio_cfg, GPIO_CFG_ENABLE); if (rc) { pr_err("gpio_tlmm_config(0x%08x, GPIO_CFG_ENABLE)" " <%s> failed: %d\n", g->gpio_cfg, g->label ?: "?", rc); pr_err("pin %d func %d dir %d pull %d drvstr %d\n", GPIO_PIN(g->gpio_cfg), GPIO_FUNC(g->gpio_cfg), GPIO_DIR(g->gpio_cfg), GPIO_PULL(g->gpio_cfg), GPIO_DRVSTR(g->gpio_cfg)); goto err; } } return 0; err: tsif_gpios_disable(table, i); return rc; } static int tsif_gpios_request_enable(const struct msm_gpio *table, int size) { int rc; rc = tsif_gpios_enable(table, size); return rc; } static void tsif_gpios_disable_free(const struct msm_gpio *table, int size) { tsif_gpios_disable(table, size); } static int tsif_start_gpios(struct msm_tsif_device *tsif_device) { struct msm_tsif_platform_data *pdata = tsif_device->pdev->dev.platform_data; return tsif_gpios_request_enable(pdata->gpios, pdata->num_gpios); } static void tsif_stop_gpios(struct msm_tsif_device *tsif_device) { struct msm_tsif_platform_data *pdata = tsif_device->pdev->dev.platform_data; tsif_gpios_disable_free(pdata->gpios, pdata->num_gpios); } /* ===gpio end=== */ static int tsif_start_hw(struct msm_tsif_device *tsif_device) { u32 ctl = TSIF_STS_CTL_EN_IRQ | TSIF_STS_CTL_EN_TIME_LIM | TSIF_STS_CTL_EN_TCR | TSIF_STS_CTL_EN_DM; if (tsif_device->clock_inverse) ctl |= TSIF_STS_CTL_INV_CLOCK; if (tsif_device->data_inverse) ctl |= TSIF_STS_CTL_INV_DATA; if (tsif_device->sync_inverse) ctl |= TSIF_STS_CTL_INV_SYNC; if 
(tsif_device->enable_inverse) ctl |= TSIF_STS_CTL_INV_ENABLE; dev_info(&tsif_device->pdev->dev, "%s\n", __func__); switch (tsif_device->mode) { case 1: /* mode 1 */ ctl |= (0 << 5); break; case 2: /* mode 2 */ ctl |= (1 << 5); break; case 3: /* manual - control from debugfs */ return 0; break; default: return -EINVAL; } iowrite32(ctl, tsif_device->base + TSIF_STS_CTL_OFF); iowrite32(tsif_device->time_limit, tsif_device->base + TSIF_TIME_LIMIT_OFF); wmb(); iowrite32(ctl | TSIF_STS_CTL_START, tsif_device->base + TSIF_STS_CTL_OFF); wmb(); ctl = ioread32(tsif_device->base + TSIF_STS_CTL_OFF); return (ctl & TSIF_STS_CTL_START) ? 0 : -EFAULT; } static void tsif_stop_hw(struct msm_tsif_device *tsif_device) { iowrite32(TSIF_STS_CTL_STOP, tsif_device->base + TSIF_STS_CTL_OFF); wmb(); } /* ===DMA begin=== */ /** * TSIF DMA theory of operation * * Circular memory buffer \a tsif_mem_buffer allocated; * 4 pointers points to and moved forward on: * - \a ri index of first ready to read packet. * Updated by client's call to tsif_reclaim_packets() * - \a wi points to the next packet to be written by DM. * Data below is valid and will not be overriden by DMA. * Moved on DM callback * - \a dmwi points to the next packet not scheduled yet for DM * moved when packet scheduled for DM * * In addition, DM xfer keep internal \a wi - copy of \a tsif_device->dmwi * at time immediately after scheduling. * * Initially, 2 packets get scheduled for the DM. * * Upon packet receive, DM writes packet to the pre-programmed * location and invoke its callback. * * DM callback moves sets wi pointer to \a xfer->wi; * then it schedules next packet for DM and moves \a dmwi pointer. * * Buffer overflow handling * * If \a dmwi == \a ri-1, buffer is full and \a dmwi can't be advanced. * DMA re-scheduled to the same index. * Callback check and not move \a wi to become equal to \a ri * * On \a read request, data between \a ri and \a wi pointers may be read; * \ri pointer moved accordingly. 
 *
 * It is always granted, on modulo sizeof(tsif_mem_buffer), that
 * \a wi is between [\a ri, \a dmwi]
 *
 * Amount of data available is (wi-ri)*TSIF_PKT_SIZE
 *
 * Number of scheduled packets for DM: (dmwi-wi)
 */
/**
 * tsif_dma_schedule - schedule DMA transfers
 *
 * @tsif_device: device
 *
 * Executed from process context on init, or from tasklet when
 * re-scheduling upon DMA completion.
 * This prevent concurrent execution from several CPU's
 */
static void tsif_dma_schedule(struct msm_tsif_device *tsif_device)
{
	int i, dmwi0, dmwi1, found = 0;
	/* find free entry */
	for (i = 0; i < 2; i++) {
		struct tsif_xfer *xfer = &tsif_device->xfer[i];
		if (xfer->busy)
			continue;
		found++;
		/* claim the slot before programming it */
		xfer->busy = 1;
		dmwi0 = tsif_device->dmwi;
		/* point the box destination at the next free packet slot */
		tsif_device->dmov_cmd[i]->box.dst_row_addr =
			tsif_device->data_buffer_dma + TSIF_PKT_SIZE * dmwi0;
		/* proposed value for dmwi */
		dmwi1 = (dmwi0 + TSIF_PKTS_IN_CHUNK) % TSIF_PKTS_IN_BUF;
		/**
		 * If dmwi going to overlap with ri,
		 * overflow occurs because data was not read.
		 * Still get this packet, to not interrupt TSIF
		 * hardware, but do not advance dmwi.
		 *
		 * Upon receive, packet will be dropped.
		 */
		if (dmwi1 != tsif_device->ri) {
			tsif_device->dmwi = dmwi1;
		} else {
			dev_info(&tsif_device->pdev->dev,
				 "Overflow detected\n");
		}
		/* snapshot of dmwi; becomes device wi when this xfer lands */
		xfer->wi = tsif_device->dmwi;
#ifdef CONFIG_TSIF_DEBUG
		dev_info(&tsif_device->pdev->dev,
			 "schedule xfer[%d] -> [%2d]{%2d}\n",
			 i, dmwi0, xfer->wi);
#endif
		/* complete all the writes to box */
		dma_coherent_pre_ops();
		msm_dmov_enqueue_cmd(tsif_device->dma, &xfer->hdr);
	}
	if (!found)
		dev_info(&tsif_device->pdev->dev,
			 "All xfer entries are busy\n");
}

/**
 * tsif_dmov_complete_func - DataMover completion callback
 *
 * @cmd: original DM command
 * @result: DM result
 * @err: optional error buffer
 *
 * Executed in IRQ context (Data Mover's IRQ)
 * DataMover's spinlock @msm_dmov_lock held.
 */
static void tsif_dmov_complete_func(struct msm_dmov_cmd *cmd,
				    unsigned int result,
				    struct msm_dmov_errdata *err)
{
	int i;
	u32 data_offset;
	struct tsif_xfer *xfer;
	struct msm_tsif_device *tsif_device;
	int reschedule = 0;
	if (!(result & DMOV_RSLT_VALID)) { /* can I trust to @cmd? */
		pr_err("Invalid DMOV result: rc=0x%08x, cmd = %p", result, cmd);
		return;
	}
	/* restore original context */
	xfer = container_of(cmd, struct tsif_xfer, hdr);
	tsif_device = xfer->tsif_device;
	i = xfer - tsif_device->xfer;
	data_offset = tsif_device->dmov_cmd[i]->box.dst_row_addr -
		      tsif_device->data_buffer_dma;
	/* order reads from the xferred buffer */
	dma_coherent_post_ops();
	if (result & DMOV_RSLT_DONE) {
		int w = data_offset / TSIF_PKT_SIZE;
		tsif_device->stat_rx++;
		/*
		 * software overflow when I was scheduled?
		 *
		 * @w is where this xfer was actually written to;
		 * @xfer->wi is where device's @wi will be set;
		 *
		 * if these 2 are equal, we are short in space and
		 * going to overwrite this xfer - this is "soft drop"
		 */
		if (w == xfer->wi)
			tsif_device->stat_soft_drop++;
		reschedule = (tsif_device->state == tsif_state_running);
#ifdef CONFIG_TSIF_DEBUG
		/* IFI calculation */
		/*
		 * update stat_ifi (inter frame interval)
		 *
		 * Calculate time difference between last and 1-st
		 * packets in chunk
		 *
		 * To be removed after tuning
		 */
		if (TSIF_PKTS_IN_CHUNK > 1) {
			void *ptr = tsif_device->data_buffer + data_offset;
			u32 *p0 = ptr;
			u32 *p1 = ptr + (TSIF_PKTS_IN_CHUNK - 1) *
				TSIF_PKT_SIZE;
			u32 tts0 = TSIF_STATUS_TTS(tsif_device->stat0 =
						   tsif_pkt_status(p0));
			u32 tts1 = TSIF_STATUS_TTS(tsif_device->stat1 =
						   tsif_pkt_status(p1));
			tsif_device->stat_ifi = (tts1 - tts0) /
				(TSIF_PKTS_IN_CHUNK - 1);
		}
#endif
	} else {
		/**
		 * Error or flush
		 *
		 * To recover - re-open TSIF device.
		 */
		/* mark status "not valid" in data buffer */
		int n;
		void *ptr = tsif_device->data_buffer + data_offset;
		for (n = 0; n < TSIF_PKTS_IN_CHUNK; n++) {
			u32 *p = ptr + (n * TSIF_PKT_SIZE);
			/* last dword is status + TTS */
			p[TSIF_PKT_SIZE / sizeof(*p) - 1] = 0;
		}
		if (result & DMOV_RSLT_ERROR) {
			dev_err(&tsif_device->pdev->dev,
				"DMA error (0x%08x)\n", result);
			tsif_device->stat_dmov_err++;
			/* force device close */
			if (tsif_device->state == tsif_state_running) {
				tsif_stop_hw(tsif_device);
				/*
				 * This branch is taken only in case of
				 * severe hardware problem (I don't even know
				 * what should happen for DMOV_RSLT_ERROR);
				 * thus I prefer code simplicity over
				 * performance.
				 * Clocks are turned off from outside the
				 * interrupt context.
				 */
				tasklet_schedule(&tsif_device->clocks_off);
				tsif_device->state = tsif_state_flushing;
			}
		}
		if (result & DMOV_RSLT_FLUSH) {
			/*
			 * Flushing normally happens in process of
			 * @tsif_stop(), when we are waiting for outstanding
			 * DMA commands to be flushed.
			 */
			dev_info(&tsif_device->pdev->dev,
				 "DMA channel flushed (0x%08x)\n", result);
			if (tsif_device->state == tsif_state_flushing) {
				if ((!tsif_device->xfer[0].busy) &&
				    (!tsif_device->xfer[1].busy)) {
					tsif_device->state = tsif_state_stopped;
				}
			}
		}
		if (err)
			dev_err(&tsif_device->pdev->dev,
				"Flush data: %08x %08x %08x %08x %08x %08x\n",
				err->flush[0], err->flush[1], err->flush[2],
				err->flush[3], err->flush[4], err->flush[5]);
	}
	/* publish the new write index, then free the slot */
	tsif_device->wi = xfer->wi;
	xfer->busy = 0;
	if (tsif_device->client_notify)
		tsif_device->client_notify(tsif_device->client_data);
	/*
	 * Can't schedule next DMA -
	 * DataMover driver still hold its semaphore,
	 * deadlock will occur.
	 */
	if (reschedule)
		tasklet_schedule(&tsif_device->dma_refill);
}

/**
 * tsif_dma_refill - tasklet function for tsif_device->dma_refill
 *
 * @data: tsif_device
 *
 * Reschedule DMA requests
 *
 * Executed in tasklet
 */
static void tsif_dma_refill(unsigned long data)
{
	struct msm_tsif_device *tsif_device = (struct msm_tsif_device *) data;
	if (tsif_device->state == tsif_state_running)
		tsif_dma_schedule(tsif_device);
}

/**
 * tsif_dma_flush - flush DMA channel
 *
 * @tsif_device:
 *
 * busy wait till DMA flushed
 */
static void tsif_dma_flush(struct msm_tsif_device *tsif_device)
{
	if (tsif_device->xfer[0].busy || tsif_device->xfer[1].busy) {
		tsif_device->state = tsif_state_flushing;
		while (tsif_device->xfer[0].busy ||
		       tsif_device->xfer[1].busy) {
			/* busy flags are cleared by the DM flush callback */
			msm_dmov_flush(tsif_device->dma, 1);
			usleep(10000);
		}
	}
	tsif_device->state = tsif_state_stopped;
	if (tsif_device->client_notify)
		tsif_device->client_notify(tsif_device->client_data);
}

/* Tear down DMA: flush outstanding xfers, then free command and data
 * buffers.  Safe to call on a partially initialised device (NULL checks). */
static void tsif_dma_exit(struct msm_tsif_device *tsif_device)
{
	int i;
	tsif_device->state = tsif_state_flushing;
	tasklet_kill(&tsif_device->dma_refill);
	tsif_dma_flush(tsif_device);
	for (i = 0; i < 2; i++) {
		if (tsif_device->dmov_cmd[i]) {
			dma_free_coherent(NULL, sizeof(struct tsif_dmov_cmd),
					  tsif_device->dmov_cmd[i],
					  tsif_device->dmov_cmd_dma[i]);
			tsif_device->dmov_cmd[i] = NULL;
		}
	}
	if (tsif_device->data_buffer) {
		tsif_device->blob_wrapper_databuf.data = NULL;
		tsif_device->blob_wrapper_databuf.size = 0;
		dma_free_coherent(NULL, TSIF_BUF_SIZE,
				  tsif_device->data_buffer,
				  tsif_device->data_buffer_dma);
		tsif_device->data_buffer = NULL;
	}
}

/* Allocate the packet ring and the two Data Mover box commands;
 * returns 0 or -ENOMEM (partial allocations are released). */
static int tsif_dma_init(struct msm_tsif_device *tsif_device)
{
	int i;
	/* TODO: allocate all DMA memory in one buffer */
	/* Note: don't pass device, it require coherent_dma_mask id device definition */
	tsif_device->data_buffer = dma_alloc_coherent(NULL, TSIF_BUF_SIZE,
				&tsif_device->data_buffer_dma, GFP_KERNEL);
	if (!tsif_device->data_buffer)
		goto err;
	dev_info(&tsif_device->pdev->dev, "data_buffer: %p phys 0x%08x\n",
		 tsif_device->data_buffer, tsif_device->data_buffer_dma);
	tsif_device->blob_wrapper_databuf.data = tsif_device->data_buffer;
	tsif_device->blob_wrapper_databuf.size = TSIF_BUF_SIZE;
	/* all three ring indices start at slot 0 */
	tsif_device->ri = 0;
	tsif_device->wi = 0;
	tsif_device->dmwi = 0;
	for (i = 0; i < 2; i++) {
		dmov_box *box;
		struct msm_dmov_cmd *hdr;
		tsif_device->dmov_cmd[i] = dma_alloc_coherent(NULL,
			sizeof(struct tsif_dmov_cmd),
			&tsif_device->dmov_cmd_dma[i], GFP_KERNEL);
		if (!tsif_device->dmov_cmd[i])
			goto err;
		dev_info(&tsif_device->pdev->dev, "dma[%i]: %p phys 0x%08x\n",
			 i, tsif_device->dmov_cmd[i],
			 tsif_device->dmov_cmd_dma[i]);
		/* dst in 16 LSB, src in 16 MSB */
		box = &(tsif_device->dmov_cmd[i]->box);
		box->cmd = CMD_MODE_BOX | CMD_LC |
			   CMD_SRC_CRCI(tsif_device->crci);
		/* source is the fixed TSIF data port register */
		box->src_row_addr = tsif_device->memres->start +
				    TSIF_DATA_PORT_OFF;
		box->src_dst_len = (TSIF_PKT_SIZE << 16) | TSIF_PKT_SIZE;
		box->num_rows = (TSIF_PKTS_IN_CHUNK << 16) |
				TSIF_PKTS_IN_CHUNK;
		/* source does not advance; destination steps one packet/row */
		box->row_offset = (0 << 16) | TSIF_PKT_SIZE;
		tsif_device->dmov_cmd[i]->box_ptr = CMD_PTR_LP |
			DMOV_CMD_ADDR(tsif_device->dmov_cmd_dma[i] +
				      offsetof(struct tsif_dmov_cmd, box));
		tsif_device->xfer[i].tsif_device = tsif_device;
		hdr = &tsif_device->xfer[i].hdr;
		hdr->cmdptr = DMOV_CMD_ADDR(tsif_device->dmov_cmd_dma[i] +
			      offsetof(struct tsif_dmov_cmd, box_ptr));
		hdr->complete_func = tsif_dmov_complete_func;
	}
	/* start from a clean channel */
	msm_dmov_flush(tsif_device->dma, 1);
	return 0;
err:
	dev_err(&tsif_device->pdev->dev, "Failed to allocate DMA buffers\n");
	tsif_dma_exit(tsif_device);
	return -ENOMEM;
}
/* ===DMA end=== */

/* ===IRQ begin=== */
/* TSIF core interrupt: count the cause bits and acknowledge them by
 * writing the status word back. */
static irqreturn_t tsif_irq(int irq, void *dev_id)
{
	struct msm_tsif_device *tsif_device = dev_id;
	u32 sts_ctl = ioread32(tsif_device->base + TSIF_STS_CTL_OFF);
	if (!(sts_ctl & (TSIF_STS_CTL_PACK_AVAIL |
			 TSIF_STS_CTL_OVERFLOW |
			 TSIF_STS_CTL_LOST_SYNC |
			 TSIF_STS_CTL_TIMEOUT))) {
		dev_warn(&tsif_device->pdev->dev, "Spurious interrupt\n");
		return IRQ_NONE;
	}
	if (sts_ctl & TSIF_STS_CTL_PACK_AVAIL) {
		dev_info(&tsif_device->pdev->dev, "TSIF IRQ: 
PACK_AVAIL\n");
		tsif_device->stat_rx++;
	}
	if (sts_ctl & TSIF_STS_CTL_OVERFLOW) {
		dev_info(&tsif_device->pdev->dev, "TSIF IRQ: OVERFLOW\n");
		tsif_device->stat_overflow++;
	}
	if (sts_ctl & TSIF_STS_CTL_LOST_SYNC) {
		dev_info(&tsif_device->pdev->dev, "TSIF IRQ: LOST SYNC\n");
		tsif_device->stat_lost_sync++;
	}
	if (sts_ctl & TSIF_STS_CTL_TIMEOUT) {
		dev_info(&tsif_device->pdev->dev, "TSIF IRQ: TIMEOUT\n");
		tsif_device->stat_timeout++;
	}
	/* write status back to acknowledge the interrupt causes */
	iowrite32(sts_ctl, tsif_device->base + TSIF_STS_CTL_OFF);
	wmb();
	return IRQ_HANDLED;
}
/* ===IRQ end=== */

/* ===Device attributes begin=== */
/* sysfs "stats" (read): dump configuration, state, statistics, and a few
 * clock-controller registers in one human-readable page. */
static ssize_t show_stats(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
	char *state_string;
	switch (tsif_device->state) {
	case tsif_state_stopped:
		state_string = "stopped";
		break;
	case tsif_state_running:
		state_string = "running";
		break;
	case tsif_state_flushing:
		state_string = "flushing";
		break;
	default:
		state_string = "???";
	}
	return snprintf(buf, PAGE_SIZE,
			"Device %s\n"
			"Mode = %d\n"
			"Time limit = %d\n"
			"State %s\n"
			"Client = %p\n"
			"Pkt/Buf = %d\n"
			"Pkt/chunk = %d\n"
			"Clock inv = %d\n"
			"Data inv = %d\n"
			"Sync inv = %d\n"
			"Enable inv = %d\n"
			"--statistics--\n"
			"Rx chunks = %d\n"
			"Overflow = %d\n"
			"Lost sync = %d\n"
			"Timeout = %d\n"
			"DMA error = %d\n"
			"Soft drop = %d\n"
			"IFI = %d\n"
			"(0x%08x - 0x%08x) / %d\n"
			"--debug--\n"
			"GLBL_CLK_ENA = 0x%08x\n"
			"ROW_RESET = 0x%08x\n"
			"CLK_HALT_STATEB = 0x%08x\n"
			"TV_NS_REG = 0x%08x\n"
			"TSIF_NS_REG = 0x%08x\n",
			dev_name(dev),
			tsif_device->mode,
			tsif_device->time_limit,
			state_string,
			tsif_device->client_data,
			TSIF_PKTS_IN_BUF,
			TSIF_PKTS_IN_CHUNK,
			tsif_device->clock_inverse,
			tsif_device->data_inverse,
			tsif_device->sync_inverse,
			tsif_device->enable_inverse,
			tsif_device->stat_rx,
			tsif_device->stat_overflow,
			tsif_device->stat_lost_sync,
			tsif_device->stat_timeout,
			tsif_device->stat_dmov_err,
			tsif_device->stat_soft_drop,
			tsif_device->stat_ifi,
			tsif_device->stat1,
			tsif_device->stat0,
			TSIF_PKTS_IN_CHUNK - 1,
			ioread32(GLBL_CLK_ENA),
			ioread32(ROW_RESET),
			ioread32(CLK_HALT_STATEB),
			ioread32(TV_NS_REG),
			ioread32(TSIF_NS_REG)
			);
}
/**
 * set_stats - reset statistics on write
 *
 * @dev:
 * @attr:
 * @buf:
 * @count:
 */
static ssize_t set_stats(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
	tsif_device->stat_rx = 0;
	tsif_device->stat_overflow = 0;
	tsif_device->stat_lost_sync = 0;
	tsif_device->stat_timeout = 0;
	tsif_device->stat_dmov_err = 0;
	tsif_device->stat_soft_drop = 0;
	tsif_device->stat_ifi = 0;
	return count;
}
static DEVICE_ATTR(stats, S_IRUGO | S_IWUSR, show_stats, set_stats);

/* sysfs "mode" attribute pair */
static ssize_t show_mode(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
	return snprintf(buf, PAGE_SIZE, "%d\n", tsif_device->mode);
}

static ssize_t set_mode(struct device *dev, struct device_attribute *attr,
			const char *buf, size_t count)
{
	struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
	int value;
	int rc;
	if (1 != sscanf(buf, "%d", &value)) {
		dev_err(&tsif_device->pdev->dev,
			"Failed to parse integer: <%s>\n", buf);
		return -EINVAL;
	}
	/* tsif_set_mode() is defined elsewhere in this file */
	rc = tsif_set_mode(tsif_device, value);
	if (!rc)
		rc = count;
	return rc;
}
static DEVICE_ATTR(mode, S_IRUGO | S_IWUSR, show_mode, set_mode);

/* sysfs "time_limit" attribute pair */
static ssize_t show_time_limit(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
	return snprintf(buf, PAGE_SIZE, "%d\n", tsif_device->time_limit);
}

static ssize_t set_time_limit(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
	int value;
	int rc;
	if (1 != sscanf(buf, "%d", &value)) {
		dev_err(&tsif_device->pdev->dev,
			"Failed to parse integer: <%s>\n", buf);
		return -EINVAL;
	}
	rc = tsif_set_time_limit(tsif_device, value);
	if (!rc)
		rc = count;
	return rc;
}
static
DEVICE_ATTR(time_limit, S_IRUGO | S_IWUSR,
	    show_time_limit, set_time_limit);

/* sysfs "buf_config": "<pkts_per_chunk> * <chunks_per_buf>" */
static ssize_t show_buf_config(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
	return snprintf(buf, PAGE_SIZE, "%d * %d\n",
			tsif_device->pkts_per_chunk,
			tsif_device->chunks_per_buf);
}

static ssize_t set_buf_config(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
	u32 p, c;
	int rc;
	if (2 != sscanf(buf, "%d * %d", &p, &c)) {
		dev_err(&tsif_device->pdev->dev,
			"Failed to parse integer: <%s>\n", buf);
		return -EINVAL;
	}
	rc = tsif_set_buf_config(tsif_device, p, c);
	if (!rc)
		rc = count;
	return rc;
}
static DEVICE_ATTR(buf_config, S_IRUGO | S_IWUSR,
		   show_buf_config, set_buf_config);

/* sysfs "clk_inverse" attribute pair */
static ssize_t show_clk_inverse(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
	return snprintf(buf, PAGE_SIZE, "%d\n", tsif_device->clock_inverse);
}

static ssize_t set_clk_inverse(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
	int value;
	int rc;
	if (1 != sscanf(buf, "%d", &value)) {
		dev_err(&tsif_device->pdev->dev,
			"Failed to parse integer: <%s>\n", buf);
		return -EINVAL;
	}
	rc = tsif_set_clk_inverse(tsif_device, value);
	if (!rc)
		rc = count;
	return rc;
}
static DEVICE_ATTR(clk_inverse, S_IRUGO | S_IWUSR,
		   show_clk_inverse, set_clk_inverse);

/* sysfs "data_inverse" attribute pair */
static ssize_t show_data_inverse(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
	return snprintf(buf, PAGE_SIZE, "%d\n", tsif_device->data_inverse);
}

static ssize_t set_data_inverse(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
	int value;
	int rc;
	if (1 != sscanf(buf, "%d", &value)) {
		dev_err(&tsif_device->pdev->dev,
			"Failed to parse integer: <%s>\n", buf);
		return -EINVAL;
	}
	rc = tsif_set_data_inverse(tsif_device, value);
	if (!rc)
		rc = count;
	return rc;
}
static DEVICE_ATTR(data_inverse, S_IRUGO | S_IWUSR,
		   show_data_inverse, set_data_inverse);

/* sysfs "sync_inverse" attribute pair */
static ssize_t show_sync_inverse(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
	return snprintf(buf, PAGE_SIZE, "%d\n", tsif_device->sync_inverse);
}

static ssize_t set_sync_inverse(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
	int value;
	int rc;
	if (1 != sscanf(buf, "%d", &value)) {
		dev_err(&tsif_device->pdev->dev,
			"Failed to parse integer: <%s>\n", buf);
		return -EINVAL;
	}
	rc = tsif_set_sync_inverse(tsif_device, value);
	if (!rc)
		rc = count;
	return rc;
}
static DEVICE_ATTR(sync_inverse, S_IRUGO | S_IWUSR,
		   show_sync_inverse, set_sync_inverse);

/* sysfs "enable_inverse" attribute pair */
static ssize_t show_enable_inverse(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
	return snprintf(buf, PAGE_SIZE, "%d\n", tsif_device->enable_inverse);
}

static ssize_t set_enable_inverse(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
	int value;
	int rc;
	if (1 != sscanf(buf, "%d", &value)) {
		dev_err(&tsif_device->pdev->dev,
			"Failed to parse integer: <%s>\n", buf);
		return -EINVAL;
	}
	rc = tsif_set_enable_inverse(tsif_device, value);
	if (!rc)
		rc = count;
	return rc;
}
static DEVICE_ATTR(enable_inverse, S_IRUGO | S_IWUSR,
		   show_enable_inverse, set_enable_inverse);

static struct attribute *dev_attrs[] = {
	&dev_attr_stats.attr,
	&dev_attr_mode.attr,
	&dev_attr_time_limit.attr,
	&dev_attr_buf_config.attr,
	&dev_attr_clk_inverse.attr,
	&dev_attr_data_inverse.attr,
	&dev_attr_sync_inverse.attr,
	&dev_attr_enable_inverse.attr,
	NULL,
};
static struct attribute_group dev_attr_grp = {
	.attrs = dev_attrs,
};
/* ===Device attributes end=== */

/* ===debugfs begin=== */
/* debugfs x32 accessors: write/read a 32-bit MMIO register at @data */
static int debugfs_iomem_x32_set(void *data, u64 val)
{
	iowrite32(val, data);
	wmb();
	return 0;
}

static int debugfs_iomem_x32_get(void *data, u64 *val)
{
	*val = ioread32(data);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_iomem_x32, debugfs_iomem_x32_get,
			debugfs_iomem_x32_set, "0x%08llx\n");

/* Create a debugfs file backed by a single 32-bit MMIO register. */
struct dentry *debugfs_create_iomem_x32(const char *name, mode_t mode,
					struct dentry *parent, u32 *value)
{
	return debugfs_create_file(name, mode, parent, value, &fops_iomem_x32);
}

/* debugfs "action" handler: bring the whole pipeline up
 * (DMA -> IRQ/clocks -> platform init -> core -> GPIOs -> runtime PM).
 * Each failure unwinds everything started before it. */
static int action_open(struct msm_tsif_device *tsif_device)
{
	int rc = -EINVAL;
	int result;
	struct msm_tsif_platform_data *pdata =
		tsif_device->pdev->dev.platform_data;
	dev_info(&tsif_device->pdev->dev, "%s\n", __func__);
	if (tsif_device->state != tsif_state_stopped)
		return -EAGAIN;
	rc = tsif_dma_init(tsif_device);
	if (rc) {
		dev_err(&tsif_device->pdev->dev, "failed to init DMA\n");
		return rc;
	}
	tsif_device->state = tsif_state_running;
	/*
	 * DMA should be scheduled prior to TSIF hardware initialization,
	 * otherwise "bus error" will be reported by Data Mover
	 */
	enable_irq(tsif_device->irq);
	tsif_clock(tsif_device, 1);
	tsif_dma_schedule(tsif_device);
	/*
	 * init the device if required
	 */
	if (pdata->init)
		pdata->init(pdata);
	rc = tsif_start_hw(tsif_device);
	if (rc) {
		dev_err(&tsif_device->pdev->dev, "Unable to start HW\n");
		tsif_dma_exit(tsif_device);
		tsif_clock(tsif_device, 0);
		disable_irq(tsif_device->irq);
		return rc;
	}
	/* make sure the GPIO's are set up */
	rc = tsif_start_gpios(tsif_device);
	if (rc) {
		dev_err(&tsif_device->pdev->dev, "failed to start GPIOs\n");
		tsif_stop_hw(tsif_device);
		tsif_dma_exit(tsif_device);
		tsif_clock(tsif_device, 0);
		disable_irq(tsif_device->irq);
		return rc;
	}
	result = pm_runtime_get(&tsif_device->pdev->dev);
	if (result < 0) {
		dev_err(&tsif_device->pdev->dev,
			"Runtime PM: Unable to wake up the device, rc = %d\n",
			result);
		tsif_stop_gpios(tsif_device);
		tsif_stop_hw(tsif_device);
		tsif_dma_exit(tsif_device);
		tsif_clock(tsif_device, 0);
		disable_irq(tsif_device->irq);
		return result;
	}
	wake_lock(&tsif_device->wake_lock);
	return 0;
}

/* debugfs "action" handler: tear the pipeline down in reverse order. */
static int action_close(struct msm_tsif_device *tsif_device)
{
	dev_info(&tsif_device->pdev->dev, "%s, state %d\n", __func__,
		 (int)tsif_device->state);
	/* turn off the GPIO's to prevent new data from entering */
	tsif_stop_gpios(tsif_device);
	/* we unfortunately must sleep here to give the ADM time to
	 * complete any outstanding reads after the GPIO's are turned
	 * off. There is no indication from the ADM hardware that
	 * there are any outstanding reads on the bus, and if we
	 * stop the TSIF too quickly, it can cause a bus error.
	 */
	msleep(250);
	/* now we can stop the core */
	tsif_stop_hw(tsif_device);
	tsif_dma_exit(tsif_device);
	tsif_clock(tsif_device, 0);
	disable_irq(tsif_device->irq);
	pm_runtime_put(&tsif_device->pdev->dev);
	wake_unlock(&tsif_device->wake_lock);
	return 0;
}

/* Command table for the debugfs "action" file. */
static struct {
	int (*func)(struct msm_tsif_device *);
	const char *name;
} actions[] = {
	{ action_open, "open"},
	{ action_close, "close"},
};

/* Parse a command written to the debugfs "action" file and dispatch it. */
static ssize_t tsif_debugfs_action_write(struct file *filp,
					 const char __user *userbuf,
					 size_t count, loff_t *f_pos)
{
	int i;
	struct msm_tsif_device *tsif_device = filp->private_data;
	char s[40];
	/* NOTE(review): min() on sizeof() (size_t) vs count (size_t) is
	 * fine, but @len is then stored as int — assumes count < INT_MAX */
	int len = min(sizeof(s) - 1, count);
	if (copy_from_user(s, userbuf, len))
		return -EFAULT;
	s[len] = '\0';
	dev_info(&tsif_device->pdev->dev, "%s:%s\n", __func__, s);
	for (i = 0; i < ARRAY_SIZE(actions); i++) {
		/* prefix match, bounded by the shorter of input/name */
		if (!strncmp(s, actions[i].name,
			     min(count, strlen(actions[i].name)))) {
			int rc = actions[i].func(tsif_device);
			if (!rc)
				rc = count;
			return rc;
		}
	}
	return -EINVAL;
}

/* Common debugfs open: stash the inode's private pointer on the file. */
static int tsif_debugfs_generic_open(struct inode *inode, struct file *filp)
{
	filp->private_data = inode->i_private;
	return 0;
}

static const struct file_operations fops_debugfs_action = {
	.open = tsif_debugfs_generic_open,
	.write = tsif_debugfs_action_write,
};

static ssize_t
tsif_debugfs_dma_read(struct file *filp, char __user *userbuf,
		      size_t count, loff_t *f_pos)
{
	/* static scratch buffer: not reentrant, debug-only convenience */
	static char bufa[200];
	static char *buf = bufa;
	int sz = sizeof(bufa);
	struct msm_tsif_device *tsif_device = filp->private_data;
	int len = 0;
	if (tsif_device) {
		int i;
		len += snprintf(buf + len, sz - len,
				"ri %3d | wi %3d | dmwi %3d |",
				tsif_device->ri, tsif_device->wi,
				tsif_device->dmwi);
		for (i = 0; i < 2; i++) {
			struct tsif_xfer *xfer = &tsif_device->xfer[i];
			if (xfer->busy) {
				/* recover the packet slot from the DM box */
				u32 dst =
				  tsif_device->dmov_cmd[i]->box.dst_row_addr;
				u32 base = tsif_device->data_buffer_dma;
				int w = (dst - base) / TSIF_PKT_SIZE;
				len += snprintf(buf + len, sz - len,
						" [%3d]{%3d}", w, xfer->wi);
			} else {
				len += snprintf(buf + len, sz - len,
						" ---idle---");
			}
		}
		len += snprintf(buf + len, sz - len, "\n");
	} else {
		len += snprintf(buf + len, sz - len, "No TSIF device???\n");
	}
	return simple_read_from_buffer(userbuf, count, f_pos, buf, len);
}

static const struct file_operations fops_debugfs_dma = {
	.open = tsif_debugfs_generic_open,
	.read = tsif_debugfs_dma_read,
};

/* debugfs "gpios": one "label: level" line per configured pin */
static ssize_t tsif_debugfs_gpios_read(struct file *filp,
				       char __user *userbuf,
				       size_t count, loff_t *f_pos)
{
	static char bufa[300];
	static char *buf = bufa;
	int sz = sizeof(bufa);
	struct msm_tsif_device *tsif_device = filp->private_data;
	int len = 0;
	if (tsif_device) {
		struct msm_tsif_platform_data *pdata =
			tsif_device->pdev->dev.platform_data;
		int i;
		for (i = 0; i < pdata->num_gpios; i++) {
			if (pdata->gpios[i].gpio_cfg) {
				int x = !!gpio_get_value(GPIO_PIN(
					pdata->gpios[i].gpio_cfg));
				len += snprintf(buf + len, sz - len,
						"%15s: %d\n",
						pdata->gpios[i].label, x);
			}
		}
	} else {
		len += snprintf(buf + len, sz - len, "No TSIF device???\n");
	}
	return simple_read_from_buffer(userbuf, count, f_pos, buf, len);
}

static const struct file_operations fops_debugfs_gpios = {
	.open = tsif_debugfs_generic_open,
	.read = tsif_debugfs_gpios_read,
};

/* Create the per-device debugfs tree: register files, gpios, action,
 * dma state, and the raw data buffer blob. */
static void tsif_debugfs_init(struct msm_tsif_device *tsif_device)
{
	tsif_device->dent_tsif =
		debugfs_create_dir(
			dev_name(&tsif_device->pdev->dev), NULL);
	if (tsif_device->dent_tsif) {
		int i;
		void __iomem *base = tsif_device->base;
		for (i = 0; i < ARRAY_SIZE(debugfs_tsif_regs); i++) {
			tsif_device->debugfs_tsif_regs[i] =
				debugfs_create_iomem_x32(
					debugfs_tsif_regs[i].name,
					debugfs_tsif_regs[i].mode,
					tsif_device->dent_tsif,
					base + debugfs_tsif_regs[i].offset);
		}
		tsif_device->debugfs_gpio = debugfs_create_file("gpios",
			S_IRUGO,
			tsif_device->dent_tsif, tsif_device,
			&fops_debugfs_gpios);
		tsif_device->debugfs_action = debugfs_create_file("action",
			S_IWUSR,
			tsif_device->dent_tsif, tsif_device,
			&fops_debugfs_action);
		tsif_device->debugfs_dma = debugfs_create_file("dma",
			S_IRUGO,
			tsif_device->dent_tsif, tsif_device,
			&fops_debugfs_dma);
		tsif_device->debugfs_databuf =
			debugfs_create_blob("data_buf", S_IRUGO,
				tsif_device->dent_tsif,
				&tsif_device->blob_wrapper_databuf);
	}
}

/* Remove the debugfs tree (recursively) and clear the cached dentries. */
static void tsif_debugfs_exit(struct msm_tsif_device *tsif_device)
{
	if (tsif_device->dent_tsif) {
		int i;
		debugfs_remove_recursive(tsif_device->dent_tsif);
		tsif_device->dent_tsif = NULL;
		for (i = 0; i < ARRAY_SIZE(debugfs_tsif_regs); i++)
			tsif_device->debugfs_tsif_regs[i] = NULL;
		tsif_device->debugfs_gpio = NULL;
		tsif_device->debugfs_action = NULL;
		tsif_device->debugfs_dma = NULL;
		tsif_device->debugfs_databuf = NULL;
	}
}
/* ===debugfs end=== */

/* ===module begin=== */
static LIST_HEAD(tsif_devices);

/* Look up a registered device by platform-device id; NULL if not found. */
static struct msm_tsif_device *tsif_find_by_id(int id)
{
	struct msm_tsif_device *tsif_device;
	list_for_each_entry(tsif_device, &tsif_devices, devlist) {
		if (tsif_device->pdev->id == id)
			return tsif_device;
	}
	return NULL;
}

static int __devinit msm_tsif_probe(struct platform_device *pdev)
{
	int rc = -ENODEV;
	struct msm_tsif_platform_data *plat = pdev->dev.platform_data;
	struct msm_tsif_device *tsif_device;
	struct resource *res;
	/* check device validity */
	/* must have platform data */
	if (!plat) {
		dev_err(&pdev->dev, "Platform data not available\n");
		rc = -EINVAL;
		goto out;
	}
	if ((pdev->id < 0) ||
	    (pdev->id > TSIF_MAX_ID)) {
		dev_err(&pdev->dev, "Invalid device ID %d\n", pdev->id);
		rc = -EINVAL;
		goto out;
	}
	/* OK, we will use this device */
	tsif_device = kzalloc(sizeof(struct msm_tsif_device), GFP_KERNEL);
	if (!tsif_device) {
		dev_err(&pdev->dev, "Failed to allocate memory for device\n");
		rc = -ENOMEM;
		goto out;
	}
	/* cross links */
	tsif_device->pdev = pdev;
	platform_set_drvdata(pdev, tsif_device);
	/* defaults: mode 1, no signal inversion, default chunk/buffer sizes */
	tsif_device->mode = 1;
	tsif_device->clock_inverse = 0;
	tsif_device->data_inverse = 0;
	tsif_device->sync_inverse = 0;
	tsif_device->enable_inverse = 0;
	tsif_device->pkts_per_chunk = TSIF_PKTS_IN_CHUNK_DEFAULT;
	tsif_device->chunks_per_buf = TSIF_CHUNKS_IN_BUF_DEFAULT;
	tasklet_init(&tsif_device->dma_refill, tsif_dma_refill,
		     (unsigned long)tsif_device);
	tasklet_init(&tsif_device->clocks_off, tsif_clocks_off,
		     (unsigned long)tsif_device);
	if (tsif_get_clocks(tsif_device))
		goto err_clocks;
	/* map I/O memory */
	tsif_device->memres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!tsif_device->memres) {
		dev_err(&pdev->dev, "Missing MEM resource\n");
		rc = -ENXIO;
		goto err_rgn;
	}
	res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
	if (!res) {
		dev_err(&pdev->dev, "Missing DMA resource\n");
		rc = -ENXIO;
		goto err_rgn;
	}
	/* DMA resource encodes channel in start, CRCI in end */
	tsif_device->dma = res->start;
	tsif_device->crci = res->end;
	tsif_device->base = ioremap(tsif_device->memres->start,
				    resource_size(tsif_device->memres));
	if (!tsif_device->base) {
		dev_err(&pdev->dev, "ioremap failed\n");
		goto err_ioremap;
	}
	dev_info(&pdev->dev, "remapped phys 0x%08x => virt %p\n",
		 tsif_device->memres->start, tsif_device->base);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	tsif_debugfs_init(tsif_device);
	rc = platform_get_irq(pdev, 0);
	if (rc > 0) {
		tsif_device->irq = rc;
		/* IRQ stays disabled until action_open() */
		rc = request_irq(tsif_device->irq, tsif_irq, IRQF_SHARED,
				 dev_name(&pdev->dev), tsif_device);
		disable_irq(tsif_device->irq);
	}
	if (rc) {
		dev_err(&pdev->dev, "failed to request IRQ %d : %d\n",
			tsif_device->irq, rc);
		goto err_irq;
	}
	rc = sysfs_create_group(&pdev->dev.kobj, &dev_attr_grp);
	if (rc) {
		dev_err(&pdev->dev, "failed to create dev. attrs : %d\n", rc);
		goto err_attrs;
	}
	wake_lock_init(&tsif_device->wake_lock, WAKE_LOCK_SUSPEND,
		       dev_name(&pdev->dev));
	dev_info(&pdev->dev,
		 "Configured irq %d memory 0x%08x DMA %d CRCI %d\n",
		 tsif_device->irq, tsif_device->memres->start,
		 tsif_device->dma, tsif_device->crci);
	list_add(&tsif_device->devlist, &tsif_devices);
	return 0;
/* error path */
	/* NOTE(review): the next statement is unreachable (it follows the
	 * "return 0" above and precedes the err_attrs label); dead code
	 * retained as-is */
	sysfs_remove_group(&pdev->dev.kobj, &dev_attr_grp);
err_attrs:
	free_irq(tsif_device->irq, tsif_device);
err_irq:
	tsif_debugfs_exit(tsif_device);
	iounmap(tsif_device->base);
err_ioremap:
err_rgn:
	tsif_put_clocks(tsif_device);
err_clocks:
	kfree(tsif_device);
out:
	return rc;
}

static int __devexit msm_tsif_remove(struct platform_device *pdev)
{
	struct msm_tsif_device *tsif_device = platform_get_drvdata(pdev);
	dev_info(&pdev->dev, "Unload\n");
	list_del(&tsif_device->devlist);
	wake_lock_destroy(&tsif_device->wake_lock);
	sysfs_remove_group(&pdev->dev.kobj, &dev_attr_grp);
	free_irq(tsif_device->irq, tsif_device);
	tsif_debugfs_exit(tsif_device);
	tsif_dma_exit(tsif_device);
	tsif_stop_gpios(tsif_device);
	iounmap(tsif_device->base);
	tsif_put_clocks(tsif_device);
	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	kfree(tsif_device);
	return 0;
}

/* Runtime-PM hooks are informational only: no hardware state is changed. */
static int tsif_runtime_suspend(struct device *dev)
{
	dev_dbg(dev, "pm_runtime: suspending...\n");
	return 0;
}

static int tsif_runtime_resume(struct device *dev)
{
	dev_dbg(dev, "pm_runtime: resuming...\n");
	return 0;
}

static const struct dev_pm_ops tsif_dev_pm_ops = {
	.runtime_suspend = tsif_runtime_suspend,
	.runtime_resume = tsif_runtime_resume,
};

static struct platform_driver msm_tsif_driver = {
	.probe = msm_tsif_probe,
	.remove = __exit_p(msm_tsif_remove),
	.driver = {
		.name = "msm_tsif",
		.pm = &tsif_dev_pm_ops,
	},
};

static int __init mod_init(void)
{
	int rc = platform_driver_register(&msm_tsif_driver);
	if (rc)
		pr_err("TSIF: platform_driver_register failed: %d\n", rc);
	return
rc; } static void __exit mod_exit(void) { platform_driver_unregister(&msm_tsif_driver); } /* ===module end=== */ /* public API */ int tsif_get_active(void) { struct msm_tsif_device *tsif_device; list_for_each_entry(tsif_device, &tsif_devices, devlist) { return tsif_device->pdev->id; } return -ENODEV; } EXPORT_SYMBOL(tsif_get_active); void *tsif_attach(int id, void (*notify)(void *client_data), void *data) { struct msm_tsif_device *tsif_device = tsif_find_by_id(id); if (!tsif_device) return ERR_PTR(-ENODEV); if (tsif_device->client_notify || tsif_device->client_data) return ERR_PTR(-EBUSY); tsif_device->client_notify = notify; tsif_device->client_data = data; /* prevent from unloading */ get_device(&tsif_device->pdev->dev); return tsif_device; } EXPORT_SYMBOL(tsif_attach); void tsif_detach(void *cookie) { struct msm_tsif_device *tsif_device = cookie; tsif_device->client_notify = NULL; tsif_device->client_data = NULL; put_device(&tsif_device->pdev->dev); } EXPORT_SYMBOL(tsif_detach); void tsif_get_info(void *cookie, void **pdata, int *psize) { struct msm_tsif_device *tsif_device = cookie; if (pdata) *pdata = tsif_device->data_buffer; if (psize) *psize = TSIF_PKTS_IN_BUF; } EXPORT_SYMBOL(tsif_get_info); int tsif_set_mode(void *cookie, int mode) { struct msm_tsif_device *tsif_device = cookie; if (tsif_device->state != tsif_state_stopped) { dev_err(&tsif_device->pdev->dev, "Can't change mode while device is active\n"); return -EBUSY; } switch (mode) { case 1: case 2: case 3: tsif_device->mode = mode; break; default: dev_err(&tsif_device->pdev->dev, "Invalid mode: %d\n", mode); return -EINVAL; } return 0; } EXPORT_SYMBOL(tsif_set_mode); int tsif_set_time_limit(void *cookie, u32 value) { struct msm_tsif_device *tsif_device = cookie; if (tsif_device->state != tsif_state_stopped) { dev_err(&tsif_device->pdev->dev, "Can't change time limit while device is active\n"); return -EBUSY; } if (value != (value & 0xFFFFFF)) { dev_err(&tsif_device->pdev->dev, "Invalid time limit 
(should be 24 bit): %#x\n", value); return -EINVAL; } tsif_device->time_limit = value; return 0; } EXPORT_SYMBOL(tsif_set_time_limit); int tsif_set_buf_config(void *cookie, u32 pkts_in_chunk, u32 chunks_in_buf) { struct msm_tsif_device *tsif_device = cookie; if (tsif_device->data_buffer) { dev_err(&tsif_device->pdev->dev, "Data buffer already allocated: %p\n", tsif_device->data_buffer); return -EBUSY; } /* check for crazy user */ if (pkts_in_chunk * chunks_in_buf > 10240) { dev_err(&tsif_device->pdev->dev, "Buffer requested is too large: %d * %d\n", pkts_in_chunk, chunks_in_buf); return -EINVAL; } /* parameters are OK, execute */ tsif_device->pkts_per_chunk = pkts_in_chunk; tsif_device->chunks_per_buf = chunks_in_buf; return 0; } EXPORT_SYMBOL(tsif_set_buf_config); int tsif_set_clk_inverse(void *cookie, int value) { struct msm_tsif_device *tsif_device = cookie; if (tsif_device->state != tsif_state_stopped) { dev_err(&tsif_device->pdev->dev, "Can't change clock inverse while device is active\n"); return -EBUSY; } if ((value != 0) && (value != 1)) { dev_err(&tsif_device->pdev->dev, "Invalid parameter, either 0 or 1: %#x\n", value); return -EINVAL; } tsif_device->clock_inverse = value; return 0; } EXPORT_SYMBOL(tsif_set_clk_inverse); int tsif_set_data_inverse(void *cookie, int value) { struct msm_tsif_device *tsif_device = cookie; if (tsif_device->state != tsif_state_stopped) { dev_err(&tsif_device->pdev->dev, "Can't change data inverse while device is active\n"); return -EBUSY; } if ((value != 0) && (value != 1)) { dev_err(&tsif_device->pdev->dev, "Invalid parameter, either 0 or 1: %#x\n", value); return -EINVAL; } tsif_device->data_inverse = value; return 0; } EXPORT_SYMBOL(tsif_set_data_inverse); int tsif_set_sync_inverse(void *cookie, int value) { struct msm_tsif_device *tsif_device = cookie; if (tsif_device->state != tsif_state_stopped) { dev_err(&tsif_device->pdev->dev, "Can't change sync inverse while device is active\n"); return -EBUSY; } if ((value != 0) && 
(value != 1)) { dev_err(&tsif_device->pdev->dev, "Invalid parameter, either 0 or 1: %#x\n", value); return -EINVAL; } tsif_device->sync_inverse = value; return 0; } EXPORT_SYMBOL(tsif_set_sync_inverse); int tsif_set_enable_inverse(void *cookie, int value) { struct msm_tsif_device *tsif_device = cookie; if (tsif_device->state != tsif_state_stopped) { dev_err(&tsif_device->pdev->dev, "Can't change enable inverse while device is active\n"); return -EBUSY; } if ((value != 0) && (value != 1)) { dev_err(&tsif_device->pdev->dev, "Invalid parameter, either 0 or 1: %#x\n", value); return -EINVAL; } tsif_device->enable_inverse = value; return 0; } EXPORT_SYMBOL(tsif_set_enable_inverse); void tsif_get_state(void *cookie, int *ri, int *wi, enum tsif_state *state) { struct msm_tsif_device *tsif_device = cookie; if (ri) *ri = tsif_device->ri; if (wi) *wi = tsif_device->wi; if (state) *state = tsif_device->state; } EXPORT_SYMBOL(tsif_get_state); int tsif_start(void *cookie) { struct msm_tsif_device *tsif_device = cookie; return action_open(tsif_device); } EXPORT_SYMBOL(tsif_start); void tsif_stop(void *cookie) { struct msm_tsif_device *tsif_device = cookie; action_close(tsif_device); } EXPORT_SYMBOL(tsif_stop); int tsif_get_ref_clk_counter(void *cookie, u32 *tcr_counter) { struct msm_tsif_device *tsif_device = cookie; if (!tsif_device || !tcr_counter) return -EINVAL; if (tsif_device->state == tsif_state_running) *tcr_counter = ioread32(tsif_device->base + TSIF_CLK_REF_OFF); else *tcr_counter = 0; return 0; } EXPORT_SYMBOL(tsif_get_ref_clk_counter); void tsif_reclaim_packets(void *cookie, int read_index) { struct msm_tsif_device *tsif_device = cookie; tsif_device->ri = read_index; } EXPORT_SYMBOL(tsif_reclaim_packets); module_init(mod_init); module_exit(mod_exit); MODULE_DESCRIPTION("TSIF (Transport Stream Interface)" " Driver for the MSM chipset"); MODULE_LICENSE("GPL v2");
gpl-2.0
aksalj/kernel_rpi
drivers/media/platform/exynos4-is/fimc-lite-reg.c
998
9600
/* * Register interface file for EXYNOS FIMC-LITE (camera interface) driver * * Copyright (C) 2012 Samsung Electronics Co., Ltd. * Author: Sylwester Nawrocki <s.nawrocki@samsung.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/bitops.h> #include <linux/delay.h> #include <linux/io.h> #include <media/exynos-fimc.h> #include "fimc-lite-reg.h" #include "fimc-lite.h" #include "fimc-core.h" #define FLITE_RESET_TIMEOUT 50 /* in ms */ void flite_hw_reset(struct fimc_lite *dev) { unsigned long end = jiffies + msecs_to_jiffies(FLITE_RESET_TIMEOUT); u32 cfg; cfg = readl(dev->regs + FLITE_REG_CIGCTRL); cfg |= FLITE_REG_CIGCTRL_SWRST_REQ; writel(cfg, dev->regs + FLITE_REG_CIGCTRL); while (time_is_after_jiffies(end)) { cfg = readl(dev->regs + FLITE_REG_CIGCTRL); if (cfg & FLITE_REG_CIGCTRL_SWRST_RDY) break; usleep_range(1000, 5000); } cfg |= FLITE_REG_CIGCTRL_SWRST; writel(cfg, dev->regs + FLITE_REG_CIGCTRL); } void flite_hw_clear_pending_irq(struct fimc_lite *dev) { u32 cfg = readl(dev->regs + FLITE_REG_CISTATUS); cfg &= ~FLITE_REG_CISTATUS_IRQ_CAM; writel(cfg, dev->regs + FLITE_REG_CISTATUS); } u32 flite_hw_get_interrupt_source(struct fimc_lite *dev) { u32 intsrc = readl(dev->regs + FLITE_REG_CISTATUS); return intsrc & FLITE_REG_CISTATUS_IRQ_MASK; } void flite_hw_clear_last_capture_end(struct fimc_lite *dev) { u32 cfg = readl(dev->regs + FLITE_REG_CISTATUS2); cfg &= ~FLITE_REG_CISTATUS2_LASTCAPEND; writel(cfg, dev->regs + FLITE_REG_CISTATUS2); } void flite_hw_set_interrupt_mask(struct fimc_lite *dev) { u32 cfg, intsrc; /* Select interrupts to be enabled for each output mode */ if (atomic_read(&dev->out_path) == FIMC_IO_DMA) { intsrc = FLITE_REG_CIGCTRL_IRQ_OVFEN | FLITE_REG_CIGCTRL_IRQ_LASTEN | FLITE_REG_CIGCTRL_IRQ_STARTEN | FLITE_REG_CIGCTRL_IRQ_ENDEN; } else { /* An output to the FIMC-IS */ intsrc = 
FLITE_REG_CIGCTRL_IRQ_OVFEN | FLITE_REG_CIGCTRL_IRQ_LASTEN; } cfg = readl(dev->regs + FLITE_REG_CIGCTRL); cfg |= FLITE_REG_CIGCTRL_IRQ_DISABLE_MASK; cfg &= ~intsrc; writel(cfg, dev->regs + FLITE_REG_CIGCTRL); } void flite_hw_capture_start(struct fimc_lite *dev) { u32 cfg = readl(dev->regs + FLITE_REG_CIIMGCPT); cfg |= FLITE_REG_CIIMGCPT_IMGCPTEN; writel(cfg, dev->regs + FLITE_REG_CIIMGCPT); } void flite_hw_capture_stop(struct fimc_lite *dev) { u32 cfg = readl(dev->regs + FLITE_REG_CIIMGCPT); cfg &= ~FLITE_REG_CIIMGCPT_IMGCPTEN; writel(cfg, dev->regs + FLITE_REG_CIIMGCPT); } /* * Test pattern (color bars) enable/disable. External sensor * pixel clock must be active for the test pattern to work. */ void flite_hw_set_test_pattern(struct fimc_lite *dev, bool on) { u32 cfg = readl(dev->regs + FLITE_REG_CIGCTRL); if (on) cfg |= FLITE_REG_CIGCTRL_TEST_PATTERN_COLORBAR; else cfg &= ~FLITE_REG_CIGCTRL_TEST_PATTERN_COLORBAR; writel(cfg, dev->regs + FLITE_REG_CIGCTRL); } static const u32 src_pixfmt_map[8][3] = { { MEDIA_BUS_FMT_YUYV8_2X8, FLITE_REG_CISRCSIZE_ORDER422_IN_YCBYCR, FLITE_REG_CIGCTRL_YUV422_1P }, { MEDIA_BUS_FMT_YVYU8_2X8, FLITE_REG_CISRCSIZE_ORDER422_IN_YCRYCB, FLITE_REG_CIGCTRL_YUV422_1P }, { MEDIA_BUS_FMT_UYVY8_2X8, FLITE_REG_CISRCSIZE_ORDER422_IN_CBYCRY, FLITE_REG_CIGCTRL_YUV422_1P }, { MEDIA_BUS_FMT_VYUY8_2X8, FLITE_REG_CISRCSIZE_ORDER422_IN_CRYCBY, FLITE_REG_CIGCTRL_YUV422_1P }, { MEDIA_BUS_FMT_SGRBG8_1X8, 0, FLITE_REG_CIGCTRL_RAW8 }, { MEDIA_BUS_FMT_SGRBG10_1X10, 0, FLITE_REG_CIGCTRL_RAW10 }, { MEDIA_BUS_FMT_SGRBG12_1X12, 0, FLITE_REG_CIGCTRL_RAW12 }, { MEDIA_BUS_FMT_JPEG_1X8, 0, FLITE_REG_CIGCTRL_USER(1) }, }; /* Set camera input pixel format and resolution */ void flite_hw_set_source_format(struct fimc_lite *dev, struct flite_frame *f) { u32 pixelcode = f->fmt->mbus_code; int i = ARRAY_SIZE(src_pixfmt_map); u32 cfg; while (--i) { if (src_pixfmt_map[i][0] == pixelcode) break; } if (i == 0 && src_pixfmt_map[i][0] != pixelcode) { v4l2_err(&dev->ve.vdev, 
"Unsupported pixel code, falling back to %#08x\n", src_pixfmt_map[i][0]); } cfg = readl(dev->regs + FLITE_REG_CIGCTRL); cfg &= ~FLITE_REG_CIGCTRL_FMT_MASK; cfg |= src_pixfmt_map[i][2]; writel(cfg, dev->regs + FLITE_REG_CIGCTRL); cfg = readl(dev->regs + FLITE_REG_CISRCSIZE); cfg &= ~(FLITE_REG_CISRCSIZE_ORDER422_MASK | FLITE_REG_CISRCSIZE_SIZE_CAM_MASK); cfg |= (f->f_width << 16) | f->f_height; cfg |= src_pixfmt_map[i][1]; writel(cfg, dev->regs + FLITE_REG_CISRCSIZE); } /* Set the camera host input window offsets (cropping) */ void flite_hw_set_window_offset(struct fimc_lite *dev, struct flite_frame *f) { u32 hoff2, voff2; u32 cfg; cfg = readl(dev->regs + FLITE_REG_CIWDOFST); cfg &= ~FLITE_REG_CIWDOFST_OFST_MASK; cfg |= (f->rect.left << 16) | f->rect.top; cfg |= FLITE_REG_CIWDOFST_WINOFSEN; writel(cfg, dev->regs + FLITE_REG_CIWDOFST); hoff2 = f->f_width - f->rect.width - f->rect.left; voff2 = f->f_height - f->rect.height - f->rect.top; cfg = (hoff2 << 16) | voff2; writel(cfg, dev->regs + FLITE_REG_CIWDOFST2); } /* Select camera port (A, B) */ static void flite_hw_set_camera_port(struct fimc_lite *dev, int id) { u32 cfg = readl(dev->regs + FLITE_REG_CIGENERAL); if (id == 0) cfg &= ~FLITE_REG_CIGENERAL_CAM_B; else cfg |= FLITE_REG_CIGENERAL_CAM_B; writel(cfg, dev->regs + FLITE_REG_CIGENERAL); } /* Select serial or parallel bus, camera port (A,B) and set signals polarity */ void flite_hw_set_camera_bus(struct fimc_lite *dev, struct fimc_source_info *si) { u32 cfg = readl(dev->regs + FLITE_REG_CIGCTRL); unsigned int flags = si->flags; if (si->sensor_bus_type != FIMC_BUS_TYPE_MIPI_CSI2) { cfg &= ~(FLITE_REG_CIGCTRL_SELCAM_MIPI | FLITE_REG_CIGCTRL_INVPOLPCLK | FLITE_REG_CIGCTRL_INVPOLVSYNC | FLITE_REG_CIGCTRL_INVPOLHREF); if (flags & V4L2_MBUS_PCLK_SAMPLE_FALLING) cfg |= FLITE_REG_CIGCTRL_INVPOLPCLK; if (flags & V4L2_MBUS_VSYNC_ACTIVE_LOW) cfg |= FLITE_REG_CIGCTRL_INVPOLVSYNC; if (flags & V4L2_MBUS_HSYNC_ACTIVE_LOW) cfg |= FLITE_REG_CIGCTRL_INVPOLHREF; } else { cfg |= 
FLITE_REG_CIGCTRL_SELCAM_MIPI; } writel(cfg, dev->regs + FLITE_REG_CIGCTRL); flite_hw_set_camera_port(dev, si->mux_id); } static void flite_hw_set_pack12(struct fimc_lite *dev, int on) { u32 cfg = readl(dev->regs + FLITE_REG_CIODMAFMT); cfg &= ~FLITE_REG_CIODMAFMT_PACK12; if (on) cfg |= FLITE_REG_CIODMAFMT_PACK12; writel(cfg, dev->regs + FLITE_REG_CIODMAFMT); } static void flite_hw_set_out_order(struct fimc_lite *dev, struct flite_frame *f) { static const u32 pixcode[4][2] = { { MEDIA_BUS_FMT_YUYV8_2X8, FLITE_REG_CIODMAFMT_YCBYCR }, { MEDIA_BUS_FMT_YVYU8_2X8, FLITE_REG_CIODMAFMT_YCRYCB }, { MEDIA_BUS_FMT_UYVY8_2X8, FLITE_REG_CIODMAFMT_CBYCRY }, { MEDIA_BUS_FMT_VYUY8_2X8, FLITE_REG_CIODMAFMT_CRYCBY }, }; u32 cfg = readl(dev->regs + FLITE_REG_CIODMAFMT); int i = ARRAY_SIZE(pixcode); while (--i) if (pixcode[i][0] == f->fmt->mbus_code) break; cfg &= ~FLITE_REG_CIODMAFMT_YCBCR_ORDER_MASK; writel(cfg | pixcode[i][1], dev->regs + FLITE_REG_CIODMAFMT); } void flite_hw_set_dma_window(struct fimc_lite *dev, struct flite_frame *f) { u32 cfg; /* Maximum output pixel size */ cfg = readl(dev->regs + FLITE_REG_CIOCAN); cfg &= ~FLITE_REG_CIOCAN_MASK; cfg = (f->f_height << 16) | f->f_width; writel(cfg, dev->regs + FLITE_REG_CIOCAN); /* DMA offsets */ cfg = readl(dev->regs + FLITE_REG_CIOOFF); cfg &= ~FLITE_REG_CIOOFF_MASK; cfg |= (f->rect.top << 16) | f->rect.left; writel(cfg, dev->regs + FLITE_REG_CIOOFF); } void flite_hw_set_dma_buffer(struct fimc_lite *dev, struct flite_buffer *buf) { unsigned int index; u32 cfg; if (dev->dd->max_dma_bufs == 1) index = 0; else index = buf->index; if (index == 0) writel(buf->paddr, dev->regs + FLITE_REG_CIOSA); else writel(buf->paddr, dev->regs + FLITE_REG_CIOSAN(index - 1)); cfg = readl(dev->regs + FLITE_REG_CIFCNTSEQ); cfg |= BIT(index); writel(cfg, dev->regs + FLITE_REG_CIFCNTSEQ); } void flite_hw_mask_dma_buffer(struct fimc_lite *dev, u32 index) { u32 cfg; if (dev->dd->max_dma_bufs == 1) index = 0; cfg = readl(dev->regs + 
FLITE_REG_CIFCNTSEQ); cfg &= ~BIT(index); writel(cfg, dev->regs + FLITE_REG_CIFCNTSEQ); } /* Enable/disable output DMA, set output pixel size and offsets (composition) */ void flite_hw_set_output_dma(struct fimc_lite *dev, struct flite_frame *f, bool enable) { u32 cfg = readl(dev->regs + FLITE_REG_CIGCTRL); if (!enable) { cfg |= FLITE_REG_CIGCTRL_ODMA_DISABLE; writel(cfg, dev->regs + FLITE_REG_CIGCTRL); return; } cfg &= ~FLITE_REG_CIGCTRL_ODMA_DISABLE; writel(cfg, dev->regs + FLITE_REG_CIGCTRL); flite_hw_set_out_order(dev, f); flite_hw_set_dma_window(dev, f); flite_hw_set_pack12(dev, 0); } void flite_hw_dump_regs(struct fimc_lite *dev, const char *label) { struct { u32 offset; const char * const name; } registers[] = { { 0x00, "CISRCSIZE" }, { 0x04, "CIGCTRL" }, { 0x08, "CIIMGCPT" }, { 0x0c, "CICPTSEQ" }, { 0x10, "CIWDOFST" }, { 0x14, "CIWDOFST2" }, { 0x18, "CIODMAFMT" }, { 0x20, "CIOCAN" }, { 0x24, "CIOOFF" }, { 0x30, "CIOSA" }, { 0x40, "CISTATUS" }, { 0x44, "CISTATUS2" }, { 0xf0, "CITHOLD" }, { 0xfc, "CIGENERAL" }, }; u32 i; v4l2_info(&dev->subdev, "--- %s ---\n", label); for (i = 0; i < ARRAY_SIZE(registers); i++) { u32 cfg = readl(dev->regs + registers[i].offset); v4l2_info(&dev->subdev, "%9s: 0x%08x\n", registers[i].name, cfg); } }
gpl-2.0