repo_name
string
path
string
copies
string
size
string
content
string
license
string
scottellis/linux-pansenti
sound/isa/wavefront/wavefront_fx.c
5237
6319
/*
 * Copyright (c) 1998-2002 by Paul Davis <pbd@op.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <asm/io.h>
#include <linux/init.h>
#include <linux/time.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/firmware.h>
#include <sound/core.h>
#include <sound/snd_wavefront.h>
#include <sound/initval.h>

/* Control bits for the Load Control Register */

#define FX_LSB_TRANSFER 0x01    /* transfer after DSP LSB byte written */
#define FX_MSB_TRANSFER 0x02    /* transfer after DSP MSB byte written */
#define FX_AUTO_INCR    0x04    /* auto-increment DSP address after transfer */

/* Sentinel byte in the firmware image: wait for the FX DSP to go idle
   instead of writing a register. */
#define WAIT_IDLE	0xff

/* Poll the FX status port until the busy bit (0x80) clears, up to 1000
   reads.  Returns 1 when the device went idle, 0 on timeout. */
static int
wavefront_fx_idle (snd_wavefront_t *dev)

{
	int i;
	unsigned int x = 0x80;

	for (i = 0; i < 1000; i++) {
		x = inb (dev->fx_status);
		if ((x & 0x80) == 0) {
			break;
		}
	}

	if (x & 0x80) {
		snd_printk ("FX device never idle.\n");
		return 0;
	}

	return (1);
}

/* Mute (onoff != 0) or unmute the FX output.  Silently does nothing if
   the DSP never reports idle. */
static void
wavefront_fx_mute (snd_wavefront_t *dev, int onoff)

{
	if (!wavefront_fx_idle(dev)) {
		return;
	}

	outb (onoff ? 0x02 : 0x00, dev->fx_op);
}

/* Write `cnt' 16-bit words from `data' into FX DSP memory at page/addr.
   A single word is written MSB-then-LSB with an explicit address; for
   multiple words the DSP's auto-increment mode is used.  Returns 0 on
   success, -EINVAL for out-of-range page/addr, -EIO if the DSP stopped
   responding mid-transfer. */
static int
wavefront_fx_memset (snd_wavefront_t *dev,
		     int page,
		     int addr,
		     int cnt,
		     unsigned short *data)
{
	if (page < 0 || page > 7) {
		snd_printk ("FX memset: "
			"page must be >= 0 and <= 7\n");
		return -(EINVAL);
	}

	if (addr < 0 || addr > 0x7f) {
		snd_printk ("FX memset: "
			"addr must be >= 0 and <= 7f\n");
		return -(EINVAL);
	}

	if (cnt == 1) {

		outb (FX_LSB_TRANSFER, dev->fx_lcr);
		outb (page, dev->fx_dsp_page);
		outb (addr, dev->fx_dsp_addr);
		outb ((data[0] >> 8), dev->fx_dsp_msb);
		outb ((data[0] & 0xff), dev->fx_dsp_lsb);

		snd_printk ("FX: addr %d:%x set to 0x%x\n",
			    page, addr, data[0]);

	} else {
		int i;

		outb (FX_AUTO_INCR|FX_LSB_TRANSFER, dev->fx_lcr);
		outb (page, dev->fx_dsp_page);
		outb (addr, dev->fx_dsp_addr);

		for (i = 0; i < cnt; i++) {
			outb ((data[i] >> 8), dev->fx_dsp_msb);
			outb ((data[i] & 0xff), dev->fx_dsp_lsb);
			if (!wavefront_fx_idle (dev)) {
				break;
			}
		}

		if (i != cnt) {
			snd_printk ("FX memset "
				    "(0x%x, 0x%x, 0x%lx, %d) incomplete\n",
				    page, addr, (unsigned long) data, cnt);
			return -(EIO);
		}
	}

	return 0;
}

/* Probe for the FX device.  Returns 0 if an FX processor appears to be
   present, -1 otherwise. */
int
snd_wavefront_fx_detect (snd_wavefront_t *dev)

{
	/* This is a crude check, but its the best one I have for now.
	   Certainly on the Maui and the Tropez, wavefront_fx_idle() will
	   report "never idle", which suggests that this test should
	   work OK.
	*/

	if (inb (dev->fx_status) & 0x80) {
		snd_printk ("Hmm, probably a Maui or Tropez.\n");
		return -1;
	}

	return 0;
}

/* hwdep open: pin the card's module for the lifetime of the fd. */
int
snd_wavefront_fx_open (struct snd_hwdep *hw, struct file *file)

{
	if (!try_module_get(hw->card->module))
		return -EFAULT;
	file->private_data = hw;
	return 0;
}

/* hwdep release: drop the module reference taken in open. */
int
snd_wavefront_fx_release (struct snd_hwdep *hw, struct file *file)

{
	module_put(hw->card->module);
	return 0;
}

/* hwdep ioctl entry point.  Copies a wavefront_fx_info request from
   userspace and dispatches on r.request (WFFX_MUTE / WFFX_MEMSET).
   WFFX_MEMSET copies at most 256 shorts (512 bytes) of user data via
   memdup_user before handing it to wavefront_fx_memset(). */
int
snd_wavefront_fx_ioctl (struct snd_hwdep *sdev, struct file *file,
			unsigned int cmd, unsigned long arg)

{
	struct snd_card *card;
	snd_wavefront_card_t *acard;
	snd_wavefront_t *dev;
	wavefront_fx_info r;
	unsigned short *page_data = NULL;
	unsigned short *pd;
	int err = 0;

	card = sdev->card;
	if (snd_BUG_ON(!card))
		return -ENODEV;
	if (snd_BUG_ON(!card->private_data))
		return -ENODEV;

	acard = card->private_data;
	dev = &acard->wavefront;

	if (copy_from_user (&r, (void __user *)arg,
			    sizeof (wavefront_fx_info)))
		return -EFAULT;

	switch (r.request) {
	case WFFX_MUTE:
		wavefront_fx_mute (dev, r.data[0]);
		/* NOTE(review): returns -EIO unconditionally even on
		   success — matches the historical upstream code;
		   confirm before changing. */
		return -EIO;

	case WFFX_MEMSET:
		if (r.data[2] <= 0) {
			snd_printk ("cannot write "
				"<= 0 bytes to FX\n");
			return -EIO;
		} else if (r.data[2] == 1) {
			pd = (unsigned short *) &r.data[3];
		} else {
			if (r.data[2] > 256) {
				snd_printk ("cannot write "
					"> 512 bytes to FX\n");
				return -EIO;
			}
			page_data = memdup_user((unsigned char __user *)
						r.data[3],
						r.data[2] * sizeof(short));
			if (IS_ERR(page_data))
				return PTR_ERR(page_data);
			pd = page_data;
		}

		err = wavefront_fx_memset (dev,
			     r.data[0], /* page */
			     r.data[1], /* addr */
			     r.data[2], /* cnt */
			     pd);
		kfree(page_data);
		break;

	default:
		snd_printk ("FX: ioctl %d not yet supported\n",
			    r.request);
		return -ENOTTY;
	}
	return err;
}

/* YSS225 initialization.

   This code was developed using DOSEMU. The Turtle Beach SETUPSND
   utility was run with I/O tracing in DOSEMU enabled, and a reconstruction
   of the port I/O done, using the Yamaha faxback document as a guide
   to add more logic to the code. Its really pretty weird.

   This is the approach of just dumping the whole I/O
   sequence as a series of port/value pairs and a simple loop
   that outputs it.
*/

/* Load the YSS225 register image from firmware and play it back as
   (register, value) byte pairs.  Bytes 8..15 address registers relative
   to dev->base; WAIT_IDLE entries poll for DSP idle; anything else is a
   corrupt image.  Returns 0 on success, -1 on any failure. */
int __devinit
snd_wavefront_fx_start (snd_wavefront_t *dev)
{
	unsigned int i;
	int err;
	const struct firmware *firmware = NULL;

	if (dev->fx_initialized)
		return 0;

	err = request_firmware(&firmware, "yamaha/yss225_registers.bin",
			       dev->card->dev);
	if (err < 0) {
		err = -1;
		goto out;
	}

	for (i = 0; i + 1 < firmware->size; i += 2) {
		if (firmware->data[i] >= 8 && firmware->data[i] < 16) {
			outb(firmware->data[i + 1],
			     dev->base + firmware->data[i]);
		} else if (firmware->data[i] == WAIT_IDLE) {
			if (!wavefront_fx_idle(dev)) {
				err = -1;
				goto out;
			}
		} else {
			snd_printk(KERN_ERR "invalid address"
				   " in register data\n");
			err = -1;
			goto out;
		}
	}

	dev->fx_initialized = 1;
	err = 0;

out:
	release_firmware(firmware);
	return err;
}

MODULE_FIRMWARE("yamaha/yss225_registers.bin");
gpl-2.0
tiny4579/android_kernel_oneplus_msm8974
drivers/scsi/qlogicfas408.c
12917
14930
/*----------------------------------------------------------------*/
/*
   Qlogic linux driver - work in progress. No Warranty express or implied.
   Use at your own risk. Support Tort Reform so you won't have to read all
   these silly disclaimers.

   Copyright 1994, Tom Zerucha.
   tz@execpc.com

   Additional Code, and much appreciated help by
   Michael A. Griffith
   grif@cs.ucr.edu

   Thanks to Eric Youngdale and Dave Hinds for loadable module and PCMCIA
   help respectively, and for suffering through my foolishness during the
   debugging process.

   Reference Qlogic FAS408 Technical Manual, 53408-510-00A, May 10, 1994
   (you can reference it, but it is incomplete and inaccurate in places)

   Version 0.46 1/30/97 - kernel 1.2.0+

   Functions as standalone, loadable, and PCMCIA driver, the latter from
   Dave Hinds' PCMCIA package.

   Cleaned up 26/10/2002 by Alan Cox <alan@lxorguk.ukuu.org.uk> as part of the 2.5
   SCSI driver cleanup and audit. This driver still needs work on the
   following
	-	Non terminating hardware waits
	-	Some layering violations with its pcmcia stub

   Redistributable under terms of the GNU General Public License

   For the avoidance of doubt the "preferred form" of this code is one which
   is in an open non patent encumbered format. Where cryptographic key signing
   forms part of the process of creating an executable the information
   including keys needed to generate an equivalently functional executable
   are deemed to be part of the source code.

*/

#include <linux/module.h>
#include <linux/blkdev.h>	/* to get disk capacity */
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/proc_fs.h>
#include <linux/unistd.h>
#include <linux/spinlock.h>
#include <linux/stat.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/dma.h>

#include "scsi.h"
#include <scsi/scsi_host.h>
#include "qlogicfas408.h"

/*----------------------------------------------------------------*/
/* Cached chip configuration register values, derived from the
   compile-time constants in qlogicfas408.h. */
static int qlcfg5 = (XTALFREQ << 5);	/* 15625/512 */
static int qlcfg6 = SYNCXFRPD;
static int qlcfg7 = SYNCOFFST;
static int qlcfg8 = (SLOWCABLE << 7) | (QL_ENABLE_PARITY << 4);
static int qlcfg9 = ((XTALFREQ + 4) / 5);
static int qlcfgc = (FASTCLK << 3) | (FASTSCSI << 4);

/*----------------------------------------------------------------*/

/*----------------------------------------------------------------*/
/* local functions */
/*----------------------------------------------------------------*/

/* error recovery - reset everything */

static void ql_zap(struct qlogicfas408_priv *priv)
{
	int x;
	int qbase = priv->qbase;
	int int_type = priv->int_type;

	x = inb(qbase + 0xd);
	REG0;
	outb(3, qbase + 3);	/* reset SCSI */
	outb(2, qbase + 3);	/* reset chip */
	/* restore the register bank that was selected before the reset */
	if (x & 0x80)
		REG1;
}

/*
 *	Do a pseudo-dma tranfer
 */

/* Move `reqlen' bytes between `request' and the chip FIFO by polled
   PIO.  `phase' bit 0 selects direction (1 = in from device).  Returns
   the chip's interrupt/parity status bits (0xc0 mask) so the caller can
   detect an aborted transfer. */
static int ql_pdma(struct qlogicfas408_priv *priv, int phase, char *request, int reqlen)
{
	int j;
	int qbase = priv->qbase;
	j = 0;
	if (phase & 1) {	/* in */
#if QL_TURBO_PDMA
		rtrc(4)
		/* empty fifo in large chunks */
		if (reqlen >= 128 && (inb(qbase + 8) & 2)) {	/* full */
			insl(qbase + 4, request, 32);
			reqlen -= 128;
			request += 128;
		}
		while (reqlen >= 84 && !(j & 0xc0))	/* 2/3 */
			if ((j = inb(qbase + 8)) & 4) {
				insl(qbase + 4, request, 21);
				reqlen -= 84;
				request += 84;
			}
		if (reqlen >= 44 && (inb(qbase + 8) & 8)) {	/* 1/3 */
			insl(qbase + 4, request, 11);
			reqlen -= 44;
			request += 44;
		}
#endif
		/* until both empty and int (or until reclen is 0) */
		rtrc(7)
		j = 0;
		while (reqlen && !((j & 0x10) && (j & 0xc0))) {
			/* while bytes to receive and not empty */
			j &= 0xc0;
			while (reqlen && !((j = inb(qbase + 8)) & 0x10)) {
				*request++ = inb(qbase + 4);
				reqlen--;
			}
			if (j & 0x10)
				j = inb(qbase + 8);

		}
	} else {		/* out */
#if QL_TURBO_PDMA
		rtrc(4)
		if (reqlen >= 128 && inb(qbase + 8) & 0x10) {	/* empty */
			outsl(qbase + 4, request, 32);
			reqlen -= 128;
			request += 128;
		}
		while (reqlen >= 84 && !(j & 0xc0))	/* 1/3 */
			if (!((j = inb(qbase + 8)) & 8)) {
				outsl(qbase + 4, request, 21);
				reqlen -= 84;
				request += 84;
			}
		if (reqlen >= 40 && !(inb(qbase + 8) & 4)) {	/* 2/3 */
			outsl(qbase + 4, request, 10);
			reqlen -= 40;
			request += 40;
		}
#endif
		/* until full and int (or until reclen is 0) */
		rtrc(7)
		j = 0;
		while (reqlen && !((j & 2) && (j & 0xc0))) {
			/* while bytes to send and not full */
			while (reqlen && !((j = inb(qbase + 8)) & 2)) {
				outb(*request++, qbase + 4);
				reqlen--;
			}
			if (j & 2)
				j = inb(qbase + 8);
		}
	}
	/* maybe return reqlen */
	return inb(qbase + 8) & 0xc0;
}

/*
 *	Wait for interrupt flag (polled - not real hardware interrupt)
 */

/* Spin until the chip raises an interrupt/error bit, an abort is
   requested, or WATCHDOG jiffies elapse.  Returns 0 if the interrupt
   arrived cleanly, otherwise a DID_* host status code. */
static int ql_wai(struct qlogicfas408_priv *priv)
{
	int k;
	int qbase = priv->qbase;
	unsigned long i;

	k = 0;
	i = jiffies + WATCHDOG;
	while (time_before(jiffies, i) && !priv->qabort &&
					!((k = inb(qbase + 4)) & 0xe0)) {
		barrier();
		cpu_relax();
	}
	if (time_after_eq(jiffies, i))
		return (DID_TIME_OUT);
	if (priv->qabort)
		return (priv->qabort == 1 ? DID_ABORT : DID_RESET);
	if (k & 0x60)
		ql_zap(priv);
	if (k & 0x20)
		return (DID_PARITY);
	if (k & 0x40)
		return (DID_ERROR);
	return 0;
}

/*
 *	Initiate scsi command - queueing handler
 *	caller must hold host lock
 */

/* Clear any stale interrupt/FIFO state, program the configuration
   registers, load the CDB into the chip, and issue "select and send
   command".  Completion is handled later by ql_pcmd() from the
   interrupt path. */
static void ql_icmd(struct scsi_cmnd *cmd)
{
	struct qlogicfas408_priv *priv = get_priv_by_cmd(cmd);
	int 	qbase = priv->qbase;
	int	int_type = priv->int_type;
	unsigned int i;

	priv->qabort = 0;

	REG0;
	/* clearing of interrupts and the fifo is needed */

	inb(qbase + 5);		/* clear interrupts */
	if (inb(qbase + 5))	/* if still interrupting */
		outb(2, qbase + 3);	/* reset chip */
	else if (inb(qbase + 7) & 0x1f)
		outb(1, qbase + 3);	/* clear fifo */
	while (inb(qbase + 5));	/* clear ints */
	REG1;
	outb(1, qbase + 8);	/* set for PIO pseudo DMA */
	outb(0, qbase + 0xb);	/* disable ints */
	inb(qbase + 8);		/* clear int bits */
	REG0;
	outb(0x40, qbase + 0xb);	/* enable features */

	/* configurables */
	outb(qlcfgc, qbase + 0xc);
	/* config: no reset interrupt, (initiator) bus id */
	outb(0x40 | qlcfg8 | priv->qinitid, qbase + 8);
	outb(qlcfg7, qbase + 7);
	outb(qlcfg6, qbase + 6);
	/**/ outb(qlcfg5, qbase + 5);	/* select timer */
	outb(qlcfg9 & 7, qbase + 9);	/* prescaler */
/*	outb(0x99, qbase + 5);	*/
	outb(scmd_id(cmd), qbase + 4);

	for (i = 0; i < cmd->cmd_len; i++)
		outb(cmd->cmnd[i], qbase + 2);

	priv->qlcmd = cmd;
	outb(0x41, qbase + 3);	/* select and send command */
}

/*
 *	Process scsi command - usually after interrupt
 */

/* Run the remainder of a started command: validate the selection
   interrupt status, perform the (pseudo-DMA) data phase over the
   scatterlist, then collect status and message bytes and disconnect.
   Returns the value to store in cmd->result. */
static unsigned int ql_pcmd(struct scsi_cmnd *cmd)
{
	unsigned int i, j;
	unsigned long k;
	unsigned int result;	/* ultimate return result */
	unsigned int status;	/* scsi returned status */
	unsigned int message;	/* scsi returned message */
	unsigned int phase;	/* recorded scsi phase */
	unsigned int reqlen;	/* total length of transfer */
	char *buf;
	struct qlogicfas408_priv *priv = get_priv_by_cmd(cmd);
	int qbase = priv->qbase;
	int int_type = priv->int_type;

	rtrc(1)
	j = inb(qbase + 6);
	i = inb(qbase + 5);
	if (i == 0x20) {
		return (DID_NO_CONNECT << 16);
	}
	i |= inb(qbase + 5);	/* the 0x10 bit can be set after the 0x08 */
	if (i != 0x18) {
		printk(KERN_ERR "Ql:Bad Interrupt status:%02x\n", i);
		ql_zap(priv);
		return (DID_BAD_INTR << 16);
	}
	j &= 7;			/* j = inb( qbase + 7 ) >> 5; */

	/* correct status is supposed to be step 4 */
	/* it sometimes returns step 3 but with 0 bytes left to send */
	/* We can try stuffing the FIFO with the max each time, but we will get a
	   sequence of 3 if any bytes are left (but we do flush the FIFO anyway */

	if (j != 3 && j != 4) {
		printk(KERN_ERR "Ql:Bad sequence for command %d, int %02X, cmdleft = %d\n",
		     j, i, inb(qbase + 7) & 0x1f);
		ql_zap(priv);
		return (DID_ERROR << 16);
	}
	result = DID_OK;
	if (inb(qbase + 7) & 0x1f)	/* if some bytes in fifo */
		outb(1, qbase + 3);	/* clear fifo */
	/* note that request_bufflen is the total xfer size when sg is used */
	reqlen = scsi_bufflen(cmd);
	/* note that it won't work if transfers > 16M are requested */
	if (reqlen && !((phase = inb(qbase + 4)) & 6)) {	/* data phase */
		struct scatterlist *sg;
		rtrc(2)
		outb(reqlen, qbase);	/* low-mid xfer cnt */
		outb(reqlen >> 8, qbase + 1);	/* low-mid xfer cnt */
		outb(reqlen >> 16, qbase + 0xe);	/* high xfer cnt */
		outb(0x90, qbase + 3);	/* command do xfer */
		/* PIO pseudo DMA to buffer or sglist */
		REG1;

		scsi_for_each_sg(cmd, sg, scsi_sg_count(cmd), i) {
			if (priv->qabort) {
				REG0;
				return ((priv->qabort == 1 ?
					 DID_ABORT : DID_RESET) << 16);
			}
			buf = sg_virt(sg);
			if (ql_pdma(priv, phase, buf, sg->length))
				break;
		}
		REG0;
		rtrc(2)
		/*
		 *	Wait for irq (split into second state of irq handler
		 *	if this can take time)
		 */
		if ((k = ql_wai(priv)))
			return (k << 16);
		k = inb(qbase + 5);	/* should be 0x10, bus service */
	}

	/*
	 *	Enter Status (and Message In) Phase
	 */

	k = jiffies + WATCHDOG;

	while (time_before(jiffies, k) && !priv->qabort &&
						!(inb(qbase + 4) & 6))
		cpu_relax();	/* wait for status phase */

	if (time_after_eq(jiffies, k)) {
		ql_zap(priv);
		return (DID_TIME_OUT << 16);
	}

	/* FIXME: timeout ?? */
	while (inb(qbase + 5))
		cpu_relax();	/* clear pending ints */

	if (priv->qabort)
		return ((priv->qabort == 1 ? DID_ABORT : DID_RESET) << 16);

	outb(0x11, qbase + 3);	/* get status and message */
	if ((k = ql_wai(priv)))
		return (k << 16);
	i = inb(qbase + 5);	/* get chip irq stat */
	j = inb(qbase + 7) & 0x1f;	/* and bytes rec'd */
	status = inb(qbase + 2);
	message = inb(qbase + 2);

	/*
	 *	Should get function complete int if Status and message, else
	 *	bus serv if only status
	 */
	if (!((i == 8 && j == 2) || (i == 0x10 && j == 1))) {
		printk(KERN_ERR "Ql:Error during status phase, int=%02X, %d bytes recd\n", i, j);
		result = DID_ERROR;
	}
	outb(0x12, qbase + 3);	/* done, disconnect */
	rtrc(1)
	if ((k = ql_wai(priv)))
		return (k << 16);

	/*
	 *	Should get bus service interrupt and disconnect interrupt
	 */

	i = inb(qbase + 5);	/* should be bus service */
	while (!priv->qabort && ((i & 0x20) != 0x20)) {
		barrier();
		cpu_relax();
		i |= inb(qbase + 5);
	}
	rtrc(0)

	if (priv->qabort)
		return ((priv->qabort == 1 ? DID_ABORT : DID_RESET) << 16);

	return (result << 16) | (message << 8) | (status & STATUS_MASK);
}

/*
 *	Interrupt handler
 */

/* Bottom half of the interrupt path, called with the host lock held:
   dismiss false alarms, drain stray interrupts when no command is
   pending, otherwise finish the active command via ql_pcmd() and
   complete it. */
static void ql_ihandl(void *dev_id)
{
	struct scsi_cmnd *icmd;
	struct Scsi_Host *host = dev_id;
	struct qlogicfas408_priv *priv = get_priv_by_host(host);
	int qbase = priv->qbase;
	REG0;

	if (!(inb(qbase + 4) & 0x80))	/* false alarm? */
		return;

	if (priv->qlcmd == NULL) {	/* no command to process? */
		int i;
		i = 16;
		while (i-- && inb(qbase + 5));	/* maybe also ql_zap() */
		return;
	}
	icmd = priv->qlcmd;
	icmd->result = ql_pcmd(icmd);
	priv->qlcmd = NULL;
	/*
	 *	If result is CHECK CONDITION done calls qcommand to request
	 *	sense
	 */
	(icmd->scsi_done) (icmd);
}

/* IRQ entry point registered by the board-specific stubs; serializes
   ql_ihandl() under the host lock. */
irqreturn_t qlogicfas408_ihandl(int irq, void *dev_id)
{
	unsigned long flags;
	struct Scsi_Host *host = dev_id;

	spin_lock_irqsave(host->host_lock, flags);
	ql_ihandl(dev_id);
	spin_unlock_irqrestore(host->host_lock, flags);
	return IRQ_HANDLED;
}

/*
 *	Queued command
 */

/* queuecommand (locked variant): reject commands addressed to our own
   initiator id, then busy-wait until the previous command's interrupt
   has finished before starting this one with ql_icmd(). */
static int qlogicfas408_queuecommand_lck(struct scsi_cmnd *cmd,
			      void (*done) (struct scsi_cmnd *))
{
	struct qlogicfas408_priv *priv = get_priv_by_cmd(cmd);
	if (scmd_id(cmd) == priv->qinitid) {
		cmd->result = DID_BAD_TARGET << 16;
		done(cmd);
		return 0;
	}

	cmd->scsi_done = done;
	/* wait for the last command's interrupt to finish */
	while (priv->qlcmd != NULL) {
		barrier();
		cpu_relax();
	}
	ql_icmd(cmd);
	return 0;
}

DEF_SCSI_QCMD(qlogicfas408_queuecommand)

/*
 *	Return bios parameters
 */

int qlogicfas408_biosparam(struct scsi_device *disk,
			   struct block_device *dev,
			   sector_t capacity, int ip[])
{
/* This should mimic the DOS Qlogic driver's behavior exactly */
	ip[0] = 0x40;
	ip[1] = 0x20;
	ip[2] = (unsigned long) capacity / (ip[0] * ip[1]);
	if (ip[2] > 1024) {
		ip[0] = 0xff;
		ip[1] = 0x3f;
		ip[2] = (unsigned long) capacity / (ip[0] * ip[1]);
#if 0
		if (ip[2] > 1023)
			ip[2] = 1023;
#endif
	}
	return 0;
}

/*
 *	Abort a command in progress
 */

int qlogicfas408_abort(struct scsi_cmnd *cmd)
{
	struct qlogicfas408_priv *priv = get_priv_by_cmd(cmd);
	priv->qabort = 1;
	ql_zap(priv);
	return SUCCESS;
}

/*
 *	Reset SCSI bus
 *	FIXME: This function is invoked with cmd = NULL directly by
 *	the PCMCIA qlogic_stub code. This wants fixing
 */

/* NOTE(review): per the FIXME above, cmd may be NULL when called from
   the PCMCIA stub, in which case the cmd->device dereferences below
   would oops — confirm against the callers before relying on this. */
int qlogicfas408_bus_reset(struct scsi_cmnd *cmd)
{
	struct qlogicfas408_priv *priv = get_priv_by_cmd(cmd);
	unsigned long flags;

	priv->qabort = 2;

	spin_lock_irqsave(cmd->device->host->host_lock, flags);
	ql_zap(priv);
	spin_unlock_irqrestore(cmd->device->host->host_lock, flags);

	return SUCCESS;
}

/*
 *	Return info string
 */

const char *qlogicfas408_info(struct Scsi_Host *host)
{
	struct qlogicfas408_priv *priv = get_priv_by_host(host);
	return priv->qinfo;
}

/*
 *	Get type of chip
 */

int qlogicfas408_get_chip_type(int qbase, int int_type)
{
	REG1;
	return inb(qbase + 0xe) & 0xf8;
}

/*
 *	Perform initialization tasks
 */

void qlogicfas408_setup(int qbase, int id, int int_type)
{
	outb(1, qbase + 8);	/* set for PIO pseudo DMA */
	REG0;
	outb(0x40 | qlcfg8 | id, qbase + 8);	/* (ini) bus id, disable scsi rst */
	outb(qlcfg5, qbase + 5);	/* select timer */
	outb(qlcfg9, qbase + 9);	/* prescaler */

#if QL_RESET_AT_START
	outb(3, qbase + 3);

	REG1;
	/* FIXME: timeout */
	while (inb(qbase + 0xf) & 4)
		cpu_relax();

	REG0;
#endif
}

/*
 *	Checks if this is a QLogic FAS 408
 */

/* Reads register 0xe twice and XORs the values; on a FAS408 the
   register alternates such that the XOR is 7 on consecutive reads. */
int qlogicfas408_detect(int qbase, int int_type)
{
	REG1;
	return (((inb(qbase + 0xe) ^ inb(qbase + 0xe)) == 7) &&
	       ((inb(qbase + 0xe) ^ inb(qbase + 0xe)) == 7));
}

/*
 *	Disable interrupts
 */

void qlogicfas408_disable_ints(struct qlogicfas408_priv *priv)
{
	int qbase = priv->qbase;
	int int_type = priv->int_type;

	REG1;
	outb(0, qbase + 0xb);	/* disable ints */
}

/*
 *	Init and exit functions
 */

static int __init qlogicfas408_init(void)
{
	return 0;
}

static void __exit qlogicfas408_exit(void)
{
}

MODULE_AUTHOR("Tom Zerucha, Michael Griffith");
MODULE_DESCRIPTION("Driver for the Qlogic FAS SCSI controllers");
MODULE_LICENSE("GPL");
module_init(qlogicfas408_init);
module_exit(qlogicfas408_exit);

EXPORT_SYMBOL(qlogicfas408_info);
EXPORT_SYMBOL(qlogicfas408_queuecommand);
EXPORT_SYMBOL(qlogicfas408_abort);
EXPORT_SYMBOL(qlogicfas408_bus_reset);
EXPORT_SYMBOL(qlogicfas408_biosparam);
EXPORT_SYMBOL(qlogicfas408_ihandl);
EXPORT_SYMBOL(qlogicfas408_get_chip_type);
EXPORT_SYMBOL(qlogicfas408_setup);
EXPORT_SYMBOL(qlogicfas408_detect);
EXPORT_SYMBOL(qlogicfas408_disable_ints);
gpl-2.0
djmatt604/android_kernel_samsung_note2jb
drivers/scsi/qlogicfas408.c
12917
14930
/*----------------------------------------------------------------*/ /* Qlogic linux driver - work in progress. No Warranty express or implied. Use at your own risk. Support Tort Reform so you won't have to read all these silly disclaimers. Copyright 1994, Tom Zerucha. tz@execpc.com Additional Code, and much appreciated help by Michael A. Griffith grif@cs.ucr.edu Thanks to Eric Youngdale and Dave Hinds for loadable module and PCMCIA help respectively, and for suffering through my foolishness during the debugging process. Reference Qlogic FAS408 Technical Manual, 53408-510-00A, May 10, 1994 (you can reference it, but it is incomplete and inaccurate in places) Version 0.46 1/30/97 - kernel 1.2.0+ Functions as standalone, loadable, and PCMCIA driver, the latter from Dave Hinds' PCMCIA package. Cleaned up 26/10/2002 by Alan Cox <alan@lxorguk.ukuu.org.uk> as part of the 2.5 SCSI driver cleanup and audit. This driver still needs work on the following - Non terminating hardware waits - Some layering violations with its pcmcia stub Redistributable under terms of the GNU General Public License For the avoidance of doubt the "preferred form" of this code is one which is in an open non patent encumbered format. Where cryptographic key signing forms part of the process of creating an executable the information including keys needed to generate an equivalently functional executable are deemed to be part of the source code. 
*/ #include <linux/module.h> #include <linux/blkdev.h> /* to get disk capacity */ #include <linux/kernel.h> #include <linux/string.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/proc_fs.h> #include <linux/unistd.h> #include <linux/spinlock.h> #include <linux/stat.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/dma.h> #include "scsi.h" #include <scsi/scsi_host.h> #include "qlogicfas408.h" /*----------------------------------------------------------------*/ static int qlcfg5 = (XTALFREQ << 5); /* 15625/512 */ static int qlcfg6 = SYNCXFRPD; static int qlcfg7 = SYNCOFFST; static int qlcfg8 = (SLOWCABLE << 7) | (QL_ENABLE_PARITY << 4); static int qlcfg9 = ((XTALFREQ + 4) / 5); static int qlcfgc = (FASTCLK << 3) | (FASTSCSI << 4); /*----------------------------------------------------------------*/ /*----------------------------------------------------------------*/ /* local functions */ /*----------------------------------------------------------------*/ /* error recovery - reset everything */ static void ql_zap(struct qlogicfas408_priv *priv) { int x; int qbase = priv->qbase; int int_type = priv->int_type; x = inb(qbase + 0xd); REG0; outb(3, qbase + 3); /* reset SCSI */ outb(2, qbase + 3); /* reset chip */ if (x & 0x80) REG1; } /* * Do a pseudo-dma tranfer */ static int ql_pdma(struct qlogicfas408_priv *priv, int phase, char *request, int reqlen) { int j; int qbase = priv->qbase; j = 0; if (phase & 1) { /* in */ #if QL_TURBO_PDMA rtrc(4) /* empty fifo in large chunks */ if (reqlen >= 128 && (inb(qbase + 8) & 2)) { /* full */ insl(qbase + 4, request, 32); reqlen -= 128; request += 128; } while (reqlen >= 84 && !(j & 0xc0)) /* 2/3 */ if ((j = inb(qbase + 8)) & 4) { insl(qbase + 4, request, 21); reqlen -= 84; request += 84; } if (reqlen >= 44 && (inb(qbase + 8) & 8)) { /* 1/3 */ insl(qbase + 4, request, 11); reqlen -= 44; request += 44; } #endif /* until both empty and int (or until reclen is 0) */ rtrc(7) j = 
0; while (reqlen && !((j & 0x10) && (j & 0xc0))) { /* while bytes to receive and not empty */ j &= 0xc0; while (reqlen && !((j = inb(qbase + 8)) & 0x10)) { *request++ = inb(qbase + 4); reqlen--; } if (j & 0x10) j = inb(qbase + 8); } } else { /* out */ #if QL_TURBO_PDMA rtrc(4) if (reqlen >= 128 && inb(qbase + 8) & 0x10) { /* empty */ outsl(qbase + 4, request, 32); reqlen -= 128; request += 128; } while (reqlen >= 84 && !(j & 0xc0)) /* 1/3 */ if (!((j = inb(qbase + 8)) & 8)) { outsl(qbase + 4, request, 21); reqlen -= 84; request += 84; } if (reqlen >= 40 && !(inb(qbase + 8) & 4)) { /* 2/3 */ outsl(qbase + 4, request, 10); reqlen -= 40; request += 40; } #endif /* until full and int (or until reclen is 0) */ rtrc(7) j = 0; while (reqlen && !((j & 2) && (j & 0xc0))) { /* while bytes to send and not full */ while (reqlen && !((j = inb(qbase + 8)) & 2)) { outb(*request++, qbase + 4); reqlen--; } if (j & 2) j = inb(qbase + 8); } } /* maybe return reqlen */ return inb(qbase + 8) & 0xc0; } /* * Wait for interrupt flag (polled - not real hardware interrupt) */ static int ql_wai(struct qlogicfas408_priv *priv) { int k; int qbase = priv->qbase; unsigned long i; k = 0; i = jiffies + WATCHDOG; while (time_before(jiffies, i) && !priv->qabort && !((k = inb(qbase + 4)) & 0xe0)) { barrier(); cpu_relax(); } if (time_after_eq(jiffies, i)) return (DID_TIME_OUT); if (priv->qabort) return (priv->qabort == 1 ? 
DID_ABORT : DID_RESET); if (k & 0x60) ql_zap(priv); if (k & 0x20) return (DID_PARITY); if (k & 0x40) return (DID_ERROR); return 0; } /* * Initiate scsi command - queueing handler * caller must hold host lock */ static void ql_icmd(struct scsi_cmnd *cmd) { struct qlogicfas408_priv *priv = get_priv_by_cmd(cmd); int qbase = priv->qbase; int int_type = priv->int_type; unsigned int i; priv->qabort = 0; REG0; /* clearing of interrupts and the fifo is needed */ inb(qbase + 5); /* clear interrupts */ if (inb(qbase + 5)) /* if still interrupting */ outb(2, qbase + 3); /* reset chip */ else if (inb(qbase + 7) & 0x1f) outb(1, qbase + 3); /* clear fifo */ while (inb(qbase + 5)); /* clear ints */ REG1; outb(1, qbase + 8); /* set for PIO pseudo DMA */ outb(0, qbase + 0xb); /* disable ints */ inb(qbase + 8); /* clear int bits */ REG0; outb(0x40, qbase + 0xb); /* enable features */ /* configurables */ outb(qlcfgc, qbase + 0xc); /* config: no reset interrupt, (initiator) bus id */ outb(0x40 | qlcfg8 | priv->qinitid, qbase + 8); outb(qlcfg7, qbase + 7); outb(qlcfg6, qbase + 6); /**/ outb(qlcfg5, qbase + 5); /* select timer */ outb(qlcfg9 & 7, qbase + 9); /* prescaler */ /* outb(0x99, qbase + 5); */ outb(scmd_id(cmd), qbase + 4); for (i = 0; i < cmd->cmd_len; i++) outb(cmd->cmnd[i], qbase + 2); priv->qlcmd = cmd; outb(0x41, qbase + 3); /* select and send command */ } /* * Process scsi command - usually after interrupt */ static unsigned int ql_pcmd(struct scsi_cmnd *cmd) { unsigned int i, j; unsigned long k; unsigned int result; /* ultimate return result */ unsigned int status; /* scsi returned status */ unsigned int message; /* scsi returned message */ unsigned int phase; /* recorded scsi phase */ unsigned int reqlen; /* total length of transfer */ char *buf; struct qlogicfas408_priv *priv = get_priv_by_cmd(cmd); int qbase = priv->qbase; int int_type = priv->int_type; rtrc(1) j = inb(qbase + 6); i = inb(qbase + 5); if (i == 0x20) { return (DID_NO_CONNECT << 16); } i |= inb(qbase + 
5); /* the 0x10 bit can be set after the 0x08 */ if (i != 0x18) { printk(KERN_ERR "Ql:Bad Interrupt status:%02x\n", i); ql_zap(priv); return (DID_BAD_INTR << 16); } j &= 7; /* j = inb( qbase + 7 ) >> 5; */ /* correct status is supposed to be step 4 */ /* it sometimes returns step 3 but with 0 bytes left to send */ /* We can try stuffing the FIFO with the max each time, but we will get a sequence of 3 if any bytes are left (but we do flush the FIFO anyway */ if (j != 3 && j != 4) { printk(KERN_ERR "Ql:Bad sequence for command %d, int %02X, cmdleft = %d\n", j, i, inb(qbase + 7) & 0x1f); ql_zap(priv); return (DID_ERROR << 16); } result = DID_OK; if (inb(qbase + 7) & 0x1f) /* if some bytes in fifo */ outb(1, qbase + 3); /* clear fifo */ /* note that request_bufflen is the total xfer size when sg is used */ reqlen = scsi_bufflen(cmd); /* note that it won't work if transfers > 16M are requested */ if (reqlen && !((phase = inb(qbase + 4)) & 6)) { /* data phase */ struct scatterlist *sg; rtrc(2) outb(reqlen, qbase); /* low-mid xfer cnt */ outb(reqlen >> 8, qbase + 1); /* low-mid xfer cnt */ outb(reqlen >> 16, qbase + 0xe); /* high xfer cnt */ outb(0x90, qbase + 3); /* command do xfer */ /* PIO pseudo DMA to buffer or sglist */ REG1; scsi_for_each_sg(cmd, sg, scsi_sg_count(cmd), i) { if (priv->qabort) { REG0; return ((priv->qabort == 1 ? DID_ABORT : DID_RESET) << 16); } buf = sg_virt(sg); if (ql_pdma(priv, phase, buf, sg->length)) break; } REG0; rtrc(2) /* * Wait for irq (split into second state of irq handler * if this can take time) */ if ((k = ql_wai(priv))) return (k << 16); k = inb(qbase + 5); /* should be 0x10, bus service */ } /* * Enter Status (and Message In) Phase */ k = jiffies + WATCHDOG; while (time_before(jiffies, k) && !priv->qabort && !(inb(qbase + 4) & 6)) cpu_relax(); /* wait for status phase */ if (time_after_eq(jiffies, k)) { ql_zap(priv); return (DID_TIME_OUT << 16); } /* FIXME: timeout ?? 
*/ while (inb(qbase + 5)) cpu_relax(); /* clear pending ints */ if (priv->qabort) return ((priv->qabort == 1 ? DID_ABORT : DID_RESET) << 16); outb(0x11, qbase + 3); /* get status and message */ if ((k = ql_wai(priv))) return (k << 16); i = inb(qbase + 5); /* get chip irq stat */ j = inb(qbase + 7) & 0x1f; /* and bytes rec'd */ status = inb(qbase + 2); message = inb(qbase + 2); /* * Should get function complete int if Status and message, else * bus serv if only status */ if (!((i == 8 && j == 2) || (i == 0x10 && j == 1))) { printk(KERN_ERR "Ql:Error during status phase, int=%02X, %d bytes recd\n", i, j); result = DID_ERROR; } outb(0x12, qbase + 3); /* done, disconnect */ rtrc(1) if ((k = ql_wai(priv))) return (k << 16); /* * Should get bus service interrupt and disconnect interrupt */ i = inb(qbase + 5); /* should be bus service */ while (!priv->qabort && ((i & 0x20) != 0x20)) { barrier(); cpu_relax(); i |= inb(qbase + 5); } rtrc(0) if (priv->qabort) return ((priv->qabort == 1 ? DID_ABORT : DID_RESET) << 16); return (result << 16) | (message << 8) | (status & STATUS_MASK); } /* * Interrupt handler */ static void ql_ihandl(void *dev_id) { struct scsi_cmnd *icmd; struct Scsi_Host *host = dev_id; struct qlogicfas408_priv *priv = get_priv_by_host(host); int qbase = priv->qbase; REG0; if (!(inb(qbase + 4) & 0x80)) /* false alarm? */ return; if (priv->qlcmd == NULL) { /* no command to process? 
*/ int i; i = 16; while (i-- && inb(qbase + 5)); /* maybe also ql_zap() */ return; } icmd = priv->qlcmd; icmd->result = ql_pcmd(icmd); priv->qlcmd = NULL; /* * If result is CHECK CONDITION done calls qcommand to request * sense */ (icmd->scsi_done) (icmd); } irqreturn_t qlogicfas408_ihandl(int irq, void *dev_id) { unsigned long flags; struct Scsi_Host *host = dev_id; spin_lock_irqsave(host->host_lock, flags); ql_ihandl(dev_id); spin_unlock_irqrestore(host->host_lock, flags); return IRQ_HANDLED; } /* * Queued command */ static int qlogicfas408_queuecommand_lck(struct scsi_cmnd *cmd, void (*done) (struct scsi_cmnd *)) { struct qlogicfas408_priv *priv = get_priv_by_cmd(cmd); if (scmd_id(cmd) == priv->qinitid) { cmd->result = DID_BAD_TARGET << 16; done(cmd); return 0; } cmd->scsi_done = done; /* wait for the last command's interrupt to finish */ while (priv->qlcmd != NULL) { barrier(); cpu_relax(); } ql_icmd(cmd); return 0; } DEF_SCSI_QCMD(qlogicfas408_queuecommand) /* * Return bios parameters */ int qlogicfas408_biosparam(struct scsi_device *disk, struct block_device *dev, sector_t capacity, int ip[]) { /* This should mimic the DOS Qlogic driver's behavior exactly */ ip[0] = 0x40; ip[1] = 0x20; ip[2] = (unsigned long) capacity / (ip[0] * ip[1]); if (ip[2] > 1024) { ip[0] = 0xff; ip[1] = 0x3f; ip[2] = (unsigned long) capacity / (ip[0] * ip[1]); #if 0 if (ip[2] > 1023) ip[2] = 1023; #endif } return 0; } /* * Abort a command in progress */ int qlogicfas408_abort(struct scsi_cmnd *cmd) { struct qlogicfas408_priv *priv = get_priv_by_cmd(cmd); priv->qabort = 1; ql_zap(priv); return SUCCESS; } /* * Reset SCSI bus * FIXME: This function is invoked with cmd = NULL directly by * the PCMCIA qlogic_stub code. 
This wants fixing */ int qlogicfas408_bus_reset(struct scsi_cmnd *cmd) { struct qlogicfas408_priv *priv = get_priv_by_cmd(cmd); unsigned long flags; priv->qabort = 2; spin_lock_irqsave(cmd->device->host->host_lock, flags); ql_zap(priv); spin_unlock_irqrestore(cmd->device->host->host_lock, flags); return SUCCESS; } /* * Return info string */ const char *qlogicfas408_info(struct Scsi_Host *host) { struct qlogicfas408_priv *priv = get_priv_by_host(host); return priv->qinfo; } /* * Get type of chip */ int qlogicfas408_get_chip_type(int qbase, int int_type) { REG1; return inb(qbase + 0xe) & 0xf8; } /* * Perform initialization tasks */ void qlogicfas408_setup(int qbase, int id, int int_type) { outb(1, qbase + 8); /* set for PIO pseudo DMA */ REG0; outb(0x40 | qlcfg8 | id, qbase + 8); /* (ini) bus id, disable scsi rst */ outb(qlcfg5, qbase + 5); /* select timer */ outb(qlcfg9, qbase + 9); /* prescaler */ #if QL_RESET_AT_START outb(3, qbase + 3); REG1; /* FIXME: timeout */ while (inb(qbase + 0xf) & 4) cpu_relax(); REG0; #endif } /* * Checks if this is a QLogic FAS 408 */ int qlogicfas408_detect(int qbase, int int_type) { REG1; return (((inb(qbase + 0xe) ^ inb(qbase + 0xe)) == 7) && ((inb(qbase + 0xe) ^ inb(qbase + 0xe)) == 7)); } /* * Disable interrupts */ void qlogicfas408_disable_ints(struct qlogicfas408_priv *priv) { int qbase = priv->qbase; int int_type = priv->int_type; REG1; outb(0, qbase + 0xb); /* disable ints */ } /* * Init and exit functions */ static int __init qlogicfas408_init(void) { return 0; } static void __exit qlogicfas408_exit(void) { } MODULE_AUTHOR("Tom Zerucha, Michael Griffith"); MODULE_DESCRIPTION("Driver for the Qlogic FAS SCSI controllers"); MODULE_LICENSE("GPL"); module_init(qlogicfas408_init); module_exit(qlogicfas408_exit); EXPORT_SYMBOL(qlogicfas408_info); EXPORT_SYMBOL(qlogicfas408_queuecommand); EXPORT_SYMBOL(qlogicfas408_abort); EXPORT_SYMBOL(qlogicfas408_bus_reset); EXPORT_SYMBOL(qlogicfas408_biosparam); EXPORT_SYMBOL(qlogicfas408_ihandl); 
EXPORT_SYMBOL(qlogicfas408_get_chip_type); EXPORT_SYMBOL(qlogicfas408_setup); EXPORT_SYMBOL(qlogicfas408_detect); EXPORT_SYMBOL(qlogicfas408_disable_ints);
gpl-2.0
budi79/deka-kernel-msm7x30-3.0
drivers/video/aty/mach64_gx.c
14709
20884
/* * ATI Mach64 GX Support */ #include <linux/delay.h> #include <linux/fb.h> #include <asm/io.h> #include <video/mach64.h> #include "atyfb.h" /* Definitions for the ICS 2595 == ATI 18818_1 Clockchip */ #define REF_FREQ_2595 1432 /* 14.33 MHz (exact 14.31818) */ #define REF_DIV_2595 46 /* really 43 on ICS 2595 !!! */ /* ohne Prescaler */ #define MAX_FREQ_2595 15938 /* 159.38 MHz (really 170.486) */ #define MIN_FREQ_2595 8000 /* 80.00 MHz ( 85.565) */ /* mit Prescaler 2, 4, 8 */ #define ABS_MIN_FREQ_2595 1000 /* 10.00 MHz (really 10.697) */ #define N_ADJ_2595 257 #define STOP_BITS_2595 0x1800 #define MIN_N_408 2 #define MIN_N_1703 6 #define MIN_M 2 #define MAX_M 30 #define MIN_N 35 #define MAX_N 255-8 /* * Support Functions */ static void aty_dac_waste4(const struct atyfb_par *par) { (void) aty_ld_8(DAC_REGS, par); (void) aty_ld_8(DAC_REGS + 2, par); (void) aty_ld_8(DAC_REGS + 2, par); (void) aty_ld_8(DAC_REGS + 2, par); (void) aty_ld_8(DAC_REGS + 2, par); } static void aty_StrobeClock(const struct atyfb_par *par) { u8 tmp; udelay(26); tmp = aty_ld_8(CLOCK_CNTL, par); aty_st_8(CLOCK_CNTL + par->clk_wr_offset, tmp | CLOCK_STROBE, par); return; } /* * IBM RGB514 DAC and Clock Chip */ static void aty_st_514(int offset, u8 val, const struct atyfb_par *par) { aty_st_8(DAC_CNTL, 1, par); /* right addr byte */ aty_st_8(DAC_W_INDEX, offset & 0xff, par); /* left addr byte */ aty_st_8(DAC_DATA, (offset >> 8) & 0xff, par); aty_st_8(DAC_MASK, val, par); aty_st_8(DAC_CNTL, 0, par); } static int aty_set_dac_514(const struct fb_info *info, const union aty_pll *pll, u32 bpp, u32 accel) { struct atyfb_par *par = (struct atyfb_par *) info->par; static struct { u8 pixel_dly; u8 misc2_cntl; u8 pixel_rep; u8 pixel_cntl_index; u8 pixel_cntl_v1; } tab[3] = { { 0, 0x41, 0x03, 0x71, 0x45}, /* 8 bpp */ { 0, 0x45, 0x04, 0x0c, 0x01}, /* 555 */ { 0, 0x45, 0x06, 0x0e, 0x00}, /* XRGB */ }; int i; switch (bpp) { case 8: default: i = 0; break; case 16: i = 1; break; case 32: i = 2; break; } 
aty_st_514(0x90, 0x00, par); /* VRAM Mask Low */ aty_st_514(0x04, tab[i].pixel_dly, par); /* Horizontal Sync Control */ aty_st_514(0x05, 0x00, par); /* Power Management */ aty_st_514(0x02, 0x01, par); /* Misc Clock Control */ aty_st_514(0x71, tab[i].misc2_cntl, par); /* Misc Control 2 */ aty_st_514(0x0a, tab[i].pixel_rep, par); /* Pixel Format */ aty_st_514(tab[i].pixel_cntl_index, tab[i].pixel_cntl_v1, par); /* Misc Control 2 / 16 BPP Control / 32 BPP Control */ return 0; } static int aty_var_to_pll_514(const struct fb_info *info, u32 vclk_per, u32 bpp, union aty_pll *pll) { /* * FIXME: use real calculations instead of using fixed values from the old * driver */ static struct { u32 limit; /* pixlock rounding limit (arbitrary) */ u8 m; /* (df<<6) | vco_div_count */ u8 n; /* ref_div_count */ } RGB514_clocks[7] = { { 8000, (3 << 6) | 20, 9}, /* 7395 ps / 135.2273 MHz */ { 10000, (1 << 6) | 19, 3}, /* 9977 ps / 100.2273 MHz */ { 13000, (1 << 6) | 2, 3}, /* 12509 ps / 79.9432 MHz */ { 14000, (2 << 6) | 8, 7}, /* 13394 ps / 74.6591 MHz */ { 16000, (1 << 6) | 44, 6}, /* 15378 ps / 65.0284 MHz */ { 25000, (1 << 6) | 15, 5}, /* 17460 ps / 57.2727 MHz */ { 50000, (0 << 6) | 53, 7}, /* 33145 ps / 30.1705 MHz */ }; int i; for (i = 0; i < ARRAY_SIZE(RGB514_clocks); i++) if (vclk_per <= RGB514_clocks[i].limit) { pll->ibm514.m = RGB514_clocks[i].m; pll->ibm514.n = RGB514_clocks[i].n; return 0; } return -EINVAL; } static u32 aty_pll_514_to_var(const struct fb_info *info, const union aty_pll *pll) { struct atyfb_par *par = (struct atyfb_par *) info->par; u8 df, vco_div_count, ref_div_count; df = pll->ibm514.m >> 6; vco_div_count = pll->ibm514.m & 0x3f; ref_div_count = pll->ibm514.n; return ((par->ref_clk_per * ref_div_count) << (3 - df))/ (vco_div_count + 65); } static void aty_set_pll_514(const struct fb_info *info, const union aty_pll *pll) { struct atyfb_par *par = (struct atyfb_par *) info->par; aty_st_514(0x06, 0x02, par); /* DAC Operation */ aty_st_514(0x10, 0x01, par); /* 
PLL Control 1 */ aty_st_514(0x70, 0x01, par); /* Misc Control 1 */ aty_st_514(0x8f, 0x1f, par); /* PLL Ref. Divider Input */ aty_st_514(0x03, 0x00, par); /* Sync Control */ aty_st_514(0x05, 0x00, par); /* Power Management */ aty_st_514(0x20, pll->ibm514.m, par); /* F0 / M0 */ aty_st_514(0x21, pll->ibm514.n, par); /* F1 / N0 */ } const struct aty_dac_ops aty_dac_ibm514 = { .set_dac = aty_set_dac_514, }; const struct aty_pll_ops aty_pll_ibm514 = { .var_to_pll = aty_var_to_pll_514, .pll_to_var = aty_pll_514_to_var, .set_pll = aty_set_pll_514, }; /* * ATI 68860-B DAC */ static int aty_set_dac_ATI68860_B(const struct fb_info *info, const union aty_pll *pll, u32 bpp, u32 accel) { struct atyfb_par *par = (struct atyfb_par *) info->par; u32 gModeReg, devSetupRegA, temp, mask; gModeReg = 0; devSetupRegA = 0; switch (bpp) { case 8: gModeReg = 0x83; devSetupRegA = 0x60 | 0x00 /*(info->mach64DAC8Bit ? 0x00 : 0x01) */ ; break; case 15: gModeReg = 0xA0; devSetupRegA = 0x60; break; case 16: gModeReg = 0xA1; devSetupRegA = 0x60; break; case 24: gModeReg = 0xC0; devSetupRegA = 0x60; break; case 32: gModeReg = 0xE3; devSetupRegA = 0x60; break; } if (!accel) { gModeReg = 0x80; devSetupRegA = 0x61; } temp = aty_ld_8(DAC_CNTL, par); aty_st_8(DAC_CNTL, (temp & ~DAC_EXT_SEL_RS2) | DAC_EXT_SEL_RS3, par); aty_st_8(DAC_REGS + 2, 0x1D, par); aty_st_8(DAC_REGS + 3, gModeReg, par); aty_st_8(DAC_REGS, 0x02, par); temp = aty_ld_8(DAC_CNTL, par); aty_st_8(DAC_CNTL, temp | DAC_EXT_SEL_RS2 | DAC_EXT_SEL_RS3, par); if (info->fix.smem_len < ONE_MB) mask = 0x04; else if (info->fix.smem_len == ONE_MB) mask = 0x08; else mask = 0x0C; /* The following assumes that the BIOS has correctly set R7 of the * Device Setup Register A at boot time. 
*/ #define A860_DELAY_L 0x80 temp = aty_ld_8(DAC_REGS, par); aty_st_8(DAC_REGS, (devSetupRegA | mask) | (temp & A860_DELAY_L), par); temp = aty_ld_8(DAC_CNTL, par); aty_st_8(DAC_CNTL, (temp & ~(DAC_EXT_SEL_RS2 | DAC_EXT_SEL_RS3)), par); aty_st_le32(BUS_CNTL, 0x890e20f1, par); aty_st_le32(DAC_CNTL, 0x47052100, par); return 0; } const struct aty_dac_ops aty_dac_ati68860b = { .set_dac = aty_set_dac_ATI68860_B, }; /* * AT&T 21C498 DAC */ static int aty_set_dac_ATT21C498(const struct fb_info *info, const union aty_pll *pll, u32 bpp, u32 accel) { struct atyfb_par *par = (struct atyfb_par *) info->par; u32 dotClock; int muxmode = 0; int DACMask = 0; dotClock = 100000000 / pll->ics2595.period_in_ps; switch (bpp) { case 8: if (dotClock > 8000) { DACMask = 0x24; muxmode = 1; } else DACMask = 0x04; break; case 15: DACMask = 0x16; break; case 16: DACMask = 0x36; break; case 24: DACMask = 0xE6; break; case 32: DACMask = 0xE6; break; } if (1 /* info->mach64DAC8Bit */ ) DACMask |= 0x02; aty_dac_waste4(par); aty_st_8(DAC_REGS + 2, DACMask, par); aty_st_le32(BUS_CNTL, 0x890e20f1, par); aty_st_le32(DAC_CNTL, 0x00072000, par); return muxmode; } const struct aty_dac_ops aty_dac_att21c498 = { .set_dac = aty_set_dac_ATT21C498, }; /* * ATI 18818 / ICS 2595 Clock Chip */ static int aty_var_to_pll_18818(const struct fb_info *info, u32 vclk_per, u32 bpp, union aty_pll *pll) { u32 MHz100; /* in 0.01 MHz */ u32 program_bits; u32 post_divider; /* Calculate the programming word */ MHz100 = 100000000 / vclk_per; program_bits = -1; post_divider = 1; if (MHz100 > MAX_FREQ_2595) { MHz100 = MAX_FREQ_2595; return -EINVAL; } else if (MHz100 < ABS_MIN_FREQ_2595) { program_bits = 0; /* MHz100 = 257 */ return -EINVAL; } else { while (MHz100 < MIN_FREQ_2595) { MHz100 *= 2; post_divider *= 2; } } MHz100 *= 1000; MHz100 = (REF_DIV_2595 * MHz100) / REF_FREQ_2595; MHz100 += 500; /* + 0.5 round */ MHz100 /= 1000; if (program_bits == -1) { program_bits = MHz100 - N_ADJ_2595; switch (post_divider) { case 1: 
program_bits |= 0x0600; break; case 2: program_bits |= 0x0400; break; case 4: program_bits |= 0x0200; break; case 8: default: break; } } program_bits |= STOP_BITS_2595; pll->ics2595.program_bits = program_bits; pll->ics2595.locationAddr = 0; pll->ics2595.post_divider = post_divider; pll->ics2595.period_in_ps = vclk_per; return 0; } static u32 aty_pll_18818_to_var(const struct fb_info *info, const union aty_pll *pll) { return (pll->ics2595.period_in_ps); /* default for now */ } static void aty_ICS2595_put1bit(u8 data, const struct atyfb_par *par) { u8 tmp; data &= 0x01; tmp = aty_ld_8(CLOCK_CNTL, par); aty_st_8(CLOCK_CNTL + par->clk_wr_offset, (tmp & ~0x04) | (data << 2), par); tmp = aty_ld_8(CLOCK_CNTL, par); aty_st_8(CLOCK_CNTL + par->clk_wr_offset, (tmp & ~0x08) | (0 << 3), par); aty_StrobeClock(par); tmp = aty_ld_8(CLOCK_CNTL, par); aty_st_8(CLOCK_CNTL + par->clk_wr_offset, (tmp & ~0x08) | (1 << 3), par); aty_StrobeClock(par); return; } static void aty_set_pll18818(const struct fb_info *info, const union aty_pll *pll) { struct atyfb_par *par = (struct atyfb_par *) info->par; u32 program_bits; u32 locationAddr; u32 i; u8 old_clock_cntl; u8 old_crtc_ext_disp; old_clock_cntl = aty_ld_8(CLOCK_CNTL, par); aty_st_8(CLOCK_CNTL + par->clk_wr_offset, 0, par); old_crtc_ext_disp = aty_ld_8(CRTC_GEN_CNTL + 3, par); aty_st_8(CRTC_GEN_CNTL + 3, old_crtc_ext_disp | (CRTC_EXT_DISP_EN >> 24), par); mdelay(15); /* delay for 50 (15) ms */ program_bits = pll->ics2595.program_bits; locationAddr = pll->ics2595.locationAddr; /* Program the clock chip */ aty_st_8(CLOCK_CNTL + par->clk_wr_offset, 0, par); /* Strobe = 0 */ aty_StrobeClock(par); aty_st_8(CLOCK_CNTL + par->clk_wr_offset, 1, par); /* Strobe = 0 */ aty_StrobeClock(par); aty_ICS2595_put1bit(1, par); /* Send start bits */ aty_ICS2595_put1bit(0, par); /* Start bit */ aty_ICS2595_put1bit(0, par); /* Read / ~Write */ for (i = 0; i < 5; i++) { /* Location 0..4 */ aty_ICS2595_put1bit(locationAddr & 1, par); locationAddr >>= 1; } 
for (i = 0; i < 8 + 1 + 2 + 2; i++) { aty_ICS2595_put1bit(program_bits & 1, par); program_bits >>= 1; } mdelay(1); /* delay for 1 ms */ (void) aty_ld_8(DAC_REGS, par); /* Clear DAC Counter */ aty_st_8(CRTC_GEN_CNTL + 3, old_crtc_ext_disp, par); aty_st_8(CLOCK_CNTL + par->clk_wr_offset, old_clock_cntl | CLOCK_STROBE, par); mdelay(50); /* delay for 50 (15) ms */ aty_st_8(CLOCK_CNTL + par->clk_wr_offset, ((pll->ics2595.locationAddr & 0x0F) | CLOCK_STROBE), par); return; } const struct aty_pll_ops aty_pll_ati18818_1 = { .var_to_pll = aty_var_to_pll_18818, .pll_to_var = aty_pll_18818_to_var, .set_pll = aty_set_pll18818, }; /* * STG 1703 Clock Chip */ static int aty_var_to_pll_1703(const struct fb_info *info, u32 vclk_per, u32 bpp, union aty_pll *pll) { u32 mhz100; /* in 0.01 MHz */ u32 program_bits; /* u32 post_divider; */ u32 mach64MinFreq, mach64MaxFreq, mach64RefFreq; u32 temp, tempB; u16 remainder, preRemainder; short divider = 0, tempA; /* Calculate the programming word */ mhz100 = 100000000 / vclk_per; mach64MinFreq = MIN_FREQ_2595; mach64MaxFreq = MAX_FREQ_2595; mach64RefFreq = REF_FREQ_2595; /* 14.32 MHz */ /* Calculate program word */ if (mhz100 == 0) program_bits = 0xE0; else { if (mhz100 < mach64MinFreq) mhz100 = mach64MinFreq; if (mhz100 > mach64MaxFreq) mhz100 = mach64MaxFreq; divider = 0; while (mhz100 < (mach64MinFreq << 3)) { mhz100 <<= 1; divider += 0x20; } temp = (unsigned int) (mhz100); temp = (unsigned int) (temp * (MIN_N_1703 + 2)); temp -= (short) (mach64RefFreq << 1); tempA = MIN_N_1703; preRemainder = 0xffff; do { tempB = temp; remainder = tempB % mach64RefFreq; tempB = tempB / mach64RefFreq; if ((tempB & 0xffff) <= 127 && (remainder <= preRemainder)) { preRemainder = remainder; divider &= ~0x1f; divider |= tempA; divider = (divider & 0x00ff) + ((tempB & 0xff) << 8); } temp += mhz100; tempA++; } while (tempA <= (MIN_N_1703 << 1)); program_bits = divider; } pll->ics2595.program_bits = program_bits; pll->ics2595.locationAddr = 0; 
pll->ics2595.post_divider = divider; /* fuer nix */ pll->ics2595.period_in_ps = vclk_per; return 0; } static u32 aty_pll_1703_to_var(const struct fb_info *info, const union aty_pll *pll) { return (pll->ics2595.period_in_ps); /* default for now */ } static void aty_set_pll_1703(const struct fb_info *info, const union aty_pll *pll) { struct atyfb_par *par = (struct atyfb_par *) info->par; u32 program_bits; u32 locationAddr; char old_crtc_ext_disp; old_crtc_ext_disp = aty_ld_8(CRTC_GEN_CNTL + 3, par); aty_st_8(CRTC_GEN_CNTL + 3, old_crtc_ext_disp | (CRTC_EXT_DISP_EN >> 24), par); program_bits = pll->ics2595.program_bits; locationAddr = pll->ics2595.locationAddr; /* Program clock */ aty_dac_waste4(par); (void) aty_ld_8(DAC_REGS + 2, par); aty_st_8(DAC_REGS + 2, (locationAddr << 1) + 0x20, par); aty_st_8(DAC_REGS + 2, 0, par); aty_st_8(DAC_REGS + 2, (program_bits & 0xFF00) >> 8, par); aty_st_8(DAC_REGS + 2, (program_bits & 0xFF), par); (void) aty_ld_8(DAC_REGS, par); /* Clear DAC Counter */ aty_st_8(CRTC_GEN_CNTL + 3, old_crtc_ext_disp, par); return; } const struct aty_pll_ops aty_pll_stg1703 = { .var_to_pll = aty_var_to_pll_1703, .pll_to_var = aty_pll_1703_to_var, .set_pll = aty_set_pll_1703, }; /* * Chrontel 8398 Clock Chip */ static int aty_var_to_pll_8398(const struct fb_info *info, u32 vclk_per, u32 bpp, union aty_pll *pll) { u32 tempA, tempB, fOut, longMHz100, diff, preDiff; u32 mhz100; /* in 0.01 MHz */ u32 program_bits; /* u32 post_divider; */ u32 mach64MinFreq, mach64MaxFreq, mach64RefFreq; u16 m, n, k = 0, save_m, save_n, twoToKth; /* Calculate the programming word */ mhz100 = 100000000 / vclk_per; mach64MinFreq = MIN_FREQ_2595; mach64MaxFreq = MAX_FREQ_2595; mach64RefFreq = REF_FREQ_2595; /* 14.32 MHz */ save_m = 0; save_n = 0; /* Calculate program word */ if (mhz100 == 0) program_bits = 0xE0; else { if (mhz100 < mach64MinFreq) mhz100 = mach64MinFreq; if (mhz100 > mach64MaxFreq) mhz100 = mach64MaxFreq; longMHz100 = mhz100 * 256 / 100; /* 8 bit scale this */ 
while (mhz100 < (mach64MinFreq << 3)) { mhz100 <<= 1; k++; } twoToKth = 1 << k; diff = 0; preDiff = 0xFFFFFFFF; for (m = MIN_M; m <= MAX_M; m++) { for (n = MIN_N; n <= MAX_N; n++) { tempA = 938356; /* 14.31818 * 65536 */ tempA *= (n + 8); /* 43..256 */ tempB = twoToKth * 256; tempB *= (m + 2); /* 4..32 */ fOut = tempA / tempB; /* 8 bit scale */ if (longMHz100 > fOut) diff = longMHz100 - fOut; else diff = fOut - longMHz100; if (diff < preDiff) { save_m = m; save_n = n; preDiff = diff; } } } program_bits = (k << 6) + (save_m) + (save_n << 8); } pll->ics2595.program_bits = program_bits; pll->ics2595.locationAddr = 0; pll->ics2595.post_divider = 0; pll->ics2595.period_in_ps = vclk_per; return 0; } static u32 aty_pll_8398_to_var(const struct fb_info *info, const union aty_pll *pll) { return (pll->ics2595.period_in_ps); /* default for now */ } static void aty_set_pll_8398(const struct fb_info *info, const union aty_pll *pll) { struct atyfb_par *par = (struct atyfb_par *) info->par; u32 program_bits; u32 locationAddr; char old_crtc_ext_disp; char tmp; old_crtc_ext_disp = aty_ld_8(CRTC_GEN_CNTL + 3, par); aty_st_8(CRTC_GEN_CNTL + 3, old_crtc_ext_disp | (CRTC_EXT_DISP_EN >> 24), par); program_bits = pll->ics2595.program_bits; locationAddr = pll->ics2595.locationAddr; /* Program clock */ tmp = aty_ld_8(DAC_CNTL, par); aty_st_8(DAC_CNTL, tmp | DAC_EXT_SEL_RS2 | DAC_EXT_SEL_RS3, par); aty_st_8(DAC_REGS, locationAddr, par); aty_st_8(DAC_REGS + 1, (program_bits & 0xff00) >> 8, par); aty_st_8(DAC_REGS + 1, (program_bits & 0xff), par); tmp = aty_ld_8(DAC_CNTL, par); aty_st_8(DAC_CNTL, (tmp & ~DAC_EXT_SEL_RS2) | DAC_EXT_SEL_RS3, par); (void) aty_ld_8(DAC_REGS, par); /* Clear DAC Counter */ aty_st_8(CRTC_GEN_CNTL + 3, old_crtc_ext_disp, par); return; } const struct aty_pll_ops aty_pll_ch8398 = { .var_to_pll = aty_var_to_pll_8398, .pll_to_var = aty_pll_8398_to_var, .set_pll = aty_set_pll_8398, }; /* * AT&T 20C408 Clock Chip */ static int aty_var_to_pll_408(const struct fb_info *info, 
u32 vclk_per, u32 bpp, union aty_pll *pll) { u32 mhz100; /* in 0.01 MHz */ u32 program_bits; /* u32 post_divider; */ u32 mach64MinFreq, mach64MaxFreq, mach64RefFreq; u32 temp, tempB; u16 remainder, preRemainder; short divider = 0, tempA; /* Calculate the programming word */ mhz100 = 100000000 / vclk_per; mach64MinFreq = MIN_FREQ_2595; mach64MaxFreq = MAX_FREQ_2595; mach64RefFreq = REF_FREQ_2595; /* 14.32 MHz */ /* Calculate program word */ if (mhz100 == 0) program_bits = 0xFF; else { if (mhz100 < mach64MinFreq) mhz100 = mach64MinFreq; if (mhz100 > mach64MaxFreq) mhz100 = mach64MaxFreq; while (mhz100 < (mach64MinFreq << 3)) { mhz100 <<= 1; divider += 0x40; } temp = (unsigned int) mhz100; temp = (unsigned int) (temp * (MIN_N_408 + 2)); temp -= ((short) (mach64RefFreq << 1)); tempA = MIN_N_408; preRemainder = 0xFFFF; do { tempB = temp; remainder = tempB % mach64RefFreq; tempB = tempB / mach64RefFreq; if (((tempB & 0xFFFF) <= 255) && (remainder <= preRemainder)) { preRemainder = remainder; divider &= ~0x3f; divider |= tempA; divider = (divider & 0x00FF) + ((tempB & 0xFF) << 8); } temp += mhz100; tempA++; } while (tempA <= 32); program_bits = divider; } pll->ics2595.program_bits = program_bits; pll->ics2595.locationAddr = 0; pll->ics2595.post_divider = divider; /* fuer nix */ pll->ics2595.period_in_ps = vclk_per; return 0; } static u32 aty_pll_408_to_var(const struct fb_info *info, const union aty_pll *pll) { return (pll->ics2595.period_in_ps); /* default for now */ } static void aty_set_pll_408(const struct fb_info *info, const union aty_pll *pll) { struct atyfb_par *par = (struct atyfb_par *) info->par; u32 program_bits; u32 locationAddr; u8 tmpA, tmpB, tmpC; char old_crtc_ext_disp; old_crtc_ext_disp = aty_ld_8(CRTC_GEN_CNTL + 3, par); aty_st_8(CRTC_GEN_CNTL + 3, old_crtc_ext_disp | (CRTC_EXT_DISP_EN >> 24), par); program_bits = pll->ics2595.program_bits; locationAddr = pll->ics2595.locationAddr; /* Program clock */ aty_dac_waste4(par); tmpB = aty_ld_8(DAC_REGS + 2, 
par) | 1; aty_dac_waste4(par); aty_st_8(DAC_REGS + 2, tmpB, par); tmpA = tmpB; tmpC = tmpA; tmpA |= 8; tmpB = 1; aty_st_8(DAC_REGS, tmpB, par); aty_st_8(DAC_REGS + 2, tmpA, par); udelay(400); /* delay for 400 us */ locationAddr = (locationAddr << 2) + 0x40; tmpB = locationAddr; tmpA = program_bits >> 8; aty_st_8(DAC_REGS, tmpB, par); aty_st_8(DAC_REGS + 2, tmpA, par); tmpB = locationAddr + 1; tmpA = (u8) program_bits; aty_st_8(DAC_REGS, tmpB, par); aty_st_8(DAC_REGS + 2, tmpA, par); tmpB = locationAddr + 2; tmpA = 0x77; aty_st_8(DAC_REGS, tmpB, par); aty_st_8(DAC_REGS + 2, tmpA, par); udelay(400); /* delay for 400 us */ tmpA = tmpC & (~(1 | 8)); tmpB = 1; aty_st_8(DAC_REGS, tmpB, par); aty_st_8(DAC_REGS + 2, tmpA, par); (void) aty_ld_8(DAC_REGS, par); /* Clear DAC Counter */ aty_st_8(CRTC_GEN_CNTL + 3, old_crtc_ext_disp, par); return; } const struct aty_pll_ops aty_pll_att20c408 = { .var_to_pll = aty_var_to_pll_408, .pll_to_var = aty_pll_408_to_var, .set_pll = aty_set_pll_408, }; /* * Unsupported DAC and Clock Chip */ static int aty_set_dac_unsupported(const struct fb_info *info, const union aty_pll *pll, u32 bpp, u32 accel) { struct atyfb_par *par = (struct atyfb_par *) info->par; aty_st_le32(BUS_CNTL, 0x890e20f1, par); aty_st_le32(DAC_CNTL, 0x47052100, par); /* new in 2.2.3p1 from Geert. ???????? */ aty_st_le32(BUS_CNTL, 0x590e10ff, par); aty_st_le32(DAC_CNTL, 0x47012100, par); return 0; } static int dummy(void) { return 0; } const struct aty_dac_ops aty_dac_unsupported = { .set_dac = aty_set_dac_unsupported, }; const struct aty_pll_ops aty_pll_unsupported = { .var_to_pll = (void *) dummy, .pll_to_var = (void *) dummy, .set_pll = (void *) dummy, };
gpl-2.0
niker/elitekernel_oxp_kk
fs/exfat/exfat_blkdev.c
374
6060
/* * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ /************************************************************************/ /* */ /* PROJECT : exFAT & FAT12/16/32 File System */ /* FILE : exfat_blkdev.c */ /* PURPOSE : exFAT Block Device Driver Glue Layer */ /* */ /*----------------------------------------------------------------------*/ /* NOTES */ /* */ /*----------------------------------------------------------------------*/ /* REVISION HISTORY (Ver 0.9) */ /* */ /* - 2010.11.15 [Joosun Hahn] : first writing */ /* */ /************************************************************************/ #include <linux/blkdev.h> #include <linux/log2.h> #include "exfat_config.h" #include "exfat_blkdev.h" #include "exfat_data.h" #include "exfat_api.h" #include "exfat_super.h" /*----------------------------------------------------------------------*/ /* Constant & Macro Definitions */ /*----------------------------------------------------------------------*/ /*----------------------------------------------------------------------*/ /* Global Variable Definitions */ /*----------------------------------------------------------------------*/ /*----------------------------------------------------------------------*/ /* Local Variable Definitions */ 
/*----------------------------------------------------------------------*/ /*======================================================================*/ /* Function Definitions */ /*======================================================================*/ s32 bdev_init(void) { return FFS_SUCCESS; } s32 bdev_shutdown(void) { return FFS_SUCCESS; } s32 bdev_open(struct super_block *sb) { BD_INFO_T *p_bd = &(EXFAT_SB(sb)->bd_info); if (p_bd->opened) return FFS_SUCCESS; p_bd->sector_size = bdev_logical_block_size(sb->s_bdev); p_bd->sector_size_bits = ilog2(p_bd->sector_size); p_bd->sector_size_mask = p_bd->sector_size - 1; p_bd->num_sectors = i_size_read(sb->s_bdev->bd_inode) >> p_bd->sector_size_bits; p_bd->opened = TRUE; return FFS_SUCCESS; } s32 bdev_close(struct super_block *sb) { BD_INFO_T *p_bd = &(EXFAT_SB(sb)->bd_info); if (!p_bd->opened) return FFS_SUCCESS; p_bd->opened = FALSE; return FFS_SUCCESS; } s32 bdev_read(struct super_block *sb, u32 secno, struct buffer_head **bh, u32 num_secs, s32 read) { BD_INFO_T *p_bd = &(EXFAT_SB(sb)->bd_info); FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); #ifdef CONFIG_EXFAT_KERNEL_DEBUG struct exfat_sb_info *sbi = EXFAT_SB(sb); long flags = sbi->debug_flags; if (flags & EXFAT_DEBUGFLAGS_ERROR_RW) return FFS_MEDIAERR; #endif /* CONFIG_EXFAT_KERNEL_DEBUG */ if (!p_bd->opened) return FFS_MEDIAERR; if (*bh) __brelse(*bh); if (read) *bh = __bread(sb->s_bdev, secno, num_secs << p_bd->sector_size_bits); else *bh = __getblk(sb->s_bdev, secno, num_secs << p_bd->sector_size_bits); if (*bh) return FFS_SUCCESS; WARN(!p_fs->dev_ejected, "[EXFAT] No bh, device seems wrong or to be ejected.\n"); return FFS_MEDIAERR; } s32 bdev_write(struct super_block *sb, u32 secno, struct buffer_head *bh, u32 num_secs, s32 sync) { s32 count; struct buffer_head *bh2; BD_INFO_T *p_bd = &(EXFAT_SB(sb)->bd_info); FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); #ifdef CONFIG_EXFAT_KERNEL_DEBUG struct exfat_sb_info *sbi = EXFAT_SB(sb); long flags = sbi->debug_flags; if 
(flags & EXFAT_DEBUGFLAGS_ERROR_RW) return FFS_MEDIAERR; #endif /* CONFIG_EXFAT_KERNEL_DEBUG */ if (!p_bd->opened) return FFS_MEDIAERR; if (secno == bh->b_blocknr) { lock_buffer(bh); set_buffer_uptodate(bh); mark_buffer_dirty(bh); unlock_buffer(bh); if (sync && (sync_dirty_buffer(bh) != 0)) return FFS_MEDIAERR; } else { count = num_secs << p_bd->sector_size_bits; bh2 = __getblk(sb->s_bdev, secno, count); if (bh2 == NULL) goto no_bh; lock_buffer(bh2); memcpy(bh2->b_data, bh->b_data, count); set_buffer_uptodate(bh2); mark_buffer_dirty(bh2); unlock_buffer(bh2); if (sync && (sync_dirty_buffer(bh2) != 0)) { __brelse(bh2); goto no_bh; } __brelse(bh2); } return FFS_SUCCESS; no_bh: WARN(!p_fs->dev_ejected, "[EXFAT] No bh, device seems wrong or to be ejected.\n"); return FFS_MEDIAERR; } s32 bdev_sync(struct super_block *sb) { BD_INFO_T *p_bd = &(EXFAT_SB(sb)->bd_info); #ifdef CONFIG_EXFAT_KERNEL_DEBUG struct exfat_sb_info *sbi = EXFAT_SB(sb); long flags = sbi->debug_flags; if (flags & EXFAT_DEBUGFLAGS_ERROR_RW) return FFS_MEDIAERR; #endif /* CONFIG_EXFAT_KERNEL_DEBUG */ if (!p_bd->opened) return FFS_MEDIAERR; return sync_blockdev(sb->s_bdev); }
gpl-2.0
akhirasip/kernel_SEMC_Shakira_N7_kitkat
drivers/net/sfc/efx.c
374
71881
/**************************************************************************** * Driver for Solarflare Solarstorm network controllers and boards * Copyright 2005-2006 Fen Systems Ltd. * Copyright 2005-2011 Solarflare Communications Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation, incorporated herein by reference. */ #include <linux/module.h> #include <linux/pci.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/delay.h> #include <linux/notifier.h> #include <linux/ip.h> #include <linux/tcp.h> #include <linux/in.h> #include <linux/crc32.h> #include <linux/ethtool.h> #include <linux/topology.h> #include <linux/gfp.h> #include <linux/cpu_rmap.h> #include "net_driver.h" #include "efx.h" #include "nic.h" #include "mcdi.h" #include "workarounds.h" /************************************************************************** * * Type name strings * ************************************************************************** */ /* Loopback mode names (see LOOPBACK_MODE()) */ const unsigned int efx_loopback_mode_max = LOOPBACK_MAX; const char *efx_loopback_mode_names[] = { [LOOPBACK_NONE] = "NONE", [LOOPBACK_DATA] = "DATAPATH", [LOOPBACK_GMAC] = "GMAC", [LOOPBACK_XGMII] = "XGMII", [LOOPBACK_XGXS] = "XGXS", [LOOPBACK_XAUI] = "XAUI", [LOOPBACK_GMII] = "GMII", [LOOPBACK_SGMII] = "SGMII", [LOOPBACK_XGBR] = "XGBR", [LOOPBACK_XFI] = "XFI", [LOOPBACK_XAUI_FAR] = "XAUI_FAR", [LOOPBACK_GMII_FAR] = "GMII_FAR", [LOOPBACK_SGMII_FAR] = "SGMII_FAR", [LOOPBACK_XFI_FAR] = "XFI_FAR", [LOOPBACK_GPHY] = "GPHY", [LOOPBACK_PHYXS] = "PHYXS", [LOOPBACK_PCS] = "PCS", [LOOPBACK_PMAPMD] = "PMA/PMD", [LOOPBACK_XPORT] = "XPORT", [LOOPBACK_XGMII_WS] = "XGMII_WS", [LOOPBACK_XAUI_WS] = "XAUI_WS", [LOOPBACK_XAUI_WS_FAR] = "XAUI_WS_FAR", [LOOPBACK_XAUI_WS_NEAR] = "XAUI_WS_NEAR", [LOOPBACK_GMII_WS] = "GMII_WS", [LOOPBACK_XFI_WS] = "XFI_WS", 
[LOOPBACK_XFI_WS_FAR] = "XFI_WS_FAR",
	[LOOPBACK_PHYXS_WS] = "PHYXS_WS",
};

/* Human-readable names for each reset type, indexed by reset enum value;
 * exported for use in log messages elsewhere in the driver. */
const unsigned int efx_reset_type_max = RESET_TYPE_MAX;
const char *efx_reset_type_names[] = {
	[RESET_TYPE_INVISIBLE] = "INVISIBLE",
	[RESET_TYPE_ALL] = "ALL",
	[RESET_TYPE_WORLD] = "WORLD",
	[RESET_TYPE_DISABLE] = "DISABLE",
	[RESET_TYPE_TX_WATCHDOG] = "TX_WATCHDOG",
	[RESET_TYPE_INT_ERROR] = "INT_ERROR",
	[RESET_TYPE_RX_RECOVERY] = "RX_RECOVERY",
	[RESET_TYPE_RX_DESC_FETCH] = "RX_DESC_FETCH",
	[RESET_TYPE_TX_DESC_FETCH] = "TX_DESC_FETCH",
	[RESET_TYPE_TX_SKIP] = "TX_SKIP",
	[RESET_TYPE_MC_FAILURE] = "MC_FAILURE",
};

/* Largest MTU the driver will accept (jumbo frames) */
#define EFX_MAX_MTU (9 * 1024)

/* Reset workqueue. If any NIC has a hardware failure then a reset will be
 * queued onto this work queue. This is not a per-nic work queue, because
 * efx_reset_work() acquires the rtnl lock, so resets are naturally serialised.
 */
static struct workqueue_struct *reset_workqueue;

/**************************************************************************
 *
 * Configurable values
 *
 *************************************************************************/

/*
 * Use separate channels for TX and RX events
 *
 * Set this to 1 to use separate channels for TX and RX. It allows us
 * to control interrupt affinity separately for TX and RX.
 *
 * This is only used in MSI-X interrupt mode
 */
static unsigned int separate_tx_channels;
module_param(separate_tx_channels, uint, 0444);
MODULE_PARM_DESC(separate_tx_channels,
		 "Use separate channels for TX and RX");

/* This is the weight assigned to each of the (per-channel) virtual
 * NAPI devices.
 */
static int napi_weight = 64;

/* This is the time (in jiffies) between invocations of the hardware
 * monitor.  On Falcon-based NICs, this will:
 * - Check the on-board hardware monitor;
 * - Poll the link state and reconfigure the hardware as necessary.
 */
static unsigned int efx_monitor_interval = 1 * HZ;

/* This controls whether or not the driver will initialise devices
 * with invalid MAC addresses stored in the EEPROM or flash.  If true,
 * such devices will be initialised with a random locally-generated
 * MAC address.  This allows for loading the sfc_mtd driver to
 * reprogram the flash, even if the flash contents (including the MAC
 * address) have previously been erased.
 */
static unsigned int allow_bad_hwaddr;

/* Initial interrupt moderation settings.  They can be modified after
 * module load with ethtool.
 *
 * The default for RX should strike a balance between increasing the
 * round-trip latency and reducing overhead.
 */
static unsigned int rx_irq_mod_usec = 60;

/* Initial interrupt moderation settings.  They can be modified after
 * module load with ethtool.
 *
 * This default is chosen to ensure that a 10G link does not go idle
 * while a TX queue is stopped after it has become full.  A queue is
 * restarted when it drops below half full.  The time this takes (assuming
 * worst case 3 descriptors per packet and 1024 descriptors) is
 *   512 / 3 * 1.2 = 205 usec.
 */
static unsigned int tx_irq_mod_usec = 150;

/* This is the first interrupt mode to try out of:
 * 0 => MSI-X
 * 1 => MSI
 * 2 => legacy
 */
static unsigned int interrupt_mode;

/* This is the requested number of CPUs to use for Receive-Side Scaling (RSS),
 * i.e. the number of CPUs among which we may distribute simultaneous
 * interrupt handling.
 *
 * Cards without MSI-X will only target one CPU via legacy or MSI interrupt.
 * The default (0) means to assign an interrupt to each package (level II cache)
 */
static unsigned int rss_cpus;
module_param(rss_cpus, uint, 0444);
MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");

/* If set, put PHYs into the special "reflash" mode at probe time */
static int phy_flash_cfg;
module_param(phy_flash_cfg, int, 0644);
MODULE_PARM_DESC(phy_flash_cfg, "Set PHYs into reflash mode initially");

/* Lower/upper thresholds for the adaptive IRQ moderation score
 * maintained by efx_poll() */
static unsigned irq_adapt_low_thresh = 10000;
module_param(irq_adapt_low_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_low_thresh,
		 "Threshold score for reducing IRQ moderation");

static unsigned irq_adapt_high_thresh = 20000;
module_param(irq_adapt_high_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_high_thresh,
		 "Threshold score for increasing IRQ moderation");

/* Default netif message-level bitmap (see netdevice.h NETIF_MSG_*) */
static unsigned debug = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
			 NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
			 NETIF_MSG_IFUP | NETIF_MSG_RX_ERR |
			 NETIF_MSG_TX_ERR | NETIF_MSG_HW);
module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");

/**************************************************************************
 *
 * Utility functions and prototypes
 *
 *************************************************************************/

static void efx_remove_channels(struct efx_nic *efx);
static void efx_remove_port(struct efx_nic *efx);
static void efx_init_napi(struct efx_nic *efx);
static void efx_fini_napi(struct efx_nic *efx);
static void efx_fini_napi_channel(struct efx_channel *channel);
static void efx_fini_struct(struct efx_nic *efx);
static void efx_start_all(struct efx_nic *efx);
static void efx_stop_all(struct efx_nic *efx);

/* Once the interface is up (or torn down), reset paths must hold the
 * rtnl lock; this assertion documents/enforces that. */
#define EFX_ASSERT_RESET_SERIALISED(efx)		\
	do {						\
		if ((efx->state == STATE_RUNNING) ||	\
		    (efx->state == STATE_DISABLED))	\
			ASSERT_RTNL();			\
	} while (0)

/**************************************************************************
 *
 * Event queue processing
 *
 *************************************************************************/

/* Process channel's event queue
 *
 * This function is responsible for
processing the event queue of a
 * single channel.  The caller must guarantee that this function will
 * never be concurrently called more than once on the same channel,
 * though different channels may be being processed concurrently.
 */
static int efx_process_channel(struct efx_channel *channel, int budget)
{
	struct efx_nic *efx = channel->efx;
	int spent;

	/* Nothing to do while a reset is pending or the channel is off */
	if (unlikely(efx->reset_pending || !channel->enabled))
		return 0;

	spent = efx_nic_process_eventq(channel, budget);
	if (spent == 0)
		return 0;

	/* Deliver last RX packet. */
	if (channel->rx_pkt) {
		__efx_rx_packet(channel, channel->rx_pkt,
				channel->rx_pkt_csummed);
		channel->rx_pkt = NULL;
	}

	efx_rx_strategy(channel);

	/* Refill the RX descriptor ring after delivering events */
	efx_fast_push_rx_descriptors(efx_channel_get_rx_queue(channel));

	return spent;
}

/* Mark channel as finished processing
 *
 * Note that since we will not receive further interrupts for this
 * channel before we finish processing and call the eventq_read_ack()
 * method, there is no need to use the interrupt hold-off timers.
 */
static inline void efx_channel_processed(struct efx_channel *channel)
{
	/* The interrupt handler for this channel may set work_pending
	 * as soon as we acknowledge the events we've seen.  Make sure
	 * it's cleared before then. */
	channel->work_pending = false;
	smp_wmb();		/* order the clear before the ack below */

	efx_nic_eventq_read_ack(channel);
}

/* NAPI poll handler
 *
 * NAPI guarantees serialisation of polls of the same device, which
 * provides the guarantee required by efx_process_channel().
 */
static int efx_poll(struct napi_struct *napi, int budget)
{
	struct efx_channel *channel =
		container_of(napi, struct efx_channel, napi_str);
	struct efx_nic *efx = channel->efx;
	int spent;

	netif_vdbg(efx, intr, efx->net_dev,
		   "channel %d NAPI poll executing on CPU %d\n",
		   channel->channel, raw_smp_processor_id());

	spent = efx_process_channel(channel, budget);

	if (spent < budget) {
		/* Adaptive IRQ moderation: every 1000 interrupts on an
		 * RX channel, nudge the moderation value up or down
		 * depending on the accumulated score. */
		if (channel->channel < efx->n_rx_channels &&
		    efx->irq_rx_adaptive &&
		    unlikely(++channel->irq_count == 1000)) {
			if (unlikely(channel->irq_mod_score <
				     irq_adapt_low_thresh)) {
				if (channel->irq_moderation > 1) {
					channel->irq_moderation -= 1;
					efx->type->push_irq_moderation(channel);
				}
			} else if (unlikely(channel->irq_mod_score >
					    irq_adapt_high_thresh)) {
				if (channel->irq_moderation <
				    efx->irq_rx_moderation) {
					channel->irq_moderation += 1;
					efx->type->push_irq_moderation(channel);
				}
			}
			channel->irq_count = 0;
			channel->irq_mod_score = 0;
		}

		efx_filter_rfs_expire(channel);

		/* There is no race here; although napi_disable() will
		 * only wait for napi_complete(), this isn't a problem
		 * since efx_channel_processed() will have no effect if
		 * interrupts have already been disabled.
		 */
		napi_complete(napi);
		efx_channel_processed(channel);
	}

	return spent;
}

/* Process the eventq of the specified channel immediately on this CPU
 *
 * Disable hardware generated interrupts, wait for any existing
 * processing to finish, then directly poll (and ack ) the eventq.
 * Finally reenable NAPI and interrupts.
 *
 * This is for use only during a loopback self-test.  It must not
 * deliver any packets up the stack as this can result in deadlock.
 */
void efx_process_channel_now(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;

	BUG_ON(channel->channel >= efx->n_channels);
	BUG_ON(!channel->enabled);
	BUG_ON(!efx->loopback_selftest);

	/* Disable interrupts and wait for ISRs to complete */
	efx_nic_disable_interrupts(efx);
	if (efx->legacy_irq) {
		synchronize_irq(efx->legacy_irq);
		efx->legacy_irq_enabled = false;
	}
	if (channel->irq)
		synchronize_irq(channel->irq);

	/* Wait for any NAPI processing to complete */
	napi_disable(&channel->napi_str);

	/* Poll the channel */
	efx_process_channel(channel, channel->eventq_mask + 1);

	/* Ack the eventq. This may cause an interrupt to be generated
	 * when they are reenabled */
	efx_channel_processed(channel);

	napi_enable(&channel->napi_str);
	if (efx->legacy_irq)
		efx->legacy_irq_enabled = true;
	efx_nic_enable_interrupts(efx);
}

/* Create event queue
 * Event queue memory allocations are done only once.  If the channel
 * is reset, the memory buffer will be reused; this guards against
 * errors during channel reset and also simplifies interrupt handling.
 */
static int efx_probe_eventq(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	unsigned long entries;

	netif_dbg(channel->efx, probe, channel->efx->net_dev,
		  "chan %d create event queue\n", channel->channel);

	/* Build an event queue with room for one event per tx and rx buffer,
	 * plus some extra for link state events and MCDI completions. */
	entries = roundup_pow_of_two(efx->rxq_entries + efx->txq_entries + 128);
	EFX_BUG_ON_PARANOID(entries > EFX_MAX_EVQ_SIZE);
	/* eventq_mask relies on the size being a power of two */
	channel->eventq_mask = max(entries, EFX_MIN_EVQ_SIZE) - 1;

	return efx_nic_probe_eventq(channel);
}

/* Prepare channel's event queue */
static void efx_init_eventq(struct efx_channel *channel)
{
	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "chan %d init event queue\n", channel->channel);

	channel->eventq_read_ptr = 0;

	efx_nic_init_eventq(channel);
}

/* Quiesce the hardware side of the channel's event queue */
static void efx_fini_eventq(struct efx_channel *channel)
{
	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "chan %d fini event queue\n", channel->channel);

	efx_nic_fini_eventq(channel);
}

/* Release the event queue resources allocated by efx_probe_eventq() */
static void efx_remove_eventq(struct efx_channel *channel)
{
	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "chan %d remove event queue\n", channel->channel);

	efx_nic_remove_eventq(channel);
}

/**************************************************************************
 *
 * Channel handling
 *
 *************************************************************************/

/* Allocate and initialise a channel structure, optionally copying
 * parameters (but not resources) from an old channel structure.
 */
static struct efx_channel *
efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel)
{
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	int j;

	if (old_channel) {
		/* Clone the old channel, then zero out every pointer to a
		 * resource (buffers, descriptor rings) so the clone does
		 * not alias the old channel's allocations. */
		channel = kmalloc(sizeof(*channel), GFP_KERNEL);
		if (!channel)
			return NULL;

		*channel = *old_channel;

		channel->napi_dev = NULL;
		memset(&channel->eventq, 0, sizeof(channel->eventq));

		rx_queue = &channel->rx_queue;
		rx_queue->buffer = NULL;
		memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd));

		for (j = 0; j < EFX_TXQ_TYPES; j++) {
			tx_queue = &channel->tx_queue[j];
			/* Only re-point TX queues that were attached */
			if (tx_queue->channel)
				tx_queue->channel = channel;
			tx_queue->buffer = NULL;
			memset(&tx_queue->txd, 0, sizeof(tx_queue->txd));
		}
	} else {
		channel = kzalloc(sizeof(*channel), GFP_KERNEL);
		if (!channel)
			return NULL;

		channel->efx = efx;
		channel->channel = i;

		for (j = 0; j < EFX_TXQ_TYPES; j++) {
			tx_queue = &channel->tx_queue[j];
			tx_queue->efx = efx;
			tx_queue->queue = i * EFX_TXQ_TYPES + j;
			tx_queue->channel = channel;
		}
	}

	rx_queue = &channel->rx_queue;
	rx_queue->efx = efx;
	setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill,
		    (unsigned long)rx_queue);

	return channel;
}

/* Allocate event queue plus all TX/RX queue resources for one channel;
 * on failure everything allocated so far is torn down again. */
static int efx_probe_channel(struct efx_channel *channel)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int rc;

	netif_dbg(channel->efx, probe, channel->efx->net_dev,
		  "creating channel %d\n", channel->channel);

	rc = efx_probe_eventq(channel);
	if (rc)
		goto fail1;

	efx_for_each_channel_tx_queue(tx_queue, channel) {
		rc = efx_probe_tx_queue(tx_queue);
		if (rc)
			goto fail2;
	}

	efx_for_each_channel_rx_queue(rx_queue, channel) {
		rc = efx_probe_rx_queue(rx_queue);
		if (rc)
			goto fail3;
	}

	channel->n_rx_frm_trunc = 0;

	return 0;

 fail3:
	efx_for_each_channel_rx_queue(rx_queue, channel)
		efx_remove_rx_queue(rx_queue);
 fail2:
	efx_for_each_channel_tx_queue(tx_queue, channel)
		efx_remove_tx_queue(tx_queue);
 fail1:
	return rc;
}

/* Build the per-channel IRQ names ("<dev>-rx-0", "<dev>-tx-0", …);
 * TX-only channels are renumbered from zero after the RX channels. */
static void efx_set_channel_names(struct efx_nic *efx)
{
	struct efx_channel *channel;
	const char *type = "";
	int number;

	efx_for_each_channel(channel, efx) {
		number = channel->channel;
		if (efx->n_channels > efx->n_rx_channels) {
			if (channel->channel < efx->n_rx_channels) {
				type = "-rx";
			} else {
				type = "-tx";
				number -= efx->n_rx_channels;
			}
		}
		snprintf(efx->channel_name[channel->channel],
			 sizeof(efx->channel_name[0]),
			 "%s%s-%d", efx->name, type, number);
	}
}

/* Probe every channel; on any failure remove the ones already probed */
static int efx_probe_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;
	int rc;

	/* Restart special buffer allocation */
	efx->next_buffer_table = 0;

	efx_for_each_channel(channel, efx) {
		rc = efx_probe_channel(channel);
		if (rc) {
			netif_err(efx, probe, efx->net_dev,
				  "failed to create channel %d\n",
				  channel->channel);
			goto fail;
		}
	}
	efx_set_channel_names(efx);

	return 0;

fail:
	efx_remove_channels(efx);
	return rc;
}

/* Channels are shutdown and reinitialised whilst the NIC is running
 * to propagate configuration changes (mtu, checksum offload), or
 * to clear hardware error conditions
 */
static void efx_init_channels(struct efx_nic *efx)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	struct efx_channel *channel;

	/* Calculate the rx buffer allocation parameters required to
	 * support the current MTU, including padding for header
	 * alignment and overruns.
	 */
	efx->rx_buffer_len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) +
			      EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
			      efx->type->rx_buffer_hash_size +
			      efx->type->rx_buffer_padding);
	efx->rx_buffer_order = get_order(efx->rx_buffer_len +
					 sizeof(struct efx_rx_page_state));

	/* Initialise the channels */
	efx_for_each_channel(channel, efx) {
		netif_dbg(channel->efx, drv, channel->efx->net_dev,
			  "init chan %d\n", channel->channel);

		efx_init_eventq(channel);

		efx_for_each_channel_tx_queue(tx_queue, channel)
			efx_init_tx_queue(tx_queue);

		/* The rx buffer allocation strategy is MTU dependent */
		efx_rx_strategy(channel);

		efx_for_each_channel_rx_queue(rx_queue, channel)
			efx_init_rx_queue(rx_queue);

		WARN_ON(channel->rx_pkt != NULL);
		efx_rx_strategy(channel);
	}
}

/* This enables event queue processing and packet transmission.
 *
 * Note that this function is not allowed to fail, since that would
 * introduce too much complexity into the suspend/resume path.
 */
static void efx_start_channel(struct efx_channel *channel)
{
	struct efx_rx_queue *rx_queue;

	netif_dbg(channel->efx, ifup, channel->efx->net_dev,
		  "starting chan %d\n", channel->channel);

	/* The interrupt handler for this channel may set work_pending
	 * as soon as we enable it.  Make sure it's cleared before
	 * then.  Similarly, make sure it sees the enabled flag set.
	 */
	channel->work_pending = false;
	channel->enabled = true;
	smp_wmb();

	/* Fill the queues before enabling NAPI */
	efx_for_each_channel_rx_queue(rx_queue, channel)
		efx_fast_push_rx_descriptors(rx_queue);

	napi_enable(&channel->napi_str);
}

/* This disables event queue processing and packet transmission.
 * This function does not guarantee that all queue processing
 * (e.g. RX refill) is complete.
 */
static void efx_stop_channel(struct efx_channel *channel)
{
	if (!channel->enabled)
		return;

	netif_dbg(channel->efx, ifdown, channel->efx->net_dev,
		  "stop chan %d\n", channel->channel);

	channel->enabled = false;
	napi_disable(&channel->napi_str);
}

/* Flush and shut down the DMA queues and event queues of every channel.
 * Must be called with the port disabled and resets serialised. */
static void efx_fini_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int rc;

	EFX_ASSERT_RESET_SERIALISED(efx);
	BUG_ON(efx->port_enabled);

	rc = efx_nic_flush_queues(efx);
	if (rc && EFX_WORKAROUND_7803(efx)) {
		/* Schedule a reset to recover from the flush failure. The
		 * descriptor caches reference memory we're about to free,
		 * but falcon_reconfigure_mac_wrapper() won't reconnect
		 * the MACs because of the pending reset. */
		netif_err(efx, drv, efx->net_dev,
			  "Resetting to recover from flush failure\n");
		efx_schedule_reset(efx, RESET_TYPE_ALL);
	} else if (rc) {
		netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
	} else {
		netif_dbg(efx, drv, efx->net_dev,
			  "successfully flushed all queues\n");
	}

	efx_for_each_channel(channel, efx) {
		netif_dbg(channel->efx, drv, channel->efx->net_dev,
			  "shut down chan %d\n", channel->channel);

		efx_for_each_channel_rx_queue(rx_queue, channel)
			efx_fini_rx_queue(rx_queue);
		efx_for_each_possible_channel_tx_queue(tx_queue, channel)
			efx_fini_tx_queue(tx_queue);
		efx_fini_eventq(channel);
	}
}

/* Free all queue resources belonging to one channel */
static void efx_remove_channel(struct efx_channel *channel)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;

	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "destroy chan %d\n", channel->channel);

	efx_for_each_channel_rx_queue(rx_queue, channel)
		efx_remove_rx_queue(rx_queue);
	efx_for_each_possible_channel_tx_queue(tx_queue, channel)
		efx_remove_tx_queue(tx_queue);
	efx_remove_eventq(channel);
}

static void efx_remove_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_remove_channel(channel);
}

/* Resize the RX and TX descriptor rings.  Stops the NIC, clones every
 * channel with the new sizes, swaps the clones in, and either destroys
 * the old channels (success) or swaps back (rollback on probe failure).
 * Returns 0 on success or a negative errno. */
int
efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
{
	struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
	u32 old_rxq_entries, old_txq_entries;
	unsigned i;
	int rc;

	efx_stop_all(efx);
	efx_fini_channels(efx);

	/* Clone channels */
	memset(other_channel, 0, sizeof(other_channel));
	for (i = 0; i < efx->n_channels; i++) {
		channel = efx_alloc_channel(efx, i, efx->channel[i]);
		if (!channel) {
			rc = -ENOMEM;
			goto out;
		}
		other_channel[i] = channel;
	}

	/* Swap entry counts and channel pointers */
	old_rxq_entries = efx->rxq_entries;
	old_txq_entries = efx->txq_entries;
	efx->rxq_entries = rxq_entries;
	efx->txq_entries = txq_entries;
	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		efx->channel[i] = other_channel[i];
		other_channel[i] = channel;
	}

	rc = efx_probe_channels(efx);
	if (rc)
		goto rollback;

	efx_init_napi(efx);

	/* Destroy old channels */
	for (i = 0; i < efx->n_channels; i++) {
		efx_fini_napi_channel(other_channel[i]);
		efx_remove_channel(other_channel[i]);
	}
out:
	/* Free unused channel structures */
	for (i = 0; i < efx->n_channels; i++)
		kfree(other_channel[i]);

	efx_init_channels(efx);
	efx_start_all(efx);
	return rc;

rollback:
	/* Swap back */
	efx->rxq_entries = old_rxq_entries;
	efx->txq_entries = old_txq_entries;
	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		efx->channel[i] = other_channel[i];
		other_channel[i] = channel;
	}
	goto out;
}

/* Arm the slow-fill timer so the RX queue is refilled shortly */
void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
{
	mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(100));
}

/**************************************************************************
 *
 * Port handling
 *
 **************************************************************************/

/* This ensures that the kernel is kept informed (via
 * netif_carrier_on/off) of the link status, and also maintains the
 * link status's stop on the port's TX queue.
 */
void efx_link_status_changed(struct efx_nic *efx)
{
	struct efx_link_state *link_state = &efx->link_state;

	/* SFC Bug 5356: A net_dev notifier is registered, so we must ensure
	 * that no events are triggered between unregister_netdev() and the
	 * driver unloading. A more general condition is that NETDEV_CHANGE
	 * can only be generated between NETDEV_UP and NETDEV_DOWN */
	if (!netif_running(efx->net_dev))
		return;

	if (link_state->up != netif_carrier_ok(efx->net_dev)) {
		efx->n_link_state_changes++;

		if (link_state->up)
			netif_carrier_on(efx->net_dev);
		else
			netif_carrier_off(efx->net_dev);
	}

	/* Status message for kernel log */
	if (link_state->up) {
		netif_info(efx, link, efx->net_dev,
			   "link up at %uMbps %s-duplex (MTU %d)%s\n",
			   link_state->speed, link_state->fd ? "full" : "half",
			   efx->net_dev->mtu,
			   (efx->promiscuous ? " [PROMISC]" : ""));
	} else {
		netif_info(efx, link, efx->net_dev, "link down\n");
	}
}

/* Record the advertised link modes and derive the wanted flow-control
 * flags from the Pause/Asym_Pause advertisement bits. */
void efx_link_set_advertising(struct efx_nic *efx, u32 advertising)
{
	efx->link_advertising = advertising;
	if (advertising) {
		if (advertising & ADVERTISED_Pause)
			efx->wanted_fc |= (EFX_FC_TX | EFX_FC_RX);
		else
			efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX);
		if (advertising & ADVERTISED_Asym_Pause)
			efx->wanted_fc ^= EFX_FC_TX;
	}
}

/* Record the wanted flow-control flags and mirror them back into the
 * advertised Pause/Asym_Pause bits (the inverse of the above). */
void efx_link_set_wanted_fc(struct efx_nic *efx, u8 wanted_fc)
{
	efx->wanted_fc = wanted_fc;
	if (efx->link_advertising) {
		if (wanted_fc & EFX_FC_RX)
			efx->link_advertising |= (ADVERTISED_Pause |
						  ADVERTISED_Asym_Pause);
		else
			efx->link_advertising &= ~(ADVERTISED_Pause |
						   ADVERTISED_Asym_Pause);
		if (wanted_fc & EFX_FC_TX)
			efx->link_advertising ^= ADVERTISED_Asym_Pause;
	}
}

static void efx_fini_port(struct efx_nic *efx);

/* Push loopback/power/transmit disable settings to the PHY, and reconfigure
 * the MAC appropriately. All other PHY configuration changes are pushed
 * through phy_op->set_settings(), and pushed asynchronously to the MAC
 * through efx_monitor().
 *
 * Callers must hold the mac_lock
 */
int __efx_reconfigure_port(struct efx_nic *efx)
{
	enum efx_phy_mode phy_mode;
	int rc;

	WARN_ON(!mutex_is_locked(&efx->mac_lock));

	/* Serialise the promiscuous flag with efx_set_multicast_list. */
	if (efx_dev_registered(efx)) {
		netif_addr_lock_bh(efx->net_dev);
		netif_addr_unlock_bh(efx->net_dev);
	}

	/* Disable PHY transmit in mac level loopbacks */
	phy_mode = efx->phy_mode;
	if (LOOPBACK_INTERNAL(efx))
		efx->phy_mode |= PHY_MODE_TX_DISABLED;
	else
		efx->phy_mode &= ~PHY_MODE_TX_DISABLED;

	rc = efx->type->reconfigure_port(efx);

	/* Restore the previous PHY mode if reconfiguration failed */
	if (rc)
		efx->phy_mode = phy_mode;

	return rc;
}

/* Reinitialise the MAC to pick up new PHY settings, even if the port is
 * disabled. */
int efx_reconfigure_port(struct efx_nic *efx)
{
	int rc;

	EFX_ASSERT_RESET_SERIALISED(efx);

	mutex_lock(&efx->mac_lock);
	rc = __efx_reconfigure_port(efx);
	mutex_unlock(&efx->mac_lock);

	return rc;
}

/* Asynchronous work item for changing MAC promiscuity and multicast
 * hash.  Avoid a drain/rx_ingress enable by reconfiguring the current
 * MAC directly.
*/ static void efx_mac_work(struct work_struct *data) { struct efx_nic *efx = container_of(data, struct efx_nic, mac_work); mutex_lock(&efx->mac_lock); if (efx->port_enabled) { efx->type->push_multicast_hash(efx); efx->mac_op->reconfigure(efx); } mutex_unlock(&efx->mac_lock); } static int efx_probe_port(struct efx_nic *efx) { unsigned char *perm_addr; int rc; netif_dbg(efx, probe, efx->net_dev, "create port\n"); if (phy_flash_cfg) efx->phy_mode = PHY_MODE_SPECIAL; /* Connect up MAC/PHY operations table */ rc = efx->type->probe_port(efx); if (rc) return rc; /* Sanity check MAC address */ perm_addr = efx->net_dev->perm_addr; if (is_valid_ether_addr(perm_addr)) { memcpy(efx->net_dev->dev_addr, perm_addr, ETH_ALEN); } else { netif_err(efx, probe, efx->net_dev, "invalid MAC address %pM\n", perm_addr); if (!allow_bad_hwaddr) { rc = -EINVAL; goto err; } random_ether_addr(efx->net_dev->dev_addr); netif_info(efx, probe, efx->net_dev, "using locally-generated MAC %pM\n", efx->net_dev->dev_addr); } return 0; err: efx->type->remove_port(efx); return rc; } static int efx_init_port(struct efx_nic *efx) { int rc; netif_dbg(efx, drv, efx->net_dev, "init port\n"); mutex_lock(&efx->mac_lock); rc = efx->phy_op->init(efx); if (rc) goto fail1; efx->port_initialized = true; /* Reconfigure the MAC before creating dma queues (required for * Falcon/A1 where RX_INGR_EN/TX_DRAIN_EN isn't supported) */ efx->mac_op->reconfigure(efx); /* Ensure the PHY advertises the correct flow control settings */ rc = efx->phy_op->reconfigure(efx); if (rc) goto fail2; mutex_unlock(&efx->mac_lock); return 0; fail2: efx->phy_op->fini(efx); fail1: mutex_unlock(&efx->mac_lock); return rc; } static void efx_start_port(struct efx_nic *efx) { netif_dbg(efx, ifup, efx->net_dev, "start port\n"); BUG_ON(efx->port_enabled); mutex_lock(&efx->mac_lock); efx->port_enabled = true; /* efx_mac_work() might have been scheduled after efx_stop_port(), * and then cancelled by efx_flush_all() */ 
efx->type->push_multicast_hash(efx); efx->mac_op->reconfigure(efx); mutex_unlock(&efx->mac_lock); } /* Prevent efx_mac_work() and efx_monitor() from working */ static void efx_stop_port(struct efx_nic *efx) { netif_dbg(efx, ifdown, efx->net_dev, "stop port\n"); mutex_lock(&efx->mac_lock); efx->port_enabled = false; mutex_unlock(&efx->mac_lock); /* Serialise against efx_set_multicast_list() */ if (efx_dev_registered(efx)) { netif_addr_lock_bh(efx->net_dev); netif_addr_unlock_bh(efx->net_dev); } } static void efx_fini_port(struct efx_nic *efx) { netif_dbg(efx, drv, efx->net_dev, "shut down port\n"); if (!efx->port_initialized) return; efx->phy_op->fini(efx); efx->port_initialized = false; efx->link_state.up = false; efx_link_status_changed(efx); } static void efx_remove_port(struct efx_nic *efx) { netif_dbg(efx, drv, efx->net_dev, "destroying port\n"); efx->type->remove_port(efx); } /************************************************************************** * * NIC handling * **************************************************************************/ /* This configures the PCI device to enable I/O and DMA. */ static int efx_init_io(struct efx_nic *efx) { struct pci_dev *pci_dev = efx->pci_dev; dma_addr_t dma_mask = efx->type->max_dma_mask; int rc; netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n"); rc = pci_enable_device(pci_dev); if (rc) { netif_err(efx, probe, efx->net_dev, "failed to enable PCI device\n"); goto fail1; } pci_set_master(pci_dev); /* Set the PCI DMA mask. Try all possibilities from our * genuine mask down to 32 bits, because some architectures * (e.g. x86_64 with iommu_sac_force set) will allow 40 bit * masks event though they reject 46 bit masks. 
*/ while (dma_mask > 0x7fffffffUL) { if (pci_dma_supported(pci_dev, dma_mask) && ((rc = pci_set_dma_mask(pci_dev, dma_mask)) == 0)) break; dma_mask >>= 1; } if (rc) { netif_err(efx, probe, efx->net_dev, "could not find a suitable DMA mask\n"); goto fail2; } netif_dbg(efx, probe, efx->net_dev, "using DMA mask %llx\n", (unsigned long long) dma_mask); rc = pci_set_consistent_dma_mask(pci_dev, dma_mask); if (rc) { /* pci_set_consistent_dma_mask() is not *allowed* to * fail with a mask that pci_set_dma_mask() accepted, * but just in case... */ netif_err(efx, probe, efx->net_dev, "failed to set consistent DMA mask\n"); goto fail2; } efx->membase_phys = pci_resource_start(efx->pci_dev, EFX_MEM_BAR); rc = pci_request_region(pci_dev, EFX_MEM_BAR, "sfc"); if (rc) { netif_err(efx, probe, efx->net_dev, "request for memory BAR failed\n"); rc = -EIO; goto fail3; } efx->membase = ioremap_nocache(efx->membase_phys, efx->type->mem_map_size); if (!efx->membase) { netif_err(efx, probe, efx->net_dev, "could not map memory BAR at %llx+%x\n", (unsigned long long)efx->membase_phys, efx->type->mem_map_size); rc = -ENOMEM; goto fail4; } netif_dbg(efx, probe, efx->net_dev, "memory BAR at %llx+%x (virtual %p)\n", (unsigned long long)efx->membase_phys, efx->type->mem_map_size, efx->membase); return 0; fail4: pci_release_region(efx->pci_dev, EFX_MEM_BAR); fail3: efx->membase_phys = 0; fail2: pci_disable_device(efx->pci_dev); fail1: return rc; } static void efx_fini_io(struct efx_nic *efx) { netif_dbg(efx, drv, efx->net_dev, "shutting down I/O\n"); if (efx->membase) { iounmap(efx->membase); efx->membase = NULL; } if (efx->membase_phys) { pci_release_region(efx->pci_dev, EFX_MEM_BAR); efx->membase_phys = 0; } pci_disable_device(efx->pci_dev); } /* Get number of channels wanted. Each channel will have its own IRQ, * 1 RX queue and/or 2 TX queues. 
 */
static int efx_wanted_channels(void)
{
	cpumask_var_t core_mask;
	int count;
	int cpu;

	/* Explicit module parameter overrides topology detection */
	if (rss_cpus)
		return rss_cpus;

	if (unlikely(!zalloc_cpumask_var(&core_mask, GFP_KERNEL))) {
		printk(KERN_WARNING
		       "sfc: RSS disabled due to allocation failure\n");
		return 1;
	}

	/* Count one channel per physical package/core group */
	count = 0;
	for_each_online_cpu(cpu) {
		if (!cpumask_test_cpu(cpu, core_mask)) {
			++count;
			cpumask_or(core_mask, core_mask,
				   topology_core_cpumask(cpu));
		}
	}

	free_cpumask_var(core_mask);
	return count;
}

/* Populate the accelerated-RFS CPU reverse map from the per-RX-channel
 * MSI-X vectors; a no-op unless CONFIG_RFS_ACCEL is enabled. */
static int
efx_init_rx_cpu_rmap(struct efx_nic *efx, struct msix_entry *xentries)
{
#ifdef CONFIG_RFS_ACCEL
	int i, rc;

	efx->net_dev->rx_cpu_rmap = alloc_irq_cpu_rmap(efx->n_rx_channels);
	if (!efx->net_dev->rx_cpu_rmap)
		return -ENOMEM;
	for (i = 0; i < efx->n_rx_channels; i++) {
		rc = irq_cpu_rmap_add(efx->net_dev->rx_cpu_rmap,
				      xentries[i].vector);
		if (rc) {
			free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
			efx->net_dev->rx_cpu_rmap = NULL;
			return rc;
		}
	}
#endif
	return 0;
}

/* Probe the number and type of interrupts we are able to obtain, and
 * the resulting numbers of channels and RX queues.
 */
static int efx_probe_interrupts(struct efx_nic *efx)
{
	int max_channels =
		min_t(int, efx->type->phys_addr_channels, EFX_MAX_CHANNELS);
	int rc, i;

	if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
		struct msix_entry xentries[EFX_MAX_CHANNELS];
		int n_channels;

		n_channels = efx_wanted_channels();
		if (separate_tx_channels)
			n_channels *= 2;
		n_channels = min(n_channels, max_channels);

		for (i = 0; i < n_channels; i++)
			xentries[i].entry = i;
		rc = pci_enable_msix(efx->pci_dev, xentries, n_channels);
		if (rc > 0) {
			/* Positive rc = number of vectors actually
			 * available; retry with that many. */
			netif_err(efx, drv, efx->net_dev,
				  "WARNING: Insufficient MSI-X vectors"
				  " available (%d < %d).\n", rc, n_channels);
			netif_err(efx, drv, efx->net_dev,
				  "WARNING: Performance may be reduced.\n");
			EFX_BUG_ON_PARANOID(rc >= n_channels);
			n_channels = rc;
			rc = pci_enable_msix(efx->pci_dev, xentries,
					     n_channels);
		}

		if (rc == 0) {
			efx->n_channels = n_channels;
			if (separate_tx_channels) {
				efx->n_tx_channels =
					max(efx->n_channels / 2, 1U);
				efx->n_rx_channels =
					max(efx->n_channels -
					    efx->n_tx_channels, 1U);
			} else {
				efx->n_tx_channels = efx->n_channels;
				efx->n_rx_channels = efx->n_channels;
			}
			rc = efx_init_rx_cpu_rmap(efx, xentries);
			if (rc) {
				pci_disable_msix(efx->pci_dev);
				return rc;
			}
			for (i = 0; i < n_channels; i++)
				efx_get_channel(efx, i)->irq =
					xentries[i].vector;
		} else {
			/* Fall back to single channel MSI */
			efx->interrupt_mode = EFX_INT_MODE_MSI;
			netif_err(efx, drv, efx->net_dev,
				  "could not enable MSI-X\n");
		}
	}

	/* Try single interrupt MSI */
	if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
		efx->n_channels = 1;
		efx->n_rx_channels = 1;
		efx->n_tx_channels = 1;
		rc = pci_enable_msi(efx->pci_dev);
		if (rc == 0) {
			efx_get_channel(efx, 0)->irq = efx->pci_dev->irq;
		} else {
			netif_err(efx, drv, efx->net_dev,
				  "could not enable MSI\n");
			efx->interrupt_mode = EFX_INT_MODE_LEGACY;
		}
	}

	/* Assume legacy interrupts */
	if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
		efx->n_channels = 1 + (separate_tx_channels ? 1 : 0);
		efx->n_rx_channels = 1;
		efx->n_tx_channels = 1;
		efx->legacy_irq = efx->pci_dev->irq;
	}

	return 0;
}

static void efx_remove_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel;

	/* Remove MSI/MSI-X interrupts */
	efx_for_each_channel(channel, efx)
		channel->irq = 0;
	pci_disable_msi(efx->pci_dev);
	pci_disable_msix(efx->pci_dev);

	/* Remove legacy interrupt */
	efx->legacy_irq = 0;
}

static void efx_set_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;

	efx->tx_channel_offset =
		separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0;

	/* We need to adjust the TX queue numbers if we have separate
	 * RX-only and TX-only channels.
	 */
	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel)
			tx_queue->queue -= (efx->tx_channel_offset *
					    EFX_TXQ_TYPES);
	}
}

/* Hardware-specific probe plus interrupt/channel discovery and initial
 * IRQ moderation setup.  Returns 0 or a negative errno. */
static int efx_probe_nic(struct efx_nic *efx)
{
	size_t i;
	int rc;

	netif_dbg(efx, probe, efx->net_dev, "creating NIC\n");

	/* Carry out hardware-type specific initialisation */
	rc = efx->type->probe(efx);
	if (rc)
		return rc;

	/* Determine the number of channels and queues by trying to hook
	 * in MSI-X interrupts. */
	rc = efx_probe_interrupts(efx);
	if (rc)
		goto fail;

	if (efx->n_channels > 1)
		get_random_bytes(&efx->rx_hash_key, sizeof(efx->rx_hash_key));
	for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
		efx->rx_indir_table[i] = i % efx->n_rx_channels;

	efx_set_channels(efx);
	netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
	netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels);

	/* Initialise the interrupt moderation settings */
	efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true);

	return 0;

fail:
	efx->type->remove(efx);
	return rc;
}

static void efx_remove_nic(struct efx_nic *efx)
{
	netif_dbg(efx, drv, efx->net_dev, "destroying NIC\n");

	efx_remove_interrupts(efx);
	efx->type->remove(efx);
}

/**************************************************************************
 *
 * NIC startup/shutdown
 *
 *************************************************************************/

/* Probe NIC, port, channels and filter tables, unwinding on failure */
static int efx_probe_all(struct efx_nic *efx)
{
	int rc;

	rc = efx_probe_nic(efx);
	if (rc) {
		netif_err(efx, probe, efx->net_dev, "failed to create NIC\n");
		goto fail1;
	}

	rc = efx_probe_port(efx);
	if (rc) {
		netif_err(efx, probe, efx->net_dev, "failed to create port\n");
		goto fail2;
	}

	efx->rxq_entries = efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE;
	rc = efx_probe_channels(efx);
	if (rc)
		goto fail3;

	rc = efx_probe_filters(efx);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "failed to create filter tables\n");
		goto fail4;
	}

	return 0;

 fail4:
	efx_remove_channels(efx);
 fail3:
	efx_remove_port(efx);
 fail2:
	efx_remove_nic(efx);
 fail1:
	return rc;
}

/* Called after previous invocation(s) of efx_stop_all, restarts the
 * port, kernel transmit queue, NAPI processing and hardware interrupts,
 * and ensures that the port is scheduled to be reconfigured.
 * This function is safe to call multiple times when the NIC is in any
 * state. */
static void efx_start_all(struct efx_nic *efx)
{
	struct efx_channel *channel;

	EFX_ASSERT_RESET_SERIALISED(efx);

	/* Check that it is appropriate to restart the interface. All
	 * of these flags are safe to read under just the rtnl lock */
	if (efx->port_enabled)
		return;
	if ((efx->state != STATE_RUNNING) && (efx->state != STATE_INIT))
		return;
	if (efx_dev_registered(efx) && !netif_running(efx->net_dev))
		return;

	/* Mark the port as enabled so port reconfigurations can start, then
	 * restart the transmit interface early so the watchdog timer stops */
	efx_start_port(efx);

	if (efx_dev_registered(efx) && netif_device_present(efx->net_dev))
		netif_tx_wake_all_queues(efx->net_dev);

	efx_for_each_channel(channel, efx)
		efx_start_channel(channel);

	if (efx->legacy_irq)
		efx->legacy_irq_enabled = true;
	efx_nic_enable_interrupts(efx);

	/* Switch to event based MCDI completions after enabling interrupts.
	 * If a reset has been scheduled, then we need to stay in polled mode.
	 * Rather than serialising efx_mcdi_mode_event() [which sleeps] and
	 * reset_pending [modified from an atomic context], we instead guarantee
	 * that efx_mcdi_mode_poll() isn't reverted erroneously */
	efx_mcdi_mode_event(efx);
	if (efx->reset_pending)
		efx_mcdi_mode_poll(efx);

	/* Start the hardware monitor if there is one. Otherwise (we're link
	 * event driven), we have to poll the PHY because after an event queue
	 * flush, we could have a missed a link state change */
	if (efx->type->monitor != NULL) {
		queue_delayed_work(efx->workqueue, &efx->monitor_work,
				   efx_monitor_interval);
	} else {
		mutex_lock(&efx->mac_lock);
		if (efx->phy_op->poll(efx))
			efx_link_status_changed(efx);
		mutex_unlock(&efx->mac_lock);
	}

	efx->type->start_stats(efx);
}

/* Flush all delayed work.  Should only be called when no more delayed work
 * will be scheduled.  This doesn't flush pending online resets (efx_reset),
 * since we're holding the rtnl_lock at this point.
*/ static void efx_flush_all(struct efx_nic *efx) { /* Make sure the hardware monitor is stopped */ cancel_delayed_work_sync(&efx->monitor_work); /* Stop scheduled port reconfigurations */ cancel_work_sync(&efx->mac_work); } /* Quiesce hardware and software without bringing the link down. * Safe to call multiple times, when the nic and interface is in any * state. The caller is guaranteed to subsequently be in a position * to modify any hardware and software state they see fit without * taking locks. */ static void efx_stop_all(struct efx_nic *efx) { struct efx_channel *channel; EFX_ASSERT_RESET_SERIALISED(efx); /* port_enabled can be read safely under the rtnl lock */ if (!efx->port_enabled) return; efx->type->stop_stats(efx); /* Switch to MCDI polling on Siena before disabling interrupts */ efx_mcdi_mode_poll(efx); /* Disable interrupts and wait for ISR to complete */ efx_nic_disable_interrupts(efx); if (efx->legacy_irq) { synchronize_irq(efx->legacy_irq); efx->legacy_irq_enabled = false; } efx_for_each_channel(channel, efx) { if (channel->irq) synchronize_irq(channel->irq); } /* Stop all NAPI processing and synchronous rx refills */ efx_for_each_channel(channel, efx) efx_stop_channel(channel); /* Stop all asynchronous port reconfigurations. 
Since all * event processing has already been stopped, there is no * window to loose phy events */ efx_stop_port(efx); /* Flush efx_mac_work(), refill_workqueue, monitor_work */ efx_flush_all(efx); /* Stop the kernel transmit interface late, so the watchdog * timer isn't ticking over the flush */ if (efx_dev_registered(efx)) { netif_tx_stop_all_queues(efx->net_dev); netif_tx_lock_bh(efx->net_dev); netif_tx_unlock_bh(efx->net_dev); } } static void efx_remove_all(struct efx_nic *efx) { efx_remove_filters(efx); efx_remove_channels(efx); efx_remove_port(efx); efx_remove_nic(efx); } /************************************************************************** * * Interrupt moderation * **************************************************************************/ static unsigned irq_mod_ticks(int usecs, int resolution) { if (usecs <= 0) return 0; /* cannot receive interrupts ahead of time :-) */ if (usecs < resolution) return 1; /* never round down to 0 */ return usecs / resolution; } /* Set interrupt moderation parameters */ void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, int rx_usecs, bool rx_adaptive) { struct efx_channel *channel; unsigned tx_ticks = irq_mod_ticks(tx_usecs, EFX_IRQ_MOD_RESOLUTION); unsigned rx_ticks = irq_mod_ticks(rx_usecs, EFX_IRQ_MOD_RESOLUTION); EFX_ASSERT_RESET_SERIALISED(efx); efx->irq_rx_adaptive = rx_adaptive; efx->irq_rx_moderation = rx_ticks; efx_for_each_channel(channel, efx) { if (efx_channel_has_rx_queue(channel)) channel->irq_moderation = rx_ticks; else if (efx_channel_has_tx_queues(channel)) channel->irq_moderation = tx_ticks; } } /************************************************************************** * * Hardware monitor * **************************************************************************/ /* Run periodically off the general workqueue */ static void efx_monitor(struct work_struct *data) { struct efx_nic *efx = container_of(data, struct efx_nic, monitor_work.work); netif_vdbg(efx, timer, efx->net_dev, "hardware 
monitor executing on CPU %d\n", raw_smp_processor_id()); BUG_ON(efx->type->monitor == NULL); /* If the mac_lock is already held then it is likely a port * reconfiguration is already in place, which will likely do * most of the work of monitor() anyway. */ if (mutex_trylock(&efx->mac_lock)) { if (efx->port_enabled) efx->type->monitor(efx); mutex_unlock(&efx->mac_lock); } queue_delayed_work(efx->workqueue, &efx->monitor_work, efx_monitor_interval); } /************************************************************************** * * ioctls * *************************************************************************/ /* Net device ioctl * Context: process, rtnl_lock() held. */ static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd) { struct efx_nic *efx = netdev_priv(net_dev); struct mii_ioctl_data *data = if_mii(ifr); EFX_ASSERT_RESET_SERIALISED(efx); /* Convert phy_id from older PRTAD/DEVAD format */ if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) && (data->phy_id & 0xfc00) == 0x0400) data->phy_id ^= MDIO_PHY_ID_C45 | 0x0400; return mdio_mii_ioctl(&efx->mdio, data, cmd); } /************************************************************************** * * NAPI interface * **************************************************************************/ static void efx_init_napi(struct efx_nic *efx) { struct efx_channel *channel; efx_for_each_channel(channel, efx) { channel->napi_dev = efx->net_dev; netif_napi_add(channel->napi_dev, &channel->napi_str, efx_poll, napi_weight); } } static void efx_fini_napi_channel(struct efx_channel *channel) { if (channel->napi_dev) netif_napi_del(&channel->napi_str); channel->napi_dev = NULL; } static void efx_fini_napi(struct efx_nic *efx) { struct efx_channel *channel; efx_for_each_channel(channel, efx) efx_fini_napi_channel(channel); } /************************************************************************** * * Kernel netpoll interface * *************************************************************************/ #ifdef 
CONFIG_NET_POLL_CONTROLLER /* Although in the common case interrupts will be disabled, this is not * guaranteed. However, all our work happens inside the NAPI callback, * so no locking is required. */ static void efx_netpoll(struct net_device *net_dev) { struct efx_nic *efx = netdev_priv(net_dev); struct efx_channel *channel; efx_for_each_channel(channel, efx) efx_schedule_channel(channel); } #endif /************************************************************************** * * Kernel net device interface * *************************************************************************/ /* Context: process, rtnl_lock() held. */ static int efx_net_open(struct net_device *net_dev) { struct efx_nic *efx = netdev_priv(net_dev); EFX_ASSERT_RESET_SERIALISED(efx); netif_dbg(efx, ifup, efx->net_dev, "opening device on CPU %d\n", raw_smp_processor_id()); if (efx->state == STATE_DISABLED) return -EIO; if (efx->phy_mode & PHY_MODE_SPECIAL) return -EBUSY; if (efx_mcdi_poll_reboot(efx) && efx_reset(efx, RESET_TYPE_ALL)) return -EIO; /* Notify the kernel of the link state polled during driver load, * before the monitor starts running */ efx_link_status_changed(efx); efx_start_all(efx); return 0; } /* Context: process, rtnl_lock() held. * Note that the kernel will ignore our return code; this method * should really be a void. */ static int efx_net_stop(struct net_device *net_dev) { struct efx_nic *efx = netdev_priv(net_dev); netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n", raw_smp_processor_id()); if (efx->state != STATE_DISABLED) { /* Stop the device and flush all the channels */ efx_stop_all(efx); efx_fini_channels(efx); efx_init_channels(efx); } return 0; } /* Context: process, dev_base_lock or RTNL held, non-blocking. 
*/ static struct rtnl_link_stats64 *efx_net_stats(struct net_device *net_dev, struct rtnl_link_stats64 *stats) { struct efx_nic *efx = netdev_priv(net_dev); struct efx_mac_stats *mac_stats = &efx->mac_stats; spin_lock_bh(&efx->stats_lock); efx->type->update_stats(efx); spin_unlock_bh(&efx->stats_lock); stats->rx_packets = mac_stats->rx_packets; stats->tx_packets = mac_stats->tx_packets; stats->rx_bytes = mac_stats->rx_bytes; stats->tx_bytes = mac_stats->tx_bytes; stats->rx_dropped = efx->n_rx_nodesc_drop_cnt; stats->multicast = mac_stats->rx_multicast; stats->collisions = mac_stats->tx_collision; stats->rx_length_errors = (mac_stats->rx_gtjumbo + mac_stats->rx_length_error); stats->rx_crc_errors = mac_stats->rx_bad; stats->rx_frame_errors = mac_stats->rx_align_error; stats->rx_fifo_errors = mac_stats->rx_overflow; stats->rx_missed_errors = mac_stats->rx_missed; stats->tx_window_errors = mac_stats->tx_late_collision; stats->rx_errors = (stats->rx_length_errors + stats->rx_crc_errors + stats->rx_frame_errors + mac_stats->rx_symbol_error); stats->tx_errors = (stats->tx_window_errors + mac_stats->tx_bad); return stats; } /* Context: netif_tx_lock held, BHs disabled. */ static void efx_watchdog(struct net_device *net_dev) { struct efx_nic *efx = netdev_priv(net_dev); netif_err(efx, tx_err, efx->net_dev, "TX stuck with port_enabled=%d: resetting channels\n", efx->port_enabled); efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG); } /* Context: process, rtnl_lock() held. 
*/ static int efx_change_mtu(struct net_device *net_dev, int new_mtu) { struct efx_nic *efx = netdev_priv(net_dev); int rc = 0; EFX_ASSERT_RESET_SERIALISED(efx); if (new_mtu > EFX_MAX_MTU) return -EINVAL; efx_stop_all(efx); netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu); efx_fini_channels(efx); mutex_lock(&efx->mac_lock); /* Reconfigure the MAC before enabling the dma queues so that * the RX buffers don't overflow */ net_dev->mtu = new_mtu; efx->mac_op->reconfigure(efx); mutex_unlock(&efx->mac_lock); efx_init_channels(efx); efx_start_all(efx); return rc; } static int efx_set_mac_address(struct net_device *net_dev, void *data) { struct efx_nic *efx = netdev_priv(net_dev); struct sockaddr *addr = data; char *new_addr = addr->sa_data; EFX_ASSERT_RESET_SERIALISED(efx); if (!is_valid_ether_addr(new_addr)) { netif_err(efx, drv, efx->net_dev, "invalid ethernet MAC address requested: %pM\n", new_addr); return -EINVAL; } memcpy(net_dev->dev_addr, new_addr, net_dev->addr_len); /* Reconfigure the MAC */ mutex_lock(&efx->mac_lock); efx->mac_op->reconfigure(efx); mutex_unlock(&efx->mac_lock); return 0; } /* Context: netif_addr_lock held, BHs disabled. */ static void efx_set_multicast_list(struct net_device *net_dev) { struct efx_nic *efx = netdev_priv(net_dev); struct netdev_hw_addr *ha; union efx_multicast_hash *mc_hash = &efx->multicast_hash; u32 crc; int bit; efx->promiscuous = !!(net_dev->flags & IFF_PROMISC); /* Build multicast hash table */ if (efx->promiscuous || (net_dev->flags & IFF_ALLMULTI)) { memset(mc_hash, 0xff, sizeof(*mc_hash)); } else { memset(mc_hash, 0x00, sizeof(*mc_hash)); netdev_for_each_mc_addr(ha, net_dev) { crc = ether_crc_le(ETH_ALEN, ha->addr); bit = crc & (EFX_MCAST_HASH_ENTRIES - 1); set_bit_le(bit, mc_hash->byte); } /* Broadcast packets go through the multicast hash filter. * ether_crc_le() of the broadcast address is 0xbe2612ff * so we always add bit 0xff to the mask. 
*/ set_bit_le(0xff, mc_hash->byte); } if (efx->port_enabled) queue_work(efx->workqueue, &efx->mac_work); /* Otherwise efx_start_port() will do this */ } static int efx_set_features(struct net_device *net_dev, u32 data) { struct efx_nic *efx = netdev_priv(net_dev); /* If disabling RX n-tuple filtering, clear existing filters */ if (net_dev->features & ~data & NETIF_F_NTUPLE) efx_filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL); return 0; } static const struct net_device_ops efx_netdev_ops = { .ndo_open = efx_net_open, .ndo_stop = efx_net_stop, .ndo_get_stats64 = efx_net_stats, .ndo_tx_timeout = efx_watchdog, .ndo_start_xmit = efx_hard_start_xmit, .ndo_validate_addr = eth_validate_addr, .ndo_do_ioctl = efx_ioctl, .ndo_change_mtu = efx_change_mtu, .ndo_set_mac_address = efx_set_mac_address, .ndo_set_multicast_list = efx_set_multicast_list, .ndo_set_features = efx_set_features, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = efx_netpoll, #endif .ndo_setup_tc = efx_setup_tc, #ifdef CONFIG_RFS_ACCEL .ndo_rx_flow_steer = efx_filter_rfs, #endif }; static void efx_update_name(struct efx_nic *efx) { strcpy(efx->name, efx->net_dev->name); efx_mtd_rename(efx); efx_set_channel_names(efx); } static int efx_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) { struct net_device *net_dev = ptr; if (net_dev->netdev_ops == &efx_netdev_ops && event == NETDEV_CHANGENAME) efx_update_name(netdev_priv(net_dev)); return NOTIFY_DONE; } static struct notifier_block efx_netdev_notifier = { .notifier_call = efx_netdev_event, }; static ssize_t show_phy_type(struct device *dev, struct device_attribute *attr, char *buf) { struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); return sprintf(buf, "%d\n", efx->phy_type); } static DEVICE_ATTR(phy_type, 0644, show_phy_type, NULL); static int efx_register_netdev(struct efx_nic *efx) { struct net_device *net_dev = efx->net_dev; struct efx_channel *channel; int rc; net_dev->watchdog_timeo = 5 * HZ; net_dev->irq = 
efx->pci_dev->irq; net_dev->netdev_ops = &efx_netdev_ops; SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops); /* Clear MAC statistics */ efx->mac_op->update_stats(efx); memset(&efx->mac_stats, 0, sizeof(efx->mac_stats)); rtnl_lock(); rc = dev_alloc_name(net_dev, net_dev->name); if (rc < 0) goto fail_locked; efx_update_name(efx); rc = register_netdevice(net_dev); if (rc) goto fail_locked; efx_for_each_channel(channel, efx) { struct efx_tx_queue *tx_queue; efx_for_each_channel_tx_queue(tx_queue, channel) efx_init_tx_queue_core_txq(tx_queue); } /* Always start with carrier off; PHY events will detect the link */ netif_carrier_off(efx->net_dev); rtnl_unlock(); rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type); if (rc) { netif_err(efx, drv, efx->net_dev, "failed to init net dev attributes\n"); goto fail_registered; } return 0; fail_locked: rtnl_unlock(); netif_err(efx, drv, efx->net_dev, "could not register net dev\n"); return rc; fail_registered: unregister_netdev(net_dev); return rc; } static void efx_unregister_netdev(struct efx_nic *efx) { struct efx_channel *channel; struct efx_tx_queue *tx_queue; if (!efx->net_dev) return; BUG_ON(netdev_priv(efx->net_dev) != efx); /* Free up any skbs still remaining. This has to happen before * we try to unregister the netdev as running their destructors * may be needed to get the device ref. count to 0. */ efx_for_each_channel(channel, efx) { efx_for_each_channel_tx_queue(tx_queue, channel) efx_release_tx_buffers(tx_queue); } if (efx_dev_registered(efx)) { strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name)); device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type); unregister_netdev(efx->net_dev); } } /************************************************************************** * * Device reset and suspend * **************************************************************************/ /* Tears down the entire software state and most of the hardware state * before reset. 
*/ void efx_reset_down(struct efx_nic *efx, enum reset_type method) { EFX_ASSERT_RESET_SERIALISED(efx); efx_stop_all(efx); mutex_lock(&efx->mac_lock); efx_fini_channels(efx); if (efx->port_initialized && method != RESET_TYPE_INVISIBLE) efx->phy_op->fini(efx); efx->type->fini(efx); } /* This function will always ensure that the locks acquired in * efx_reset_down() are released. A failure return code indicates * that we were unable to reinitialise the hardware, and the * driver should be disabled. If ok is false, then the rx and tx * engines are not restarted, pending a RESET_DISABLE. */ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok) { int rc; EFX_ASSERT_RESET_SERIALISED(efx); rc = efx->type->init(efx); if (rc) { netif_err(efx, drv, efx->net_dev, "failed to initialise NIC\n"); goto fail; } if (!ok) goto fail; if (efx->port_initialized && method != RESET_TYPE_INVISIBLE) { rc = efx->phy_op->init(efx); if (rc) goto fail; if (efx->phy_op->reconfigure(efx)) netif_err(efx, drv, efx->net_dev, "could not restore PHY settings\n"); } efx->mac_op->reconfigure(efx); efx_init_channels(efx); efx_restore_filters(efx); mutex_unlock(&efx->mac_lock); efx_start_all(efx); return 0; fail: efx->port_initialized = false; mutex_unlock(&efx->mac_lock); return rc; } /* Reset the NIC using the specified method. Note that the reset may * fail, in which case the card will be left in an unusable state. * * Caller must hold the rtnl_lock. */ int efx_reset(struct efx_nic *efx, enum reset_type method) { int rc, rc2; bool disabled; netif_info(efx, drv, efx->net_dev, "resetting (%s)\n", RESET_TYPE(method)); netif_device_detach(efx->net_dev); efx_reset_down(efx, method); rc = efx->type->reset(efx, method); if (rc) { netif_err(efx, drv, efx->net_dev, "failed to reset hardware\n"); goto out; } /* Clear flags for the scopes we covered. We assume the NIC and * driver are now quiescent so that there is no race here. 
*/ efx->reset_pending &= -(1 << (method + 1)); /* Reinitialise bus-mastering, which may have been turned off before * the reset was scheduled. This is still appropriate, even in the * RESET_TYPE_DISABLE since this driver generally assumes the hardware * can respond to requests. */ pci_set_master(efx->pci_dev); out: /* Leave device stopped if necessary */ disabled = rc || method == RESET_TYPE_DISABLE; rc2 = efx_reset_up(efx, method, !disabled); if (rc2) { disabled = true; if (!rc) rc = rc2; } if (disabled) { dev_close(efx->net_dev); netif_err(efx, drv, efx->net_dev, "has been disabled\n"); efx->state = STATE_DISABLED; } else { netif_dbg(efx, drv, efx->net_dev, "reset complete\n"); netif_device_attach(efx->net_dev); } return rc; } /* The worker thread exists so that code that cannot sleep can * schedule a reset for later. */ static void efx_reset_work(struct work_struct *data) { struct efx_nic *efx = container_of(data, struct efx_nic, reset_work); unsigned long pending = ACCESS_ONCE(efx->reset_pending); if (!pending) return; /* If we're not RUNNING then don't reset. Leave the reset_pending * flags set so that efx_pci_probe_main will be retried */ if (efx->state != STATE_RUNNING) { netif_info(efx, drv, efx->net_dev, "scheduled reset quenched. NIC not RUNNING\n"); return; } rtnl_lock(); (void)efx_reset(efx, fls(pending) - 1); rtnl_unlock(); } void efx_schedule_reset(struct efx_nic *efx, enum reset_type type) { enum reset_type method; switch (type) { case RESET_TYPE_INVISIBLE: case RESET_TYPE_ALL: case RESET_TYPE_WORLD: case RESET_TYPE_DISABLE: method = type; netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n", RESET_TYPE(method)); break; default: method = efx->type->map_reset_reason(type); netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset for %s\n", RESET_TYPE(method), RESET_TYPE(type)); break; } set_bit(method, &efx->reset_pending); /* efx_process_channel() will no longer read events once a * reset is scheduled. So switch back to poll'd MCDI completions. 
*/ efx_mcdi_mode_poll(efx); queue_work(reset_workqueue, &efx->reset_work); } /************************************************************************** * * List of NICs we support * **************************************************************************/ /* PCI device ID table */ static DEFINE_PCI_DEVICE_TABLE(efx_pci_table) = { {PCI_DEVICE(EFX_VENDID_SFC, FALCON_A_P_DEVID), .driver_data = (unsigned long) &falcon_a1_nic_type}, {PCI_DEVICE(EFX_VENDID_SFC, FALCON_B_P_DEVID), .driver_data = (unsigned long) &falcon_b0_nic_type}, {PCI_DEVICE(EFX_VENDID_SFC, BETHPAGE_A_P_DEVID), .driver_data = (unsigned long) &siena_a0_nic_type}, {PCI_DEVICE(EFX_VENDID_SFC, SIENA_A_P_DEVID), .driver_data = (unsigned long) &siena_a0_nic_type}, {0} /* end of list */ }; /************************************************************************** * * Dummy PHY/MAC operations * * Can be used for some unimplemented operations * Needed so all function pointers are valid and do not have to be tested * before use * **************************************************************************/ int efx_port_dummy_op_int(struct efx_nic *efx) { return 0; } void efx_port_dummy_op_void(struct efx_nic *efx) {} static bool efx_port_dummy_op_poll(struct efx_nic *efx) { return false; } static const struct efx_phy_operations efx_dummy_phy_operations = { .init = efx_port_dummy_op_int, .reconfigure = efx_port_dummy_op_int, .poll = efx_port_dummy_op_poll, .fini = efx_port_dummy_op_void, }; /************************************************************************** * * Data housekeeping * **************************************************************************/ /* This zeroes out and then fills in the invariants in a struct * efx_nic (including all sub-structures). 
*/ static int efx_init_struct(struct efx_nic *efx, const struct efx_nic_type *type, struct pci_dev *pci_dev, struct net_device *net_dev) { int i; /* Initialise common structures */ memset(efx, 0, sizeof(*efx)); spin_lock_init(&efx->biu_lock); #ifdef CONFIG_SFC_MTD INIT_LIST_HEAD(&efx->mtd_list); #endif INIT_WORK(&efx->reset_work, efx_reset_work); INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor); efx->pci_dev = pci_dev; efx->msg_enable = debug; efx->state = STATE_INIT; strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name)); efx->net_dev = net_dev; spin_lock_init(&efx->stats_lock); mutex_init(&efx->mac_lock); efx->mac_op = type->default_mac_ops; efx->phy_op = &efx_dummy_phy_operations; efx->mdio.dev = net_dev; INIT_WORK(&efx->mac_work, efx_mac_work); for (i = 0; i < EFX_MAX_CHANNELS; i++) { efx->channel[i] = efx_alloc_channel(efx, i, NULL); if (!efx->channel[i]) goto fail; } efx->type = type; EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS); /* Higher numbered interrupt modes are less capable! */ efx->interrupt_mode = max(efx->type->max_interrupt_mode, interrupt_mode); /* Would be good to use the net_dev name, but we're too early */ snprintf(efx->workqueue_name, sizeof(efx->workqueue_name), "sfc%s", pci_name(pci_dev)); efx->workqueue = create_singlethread_workqueue(efx->workqueue_name); if (!efx->workqueue) goto fail; return 0; fail: efx_fini_struct(efx); return -ENOMEM; } static void efx_fini_struct(struct efx_nic *efx) { int i; for (i = 0; i < EFX_MAX_CHANNELS; i++) kfree(efx->channel[i]); if (efx->workqueue) { destroy_workqueue(efx->workqueue); efx->workqueue = NULL; } } /************************************************************************** * * PCI interface * **************************************************************************/ /* Main body of final NIC shutdown code * This is called only at module unload (or hotplug removal). 
*/ static void efx_pci_remove_main(struct efx_nic *efx) { #ifdef CONFIG_RFS_ACCEL free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap); efx->net_dev->rx_cpu_rmap = NULL; #endif efx_nic_fini_interrupt(efx); efx_fini_channels(efx); efx_fini_port(efx); efx->type->fini(efx); efx_fini_napi(efx); efx_remove_all(efx); } /* Final NIC shutdown * This is called only at module unload (or hotplug removal). */ static void efx_pci_remove(struct pci_dev *pci_dev) { struct efx_nic *efx; efx = pci_get_drvdata(pci_dev); if (!efx) return; /* Mark the NIC as fini, then stop the interface */ rtnl_lock(); efx->state = STATE_FINI; dev_close(efx->net_dev); /* Allow any queued efx_resets() to complete */ rtnl_unlock(); efx_unregister_netdev(efx); efx_mtd_remove(efx); /* Wait for any scheduled resets to complete. No more will be * scheduled from this point because efx_stop_all() has been * called, we are no longer registered with driverlink, and * the net_device's have been removed. */ cancel_work_sync(&efx->reset_work); efx_pci_remove_main(efx); efx_fini_io(efx); netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n"); pci_set_drvdata(pci_dev, NULL); efx_fini_struct(efx); free_netdev(efx->net_dev); }; /* Main body of NIC initialisation * This is called at module load (or hotplug insertion, theoretically). 
*/ static int efx_pci_probe_main(struct efx_nic *efx) { int rc; /* Do start-of-day initialisation */ rc = efx_probe_all(efx); if (rc) goto fail1; efx_init_napi(efx); rc = efx->type->init(efx); if (rc) { netif_err(efx, probe, efx->net_dev, "failed to initialise NIC\n"); goto fail3; } rc = efx_init_port(efx); if (rc) { netif_err(efx, probe, efx->net_dev, "failed to initialise port\n"); goto fail4; } efx_init_channels(efx); rc = efx_nic_init_interrupt(efx); if (rc) goto fail5; return 0; fail5: efx_fini_channels(efx); efx_fini_port(efx); fail4: efx->type->fini(efx); fail3: efx_fini_napi(efx); efx_remove_all(efx); fail1: return rc; } /* NIC initialisation * * This is called at module load (or hotplug insertion, * theoretically). It sets up PCI mappings, tests and resets the NIC, * sets up and registers the network devices with the kernel and hooks * the interrupt service routine. It does not prepare the device for * transmission; this is left to the first time one of the network * interfaces is brought up (i.e. efx_net_open). 
*/ static int __devinit efx_pci_probe(struct pci_dev *pci_dev, const struct pci_device_id *entry) { const struct efx_nic_type *type = (const struct efx_nic_type *) entry->driver_data; struct net_device *net_dev; struct efx_nic *efx; int i, rc; /* Allocate and initialise a struct net_device and struct efx_nic */ net_dev = alloc_etherdev_mqs(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES, EFX_MAX_RX_QUEUES); if (!net_dev) return -ENOMEM; net_dev->features |= (type->offload_features | NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_TSO | NETIF_F_RXCSUM); if (type->offload_features & NETIF_F_V6_CSUM) net_dev->features |= NETIF_F_TSO6; /* Mask for features that also apply to VLAN devices */ net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_ALL_TSO | NETIF_F_RXCSUM); /* All offloads can be toggled */ net_dev->hw_features = net_dev->features & ~NETIF_F_HIGHDMA; efx = netdev_priv(net_dev); pci_set_drvdata(pci_dev, efx); SET_NETDEV_DEV(net_dev, &pci_dev->dev); rc = efx_init_struct(efx, type, pci_dev, net_dev); if (rc) goto fail1; netif_info(efx, probe, efx->net_dev, "Solarflare NIC detected\n"); /* Set up basic I/O (BAR mappings etc) */ rc = efx_init_io(efx); if (rc) goto fail2; /* No serialisation is required with the reset path because * we're in STATE_INIT. */ for (i = 0; i < 5; i++) { rc = efx_pci_probe_main(efx); /* Serialise against efx_reset(). No more resets will be * scheduled since efx_stop_all() has been called, and we * have not and never have been registered with either * the rtnetlink or driverlink layers. 
*/ cancel_work_sync(&efx->reset_work); if (rc == 0) { if (efx->reset_pending) { /* If there was a scheduled reset during * probe, the NIC is probably hosed anyway */ efx_pci_remove_main(efx); rc = -EIO; } else { break; } } /* Retry if a recoverably reset event has been scheduled */ if (efx->reset_pending & ~(1 << RESET_TYPE_INVISIBLE | 1 << RESET_TYPE_ALL) || !efx->reset_pending) goto fail3; efx->reset_pending = 0; } if (rc) { netif_err(efx, probe, efx->net_dev, "Could not reset NIC\n"); goto fail4; } /* Switch to the running state before we expose the device to the OS, * so that dev_open()|efx_start_all() will actually start the device */ efx->state = STATE_RUNNING; rc = efx_register_netdev(efx); if (rc) goto fail5; netif_dbg(efx, probe, efx->net_dev, "initialisation successful\n"); rtnl_lock(); efx_mtd_probe(efx); /* allowed to fail */ rtnl_unlock(); return 0; fail5: efx_pci_remove_main(efx); fail4: fail3: efx_fini_io(efx); fail2: efx_fini_struct(efx); fail1: WARN_ON(rc > 0); netif_dbg(efx, drv, efx->net_dev, "initialisation failed. 
rc=%d\n", rc); free_netdev(net_dev); return rc; } static int efx_pm_freeze(struct device *dev) { struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); efx->state = STATE_FINI; netif_device_detach(efx->net_dev); efx_stop_all(efx); efx_fini_channels(efx); return 0; } static int efx_pm_thaw(struct device *dev) { struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); efx->state = STATE_INIT; efx_init_channels(efx); mutex_lock(&efx->mac_lock); efx->phy_op->reconfigure(efx); mutex_unlock(&efx->mac_lock); efx_start_all(efx); netif_device_attach(efx->net_dev); efx->state = STATE_RUNNING; efx->type->resume_wol(efx); /* Reschedule any quenched resets scheduled during efx_pm_freeze() */ queue_work(reset_workqueue, &efx->reset_work); return 0; } static int efx_pm_poweroff(struct device *dev) { struct pci_dev *pci_dev = to_pci_dev(dev); struct efx_nic *efx = pci_get_drvdata(pci_dev); efx->type->fini(efx); efx->reset_pending = 0; pci_save_state(pci_dev); return pci_set_power_state(pci_dev, PCI_D3hot); } /* Used for both resume and restore */ static int efx_pm_resume(struct device *dev) { struct pci_dev *pci_dev = to_pci_dev(dev); struct efx_nic *efx = pci_get_drvdata(pci_dev); int rc; rc = pci_set_power_state(pci_dev, PCI_D0); if (rc) return rc; pci_restore_state(pci_dev); rc = pci_enable_device(pci_dev); if (rc) return rc; pci_set_master(efx->pci_dev); rc = efx->type->reset(efx, RESET_TYPE_ALL); if (rc) return rc; rc = efx->type->init(efx); if (rc) return rc; efx_pm_thaw(dev); return 0; } static int efx_pm_suspend(struct device *dev) { int rc; efx_pm_freeze(dev); rc = efx_pm_poweroff(dev); if (rc) efx_pm_resume(dev); return rc; } static struct dev_pm_ops efx_pm_ops = { .suspend = efx_pm_suspend, .resume = efx_pm_resume, .freeze = efx_pm_freeze, .thaw = efx_pm_thaw, .poweroff = efx_pm_poweroff, .restore = efx_pm_resume, }; static struct pci_driver efx_pci_driver = { .name = KBUILD_MODNAME, .id_table = efx_pci_table, .probe = efx_pci_probe, .remove = efx_pci_remove, .driver.pm 
= &efx_pm_ops, }; /************************************************************************** * * Kernel module interface * *************************************************************************/ module_param(interrupt_mode, uint, 0444); MODULE_PARM_DESC(interrupt_mode, "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)"); static int __init efx_init_module(void) { int rc; printk(KERN_INFO "Solarflare NET driver v" EFX_DRIVER_VERSION "\n"); rc = register_netdevice_notifier(&efx_netdev_notifier); if (rc) goto err_notifier; reset_workqueue = create_singlethread_workqueue("sfc_reset"); if (!reset_workqueue) { rc = -ENOMEM; goto err_reset; } rc = pci_register_driver(&efx_pci_driver); if (rc < 0) goto err_pci; return 0; err_pci: destroy_workqueue(reset_workqueue); err_reset: unregister_netdevice_notifier(&efx_netdev_notifier); err_notifier: return rc; } static void __exit efx_exit_module(void) { printk(KERN_INFO "Solarflare NET driver unloading\n"); pci_unregister_driver(&efx_pci_driver); destroy_workqueue(reset_workqueue); unregister_netdevice_notifier(&efx_netdev_notifier); } module_init(efx_init_module); module_exit(efx_exit_module); MODULE_AUTHOR("Solarflare Communications and " "Michael Brown <mbrown@fensystems.co.uk>"); MODULE_DESCRIPTION("Solarflare Communications network driver"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, efx_pci_table);
gpl-2.0
VentureROM-L/android_kernel_moto_shamu
drivers/pci/rom.c
630
6301
/* * drivers/pci/rom.c * * (C) Copyright 2004 Jon Smirl <jonsmirl@yahoo.com> * (C) Copyright 2004 Silicon Graphics, Inc. Jesse Barnes <jbarnes@sgi.com> * * PCI ROM access routines */ #include <linux/kernel.h> #include <linux/export.h> #include <linux/pci.h> #include <linux/slab.h> #include "pci.h" /** * pci_enable_rom - enable ROM decoding for a PCI device * @pdev: PCI device to enable * * Enable ROM decoding on @dev. This involves simply turning on the last * bit of the PCI ROM BAR. Note that some cards may share address decoders * between the ROM and other resources, so enabling it may disable access * to MMIO registers or other card memory. */ int pci_enable_rom(struct pci_dev *pdev) { struct resource *res = pdev->resource + PCI_ROM_RESOURCE; struct pci_bus_region region; u32 rom_addr; if (!res->flags) return -1; pcibios_resource_to_bus(pdev, &region, res); pci_read_config_dword(pdev, pdev->rom_base_reg, &rom_addr); rom_addr &= ~PCI_ROM_ADDRESS_MASK; rom_addr |= region.start | PCI_ROM_ADDRESS_ENABLE; pci_write_config_dword(pdev, pdev->rom_base_reg, rom_addr); return 0; } /** * pci_disable_rom - disable ROM decoding for a PCI device * @pdev: PCI device to disable * * Disable ROM decoding on a PCI device by turning off the last bit in the * ROM BAR. */ void pci_disable_rom(struct pci_dev *pdev) { u32 rom_addr; pci_read_config_dword(pdev, pdev->rom_base_reg, &rom_addr); rom_addr &= ~PCI_ROM_ADDRESS_ENABLE; pci_write_config_dword(pdev, pdev->rom_base_reg, rom_addr); } /** * pci_get_rom_size - obtain the actual size of the ROM image * @pdev: target PCI device * @rom: kernel virtual pointer to image of ROM * @size: size of PCI window * return: size of actual ROM image * * Determine the actual length of the ROM image. * The PCI window size could be much larger than the * actual image size. 
*/ size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size) { void __iomem *image; int last_image; unsigned length; image = rom; do { void __iomem *pds; /* Standard PCI ROMs start out with these bytes 55 AA */ if (readb(image) != 0x55) { dev_err(&pdev->dev, "Invalid ROM contents\n"); break; } if (readb(image + 1) != 0xAA) break; /* get the PCI data structure and check its signature */ pds = image + readw(image + 24); if (readb(pds) != 'P') break; if (readb(pds + 1) != 'C') break; if (readb(pds + 2) != 'I') break; if (readb(pds + 3) != 'R') break; last_image = readb(pds + 21) & 0x80; length = readw(pds + 16); image += length * 512; } while (length && !last_image); /* never return a size larger than the PCI resource window */ /* there are known ROMs that get the size wrong */ return min((size_t)(image - rom), size); } /** * pci_map_rom - map a PCI ROM to kernel space * @pdev: pointer to pci device struct * @size: pointer to receive size of pci window over ROM * * Return: kernel virtual pointer to image of ROM * * Map a PCI ROM into kernel space. If ROM is boot video ROM, * the shadow BIOS copy will be returned instead of the * actual ROM. */ void __iomem *pci_map_rom(struct pci_dev *pdev, size_t *size) { struct resource *res = &pdev->resource[PCI_ROM_RESOURCE]; loff_t start; void __iomem *rom; /* * IORESOURCE_ROM_SHADOW set on x86, x86_64 and IA64 supports legacy * memory map if the VGA enable bit of the Bridge Control register is * set for embedded VGA. 
*/ if (res->flags & IORESOURCE_ROM_SHADOW) { /* primary video rom always starts here */ start = (loff_t)0xC0000; *size = 0x20000; /* cover C000:0 through E000:0 */ } else { if (res->flags & (IORESOURCE_ROM_COPY | IORESOURCE_ROM_BIOS_COPY)) { *size = pci_resource_len(pdev, PCI_ROM_RESOURCE); return (void __iomem *)(unsigned long) pci_resource_start(pdev, PCI_ROM_RESOURCE); } else { /* assign the ROM an address if it doesn't have one */ if (res->parent == NULL && pci_assign_resource(pdev,PCI_ROM_RESOURCE)) return NULL; start = pci_resource_start(pdev, PCI_ROM_RESOURCE); *size = pci_resource_len(pdev, PCI_ROM_RESOURCE); if (*size == 0) return NULL; /* Enable ROM space decodes */ if (pci_enable_rom(pdev)) return NULL; } } rom = ioremap(start, *size); if (!rom) { /* restore enable if ioremap fails */ if (!(res->flags & (IORESOURCE_ROM_ENABLE | IORESOURCE_ROM_SHADOW | IORESOURCE_ROM_COPY))) pci_disable_rom(pdev); return NULL; } /* * Try to find the true size of the ROM since sometimes the PCI window * size is much larger than the actual size of the ROM. * True size is important if the ROM is going to be copied. */ *size = pci_get_rom_size(pdev, rom, *size); return rom; } /** * pci_unmap_rom - unmap the ROM from kernel space * @pdev: pointer to pci device struct * @rom: virtual address of the previous mapping * * Remove a mapping of a previously mapped ROM */ void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom) { struct resource *res = &pdev->resource[PCI_ROM_RESOURCE]; if (res->flags & (IORESOURCE_ROM_COPY | IORESOURCE_ROM_BIOS_COPY)) return; iounmap(rom); /* Disable again before continuing, leave enabled if pci=rom */ if (!(res->flags & (IORESOURCE_ROM_ENABLE | IORESOURCE_ROM_SHADOW))) pci_disable_rom(pdev); } /** * pci_cleanup_rom - free the ROM copy created by pci_map_rom_copy * @pdev: pointer to pci device struct * * Free the copied ROM if we allocated one. 
*/ void pci_cleanup_rom(struct pci_dev *pdev) { struct resource *res = &pdev->resource[PCI_ROM_RESOURCE]; if (res->flags & IORESOURCE_ROM_COPY) { kfree((void*)(unsigned long)res->start); res->flags &= ~IORESOURCE_ROM_COPY; res->start = 0; res->end = 0; } } /** * pci_platform_rom - provides a pointer to any ROM image provided by the * platform * @pdev: pointer to pci device struct * @size: pointer to receive size of pci window over ROM */ void __iomem *pci_platform_rom(struct pci_dev *pdev, size_t *size) { if (pdev->rom && pdev->romlen) { *size = pdev->romlen; return phys_to_virt((phys_addr_t)pdev->rom); } return NULL; } EXPORT_SYMBOL(pci_map_rom); EXPORT_SYMBOL(pci_unmap_rom); EXPORT_SYMBOL_GPL(pci_enable_rom); EXPORT_SYMBOL_GPL(pci_disable_rom); EXPORT_SYMBOL(pci_platform_rom);
gpl-2.0
huz123/bricked.tenderloin
arch/x86/kernel/cpuid.c
886
5561
/* ----------------------------------------------------------------------- *
 *
 *   Copyright 2000-2008 H. Peter Anvin - All Rights Reserved
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
 *   USA; either version 2 of the License, or (at your option) any later
 *   version; incorporated herein by reference.
 *
 * ----------------------------------------------------------------------- */

/*
 * x86 CPUID access device
 *
 * This device is accessed by lseek() to the appropriate CPUID level
 * and then read in chunks of 16 bytes.  A larger size means multiple
 * reads of consecutive levels.
 *
 * The lower 32 bits of the file position is used as the incoming %eax,
 * and the upper 32 bits of the file position as the incoming %ecx,
 * the latter intended for "counting" eax levels like eax=4.
 *
 * This driver uses /dev/cpu/%d/cpuid where %d is the minor number, and on
 * an SMP box will direct the access to CPU %d.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/major.h>
#include <linux/fs.h>
#include <linux/device.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/uaccess.h>
#include <linux/gfp.h>

#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/system.h>

/* Device class backing the /dev/cpu/%d/cpuid nodes. */
static struct class *cpuid_class;

/* One CPUID invocation's register block: inputs eax/ecx, outputs all four. */
struct cpuid_regs {
	u32 eax, ebx, ecx, edx;
};

/*
 * IPI callback: runs on the target CPU and executes CPUID with the
 * eax/ecx inputs in *cmd_block, overwriting the struct with the results.
 */
static void cpuid_smp_cpuid(void *cmd_block)
{
	struct cpuid_regs *cmd = (struct cpuid_regs *)cmd_block;

	cpuid_count(cmd->eax, cmd->ecx,
		    &cmd->eax, &cmd->ebx, &cmd->ecx, &cmd->edx);
}

/*
 * llseek handler.  The file position encodes the CPUID inputs (low 32
 * bits -> %eax level, high 32 bits -> %ecx subleaf), so only SEEK_SET
 * and SEEK_CUR make sense; SEEK_END is rejected with -EINVAL.
 * i_mutex serialises concurrent seekers on the same inode.
 */
static loff_t cpuid_seek(struct file *file, loff_t offset, int orig)
{
	loff_t ret;
	struct inode *inode = file->f_mapping->host;

	mutex_lock(&inode->i_mutex);
	switch (orig) {
	case 0:
		/* SEEK_SET */
		file->f_pos = offset;
		ret = file->f_pos;
		break;
	case 1:
		/* SEEK_CUR */
		file->f_pos += offset;
		ret = file->f_pos;
		break;
	default:
		ret = -EINVAL;
	}
	mutex_unlock(&inode->i_mutex);
	return ret;
}

/*
 * Read handler: each 16-byte chunk is one CPUID invocation (eax, ebx,
 * ecx, edx) executed on the CPU named by the device minor number.  After
 * each chunk the position advances by one, stepping %eax to the next
 * level.  Returns bytes transferred so far, or the error if nothing was
 * transferred.
 */
static ssize_t cpuid_read(struct file *file, char __user *buf,
			  size_t count, loff_t *ppos)
{
	char __user *tmp = buf;
	struct cpuid_regs cmd;
	int cpu = iminor(file->f_path.dentry->d_inode);
	u64 pos = *ppos;
	ssize_t bytes = 0;
	int err = 0;

	if (count % 16)
		return -EINVAL;	/* Invalid chunk size */

	for (; count; count -= 16) {
		cmd.eax = pos;
		cmd.ecx = pos >> 32;
		/* Run CPUID on the target CPU; waits for completion. */
		err = smp_call_function_single(cpu, cpuid_smp_cpuid, &cmd, 1);
		if (err)
			break;
		if (copy_to_user(tmp, &cmd, 16)) {
			err = -EFAULT;
			break;
		}
		tmp += 16;
		bytes += 16;
		*ppos = ++pos;
	}

	return bytes ? bytes : err;
}

/*
 * Open handler: validate that the minor number names an online CPU that
 * actually supports CPUID before allowing reads.
 */
static int cpuid_open(struct inode *inode, struct file *file)
{
	unsigned int cpu;
	struct cpuinfo_x86 *c;

	cpu = iminor(file->f_path.dentry->d_inode);
	if (cpu >= nr_cpu_ids || !cpu_online(cpu))
		return -ENXIO;	/* No such CPU */

	c = &cpu_data(cpu);
	if (c->cpuid_level < 0)
		return -EIO;	/* CPUID not supported */

	return 0;
}

/*
 * File operations we support
 */
static const struct file_operations cpuid_fops = {
	.owner = THIS_MODULE,
	.llseek = cpuid_seek,
	.read = cpuid_read,
	.open = cpuid_open,
};

/* Create the /dev/cpu/<cpu>/cpuid device node for one CPU. */
static __cpuinit int cpuid_device_create(int cpu)
{
	struct device *dev;

	dev = device_create(cpuid_class, NULL, MKDEV(CPUID_MAJOR, cpu), NULL,
			    "cpu%d", cpu);
	return IS_ERR(dev) ? PTR_ERR(dev) : 0;
}

/* Remove the device node created by cpuid_device_create(). */
static void cpuid_device_destroy(int cpu)
{
	device_destroy(cpuid_class, MKDEV(CPUID_MAJOR, cpu));
}

/*
 * CPU hotplug callback: create the device node when a CPU is coming up,
 * remove it if the bring-up is cancelled or the CPU dies.
 */
static int __cpuinit cpuid_class_cpu_callback(struct notifier_block *nfb,
					      unsigned long action,
					      void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	int err = 0;

	switch (action) {
	case CPU_UP_PREPARE:
		err = cpuid_device_create(cpu);
		break;
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DEAD:
		cpuid_device_destroy(cpu);
		break;
	}
	return notifier_from_errno(err);
}

static struct notifier_block __refdata cpuid_class_cpu_notifier =
{
	.notifier_call = cpuid_class_cpu_callback,
};

/* devtmpfs name callback: place the node at cpu/<minor>/cpuid. */
static char *cpuid_devnode(struct device *dev, mode_t *mode)
{
	return kasprintf(GFP_KERNEL, "cpu/%u/cpuid", MINOR(dev->devt));
}

/*
 * Module init: claim the char major, create the class, create one device
 * node per online CPU, then register for hotplug events.  Failures unwind
 * in reverse order via the goto chain.
 */
static int __init cpuid_init(void)
{
	int i, err = 0;
	i = 0;

	if (__register_chrdev(CPUID_MAJOR, 0, NR_CPUS,
			      "cpu/cpuid", &cpuid_fops)) {
		printk(KERN_ERR "cpuid: unable to get major %d for cpuid\n",
		       CPUID_MAJOR);
		err = -EBUSY;
		goto out;
	}
	cpuid_class = class_create(THIS_MODULE, "cpuid");
	if (IS_ERR(cpuid_class)) {
		err = PTR_ERR(cpuid_class);
		goto out_chrdev;
	}
	cpuid_class->devnode = cpuid_devnode;
	for_each_online_cpu(i) {
		err = cpuid_device_create(i);
		if (err != 0)
			goto out_class;
	}
	register_hotcpu_notifier(&cpuid_class_cpu_notifier);

	err = 0;
	goto out;

out_class:
	/* NOTE(review): this destroys nodes for every online CPU, including
	 * ones never created above; device_destroy() on a missing node is
	 * presumably a no-op here — confirm before changing. */
	i = 0;
	for_each_online_cpu(i) {
		cpuid_device_destroy(i);
	}
	class_destroy(cpuid_class);
out_chrdev:
	__unregister_chrdev(CPUID_MAJOR, 0, NR_CPUS, "cpu/cpuid");
out:
	return err;
}

/* Module exit: reverse of cpuid_init(). */
static void __exit cpuid_exit(void)
{
	int cpu = 0;

	for_each_online_cpu(cpu)
		cpuid_device_destroy(cpu);
	class_destroy(cpuid_class);
	__unregister_chrdev(CPUID_MAJOR, 0, NR_CPUS, "cpu/cpuid");
	unregister_hotcpu_notifier(&cpuid_class_cpu_notifier);
}

module_init(cpuid_init);
module_exit(cpuid_exit);

MODULE_AUTHOR("H. Peter Anvin <hpa@zytor.com>");
MODULE_DESCRIPTION("x86 generic CPUID driver");
MODULE_LICENSE("GPL");
gpl-2.0
mus1711/nitro_kernel
drivers/usb/gadget/s3c2410_udc.c
2166
49053
/*
 * linux/drivers/usb/gadget/s3c2410_udc.c
 *
 * Samsung S3C24xx series on-chip full speed USB device controllers
 *
 * Copyright (C) 2004-2007 Herbert Pötzl - Arnaud Patard
 *	Additional cleanups by Ben Dooks <ben-linux@fluff.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#define pr_fmt(fmt) "s3c2410_udc: " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/gpio.h>
#include <linux/prefetch.h>
#include <linux/io.h>

#include <linux/debugfs.h>
#include <linux/seq_file.h>

#include <linux/usb.h>
#include <linux/usb/gadget.h>

#include <asm/byteorder.h>
#include <asm/irq.h>
#include <asm/unaligned.h>
#include <mach/irqs.h>

#include <mach/hardware.h>

#include <plat/regs-udc.h>
#include <linux/platform_data/usb-s3c2410_udc.h>

#include "s3c2410_udc.h"

#define DRIVER_DESC	"S3C2410 USB Device Controller Gadget"
#define DRIVER_VERSION	"29 Apr 2007"
#define DRIVER_AUTHOR	"Herbert Pötzl <herbert@13thfloor.at>, " \
			"Arnaud Patard <arnaud.patard@rtp-net.org>"

static const char		gadget_name[] = "s3c2410_udc";
static const char		driver_desc[] = DRIVER_DESC;

/* Driver-wide singletons: the file supports one controller instance,
 * addressed through the module-level base_addr mapping below. */
static struct s3c2410_udc	*the_controller;
static struct clk		*udc_clock;
static struct clk		*usb_bus_clock;
static void __iomem		*base_addr;
static u64			rsrc_start;
static u64			rsrc_len;
static struct dentry		*s3c2410_udc_debugfs_root;

/* Register accessors: UDC registers are byte-wide, accessed relative to
 * the module-level base_addr mapping. */
static inline u32 udc_read(u32 reg)
{
	return readb(base_addr + reg);
}

static inline void udc_write(u32 value, u32 reg)
{
	writeb(value, base_addr + reg);
}

/* Variant taking an explicit base pointer instead of the module-level
 * mapping (definition continues on the following source row). */
static inline void udc_writeb(void __iomem
*base, u32 value, u32 reg) { writeb(value, base + reg); } static struct s3c2410_udc_mach_info *udc_info; /*************************** DEBUG FUNCTION ***************************/ #define DEBUG_NORMAL 1 #define DEBUG_VERBOSE 2 #ifdef CONFIG_USB_S3C2410_DEBUG #define USB_S3C2410_DEBUG_LEVEL 0 static uint32_t s3c2410_ticks = 0; static int dprintk(int level, const char *fmt, ...) { static char printk_buf[1024]; static long prevticks; static int invocation; va_list args; int len; if (level > USB_S3C2410_DEBUG_LEVEL) return 0; if (s3c2410_ticks != prevticks) { prevticks = s3c2410_ticks; invocation = 0; } len = scnprintf(printk_buf, sizeof(printk_buf), "%1lu.%02d USB: ", prevticks, invocation++); va_start(args, fmt); len = vscnprintf(printk_buf+len, sizeof(printk_buf)-len, fmt, args); va_end(args); return pr_debug("%s", printk_buf); } #else static int dprintk(int level, const char *fmt, ...) { return 0; } #endif static int s3c2410_udc_debugfs_seq_show(struct seq_file *m, void *p) { u32 addr_reg, pwr_reg, ep_int_reg, usb_int_reg; u32 ep_int_en_reg, usb_int_en_reg, ep0_csr; u32 ep1_i_csr1, ep1_i_csr2, ep1_o_csr1, ep1_o_csr2; u32 ep2_i_csr1, ep2_i_csr2, ep2_o_csr1, ep2_o_csr2; addr_reg = udc_read(S3C2410_UDC_FUNC_ADDR_REG); pwr_reg = udc_read(S3C2410_UDC_PWR_REG); ep_int_reg = udc_read(S3C2410_UDC_EP_INT_REG); usb_int_reg = udc_read(S3C2410_UDC_USB_INT_REG); ep_int_en_reg = udc_read(S3C2410_UDC_EP_INT_EN_REG); usb_int_en_reg = udc_read(S3C2410_UDC_USB_INT_EN_REG); udc_write(0, S3C2410_UDC_INDEX_REG); ep0_csr = udc_read(S3C2410_UDC_IN_CSR1_REG); udc_write(1, S3C2410_UDC_INDEX_REG); ep1_i_csr1 = udc_read(S3C2410_UDC_IN_CSR1_REG); ep1_i_csr2 = udc_read(S3C2410_UDC_IN_CSR2_REG); ep1_o_csr1 = udc_read(S3C2410_UDC_IN_CSR1_REG); ep1_o_csr2 = udc_read(S3C2410_UDC_IN_CSR2_REG); udc_write(2, S3C2410_UDC_INDEX_REG); ep2_i_csr1 = udc_read(S3C2410_UDC_IN_CSR1_REG); ep2_i_csr2 = udc_read(S3C2410_UDC_IN_CSR2_REG); ep2_o_csr1 = udc_read(S3C2410_UDC_IN_CSR1_REG); ep2_o_csr2 = 
udc_read(S3C2410_UDC_IN_CSR2_REG); seq_printf(m, "FUNC_ADDR_REG : 0x%04X\n" "PWR_REG : 0x%04X\n" "EP_INT_REG : 0x%04X\n" "USB_INT_REG : 0x%04X\n" "EP_INT_EN_REG : 0x%04X\n" "USB_INT_EN_REG : 0x%04X\n" "EP0_CSR : 0x%04X\n" "EP1_I_CSR1 : 0x%04X\n" "EP1_I_CSR2 : 0x%04X\n" "EP1_O_CSR1 : 0x%04X\n" "EP1_O_CSR2 : 0x%04X\n" "EP2_I_CSR1 : 0x%04X\n" "EP2_I_CSR2 : 0x%04X\n" "EP2_O_CSR1 : 0x%04X\n" "EP2_O_CSR2 : 0x%04X\n", addr_reg, pwr_reg, ep_int_reg, usb_int_reg, ep_int_en_reg, usb_int_en_reg, ep0_csr, ep1_i_csr1, ep1_i_csr2, ep1_o_csr1, ep1_o_csr2, ep2_i_csr1, ep2_i_csr2, ep2_o_csr1, ep2_o_csr2 ); return 0; } static int s3c2410_udc_debugfs_fops_open(struct inode *inode, struct file *file) { return single_open(file, s3c2410_udc_debugfs_seq_show, NULL); } static const struct file_operations s3c2410_udc_debugfs_fops = { .open = s3c2410_udc_debugfs_fops_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, .owner = THIS_MODULE, }; /* io macros */ static inline void s3c2410_udc_clear_ep0_opr(void __iomem *base) { udc_writeb(base, S3C2410_UDC_INDEX_EP0, S3C2410_UDC_INDEX_REG); udc_writeb(base, S3C2410_UDC_EP0_CSR_SOPKTRDY, S3C2410_UDC_EP0_CSR_REG); } static inline void s3c2410_udc_clear_ep0_sst(void __iomem *base) { udc_writeb(base, S3C2410_UDC_INDEX_EP0, S3C2410_UDC_INDEX_REG); writeb(0x00, base + S3C2410_UDC_EP0_CSR_REG); } static inline void s3c2410_udc_clear_ep0_se(void __iomem *base) { udc_writeb(base, S3C2410_UDC_INDEX_EP0, S3C2410_UDC_INDEX_REG); udc_writeb(base, S3C2410_UDC_EP0_CSR_SSE, S3C2410_UDC_EP0_CSR_REG); } static inline void s3c2410_udc_set_ep0_ipr(void __iomem *base) { udc_writeb(base, S3C2410_UDC_INDEX_EP0, S3C2410_UDC_INDEX_REG); udc_writeb(base, S3C2410_UDC_EP0_CSR_IPKRDY, S3C2410_UDC_EP0_CSR_REG); } static inline void s3c2410_udc_set_ep0_de(void __iomem *base) { udc_writeb(base, S3C2410_UDC_INDEX_EP0, S3C2410_UDC_INDEX_REG); udc_writeb(base, S3C2410_UDC_EP0_CSR_DE, S3C2410_UDC_EP0_CSR_REG); } inline void s3c2410_udc_set_ep0_ss(void __iomem 
*b) { udc_writeb(b, S3C2410_UDC_INDEX_EP0, S3C2410_UDC_INDEX_REG); udc_writeb(b, S3C2410_UDC_EP0_CSR_SENDSTL, S3C2410_UDC_EP0_CSR_REG); } static inline void s3c2410_udc_set_ep0_de_out(void __iomem *base) { udc_writeb(base, S3C2410_UDC_INDEX_EP0, S3C2410_UDC_INDEX_REG); udc_writeb(base, (S3C2410_UDC_EP0_CSR_SOPKTRDY | S3C2410_UDC_EP0_CSR_DE), S3C2410_UDC_EP0_CSR_REG); } static inline void s3c2410_udc_set_ep0_sse_out(void __iomem *base) { udc_writeb(base, S3C2410_UDC_INDEX_EP0, S3C2410_UDC_INDEX_REG); udc_writeb(base, (S3C2410_UDC_EP0_CSR_SOPKTRDY | S3C2410_UDC_EP0_CSR_SSE), S3C2410_UDC_EP0_CSR_REG); } static inline void s3c2410_udc_set_ep0_de_in(void __iomem *base) { udc_writeb(base, S3C2410_UDC_INDEX_EP0, S3C2410_UDC_INDEX_REG); udc_writeb(base, (S3C2410_UDC_EP0_CSR_IPKRDY | S3C2410_UDC_EP0_CSR_DE), S3C2410_UDC_EP0_CSR_REG); } /*------------------------- I/O ----------------------------------*/ /* * s3c2410_udc_done */ static void s3c2410_udc_done(struct s3c2410_ep *ep, struct s3c2410_request *req, int status) { unsigned halted = ep->halted; list_del_init(&req->queue); if (likely(req->req.status == -EINPROGRESS)) req->req.status = status; else status = req->req.status; ep->halted = 1; req->req.complete(&ep->ep, &req->req); ep->halted = halted; } static void s3c2410_udc_nuke(struct s3c2410_udc *udc, struct s3c2410_ep *ep, int status) { /* Sanity check */ if (&ep->queue == NULL) return; while (!list_empty(&ep->queue)) { struct s3c2410_request *req; req = list_entry(ep->queue.next, struct s3c2410_request, queue); s3c2410_udc_done(ep, req, status); } } static inline void s3c2410_udc_clear_ep_state(struct s3c2410_udc *dev) { unsigned i; /* hardware SET_{CONFIGURATION,INTERFACE} automagic resets endpoint * fifos, and pending transactions mustn't be continued in any case. 
*/ for (i = 1; i < S3C2410_ENDPOINTS; i++) s3c2410_udc_nuke(dev, &dev->ep[i], -ECONNABORTED); } static inline int s3c2410_udc_fifo_count_out(void) { int tmp; tmp = udc_read(S3C2410_UDC_OUT_FIFO_CNT2_REG) << 8; tmp |= udc_read(S3C2410_UDC_OUT_FIFO_CNT1_REG); return tmp; } /* * s3c2410_udc_write_packet */ static inline int s3c2410_udc_write_packet(int fifo, struct s3c2410_request *req, unsigned max) { unsigned len = min(req->req.length - req->req.actual, max); u8 *buf = req->req.buf + req->req.actual; prefetch(buf); dprintk(DEBUG_VERBOSE, "%s %d %d %d %d\n", __func__, req->req.actual, req->req.length, len, req->req.actual + len); req->req.actual += len; udelay(5); writesb(base_addr + fifo, buf, len); return len; } /* * s3c2410_udc_write_fifo * * return: 0 = still running, 1 = completed, negative = errno */ static int s3c2410_udc_write_fifo(struct s3c2410_ep *ep, struct s3c2410_request *req) { unsigned count; int is_last; u32 idx; int fifo_reg; u32 ep_csr; idx = ep->bEndpointAddress & 0x7F; switch (idx) { default: idx = 0; case 0: fifo_reg = S3C2410_UDC_EP0_FIFO_REG; break; case 1: fifo_reg = S3C2410_UDC_EP1_FIFO_REG; break; case 2: fifo_reg = S3C2410_UDC_EP2_FIFO_REG; break; case 3: fifo_reg = S3C2410_UDC_EP3_FIFO_REG; break; case 4: fifo_reg = S3C2410_UDC_EP4_FIFO_REG; break; } count = s3c2410_udc_write_packet(fifo_reg, req, ep->ep.maxpacket); /* last packet is often short (sometimes a zlp) */ if (count != ep->ep.maxpacket) is_last = 1; else if (req->req.length != req->req.actual || req->req.zero) is_last = 0; else is_last = 2; /* Only ep0 debug messages are interesting */ if (idx == 0) dprintk(DEBUG_NORMAL, "Written ep%d %d.%d of %d b [last %d,z %d]\n", idx, count, req->req.actual, req->req.length, is_last, req->req.zero); if (is_last) { /* The order is important. 
It prevents sending 2 packets * at the same time */ if (idx == 0) { /* Reset signal => no need to say 'data sent' */ if (!(udc_read(S3C2410_UDC_USB_INT_REG) & S3C2410_UDC_USBINT_RESET)) s3c2410_udc_set_ep0_de_in(base_addr); ep->dev->ep0state = EP0_IDLE; } else { udc_write(idx, S3C2410_UDC_INDEX_REG); ep_csr = udc_read(S3C2410_UDC_IN_CSR1_REG); udc_write(idx, S3C2410_UDC_INDEX_REG); udc_write(ep_csr | S3C2410_UDC_ICSR1_PKTRDY, S3C2410_UDC_IN_CSR1_REG); } s3c2410_udc_done(ep, req, 0); is_last = 1; } else { if (idx == 0) { /* Reset signal => no need to say 'data sent' */ if (!(udc_read(S3C2410_UDC_USB_INT_REG) & S3C2410_UDC_USBINT_RESET)) s3c2410_udc_set_ep0_ipr(base_addr); } else { udc_write(idx, S3C2410_UDC_INDEX_REG); ep_csr = udc_read(S3C2410_UDC_IN_CSR1_REG); udc_write(idx, S3C2410_UDC_INDEX_REG); udc_write(ep_csr | S3C2410_UDC_ICSR1_PKTRDY, S3C2410_UDC_IN_CSR1_REG); } } return is_last; } static inline int s3c2410_udc_read_packet(int fifo, u8 *buf, struct s3c2410_request *req, unsigned avail) { unsigned len; len = min(req->req.length - req->req.actual, avail); req->req.actual += len; readsb(fifo + base_addr, buf, len); return len; } /* * return: 0 = still running, 1 = queue empty, negative = errno */ static int s3c2410_udc_read_fifo(struct s3c2410_ep *ep, struct s3c2410_request *req) { u8 *buf; u32 ep_csr; unsigned bufferspace; int is_last = 1; unsigned avail; int fifo_count = 0; u32 idx; int fifo_reg; idx = ep->bEndpointAddress & 0x7F; switch (idx) { default: idx = 0; case 0: fifo_reg = S3C2410_UDC_EP0_FIFO_REG; break; case 1: fifo_reg = S3C2410_UDC_EP1_FIFO_REG; break; case 2: fifo_reg = S3C2410_UDC_EP2_FIFO_REG; break; case 3: fifo_reg = S3C2410_UDC_EP3_FIFO_REG; break; case 4: fifo_reg = S3C2410_UDC_EP4_FIFO_REG; break; } if (!req->req.length) return 1; buf = req->req.buf + req->req.actual; bufferspace = req->req.length - req->req.actual; if (!bufferspace) { dprintk(DEBUG_NORMAL, "%s: buffer full!\n", __func__); return -1; } udc_write(idx, 
S3C2410_UDC_INDEX_REG); fifo_count = s3c2410_udc_fifo_count_out(); dprintk(DEBUG_NORMAL, "%s fifo count : %d\n", __func__, fifo_count); if (fifo_count > ep->ep.maxpacket) avail = ep->ep.maxpacket; else avail = fifo_count; fifo_count = s3c2410_udc_read_packet(fifo_reg, buf, req, avail); /* checking this with ep0 is not accurate as we already * read a control request **/ if (idx != 0 && fifo_count < ep->ep.maxpacket) { is_last = 1; /* overflowed this request? flush extra data */ if (fifo_count != avail) req->req.status = -EOVERFLOW; } else { is_last = (req->req.length <= req->req.actual) ? 1 : 0; } udc_write(idx, S3C2410_UDC_INDEX_REG); fifo_count = s3c2410_udc_fifo_count_out(); /* Only ep0 debug messages are interesting */ if (idx == 0) dprintk(DEBUG_VERBOSE, "%s fifo count : %d [last %d]\n", __func__, fifo_count, is_last); if (is_last) { if (idx == 0) { s3c2410_udc_set_ep0_de_out(base_addr); ep->dev->ep0state = EP0_IDLE; } else { udc_write(idx, S3C2410_UDC_INDEX_REG); ep_csr = udc_read(S3C2410_UDC_OUT_CSR1_REG); udc_write(idx, S3C2410_UDC_INDEX_REG); udc_write(ep_csr & ~S3C2410_UDC_OCSR1_PKTRDY, S3C2410_UDC_OUT_CSR1_REG); } s3c2410_udc_done(ep, req, 0); } else { if (idx == 0) { s3c2410_udc_clear_ep0_opr(base_addr); } else { udc_write(idx, S3C2410_UDC_INDEX_REG); ep_csr = udc_read(S3C2410_UDC_OUT_CSR1_REG); udc_write(idx, S3C2410_UDC_INDEX_REG); udc_write(ep_csr & ~S3C2410_UDC_OCSR1_PKTRDY, S3C2410_UDC_OUT_CSR1_REG); } } return is_last; } static int s3c2410_udc_read_fifo_crq(struct usb_ctrlrequest *crq) { unsigned char *outbuf = (unsigned char *)crq; int bytes_read = 0; udc_write(0, S3C2410_UDC_INDEX_REG); bytes_read = s3c2410_udc_fifo_count_out(); dprintk(DEBUG_NORMAL, "%s: fifo_count=%d\n", __func__, bytes_read); if (bytes_read > sizeof(struct usb_ctrlrequest)) bytes_read = sizeof(struct usb_ctrlrequest); readsb(S3C2410_UDC_EP0_FIFO_REG + base_addr, outbuf, bytes_read); dprintk(DEBUG_VERBOSE, "%s: len=%d %02x:%02x {%x,%x,%x}\n", __func__, bytes_read, 
crq->bRequest, crq->bRequestType, crq->wValue, crq->wIndex, crq->wLength); return bytes_read; } static int s3c2410_udc_get_status(struct s3c2410_udc *dev, struct usb_ctrlrequest *crq) { u16 status = 0; u8 ep_num = crq->wIndex & 0x7F; u8 is_in = crq->wIndex & USB_DIR_IN; switch (crq->bRequestType & USB_RECIP_MASK) { case USB_RECIP_INTERFACE: break; case USB_RECIP_DEVICE: status = dev->devstatus; break; case USB_RECIP_ENDPOINT: if (ep_num > 4 || crq->wLength > 2) return 1; if (ep_num == 0) { udc_write(0, S3C2410_UDC_INDEX_REG); status = udc_read(S3C2410_UDC_IN_CSR1_REG); status = status & S3C2410_UDC_EP0_CSR_SENDSTL; } else { udc_write(ep_num, S3C2410_UDC_INDEX_REG); if (is_in) { status = udc_read(S3C2410_UDC_IN_CSR1_REG); status = status & S3C2410_UDC_ICSR1_SENDSTL; } else { status = udc_read(S3C2410_UDC_OUT_CSR1_REG); status = status & S3C2410_UDC_OCSR1_SENDSTL; } } status = status ? 1 : 0; break; default: return 1; } /* Seems to be needed to get it working. ouch :( */ udelay(5); udc_write(status & 0xFF, S3C2410_UDC_EP0_FIFO_REG); udc_write(status >> 8, S3C2410_UDC_EP0_FIFO_REG); s3c2410_udc_set_ep0_de_in(base_addr); return 0; } /*------------------------- usb state machine -------------------------------*/ static int s3c2410_udc_set_halt(struct usb_ep *_ep, int value); static void s3c2410_udc_handle_ep0_idle(struct s3c2410_udc *dev, struct s3c2410_ep *ep, struct usb_ctrlrequest *crq, u32 ep0csr) { int len, ret, tmp; /* start control request? */ if (!(ep0csr & S3C2410_UDC_EP0_CSR_OPKRDY)) return; s3c2410_udc_nuke(dev, ep, -EPROTO); len = s3c2410_udc_read_fifo_crq(crq); if (len != sizeof(*crq)) { dprintk(DEBUG_NORMAL, "setup begin: fifo READ ERROR" " wanted %d bytes got %d. Stalling out...\n", sizeof(*crq), len); s3c2410_udc_set_ep0_ss(base_addr); return; } dprintk(DEBUG_NORMAL, "bRequest = %d bRequestType %d wLength = %d\n", crq->bRequest, crq->bRequestType, crq->wLength); /* cope with automagic for some standard requests. 
*/ dev->req_std = (crq->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD; dev->req_config = 0; dev->req_pending = 1; switch (crq->bRequest) { case USB_REQ_SET_CONFIGURATION: dprintk(DEBUG_NORMAL, "USB_REQ_SET_CONFIGURATION ...\n"); if (crq->bRequestType == USB_RECIP_DEVICE) { dev->req_config = 1; s3c2410_udc_set_ep0_de_out(base_addr); } break; case USB_REQ_SET_INTERFACE: dprintk(DEBUG_NORMAL, "USB_REQ_SET_INTERFACE ...\n"); if (crq->bRequestType == USB_RECIP_INTERFACE) { dev->req_config = 1; s3c2410_udc_set_ep0_de_out(base_addr); } break; case USB_REQ_SET_ADDRESS: dprintk(DEBUG_NORMAL, "USB_REQ_SET_ADDRESS ...\n"); if (crq->bRequestType == USB_RECIP_DEVICE) { tmp = crq->wValue & 0x7F; dev->address = tmp; udc_write((tmp | S3C2410_UDC_FUNCADDR_UPDATE), S3C2410_UDC_FUNC_ADDR_REG); s3c2410_udc_set_ep0_de_out(base_addr); return; } break; case USB_REQ_GET_STATUS: dprintk(DEBUG_NORMAL, "USB_REQ_GET_STATUS ...\n"); s3c2410_udc_clear_ep0_opr(base_addr); if (dev->req_std) { if (!s3c2410_udc_get_status(dev, crq)) return; } break; case USB_REQ_CLEAR_FEATURE: s3c2410_udc_clear_ep0_opr(base_addr); if (crq->bRequestType != USB_RECIP_ENDPOINT) break; if (crq->wValue != USB_ENDPOINT_HALT || crq->wLength != 0) break; s3c2410_udc_set_halt(&dev->ep[crq->wIndex & 0x7f].ep, 0); s3c2410_udc_set_ep0_de_out(base_addr); return; case USB_REQ_SET_FEATURE: s3c2410_udc_clear_ep0_opr(base_addr); if (crq->bRequestType != USB_RECIP_ENDPOINT) break; if (crq->wValue != USB_ENDPOINT_HALT || crq->wLength != 0) break; s3c2410_udc_set_halt(&dev->ep[crq->wIndex & 0x7f].ep, 1); s3c2410_udc_set_ep0_de_out(base_addr); return; default: s3c2410_udc_clear_ep0_opr(base_addr); break; } if (crq->bRequestType & USB_DIR_IN) dev->ep0state = EP0_IN_DATA_PHASE; else dev->ep0state = EP0_OUT_DATA_PHASE; if (!dev->driver) return; /* deliver the request to the gadget driver */ ret = dev->driver->setup(&dev->gadget, crq); if (ret < 0) { if (dev->req_config) { dprintk(DEBUG_NORMAL, "config change %02x fail %d?\n", 
crq->bRequest, ret); return; } if (ret == -EOPNOTSUPP) dprintk(DEBUG_NORMAL, "Operation not supported\n"); else dprintk(DEBUG_NORMAL, "dev->driver->setup failed. (%d)\n", ret); udelay(5); s3c2410_udc_set_ep0_ss(base_addr); s3c2410_udc_set_ep0_de_out(base_addr); dev->ep0state = EP0_IDLE; /* deferred i/o == no response yet */ } else if (dev->req_pending) { dprintk(DEBUG_VERBOSE, "dev->req_pending... what now?\n"); dev->req_pending = 0; } dprintk(DEBUG_VERBOSE, "ep0state %s\n", ep0states[dev->ep0state]); } static void s3c2410_udc_handle_ep0(struct s3c2410_udc *dev) { u32 ep0csr; struct s3c2410_ep *ep = &dev->ep[0]; struct s3c2410_request *req; struct usb_ctrlrequest crq; if (list_empty(&ep->queue)) req = NULL; else req = list_entry(ep->queue.next, struct s3c2410_request, queue); /* We make the assumption that S3C2410_UDC_IN_CSR1_REG equal to * S3C2410_UDC_EP0_CSR_REG when index is zero */ udc_write(0, S3C2410_UDC_INDEX_REG); ep0csr = udc_read(S3C2410_UDC_IN_CSR1_REG); dprintk(DEBUG_NORMAL, "ep0csr %x ep0state %s\n", ep0csr, ep0states[dev->ep0state]); /* clear stall status */ if (ep0csr & S3C2410_UDC_EP0_CSR_SENTSTL) { s3c2410_udc_nuke(dev, ep, -EPIPE); dprintk(DEBUG_NORMAL, "... clear SENT_STALL ...\n"); s3c2410_udc_clear_ep0_sst(base_addr); dev->ep0state = EP0_IDLE; return; } /* clear setup end */ if (ep0csr & S3C2410_UDC_EP0_CSR_SE) { dprintk(DEBUG_NORMAL, "... serviced SETUP_END ...\n"); s3c2410_udc_nuke(dev, ep, 0); s3c2410_udc_clear_ep0_se(base_addr); dev->ep0state = EP0_IDLE; } switch (dev->ep0state) { case EP0_IDLE: s3c2410_udc_handle_ep0_idle(dev, ep, &crq, ep0csr); break; case EP0_IN_DATA_PHASE: /* GET_DESCRIPTOR etc */ dprintk(DEBUG_NORMAL, "EP0_IN_DATA_PHASE ... what now?\n"); if (!(ep0csr & S3C2410_UDC_EP0_CSR_IPKRDY) && req) s3c2410_udc_write_fifo(ep, req); break; case EP0_OUT_DATA_PHASE: /* SET_DESCRIPTOR etc */ dprintk(DEBUG_NORMAL, "EP0_OUT_DATA_PHASE ... 
what now?\n"); if ((ep0csr & S3C2410_UDC_EP0_CSR_OPKRDY) && req) s3c2410_udc_read_fifo(ep, req); break; case EP0_END_XFER: dprintk(DEBUG_NORMAL, "EP0_END_XFER ... what now?\n"); dev->ep0state = EP0_IDLE; break; case EP0_STALL: dprintk(DEBUG_NORMAL, "EP0_STALL ... what now?\n"); dev->ep0state = EP0_IDLE; break; } } /* * handle_ep - Manage I/O endpoints */ static void s3c2410_udc_handle_ep(struct s3c2410_ep *ep) { struct s3c2410_request *req; int is_in = ep->bEndpointAddress & USB_DIR_IN; u32 ep_csr1; u32 idx; if (likely(!list_empty(&ep->queue))) req = list_entry(ep->queue.next, struct s3c2410_request, queue); else req = NULL; idx = ep->bEndpointAddress & 0x7F; if (is_in) { udc_write(idx, S3C2410_UDC_INDEX_REG); ep_csr1 = udc_read(S3C2410_UDC_IN_CSR1_REG); dprintk(DEBUG_VERBOSE, "ep%01d write csr:%02x %d\n", idx, ep_csr1, req ? 1 : 0); if (ep_csr1 & S3C2410_UDC_ICSR1_SENTSTL) { dprintk(DEBUG_VERBOSE, "st\n"); udc_write(idx, S3C2410_UDC_INDEX_REG); udc_write(ep_csr1 & ~S3C2410_UDC_ICSR1_SENTSTL, S3C2410_UDC_IN_CSR1_REG); return; } if (!(ep_csr1 & S3C2410_UDC_ICSR1_PKTRDY) && req) s3c2410_udc_write_fifo(ep, req); } else { udc_write(idx, S3C2410_UDC_INDEX_REG); ep_csr1 = udc_read(S3C2410_UDC_OUT_CSR1_REG); dprintk(DEBUG_VERBOSE, "ep%01d rd csr:%02x\n", idx, ep_csr1); if (ep_csr1 & S3C2410_UDC_OCSR1_SENTSTL) { udc_write(idx, S3C2410_UDC_INDEX_REG); udc_write(ep_csr1 & ~S3C2410_UDC_OCSR1_SENTSTL, S3C2410_UDC_OUT_CSR1_REG); return; } if ((ep_csr1 & S3C2410_UDC_OCSR1_PKTRDY) && req) s3c2410_udc_read_fifo(ep, req); } } #include <mach/regs-irq.h> /* * s3c2410_udc_irq - interrupt handler */ static irqreturn_t s3c2410_udc_irq(int dummy, void *_dev) { struct s3c2410_udc *dev = _dev; int usb_status; int usbd_status; int pwr_reg; int ep0csr; int i; u32 idx, idx2; unsigned long flags; spin_lock_irqsave(&dev->lock, flags); /* Driver connected ? 
*/ if (!dev->driver) { /* Clear interrupts */ udc_write(udc_read(S3C2410_UDC_USB_INT_REG), S3C2410_UDC_USB_INT_REG); udc_write(udc_read(S3C2410_UDC_EP_INT_REG), S3C2410_UDC_EP_INT_REG); } /* Save index */ idx = udc_read(S3C2410_UDC_INDEX_REG); /* Read status registers */ usb_status = udc_read(S3C2410_UDC_USB_INT_REG); usbd_status = udc_read(S3C2410_UDC_EP_INT_REG); pwr_reg = udc_read(S3C2410_UDC_PWR_REG); udc_writeb(base_addr, S3C2410_UDC_INDEX_EP0, S3C2410_UDC_INDEX_REG); ep0csr = udc_read(S3C2410_UDC_IN_CSR1_REG); dprintk(DEBUG_NORMAL, "usbs=%02x, usbds=%02x, pwr=%02x ep0csr=%02x\n", usb_status, usbd_status, pwr_reg, ep0csr); /* * Now, handle interrupts. There's two types : * - Reset, Resume, Suspend coming -> usb_int_reg * - EP -> ep_int_reg */ /* RESET */ if (usb_status & S3C2410_UDC_USBINT_RESET) { /* two kind of reset : * - reset start -> pwr reg = 8 * - reset end -> pwr reg = 0 **/ dprintk(DEBUG_NORMAL, "USB reset csr %x pwr %x\n", ep0csr, pwr_reg); dev->gadget.speed = USB_SPEED_UNKNOWN; udc_write(0x00, S3C2410_UDC_INDEX_REG); udc_write((dev->ep[0].ep.maxpacket & 0x7ff) >> 3, S3C2410_UDC_MAXP_REG); dev->address = 0; dev->ep0state = EP0_IDLE; dev->gadget.speed = USB_SPEED_FULL; /* clear interrupt */ udc_write(S3C2410_UDC_USBINT_RESET, S3C2410_UDC_USB_INT_REG); udc_write(idx, S3C2410_UDC_INDEX_REG); spin_unlock_irqrestore(&dev->lock, flags); return IRQ_HANDLED; } /* RESUME */ if (usb_status & S3C2410_UDC_USBINT_RESUME) { dprintk(DEBUG_NORMAL, "USB resume\n"); /* clear interrupt */ udc_write(S3C2410_UDC_USBINT_RESUME, S3C2410_UDC_USB_INT_REG); if (dev->gadget.speed != USB_SPEED_UNKNOWN && dev->driver && dev->driver->resume) dev->driver->resume(&dev->gadget); } /* SUSPEND */ if (usb_status & S3C2410_UDC_USBINT_SUSPEND) { dprintk(DEBUG_NORMAL, "USB suspend\n"); /* clear interrupt */ udc_write(S3C2410_UDC_USBINT_SUSPEND, S3C2410_UDC_USB_INT_REG); if (dev->gadget.speed != USB_SPEED_UNKNOWN && dev->driver && dev->driver->suspend) dev->driver->suspend(&dev->gadget); 
dev->ep0state = EP0_IDLE; } /* EP */ /* control traffic */ /* check on ep0csr != 0 is not a good idea as clearing in_pkt_ready * generate an interrupt */ if (usbd_status & S3C2410_UDC_INT_EP0) { dprintk(DEBUG_VERBOSE, "USB ep0 irq\n"); /* Clear the interrupt bit by setting it to 1 */ udc_write(S3C2410_UDC_INT_EP0, S3C2410_UDC_EP_INT_REG); s3c2410_udc_handle_ep0(dev); } /* endpoint data transfers */ for (i = 1; i < S3C2410_ENDPOINTS; i++) { u32 tmp = 1 << i; if (usbd_status & tmp) { dprintk(DEBUG_VERBOSE, "USB ep%d irq\n", i); /* Clear the interrupt bit by setting it to 1 */ udc_write(tmp, S3C2410_UDC_EP_INT_REG); s3c2410_udc_handle_ep(&dev->ep[i]); } } /* what else causes this interrupt? a receive! who is it? */ if (!usb_status && !usbd_status && !pwr_reg && !ep0csr) { for (i = 1; i < S3C2410_ENDPOINTS; i++) { idx2 = udc_read(S3C2410_UDC_INDEX_REG); udc_write(i, S3C2410_UDC_INDEX_REG); if (udc_read(S3C2410_UDC_OUT_CSR1_REG) & 0x1) s3c2410_udc_handle_ep(&dev->ep[i]); /* restore index */ udc_write(idx2, S3C2410_UDC_INDEX_REG); } } dprintk(DEBUG_VERBOSE, "irq: %d s3c2410_udc_done.\n", IRQ_USBD); /* Restore old index */ udc_write(idx, S3C2410_UDC_INDEX_REG); spin_unlock_irqrestore(&dev->lock, flags); return IRQ_HANDLED; } /*------------------------- s3c2410_ep_ops ----------------------------------*/ static inline struct s3c2410_ep *to_s3c2410_ep(struct usb_ep *ep) { return container_of(ep, struct s3c2410_ep, ep); } static inline struct s3c2410_udc *to_s3c2410_udc(struct usb_gadget *gadget) { return container_of(gadget, struct s3c2410_udc, gadget); } static inline struct s3c2410_request *to_s3c2410_req(struct usb_request *req) { return container_of(req, struct s3c2410_request, req); } /* * s3c2410_udc_ep_enable */ static int s3c2410_udc_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc) { struct s3c2410_udc *dev; struct s3c2410_ep *ep; u32 max, tmp; unsigned long flags; u32 csr1, csr2; u32 int_en_reg; ep = to_s3c2410_ep(_ep); if (!_ep || !desc || 
_ep->name == ep0name || desc->bDescriptorType != USB_DT_ENDPOINT) return -EINVAL; dev = ep->dev; if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN) return -ESHUTDOWN; max = usb_endpoint_maxp(desc) & 0x1fff; local_irq_save(flags); _ep->maxpacket = max & 0x7ff; ep->ep.desc = desc; ep->halted = 0; ep->bEndpointAddress = desc->bEndpointAddress; /* set max packet */ udc_write(ep->num, S3C2410_UDC_INDEX_REG); udc_write(max >> 3, S3C2410_UDC_MAXP_REG); /* set type, direction, address; reset fifo counters */ if (desc->bEndpointAddress & USB_DIR_IN) { csr1 = S3C2410_UDC_ICSR1_FFLUSH|S3C2410_UDC_ICSR1_CLRDT; csr2 = S3C2410_UDC_ICSR2_MODEIN|S3C2410_UDC_ICSR2_DMAIEN; udc_write(ep->num, S3C2410_UDC_INDEX_REG); udc_write(csr1, S3C2410_UDC_IN_CSR1_REG); udc_write(ep->num, S3C2410_UDC_INDEX_REG); udc_write(csr2, S3C2410_UDC_IN_CSR2_REG); } else { /* don't flush in fifo or it will cause endpoint interrupt */ csr1 = S3C2410_UDC_ICSR1_CLRDT; csr2 = S3C2410_UDC_ICSR2_DMAIEN; udc_write(ep->num, S3C2410_UDC_INDEX_REG); udc_write(csr1, S3C2410_UDC_IN_CSR1_REG); udc_write(ep->num, S3C2410_UDC_INDEX_REG); udc_write(csr2, S3C2410_UDC_IN_CSR2_REG); csr1 = S3C2410_UDC_OCSR1_FFLUSH | S3C2410_UDC_OCSR1_CLRDT; csr2 = S3C2410_UDC_OCSR2_DMAIEN; udc_write(ep->num, S3C2410_UDC_INDEX_REG); udc_write(csr1, S3C2410_UDC_OUT_CSR1_REG); udc_write(ep->num, S3C2410_UDC_INDEX_REG); udc_write(csr2, S3C2410_UDC_OUT_CSR2_REG); } /* enable irqs */ int_en_reg = udc_read(S3C2410_UDC_EP_INT_EN_REG); udc_write(int_en_reg | (1 << ep->num), S3C2410_UDC_EP_INT_EN_REG); /* print some debug message */ tmp = desc->bEndpointAddress; dprintk(DEBUG_NORMAL, "enable %s(%d) ep%x%s-blk max %02x\n", _ep->name, ep->num, tmp, desc->bEndpointAddress & USB_DIR_IN ? 
"in" : "out", max); local_irq_restore(flags); s3c2410_udc_set_halt(_ep, 0); return 0; } /* * s3c2410_udc_ep_disable */ static int s3c2410_udc_ep_disable(struct usb_ep *_ep) { struct s3c2410_ep *ep = to_s3c2410_ep(_ep); unsigned long flags; u32 int_en_reg; if (!_ep || !ep->ep.desc) { dprintk(DEBUG_NORMAL, "%s not enabled\n", _ep ? ep->ep.name : NULL); return -EINVAL; } local_irq_save(flags); dprintk(DEBUG_NORMAL, "ep_disable: %s\n", _ep->name); ep->ep.desc = NULL; ep->halted = 1; s3c2410_udc_nuke(ep->dev, ep, -ESHUTDOWN); /* disable irqs */ int_en_reg = udc_read(S3C2410_UDC_EP_INT_EN_REG); udc_write(int_en_reg & ~(1<<ep->num), S3C2410_UDC_EP_INT_EN_REG); local_irq_restore(flags); dprintk(DEBUG_NORMAL, "%s disabled\n", _ep->name); return 0; } /* * s3c2410_udc_alloc_request */ static struct usb_request * s3c2410_udc_alloc_request(struct usb_ep *_ep, gfp_t mem_flags) { struct s3c2410_request *req; dprintk(DEBUG_VERBOSE, "%s(%p,%d)\n", __func__, _ep, mem_flags); if (!_ep) return NULL; req = kzalloc(sizeof(struct s3c2410_request), mem_flags); if (!req) return NULL; INIT_LIST_HEAD(&req->queue); return &req->req; } /* * s3c2410_udc_free_request */ static void s3c2410_udc_free_request(struct usb_ep *_ep, struct usb_request *_req) { struct s3c2410_ep *ep = to_s3c2410_ep(_ep); struct s3c2410_request *req = to_s3c2410_req(_req); dprintk(DEBUG_VERBOSE, "%s(%p,%p)\n", __func__, _ep, _req); if (!ep || !_req || (!ep->ep.desc && _ep->name != ep0name)) return; WARN_ON(!list_empty(&req->queue)); kfree(req); } /* * s3c2410_udc_queue */ static int s3c2410_udc_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags) { struct s3c2410_request *req = to_s3c2410_req(_req); struct s3c2410_ep *ep = to_s3c2410_ep(_ep); struct s3c2410_udc *dev; u32 ep_csr = 0; int fifo_count = 0; unsigned long flags; if (unlikely(!_ep || (!ep->ep.desc && ep->ep.name != ep0name))) { dprintk(DEBUG_NORMAL, "%s: invalid args\n", __func__); return -EINVAL; } dev = ep->dev; if (unlikely(!dev->driver || 
dev->gadget.speed == USB_SPEED_UNKNOWN)) { return -ESHUTDOWN; } local_irq_save(flags); if (unlikely(!_req || !_req->complete || !_req->buf || !list_empty(&req->queue))) { if (!_req) dprintk(DEBUG_NORMAL, "%s: 1 X X X\n", __func__); else { dprintk(DEBUG_NORMAL, "%s: 0 %01d %01d %01d\n", __func__, !_req->complete, !_req->buf, !list_empty(&req->queue)); } local_irq_restore(flags); return -EINVAL; } _req->status = -EINPROGRESS; _req->actual = 0; dprintk(DEBUG_VERBOSE, "%s: ep%x len %d\n", __func__, ep->bEndpointAddress, _req->length); if (ep->bEndpointAddress) { udc_write(ep->bEndpointAddress & 0x7F, S3C2410_UDC_INDEX_REG); ep_csr = udc_read((ep->bEndpointAddress & USB_DIR_IN) ? S3C2410_UDC_IN_CSR1_REG : S3C2410_UDC_OUT_CSR1_REG); fifo_count = s3c2410_udc_fifo_count_out(); } else { udc_write(0, S3C2410_UDC_INDEX_REG); ep_csr = udc_read(S3C2410_UDC_IN_CSR1_REG); fifo_count = s3c2410_udc_fifo_count_out(); } /* kickstart this i/o queue? */ if (list_empty(&ep->queue) && !ep->halted) { if (ep->bEndpointAddress == 0 /* ep0 */) { switch (dev->ep0state) { case EP0_IN_DATA_PHASE: if (!(ep_csr&S3C2410_UDC_EP0_CSR_IPKRDY) && s3c2410_udc_write_fifo(ep, req)) { dev->ep0state = EP0_IDLE; req = NULL; } break; case EP0_OUT_DATA_PHASE: if ((!_req->length) || ((ep_csr & S3C2410_UDC_OCSR1_PKTRDY) && s3c2410_udc_read_fifo(ep, req))) { dev->ep0state = EP0_IDLE; req = NULL; } break; default: local_irq_restore(flags); return -EL2HLT; } } else if ((ep->bEndpointAddress & USB_DIR_IN) != 0 && (!(ep_csr&S3C2410_UDC_OCSR1_PKTRDY)) && s3c2410_udc_write_fifo(ep, req)) { req = NULL; } else if ((ep_csr & S3C2410_UDC_OCSR1_PKTRDY) && fifo_count && s3c2410_udc_read_fifo(ep, req)) { req = NULL; } } /* pio or dma irq handler advances the queue. 
*/ if (likely(req)) list_add_tail(&req->queue, &ep->queue); local_irq_restore(flags); dprintk(DEBUG_VERBOSE, "%s ok\n", __func__); return 0; } /* * s3c2410_udc_dequeue */ static int s3c2410_udc_dequeue(struct usb_ep *_ep, struct usb_request *_req) { struct s3c2410_ep *ep = to_s3c2410_ep(_ep); struct s3c2410_udc *udc; int retval = -EINVAL; unsigned long flags; struct s3c2410_request *req = NULL; dprintk(DEBUG_VERBOSE, "%s(%p,%p)\n", __func__, _ep, _req); if (!the_controller->driver) return -ESHUTDOWN; if (!_ep || !_req) return retval; udc = to_s3c2410_udc(ep->gadget); local_irq_save(flags); list_for_each_entry(req, &ep->queue, queue) { if (&req->req == _req) { list_del_init(&req->queue); _req->status = -ECONNRESET; retval = 0; break; } } if (retval == 0) { dprintk(DEBUG_VERBOSE, "dequeued req %p from %s, len %d buf %p\n", req, _ep->name, _req->length, _req->buf); s3c2410_udc_done(ep, req, -ECONNRESET); } local_irq_restore(flags); return retval; } /* * s3c2410_udc_set_halt */ static int s3c2410_udc_set_halt(struct usb_ep *_ep, int value) { struct s3c2410_ep *ep = to_s3c2410_ep(_ep); u32 ep_csr = 0; unsigned long flags; u32 idx; if (unlikely(!_ep || (!ep->ep.desc && ep->ep.name != ep0name))) { dprintk(DEBUG_NORMAL, "%s: inval 2\n", __func__); return -EINVAL; } local_irq_save(flags); idx = ep->bEndpointAddress & 0x7F; if (idx == 0) { s3c2410_udc_set_ep0_ss(base_addr); s3c2410_udc_set_ep0_de_out(base_addr); } else { udc_write(idx, S3C2410_UDC_INDEX_REG); ep_csr = udc_read((ep->bEndpointAddress & USB_DIR_IN) ? 
S3C2410_UDC_IN_CSR1_REG : S3C2410_UDC_OUT_CSR1_REG);

		/* remainder of s3c2410_udc_set_halt(): set or clear the
		 * SENDSTL (send-stall) bit in the selected endpoint's
		 * control/status register. */
		if ((ep->bEndpointAddress & USB_DIR_IN) != 0) {
			if (value)
				udc_write(ep_csr | S3C2410_UDC_ICSR1_SENDSTL,
					S3C2410_UDC_IN_CSR1_REG);
			else {
				/* clear the stall first, then set CLRDT so
				 * the next transfer restarts at DATA0 */
				ep_csr &= ~S3C2410_UDC_ICSR1_SENDSTL;
				udc_write(ep_csr,
					S3C2410_UDC_IN_CSR1_REG);
				ep_csr |= S3C2410_UDC_ICSR1_CLRDT;
				udc_write(ep_csr, S3C2410_UDC_IN_CSR1_REG);
			}
		} else {
			if (value)
				udc_write(ep_csr | S3C2410_UDC_OCSR1_SENDSTL,
					S3C2410_UDC_OUT_CSR1_REG);
			else {
				/* same two-step clear for the OUT direction */
				ep_csr &= ~S3C2410_UDC_OCSR1_SENDSTL;
				udc_write(ep_csr,
					S3C2410_UDC_OUT_CSR1_REG);
				ep_csr |= S3C2410_UDC_OCSR1_CLRDT;
				udc_write(ep_csr,
					S3C2410_UDC_OUT_CSR1_REG);
			}
		}

	}

	ep->halted = value ? 1 : 0;
	local_irq_restore(flags);

	return 0;
}

/* endpoint operations handed to the gadget core */
static const struct usb_ep_ops s3c2410_ep_ops = {
	.enable		= s3c2410_udc_ep_enable,
	.disable	= s3c2410_udc_ep_disable,

	.alloc_request	= s3c2410_udc_alloc_request,
	.free_request	= s3c2410_udc_free_request,

	.queue		= s3c2410_udc_queue,
	.dequeue	= s3c2410_udc_dequeue,

	.set_halt	= s3c2410_udc_set_halt,
};

/*------------------------- usb_gadget_ops ----------------------------------*/

/*
 * s3c2410_udc_get_frame
 *
 * Return the current USB frame number, assembled from the two
 * hardware frame-number registers (NUM2 is the high byte).
 */
static int s3c2410_udc_get_frame(struct usb_gadget *_gadget)
{
	int tmp;

	dprintk(DEBUG_VERBOSE, "%s()\n", __func__);

	tmp = udc_read(S3C2410_UDC_FRAME_NUM2_REG) << 8;
	tmp |= udc_read(S3C2410_UDC_FRAME_NUM1_REG);

	return tmp;
}

/*
 * s3c2410_udc_wakeup
 *
 * Remote wakeup is not implemented by this controller driver;
 * the callback only logs and reports success.
 */
static int s3c2410_udc_wakeup(struct usb_gadget *_gadget)
{
	dprintk(DEBUG_NORMAL, "%s()\n", __func__);
	return 0;
}

/*
 * s3c2410_udc_set_selfpowered
 *
 * Record the self-powered status bit in udc->devstatus so it can be
 * reported back to the host in GET_STATUS replies.
 */
static int s3c2410_udc_set_selfpowered(struct usb_gadget *gadget, int value)
{
	struct s3c2410_udc *udc = to_s3c2410_udc(gadget);

	dprintk(DEBUG_NORMAL, "%s()\n", __func__);

	if (value)
		udc->devstatus |= (1 << USB_DEVICE_SELF_POWERED);
	else
		udc->devstatus &= ~(1 << USB_DEVICE_SELF_POWERED);

	return 0;
}

static void s3c2410_udc_disable(struct s3c2410_udc *dev);
static void s3c2410_udc_enable(struct s3c2410_udc *dev);

/* s3c2410_udc_set_pullup - body continues on the next source line */
static int s3c2410_udc_set_pullup(struct s3c2410_udc *udc, int is_on)
{
dprintk(DEBUG_NORMAL, "%s()\n", __func__); if (udc_info && (udc_info->udc_command || gpio_is_valid(udc_info->pullup_pin))) { if (is_on) s3c2410_udc_enable(udc); else { if (udc->gadget.speed != USB_SPEED_UNKNOWN) { if (udc->driver && udc->driver->disconnect) udc->driver->disconnect(&udc->gadget); } s3c2410_udc_disable(udc); } } else { return -EOPNOTSUPP; } return 0; } static int s3c2410_udc_vbus_session(struct usb_gadget *gadget, int is_active) { struct s3c2410_udc *udc = to_s3c2410_udc(gadget); dprintk(DEBUG_NORMAL, "%s()\n", __func__); udc->vbus = (is_active != 0); s3c2410_udc_set_pullup(udc, is_active); return 0; } static int s3c2410_udc_pullup(struct usb_gadget *gadget, int is_on) { struct s3c2410_udc *udc = to_s3c2410_udc(gadget); dprintk(DEBUG_NORMAL, "%s()\n", __func__); s3c2410_udc_set_pullup(udc, is_on ? 0 : 1); return 0; } static irqreturn_t s3c2410_udc_vbus_irq(int irq, void *_dev) { struct s3c2410_udc *dev = _dev; unsigned int value; dprintk(DEBUG_NORMAL, "%s()\n", __func__); value = gpio_get_value(udc_info->vbus_pin) ? 
1 : 0;

	/* remainder of s3c2410_udc_vbus_irq(): apply the optional board-level
	 * inversion and only report a session change when the sensed VBUS
	 * state actually differs from the cached one. */
	if (udc_info->vbus_pin_inverted)
		value = !value;

	if (value != dev->vbus)
		s3c2410_udc_vbus_session(&dev->gadget, value);

	return IRQ_HANDLED;
}

/*
 * s3c2410_vbus_draw
 * @ma: current budget granted by the host, in milliamperes.
 *
 * Forward the allowance to the board-specific vbus_draw() hook when one
 * is provided; otherwise the operation is unsupported.
 */
static int s3c2410_vbus_draw(struct usb_gadget *_gadget, unsigned ma)
{
	dprintk(DEBUG_NORMAL, "%s()\n", __func__);

	if (udc_info && udc_info->vbus_draw) {
		udc_info->vbus_draw(ma);
		return 0;
	}

	return -ENOTSUPP;
}

static int s3c2410_udc_start(struct usb_gadget *g,
		struct usb_gadget_driver *driver);
static int s3c2410_udc_stop(struct usb_gadget *g,
		struct usb_gadget_driver *driver);

/* gadget operations handed to the UDC core */
static const struct usb_gadget_ops s3c2410_ops = {
	.get_frame		= s3c2410_udc_get_frame,
	.wakeup			= s3c2410_udc_wakeup,
	.set_selfpowered	= s3c2410_udc_set_selfpowered,
	.pullup			= s3c2410_udc_pullup,
	.vbus_session		= s3c2410_udc_vbus_session,
	.vbus_draw		= s3c2410_vbus_draw,
	.udc_start		= s3c2410_udc_start,
	.udc_stop		= s3c2410_udc_stop,
};

/*
 * s3c2410_udc_command
 *
 * Drive the D+ pullup, either through the board-supplied udc_command()
 * callback or, failing that, directly via the pullup GPIO (honouring
 * pullup_pin_inverted).
 */
static void s3c2410_udc_command(enum s3c2410_udc_cmd_e cmd)
{
	if (!udc_info)
		return;

	if (udc_info->udc_command) {
		udc_info->udc_command(cmd);
	} else if (gpio_is_valid(udc_info->pullup_pin)) {
		int value;

		switch (cmd) {
		case S3C2410_UDC_P_ENABLE:
			value = 1;
			break;
		case S3C2410_UDC_P_DISABLE:
			value = 0;
			break;
		default:
			return;
		}
		value ^= udc_info->pullup_pin_inverted;

		gpio_set_value(udc_info->pullup_pin, value);
	}
}

/*------------------------- gadget driver handling---------------------------*/

/*
 * s3c2410_udc_disable
 *
 * Mask and acknowledge every UDC interrupt, drop the D+ pullup and mark
 * the link speed unknown.
 */
static void s3c2410_udc_disable(struct s3c2410_udc *dev)
{
	dprintk(DEBUG_NORMAL, "%s()\n", __func__);

	/* Disable all interrupts */
	udc_write(0x00, S3C2410_UDC_USB_INT_EN_REG);
	udc_write(0x00, S3C2410_UDC_EP_INT_EN_REG);

	/* Clear the interrupt registers */
	udc_write(S3C2410_UDC_USBINT_RESET |
				S3C2410_UDC_USBINT_RESUME |
				S3C2410_UDC_USBINT_SUSPEND,
				S3C2410_UDC_USB_INT_REG);

	/* ack any pending endpoint interrupts (0x1F = all five endpoints) */
	udc_write(0x1F, S3C2410_UDC_EP_INT_REG);

	/* Good bye, cruel world */
	s3c2410_udc_command(S3C2410_UDC_P_DISABLE);

	/* Set speed to unknown */
	dev->gadget.speed = USB_SPEED_UNKNOWN;
}

/* s3c2410_udc_reinit - name and body continue on the next source line */
static void
s3c2410_udc_reinit(struct s3c2410_udc *dev) { u32 i; /* device/ep0 records init */ INIT_LIST_HEAD(&dev->gadget.ep_list); INIT_LIST_HEAD(&dev->gadget.ep0->ep_list); dev->ep0state = EP0_IDLE; for (i = 0; i < S3C2410_ENDPOINTS; i++) { struct s3c2410_ep *ep = &dev->ep[i]; if (i != 0) list_add_tail(&ep->ep.ep_list, &dev->gadget.ep_list); ep->dev = dev; ep->ep.desc = NULL; ep->halted = 0; INIT_LIST_HEAD(&ep->queue); } } /* * s3c2410_udc_enable */ static void s3c2410_udc_enable(struct s3c2410_udc *dev) { int i; dprintk(DEBUG_NORMAL, "s3c2410_udc_enable called\n"); /* dev->gadget.speed = USB_SPEED_UNKNOWN; */ dev->gadget.speed = USB_SPEED_FULL; /* Set MAXP for all endpoints */ for (i = 0; i < S3C2410_ENDPOINTS; i++) { udc_write(i, S3C2410_UDC_INDEX_REG); udc_write((dev->ep[i].ep.maxpacket & 0x7ff) >> 3, S3C2410_UDC_MAXP_REG); } /* Set default power state */ udc_write(DEFAULT_POWER_STATE, S3C2410_UDC_PWR_REG); /* Enable reset and suspend interrupt interrupts */ udc_write(S3C2410_UDC_USBINT_RESET | S3C2410_UDC_USBINT_SUSPEND, S3C2410_UDC_USB_INT_EN_REG); /* Enable ep0 interrupt */ udc_write(S3C2410_UDC_INT_EP0, S3C2410_UDC_EP_INT_EN_REG); /* time to say "hello, world" */ s3c2410_udc_command(S3C2410_UDC_P_ENABLE); } static int s3c2410_udc_start(struct usb_gadget *g, struct usb_gadget_driver *driver) { struct s3c2410_udc *udc = to_s3c2410(g); dprintk(DEBUG_NORMAL, "%s() '%s'\n", __func__, driver->driver.name); /* Hook the driver */ udc->driver = driver; /* Enable udc */ s3c2410_udc_enable(udc); return 0; } static int s3c2410_udc_stop(struct usb_gadget *g, struct usb_gadget_driver *driver) { struct s3c2410_udc *udc = to_s3c2410(g); udc->driver = NULL; /* Disable udc */ s3c2410_udc_disable(udc); return 0; } /*---------------------------------------------------------------------------*/ static struct s3c2410_udc memory = { .gadget = { .ops = &s3c2410_ops, .ep0 = &memory.ep[0].ep, .name = gadget_name, .dev = { .init_name = "gadget", }, }, /* control endpoint */ .ep[0] = { .num = 
0, .ep = { .name = ep0name, .ops = &s3c2410_ep_ops, .maxpacket = EP0_FIFO_SIZE, }, .dev = &memory, }, /* first group of endpoints */ .ep[1] = { .num = 1, .ep = { .name = "ep1-bulk", .ops = &s3c2410_ep_ops, .maxpacket = EP_FIFO_SIZE, }, .dev = &memory, .fifo_size = EP_FIFO_SIZE, .bEndpointAddress = 1, .bmAttributes = USB_ENDPOINT_XFER_BULK, }, .ep[2] = { .num = 2, .ep = { .name = "ep2-bulk", .ops = &s3c2410_ep_ops, .maxpacket = EP_FIFO_SIZE, }, .dev = &memory, .fifo_size = EP_FIFO_SIZE, .bEndpointAddress = 2, .bmAttributes = USB_ENDPOINT_XFER_BULK, }, .ep[3] = { .num = 3, .ep = { .name = "ep3-bulk", .ops = &s3c2410_ep_ops, .maxpacket = EP_FIFO_SIZE, }, .dev = &memory, .fifo_size = EP_FIFO_SIZE, .bEndpointAddress = 3, .bmAttributes = USB_ENDPOINT_XFER_BULK, }, .ep[4] = { .num = 4, .ep = { .name = "ep4-bulk", .ops = &s3c2410_ep_ops, .maxpacket = EP_FIFO_SIZE, }, .dev = &memory, .fifo_size = EP_FIFO_SIZE, .bEndpointAddress = 4, .bmAttributes = USB_ENDPOINT_XFER_BULK, } }; /* * probe - binds to the platform device */ static int s3c2410_udc_probe(struct platform_device *pdev) { struct s3c2410_udc *udc = &memory; struct device *dev = &pdev->dev; int retval; int irq; dev_dbg(dev, "%s()\n", __func__); usb_bus_clock = clk_get(NULL, "usb-bus-gadget"); if (IS_ERR(usb_bus_clock)) { dev_err(dev, "failed to get usb bus clock source\n"); return PTR_ERR(usb_bus_clock); } clk_enable(usb_bus_clock); udc_clock = clk_get(NULL, "usb-device"); if (IS_ERR(udc_clock)) { dev_err(dev, "failed to get udc clock source\n"); return PTR_ERR(udc_clock); } clk_enable(udc_clock); mdelay(10); dev_dbg(dev, "got and enabled clocks\n"); if (strncmp(pdev->name, "s3c2440", 7) == 0) { dev_info(dev, "S3C2440: increasing FIFO to 128 bytes\n"); memory.ep[1].fifo_size = S3C2440_EP_FIFO_SIZE; memory.ep[2].fifo_size = S3C2440_EP_FIFO_SIZE; memory.ep[3].fifo_size = S3C2440_EP_FIFO_SIZE; memory.ep[4].fifo_size = S3C2440_EP_FIFO_SIZE; } spin_lock_init(&udc->lock); udc_info = pdev->dev.platform_data; rsrc_start = 
S3C2410_PA_USBDEV; rsrc_len = S3C24XX_SZ_USBDEV; if (!request_mem_region(rsrc_start, rsrc_len, gadget_name)) return -EBUSY; base_addr = ioremap(rsrc_start, rsrc_len); if (!base_addr) { retval = -ENOMEM; goto err_mem; } the_controller = udc; platform_set_drvdata(pdev, udc); s3c2410_udc_disable(udc); s3c2410_udc_reinit(udc); /* irq setup after old hardware state is cleaned up */ retval = request_irq(IRQ_USBD, s3c2410_udc_irq, 0, gadget_name, udc); if (retval != 0) { dev_err(dev, "cannot get irq %i, err %d\n", IRQ_USBD, retval); retval = -EBUSY; goto err_map; } dev_dbg(dev, "got irq %i\n", IRQ_USBD); if (udc_info && udc_info->vbus_pin > 0) { retval = gpio_request(udc_info->vbus_pin, "udc vbus"); if (retval < 0) { dev_err(dev, "cannot claim vbus pin\n"); goto err_int; } irq = gpio_to_irq(udc_info->vbus_pin); if (irq < 0) { dev_err(dev, "no irq for gpio vbus pin\n"); retval = irq; goto err_gpio_claim; } retval = request_irq(irq, s3c2410_udc_vbus_irq, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_SHARED, gadget_name, udc); if (retval != 0) { dev_err(dev, "can't get vbus irq %d, err %d\n", irq, retval); retval = -EBUSY; goto err_gpio_claim; } dev_dbg(dev, "got irq %i\n", irq); } else { udc->vbus = 1; } if (udc_info && !udc_info->udc_command && gpio_is_valid(udc_info->pullup_pin)) { retval = gpio_request_one(udc_info->pullup_pin, udc_info->vbus_pin_inverted ? 
GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW, "udc pullup"); if (retval) goto err_vbus_irq; } retval = usb_add_gadget_udc(&pdev->dev, &udc->gadget); if (retval) goto err_add_udc; if (s3c2410_udc_debugfs_root) { udc->regs_info = debugfs_create_file("registers", S_IRUGO, s3c2410_udc_debugfs_root, udc, &s3c2410_udc_debugfs_fops); if (!udc->regs_info) dev_warn(dev, "debugfs file creation failed\n"); } dev_dbg(dev, "probe ok\n"); return 0; err_add_udc: if (udc_info && !udc_info->udc_command && gpio_is_valid(udc_info->pullup_pin)) gpio_free(udc_info->pullup_pin); err_vbus_irq: if (udc_info && udc_info->vbus_pin > 0) free_irq(gpio_to_irq(udc_info->vbus_pin), udc); err_gpio_claim: if (udc_info && udc_info->vbus_pin > 0) gpio_free(udc_info->vbus_pin); err_int: free_irq(IRQ_USBD, udc); err_map: iounmap(base_addr); err_mem: release_mem_region(rsrc_start, rsrc_len); return retval; } /* * s3c2410_udc_remove */ static int s3c2410_udc_remove(struct platform_device *pdev) { struct s3c2410_udc *udc = platform_get_drvdata(pdev); unsigned int irq; dev_dbg(&pdev->dev, "%s()\n", __func__); if (udc->driver) return -EBUSY; usb_del_gadget_udc(&udc->gadget); debugfs_remove(udc->regs_info); if (udc_info && !udc_info->udc_command && gpio_is_valid(udc_info->pullup_pin)) gpio_free(udc_info->pullup_pin); if (udc_info && udc_info->vbus_pin > 0) { irq = gpio_to_irq(udc_info->vbus_pin); free_irq(irq, udc); } free_irq(IRQ_USBD, udc); iounmap(base_addr); release_mem_region(rsrc_start, rsrc_len); if (!IS_ERR(udc_clock) && udc_clock != NULL) { clk_disable(udc_clock); clk_put(udc_clock); udc_clock = NULL; } if (!IS_ERR(usb_bus_clock) && usb_bus_clock != NULL) { clk_disable(usb_bus_clock); clk_put(usb_bus_clock); usb_bus_clock = NULL; } dev_dbg(&pdev->dev, "%s: remove ok\n", __func__); return 0; } #ifdef CONFIG_PM static int s3c2410_udc_suspend(struct platform_device *pdev, pm_message_t message) { s3c2410_udc_command(S3C2410_UDC_P_DISABLE); return 0; } static int s3c2410_udc_resume(struct platform_device 
*pdev)
{
	/* tail of s3c2410_udc_resume(): re-assert the D+ pullup */
	s3c2410_udc_command(S3C2410_UDC_P_ENABLE);
	return 0;
}
#else
#define s3c2410_udc_suspend	NULL
#define s3c2410_udc_resume	NULL
#endif

/* platform device names this driver binds to */
static const struct platform_device_id s3c_udc_ids[] = {
	{ "s3c2410-usbgadget", },
	{ "s3c2440-usbgadget", },
	{ }
};
MODULE_DEVICE_TABLE(platform, s3c_udc_ids);

static struct platform_driver udc_driver_24x0 = {
	.driver		= {
		.name	= "s3c24x0-usbgadget",
		.owner	= THIS_MODULE,
	},
	.probe		= s3c2410_udc_probe,
	.remove		= s3c2410_udc_remove,
	.suspend	= s3c2410_udc_suspend,
	.resume		= s3c2410_udc_resume,
	.id_table	= s3c_udc_ids,
};

/*
 * udc_init - module entry point
 *
 * Create the debugfs root (failure is deliberately non-fatal: the root
 * is reset to NULL and the driver carries on) and register the platform
 * driver.
 */
static int __init udc_init(void)
{
	int retval;

	dprintk(DEBUG_NORMAL, "%s: version %s\n", gadget_name, DRIVER_VERSION);

	/* NOTE(review): debugfs_create_dir() may return NULL rather than an
	 * ERR_PTR on some configurations; only the ERR_PTR case is handled
	 * here — confirm against the target kernel version. */
	s3c2410_udc_debugfs_root = debugfs_create_dir(gadget_name, NULL);
	if (IS_ERR(s3c2410_udc_debugfs_root)) {
		pr_err("%s: debugfs dir creation failed %ld\n",
			gadget_name, PTR_ERR(s3c2410_udc_debugfs_root));
		s3c2410_udc_debugfs_root = NULL;
	}

	retval = platform_driver_register(&udc_driver_24x0);
	if (retval)
		goto err;

	return 0;

err:
	/* debugfs_remove(NULL) is a no-op, so this is safe either way */
	debugfs_remove(s3c2410_udc_debugfs_root);
	return retval;
}

/* udc_exit - module exit: unregister the driver, tear down debugfs */
static void __exit udc_exit(void)
{
	platform_driver_unregister(&udc_driver_24x0);
	debugfs_remove(s3c2410_udc_debugfs_root);
}

module_init(udc_init);
module_exit(udc_exit);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");
gpl-2.0
BigBot96/android_kernel_samsung_espressovzw-jb
fs/xfs/xfs_iomap.c
2934
19997
/* * Copyright (c) 2000-2006 Silicon Graphics, Inc. * All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it would be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include "xfs.h" #include "xfs_fs.h" #include "xfs_bit.h" #include "xfs_log.h" #include "xfs_inum.h" #include "xfs_trans.h" #include "xfs_sb.h" #include "xfs_ag.h" #include "xfs_alloc.h" #include "xfs_quota.h" #include "xfs_mount.h" #include "xfs_bmap_btree.h" #include "xfs_alloc_btree.h" #include "xfs_ialloc_btree.h" #include "xfs_dinode.h" #include "xfs_inode.h" #include "xfs_btree.h" #include "xfs_bmap.h" #include "xfs_rtalloc.h" #include "xfs_error.h" #include "xfs_itable.h" #include "xfs_rw.h" #include "xfs_attr.h" #include "xfs_buf_item.h" #include "xfs_trans_space.h" #include "xfs_utils.h" #include "xfs_iomap.h" #include "xfs_trace.h" #define XFS_WRITEIO_ALIGN(mp,off) (((off) >> mp->m_writeio_log) \ << mp->m_writeio_log) #define XFS_WRITE_IMAPS XFS_BMAP_MAX_NMAP STATIC int xfs_iomap_eof_align_last_fsb( xfs_mount_t *mp, xfs_inode_t *ip, xfs_extlen_t extsize, xfs_fileoff_t *last_fsb) { xfs_fileoff_t new_last_fsb = 0; xfs_extlen_t align; int eof, error; if (XFS_IS_REALTIME_INODE(ip)) ; /* * If mounted with the "-o swalloc" option, roundup the allocation * request to a stripe width boundary if the file size is >= * stripe width and we are allocating past the allocation eof. 
*/ else if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC) && (ip->i_size >= XFS_FSB_TO_B(mp, mp->m_swidth))) new_last_fsb = roundup_64(*last_fsb, mp->m_swidth); /* * Roundup the allocation request to a stripe unit (m_dalign) boundary * if the file size is >= stripe unit size, and we are allocating past * the allocation eof. */ else if (mp->m_dalign && (ip->i_size >= XFS_FSB_TO_B(mp, mp->m_dalign))) new_last_fsb = roundup_64(*last_fsb, mp->m_dalign); /* * Always round up the allocation request to an extent boundary * (when file on a real-time subvolume or has di_extsize hint). */ if (extsize) { if (new_last_fsb) align = roundup_64(new_last_fsb, extsize); else align = extsize; new_last_fsb = roundup_64(*last_fsb, align); } if (new_last_fsb) { error = xfs_bmap_eof(ip, new_last_fsb, XFS_DATA_FORK, &eof); if (error) return error; if (eof) *last_fsb = new_last_fsb; } return 0; } STATIC int xfs_alert_fsblock_zero( xfs_inode_t *ip, xfs_bmbt_irec_t *imap) { xfs_alert_tag(ip->i_mount, XFS_PTAG_FSBLOCK_ZERO, "Access to block zero in inode %llu " "start_block: %llx start_off: %llx " "blkcnt: %llx extent-state: %x\n", (unsigned long long)ip->i_ino, (unsigned long long)imap->br_startblock, (unsigned long long)imap->br_startoff, (unsigned long long)imap->br_blockcount, imap->br_state); return EFSCORRUPTED; } int xfs_iomap_write_direct( xfs_inode_t *ip, xfs_off_t offset, size_t count, xfs_bmbt_irec_t *imap, int nmaps) { xfs_mount_t *mp = ip->i_mount; xfs_fileoff_t offset_fsb; xfs_fileoff_t last_fsb; xfs_filblks_t count_fsb, resaligned; xfs_fsblock_t firstfsb; xfs_extlen_t extsz, temp; int nimaps; int bmapi_flag; int quota_flag; int rt; xfs_trans_t *tp; xfs_bmap_free_t free_list; uint qblocks, resblks, resrtextents; int committed; int error; /* * Make sure that the dquots are there. This doesn't hold * the ilock across a disk read. 
*/ error = xfs_qm_dqattach_locked(ip, 0); if (error) return XFS_ERROR(error); rt = XFS_IS_REALTIME_INODE(ip); extsz = xfs_get_extsz_hint(ip); offset_fsb = XFS_B_TO_FSBT(mp, offset); last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count))); if ((offset + count) > ip->i_size) { error = xfs_iomap_eof_align_last_fsb(mp, ip, extsz, &last_fsb); if (error) goto error_out; } else { if (nmaps && (imap->br_startblock == HOLESTARTBLOCK)) last_fsb = MIN(last_fsb, (xfs_fileoff_t) imap->br_blockcount + imap->br_startoff); } count_fsb = last_fsb - offset_fsb; ASSERT(count_fsb > 0); resaligned = count_fsb; if (unlikely(extsz)) { if ((temp = do_mod(offset_fsb, extsz))) resaligned += temp; if ((temp = do_mod(resaligned, extsz))) resaligned += extsz - temp; } if (unlikely(rt)) { resrtextents = qblocks = resaligned; resrtextents /= mp->m_sb.sb_rextsize; resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0); quota_flag = XFS_QMOPT_RES_RTBLKS; } else { resrtextents = 0; resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned); quota_flag = XFS_QMOPT_RES_REGBLKS; } /* * Allocate and setup the transaction */ xfs_iunlock(ip, XFS_ILOCK_EXCL); tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT); error = xfs_trans_reserve(tp, resblks, XFS_WRITE_LOG_RES(mp), resrtextents, XFS_TRANS_PERM_LOG_RES, XFS_WRITE_LOG_COUNT); /* * Check for running out of space, note: need lock to return */ if (error) xfs_trans_cancel(tp, 0); xfs_ilock(ip, XFS_ILOCK_EXCL); if (error) goto error_out; error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks, 0, quota_flag); if (error) goto error1; xfs_trans_ijoin(tp, ip); bmapi_flag = XFS_BMAPI_WRITE; if (offset < ip->i_size || extsz) bmapi_flag |= XFS_BMAPI_PREALLOC; /* * Issue the xfs_bmapi() call to allocate the blocks. * * From this point onwards we overwrite the imap pointer that the * caller gave to us. 
*/ xfs_bmap_init(&free_list, &firstfsb); nimaps = 1; error = xfs_bmapi(tp, ip, offset_fsb, count_fsb, bmapi_flag, &firstfsb, 0, imap, &nimaps, &free_list); if (error) goto error0; /* * Complete the transaction */ error = xfs_bmap_finish(&tp, &free_list, &committed); if (error) goto error0; error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES); if (error) goto error_out; /* * Copy any maps to caller's array and return any error. */ if (nimaps == 0) { error = ENOSPC; goto error_out; } if (!(imap->br_startblock || XFS_IS_REALTIME_INODE(ip))) { error = xfs_alert_fsblock_zero(ip, imap); goto error_out; } return 0; error0: /* Cancel bmap, unlock inode, unreserve quota blocks, cancel trans */ xfs_bmap_cancel(&free_list); xfs_trans_unreserve_quota_nblks(tp, ip, qblocks, 0, quota_flag); error1: /* Just cancel transaction */ xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT); error_out: return XFS_ERROR(error); } /* * If the caller is doing a write at the end of the file, then extend the * allocation out to the file system's write iosize. We clean up any extra * space left over when the file is closed in xfs_inactive(). * * If we find we already have delalloc preallocation beyond EOF, don't do more * preallocation as it it not needed. */ STATIC int xfs_iomap_eof_want_preallocate( xfs_mount_t *mp, xfs_inode_t *ip, xfs_off_t offset, size_t count, xfs_bmbt_irec_t *imap, int nimaps, int *prealloc) { xfs_fileoff_t start_fsb; xfs_filblks_t count_fsb; xfs_fsblock_t firstblock; int n, error, imaps; int found_delalloc = 0; *prealloc = 0; if ((offset + count) <= ip->i_size) return 0; /* * If there are any real blocks past eof, then don't * do any speculative allocation. 
*/ start_fsb = XFS_B_TO_FSBT(mp, ((xfs_ufsize_t)(offset + count - 1))); count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp)); while (count_fsb > 0) { imaps = nimaps; firstblock = NULLFSBLOCK; error = xfs_bmapi(NULL, ip, start_fsb, count_fsb, 0, &firstblock, 0, imap, &imaps, NULL); if (error) return error; for (n = 0; n < imaps; n++) { if ((imap[n].br_startblock != HOLESTARTBLOCK) && (imap[n].br_startblock != DELAYSTARTBLOCK)) return 0; start_fsb += imap[n].br_blockcount; count_fsb -= imap[n].br_blockcount; if (imap[n].br_startblock == DELAYSTARTBLOCK) found_delalloc = 1; } } if (!found_delalloc) *prealloc = 1; return 0; } /* * If we don't have a user specified preallocation size, dynamically increase * the preallocation size as the size of the file grows. Cap the maximum size * at a single extent or less if the filesystem is near full. The closer the * filesystem is to full, the smaller the maximum prealocation. */ STATIC xfs_fsblock_t xfs_iomap_prealloc_size( struct xfs_mount *mp, struct xfs_inode *ip) { xfs_fsblock_t alloc_blocks = 0; if (!(mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)) { int shift = 0; int64_t freesp; /* * rounddown_pow_of_two() returns an undefined result * if we pass in alloc_blocks = 0. Hence the "+ 1" to * ensure we always pass in a non-zero value. 
*/ alloc_blocks = XFS_B_TO_FSB(mp, ip->i_size) + 1; alloc_blocks = XFS_FILEOFF_MIN(MAXEXTLEN, rounddown_pow_of_two(alloc_blocks)); xfs_icsb_sync_counters(mp, XFS_ICSB_LAZY_COUNT); freesp = mp->m_sb.sb_fdblocks; if (freesp < mp->m_low_space[XFS_LOWSP_5_PCNT]) { shift = 2; if (freesp < mp->m_low_space[XFS_LOWSP_4_PCNT]) shift++; if (freesp < mp->m_low_space[XFS_LOWSP_3_PCNT]) shift++; if (freesp < mp->m_low_space[XFS_LOWSP_2_PCNT]) shift++; if (freesp < mp->m_low_space[XFS_LOWSP_1_PCNT]) shift++; } if (shift) alloc_blocks >>= shift; } if (alloc_blocks < mp->m_writeio_blocks) alloc_blocks = mp->m_writeio_blocks; return alloc_blocks; } int xfs_iomap_write_delay( xfs_inode_t *ip, xfs_off_t offset, size_t count, xfs_bmbt_irec_t *ret_imap) { xfs_mount_t *mp = ip->i_mount; xfs_fileoff_t offset_fsb; xfs_fileoff_t last_fsb; xfs_off_t aligned_offset; xfs_fileoff_t ioalign; xfs_fsblock_t firstblock; xfs_extlen_t extsz; int nimaps; xfs_bmbt_irec_t imap[XFS_WRITE_IMAPS]; int prealloc, flushed = 0; int error; ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); /* * Make sure that the dquots are there. This doesn't hold * the ilock across a disk read. 
*/ error = xfs_qm_dqattach_locked(ip, 0); if (error) return XFS_ERROR(error); extsz = xfs_get_extsz_hint(ip); offset_fsb = XFS_B_TO_FSBT(mp, offset); error = xfs_iomap_eof_want_preallocate(mp, ip, offset, count, imap, XFS_WRITE_IMAPS, &prealloc); if (error) return error; retry: if (prealloc) { xfs_fsblock_t alloc_blocks = xfs_iomap_prealloc_size(mp, ip); aligned_offset = XFS_WRITEIO_ALIGN(mp, (offset + count - 1)); ioalign = XFS_B_TO_FSBT(mp, aligned_offset); last_fsb = ioalign + alloc_blocks; } else { last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count))); } if (prealloc || extsz) { error = xfs_iomap_eof_align_last_fsb(mp, ip, extsz, &last_fsb); if (error) return error; } nimaps = XFS_WRITE_IMAPS; firstblock = NULLFSBLOCK; error = xfs_bmapi(NULL, ip, offset_fsb, (xfs_filblks_t)(last_fsb - offset_fsb), XFS_BMAPI_DELAY | XFS_BMAPI_WRITE | XFS_BMAPI_ENTIRE, &firstblock, 1, imap, &nimaps, NULL); switch (error) { case 0: case ENOSPC: case EDQUOT: break; default: return XFS_ERROR(error); } /* * If bmapi returned us nothing, we got either ENOSPC or EDQUOT. For * ENOSPC, * flush all other inodes with delalloc blocks to free up * some of the excess reserved metadata space. For both cases, retry * without EOF preallocation. */ if (nimaps == 0) { trace_xfs_delalloc_enospc(ip, offset, count); if (flushed) return XFS_ERROR(error ? error : ENOSPC); if (error == ENOSPC) { xfs_iunlock(ip, XFS_ILOCK_EXCL); xfs_flush_inodes(ip); xfs_ilock(ip, XFS_ILOCK_EXCL); } flushed = 1; error = 0; prealloc = 0; goto retry; } if (!(imap[0].br_startblock || XFS_IS_REALTIME_INODE(ip))) return xfs_alert_fsblock_zero(ip, &imap[0]); *ret_imap = imap[0]; return 0; } /* * Pass in a delayed allocate extent, convert it to real extents; * return to the caller the extent we create which maps on top of * the originating callers request. * * Called without a lock on the inode. 
* * We no longer bother to look at the incoming map - all we have to * guarantee is that whatever we allocate fills the required range. */ int xfs_iomap_write_allocate( xfs_inode_t *ip, xfs_off_t offset, size_t count, xfs_bmbt_irec_t *imap) { xfs_mount_t *mp = ip->i_mount; xfs_fileoff_t offset_fsb, last_block; xfs_fileoff_t end_fsb, map_start_fsb; xfs_fsblock_t first_block; xfs_bmap_free_t free_list; xfs_filblks_t count_fsb; xfs_trans_t *tp; int nimaps, committed; int error = 0; int nres; /* * Make sure that the dquots are there. */ error = xfs_qm_dqattach(ip, 0); if (error) return XFS_ERROR(error); offset_fsb = XFS_B_TO_FSBT(mp, offset); count_fsb = imap->br_blockcount; map_start_fsb = imap->br_startoff; XFS_STATS_ADD(xs_xstrat_bytes, XFS_FSB_TO_B(mp, count_fsb)); while (count_fsb != 0) { /* * Set up a transaction with which to allocate the * backing store for the file. Do allocations in a * loop until we get some space in the range we are * interested in. The other space that might be allocated * is in the delayed allocation extent on which we sit * but before our buffer starts. */ nimaps = 0; while (nimaps == 0) { tp = xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE); tp->t_flags |= XFS_TRANS_RESERVE; nres = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK); error = xfs_trans_reserve(tp, nres, XFS_WRITE_LOG_RES(mp), 0, XFS_TRANS_PERM_LOG_RES, XFS_WRITE_LOG_COUNT); if (error) { xfs_trans_cancel(tp, 0); return XFS_ERROR(error); } xfs_ilock(ip, XFS_ILOCK_EXCL); xfs_trans_ijoin(tp, ip); xfs_bmap_init(&free_list, &first_block); /* * it is possible that the extents have changed since * we did the read call as we dropped the ilock for a * while. We have to be careful about truncates or hole * punchs here - we are not allowed to allocate * non-delalloc blocks here. * * The only protection against truncation is the pages * for the range we are being asked to convert are * locked and hence a truncate will block on them * first. 
* * As a result, if we go beyond the range we really * need and hit an delalloc extent boundary followed by * a hole while we have excess blocks in the map, we * will fill the hole incorrectly and overrun the * transaction reservation. * * Using a single map prevents this as we are forced to * check each map we look for overlap with the desired * range and abort as soon as we find it. Also, given * that we only return a single map, having one beyond * what we can return is probably a bit silly. * * We also need to check that we don't go beyond EOF; * this is a truncate optimisation as a truncate sets * the new file size before block on the pages we * currently have locked under writeback. Because they * are about to be tossed, we don't need to write them * back.... */ nimaps = 1; end_fsb = XFS_B_TO_FSB(mp, ip->i_size); error = xfs_bmap_last_offset(NULL, ip, &last_block, XFS_DATA_FORK); if (error) goto trans_cancel; last_block = XFS_FILEOFF_MAX(last_block, end_fsb); if ((map_start_fsb + count_fsb) > last_block) { count_fsb = last_block - map_start_fsb; if (count_fsb == 0) { error = EAGAIN; goto trans_cancel; } } /* * Go get the actual blocks. * * From this point onwards we overwrite the imap * pointer that the caller gave to us. 
*/ error = xfs_bmapi(tp, ip, map_start_fsb, count_fsb, XFS_BMAPI_WRITE, &first_block, 1, imap, &nimaps, &free_list); if (error) goto trans_cancel; error = xfs_bmap_finish(&tp, &free_list, &committed); if (error) goto trans_cancel; error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES); if (error) goto error0; xfs_iunlock(ip, XFS_ILOCK_EXCL); } /* * See if we were able to allocate an extent that * covers at least part of the callers request */ if (!(imap->br_startblock || XFS_IS_REALTIME_INODE(ip))) return xfs_alert_fsblock_zero(ip, imap); if ((offset_fsb >= imap->br_startoff) && (offset_fsb < (imap->br_startoff + imap->br_blockcount))) { XFS_STATS_INC(xs_xstrat_quick); return 0; } /* * So far we have not mapped the requested part of the * file, just surrounding data, try again. */ count_fsb -= imap->br_blockcount; map_start_fsb = imap->br_startoff + imap->br_blockcount; } trans_cancel: xfs_bmap_cancel(&free_list); xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT); error0: xfs_iunlock(ip, XFS_ILOCK_EXCL); return XFS_ERROR(error); } int xfs_iomap_write_unwritten( xfs_inode_t *ip, xfs_off_t offset, size_t count) { xfs_mount_t *mp = ip->i_mount; xfs_fileoff_t offset_fsb; xfs_filblks_t count_fsb; xfs_filblks_t numblks_fsb; xfs_fsblock_t firstfsb; int nimaps; xfs_trans_t *tp; xfs_bmbt_irec_t imap; xfs_bmap_free_t free_list; uint resblks; int committed; int error; trace_xfs_unwritten_convert(ip, offset, count); offset_fsb = XFS_B_TO_FSBT(mp, offset); count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count); count_fsb = (xfs_filblks_t)(count_fsb - offset_fsb); /* * Reserve enough blocks in this transaction for two complete extent * btree splits. We may be converting the middle part of an unwritten * extent and in this case we will insert two new extents in the btree * each of which could cause a full split. 
* * This reservation amount will be used in the first call to * xfs_bmbt_split() to select an AG with enough space to satisfy the * rest of the operation. */ resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1; do { /* * set up a transaction to convert the range of extents * from unwritten to real. Do allocations in a loop until * we have covered the range passed in. * * Note that we open code the transaction allocation here * to pass KM_NOFS--we can't risk to recursing back into * the filesystem here as we might be asked to write out * the same inode that we complete here and might deadlock * on the iolock. */ xfs_wait_for_freeze(mp, SB_FREEZE_TRANS); tp = _xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE, KM_NOFS); tp->t_flags |= XFS_TRANS_RESERVE; error = xfs_trans_reserve(tp, resblks, XFS_WRITE_LOG_RES(mp), 0, XFS_TRANS_PERM_LOG_RES, XFS_WRITE_LOG_COUNT); if (error) { xfs_trans_cancel(tp, 0); return XFS_ERROR(error); } xfs_ilock(ip, XFS_ILOCK_EXCL); xfs_trans_ijoin(tp, ip); /* * Modify the unwritten extent state of the buffer. */ xfs_bmap_init(&free_list, &firstfsb); nimaps = 1; error = xfs_bmapi(tp, ip, offset_fsb, count_fsb, XFS_BMAPI_WRITE|XFS_BMAPI_CONVERT, &firstfsb, 1, &imap, &nimaps, &free_list); if (error) goto error_on_bmapi_transaction; error = xfs_bmap_finish(&(tp), &(free_list), &committed); if (error) goto error_on_bmapi_transaction; error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES); xfs_iunlock(ip, XFS_ILOCK_EXCL); if (error) return XFS_ERROR(error); if (!(imap.br_startblock || XFS_IS_REALTIME_INODE(ip))) return xfs_alert_fsblock_zero(ip, &imap); if ((numblks_fsb = imap.br_blockcount) == 0) { /* * The numblks_fsb value should always get * smaller, otherwise the loop is stuck. 
*/ ASSERT(imap.br_blockcount); break; } offset_fsb += numblks_fsb; count_fsb -= numblks_fsb; } while (count_fsb > 0); return 0; error_on_bmapi_transaction: xfs_bmap_cancel(&free_list); xfs_trans_cancel(tp, (XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT)); xfs_iunlock(ip, XFS_ILOCK_EXCL); return XFS_ERROR(error); }
gpl-2.0
EPDCenterSpain/kernel_rikomagic_mk808
drivers/media/video/hdpvr/hdpvr-i2c.c
2934
5697
/*
 * Hauppauge HD PVR USB driver
 *
 * Copyright (C) 2008      Janne Grunau (j@jannau.net)
 *
 * IR device registration code is
 * Copyright (C) 2010   Andy Walls <awalls@md.metrocast.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2.
 *
 */

#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)

#include <linux/i2c.h>
#include <linux/slab.h>

#include "hdpvr.h"

/* USB vendor-request codes used to tunnel I2C over the control pipe */
#define CTRL_READ_REQUEST	0xb8
#define CTRL_WRITE_REQUEST	0x38

#define REQTYPE_I2C_READ	0xb1
#define REQTYPE_I2C_WRITE	0xb0
#define REQTYPE_I2C_WRITE_STATT	0xd0

#define Z8F0811_IR_TX_I2C_ADDR	0x70
#define Z8F0811_IR_RX_I2C_ADDR	0x71

/*
 * Register the IR transmitter half of the Z8F0811 as an I2C client on the
 * device's adapter.  Returns the new client or NULL on failure.
 */
struct i2c_client *hdpvr_register_ir_tx_i2c(struct hdpvr_device *dev)
{
	struct IR_i2c_init_data *init_data = &dev->ir_i2c_init_data;
	struct i2c_board_info board_info = {
		I2C_BOARD_INFO("ir_tx_z8f0811_hdpvr", Z8F0811_IR_TX_I2C_ADDR),
	};

	init_data->name = "HD-PVR";
	board_info.platform_data = init_data;

	return i2c_new_device(&dev->i2c_adapter, &board_info);
}

/*
 * Register the IR receiver half of the Z8F0811, pre-filling the defaults
 * that ir-kbd-i2c.c consumes via platform_data.
 */
struct i2c_client *hdpvr_register_ir_rx_i2c(struct hdpvr_device *dev)
{
	struct IR_i2c_init_data *init_data = &dev->ir_i2c_init_data;
	struct i2c_board_info board_info = {
		I2C_BOARD_INFO("ir_rx_z8f0811_hdpvr", Z8F0811_IR_RX_I2C_ADDR),
	};

	/* Our default information for ir-kbd-i2c.c to use */
	init_data->ir_codes = RC_MAP_HAUPPAUGE;
	init_data->internal_get_key_func = IR_KBD_GET_KEY_HAUP_XVR;
	init_data->type = RC_TYPE_RC5;
	init_data->name = "HD-PVR";
	init_data->polling_interval = 405; /* ms, duplicated from Windows */
	board_info.platform_data = init_data;

	return i2c_new_device(&dev->i2c_adapter, &board_info);
}

/*
 * Perform an (optional write followed by an) I2C read through the USB
 * control pipe.  Returns 0 on success or a negative errno.
 */
static int hdpvr_i2c_read(struct hdpvr_device *dev, int bus,
			  unsigned char addr, char *wdata, int wlen,
			  char *data, int len)
{
	int rc;

	/* both phases are staged through dev->i2c_buf; reject oversizes */
	if ((len > sizeof(dev->i2c_buf)) || (wlen > sizeof(dev->i2c_buf)))
		return -EINVAL;

	if (wlen) {
		memcpy(&dev->i2c_buf, wdata, wlen);
		rc = usb_control_msg(dev->udev,
				     usb_sndctrlpipe(dev->udev, 0),
				     REQTYPE_I2C_WRITE, CTRL_WRITE_REQUEST,
				     (bus << 8) | addr, 0, &dev->i2c_buf,
				     wlen, 1000);
		if (rc < 0)
			return rc;
	}

	rc = usb_control_msg(dev->udev,
			     usb_rcvctrlpipe(dev->udev, 0),
			     REQTYPE_I2C_READ, CTRL_READ_REQUEST,
			     (bus << 8) | addr, 0, &dev->i2c_buf, len, 1000);

	/* a short (but non-negative) transfer counts as an I/O error */
	if (rc != len)
		return rc < 0 ? rc : -EIO;

	memcpy(data, &dev->i2c_buf, len);
	return 0;
}

/*
 * Perform an I2C write through the USB control pipe and verify it via the
 * device's two-byte write-status request.  Returns 0 or a negative errno.
 */
static int hdpvr_i2c_write(struct hdpvr_device *dev, int bus,
			   unsigned char addr, char *data, int len)
{
	int rc;

	if (len > sizeof(dev->i2c_buf))
		return -EINVAL;

	memcpy(&dev->i2c_buf, data, len);
	rc = usb_control_msg(dev->udev,
			     usb_sndctrlpipe(dev->udev, 0),
			     REQTYPE_I2C_WRITE, CTRL_WRITE_REQUEST,
			     (bus << 8) | addr, 0, &dev->i2c_buf, len, 1000);
	if (rc < 0)
		return rc;

	/* status byte 1 echoes (len - 1) when the write succeeded */
	rc = usb_control_msg(dev->udev,
			     usb_rcvctrlpipe(dev->udev, 0),
			     REQTYPE_I2C_WRITE_STATT, CTRL_READ_REQUEST,
			     0, 0, &dev->i2c_buf, 2, 1000);

	if ((rc == 2) && (dev->i2c_buf[1] == (len - 1)))
		return 0;
	return rc < 0 ? rc : -EIO;
}

/*
 * i2c_algorithm master_xfer callback.  Supports single reads/writes and the
 * write-then-read pair; anything else is refused with a warning.
 */
static int hdpvr_transfer(struct i2c_adapter *i2c_adapter, struct i2c_msg *msgs,
			  int num)
{
	struct hdpvr_device *dev = i2c_get_adapdata(i2c_adapter);
	int retval = 0, addr;

	if (num <= 0)
		return 0;

	mutex_lock(&dev->i2c_mutex);

	addr = msgs[0].addr << 1;

	switch (num) {
	case 1:
		if (msgs[0].flags & I2C_M_RD)
			retval = hdpvr_i2c_read(dev, 1, addr, NULL, 0,
						msgs[0].buf, msgs[0].len);
		else
			retval = hdpvr_i2c_write(dev, 1, addr, msgs[0].buf,
						 msgs[0].len);
		break;
	case 2:
		if (msgs[0].addr != msgs[1].addr) {
			v4l2_warn(&dev->v4l2_dev, "refusing 2-phase i2c xfer "
				  "with conflicting target addresses\n");
			retval = -EINVAL;
			break;
		}

		if ((msgs[0].flags & I2C_M_RD) || !(msgs[1].flags & I2C_M_RD)) {
			v4l2_warn(&dev->v4l2_dev, "refusing complex xfer with "
				  "r0=%d, r1=%d\n", msgs[0].flags & I2C_M_RD,
				  msgs[1].flags & I2C_M_RD);
			retval = -EINVAL;
			break;
		}

		/*
		 * Write followed by atomic read is the only complex xfer that
		 * we actually support here.
		 */
		retval = hdpvr_i2c_read(dev, 1, addr, msgs[0].buf, msgs[0].len,
					msgs[1].buf, msgs[1].len);
		break;
	default:
		/*
		 * NOTE(review): retval is left at 0 here, so an unsupported
		 * multi-phase request is warned about but still reported as
		 * success to the caller — confirm this is intentional.
		 */
		v4l2_warn(&dev->v4l2_dev, "refusing %d-phase i2c xfer\n", num);
		break;
	}

	mutex_unlock(&dev->i2c_mutex);
	return retval ? retval : num;
}

static u32 hdpvr_functionality(struct i2c_adapter *adapter)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}

static struct i2c_algorithm hdpvr_algo = {
	.master_xfer   = hdpvr_transfer,
	.functionality = hdpvr_functionality,
};

/* copied into dev->i2c_adapter before registration */
static struct i2c_adapter hdpvr_i2c_adapter_template = {
	.name   = "Hauppage HD PVR I2C",
	.owner  = THIS_MODULE,
	.algo   = &hdpvr_algo,
};

/*
 * Poke the Z8F0811 at address 0x54 to bring the IR hardware up.  The read
 * result and write statuses are deliberately ignored (best effort).
 */
static int hdpvr_activate_ir(struct hdpvr_device *dev)
{
	char buffer[2];

	mutex_lock(&dev->i2c_mutex);

	hdpvr_i2c_read(dev, 0, 0x54, NULL, 0, buffer, 1);

	buffer[0] = 0;
	buffer[1] = 0x8;
	hdpvr_i2c_write(dev, 1, 0x54, buffer, 2);

	buffer[1] = 0x18;
	hdpvr_i2c_write(dev, 1, 0x54, buffer, 2);

	mutex_unlock(&dev->i2c_mutex);

	return 0;
}

/*
 * Set up and register the device's I2C adapter.  Returns the result of
 * i2c_add_adapter() (0 on success, negative errno otherwise).
 */
int hdpvr_register_i2c_adapter(struct hdpvr_device *dev)
{
	hdpvr_activate_ir(dev);

	memcpy(&dev->i2c_adapter, &hdpvr_i2c_adapter_template,
	       sizeof(struct i2c_adapter));
	dev->i2c_adapter.dev.parent = &dev->udev->dev;

	i2c_set_adapdata(&dev->i2c_adapter, dev);

	return i2c_add_adapter(&dev->i2c_adapter);
}

#endif
gpl-2.0
santod/android_GE_kernel_htc_m7vzw
arch/arm/mach-davinci/board-dm646x-evm.c
4726
17718
/*
 * TI DaVinci DM646X EVM board
 *
 * Derived from: arch/arm/mach-davinci/board-evm.c
 * Copyright (C) 2006 Texas Instruments.
 *
 * (C) 2007-2008, MontaVista Software, Inc.
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 *
 */

/**************************************************************************
 * Included Files
 **************************************************************************/

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/leds.h>
#include <linux/gpio.h>
#include <linux/platform_device.h>
#include <linux/i2c.h>
#include <linux/i2c/at24.h>
#include <linux/i2c/pcf857x.h>

#include <media/tvp514x.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <linux/clk.h>
#include <linux/export.h>

#include <asm/mach-types.h>
#include <asm/mach/arch.h>

#include <mach/common.h>
#include <mach/serial.h>
#include <mach/i2c.h>
#include <mach/nand.h>
#include <mach/clock.h>
#include <mach/cdce949.h>
#include <mach/aemif.h>

#include "davinci.h"
#include "clock.h"

#define NAND_BLOCK_SIZE		SZ_128K

/* Note: We are setting first partition as 'bootloader' constituting UBL, U-Boot
 * and U-Boot environment this avoids dependency on any particular combination
 * of UBL, U-Boot or flashing tools etc.
 */
static struct mtd_partition davinci_nand_partitions[] = {
	{
		/* UBL, U-Boot with environment */
		.name		= "bootloader",
		.offset		= MTDPART_OFS_APPEND,
		.size		= 16 * NAND_BLOCK_SIZE,
		.mask_flags	= MTD_WRITEABLE,	/* force read-only */
	}, {
		.name		= "kernel",
		.offset		= MTDPART_OFS_APPEND,
		.size		= SZ_4M,
		.mask_flags	= 0,
	}, {
		.name		= "filesystem",
		.offset		= MTDPART_OFS_APPEND,
		.size		= MTDPART_SIZ_FULL,
		.mask_flags	= 0,
	}
};

/* AEMIF timings used for the NAND on the DM6467T variant of the EVM */
static struct davinci_aemif_timing dm6467tevm_nandflash_timing = {
	.wsetup		= 29,
	.wstrobe	= 24,
	.whold		= 14,
	.rsetup		= 19,
	.rstrobe	= 33,
	.rhold		= 0,
	.ta		= 29,
};

static struct davinci_nand_pdata davinci_nand_data = {
	.mask_cle		= 0x80000,
	.mask_ale		= 0x40000,
	.parts			= davinci_nand_partitions,
	.nr_parts		= ARRAY_SIZE(davinci_nand_partitions),
	.ecc_mode		= NAND_ECC_HW,
	.options		= 0,
};

static struct resource davinci_nand_resources[] = {
	{
		/* NAND data window on async EMIF chip-select 2 */
		.start		= DM646X_ASYNC_EMIF_CS2_SPACE_BASE,
		.end		= DM646X_ASYNC_EMIF_CS2_SPACE_BASE + SZ_32M - 1,
		.flags		= IORESOURCE_MEM,
	}, {
		/* async EMIF control registers */
		.start		= DM646X_ASYNC_EMIF_CONTROL_BASE,
		.end		= DM646X_ASYNC_EMIF_CONTROL_BASE + SZ_4K - 1,
		.flags		= IORESOURCE_MEM,
	},
};

static struct platform_device davinci_nand_device = {
	.name			= "davinci_nand",
	.id			= 0,
	.num_resources		= ARRAY_SIZE(davinci_nand_resources),
	.resource		= davinci_nand_resources,
	.dev			= {
		.platform_data	= &davinci_nand_data,
	},
};

#if defined(CONFIG_BLK_DEV_PALMCHIP_BK3710) || \
	defined(CONFIG_BLK_DEV_PALMCHIP_BK3710_MODULE)
#define HAS_ATA 1
#else
#define HAS_ATA 0
#endif

/* CPLD Register 0 bits to control ATA */
#define DM646X_EVM_ATA_RST		BIT(0)
#define DM646X_EVM_ATA_PWD		BIT(1)

/* CPLD Register 0 Client: used for I/O Control */
/*
 * Probe for the "cpld_reg0" client: read-modify-write the CPLD byte to
 * enable ATA (only when ATA support is compiled in).
 */
static int cpld_reg0_probe(struct i2c_client *client,
			   const struct i2c_device_id *id)
{
	if (HAS_ATA) {
		u8 data;
		/* msg[0] reads the CPLD byte, msg[1] writes it back */
		struct i2c_msg msg[2] = {
			{
				.addr = client->addr,
				.flags = I2C_M_RD,
				.len = 1,
				.buf = &data,
			},
			{
				.addr = client->addr,
				.flags = 0,
				.len = 1,
				.buf = &data,
			},
		};

		/* Clear ATA_RSTn and ATA_PWD bits to enable ATA operation. */
		i2c_transfer(client->adapter, msg, 1);
		data &= ~(DM646X_EVM_ATA_RST | DM646X_EVM_ATA_PWD);
		i2c_transfer(client->adapter, msg + 1, 1);
	}

	return 0;
}

static const struct i2c_device_id cpld_reg_ids[] = {
	{ "cpld_reg0", 0, },
	{ },
};

static struct i2c_driver dm6467evm_cpld_driver = {
	.driver.name	= "cpld_reg0",
	.id_table	= cpld_reg_ids,
	.probe		= cpld_reg0_probe,
};

/* LEDS */

static struct gpio_led evm_leds[] = {
	{ .name = "DS1", .active_low = 1, },
	{ .name = "DS2", .active_low = 1, },
	{ .name = "DS3", .active_low = 1, },
	{ .name = "DS4", .active_low = 1, },
};

static const struct gpio_led_platform_data evm_led_data = {
	.num_leds = ARRAY_SIZE(evm_leds),
	.leds     = evm_leds,
};

static struct platform_device *evm_led_dev;

/*
 * Expander-GPIO setup callback: assign consecutive GPIOs to the LEDs and
 * register a "leds-gpio" platform device for them.
 */
static int evm_led_setup(struct i2c_client *client, int gpio,
			 unsigned int ngpio, void *c)
{
	struct gpio_led *leds = evm_leds;
	int status;

	while (ngpio--) {
		leds->gpio = gpio++;
		leds++;
	};

	evm_led_dev = platform_device_alloc("leds-gpio", 0);
	platform_device_add_data(evm_led_dev, &evm_led_data,
				 sizeof(evm_led_data));

	evm_led_dev->dev.parent = &client->dev;
	status = platform_device_add(evm_led_dev);
	if (status < 0) {
		platform_device_put(evm_led_dev);
		evm_led_dev = NULL;
	}
	return status;
}

/* Undo evm_led_setup(): unregister the LED device, if any */
static int evm_led_teardown(struct i2c_client *client, int gpio,
			    unsigned ngpio, void *c)
{
	if (evm_led_dev) {
		platform_device_unregister(evm_led_dev);
		evm_led_dev = NULL;
	}
	return 0;
}

/* -EINVAL marks an entry as "not claimed" */
static int evm_sw_gpio[4] = { -EINVAL, -EINVAL, -EINVAL, -EINVAL };

/*
 * Claim four expander GPIOs as user switches (inputs) and export them to
 * sysfs; on any failure, release everything claimed so far.
 */
static int evm_sw_setup(struct i2c_client *client, int gpio,
			unsigned ngpio, void *c)
{
	int status;
	int i;
	char label[10];

	for (i = 0; i < 4; ++i) {
		snprintf(label, 10, "user_sw%d", i);
		status = gpio_request(gpio, label);
		if (status)
			goto out_free;
		evm_sw_gpio[i] = gpio++;

		status = gpio_direction_input(evm_sw_gpio[i]);
		if (status) {
			gpio_free(evm_sw_gpio[i]);
			evm_sw_gpio[i] = -EINVAL;
			goto out_free;
		}

		status = gpio_export(evm_sw_gpio[i], 0);
		if (status) {
			gpio_free(evm_sw_gpio[i]);
			evm_sw_gpio[i] = -EINVAL;
			goto out_free;
		}
	}
	return status;

out_free:
	for (i = 0; i < 4; ++i) {
		if (evm_sw_gpio[i] != -EINVAL) {
			gpio_free(evm_sw_gpio[i]);
			evm_sw_gpio[i] = -EINVAL;
		}
	}
	return status;
}

/* Release the user-switch GPIOs claimed by evm_sw_setup() */
static int evm_sw_teardown(struct i2c_client *client, int gpio,
			   unsigned ngpio, void *c)
{
	int i;

	for (i = 0; i < 4; ++i) {
		if (evm_sw_gpio[i] != -EINVAL) {
			gpio_unexport(evm_sw_gpio[i]);
			gpio_free(evm_sw_gpio[i]);
			evm_sw_gpio[i] = -EINVAL;
		}
	}
	return 0;
}

/*
 * PCF8574A expander callback: first 4 GPIOs become user switches, next 4
 * drive the LEDs.  Requires at least 8 GPIOs.
 */
static int evm_pcf_setup(struct i2c_client *client, int gpio,
			 unsigned int ngpio, void *c)
{
	int status;

	if (ngpio < 8)
		return -EINVAL;

	status = evm_sw_setup(client, gpio, 4, c);
	if (status)
		return status;

	return evm_led_setup(client, gpio+4, 4, c);
}

/* Reverse of evm_pcf_setup() */
static int evm_pcf_teardown(struct i2c_client *client, int gpio,
			    unsigned int ngpio, void *c)
{
	BUG_ON(ngpio < 8);

	evm_sw_teardown(client, gpio, 4, c);
	evm_led_teardown(client, gpio+4, 4, c);

	return 0;
}

static struct pcf857x_platform_data pcf_data = {
	.gpio_base	= DAVINCI_N_GPIO+1,
	.setup		= evm_pcf_setup,
	.teardown	= evm_pcf_teardown,
};

/* Most of this EEPROM is unused, but U-Boot uses some data:
 *  - 0x7f00, 6 bytes Ethernet Address
 *  - ... newer boards may have more
 */
static struct at24_platform_data eeprom_info = {
	.byte_len	= (256*1024) / 8,
	.page_size	= 64,
	.flags		= AT24_FLAG_ADDR16,
	.setup		= davinci_get_mac_addr,
	.context	= (void *)0x7f00,
};

/* McASP serializer directions: IIS (codec) and DIT (S/PDIF) setups */
static u8 dm646x_iis_serializer_direction[] = {
	TX_MODE, RX_MODE, INACTIVE_MODE, INACTIVE_MODE,
};

static u8 dm646x_dit_serializer_direction[] = {
	TX_MODE,
};

static struct snd_platform_data dm646x_evm_snd_data[] = {
	{
		.tx_dma_offset	= 0x400,
		.rx_dma_offset	= 0x400,
		.op_mode	= DAVINCI_MCASP_IIS_MODE,
		.num_serializer	= ARRAY_SIZE(dm646x_iis_serializer_direction),
		.tdm_slots	= 2,
		.serial_dir	= dm646x_iis_serializer_direction,
		.asp_chan_q	= EVENTQ_0,
	},
	{
		.tx_dma_offset	= 0x400,
		.rx_dma_offset	= 0,
		.op_mode	= DAVINCI_MCASP_DIT_MODE,
		.num_serializer	= ARRAY_SIZE(dm646x_dit_serializer_direction),
		.tdm_slots	= 32,
		.serial_dir	= dm646x_dit_serializer_direction,
		.asp_chan_q	= EVENTQ_0,
	},
};

/* Client handle for the video CPLD, captured at probe time */
static struct i2c_client *cpld_client;

static int cpld_video_probe(struct i2c_client *client,
			    const struct i2c_device_id *id)
{
	cpld_client = client;
	return 0;
}

static int __devexit cpld_video_remove(struct i2c_client *client)
{
	cpld_client = NULL;
	return 0;
}

static const struct i2c_device_id cpld_video_id[] = {
	{ "cpld_video", 0 },
	{ }
};

static struct i2c_driver cpld_video_driver = {
	.driver = {
		.name	= "cpld_video",
	},
	.probe		= cpld_video_probe,
	.remove		= cpld_video_remove,
	.id_table	= cpld_video_id,
};

static void evm_init_cpld(void)
{
	i2c_add_driver(&cpld_video_driver);
}

/* Board I2C devices: EEPROM, GPIO expander, CPLDs, audio codec, clock chip */
static struct i2c_board_info __initdata i2c_info[] = {
	{
		I2C_BOARD_INFO("24c256", 0x50),
		.platform_data	= &eeprom_info,
	},
	{
		I2C_BOARD_INFO("pcf8574a", 0x38),
		.platform_data	= &pcf_data,
	},
	{
		I2C_BOARD_INFO("cpld_reg0", 0x3a),
	},
	{
		I2C_BOARD_INFO("tlv320aic33", 0x18),
	},
	{
		I2C_BOARD_INFO("cpld_video", 0x3b),
	},
	{
		I2C_BOARD_INFO("cdce949", 0x6c),
	},
};

static struct davinci_i2c_platform_data i2c_pdata = {
	.bus_freq	= 100 /* kHz */,
	.bus_delay	= 0 /* usec */,
};

/* VPIF clock-mux and channel-select bits in the SoC video clock registers */
#define VCH2CLK_MASK		(BIT_MASK(10) | BIT_MASK(9) | BIT_MASK(8))
#define VCH2CLK_SYSCLK8		(BIT(9))
#define VCH2CLK_AUXCLK		(BIT(9) | BIT(8))
#define VCH3CLK_MASK		(BIT_MASK(14) | BIT_MASK(13) | BIT_MASK(12))
#define VCH3CLK_SYSCLK8		(BIT(13))
#define VCH3CLK_AUXCLK		(BIT(14) | BIT(13))

#define VIDCH2CLK		(BIT(10))
#define VIDCH3CLK		(BIT(11))
#define VIDCH1CLK		(BIT(4))
#define TVP7002_INPUT		(BIT(4))
#define TVP5147_INPUT		(~BIT(4))
#define VPIF_INPUT_ONE_CHANNEL	(BIT(5))
#define VPIF_INPUT_TWO_CHANNEL	(~BIT(5))
#define TVP5147_CH0		"tvp514x-0"
#define TVP5147_CH1		"tvp514x-1"

/* spin lock for updating above registers */
static spinlock_t vpif_reg_lock;

/*
 * Select the VPIF display clock source (via the video CPLD and the SoC
 * VIDCLKCTL register), gating channel 2/3 clocks around the change.
 */
static int set_vpif_clock(int mux_mode, int hd)
{
	unsigned long flags;
	unsigned int value;
	int val = 0;
	int err = 0;

	if (!cpld_client)
		return -ENXIO;

	/* disable the clock */
	spin_lock_irqsave(&vpif_reg_lock, flags);
	value = __raw_readl(DAVINCI_SYSMOD_VIRT(SYSMOD_VSCLKDIS));
	value |= (VIDCH3CLK | VIDCH2CLK);
	__raw_writel(value, DAVINCI_SYSMOD_VIRT(SYSMOD_VSCLKDIS));
	spin_unlock_irqrestore(&vpif_reg_lock, flags);

	val = i2c_smbus_read_byte(cpld_client);
	if (val < 0)
		return val;

	if (mux_mode == 1)
		val &= ~0x40;
	else
		val |= 0x40;

	err = i2c_smbus_write_byte(cpld_client, val);
	if (err)
		return err;

	value = __raw_readl(DAVINCI_SYSMOD_VIRT(SYSMOD_VIDCLKCTL));
	value &= ~(VCH2CLK_MASK);
	value &= ~(VCH3CLK_MASK);

	if (hd >= 1)
		value |= (VCH2CLK_SYSCLK8 | VCH3CLK_SYSCLK8);
	else
		value |= (VCH2CLK_AUXCLK | VCH3CLK_AUXCLK);

	__raw_writel(value, DAVINCI_SYSMOD_VIRT(SYSMOD_VIDCLKCTL));

	spin_lock_irqsave(&vpif_reg_lock, flags);
	value = __raw_readl(DAVINCI_SYSMOD_VIRT(SYSMOD_VSCLKDIS));
	/* enable the clock */
	value &= ~(VIDCH3CLK | VIDCH2CLK);
	__raw_writel(value, DAVINCI_SYSMOD_VIRT(SYSMOD_VSCLKDIS));
	spin_unlock_irqrestore(&vpif_reg_lock, flags);

	return 0;
}

static struct vpif_subdev_info dm646x_vpif_subdev[] = {
	{
		.name	= "adv7343",
		.board_info = {
			I2C_BOARD_INFO("adv7343", 0x2a),
		},
	},
	{
		.name	= "ths7303",
		.board_info = {
			I2C_BOARD_INFO("ths7303", 0x2c),
		},
	},
};

static const char *output[] = {
	"Composite",
	"Component",
	"S-Video",
};

static struct vpif_display_config dm646x_vpif_display_config = {
	.set_clock	= set_vpif_clock,
	.subdevinfo	= dm646x_vpif_subdev,
	.subdev_count	= ARRAY_SIZE(dm646x_vpif_subdev),
	.output		= output,
	.output_count	= ARRAY_SIZE(output),
	.card_name	= "DM646x EVM",
};

/**
 * setup_vpif_input_path()
 * @channel: channel id (0 - CH0, 1 - CH1)
 * @sub_dev_name: ptr sub device name
 *
 * This will set vpif input to capture data from tvp514x or
 * tvp7002.
 */
static int setup_vpif_input_path(int channel, const char *sub_dev_name)
{
	int err = 0;
	int val;

	/* for channel 1, we don't do anything */
	if (channel != 0)
		return 0;

	if (!cpld_client)
		return -ENXIO;

	val = i2c_smbus_read_byte(cpld_client);
	if (val < 0)
		return val;

	if (!strcmp(sub_dev_name, TVP5147_CH0) ||
	    !strcmp(sub_dev_name, TVP5147_CH1))
		val &= TVP5147_INPUT;
	else
		val |= TVP7002_INPUT;

	err = i2c_smbus_write_byte(cpld_client, val);
	if (err)
		return err;

	return 0;
}

/**
 * setup_vpif_input_channel_mode()
 * @mux_mode:  mux mode. 0 - 1 channel or (1) - 2 channel
 *
 * This will setup input mode to one channel (TVP7002) or 2 channel (TVP5147)
 */
static int setup_vpif_input_channel_mode(int mux_mode)
{
	unsigned long flags;
	int err = 0;
	int val;
	u32 value;

	if (!cpld_client)
		return -ENXIO;

	val = i2c_smbus_read_byte(cpld_client);
	if (val < 0)
		return val;

	spin_lock_irqsave(&vpif_reg_lock, flags);
	value = __raw_readl(DAVINCI_SYSMOD_VIRT(SYSMOD_VIDCLKCTL));
	if (mux_mode) {
		val &= VPIF_INPUT_TWO_CHANNEL;
		value |= VIDCH1CLK;
	} else {
		val |= VPIF_INPUT_ONE_CHANNEL;
		value &= ~VIDCH1CLK;
	}
	__raw_writel(value, DAVINCI_SYSMOD_VIRT(SYSMOD_VIDCLKCTL));
	spin_unlock_irqrestore(&vpif_reg_lock, flags);

	err = i2c_smbus_write_byte(cpld_client, val);
	if (err)
		return err;

	return 0;
}

static struct tvp514x_platform_data tvp5146_pdata = {
	.clk_polarity = 0,
	.hs_polarity = 1,
	.vs_polarity = 1
};

#define TVP514X_STD_ALL	(V4L2_STD_NTSC | V4L2_STD_PAL)

static struct vpif_subdev_info vpif_capture_sdev_info[] = {
	{
		.name	= TVP5147_CH0,
		.board_info = {
			I2C_BOARD_INFO("tvp5146", 0x5d),
			.platform_data = &tvp5146_pdata,
		},
		.input	= INPUT_CVBS_VI2B,
		.output	= OUTPUT_10BIT_422_EMBEDDED_SYNC,
		.can_route = 1,
		.vpif_if = {
			.if_type	= VPIF_IF_BT656,
			.hd_pol		= 1,
			.vd_pol		= 1,
			.fid_pol	= 0,
		},
	},
	{
		.name	= TVP5147_CH1,
		.board_info = {
			I2C_BOARD_INFO("tvp5146", 0x5c),
			.platform_data = &tvp5146_pdata,
		},
		.input	= INPUT_SVIDEO_VI2C_VI1C,
		.output	= OUTPUT_10BIT_422_EMBEDDED_SYNC,
		.can_route = 1,
		.vpif_if = {
			.if_type	= VPIF_IF_BT656,
			.hd_pol		= 1,
			.vd_pol		= 1,
			.fid_pol	= 0,
		},
	},
};

static const struct vpif_input dm6467_ch0_inputs[] = {
	{
		.input = {
			.index = 0,
			.name = "Composite",
			.type = V4L2_INPUT_TYPE_CAMERA,
			.std = TVP514X_STD_ALL,
		},
		.subdev_name = TVP5147_CH0,
	},
};

static const struct vpif_input dm6467_ch1_inputs[] = {
	{
		.input = {
			.index = 0,
			.name = "S-Video",
			.type = V4L2_INPUT_TYPE_CAMERA,
			.std = TVP514X_STD_ALL,
		},
		.subdev_name = TVP5147_CH1,
	},
};

static struct vpif_capture_config dm646x_vpif_capture_cfg = {
	.setup_input_path = setup_vpif_input_path,
	.setup_input_channel_mode = setup_vpif_input_channel_mode,
	.subdev_info = vpif_capture_sdev_info,
	.subdev_count = ARRAY_SIZE(vpif_capture_sdev_info),
	.chan_config[0] = {
		.inputs = dm6467_ch0_inputs,
		.input_count = ARRAY_SIZE(dm6467_ch0_inputs),
	},
	.chan_config[1] = {
		.inputs = dm6467_ch1_inputs,
		.input_count = ARRAY_SIZE(dm6467_ch1_inputs),
	},
};

/* Initialize the VPIF register lock and hand both configs to the SoC code */
static void __init evm_init_video(void)
{
	spin_lock_init(&vpif_reg_lock);

	dm646x_setup_vpif(&dm646x_vpif_display_config,
			  &dm646x_vpif_capture_cfg);
}

/* Bring up the I2C bus, register board devices and the CPLD/video drivers */
static void __init evm_init_i2c(void)
{
	davinci_init_i2c(&i2c_pdata);
	i2c_add_driver(&dm6467evm_cpld_driver);
	i2c_register_board_info(1, i2c_info, ARRAY_SIZE(i2c_info));
	evm_init_cpld();
	evm_init_video();
}

#define CDCE949_XIN_RATE	27000000

/* CDCE949 support - "lpsc" field is overridden to work as clock number */
static struct clk cdce_clk_in = {
	.name	= "cdce_xin",
	.rate	= CDCE949_XIN_RATE,
};

static struct clk_lookup cdce_clks[] = {
	CLK(NULL, "xin", &cdce_clk_in),
	CLK(NULL, NULL, NULL),
};

/* Register the CDCE949 input-clock lookups with the clock framework */
static void __init cdce_clk_init(void)
{
	struct clk_lookup *c;
	struct clk *clk;

	for (c = cdce_clks; c->clk; c++) {
		clk = c->clk;
		clkdev_add(c);
		clk_register(clk);
	}
}

#define DM6467T_EVM_REF_FREQ		33000000

/* Early SoC init; the DM6467T variant runs from a 33 MHz reference clock */
static void __init davinci_map_io(void)
{
	dm646x_init();
	if (machine_is_davinci_dm6467tevm())
		davinci_set_refclk_rate(DM6467T_EVM_REF_FREQ);
	cdce_clk_init();
}

static struct davinci_uart_config uart_config __initdata = {
	.enabled_uarts = (1 << 0),
};

#define DM646X_EVM_PHY_ID		"davinci_mdio-0:01"
/*
 * The following EDMA channels/slots are not being used by drivers (for
 * example: Timer, GPIO, UART events etc) on dm646x, hence they are being
 * reserved for codecs on the DSP side.
 */
static const s16 dm646x_dma_rsv_chans[][2] = {
	/* (offset, number) */
	{ 0,  4},
	{13,  3},
	{24,  4},
	{30,  2},
	{54,  3},
	{-1, -1}
};

static const s16 dm646x_dma_rsv_slots[][2] = {
	/* (offset, number) */
	{ 0,  4},
	{13,  3},
	{24,  4},
	{30,  2},
	{54,  3},
	{128, 384},
	{-1, -1}
};

static struct edma_rsv_info dm646x_edma_rsv[] = {
	{
		.rsv_chans	= dm646x_dma_rsv_chans,
		.rsv_slots	= dm646x_dma_rsv_slots,
	},
};

/* Machine init: register I2C, serial, audio, NAND, EDMA, (optional) IDE */
static __init void evm_init(void)
{
	struct davinci_soc_info *soc_info = &davinci_soc_info;

	evm_init_i2c();
	davinci_serial_init(&uart_config);
	dm646x_init_mcasp0(&dm646x_evm_snd_data[0]);
	dm646x_init_mcasp1(&dm646x_evm_snd_data[1]);

	if (machine_is_davinci_dm6467tevm())
		davinci_nand_data.timing = &dm6467tevm_nandflash_timing;

	platform_device_register(&davinci_nand_device);

	dm646x_init_edma(dm646x_edma_rsv);

	if (HAS_ATA)
		davinci_init_ide();

	soc_info->emac_pdata->phy_id = DM646X_EVM_PHY_ID;
}

MACHINE_START(DAVINCI_DM6467_EVM, "DaVinci DM646x EVM")
	.atag_offset  = 0x100,
	.map_io       = davinci_map_io,
	.init_irq     = davinci_irq_init,
	.timer        = &davinci_timer,
	.init_machine = evm_init,
	.dma_zone_size	= SZ_128M,
	.restart	= davinci_restart,
MACHINE_END

MACHINE_START(DAVINCI_DM6467TEVM, "DaVinci DM6467T EVM")
	.atag_offset  = 0x100,
	.map_io       = davinci_map_io,
	.init_irq     = davinci_irq_init,
	.timer        = &davinci_timer,
	.init_machine = evm_init,
	.dma_zone_size	= SZ_128M,
	.restart	= davinci_restart,
MACHINE_END
gpl-2.0
silence-star/android_kernel_nubia_NX503A
kernel/wait.c
4726
8441
/*
 * Generic waiting primitives.
 *
 * (C) 2004 William Irwin, Oracle
 */
#include <linux/init.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/wait.h>
#include <linux/hash.h>

/* Set up a waitqueue head: lock, lockdep class/name, empty waiter list. */
void __init_waitqueue_head(wait_queue_head_t *q, const char *name,
			   struct lock_class_key *key)
{
	spin_lock_init(&q->lock);
	lockdep_set_class_and_name(&q->lock, key, name);
	INIT_LIST_HEAD(&q->task_list);
}
EXPORT_SYMBOL(__init_waitqueue_head);

/* Add a non-exclusive waiter at the head of the queue. */
void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
{
	unsigned long flags;

	wait->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&q->lock, flags);
	__add_wait_queue(q, wait);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue);

/* Add an exclusive waiter at the tail, so wake-one picks it up last. */
void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
{
	unsigned long flags;

	wait->flags |= WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&q->lock, flags);
	__add_wait_queue_tail(q, wait);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue_exclusive);

/* Unlink a waiter from its queue under the queue lock. */
void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
{
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	__remove_wait_queue(q, wait);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(remove_wait_queue);

/*
 * Note: we use "set_current_state()" _after_ the wait-queue add,
 * because we need a memory barrier there on SMP, so that any
 * wake-function that tests for the wait-queue being active
 * will be guaranteed to see waitqueue addition _or_ subsequent
 * tests in this thread will see the wakeup having taken place.
 *
 * The spin_unlock() itself is semi-permeable and only protects
 * one way (it only protects stuff inside the critical region and
 * stops them from bleeding out - it would still allow subsequent
 * loads to move into the critical region).
 */
void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
{
	unsigned long flags;

	wait->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&q->lock, flags);
	/* Only enqueue if not already on some queue. */
	if (list_empty(&wait->task_list))
		__add_wait_queue(q, wait);
	set_current_state(state);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait);

/* As prepare_to_wait(), but queues exclusively at the tail. */
void
prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state)
{
	unsigned long flags;

	wait->flags |= WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&q->lock, flags);
	if (list_empty(&wait->task_list))
		__add_wait_queue_tail(q, wait);
	set_current_state(state);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait_exclusive);

/**
 * finish_wait - clean up after waiting in a queue
 * @q: waitqueue waited on
 * @wait: wait descriptor
 *
 * Sets current thread back to running state and removes
 * the wait descriptor from the given waitqueue if still
 * queued.
 */
void finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
{
	unsigned long flags;

	__set_current_state(TASK_RUNNING);
	/*
	 * We can check for list emptiness outside the lock
	 * IFF:
	 *  - we use the "careful" check that verifies both
	 *    the next and prev pointers, so that there cannot
	 *    be any half-pending updates in progress on other
	 *    CPU's that we haven't seen yet (and that might
	 *    still change the stack area.
	 * and
	 *  - all other users take the lock (ie we can only
	 *    have _one_ other CPU that looks at or modifies
	 *    the list).
	 */
	if (!list_empty_careful(&wait->task_list)) {
		spin_lock_irqsave(&q->lock, flags);
		list_del_init(&wait->task_list);
		spin_unlock_irqrestore(&q->lock, flags);
	}
}
EXPORT_SYMBOL(finish_wait);

/**
 * abort_exclusive_wait - abort exclusive waiting in a queue
 * @q: waitqueue waited on
 * @wait: wait descriptor
 * @mode: runstate of the waiter to be woken
 * @key: key to identify a wait bit queue or %NULL
 *
 * Sets current thread back to running state and removes
 * the wait descriptor from the given waitqueue if still
 * queued.
 *
 * Wakes up the next waiter if the caller is concurrently
 * woken up through the queue.
 *
 * This prevents waiter starvation where an exclusive waiter
 * aborts and is woken up concurrently and no one wakes up
 * the next waiter.
 */
void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait,
			unsigned int mode, void *key)
{
	unsigned long flags;

	__set_current_state(TASK_RUNNING);
	spin_lock_irqsave(&q->lock, flags);
	if (!list_empty(&wait->task_list))
		list_del_init(&wait->task_list);
	else if (waitqueue_active(q))
		/* We were already dequeued by a wakeup: pass it on. */
		__wake_up_locked_key(q, mode, key);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(abort_exclusive_wait);

/* Default wake function that also dequeues the waiter on success. */
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	int ret = default_wake_function(wait, mode, sync, key);

	if (ret)
		list_del_init(&wait->task_list);
	return ret;
}
EXPORT_SYMBOL(autoremove_wake_function);

/* Wake function for bit-waitqueues: only wake matching (flags, bit) waiters. */
int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *arg)
{
	struct wait_bit_key *key = arg;
	struct wait_bit_queue *wait_bit
		= container_of(wait, struct wait_bit_queue, wait);

	if (wait_bit->key.flags != key->flags ||
			wait_bit->key.bit_nr != key->bit_nr ||
			test_bit(key->bit_nr, key->flags))
		return 0;
	else
		return autoremove_wake_function(wait, mode, sync, key);
}
EXPORT_SYMBOL(wake_bit_function);

/*
 * To allow interruptible waiting and asynchronous (i.e. nonblocking)
 * waiting, the actions of __wait_on_bit() and __wait_on_bit_lock() are
 * permitted return codes. Nonzero return codes halt waiting and return.
 */
int __sched
__wait_on_bit(wait_queue_head_t *wq, struct wait_bit_queue *q,
			int (*action)(void *), unsigned mode)
{
	int ret = 0;

	do {
		prepare_to_wait(wq, &q->wait, mode);
		if (test_bit(q->key.bit_nr, q->key.flags))
			ret = (*action)(q->key.flags);
	} while (test_bit(q->key.bit_nr, q->key.flags) && !ret);
	finish_wait(wq, &q->wait);
	return ret;
}
EXPORT_SYMBOL(__wait_on_bit);

/* Convenience wrapper: derive the hashed waitqueue for (word, bit). */
int __sched out_of_line_wait_on_bit(void *word, int bit,
					int (*action)(void *), unsigned mode)
{
	wait_queue_head_t *wq = bit_waitqueue(word, bit);
	DEFINE_WAIT_BIT(wait, word, bit);

	return __wait_on_bit(wq, &wait, action, mode);
}
EXPORT_SYMBOL(out_of_line_wait_on_bit);

/* Wait until the bit can be atomically acquired (test_and_set succeeds). */
int __sched
__wait_on_bit_lock(wait_queue_head_t *wq, struct wait_bit_queue *q,
			int (*action)(void *), unsigned mode)
{
	do {
		int ret;

		prepare_to_wait_exclusive(wq, &q->wait, mode);
		if (!test_bit(q->key.bit_nr, q->key.flags))
			continue;
		ret = action(q->key.flags);
		if (!ret)
			continue;
		/* action() failed: bail out, but don't strand other waiters. */
		abort_exclusive_wait(wq, &q->wait, mode, &q->key);
		return ret;
	} while (test_and_set_bit(q->key.bit_nr, q->key.flags));
	finish_wait(wq, &q->wait);
	return 0;
}
EXPORT_SYMBOL(__wait_on_bit_lock);

/* Convenience wrapper for __wait_on_bit_lock() on the hashed waitqueue. */
int __sched out_of_line_wait_on_bit_lock(void *word, int bit,
					int (*action)(void *), unsigned mode)
{
	wait_queue_head_t *wq = bit_waitqueue(word, bit);
	DEFINE_WAIT_BIT(wait, word, bit);

	return __wait_on_bit_lock(wq, &wait, action, mode);
}
EXPORT_SYMBOL(out_of_line_wait_on_bit_lock);

/* Wake one waiter on @wq whose wait-bit key matches (word, bit). */
void __wake_up_bit(wait_queue_head_t *wq, void *word, int bit)
{
	struct wait_bit_key key = __WAIT_BIT_KEY_INITIALIZER(word, bit);

	if (waitqueue_active(wq))
		__wake_up(wq, TASK_NORMAL, 1, &key);
}
EXPORT_SYMBOL(__wake_up_bit);

/**
 * wake_up_bit - wake up a waiter on a bit
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 *
 * There is a standard hashed waitqueue table for generic use. This
 * is the part of the hashtable's accessor API that wakes up waiters
 * on a bit. For instance, if one were to have waiters on a bitflag,
 * one would call wake_up_bit() after clearing the bit.
 *
 * In order for this to function properly, as it uses waitqueue_active()
 * internally, some kind of memory barrier must be done prior to calling
 * this. Typically, this will be smp_mb__after_clear_bit(), but in some
 * cases where bitflags are manipulated non-atomically under a lock, one
 * may need to use a less regular barrier, such fs/inode.c's smp_mb(),
 * because spin_unlock() does not guarantee a memory barrier.
 */
void wake_up_bit(void *word, int bit)
{
	__wake_up_bit(bit_waitqueue(word, bit), word, bit);
}
EXPORT_SYMBOL(wake_up_bit);

/* Hash (word, bit) into the per-zone shared waitqueue table. */
wait_queue_head_t *bit_waitqueue(void *word, int bit)
{
	const int shift = BITS_PER_LONG == 32 ? 5 : 6;
	const struct zone *zone = page_zone(virt_to_page(word));
	unsigned long val = (unsigned long)word << shift | bit;

	return &zone->wait_table[hash_long(val, zone->wait_table_bits)];
}
EXPORT_SYMBOL(bit_waitqueue);
gpl-2.0
hyperion70/j608_kernel
drivers/ssb/driver_extif.c
4726
5127
/* * Sonics Silicon Backplane * Broadcom EXTIF core driver * * Copyright 2005, Broadcom Corporation * Copyright 2006, 2007, Michael Buesch <m@bues.ch> * Copyright 2006, 2007, Felix Fietkau <nbd@openwrt.org> * Copyright 2007, Aurelien Jarno <aurelien@aurel32.net> * * Licensed under the GNU/GPL. See COPYING for details. */ #include <linux/serial.h> #include <linux/serial_core.h> #include <linux/serial_reg.h> #include "ssb_private.h" static inline u32 extif_read32(struct ssb_extif *extif, u16 offset) { return ssb_read32(extif->dev, offset); } static inline void extif_write32(struct ssb_extif *extif, u16 offset, u32 value) { ssb_write32(extif->dev, offset, value); } static inline u32 extif_write32_masked(struct ssb_extif *extif, u16 offset, u32 mask, u32 value) { value &= mask; value |= extif_read32(extif, offset) & ~mask; extif_write32(extif, offset, value); return value; } #ifdef CONFIG_SSB_SERIAL static bool serial_exists(u8 *regs) { u8 save_mcr, msr = 0; if (regs) { save_mcr = regs[UART_MCR]; regs[UART_MCR] = (UART_MCR_LOOP | UART_MCR_OUT2 | UART_MCR_RTS); msr = regs[UART_MSR] & (UART_MSR_DCD | UART_MSR_RI | UART_MSR_CTS | UART_MSR_DSR); regs[UART_MCR] = save_mcr; } return (msr == (UART_MSR_DCD | UART_MSR_CTS)); } int ssb_extif_serial_init(struct ssb_extif *extif, struct ssb_serial_port *ports) { u32 i, nr_ports = 0; /* Disable GPIO interrupt initially */ extif_write32(extif, SSB_EXTIF_GPIO_INTPOL, 0); extif_write32(extif, SSB_EXTIF_GPIO_INTMASK, 0); for (i = 0; i < 2; i++) { void __iomem *uart_regs; uart_regs = ioremap_nocache(SSB_EUART, 16); if (uart_regs) { uart_regs += (i * 8); if (serial_exists(uart_regs) && ports) { extif_write32(extif, SSB_EXTIF_GPIO_INTMASK, 2); nr_ports++; ports[i].regs = uart_regs; ports[i].irq = 2; ports[i].baud_base = 13500000; ports[i].reg_shift = 0; } iounmap(uart_regs); } } return nr_ports; } #endif /* CONFIG_SSB_SERIAL */ void ssb_extif_timing_init(struct ssb_extif *extif, unsigned long ns) { u32 tmp; /* Initialize extif so we can 
get to the LEDs and external UART */ extif_write32(extif, SSB_EXTIF_PROG_CFG, SSB_EXTCFG_EN); /* Set timing for the flash */ tmp = DIV_ROUND_UP(10, ns) << SSB_PROG_WCNT_3_SHIFT; tmp |= DIV_ROUND_UP(40, ns) << SSB_PROG_WCNT_1_SHIFT; tmp |= DIV_ROUND_UP(120, ns); extif_write32(extif, SSB_EXTIF_PROG_WAITCNT, tmp); /* Set programmable interface timing for external uart */ tmp = DIV_ROUND_UP(10, ns) << SSB_PROG_WCNT_3_SHIFT; tmp |= DIV_ROUND_UP(20, ns) << SSB_PROG_WCNT_2_SHIFT; tmp |= DIV_ROUND_UP(100, ns) << SSB_PROG_WCNT_1_SHIFT; tmp |= DIV_ROUND_UP(120, ns); extif_write32(extif, SSB_EXTIF_PROG_WAITCNT, tmp); } void ssb_extif_get_clockcontrol(struct ssb_extif *extif, u32 *pll_type, u32 *n, u32 *m) { *pll_type = SSB_PLLTYPE_1; *n = extif_read32(extif, SSB_EXTIF_CLOCK_N); *m = extif_read32(extif, SSB_EXTIF_CLOCK_SB); } u32 ssb_extif_watchdog_timer_set_wdt(struct bcm47xx_wdt *wdt, u32 ticks) { struct ssb_extif *extif = bcm47xx_wdt_get_drvdata(wdt); return ssb_extif_watchdog_timer_set(extif, ticks); } u32 ssb_extif_watchdog_timer_set_ms(struct bcm47xx_wdt *wdt, u32 ms) { struct ssb_extif *extif = bcm47xx_wdt_get_drvdata(wdt); u32 ticks = (SSB_EXTIF_WATCHDOG_CLK / 1000) * ms; ticks = ssb_extif_watchdog_timer_set(extif, ticks); return (ticks * 1000) / SSB_EXTIF_WATCHDOG_CLK; } u32 ssb_extif_watchdog_timer_set(struct ssb_extif *extif, u32 ticks) { if (ticks > SSB_EXTIF_WATCHDOG_MAX_TIMER) ticks = SSB_EXTIF_WATCHDOG_MAX_TIMER; extif_write32(extif, SSB_EXTIF_WATCHDOG, ticks); return ticks; } void ssb_extif_init(struct ssb_extif *extif) { if (!extif->dev) return; /* We don't have a Extif core */ spin_lock_init(&extif->gpio_lock); } u32 ssb_extif_gpio_in(struct ssb_extif *extif, u32 mask) { return extif_read32(extif, SSB_EXTIF_GPIO_IN) & mask; } u32 ssb_extif_gpio_out(struct ssb_extif *extif, u32 mask, u32 value) { unsigned long flags; u32 res = 0; spin_lock_irqsave(&extif->gpio_lock, flags); res = extif_write32_masked(extif, SSB_EXTIF_GPIO_OUT(0), mask, value); 
spin_unlock_irqrestore(&extif->gpio_lock, flags); return res; } u32 ssb_extif_gpio_outen(struct ssb_extif *extif, u32 mask, u32 value) { unsigned long flags; u32 res = 0; spin_lock_irqsave(&extif->gpio_lock, flags); res = extif_write32_masked(extif, SSB_EXTIF_GPIO_OUTEN(0), mask, value); spin_unlock_irqrestore(&extif->gpio_lock, flags); return res; } u32 ssb_extif_gpio_polarity(struct ssb_extif *extif, u32 mask, u32 value) { unsigned long flags; u32 res = 0; spin_lock_irqsave(&extif->gpio_lock, flags); res = extif_write32_masked(extif, SSB_EXTIF_GPIO_INTPOL, mask, value); spin_unlock_irqrestore(&extif->gpio_lock, flags); return res; } u32 ssb_extif_gpio_intmask(struct ssb_extif *extif, u32 mask, u32 value) { unsigned long flags; u32 res = 0; spin_lock_irqsave(&extif->gpio_lock, flags); res = extif_write32_masked(extif, SSB_EXTIF_GPIO_INTMASK, mask, value); spin_unlock_irqrestore(&extif->gpio_lock, flags); return res; }
gpl-2.0
ystk/sched-deadline
drivers/net/ethernet/nuvoton/w90p910_ether.c
4982
26427
/* * Copyright (c) 2008-2009 Nuvoton technology corporation. * * Wan ZongShun <mcuos.com@gmail.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation;version 2 of the License. * */ #include <linux/module.h> #include <linux/init.h> #include <linux/mii.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/ethtool.h> #include <linux/platform_device.h> #include <linux/clk.h> #include <linux/gfp.h> #define DRV_MODULE_NAME "w90p910-emc" #define DRV_MODULE_VERSION "0.1" /* Ethernet MAC Registers */ #define REG_CAMCMR 0x00 #define REG_CAMEN 0x04 #define REG_CAMM_BASE 0x08 #define REG_CAML_BASE 0x0c #define REG_TXDLSA 0x88 #define REG_RXDLSA 0x8C #define REG_MCMDR 0x90 #define REG_MIID 0x94 #define REG_MIIDA 0x98 #define REG_FFTCR 0x9C #define REG_TSDR 0xa0 #define REG_RSDR 0xa4 #define REG_DMARFC 0xa8 #define REG_MIEN 0xac #define REG_MISTA 0xb0 #define REG_CTXDSA 0xcc #define REG_CTXBSA 0xd0 #define REG_CRXDSA 0xd4 #define REG_CRXBSA 0xd8 /* mac controller bit */ #define MCMDR_RXON 0x01 #define MCMDR_ACP (0x01 << 3) #define MCMDR_SPCRC (0x01 << 5) #define MCMDR_TXON (0x01 << 8) #define MCMDR_FDUP (0x01 << 18) #define MCMDR_ENMDC (0x01 << 19) #define MCMDR_OPMOD (0x01 << 20) #define SWR (0x01 << 24) /* cam command regiser */ #define CAMCMR_AUP 0x01 #define CAMCMR_AMP (0x01 << 1) #define CAMCMR_ABP (0x01 << 2) #define CAMCMR_CCAM (0x01 << 3) #define CAMCMR_ECMP (0x01 << 4) #define CAM0EN 0x01 /* mac mii controller bit */ #define MDCCR (0x0a << 20) #define PHYAD (0x01 << 8) #define PHYWR (0x01 << 16) #define PHYBUSY (0x01 << 17) #define PHYPRESP (0x01 << 18) #define CAM_ENTRY_SIZE 0x08 /* rx and tx status */ #define TXDS_TXCP (0x01 << 19) #define RXDS_CRCE (0x01 << 17) #define RXDS_PTLE (0x01 << 19) #define RXDS_RXGD (0x01 << 20) #define RXDS_ALIE (0x01 << 21) #define RXDS_RP (0x01 << 22) /* mac 
interrupt status*/ #define MISTA_EXDEF (0x01 << 19) #define MISTA_TXBERR (0x01 << 24) #define MISTA_TDU (0x01 << 23) #define MISTA_RDU (0x01 << 10) #define MISTA_RXBERR (0x01 << 11) #define ENSTART 0x01 #define ENRXINTR 0x01 #define ENRXGD (0x01 << 4) #define ENRXBERR (0x01 << 11) #define ENTXINTR (0x01 << 16) #define ENTXCP (0x01 << 18) #define ENTXABT (0x01 << 21) #define ENTXBERR (0x01 << 24) #define ENMDC (0x01 << 19) #define PHYBUSY (0x01 << 17) #define MDCCR_VAL 0xa00000 /* rx and tx owner bit */ #define RX_OWEN_DMA (0x01 << 31) #define RX_OWEN_CPU (~(0x03 << 30)) #define TX_OWEN_DMA (0x01 << 31) #define TX_OWEN_CPU (~(0x01 << 31)) /* tx frame desc controller bit */ #define MACTXINTEN 0x04 #define CRCMODE 0x02 #define PADDINGMODE 0x01 /* fftcr controller bit */ #define TXTHD (0x03 << 8) #define BLENGTH (0x01 << 20) /* global setting for driver */ #define RX_DESC_SIZE 50 #define TX_DESC_SIZE 10 #define MAX_RBUFF_SZ 0x600 #define MAX_TBUFF_SZ 0x600 #define TX_TIMEOUT (HZ/2) #define DELAY 1000 #define CAM0 0x0 static int w90p910_mdio_read(struct net_device *dev, int phy_id, int reg); struct w90p910_rxbd { unsigned int sl; unsigned int buffer; unsigned int reserved; unsigned int next; }; struct w90p910_txbd { unsigned int mode; unsigned int buffer; unsigned int sl; unsigned int next; }; struct recv_pdesc { struct w90p910_rxbd desclist[RX_DESC_SIZE]; char recv_buf[RX_DESC_SIZE][MAX_RBUFF_SZ]; }; struct tran_pdesc { struct w90p910_txbd desclist[TX_DESC_SIZE]; char tran_buf[TX_DESC_SIZE][MAX_TBUFF_SZ]; }; struct w90p910_ether { struct recv_pdesc *rdesc; struct tran_pdesc *tdesc; dma_addr_t rdesc_phys; dma_addr_t tdesc_phys; struct net_device_stats stats; struct platform_device *pdev; struct resource *res; struct sk_buff *skb; struct clk *clk; struct clk *rmiiclk; struct mii_if_info mii; struct timer_list check_timer; void __iomem *reg; int rxirq; int txirq; unsigned int cur_tx; unsigned int cur_rx; unsigned int finish_tx; unsigned int rx_packets; unsigned int 
rx_bytes; unsigned int start_tx_ptr; unsigned int start_rx_ptr; unsigned int linkflag; }; static void update_linkspeed_register(struct net_device *dev, unsigned int speed, unsigned int duplex) { struct w90p910_ether *ether = netdev_priv(dev); unsigned int val; val = __raw_readl(ether->reg + REG_MCMDR); if (speed == SPEED_100) { /* 100 full/half duplex */ if (duplex == DUPLEX_FULL) { val |= (MCMDR_OPMOD | MCMDR_FDUP); } else { val |= MCMDR_OPMOD; val &= ~MCMDR_FDUP; } } else { /* 10 full/half duplex */ if (duplex == DUPLEX_FULL) { val |= MCMDR_FDUP; val &= ~MCMDR_OPMOD; } else { val &= ~(MCMDR_FDUP | MCMDR_OPMOD); } } __raw_writel(val, ether->reg + REG_MCMDR); } static void update_linkspeed(struct net_device *dev) { struct w90p910_ether *ether = netdev_priv(dev); struct platform_device *pdev; unsigned int bmsr, bmcr, lpa, speed, duplex; pdev = ether->pdev; if (!mii_link_ok(&ether->mii)) { ether->linkflag = 0x0; netif_carrier_off(dev); dev_warn(&pdev->dev, "%s: Link down.\n", dev->name); return; } if (ether->linkflag == 1) return; bmsr = w90p910_mdio_read(dev, ether->mii.phy_id, MII_BMSR); bmcr = w90p910_mdio_read(dev, ether->mii.phy_id, MII_BMCR); if (bmcr & BMCR_ANENABLE) { if (!(bmsr & BMSR_ANEGCOMPLETE)) return; lpa = w90p910_mdio_read(dev, ether->mii.phy_id, MII_LPA); if ((lpa & LPA_100FULL) || (lpa & LPA_100HALF)) speed = SPEED_100; else speed = SPEED_10; if ((lpa & LPA_100FULL) || (lpa & LPA_10FULL)) duplex = DUPLEX_FULL; else duplex = DUPLEX_HALF; } else { speed = (bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10; duplex = (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF; } update_linkspeed_register(dev, speed, duplex); dev_info(&pdev->dev, "%s: Link now %i-%s\n", dev->name, speed, (duplex == DUPLEX_FULL) ? 
"FullDuplex" : "HalfDuplex"); ether->linkflag = 0x01; netif_carrier_on(dev); } static void w90p910_check_link(unsigned long dev_id) { struct net_device *dev = (struct net_device *) dev_id; struct w90p910_ether *ether = netdev_priv(dev); update_linkspeed(dev); mod_timer(&ether->check_timer, jiffies + msecs_to_jiffies(1000)); } static void w90p910_write_cam(struct net_device *dev, unsigned int x, unsigned char *pval) { struct w90p910_ether *ether = netdev_priv(dev); unsigned int msw, lsw; msw = (pval[0] << 24) | (pval[1] << 16) | (pval[2] << 8) | pval[3]; lsw = (pval[4] << 24) | (pval[5] << 16); __raw_writel(lsw, ether->reg + REG_CAML_BASE + x * CAM_ENTRY_SIZE); __raw_writel(msw, ether->reg + REG_CAMM_BASE + x * CAM_ENTRY_SIZE); } static int w90p910_init_desc(struct net_device *dev) { struct w90p910_ether *ether; struct w90p910_txbd *tdesc; struct w90p910_rxbd *rdesc; struct platform_device *pdev; unsigned int i; ether = netdev_priv(dev); pdev = ether->pdev; ether->tdesc = (struct tran_pdesc *) dma_alloc_coherent(&pdev->dev, sizeof(struct tran_pdesc), &ether->tdesc_phys, GFP_KERNEL); if (!ether->tdesc) { dev_err(&pdev->dev, "Failed to allocate memory for tx desc\n"); return -ENOMEM; } ether->rdesc = (struct recv_pdesc *) dma_alloc_coherent(&pdev->dev, sizeof(struct recv_pdesc), &ether->rdesc_phys, GFP_KERNEL); if (!ether->rdesc) { dev_err(&pdev->dev, "Failed to allocate memory for rx desc\n"); dma_free_coherent(&pdev->dev, sizeof(struct tran_pdesc), ether->tdesc, ether->tdesc_phys); return -ENOMEM; } for (i = 0; i < TX_DESC_SIZE; i++) { unsigned int offset; tdesc = &(ether->tdesc->desclist[i]); if (i == TX_DESC_SIZE - 1) offset = offsetof(struct tran_pdesc, desclist[0]); else offset = offsetof(struct tran_pdesc, desclist[i + 1]); tdesc->next = ether->tdesc_phys + offset; tdesc->buffer = ether->tdesc_phys + offsetof(struct tran_pdesc, tran_buf[i]); tdesc->sl = 0; tdesc->mode = 0; } ether->start_tx_ptr = ether->tdesc_phys; for (i = 0; i < RX_DESC_SIZE; i++) { unsigned 
int offset; rdesc = &(ether->rdesc->desclist[i]); if (i == RX_DESC_SIZE - 1) offset = offsetof(struct recv_pdesc, desclist[0]); else offset = offsetof(struct recv_pdesc, desclist[i + 1]); rdesc->next = ether->rdesc_phys + offset; rdesc->sl = RX_OWEN_DMA; rdesc->buffer = ether->rdesc_phys + offsetof(struct recv_pdesc, recv_buf[i]); } ether->start_rx_ptr = ether->rdesc_phys; return 0; } static void w90p910_set_fifo_threshold(struct net_device *dev) { struct w90p910_ether *ether = netdev_priv(dev); unsigned int val; val = TXTHD | BLENGTH; __raw_writel(val, ether->reg + REG_FFTCR); } static void w90p910_return_default_idle(struct net_device *dev) { struct w90p910_ether *ether = netdev_priv(dev); unsigned int val; val = __raw_readl(ether->reg + REG_MCMDR); val |= SWR; __raw_writel(val, ether->reg + REG_MCMDR); } static void w90p910_trigger_rx(struct net_device *dev) { struct w90p910_ether *ether = netdev_priv(dev); __raw_writel(ENSTART, ether->reg + REG_RSDR); } static void w90p910_trigger_tx(struct net_device *dev) { struct w90p910_ether *ether = netdev_priv(dev); __raw_writel(ENSTART, ether->reg + REG_TSDR); } static void w90p910_enable_mac_interrupt(struct net_device *dev) { struct w90p910_ether *ether = netdev_priv(dev); unsigned int val; val = ENTXINTR | ENRXINTR | ENRXGD | ENTXCP; val |= ENTXBERR | ENRXBERR | ENTXABT; __raw_writel(val, ether->reg + REG_MIEN); } static void w90p910_get_and_clear_int(struct net_device *dev, unsigned int *val) { struct w90p910_ether *ether = netdev_priv(dev); *val = __raw_readl(ether->reg + REG_MISTA); __raw_writel(*val, ether->reg + REG_MISTA); } static void w90p910_set_global_maccmd(struct net_device *dev) { struct w90p910_ether *ether = netdev_priv(dev); unsigned int val; val = __raw_readl(ether->reg + REG_MCMDR); val |= MCMDR_SPCRC | MCMDR_ENMDC | MCMDR_ACP | ENMDC; __raw_writel(val, ether->reg + REG_MCMDR); } static void w90p910_enable_cam(struct net_device *dev) { struct w90p910_ether *ether = netdev_priv(dev); unsigned int 
val; w90p910_write_cam(dev, CAM0, dev->dev_addr); val = __raw_readl(ether->reg + REG_CAMEN); val |= CAM0EN; __raw_writel(val, ether->reg + REG_CAMEN); } static void w90p910_enable_cam_command(struct net_device *dev) { struct w90p910_ether *ether = netdev_priv(dev); unsigned int val; val = CAMCMR_ECMP | CAMCMR_ABP | CAMCMR_AMP; __raw_writel(val, ether->reg + REG_CAMCMR); } static void w90p910_enable_tx(struct net_device *dev, unsigned int enable) { struct w90p910_ether *ether = netdev_priv(dev); unsigned int val; val = __raw_readl(ether->reg + REG_MCMDR); if (enable) val |= MCMDR_TXON; else val &= ~MCMDR_TXON; __raw_writel(val, ether->reg + REG_MCMDR); } static void w90p910_enable_rx(struct net_device *dev, unsigned int enable) { struct w90p910_ether *ether = netdev_priv(dev); unsigned int val; val = __raw_readl(ether->reg + REG_MCMDR); if (enable) val |= MCMDR_RXON; else val &= ~MCMDR_RXON; __raw_writel(val, ether->reg + REG_MCMDR); } static void w90p910_set_curdest(struct net_device *dev) { struct w90p910_ether *ether = netdev_priv(dev); __raw_writel(ether->start_rx_ptr, ether->reg + REG_RXDLSA); __raw_writel(ether->start_tx_ptr, ether->reg + REG_TXDLSA); } static void w90p910_reset_mac(struct net_device *dev) { struct w90p910_ether *ether = netdev_priv(dev); w90p910_enable_tx(dev, 0); w90p910_enable_rx(dev, 0); w90p910_set_fifo_threshold(dev); w90p910_return_default_idle(dev); if (!netif_queue_stopped(dev)) netif_stop_queue(dev); w90p910_init_desc(dev); dev->trans_start = jiffies; /* prevent tx timeout */ ether->cur_tx = 0x0; ether->finish_tx = 0x0; ether->cur_rx = 0x0; w90p910_set_curdest(dev); w90p910_enable_cam(dev); w90p910_enable_cam_command(dev); w90p910_enable_mac_interrupt(dev); w90p910_enable_tx(dev, 1); w90p910_enable_rx(dev, 1); w90p910_trigger_tx(dev); w90p910_trigger_rx(dev); dev->trans_start = jiffies; /* prevent tx timeout */ if (netif_queue_stopped(dev)) netif_wake_queue(dev); } static void w90p910_mdio_write(struct net_device *dev, int phy_id, 
int reg, int data) { struct w90p910_ether *ether = netdev_priv(dev); struct platform_device *pdev; unsigned int val, i; pdev = ether->pdev; __raw_writel(data, ether->reg + REG_MIID); val = (phy_id << 0x08) | reg; val |= PHYBUSY | PHYWR | MDCCR_VAL; __raw_writel(val, ether->reg + REG_MIIDA); for (i = 0; i < DELAY; i++) { if ((__raw_readl(ether->reg + REG_MIIDA) & PHYBUSY) == 0) break; } if (i == DELAY) dev_warn(&pdev->dev, "mdio write timed out\n"); } static int w90p910_mdio_read(struct net_device *dev, int phy_id, int reg) { struct w90p910_ether *ether = netdev_priv(dev); struct platform_device *pdev; unsigned int val, i, data; pdev = ether->pdev; val = (phy_id << 0x08) | reg; val |= PHYBUSY | MDCCR_VAL; __raw_writel(val, ether->reg + REG_MIIDA); for (i = 0; i < DELAY; i++) { if ((__raw_readl(ether->reg + REG_MIIDA) & PHYBUSY) == 0) break; } if (i == DELAY) { dev_warn(&pdev->dev, "mdio read timed out\n"); data = 0xffff; } else { data = __raw_readl(ether->reg + REG_MIID); } return data; } static int w90p910_set_mac_address(struct net_device *dev, void *addr) { struct sockaddr *address = addr; if (!is_valid_ether_addr(address->sa_data)) return -EADDRNOTAVAIL; memcpy(dev->dev_addr, address->sa_data, dev->addr_len); w90p910_write_cam(dev, CAM0, dev->dev_addr); return 0; } static int w90p910_ether_close(struct net_device *dev) { struct w90p910_ether *ether = netdev_priv(dev); struct platform_device *pdev; pdev = ether->pdev; dma_free_coherent(&pdev->dev, sizeof(struct recv_pdesc), ether->rdesc, ether->rdesc_phys); dma_free_coherent(&pdev->dev, sizeof(struct tran_pdesc), ether->tdesc, ether->tdesc_phys); netif_stop_queue(dev); del_timer_sync(&ether->check_timer); clk_disable(ether->rmiiclk); clk_disable(ether->clk); free_irq(ether->txirq, dev); free_irq(ether->rxirq, dev); return 0; } static struct net_device_stats *w90p910_ether_stats(struct net_device *dev) { struct w90p910_ether *ether; ether = netdev_priv(dev); return &ether->stats; } static int 
w90p910_send_frame(struct net_device *dev, unsigned char *data, int length) { struct w90p910_ether *ether; struct w90p910_txbd *txbd; struct platform_device *pdev; unsigned char *buffer; ether = netdev_priv(dev); pdev = ether->pdev; txbd = &ether->tdesc->desclist[ether->cur_tx]; buffer = ether->tdesc->tran_buf[ether->cur_tx]; if (length > 1514) { dev_err(&pdev->dev, "send data %d bytes, check it\n", length); length = 1514; } txbd->sl = length & 0xFFFF; memcpy(buffer, data, length); txbd->mode = TX_OWEN_DMA | PADDINGMODE | CRCMODE | MACTXINTEN; w90p910_enable_tx(dev, 1); w90p910_trigger_tx(dev); if (++ether->cur_tx >= TX_DESC_SIZE) ether->cur_tx = 0; txbd = &ether->tdesc->desclist[ether->cur_tx]; if (txbd->mode & TX_OWEN_DMA) netif_stop_queue(dev); return 0; } static int w90p910_ether_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct w90p910_ether *ether = netdev_priv(dev); if (!(w90p910_send_frame(dev, skb->data, skb->len))) { ether->skb = skb; dev_kfree_skb_irq(skb); return 0; } return -EAGAIN; } static irqreturn_t w90p910_tx_interrupt(int irq, void *dev_id) { struct w90p910_ether *ether; struct w90p910_txbd *txbd; struct platform_device *pdev; struct net_device *dev; unsigned int cur_entry, entry, status; dev = dev_id; ether = netdev_priv(dev); pdev = ether->pdev; w90p910_get_and_clear_int(dev, &status); cur_entry = __raw_readl(ether->reg + REG_CTXDSA); entry = ether->tdesc_phys + offsetof(struct tran_pdesc, desclist[ether->finish_tx]); while (entry != cur_entry) { txbd = &ether->tdesc->desclist[ether->finish_tx]; if (++ether->finish_tx >= TX_DESC_SIZE) ether->finish_tx = 0; if (txbd->sl & TXDS_TXCP) { ether->stats.tx_packets++; ether->stats.tx_bytes += txbd->sl & 0xFFFF; } else { ether->stats.tx_errors++; } txbd->sl = 0x0; txbd->mode = 0x0; if (netif_queue_stopped(dev)) netif_wake_queue(dev); entry = ether->tdesc_phys + offsetof(struct tran_pdesc, desclist[ether->finish_tx]); } if (status & MISTA_EXDEF) { dev_err(&pdev->dev, "emc defer exceed 
interrupt\n"); } else if (status & MISTA_TXBERR) { dev_err(&pdev->dev, "emc bus error interrupt\n"); w90p910_reset_mac(dev); } else if (status & MISTA_TDU) { if (netif_queue_stopped(dev)) netif_wake_queue(dev); } return IRQ_HANDLED; } static void netdev_rx(struct net_device *dev) { struct w90p910_ether *ether; struct w90p910_rxbd *rxbd; struct platform_device *pdev; struct sk_buff *skb; unsigned char *data; unsigned int length, status, val, entry; ether = netdev_priv(dev); pdev = ether->pdev; rxbd = &ether->rdesc->desclist[ether->cur_rx]; do { val = __raw_readl(ether->reg + REG_CRXDSA); entry = ether->rdesc_phys + offsetof(struct recv_pdesc, desclist[ether->cur_rx]); if (val == entry) break; status = rxbd->sl; length = status & 0xFFFF; if (status & RXDS_RXGD) { data = ether->rdesc->recv_buf[ether->cur_rx]; skb = netdev_alloc_skb(dev, length + 2); if (!skb) { dev_err(&pdev->dev, "get skb buffer error\n"); ether->stats.rx_dropped++; return; } skb_reserve(skb, 2); skb_put(skb, length); skb_copy_to_linear_data(skb, data, length); skb->protocol = eth_type_trans(skb, dev); ether->stats.rx_packets++; ether->stats.rx_bytes += length; netif_rx(skb); } else { ether->stats.rx_errors++; if (status & RXDS_RP) { dev_err(&pdev->dev, "rx runt err\n"); ether->stats.rx_length_errors++; } else if (status & RXDS_CRCE) { dev_err(&pdev->dev, "rx crc err\n"); ether->stats.rx_crc_errors++; } else if (status & RXDS_ALIE) { dev_err(&pdev->dev, "rx aligment err\n"); ether->stats.rx_frame_errors++; } else if (status & RXDS_PTLE) { dev_err(&pdev->dev, "rx longer err\n"); ether->stats.rx_over_errors++; } } rxbd->sl = RX_OWEN_DMA; rxbd->reserved = 0x0; if (++ether->cur_rx >= RX_DESC_SIZE) ether->cur_rx = 0; rxbd = &ether->rdesc->desclist[ether->cur_rx]; } while (1); } static irqreturn_t w90p910_rx_interrupt(int irq, void *dev_id) { struct net_device *dev; struct w90p910_ether *ether; struct platform_device *pdev; unsigned int status; dev = dev_id; ether = netdev_priv(dev); pdev = ether->pdev; 
w90p910_get_and_clear_int(dev, &status); if (status & MISTA_RDU) { netdev_rx(dev); w90p910_trigger_rx(dev); return IRQ_HANDLED; } else if (status & MISTA_RXBERR) { dev_err(&pdev->dev, "emc rx bus error\n"); w90p910_reset_mac(dev); } netdev_rx(dev); return IRQ_HANDLED; } static int w90p910_ether_open(struct net_device *dev) { struct w90p910_ether *ether; struct platform_device *pdev; ether = netdev_priv(dev); pdev = ether->pdev; w90p910_reset_mac(dev); w90p910_set_fifo_threshold(dev); w90p910_set_curdest(dev); w90p910_enable_cam(dev); w90p910_enable_cam_command(dev); w90p910_enable_mac_interrupt(dev); w90p910_set_global_maccmd(dev); w90p910_enable_rx(dev, 1); clk_enable(ether->rmiiclk); clk_enable(ether->clk); ether->rx_packets = 0x0; ether->rx_bytes = 0x0; if (request_irq(ether->txirq, w90p910_tx_interrupt, 0x0, pdev->name, dev)) { dev_err(&pdev->dev, "register irq tx failed\n"); return -EAGAIN; } if (request_irq(ether->rxirq, w90p910_rx_interrupt, 0x0, pdev->name, dev)) { dev_err(&pdev->dev, "register irq rx failed\n"); free_irq(ether->txirq, dev); return -EAGAIN; } mod_timer(&ether->check_timer, jiffies + msecs_to_jiffies(1000)); netif_start_queue(dev); w90p910_trigger_rx(dev); dev_info(&pdev->dev, "%s is OPENED\n", dev->name); return 0; } static void w90p910_ether_set_multicast_list(struct net_device *dev) { struct w90p910_ether *ether; unsigned int rx_mode; ether = netdev_priv(dev); if (dev->flags & IFF_PROMISC) rx_mode = CAMCMR_AUP | CAMCMR_AMP | CAMCMR_ABP | CAMCMR_ECMP; else if ((dev->flags & IFF_ALLMULTI) || !netdev_mc_empty(dev)) rx_mode = CAMCMR_AMP | CAMCMR_ABP | CAMCMR_ECMP; else rx_mode = CAMCMR_ECMP | CAMCMR_ABP; __raw_writel(rx_mode, ether->reg + REG_CAMCMR); } static int w90p910_ether_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { struct w90p910_ether *ether = netdev_priv(dev); struct mii_ioctl_data *data = if_mii(ifr); return generic_mii_ioctl(&ether->mii, data, cmd, NULL); } static void w90p910_get_drvinfo(struct net_device *dev, 
struct ethtool_drvinfo *info) { strcpy(info->driver, DRV_MODULE_NAME); strcpy(info->version, DRV_MODULE_VERSION); } static int w90p910_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct w90p910_ether *ether = netdev_priv(dev); return mii_ethtool_gset(&ether->mii, cmd); } static int w90p910_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct w90p910_ether *ether = netdev_priv(dev); return mii_ethtool_sset(&ether->mii, cmd); } static int w90p910_nway_reset(struct net_device *dev) { struct w90p910_ether *ether = netdev_priv(dev); return mii_nway_restart(&ether->mii); } static u32 w90p910_get_link(struct net_device *dev) { struct w90p910_ether *ether = netdev_priv(dev); return mii_link_ok(&ether->mii); } static const struct ethtool_ops w90p910_ether_ethtool_ops = { .get_settings = w90p910_get_settings, .set_settings = w90p910_set_settings, .get_drvinfo = w90p910_get_drvinfo, .nway_reset = w90p910_nway_reset, .get_link = w90p910_get_link, }; static const struct net_device_ops w90p910_ether_netdev_ops = { .ndo_open = w90p910_ether_open, .ndo_stop = w90p910_ether_close, .ndo_start_xmit = w90p910_ether_start_xmit, .ndo_get_stats = w90p910_ether_stats, .ndo_set_rx_mode = w90p910_ether_set_multicast_list, .ndo_set_mac_address = w90p910_set_mac_address, .ndo_do_ioctl = w90p910_ether_ioctl, .ndo_validate_addr = eth_validate_addr, .ndo_change_mtu = eth_change_mtu, }; static void __init get_mac_address(struct net_device *dev) { struct w90p910_ether *ether = netdev_priv(dev); struct platform_device *pdev; char addr[6]; pdev = ether->pdev; addr[0] = 0x00; addr[1] = 0x02; addr[2] = 0xac; addr[3] = 0x55; addr[4] = 0x88; addr[5] = 0xa8; if (is_valid_ether_addr(addr)) memcpy(dev->dev_addr, &addr, 0x06); else dev_err(&pdev->dev, "invalid mac address\n"); } static int w90p910_ether_setup(struct net_device *dev) { struct w90p910_ether *ether = netdev_priv(dev); ether_setup(dev); dev->netdev_ops = &w90p910_ether_netdev_ops; dev->ethtool_ops = 
&w90p910_ether_ethtool_ops; dev->tx_queue_len = 16; dev->dma = 0x0; dev->watchdog_timeo = TX_TIMEOUT; get_mac_address(dev); ether->cur_tx = 0x0; ether->cur_rx = 0x0; ether->finish_tx = 0x0; ether->linkflag = 0x0; ether->mii.phy_id = 0x01; ether->mii.phy_id_mask = 0x1f; ether->mii.reg_num_mask = 0x1f; ether->mii.dev = dev; ether->mii.mdio_read = w90p910_mdio_read; ether->mii.mdio_write = w90p910_mdio_write; setup_timer(&ether->check_timer, w90p910_check_link, (unsigned long)dev); return 0; } static int __devinit w90p910_ether_probe(struct platform_device *pdev) { struct w90p910_ether *ether; struct net_device *dev; int error; dev = alloc_etherdev(sizeof(struct w90p910_ether)); if (!dev) return -ENOMEM; ether = netdev_priv(dev); ether->res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (ether->res == NULL) { dev_err(&pdev->dev, "failed to get I/O memory\n"); error = -ENXIO; goto failed_free; } if (!request_mem_region(ether->res->start, resource_size(ether->res), pdev->name)) { dev_err(&pdev->dev, "failed to request I/O memory\n"); error = -EBUSY; goto failed_free; } ether->reg = ioremap(ether->res->start, resource_size(ether->res)); if (ether->reg == NULL) { dev_err(&pdev->dev, "failed to remap I/O memory\n"); error = -ENXIO; goto failed_free_mem; } ether->txirq = platform_get_irq(pdev, 0); if (ether->txirq < 0) { dev_err(&pdev->dev, "failed to get ether tx irq\n"); error = -ENXIO; goto failed_free_io; } ether->rxirq = platform_get_irq(pdev, 1); if (ether->rxirq < 0) { dev_err(&pdev->dev, "failed to get ether rx irq\n"); error = -ENXIO; goto failed_free_txirq; } platform_set_drvdata(pdev, dev); ether->clk = clk_get(&pdev->dev, NULL); if (IS_ERR(ether->clk)) { dev_err(&pdev->dev, "failed to get ether clock\n"); error = PTR_ERR(ether->clk); goto failed_free_rxirq; } ether->rmiiclk = clk_get(&pdev->dev, "RMII"); if (IS_ERR(ether->rmiiclk)) { dev_err(&pdev->dev, "failed to get ether clock\n"); error = PTR_ERR(ether->rmiiclk); goto failed_put_clk; } ether->pdev = 
pdev; w90p910_ether_setup(dev); error = register_netdev(dev); if (error != 0) { dev_err(&pdev->dev, "Regiter EMC w90p910 FAILED\n"); error = -ENODEV; goto failed_put_rmiiclk; } return 0; failed_put_rmiiclk: clk_put(ether->rmiiclk); failed_put_clk: clk_put(ether->clk); failed_free_rxirq: free_irq(ether->rxirq, pdev); platform_set_drvdata(pdev, NULL); failed_free_txirq: free_irq(ether->txirq, pdev); failed_free_io: iounmap(ether->reg); failed_free_mem: release_mem_region(ether->res->start, resource_size(ether->res)); failed_free: free_netdev(dev); return error; } static int __devexit w90p910_ether_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); struct w90p910_ether *ether = netdev_priv(dev); unregister_netdev(dev); clk_put(ether->rmiiclk); clk_put(ether->clk); iounmap(ether->reg); release_mem_region(ether->res->start, resource_size(ether->res)); free_irq(ether->txirq, dev); free_irq(ether->rxirq, dev); del_timer_sync(&ether->check_timer); platform_set_drvdata(pdev, NULL); free_netdev(dev); return 0; } static struct platform_driver w90p910_ether_driver = { .probe = w90p910_ether_probe, .remove = __devexit_p(w90p910_ether_remove), .driver = { .name = "nuc900-emc", .owner = THIS_MODULE, }, }; module_platform_driver(w90p910_ether_driver); MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>"); MODULE_DESCRIPTION("w90p910 MAC driver!"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:nuc900-emc");
gpl-2.0
bryan2894/D851_Kernel
drivers/video/backlight/platform_lcd.c
4982
3596
/* drivers/video/backlight/platform_lcd.c * * Copyright 2008 Simtec Electronics * Ben Dooks <ben@simtec.co.uk> * * Generic platform-device LCD power control interface. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/fb.h> #include <linux/backlight.h> #include <linux/lcd.h> #include <linux/slab.h> #include <video/platform_lcd.h> struct platform_lcd { struct device *us; struct lcd_device *lcd; struct plat_lcd_data *pdata; unsigned int power; unsigned int suspended : 1; }; static inline struct platform_lcd *to_our_lcd(struct lcd_device *lcd) { return lcd_get_data(lcd); } static int platform_lcd_get_power(struct lcd_device *lcd) { struct platform_lcd *plcd = to_our_lcd(lcd); return plcd->power; } static int platform_lcd_set_power(struct lcd_device *lcd, int power) { struct platform_lcd *plcd = to_our_lcd(lcd); int lcd_power = 1; if (power == FB_BLANK_POWERDOWN || plcd->suspended) lcd_power = 0; plcd->pdata->set_power(plcd->pdata, lcd_power); plcd->power = power; return 0; } static int platform_lcd_match(struct lcd_device *lcd, struct fb_info *info) { struct platform_lcd *plcd = to_our_lcd(lcd); struct plat_lcd_data *pdata = plcd->pdata; if (pdata->match_fb) return pdata->match_fb(pdata, info); return plcd->us->parent == info->device; } static struct lcd_ops platform_lcd_ops = { .get_power = platform_lcd_get_power, .set_power = platform_lcd_set_power, .check_fb = platform_lcd_match, }; static int __devinit platform_lcd_probe(struct platform_device *pdev) { struct plat_lcd_data *pdata; struct platform_lcd *plcd; struct device *dev = &pdev->dev; int err; pdata = pdev->dev.platform_data; if (!pdata) { dev_err(dev, "no platform data supplied\n"); return -EINVAL; } plcd = devm_kzalloc(&pdev->dev, sizeof(struct platform_lcd), GFP_KERNEL); if (!plcd) { 
dev_err(dev, "no memory for state\n"); return -ENOMEM; } plcd->us = dev; plcd->pdata = pdata; plcd->lcd = lcd_device_register(dev_name(dev), dev, plcd, &platform_lcd_ops); if (IS_ERR(plcd->lcd)) { dev_err(dev, "cannot register lcd device\n"); err = PTR_ERR(plcd->lcd); goto err; } platform_set_drvdata(pdev, plcd); platform_lcd_set_power(plcd->lcd, FB_BLANK_NORMAL); return 0; err: return err; } static int __devexit platform_lcd_remove(struct platform_device *pdev) { struct platform_lcd *plcd = platform_get_drvdata(pdev); lcd_device_unregister(plcd->lcd); return 0; } #ifdef CONFIG_PM static int platform_lcd_suspend(struct device *dev) { struct platform_lcd *plcd = dev_get_drvdata(dev); plcd->suspended = 1; platform_lcd_set_power(plcd->lcd, plcd->power); return 0; } static int platform_lcd_resume(struct device *dev) { struct platform_lcd *plcd = dev_get_drvdata(dev); plcd->suspended = 0; platform_lcd_set_power(plcd->lcd, plcd->power); return 0; } static SIMPLE_DEV_PM_OPS(platform_lcd_pm_ops, platform_lcd_suspend, platform_lcd_resume); #endif static struct platform_driver platform_lcd_driver = { .driver = { .name = "platform-lcd", .owner = THIS_MODULE, #ifdef CONFIG_PM .pm = &platform_lcd_pm_ops, #endif }, .probe = platform_lcd_probe, .remove = __devexit_p(platform_lcd_remove), }; module_platform_driver(platform_lcd_driver); MODULE_AUTHOR("Ben Dooks <ben-linux@fluff.org>"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:platform-lcd");
gpl-2.0
Team-Hydra/android_kernel_samsung_klte
drivers/net/ethernet/nuvoton/w90p910_ether.c
4982
26427
/* * Copyright (c) 2008-2009 Nuvoton technology corporation. * * Wan ZongShun <mcuos.com@gmail.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation;version 2 of the License. * */ #include <linux/module.h> #include <linux/init.h> #include <linux/mii.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/ethtool.h> #include <linux/platform_device.h> #include <linux/clk.h> #include <linux/gfp.h> #define DRV_MODULE_NAME "w90p910-emc" #define DRV_MODULE_VERSION "0.1" /* Ethernet MAC Registers */ #define REG_CAMCMR 0x00 #define REG_CAMEN 0x04 #define REG_CAMM_BASE 0x08 #define REG_CAML_BASE 0x0c #define REG_TXDLSA 0x88 #define REG_RXDLSA 0x8C #define REG_MCMDR 0x90 #define REG_MIID 0x94 #define REG_MIIDA 0x98 #define REG_FFTCR 0x9C #define REG_TSDR 0xa0 #define REG_RSDR 0xa4 #define REG_DMARFC 0xa8 #define REG_MIEN 0xac #define REG_MISTA 0xb0 #define REG_CTXDSA 0xcc #define REG_CTXBSA 0xd0 #define REG_CRXDSA 0xd4 #define REG_CRXBSA 0xd8 /* mac controller bit */ #define MCMDR_RXON 0x01 #define MCMDR_ACP (0x01 << 3) #define MCMDR_SPCRC (0x01 << 5) #define MCMDR_TXON (0x01 << 8) #define MCMDR_FDUP (0x01 << 18) #define MCMDR_ENMDC (0x01 << 19) #define MCMDR_OPMOD (0x01 << 20) #define SWR (0x01 << 24) /* cam command regiser */ #define CAMCMR_AUP 0x01 #define CAMCMR_AMP (0x01 << 1) #define CAMCMR_ABP (0x01 << 2) #define CAMCMR_CCAM (0x01 << 3) #define CAMCMR_ECMP (0x01 << 4) #define CAM0EN 0x01 /* mac mii controller bit */ #define MDCCR (0x0a << 20) #define PHYAD (0x01 << 8) #define PHYWR (0x01 << 16) #define PHYBUSY (0x01 << 17) #define PHYPRESP (0x01 << 18) #define CAM_ENTRY_SIZE 0x08 /* rx and tx status */ #define TXDS_TXCP (0x01 << 19) #define RXDS_CRCE (0x01 << 17) #define RXDS_PTLE (0x01 << 19) #define RXDS_RXGD (0x01 << 20) #define RXDS_ALIE (0x01 << 21) #define RXDS_RP (0x01 << 22) /* mac 
interrupt status*/ #define MISTA_EXDEF (0x01 << 19) #define MISTA_TXBERR (0x01 << 24) #define MISTA_TDU (0x01 << 23) #define MISTA_RDU (0x01 << 10) #define MISTA_RXBERR (0x01 << 11) #define ENSTART 0x01 #define ENRXINTR 0x01 #define ENRXGD (0x01 << 4) #define ENRXBERR (0x01 << 11) #define ENTXINTR (0x01 << 16) #define ENTXCP (0x01 << 18) #define ENTXABT (0x01 << 21) #define ENTXBERR (0x01 << 24) #define ENMDC (0x01 << 19) #define PHYBUSY (0x01 << 17) #define MDCCR_VAL 0xa00000 /* rx and tx owner bit */ #define RX_OWEN_DMA (0x01 << 31) #define RX_OWEN_CPU (~(0x03 << 30)) #define TX_OWEN_DMA (0x01 << 31) #define TX_OWEN_CPU (~(0x01 << 31)) /* tx frame desc controller bit */ #define MACTXINTEN 0x04 #define CRCMODE 0x02 #define PADDINGMODE 0x01 /* fftcr controller bit */ #define TXTHD (0x03 << 8) #define BLENGTH (0x01 << 20) /* global setting for driver */ #define RX_DESC_SIZE 50 #define TX_DESC_SIZE 10 #define MAX_RBUFF_SZ 0x600 #define MAX_TBUFF_SZ 0x600 #define TX_TIMEOUT (HZ/2) #define DELAY 1000 #define CAM0 0x0 static int w90p910_mdio_read(struct net_device *dev, int phy_id, int reg); struct w90p910_rxbd { unsigned int sl; unsigned int buffer; unsigned int reserved; unsigned int next; }; struct w90p910_txbd { unsigned int mode; unsigned int buffer; unsigned int sl; unsigned int next; }; struct recv_pdesc { struct w90p910_rxbd desclist[RX_DESC_SIZE]; char recv_buf[RX_DESC_SIZE][MAX_RBUFF_SZ]; }; struct tran_pdesc { struct w90p910_txbd desclist[TX_DESC_SIZE]; char tran_buf[TX_DESC_SIZE][MAX_TBUFF_SZ]; }; struct w90p910_ether { struct recv_pdesc *rdesc; struct tran_pdesc *tdesc; dma_addr_t rdesc_phys; dma_addr_t tdesc_phys; struct net_device_stats stats; struct platform_device *pdev; struct resource *res; struct sk_buff *skb; struct clk *clk; struct clk *rmiiclk; struct mii_if_info mii; struct timer_list check_timer; void __iomem *reg; int rxirq; int txirq; unsigned int cur_tx; unsigned int cur_rx; unsigned int finish_tx; unsigned int rx_packets; unsigned int 
rx_bytes; unsigned int start_tx_ptr; unsigned int start_rx_ptr; unsigned int linkflag; }; static void update_linkspeed_register(struct net_device *dev, unsigned int speed, unsigned int duplex) { struct w90p910_ether *ether = netdev_priv(dev); unsigned int val; val = __raw_readl(ether->reg + REG_MCMDR); if (speed == SPEED_100) { /* 100 full/half duplex */ if (duplex == DUPLEX_FULL) { val |= (MCMDR_OPMOD | MCMDR_FDUP); } else { val |= MCMDR_OPMOD; val &= ~MCMDR_FDUP; } } else { /* 10 full/half duplex */ if (duplex == DUPLEX_FULL) { val |= MCMDR_FDUP; val &= ~MCMDR_OPMOD; } else { val &= ~(MCMDR_FDUP | MCMDR_OPMOD); } } __raw_writel(val, ether->reg + REG_MCMDR); } static void update_linkspeed(struct net_device *dev) { struct w90p910_ether *ether = netdev_priv(dev); struct platform_device *pdev; unsigned int bmsr, bmcr, lpa, speed, duplex; pdev = ether->pdev; if (!mii_link_ok(&ether->mii)) { ether->linkflag = 0x0; netif_carrier_off(dev); dev_warn(&pdev->dev, "%s: Link down.\n", dev->name); return; } if (ether->linkflag == 1) return; bmsr = w90p910_mdio_read(dev, ether->mii.phy_id, MII_BMSR); bmcr = w90p910_mdio_read(dev, ether->mii.phy_id, MII_BMCR); if (bmcr & BMCR_ANENABLE) { if (!(bmsr & BMSR_ANEGCOMPLETE)) return; lpa = w90p910_mdio_read(dev, ether->mii.phy_id, MII_LPA); if ((lpa & LPA_100FULL) || (lpa & LPA_100HALF)) speed = SPEED_100; else speed = SPEED_10; if ((lpa & LPA_100FULL) || (lpa & LPA_10FULL)) duplex = DUPLEX_FULL; else duplex = DUPLEX_HALF; } else { speed = (bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10; duplex = (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF; } update_linkspeed_register(dev, speed, duplex); dev_info(&pdev->dev, "%s: Link now %i-%s\n", dev->name, speed, (duplex == DUPLEX_FULL) ? 
"FullDuplex" : "HalfDuplex"); ether->linkflag = 0x01; netif_carrier_on(dev); } static void w90p910_check_link(unsigned long dev_id) { struct net_device *dev = (struct net_device *) dev_id; struct w90p910_ether *ether = netdev_priv(dev); update_linkspeed(dev); mod_timer(&ether->check_timer, jiffies + msecs_to_jiffies(1000)); } static void w90p910_write_cam(struct net_device *dev, unsigned int x, unsigned char *pval) { struct w90p910_ether *ether = netdev_priv(dev); unsigned int msw, lsw; msw = (pval[0] << 24) | (pval[1] << 16) | (pval[2] << 8) | pval[3]; lsw = (pval[4] << 24) | (pval[5] << 16); __raw_writel(lsw, ether->reg + REG_CAML_BASE + x * CAM_ENTRY_SIZE); __raw_writel(msw, ether->reg + REG_CAMM_BASE + x * CAM_ENTRY_SIZE); } static int w90p910_init_desc(struct net_device *dev) { struct w90p910_ether *ether; struct w90p910_txbd *tdesc; struct w90p910_rxbd *rdesc; struct platform_device *pdev; unsigned int i; ether = netdev_priv(dev); pdev = ether->pdev; ether->tdesc = (struct tran_pdesc *) dma_alloc_coherent(&pdev->dev, sizeof(struct tran_pdesc), &ether->tdesc_phys, GFP_KERNEL); if (!ether->tdesc) { dev_err(&pdev->dev, "Failed to allocate memory for tx desc\n"); return -ENOMEM; } ether->rdesc = (struct recv_pdesc *) dma_alloc_coherent(&pdev->dev, sizeof(struct recv_pdesc), &ether->rdesc_phys, GFP_KERNEL); if (!ether->rdesc) { dev_err(&pdev->dev, "Failed to allocate memory for rx desc\n"); dma_free_coherent(&pdev->dev, sizeof(struct tran_pdesc), ether->tdesc, ether->tdesc_phys); return -ENOMEM; } for (i = 0; i < TX_DESC_SIZE; i++) { unsigned int offset; tdesc = &(ether->tdesc->desclist[i]); if (i == TX_DESC_SIZE - 1) offset = offsetof(struct tran_pdesc, desclist[0]); else offset = offsetof(struct tran_pdesc, desclist[i + 1]); tdesc->next = ether->tdesc_phys + offset; tdesc->buffer = ether->tdesc_phys + offsetof(struct tran_pdesc, tran_buf[i]); tdesc->sl = 0; tdesc->mode = 0; } ether->start_tx_ptr = ether->tdesc_phys; for (i = 0; i < RX_DESC_SIZE; i++) { unsigned 
int offset; rdesc = &(ether->rdesc->desclist[i]); if (i == RX_DESC_SIZE - 1) offset = offsetof(struct recv_pdesc, desclist[0]); else offset = offsetof(struct recv_pdesc, desclist[i + 1]); rdesc->next = ether->rdesc_phys + offset; rdesc->sl = RX_OWEN_DMA; rdesc->buffer = ether->rdesc_phys + offsetof(struct recv_pdesc, recv_buf[i]); } ether->start_rx_ptr = ether->rdesc_phys; return 0; } static void w90p910_set_fifo_threshold(struct net_device *dev) { struct w90p910_ether *ether = netdev_priv(dev); unsigned int val; val = TXTHD | BLENGTH; __raw_writel(val, ether->reg + REG_FFTCR); } static void w90p910_return_default_idle(struct net_device *dev) { struct w90p910_ether *ether = netdev_priv(dev); unsigned int val; val = __raw_readl(ether->reg + REG_MCMDR); val |= SWR; __raw_writel(val, ether->reg + REG_MCMDR); } static void w90p910_trigger_rx(struct net_device *dev) { struct w90p910_ether *ether = netdev_priv(dev); __raw_writel(ENSTART, ether->reg + REG_RSDR); } static void w90p910_trigger_tx(struct net_device *dev) { struct w90p910_ether *ether = netdev_priv(dev); __raw_writel(ENSTART, ether->reg + REG_TSDR); } static void w90p910_enable_mac_interrupt(struct net_device *dev) { struct w90p910_ether *ether = netdev_priv(dev); unsigned int val; val = ENTXINTR | ENRXINTR | ENRXGD | ENTXCP; val |= ENTXBERR | ENRXBERR | ENTXABT; __raw_writel(val, ether->reg + REG_MIEN); } static void w90p910_get_and_clear_int(struct net_device *dev, unsigned int *val) { struct w90p910_ether *ether = netdev_priv(dev); *val = __raw_readl(ether->reg + REG_MISTA); __raw_writel(*val, ether->reg + REG_MISTA); } static void w90p910_set_global_maccmd(struct net_device *dev) { struct w90p910_ether *ether = netdev_priv(dev); unsigned int val; val = __raw_readl(ether->reg + REG_MCMDR); val |= MCMDR_SPCRC | MCMDR_ENMDC | MCMDR_ACP | ENMDC; __raw_writel(val, ether->reg + REG_MCMDR); } static void w90p910_enable_cam(struct net_device *dev) { struct w90p910_ether *ether = netdev_priv(dev); unsigned int 
val; w90p910_write_cam(dev, CAM0, dev->dev_addr); val = __raw_readl(ether->reg + REG_CAMEN); val |= CAM0EN; __raw_writel(val, ether->reg + REG_CAMEN); } static void w90p910_enable_cam_command(struct net_device *dev) { struct w90p910_ether *ether = netdev_priv(dev); unsigned int val; val = CAMCMR_ECMP | CAMCMR_ABP | CAMCMR_AMP; __raw_writel(val, ether->reg + REG_CAMCMR); } static void w90p910_enable_tx(struct net_device *dev, unsigned int enable) { struct w90p910_ether *ether = netdev_priv(dev); unsigned int val; val = __raw_readl(ether->reg + REG_MCMDR); if (enable) val |= MCMDR_TXON; else val &= ~MCMDR_TXON; __raw_writel(val, ether->reg + REG_MCMDR); } static void w90p910_enable_rx(struct net_device *dev, unsigned int enable) { struct w90p910_ether *ether = netdev_priv(dev); unsigned int val; val = __raw_readl(ether->reg + REG_MCMDR); if (enable) val |= MCMDR_RXON; else val &= ~MCMDR_RXON; __raw_writel(val, ether->reg + REG_MCMDR); } static void w90p910_set_curdest(struct net_device *dev) { struct w90p910_ether *ether = netdev_priv(dev); __raw_writel(ether->start_rx_ptr, ether->reg + REG_RXDLSA); __raw_writel(ether->start_tx_ptr, ether->reg + REG_TXDLSA); } static void w90p910_reset_mac(struct net_device *dev) { struct w90p910_ether *ether = netdev_priv(dev); w90p910_enable_tx(dev, 0); w90p910_enable_rx(dev, 0); w90p910_set_fifo_threshold(dev); w90p910_return_default_idle(dev); if (!netif_queue_stopped(dev)) netif_stop_queue(dev); w90p910_init_desc(dev); dev->trans_start = jiffies; /* prevent tx timeout */ ether->cur_tx = 0x0; ether->finish_tx = 0x0; ether->cur_rx = 0x0; w90p910_set_curdest(dev); w90p910_enable_cam(dev); w90p910_enable_cam_command(dev); w90p910_enable_mac_interrupt(dev); w90p910_enable_tx(dev, 1); w90p910_enable_rx(dev, 1); w90p910_trigger_tx(dev); w90p910_trigger_rx(dev); dev->trans_start = jiffies; /* prevent tx timeout */ if (netif_queue_stopped(dev)) netif_wake_queue(dev); } static void w90p910_mdio_write(struct net_device *dev, int phy_id, 
int reg, int data) { struct w90p910_ether *ether = netdev_priv(dev); struct platform_device *pdev; unsigned int val, i; pdev = ether->pdev; __raw_writel(data, ether->reg + REG_MIID); val = (phy_id << 0x08) | reg; val |= PHYBUSY | PHYWR | MDCCR_VAL; __raw_writel(val, ether->reg + REG_MIIDA); for (i = 0; i < DELAY; i++) { if ((__raw_readl(ether->reg + REG_MIIDA) & PHYBUSY) == 0) break; } if (i == DELAY) dev_warn(&pdev->dev, "mdio write timed out\n"); } static int w90p910_mdio_read(struct net_device *dev, int phy_id, int reg) { struct w90p910_ether *ether = netdev_priv(dev); struct platform_device *pdev; unsigned int val, i, data; pdev = ether->pdev; val = (phy_id << 0x08) | reg; val |= PHYBUSY | MDCCR_VAL; __raw_writel(val, ether->reg + REG_MIIDA); for (i = 0; i < DELAY; i++) { if ((__raw_readl(ether->reg + REG_MIIDA) & PHYBUSY) == 0) break; } if (i == DELAY) { dev_warn(&pdev->dev, "mdio read timed out\n"); data = 0xffff; } else { data = __raw_readl(ether->reg + REG_MIID); } return data; } static int w90p910_set_mac_address(struct net_device *dev, void *addr) { struct sockaddr *address = addr; if (!is_valid_ether_addr(address->sa_data)) return -EADDRNOTAVAIL; memcpy(dev->dev_addr, address->sa_data, dev->addr_len); w90p910_write_cam(dev, CAM0, dev->dev_addr); return 0; } static int w90p910_ether_close(struct net_device *dev) { struct w90p910_ether *ether = netdev_priv(dev); struct platform_device *pdev; pdev = ether->pdev; dma_free_coherent(&pdev->dev, sizeof(struct recv_pdesc), ether->rdesc, ether->rdesc_phys); dma_free_coherent(&pdev->dev, sizeof(struct tran_pdesc), ether->tdesc, ether->tdesc_phys); netif_stop_queue(dev); del_timer_sync(&ether->check_timer); clk_disable(ether->rmiiclk); clk_disable(ether->clk); free_irq(ether->txirq, dev); free_irq(ether->rxirq, dev); return 0; } static struct net_device_stats *w90p910_ether_stats(struct net_device *dev) { struct w90p910_ether *ether; ether = netdev_priv(dev); return &ether->stats; } static int 
w90p910_send_frame(struct net_device *dev, unsigned char *data, int length) { struct w90p910_ether *ether; struct w90p910_txbd *txbd; struct platform_device *pdev; unsigned char *buffer; ether = netdev_priv(dev); pdev = ether->pdev; txbd = &ether->tdesc->desclist[ether->cur_tx]; buffer = ether->tdesc->tran_buf[ether->cur_tx]; if (length > 1514) { dev_err(&pdev->dev, "send data %d bytes, check it\n", length); length = 1514; } txbd->sl = length & 0xFFFF; memcpy(buffer, data, length); txbd->mode = TX_OWEN_DMA | PADDINGMODE | CRCMODE | MACTXINTEN; w90p910_enable_tx(dev, 1); w90p910_trigger_tx(dev); if (++ether->cur_tx >= TX_DESC_SIZE) ether->cur_tx = 0; txbd = &ether->tdesc->desclist[ether->cur_tx]; if (txbd->mode & TX_OWEN_DMA) netif_stop_queue(dev); return 0; } static int w90p910_ether_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct w90p910_ether *ether = netdev_priv(dev); if (!(w90p910_send_frame(dev, skb->data, skb->len))) { ether->skb = skb; dev_kfree_skb_irq(skb); return 0; } return -EAGAIN; } static irqreturn_t w90p910_tx_interrupt(int irq, void *dev_id) { struct w90p910_ether *ether; struct w90p910_txbd *txbd; struct platform_device *pdev; struct net_device *dev; unsigned int cur_entry, entry, status; dev = dev_id; ether = netdev_priv(dev); pdev = ether->pdev; w90p910_get_and_clear_int(dev, &status); cur_entry = __raw_readl(ether->reg + REG_CTXDSA); entry = ether->tdesc_phys + offsetof(struct tran_pdesc, desclist[ether->finish_tx]); while (entry != cur_entry) { txbd = &ether->tdesc->desclist[ether->finish_tx]; if (++ether->finish_tx >= TX_DESC_SIZE) ether->finish_tx = 0; if (txbd->sl & TXDS_TXCP) { ether->stats.tx_packets++; ether->stats.tx_bytes += txbd->sl & 0xFFFF; } else { ether->stats.tx_errors++; } txbd->sl = 0x0; txbd->mode = 0x0; if (netif_queue_stopped(dev)) netif_wake_queue(dev); entry = ether->tdesc_phys + offsetof(struct tran_pdesc, desclist[ether->finish_tx]); } if (status & MISTA_EXDEF) { dev_err(&pdev->dev, "emc defer exceed 
interrupt\n"); } else if (status & MISTA_TXBERR) { dev_err(&pdev->dev, "emc bus error interrupt\n"); w90p910_reset_mac(dev); } else if (status & MISTA_TDU) { if (netif_queue_stopped(dev)) netif_wake_queue(dev); } return IRQ_HANDLED; } static void netdev_rx(struct net_device *dev) { struct w90p910_ether *ether; struct w90p910_rxbd *rxbd; struct platform_device *pdev; struct sk_buff *skb; unsigned char *data; unsigned int length, status, val, entry; ether = netdev_priv(dev); pdev = ether->pdev; rxbd = &ether->rdesc->desclist[ether->cur_rx]; do { val = __raw_readl(ether->reg + REG_CRXDSA); entry = ether->rdesc_phys + offsetof(struct recv_pdesc, desclist[ether->cur_rx]); if (val == entry) break; status = rxbd->sl; length = status & 0xFFFF; if (status & RXDS_RXGD) { data = ether->rdesc->recv_buf[ether->cur_rx]; skb = netdev_alloc_skb(dev, length + 2); if (!skb) { dev_err(&pdev->dev, "get skb buffer error\n"); ether->stats.rx_dropped++; return; } skb_reserve(skb, 2); skb_put(skb, length); skb_copy_to_linear_data(skb, data, length); skb->protocol = eth_type_trans(skb, dev); ether->stats.rx_packets++; ether->stats.rx_bytes += length; netif_rx(skb); } else { ether->stats.rx_errors++; if (status & RXDS_RP) { dev_err(&pdev->dev, "rx runt err\n"); ether->stats.rx_length_errors++; } else if (status & RXDS_CRCE) { dev_err(&pdev->dev, "rx crc err\n"); ether->stats.rx_crc_errors++; } else if (status & RXDS_ALIE) { dev_err(&pdev->dev, "rx aligment err\n"); ether->stats.rx_frame_errors++; } else if (status & RXDS_PTLE) { dev_err(&pdev->dev, "rx longer err\n"); ether->stats.rx_over_errors++; } } rxbd->sl = RX_OWEN_DMA; rxbd->reserved = 0x0; if (++ether->cur_rx >= RX_DESC_SIZE) ether->cur_rx = 0; rxbd = &ether->rdesc->desclist[ether->cur_rx]; } while (1); } static irqreturn_t w90p910_rx_interrupt(int irq, void *dev_id) { struct net_device *dev; struct w90p910_ether *ether; struct platform_device *pdev; unsigned int status; dev = dev_id; ether = netdev_priv(dev); pdev = ether->pdev; 
w90p910_get_and_clear_int(dev, &status); if (status & MISTA_RDU) { netdev_rx(dev); w90p910_trigger_rx(dev); return IRQ_HANDLED; } else if (status & MISTA_RXBERR) { dev_err(&pdev->dev, "emc rx bus error\n"); w90p910_reset_mac(dev); } netdev_rx(dev); return IRQ_HANDLED; } static int w90p910_ether_open(struct net_device *dev) { struct w90p910_ether *ether; struct platform_device *pdev; ether = netdev_priv(dev); pdev = ether->pdev; w90p910_reset_mac(dev); w90p910_set_fifo_threshold(dev); w90p910_set_curdest(dev); w90p910_enable_cam(dev); w90p910_enable_cam_command(dev); w90p910_enable_mac_interrupt(dev); w90p910_set_global_maccmd(dev); w90p910_enable_rx(dev, 1); clk_enable(ether->rmiiclk); clk_enable(ether->clk); ether->rx_packets = 0x0; ether->rx_bytes = 0x0; if (request_irq(ether->txirq, w90p910_tx_interrupt, 0x0, pdev->name, dev)) { dev_err(&pdev->dev, "register irq tx failed\n"); return -EAGAIN; } if (request_irq(ether->rxirq, w90p910_rx_interrupt, 0x0, pdev->name, dev)) { dev_err(&pdev->dev, "register irq rx failed\n"); free_irq(ether->txirq, dev); return -EAGAIN; } mod_timer(&ether->check_timer, jiffies + msecs_to_jiffies(1000)); netif_start_queue(dev); w90p910_trigger_rx(dev); dev_info(&pdev->dev, "%s is OPENED\n", dev->name); return 0; } static void w90p910_ether_set_multicast_list(struct net_device *dev) { struct w90p910_ether *ether; unsigned int rx_mode; ether = netdev_priv(dev); if (dev->flags & IFF_PROMISC) rx_mode = CAMCMR_AUP | CAMCMR_AMP | CAMCMR_ABP | CAMCMR_ECMP; else if ((dev->flags & IFF_ALLMULTI) || !netdev_mc_empty(dev)) rx_mode = CAMCMR_AMP | CAMCMR_ABP | CAMCMR_ECMP; else rx_mode = CAMCMR_ECMP | CAMCMR_ABP; __raw_writel(rx_mode, ether->reg + REG_CAMCMR); } static int w90p910_ether_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { struct w90p910_ether *ether = netdev_priv(dev); struct mii_ioctl_data *data = if_mii(ifr); return generic_mii_ioctl(&ether->mii, data, cmd, NULL); } static void w90p910_get_drvinfo(struct net_device *dev, 
struct ethtool_drvinfo *info) { strcpy(info->driver, DRV_MODULE_NAME); strcpy(info->version, DRV_MODULE_VERSION); } static int w90p910_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct w90p910_ether *ether = netdev_priv(dev); return mii_ethtool_gset(&ether->mii, cmd); } static int w90p910_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct w90p910_ether *ether = netdev_priv(dev); return mii_ethtool_sset(&ether->mii, cmd); } static int w90p910_nway_reset(struct net_device *dev) { struct w90p910_ether *ether = netdev_priv(dev); return mii_nway_restart(&ether->mii); } static u32 w90p910_get_link(struct net_device *dev) { struct w90p910_ether *ether = netdev_priv(dev); return mii_link_ok(&ether->mii); } static const struct ethtool_ops w90p910_ether_ethtool_ops = { .get_settings = w90p910_get_settings, .set_settings = w90p910_set_settings, .get_drvinfo = w90p910_get_drvinfo, .nway_reset = w90p910_nway_reset, .get_link = w90p910_get_link, }; static const struct net_device_ops w90p910_ether_netdev_ops = { .ndo_open = w90p910_ether_open, .ndo_stop = w90p910_ether_close, .ndo_start_xmit = w90p910_ether_start_xmit, .ndo_get_stats = w90p910_ether_stats, .ndo_set_rx_mode = w90p910_ether_set_multicast_list, .ndo_set_mac_address = w90p910_set_mac_address, .ndo_do_ioctl = w90p910_ether_ioctl, .ndo_validate_addr = eth_validate_addr, .ndo_change_mtu = eth_change_mtu, }; static void __init get_mac_address(struct net_device *dev) { struct w90p910_ether *ether = netdev_priv(dev); struct platform_device *pdev; char addr[6]; pdev = ether->pdev; addr[0] = 0x00; addr[1] = 0x02; addr[2] = 0xac; addr[3] = 0x55; addr[4] = 0x88; addr[5] = 0xa8; if (is_valid_ether_addr(addr)) memcpy(dev->dev_addr, &addr, 0x06); else dev_err(&pdev->dev, "invalid mac address\n"); } static int w90p910_ether_setup(struct net_device *dev) { struct w90p910_ether *ether = netdev_priv(dev); ether_setup(dev); dev->netdev_ops = &w90p910_ether_netdev_ops; dev->ethtool_ops = 
&w90p910_ether_ethtool_ops; dev->tx_queue_len = 16; dev->dma = 0x0; dev->watchdog_timeo = TX_TIMEOUT; get_mac_address(dev); ether->cur_tx = 0x0; ether->cur_rx = 0x0; ether->finish_tx = 0x0; ether->linkflag = 0x0; ether->mii.phy_id = 0x01; ether->mii.phy_id_mask = 0x1f; ether->mii.reg_num_mask = 0x1f; ether->mii.dev = dev; ether->mii.mdio_read = w90p910_mdio_read; ether->mii.mdio_write = w90p910_mdio_write; setup_timer(&ether->check_timer, w90p910_check_link, (unsigned long)dev); return 0; } static int __devinit w90p910_ether_probe(struct platform_device *pdev) { struct w90p910_ether *ether; struct net_device *dev; int error; dev = alloc_etherdev(sizeof(struct w90p910_ether)); if (!dev) return -ENOMEM; ether = netdev_priv(dev); ether->res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (ether->res == NULL) { dev_err(&pdev->dev, "failed to get I/O memory\n"); error = -ENXIO; goto failed_free; } if (!request_mem_region(ether->res->start, resource_size(ether->res), pdev->name)) { dev_err(&pdev->dev, "failed to request I/O memory\n"); error = -EBUSY; goto failed_free; } ether->reg = ioremap(ether->res->start, resource_size(ether->res)); if (ether->reg == NULL) { dev_err(&pdev->dev, "failed to remap I/O memory\n"); error = -ENXIO; goto failed_free_mem; } ether->txirq = platform_get_irq(pdev, 0); if (ether->txirq < 0) { dev_err(&pdev->dev, "failed to get ether tx irq\n"); error = -ENXIO; goto failed_free_io; } ether->rxirq = platform_get_irq(pdev, 1); if (ether->rxirq < 0) { dev_err(&pdev->dev, "failed to get ether rx irq\n"); error = -ENXIO; goto failed_free_txirq; } platform_set_drvdata(pdev, dev); ether->clk = clk_get(&pdev->dev, NULL); if (IS_ERR(ether->clk)) { dev_err(&pdev->dev, "failed to get ether clock\n"); error = PTR_ERR(ether->clk); goto failed_free_rxirq; } ether->rmiiclk = clk_get(&pdev->dev, "RMII"); if (IS_ERR(ether->rmiiclk)) { dev_err(&pdev->dev, "failed to get ether clock\n"); error = PTR_ERR(ether->rmiiclk); goto failed_put_clk; } ether->pdev = 
pdev; w90p910_ether_setup(dev); error = register_netdev(dev); if (error != 0) { dev_err(&pdev->dev, "Regiter EMC w90p910 FAILED\n"); error = -ENODEV; goto failed_put_rmiiclk; } return 0; failed_put_rmiiclk: clk_put(ether->rmiiclk); failed_put_clk: clk_put(ether->clk); failed_free_rxirq: free_irq(ether->rxirq, pdev); platform_set_drvdata(pdev, NULL); failed_free_txirq: free_irq(ether->txirq, pdev); failed_free_io: iounmap(ether->reg); failed_free_mem: release_mem_region(ether->res->start, resource_size(ether->res)); failed_free: free_netdev(dev); return error; } static int __devexit w90p910_ether_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); struct w90p910_ether *ether = netdev_priv(dev); unregister_netdev(dev); clk_put(ether->rmiiclk); clk_put(ether->clk); iounmap(ether->reg); release_mem_region(ether->res->start, resource_size(ether->res)); free_irq(ether->txirq, dev); free_irq(ether->rxirq, dev); del_timer_sync(&ether->check_timer); platform_set_drvdata(pdev, NULL); free_netdev(dev); return 0; } static struct platform_driver w90p910_ether_driver = { .probe = w90p910_ether_probe, .remove = __devexit_p(w90p910_ether_remove), .driver = { .name = "nuc900-emc", .owner = THIS_MODULE, }, }; module_platform_driver(w90p910_ether_driver); MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>"); MODULE_DESCRIPTION("w90p910 MAC driver!"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:nuc900-emc");
gpl-2.0
Gilbert32/leo-3.4
drivers/staging/rtl8192e/rtllib_crypt_tkip.c
5238
20359
/* * Host AP crypt: host-based TKIP encryption implementation for Host AP driver * * Copyright (c) 2003-2004, Jouni Malinen <jkmaline@cc.hut.fi> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. See README and COPYING for * more details. */ #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/random.h> #include <linux/skbuff.h> #include <linux/netdevice.h> #include <linux/if_ether.h> #include <linux/if_arp.h> #include <linux/string.h> #include <linux/crypto.h> #include <linux/scatterlist.h> #include <linux/crc32.h> #include "rtllib.h" struct rtllib_tkip_data { #define TKIP_KEY_LEN 32 u8 key[TKIP_KEY_LEN]; int key_set; u32 tx_iv32; u16 tx_iv16; u16 tx_ttak[5]; int tx_phase1_done; u32 rx_iv32; u16 rx_iv16; bool initialized; u16 rx_ttak[5]; int rx_phase1_done; u32 rx_iv32_new; u16 rx_iv16_new; u32 dot11RSNAStatsTKIPReplays; u32 dot11RSNAStatsTKIPICVErrors; u32 dot11RSNAStatsTKIPLocalMICFailures; int key_idx; struct crypto_blkcipher *rx_tfm_arc4; struct crypto_hash *rx_tfm_michael; struct crypto_blkcipher *tx_tfm_arc4; struct crypto_hash *tx_tfm_michael; /* scratch buffers for virt_to_page() (crypto API) */ u8 rx_hdr[16], tx_hdr[16]; }; static void *rtllib_tkip_init(int key_idx) { struct rtllib_tkip_data *priv; priv = kzalloc(sizeof(*priv), GFP_ATOMIC); if (priv == NULL) goto fail; priv->key_idx = key_idx; priv->tx_tfm_arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC); if (IS_ERR(priv->tx_tfm_arc4)) { printk(KERN_DEBUG "rtllib_crypt_tkip: could not allocate " "crypto API arc4\n"); priv->tx_tfm_arc4 = NULL; goto fail; } priv->tx_tfm_michael = crypto_alloc_hash("michael_mic", 0, CRYPTO_ALG_ASYNC); if (IS_ERR(priv->tx_tfm_michael)) { printk(KERN_DEBUG "rtllib_crypt_tkip: could not allocate " "crypto API michael_mic\n"); priv->tx_tfm_michael = NULL; goto fail; } priv->rx_tfm_arc4 = 
crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC); if (IS_ERR(priv->rx_tfm_arc4)) { printk(KERN_DEBUG "rtllib_crypt_tkip: could not allocate " "crypto API arc4\n"); priv->rx_tfm_arc4 = NULL; goto fail; } priv->rx_tfm_michael = crypto_alloc_hash("michael_mic", 0, CRYPTO_ALG_ASYNC); if (IS_ERR(priv->rx_tfm_michael)) { printk(KERN_DEBUG "rtllib_crypt_tkip: could not allocate " "crypto API michael_mic\n"); priv->rx_tfm_michael = NULL; goto fail; } return priv; fail: if (priv) { if (priv->tx_tfm_michael) crypto_free_hash(priv->tx_tfm_michael); if (priv->tx_tfm_arc4) crypto_free_blkcipher(priv->tx_tfm_arc4); if (priv->rx_tfm_michael) crypto_free_hash(priv->rx_tfm_michael); if (priv->rx_tfm_arc4) crypto_free_blkcipher(priv->rx_tfm_arc4); kfree(priv); } return NULL; } static void rtllib_tkip_deinit(void *priv) { struct rtllib_tkip_data *_priv = priv; if (_priv) { if (_priv->tx_tfm_michael) crypto_free_hash(_priv->tx_tfm_michael); if (_priv->tx_tfm_arc4) crypto_free_blkcipher(_priv->tx_tfm_arc4); if (_priv->rx_tfm_michael) crypto_free_hash(_priv->rx_tfm_michael); if (_priv->rx_tfm_arc4) crypto_free_blkcipher(_priv->rx_tfm_arc4); } kfree(priv); } static inline u16 RotR1(u16 val) { return (val >> 1) | (val << 15); } static inline u8 Lo8(u16 val) { return val & 0xff; } static inline u8 Hi8(u16 val) { return val >> 8; } static inline u16 Lo16(u32 val) { return val & 0xffff; } static inline u16 Hi16(u32 val) { return val >> 16; } static inline u16 Mk16(u8 hi, u8 lo) { return lo | (((u16) hi) << 8); } static inline u16 Mk16_le(u16 *v) { return le16_to_cpu(*v); } static const u16 Sbox[256] = { 0xC6A5, 0xF884, 0xEE99, 0xF68D, 0xFF0D, 0xD6BD, 0xDEB1, 0x9154, 0x6050, 0x0203, 0xCEA9, 0x567D, 0xE719, 0xB562, 0x4DE6, 0xEC9A, 0x8F45, 0x1F9D, 0x8940, 0xFA87, 0xEF15, 0xB2EB, 0x8EC9, 0xFB0B, 0x41EC, 0xB367, 0x5FFD, 0x45EA, 0x23BF, 0x53F7, 0xE496, 0x9B5B, 0x75C2, 0xE11C, 0x3DAE, 0x4C6A, 0x6C5A, 0x7E41, 0xF502, 0x834F, 0x685C, 0x51F4, 0xD134, 0xF908, 0xE293, 0xAB73, 0x6253, 0x2A3F, 
0x080C, 0x9552, 0x4665, 0x9D5E, 0x3028, 0x37A1, 0x0A0F, 0x2FB5, 0x0E09, 0x2436, 0x1B9B, 0xDF3D, 0xCD26, 0x4E69, 0x7FCD, 0xEA9F, 0x121B, 0x1D9E, 0x5874, 0x342E, 0x362D, 0xDCB2, 0xB4EE, 0x5BFB, 0xA4F6, 0x764D, 0xB761, 0x7DCE, 0x527B, 0xDD3E, 0x5E71, 0x1397, 0xA6F5, 0xB968, 0x0000, 0xC12C, 0x4060, 0xE31F, 0x79C8, 0xB6ED, 0xD4BE, 0x8D46, 0x67D9, 0x724B, 0x94DE, 0x98D4, 0xB0E8, 0x854A, 0xBB6B, 0xC52A, 0x4FE5, 0xED16, 0x86C5, 0x9AD7, 0x6655, 0x1194, 0x8ACF, 0xE910, 0x0406, 0xFE81, 0xA0F0, 0x7844, 0x25BA, 0x4BE3, 0xA2F3, 0x5DFE, 0x80C0, 0x058A, 0x3FAD, 0x21BC, 0x7048, 0xF104, 0x63DF, 0x77C1, 0xAF75, 0x4263, 0x2030, 0xE51A, 0xFD0E, 0xBF6D, 0x814C, 0x1814, 0x2635, 0xC32F, 0xBEE1, 0x35A2, 0x88CC, 0x2E39, 0x9357, 0x55F2, 0xFC82, 0x7A47, 0xC8AC, 0xBAE7, 0x322B, 0xE695, 0xC0A0, 0x1998, 0x9ED1, 0xA37F, 0x4466, 0x547E, 0x3BAB, 0x0B83, 0x8CCA, 0xC729, 0x6BD3, 0x283C, 0xA779, 0xBCE2, 0x161D, 0xAD76, 0xDB3B, 0x6456, 0x744E, 0x141E, 0x92DB, 0x0C0A, 0x486C, 0xB8E4, 0x9F5D, 0xBD6E, 0x43EF, 0xC4A6, 0x39A8, 0x31A4, 0xD337, 0xF28B, 0xD532, 0x8B43, 0x6E59, 0xDAB7, 0x018C, 0xB164, 0x9CD2, 0x49E0, 0xD8B4, 0xACFA, 0xF307, 0xCF25, 0xCAAF, 0xF48E, 0x47E9, 0x1018, 0x6FD5, 0xF088, 0x4A6F, 0x5C72, 0x3824, 0x57F1, 0x73C7, 0x9751, 0xCB23, 0xA17C, 0xE89C, 0x3E21, 0x96DD, 0x61DC, 0x0D86, 0x0F85, 0xE090, 0x7C42, 0x71C4, 0xCCAA, 0x90D8, 0x0605, 0xF701, 0x1C12, 0xC2A3, 0x6A5F, 0xAEF9, 0x69D0, 0x1791, 0x9958, 0x3A27, 0x27B9, 0xD938, 0xEB13, 0x2BB3, 0x2233, 0xD2BB, 0xA970, 0x0789, 0x33A7, 0x2DB6, 0x3C22, 0x1592, 0xC920, 0x8749, 0xAAFF, 0x5078, 0xA57A, 0x038F, 0x59F8, 0x0980, 0x1A17, 0x65DA, 0xD731, 0x84C6, 0xD0B8, 0x82C3, 0x29B0, 0x5A77, 0x1E11, 0x7BCB, 0xA8FC, 0x6DD6, 0x2C3A, }; static inline u16 _S_(u16 v) { u16 t = Sbox[Hi8(v)]; return Sbox[Lo8(v)] ^ ((t << 8) | (t >> 8)); } #define PHASE1_LOOP_COUNT 8 static void tkip_mixing_phase1(u16 *TTAK, const u8 *TK, const u8 *TA, u32 IV32) { int i, j; /* Initialize the 80-bit TTAK from TSC (IV32) and TA[0..5] */ TTAK[0] = Lo16(IV32); TTAK[1] = Hi16(IV32); 
TTAK[2] = Mk16(TA[1], TA[0]); TTAK[3] = Mk16(TA[3], TA[2]); TTAK[4] = Mk16(TA[5], TA[4]); for (i = 0; i < PHASE1_LOOP_COUNT; i++) { j = 2 * (i & 1); TTAK[0] += _S_(TTAK[4] ^ Mk16(TK[1 + j], TK[0 + j])); TTAK[1] += _S_(TTAK[0] ^ Mk16(TK[5 + j], TK[4 + j])); TTAK[2] += _S_(TTAK[1] ^ Mk16(TK[9 + j], TK[8 + j])); TTAK[3] += _S_(TTAK[2] ^ Mk16(TK[13 + j], TK[12 + j])); TTAK[4] += _S_(TTAK[3] ^ Mk16(TK[1 + j], TK[0 + j])) + i; } } static void tkip_mixing_phase2(u8 *WEPSeed, const u8 *TK, const u16 *TTAK, u16 IV16) { /* Make temporary area overlap WEP seed so that the final copy can be * avoided on little endian hosts. */ u16 *PPK = (u16 *) &WEPSeed[4]; /* Step 1 - make copy of TTAK and bring in TSC */ PPK[0] = TTAK[0]; PPK[1] = TTAK[1]; PPK[2] = TTAK[2]; PPK[3] = TTAK[3]; PPK[4] = TTAK[4]; PPK[5] = TTAK[4] + IV16; /* Step 2 - 96-bit bijective mixing using S-box */ PPK[0] += _S_(PPK[5] ^ Mk16_le((u16 *) &TK[0])); PPK[1] += _S_(PPK[0] ^ Mk16_le((u16 *) &TK[2])); PPK[2] += _S_(PPK[1] ^ Mk16_le((u16 *) &TK[4])); PPK[3] += _S_(PPK[2] ^ Mk16_le((u16 *) &TK[6])); PPK[4] += _S_(PPK[3] ^ Mk16_le((u16 *) &TK[8])); PPK[5] += _S_(PPK[4] ^ Mk16_le((u16 *) &TK[10])); PPK[0] += RotR1(PPK[5] ^ Mk16_le((u16 *) &TK[12])); PPK[1] += RotR1(PPK[0] ^ Mk16_le((u16 *) &TK[14])); PPK[2] += RotR1(PPK[1]); PPK[3] += RotR1(PPK[2]); PPK[4] += RotR1(PPK[3]); PPK[5] += RotR1(PPK[4]); /* Step 3 - bring in last of TK bits, assign 24-bit WEP IV value * WEPSeed[0..2] is transmitted as WEP IV */ WEPSeed[0] = Hi8(IV16); WEPSeed[1] = (Hi8(IV16) | 0x20) & 0x7F; WEPSeed[2] = Lo8(IV16); WEPSeed[3] = Lo8((PPK[5] ^ Mk16_le((u16 *) &TK[0])) >> 1); #ifdef __BIG_ENDIAN { int i; for (i = 0; i < 6; i++) PPK[i] = (PPK[i] << 8) | (PPK[i] >> 8); } #endif } static int rtllib_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv) { struct rtllib_tkip_data *tkey = priv; int len; u8 *pos; struct rtllib_hdr_4addr *hdr; struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE); struct blkcipher_desc desc = 
{.tfm = tkey->tx_tfm_arc4}; int ret = 0; u8 rc4key[16], *icv; u32 crc; struct scatterlist sg; if (skb_headroom(skb) < 8 || skb_tailroom(skb) < 4 || skb->len < hdr_len) return -1; hdr = (struct rtllib_hdr_4addr *) skb->data; if (!tcb_desc->bHwSec) { if (!tkey->tx_phase1_done) { tkip_mixing_phase1(tkey->tx_ttak, tkey->key, hdr->addr2, tkey->tx_iv32); tkey->tx_phase1_done = 1; } tkip_mixing_phase2(rc4key, tkey->key, tkey->tx_ttak, tkey->tx_iv16); } else tkey->tx_phase1_done = 1; len = skb->len - hdr_len; pos = skb_push(skb, 8); memmove(pos, pos + 8, hdr_len); pos += hdr_len; if (tcb_desc->bHwSec) { *pos++ = Hi8(tkey->tx_iv16); *pos++ = (Hi8(tkey->tx_iv16) | 0x20) & 0x7F; *pos++ = Lo8(tkey->tx_iv16); } else { *pos++ = rc4key[0]; *pos++ = rc4key[1]; *pos++ = rc4key[2]; } *pos++ = (tkey->key_idx << 6) | (1 << 5) /* Ext IV included */; *pos++ = tkey->tx_iv32 & 0xff; *pos++ = (tkey->tx_iv32 >> 8) & 0xff; *pos++ = (tkey->tx_iv32 >> 16) & 0xff; *pos++ = (tkey->tx_iv32 >> 24) & 0xff; if (!tcb_desc->bHwSec) { icv = skb_put(skb, 4); crc = ~crc32_le(~0, pos, len); icv[0] = crc; icv[1] = crc >> 8; icv[2] = crc >> 16; icv[3] = crc >> 24; sg_init_one(&sg, pos, len+4); crypto_blkcipher_setkey(tkey->tx_tfm_arc4, rc4key, 16); ret = crypto_blkcipher_encrypt(&desc, &sg, &sg, len + 4); } tkey->tx_iv16++; if (tkey->tx_iv16 == 0) { tkey->tx_phase1_done = 0; tkey->tx_iv32++; } if (!tcb_desc->bHwSec) return ret; else return 0; } static int rtllib_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv) { struct rtllib_tkip_data *tkey = priv; u8 keyidx, *pos; u32 iv32; u16 iv16; struct rtllib_hdr_4addr *hdr; struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE); struct blkcipher_desc desc = {.tfm = tkey->rx_tfm_arc4}; u8 rc4key[16]; u8 icv[4]; u32 crc; struct scatterlist sg; int plen; if (skb->len < hdr_len + 8 + 4) return -1; hdr = (struct rtllib_hdr_4addr *) skb->data; pos = skb->data + hdr_len; keyidx = pos[3]; if (!(keyidx & (1 << 5))) { if (net_ratelimit()) { 
printk(KERN_DEBUG "TKIP: received packet without ExtIV" " flag from %pM\n", hdr->addr2); } return -2; } keyidx >>= 6; if (tkey->key_idx != keyidx) { printk(KERN_DEBUG "TKIP: RX tkey->key_idx=%d frame " "keyidx=%d priv=%p\n", tkey->key_idx, keyidx, priv); return -6; } if (!tkey->key_set) { if (net_ratelimit()) { printk(KERN_DEBUG "TKIP: received packet from %pM" " with keyid=%d that does not have a configured" " key\n", hdr->addr2, keyidx); } return -3; } iv16 = (pos[0] << 8) | pos[2]; iv32 = pos[4] | (pos[5] << 8) | (pos[6] << 16) | (pos[7] << 24); pos += 8; if (!tcb_desc->bHwSec || (skb->cb[0] == 1)) { if ((iv32 < tkey->rx_iv32 || (iv32 == tkey->rx_iv32 && iv16 <= tkey->rx_iv16)) && tkey->initialized) { if (net_ratelimit()) { printk(KERN_DEBUG "TKIP: replay detected: STA=" " %pM previous TSC %08x%04x received " "TSC %08x%04x\n",hdr->addr2, tkey->rx_iv32, tkey->rx_iv16, iv32, iv16); } tkey->dot11RSNAStatsTKIPReplays++; return -4; } tkey->initialized = true; if (iv32 != tkey->rx_iv32 || !tkey->rx_phase1_done) { tkip_mixing_phase1(tkey->rx_ttak, tkey->key, hdr->addr2, iv32); tkey->rx_phase1_done = 1; } tkip_mixing_phase2(rc4key, tkey->key, tkey->rx_ttak, iv16); plen = skb->len - hdr_len - 12; sg_init_one(&sg, pos, plen+4); crypto_blkcipher_setkey(tkey->rx_tfm_arc4, rc4key, 16); if (crypto_blkcipher_decrypt(&desc, &sg, &sg, plen + 4)) { if (net_ratelimit()) { printk(KERN_DEBUG ": TKIP: failed to decrypt " "received packet from %pM\n", hdr->addr2); } return -7; } crc = ~crc32_le(~0, pos, plen); icv[0] = crc; icv[1] = crc >> 8; icv[2] = crc >> 16; icv[3] = crc >> 24; if (memcmp(icv, pos + plen, 4) != 0) { if (iv32 != tkey->rx_iv32) { /* Previously cached Phase1 result was already * lost, so it needs to be recalculated for the * next packet. 
*/ tkey->rx_phase1_done = 0; } if (net_ratelimit()) { printk(KERN_DEBUG "TKIP: ICV error detected: STA=" " %pM\n", hdr->addr2); } tkey->dot11RSNAStatsTKIPICVErrors++; return -5; } } /* Update real counters only after Michael MIC verification has * completed */ tkey->rx_iv32_new = iv32; tkey->rx_iv16_new = iv16; /* Remove IV and ICV */ memmove(skb->data + 8, skb->data, hdr_len); skb_pull(skb, 8); skb_trim(skb, skb->len - 4); return keyidx; } static int michael_mic(struct crypto_hash *tfm_michael, u8 *key, u8 *hdr, u8 *data, size_t data_len, u8 *mic) { struct hash_desc desc; struct scatterlist sg[2]; if (tfm_michael == NULL) { printk(KERN_WARNING "michael_mic: tfm_michael == NULL\n"); return -1; } sg_init_table(sg, 2); sg_set_buf(&sg[0], hdr, 16); sg_set_buf(&sg[1], data, data_len); if (crypto_hash_setkey(tfm_michael, key, 8)) return -1; desc.tfm = tfm_michael; desc.flags = 0; return crypto_hash_digest(&desc, sg, data_len + 16, mic); } static void michael_mic_hdr(struct sk_buff *skb, u8 *hdr) { struct rtllib_hdr_4addr *hdr11; hdr11 = (struct rtllib_hdr_4addr *) skb->data; switch (le16_to_cpu(hdr11->frame_ctl) & (RTLLIB_FCTL_FROMDS | RTLLIB_FCTL_TODS)) { case RTLLIB_FCTL_TODS: memcpy(hdr, hdr11->addr3, ETH_ALEN); /* DA */ memcpy(hdr + ETH_ALEN, hdr11->addr2, ETH_ALEN); /* SA */ break; case RTLLIB_FCTL_FROMDS: memcpy(hdr, hdr11->addr1, ETH_ALEN); /* DA */ memcpy(hdr + ETH_ALEN, hdr11->addr3, ETH_ALEN); /* SA */ break; case RTLLIB_FCTL_FROMDS | RTLLIB_FCTL_TODS: memcpy(hdr, hdr11->addr3, ETH_ALEN); /* DA */ memcpy(hdr + ETH_ALEN, hdr11->addr4, ETH_ALEN); /* SA */ break; case 0: memcpy(hdr, hdr11->addr1, ETH_ALEN); /* DA */ memcpy(hdr + ETH_ALEN, hdr11->addr2, ETH_ALEN); /* SA */ break; } hdr[12] = 0; /* priority */ hdr[13] = hdr[14] = hdr[15] = 0; /* reserved */ } static int rtllib_michael_mic_add(struct sk_buff *skb, int hdr_len, void *priv) { struct rtllib_tkip_data *tkey = priv; u8 *pos; struct rtllib_hdr_4addr *hdr; hdr = (struct rtllib_hdr_4addr *) skb->data; if 
(skb_tailroom(skb) < 8 || skb->len < hdr_len) { printk(KERN_DEBUG "Invalid packet for Michael MIC add " "(tailroom=%d hdr_len=%d skb->len=%d)\n", skb_tailroom(skb), hdr_len, skb->len); return -1; } michael_mic_hdr(skb, tkey->tx_hdr); if (RTLLIB_QOS_HAS_SEQ(le16_to_cpu(hdr->frame_ctl))) tkey->tx_hdr[12] = *(skb->data + hdr_len - 2) & 0x07; pos = skb_put(skb, 8); if (michael_mic(tkey->tx_tfm_michael, &tkey->key[16], tkey->tx_hdr, skb->data + hdr_len, skb->len - 8 - hdr_len, pos)) return -1; return 0; } static void rtllib_michael_mic_failure(struct net_device *dev, struct rtllib_hdr_4addr *hdr, int keyidx) { union iwreq_data wrqu; struct iw_michaelmicfailure ev; /* TODO: needed parameters: count, keyid, key type, TSC */ memset(&ev, 0, sizeof(ev)); ev.flags = keyidx & IW_MICFAILURE_KEY_ID; if (hdr->addr1[0] & 0x01) ev.flags |= IW_MICFAILURE_GROUP; else ev.flags |= IW_MICFAILURE_PAIRWISE; ev.src_addr.sa_family = ARPHRD_ETHER; memcpy(ev.src_addr.sa_data, hdr->addr2, ETH_ALEN); memset(&wrqu, 0, sizeof(wrqu)); wrqu.data.length = sizeof(ev); wireless_send_event(dev, IWEVMICHAELMICFAILURE, &wrqu, (char *) &ev); } static int rtllib_michael_mic_verify(struct sk_buff *skb, int keyidx, int hdr_len, void *priv) { struct rtllib_tkip_data *tkey = priv; u8 mic[8]; struct rtllib_hdr_4addr *hdr; hdr = (struct rtllib_hdr_4addr *) skb->data; if (!tkey->key_set) return -1; michael_mic_hdr(skb, tkey->rx_hdr); if (RTLLIB_QOS_HAS_SEQ(le16_to_cpu(hdr->frame_ctl))) tkey->rx_hdr[12] = *(skb->data + hdr_len - 2) & 0x07; if (michael_mic(tkey->rx_tfm_michael, &tkey->key[24], tkey->rx_hdr, skb->data + hdr_len, skb->len - 8 - hdr_len, mic)) return -1; if (memcmp(mic, skb->data + skb->len - 8, 8) != 0) { struct rtllib_hdr_4addr *hdr; hdr = (struct rtllib_hdr_4addr *) skb->data; printk(KERN_DEBUG "%s: Michael MIC verification failed for " "MSDU from %pM keyidx=%d\n", skb->dev ? 
skb->dev->name : "N/A", hdr->addr2, keyidx); printk(KERN_DEBUG "%d\n", memcmp(mic, skb->data + skb->len - 8, 8) != 0); if (skb->dev) { printk(KERN_INFO "skb->dev != NULL\n"); rtllib_michael_mic_failure(skb->dev, hdr, keyidx); } tkey->dot11RSNAStatsTKIPLocalMICFailures++; return -1; } /* Update TSC counters for RX now that the packet verification has * completed. */ tkey->rx_iv32 = tkey->rx_iv32_new; tkey->rx_iv16 = tkey->rx_iv16_new; skb_trim(skb, skb->len - 8); return 0; } static int rtllib_tkip_set_key(void *key, int len, u8 *seq, void *priv) { struct rtllib_tkip_data *tkey = priv; int keyidx; struct crypto_hash *tfm = tkey->tx_tfm_michael; struct crypto_blkcipher *tfm2 = tkey->tx_tfm_arc4; struct crypto_hash *tfm3 = tkey->rx_tfm_michael; struct crypto_blkcipher *tfm4 = tkey->rx_tfm_arc4; keyidx = tkey->key_idx; memset(tkey, 0, sizeof(*tkey)); tkey->key_idx = keyidx; tkey->tx_tfm_michael = tfm; tkey->tx_tfm_arc4 = tfm2; tkey->rx_tfm_michael = tfm3; tkey->rx_tfm_arc4 = tfm4; if (len == TKIP_KEY_LEN) { memcpy(tkey->key, key, TKIP_KEY_LEN); tkey->key_set = 1; tkey->tx_iv16 = 1; /* TSC is initialized to 1 */ if (seq) { tkey->rx_iv32 = (seq[5] << 24) | (seq[4] << 16) | (seq[3] << 8) | seq[2]; tkey->rx_iv16 = (seq[1] << 8) | seq[0]; } } else if (len == 0) tkey->key_set = 0; else return -1; return 0; } static int rtllib_tkip_get_key(void *key, int len, u8 *seq, void *priv) { struct rtllib_tkip_data *tkey = priv; if (len < TKIP_KEY_LEN) return -1; if (!tkey->key_set) return 0; memcpy(key, tkey->key, TKIP_KEY_LEN); if (seq) { /* Return the sequence number of the last transmitted frame. 
*/ u16 iv16 = tkey->tx_iv16; u32 iv32 = tkey->tx_iv32; if (iv16 == 0) iv32--; iv16--; seq[0] = tkey->tx_iv16; seq[1] = tkey->tx_iv16 >> 8; seq[2] = tkey->tx_iv32; seq[3] = tkey->tx_iv32 >> 8; seq[4] = tkey->tx_iv32 >> 16; seq[5] = tkey->tx_iv32 >> 24; } return TKIP_KEY_LEN; } static char *rtllib_tkip_print_stats(char *p, void *priv) { struct rtllib_tkip_data *tkip = priv; p += sprintf(p, "key[%d] alg=TKIP key_set=%d " "tx_pn=%02x%02x%02x%02x%02x%02x " "rx_pn=%02x%02x%02x%02x%02x%02x " "replays=%d icv_errors=%d local_mic_failures=%d\n", tkip->key_idx, tkip->key_set, (tkip->tx_iv32 >> 24) & 0xff, (tkip->tx_iv32 >> 16) & 0xff, (tkip->tx_iv32 >> 8) & 0xff, tkip->tx_iv32 & 0xff, (tkip->tx_iv16 >> 8) & 0xff, tkip->tx_iv16 & 0xff, (tkip->rx_iv32 >> 24) & 0xff, (tkip->rx_iv32 >> 16) & 0xff, (tkip->rx_iv32 >> 8) & 0xff, tkip->rx_iv32 & 0xff, (tkip->rx_iv16 >> 8) & 0xff, tkip->rx_iv16 & 0xff, tkip->dot11RSNAStatsTKIPReplays, tkip->dot11RSNAStatsTKIPICVErrors, tkip->dot11RSNAStatsTKIPLocalMICFailures); return p; } static struct lib80211_crypto_ops rtllib_crypt_tkip = { .name = "R-TKIP", .init = rtllib_tkip_init, .deinit = rtllib_tkip_deinit, .encrypt_mpdu = rtllib_tkip_encrypt, .decrypt_mpdu = rtllib_tkip_decrypt, .encrypt_msdu = rtllib_michael_mic_add, .decrypt_msdu = rtllib_michael_mic_verify, .set_key = rtllib_tkip_set_key, .get_key = rtllib_tkip_get_key, .print_stats = rtllib_tkip_print_stats, .extra_mpdu_prefix_len = 4 + 4, /* IV + ExtIV */ .extra_mpdu_postfix_len = 4, /* ICV */ .extra_msdu_postfix_len = 8, /* MIC */ .owner = THIS_MODULE, }; int __init rtllib_crypto_tkip_init(void) { return lib80211_register_crypto_ops(&rtllib_crypt_tkip); } void __exit rtllib_crypto_tkip_exit(void) { lib80211_unregister_crypto_ops(&rtllib_crypt_tkip); } module_init(rtllib_crypto_tkip_init); module_exit(rtllib_crypto_tkip_exit); MODULE_LICENSE("GPL");
gpl-2.0
omnirom/android_kernel_xiaomi_aries
drivers/staging/rtl8192e/rtllib_crypt_ccmp.c
5238
10925
/* * Host AP crypt: host-based CCMP encryption implementation for Host AP driver * * Copyright (c) 2003-2004, Jouni Malinen <jkmaline@cc.hut.fi> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. See README and COPYING for * more details. */ #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/random.h> #include <linux/skbuff.h> #include <linux/netdevice.h> #include <linux/if_ether.h> #include <linux/if_arp.h> #include <linux/string.h> #include <linux/wireless.h> #include "rtllib.h" #include <linux/crypto.h> #include <linux/scatterlist.h> #define AES_BLOCK_LEN 16 #define CCMP_HDR_LEN 8 #define CCMP_MIC_LEN 8 #define CCMP_TK_LEN 16 #define CCMP_PN_LEN 6 struct rtllib_ccmp_data { u8 key[CCMP_TK_LEN]; int key_set; u8 tx_pn[CCMP_PN_LEN]; u8 rx_pn[CCMP_PN_LEN]; u32 dot11RSNAStatsCCMPFormatErrors; u32 dot11RSNAStatsCCMPReplays; u32 dot11RSNAStatsCCMPDecryptErrors; int key_idx; struct crypto_tfm *tfm; /* scratch buffers for virt_to_page() (crypto API) */ u8 tx_b0[AES_BLOCK_LEN], tx_b[AES_BLOCK_LEN], tx_e[AES_BLOCK_LEN], tx_s0[AES_BLOCK_LEN]; u8 rx_b0[AES_BLOCK_LEN], rx_b[AES_BLOCK_LEN], rx_a[AES_BLOCK_LEN]; }; static void rtllib_ccmp_aes_encrypt(struct crypto_tfm *tfm, const u8 pt[16], u8 ct[16]) { crypto_cipher_encrypt_one((void *)tfm, ct, pt); } static void *rtllib_ccmp_init(int key_idx) { struct rtllib_ccmp_data *priv; priv = kzalloc(sizeof(*priv), GFP_ATOMIC); if (priv == NULL) goto fail; priv->key_idx = key_idx; priv->tfm = (void *)crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC); if (IS_ERR(priv->tfm)) { printk(KERN_DEBUG "rtllib_crypt_ccmp: could not allocate " "crypto API aes\n"); priv->tfm = NULL; goto fail; } return priv; fail: if (priv) { if (priv->tfm) crypto_free_cipher((void *)priv->tfm); kfree(priv); } return NULL; } static void rtllib_ccmp_deinit(void *priv) { struct rtllib_ccmp_data 
*_priv = priv; if (_priv && _priv->tfm) crypto_free_cipher((void *)_priv->tfm); kfree(priv); } static inline void xor_block(u8 *b, u8 *a, size_t len) { int i; for (i = 0; i < len; i++) b[i] ^= a[i]; } static void ccmp_init_blocks(struct crypto_tfm *tfm, struct rtllib_hdr_4addr *hdr, u8 *pn, size_t dlen, u8 *b0, u8 *auth, u8 *s0) { u8 *pos, qc = 0; size_t aad_len; u16 fc; int a4_included, qc_included; u8 aad[2 * AES_BLOCK_LEN]; fc = le16_to_cpu(hdr->frame_ctl); a4_included = ((fc & (RTLLIB_FCTL_TODS | RTLLIB_FCTL_FROMDS)) == (RTLLIB_FCTL_TODS | RTLLIB_FCTL_FROMDS)); /* qc_included = ((WLAN_FC_GET_TYPE(fc) == RTLLIB_FTYPE_DATA) && (WLAN_FC_GET_STYPE(fc) & 0x08)); */ qc_included = ((WLAN_FC_GET_TYPE(fc) == RTLLIB_FTYPE_DATA) && (WLAN_FC_GET_STYPE(fc) & 0x80)); aad_len = 22; if (a4_included) aad_len += 6; if (qc_included) { pos = (u8 *) &hdr->addr4; if (a4_included) pos += 6; qc = *pos & 0x0f; aad_len += 2; } /* CCM Initial Block: * Flag (Include authentication header, M=3 (8-octet MIC), * L=1 (2-octet Dlen)) * Nonce: 0x00 | A2 | PN * Dlen */ b0[0] = 0x59; b0[1] = qc; memcpy(b0 + 2, hdr->addr2, ETH_ALEN); memcpy(b0 + 8, pn, CCMP_PN_LEN); b0[14] = (dlen >> 8) & 0xff; b0[15] = dlen & 0xff; /* AAD: * FC with bits 4..6 and 11..13 masked to zero; 14 is always one * A1 | A2 | A3 * SC with bits 4..15 (seq#) masked to zero * A4 (if present) * QC (if present) */ pos = (u8 *) hdr; aad[0] = 0; /* aad_len >> 8 */ aad[1] = aad_len & 0xff; aad[2] = pos[0] & 0x8f; aad[3] = pos[1] & 0xc7; memcpy(aad + 4, hdr->addr1, 3 * ETH_ALEN); pos = (u8 *) &hdr->seq_ctl; aad[22] = pos[0] & 0x0f; aad[23] = 0; /* all bits masked */ memset(aad + 24, 0, 8); if (a4_included) memcpy(aad + 24, hdr->addr4, ETH_ALEN); if (qc_included) { aad[a4_included ? 
30 : 24] = qc; /* rest of QC masked */ } /* Start with the first block and AAD */ rtllib_ccmp_aes_encrypt(tfm, b0, auth); xor_block(auth, aad, AES_BLOCK_LEN); rtllib_ccmp_aes_encrypt(tfm, auth, auth); xor_block(auth, &aad[AES_BLOCK_LEN], AES_BLOCK_LEN); rtllib_ccmp_aes_encrypt(tfm, auth, auth); b0[0] &= 0x07; b0[14] = b0[15] = 0; rtllib_ccmp_aes_encrypt(tfm, b0, s0); } static int rtllib_ccmp_encrypt(struct sk_buff *skb, int hdr_len, void *priv) { struct rtllib_ccmp_data *key = priv; int data_len, i; u8 *pos; struct rtllib_hdr_4addr *hdr; struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE); if (skb_headroom(skb) < CCMP_HDR_LEN || skb_tailroom(skb) < CCMP_MIC_LEN || skb->len < hdr_len) return -1; data_len = skb->len - hdr_len; pos = skb_push(skb, CCMP_HDR_LEN); memmove(pos, pos + CCMP_HDR_LEN, hdr_len); pos += hdr_len; i = CCMP_PN_LEN - 1; while (i >= 0) { key->tx_pn[i]++; if (key->tx_pn[i] != 0) break; i--; } *pos++ = key->tx_pn[5]; *pos++ = key->tx_pn[4]; *pos++ = 0; *pos++ = (key->key_idx << 6) | (1 << 5) /* Ext IV included */; *pos++ = key->tx_pn[3]; *pos++ = key->tx_pn[2]; *pos++ = key->tx_pn[1]; *pos++ = key->tx_pn[0]; hdr = (struct rtllib_hdr_4addr *) skb->data; if (!tcb_desc->bHwSec) { int blocks, last, len; u8 *mic; u8 *b0 = key->tx_b0; u8 *b = key->tx_b; u8 *e = key->tx_e; u8 *s0 = key->tx_s0; mic = skb_put(skb, CCMP_MIC_LEN); ccmp_init_blocks(key->tfm, hdr, key->tx_pn, data_len, b0, b, s0); blocks = (data_len + AES_BLOCK_LEN - 1) / AES_BLOCK_LEN; last = data_len % AES_BLOCK_LEN; for (i = 1; i <= blocks; i++) { len = (i == blocks && last) ? 
last : AES_BLOCK_LEN; /* Authentication */ xor_block(b, pos, len); rtllib_ccmp_aes_encrypt(key->tfm, b, b); /* Encryption, with counter */ b0[14] = (i >> 8) & 0xff; b0[15] = i & 0xff; rtllib_ccmp_aes_encrypt(key->tfm, b0, e); xor_block(pos, e, len); pos += len; } for (i = 0; i < CCMP_MIC_LEN; i++) mic[i] = b[i] ^ s0[i]; } return 0; } static int rtllib_ccmp_decrypt(struct sk_buff *skb, int hdr_len, void *priv) { struct rtllib_ccmp_data *key = priv; u8 keyidx, *pos; struct rtllib_hdr_4addr *hdr; struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE); u8 pn[6]; if (skb->len < hdr_len + CCMP_HDR_LEN + CCMP_MIC_LEN) { key->dot11RSNAStatsCCMPFormatErrors++; return -1; } hdr = (struct rtllib_hdr_4addr *) skb->data; pos = skb->data + hdr_len; keyidx = pos[3]; if (!(keyidx & (1 << 5))) { if (net_ratelimit()) { printk(KERN_DEBUG "CCMP: received packet without ExtIV" " flag from %pM\n", hdr->addr2); } key->dot11RSNAStatsCCMPFormatErrors++; return -2; } keyidx >>= 6; if (key->key_idx != keyidx) { printk(KERN_DEBUG "CCMP: RX tkey->key_idx=%d frame " "keyidx=%d priv=%p\n", key->key_idx, keyidx, priv); return -6; } if (!key->key_set) { if (net_ratelimit()) { printk(KERN_DEBUG "CCMP: received packet from %pM" " with keyid=%d that does not have a configured" " key\n", hdr->addr2, keyidx); } return -3; } pn[0] = pos[7]; pn[1] = pos[6]; pn[2] = pos[5]; pn[3] = pos[4]; pn[4] = pos[1]; pn[5] = pos[0]; pos += 8; if (memcmp(pn, key->rx_pn, CCMP_PN_LEN) <= 0) { key->dot11RSNAStatsCCMPReplays++; return -4; } if (!tcb_desc->bHwSec) { size_t data_len = skb->len - hdr_len - CCMP_HDR_LEN - CCMP_MIC_LEN; u8 *mic = skb->data + skb->len - CCMP_MIC_LEN; u8 *b0 = key->rx_b0; u8 *b = key->rx_b; u8 *a = key->rx_a; int i, blocks, last, len; ccmp_init_blocks(key->tfm, hdr, pn, data_len, b0, a, b); xor_block(mic, b, CCMP_MIC_LEN); blocks = (data_len + AES_BLOCK_LEN - 1) / AES_BLOCK_LEN; last = data_len % AES_BLOCK_LEN; for (i = 1; i <= blocks; i++) { len = (i == blocks && last) ? 
last : AES_BLOCK_LEN; /* Decrypt, with counter */ b0[14] = (i >> 8) & 0xff; b0[15] = i & 0xff; rtllib_ccmp_aes_encrypt(key->tfm, b0, b); xor_block(pos, b, len); /* Authentication */ xor_block(a, pos, len); rtllib_ccmp_aes_encrypt(key->tfm, a, a); pos += len; } if (memcmp(mic, a, CCMP_MIC_LEN) != 0) { if (net_ratelimit()) { printk(KERN_DEBUG "CCMP: decrypt failed: STA=" " %pM\n", hdr->addr2); } key->dot11RSNAStatsCCMPDecryptErrors++; return -5; } memcpy(key->rx_pn, pn, CCMP_PN_LEN); } /* Remove hdr and MIC */ memmove(skb->data + CCMP_HDR_LEN, skb->data, hdr_len); skb_pull(skb, CCMP_HDR_LEN); skb_trim(skb, skb->len - CCMP_MIC_LEN); return keyidx; } static int rtllib_ccmp_set_key(void *key, int len, u8 *seq, void *priv) { struct rtllib_ccmp_data *data = priv; int keyidx; struct crypto_tfm *tfm = data->tfm; keyidx = data->key_idx; memset(data, 0, sizeof(*data)); data->key_idx = keyidx; data->tfm = tfm; if (len == CCMP_TK_LEN) { memcpy(data->key, key, CCMP_TK_LEN); data->key_set = 1; if (seq) { data->rx_pn[0] = seq[5]; data->rx_pn[1] = seq[4]; data->rx_pn[2] = seq[3]; data->rx_pn[3] = seq[2]; data->rx_pn[4] = seq[1]; data->rx_pn[5] = seq[0]; } crypto_cipher_setkey((void *)data->tfm, data->key, CCMP_TK_LEN); } else if (len == 0) data->key_set = 0; else return -1; return 0; } static int rtllib_ccmp_get_key(void *key, int len, u8 *seq, void *priv) { struct rtllib_ccmp_data *data = priv; if (len < CCMP_TK_LEN) return -1; if (!data->key_set) return 0; memcpy(key, data->key, CCMP_TK_LEN); if (seq) { seq[0] = data->tx_pn[5]; seq[1] = data->tx_pn[4]; seq[2] = data->tx_pn[3]; seq[3] = data->tx_pn[2]; seq[4] = data->tx_pn[1]; seq[5] = data->tx_pn[0]; } return CCMP_TK_LEN; } static char *rtllib_ccmp_print_stats(char *p, void *priv) { struct rtllib_ccmp_data *ccmp = priv; p += sprintf(p, "key[%d] alg=CCMP key_set=%d " "tx_pn=%pM rx_pn=%pM " "format_errors=%d replays=%d decrypt_errors=%d\n", ccmp->key_idx, ccmp->key_set, ccmp->tx_pn, ccmp->rx_pn, 
ccmp->dot11RSNAStatsCCMPFormatErrors, ccmp->dot11RSNAStatsCCMPReplays, ccmp->dot11RSNAStatsCCMPDecryptErrors); return p; } static struct lib80211_crypto_ops rtllib_crypt_ccmp = { .name = "R-CCMP", .init = rtllib_ccmp_init, .deinit = rtllib_ccmp_deinit, .encrypt_mpdu = rtllib_ccmp_encrypt, .decrypt_mpdu = rtllib_ccmp_decrypt, .encrypt_msdu = NULL, .decrypt_msdu = NULL, .set_key = rtllib_ccmp_set_key, .get_key = rtllib_ccmp_get_key, .print_stats = rtllib_ccmp_print_stats, .extra_mpdu_prefix_len = CCMP_HDR_LEN, .extra_mpdu_postfix_len = CCMP_MIC_LEN, .owner = THIS_MODULE, }; int __init rtllib_crypto_ccmp_init(void) { return lib80211_register_crypto_ops(&rtllib_crypt_ccmp); } void __exit rtllib_crypto_ccmp_exit(void) { lib80211_unregister_crypto_ops(&rtllib_crypt_ccmp); } module_init(rtllib_crypto_ccmp_init); module_exit(rtllib_crypto_ccmp_exit); MODULE_LICENSE("GPL");
gpl-2.0
F4uzan/lge-kernel-lproj
drivers/net/wireless/iwlegacy/3945-debug.c
7542
19561
/****************************************************************************** * * GPL LICENSE SUMMARY * * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, * USA * * The full GNU General Public License is included in this distribution * in the file called LICENSE.GPL. * * Contact Information: * Intel Linux Wireless <ilw@linux.intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 *****************************************************************************/ #include "common.h" #include "3945.h" static int il3945_stats_flag(struct il_priv *il, char *buf, int bufsz) { int p = 0; p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n", le32_to_cpu(il->_3945.stats.flag)); if (le32_to_cpu(il->_3945.stats.flag) & UCODE_STATS_CLEAR_MSK) p += scnprintf(buf + p, bufsz - p, "\tStatistics have been cleared\n"); p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n", (le32_to_cpu(il->_3945.stats.flag) & UCODE_STATS_FREQUENCY_MSK) ? "2.4 GHz" : "5.2 GHz"); p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n", (le32_to_cpu(il->_3945.stats.flag) & UCODE_STATS_NARROW_BAND_MSK) ? 
"enabled" : "disabled"); return p; } ssize_t il3945_ucode_rx_stats_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct il_priv *il = file->private_data; int pos = 0; char *buf; int bufsz = sizeof(struct iwl39_stats_rx_phy) * 40 + sizeof(struct iwl39_stats_rx_non_phy) * 40 + 400; ssize_t ret; struct iwl39_stats_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm; struct iwl39_stats_rx_phy *cck, *accum_cck, *delta_cck, *max_cck; struct iwl39_stats_rx_non_phy *general, *accum_general; struct iwl39_stats_rx_non_phy *delta_general, *max_general; if (!il_is_alive(il)) return -EAGAIN; buf = kzalloc(bufsz, GFP_KERNEL); if (!buf) { IL_ERR("Can not allocate Buffer\n"); return -ENOMEM; } /* * The statistic information display here is based on * the last stats notification from uCode * might not reflect the current uCode activity */ ofdm = &il->_3945.stats.rx.ofdm; cck = &il->_3945.stats.rx.cck; general = &il->_3945.stats.rx.general; accum_ofdm = &il->_3945.accum_stats.rx.ofdm; accum_cck = &il->_3945.accum_stats.rx.cck; accum_general = &il->_3945.accum_stats.rx.general; delta_ofdm = &il->_3945.delta_stats.rx.ofdm; delta_cck = &il->_3945.delta_stats.rx.cck; delta_general = &il->_3945.delta_stats.rx.general; max_ofdm = &il->_3945.max_delta.rx.ofdm; max_cck = &il->_3945.max_delta.rx.cck; max_general = &il->_3945.max_delta.rx.general; pos += il3945_stats_flag(il, buf, bufsz); pos += scnprintf(buf + pos, bufsz - pos, "%-32s current" "acumulative delta max\n", "Statistics_Rx - OFDM:"); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "ina_cnt:", le32_to_cpu(ofdm->ina_cnt), accum_ofdm->ina_cnt, delta_ofdm->ina_cnt, max_ofdm->ina_cnt); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "fina_cnt:", le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt, delta_ofdm->fina_cnt, max_ofdm->fina_cnt); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "plcp_err:", le32_to_cpu(ofdm->plcp_err), 
accum_ofdm->plcp_err, delta_ofdm->plcp_err, max_ofdm->plcp_err); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "crc32_err:", le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err, delta_ofdm->crc32_err, max_ofdm->crc32_err); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "overrun_err:", le32_to_cpu(ofdm->overrun_err), accum_ofdm->overrun_err, delta_ofdm->overrun_err, max_ofdm->overrun_err); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "early_overrun_err:", le32_to_cpu(ofdm->early_overrun_err), accum_ofdm->early_overrun_err, delta_ofdm->early_overrun_err, max_ofdm->early_overrun_err); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "crc32_good:", le32_to_cpu(ofdm->crc32_good), accum_ofdm->crc32_good, delta_ofdm->crc32_good, max_ofdm->crc32_good); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "false_alarm_cnt:", le32_to_cpu(ofdm->false_alarm_cnt), accum_ofdm->false_alarm_cnt, delta_ofdm->false_alarm_cnt, max_ofdm->false_alarm_cnt); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "fina_sync_err_cnt:", le32_to_cpu(ofdm->fina_sync_err_cnt), accum_ofdm->fina_sync_err_cnt, delta_ofdm->fina_sync_err_cnt, max_ofdm->fina_sync_err_cnt); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "sfd_timeout:", le32_to_cpu(ofdm->sfd_timeout), accum_ofdm->sfd_timeout, delta_ofdm->sfd_timeout, max_ofdm->sfd_timeout); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "fina_timeout:", le32_to_cpu(ofdm->fina_timeout), accum_ofdm->fina_timeout, delta_ofdm->fina_timeout, max_ofdm->fina_timeout); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "unresponded_rts:", le32_to_cpu(ofdm->unresponded_rts), accum_ofdm->unresponded_rts, delta_ofdm->unresponded_rts, max_ofdm->unresponded_rts); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", 
"rxe_frame_lmt_ovrun:", le32_to_cpu(ofdm->rxe_frame_limit_overrun), accum_ofdm->rxe_frame_limit_overrun, delta_ofdm->rxe_frame_limit_overrun, max_ofdm->rxe_frame_limit_overrun); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "sent_ack_cnt:", le32_to_cpu(ofdm->sent_ack_cnt), accum_ofdm->sent_ack_cnt, delta_ofdm->sent_ack_cnt, max_ofdm->sent_ack_cnt); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "sent_cts_cnt:", le32_to_cpu(ofdm->sent_cts_cnt), accum_ofdm->sent_cts_cnt, delta_ofdm->sent_cts_cnt, max_ofdm->sent_cts_cnt); pos += scnprintf(buf + pos, bufsz - pos, "%-32s current" "acumulative delta max\n", "Statistics_Rx - CCK:"); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "ina_cnt:", le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt, delta_cck->ina_cnt, max_cck->ina_cnt); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "fina_cnt:", le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt, delta_cck->fina_cnt, max_cck->fina_cnt); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "plcp_err:", le32_to_cpu(cck->plcp_err), accum_cck->plcp_err, delta_cck->plcp_err, max_cck->plcp_err); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "crc32_err:", le32_to_cpu(cck->crc32_err), accum_cck->crc32_err, delta_cck->crc32_err, max_cck->crc32_err); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "overrun_err:", le32_to_cpu(cck->overrun_err), accum_cck->overrun_err, delta_cck->overrun_err, max_cck->overrun_err); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "early_overrun_err:", le32_to_cpu(cck->early_overrun_err), accum_cck->early_overrun_err, delta_cck->early_overrun_err, max_cck->early_overrun_err); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "crc32_good:", le32_to_cpu(cck->crc32_good), accum_cck->crc32_good, delta_cck->crc32_good, max_cck->crc32_good); pos += 
scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "false_alarm_cnt:", le32_to_cpu(cck->false_alarm_cnt), accum_cck->false_alarm_cnt, delta_cck->false_alarm_cnt, max_cck->false_alarm_cnt); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "fina_sync_err_cnt:", le32_to_cpu(cck->fina_sync_err_cnt), accum_cck->fina_sync_err_cnt, delta_cck->fina_sync_err_cnt, max_cck->fina_sync_err_cnt); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "sfd_timeout:", le32_to_cpu(cck->sfd_timeout), accum_cck->sfd_timeout, delta_cck->sfd_timeout, max_cck->sfd_timeout); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "fina_timeout:", le32_to_cpu(cck->fina_timeout), accum_cck->fina_timeout, delta_cck->fina_timeout, max_cck->fina_timeout); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "unresponded_rts:", le32_to_cpu(cck->unresponded_rts), accum_cck->unresponded_rts, delta_cck->unresponded_rts, max_cck->unresponded_rts); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "rxe_frame_lmt_ovrun:", le32_to_cpu(cck->rxe_frame_limit_overrun), accum_cck->rxe_frame_limit_overrun, delta_cck->rxe_frame_limit_overrun, max_cck->rxe_frame_limit_overrun); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "sent_ack_cnt:", le32_to_cpu(cck->sent_ack_cnt), accum_cck->sent_ack_cnt, delta_cck->sent_ack_cnt, max_cck->sent_ack_cnt); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "sent_cts_cnt:", le32_to_cpu(cck->sent_cts_cnt), accum_cck->sent_cts_cnt, delta_cck->sent_cts_cnt, max_cck->sent_cts_cnt); pos += scnprintf(buf + pos, bufsz - pos, "%-32s current" "acumulative delta max\n", "Statistics_Rx - GENERAL:"); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "bogus_cts:", le32_to_cpu(general->bogus_cts), accum_general->bogus_cts, delta_general->bogus_cts, max_general->bogus_cts); pos += scnprintf(buf + pos, 
bufsz - pos, " %-30s %10u %10u %10u %10u\n", "bogus_ack:", le32_to_cpu(general->bogus_ack), accum_general->bogus_ack, delta_general->bogus_ack, max_general->bogus_ack); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "non_bssid_frames:", le32_to_cpu(general->non_bssid_frames), accum_general->non_bssid_frames, delta_general->non_bssid_frames, max_general->non_bssid_frames); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "filtered_frames:", le32_to_cpu(general->filtered_frames), accum_general->filtered_frames, delta_general->filtered_frames, max_general->filtered_frames); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "non_channel_beacons:", le32_to_cpu(general->non_channel_beacons), accum_general->non_channel_beacons, delta_general->non_channel_beacons, max_general->non_channel_beacons); ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); kfree(buf); return ret; } ssize_t il3945_ucode_tx_stats_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct il_priv *il = file->private_data; int pos = 0; char *buf; int bufsz = (sizeof(struct iwl39_stats_tx) * 48) + 250; ssize_t ret; struct iwl39_stats_tx *tx, *accum_tx, *delta_tx, *max_tx; if (!il_is_alive(il)) return -EAGAIN; buf = kzalloc(bufsz, GFP_KERNEL); if (!buf) { IL_ERR("Can not allocate Buffer\n"); return -ENOMEM; } /* * The statistic information display here is based on * the last stats notification from uCode * might not reflect the current uCode activity */ tx = &il->_3945.stats.tx; accum_tx = &il->_3945.accum_stats.tx; delta_tx = &il->_3945.delta_stats.tx; max_tx = &il->_3945.max_delta.tx; pos += il3945_stats_flag(il, buf, bufsz); pos += scnprintf(buf + pos, bufsz - pos, "%-32s current" "acumulative delta max\n", "Statistics_Tx:"); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "preamble:", le32_to_cpu(tx->preamble_cnt), accum_tx->preamble_cnt, delta_tx->preamble_cnt, 
max_tx->preamble_cnt); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "rx_detected_cnt:", le32_to_cpu(tx->rx_detected_cnt), accum_tx->rx_detected_cnt, delta_tx->rx_detected_cnt, max_tx->rx_detected_cnt); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "bt_prio_defer_cnt:", le32_to_cpu(tx->bt_prio_defer_cnt), accum_tx->bt_prio_defer_cnt, delta_tx->bt_prio_defer_cnt, max_tx->bt_prio_defer_cnt); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "bt_prio_kill_cnt:", le32_to_cpu(tx->bt_prio_kill_cnt), accum_tx->bt_prio_kill_cnt, delta_tx->bt_prio_kill_cnt, max_tx->bt_prio_kill_cnt); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "few_bytes_cnt:", le32_to_cpu(tx->few_bytes_cnt), accum_tx->few_bytes_cnt, delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "cts_timeout:", le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout, delta_tx->cts_timeout, max_tx->cts_timeout); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "ack_timeout:", le32_to_cpu(tx->ack_timeout), accum_tx->ack_timeout, delta_tx->ack_timeout, max_tx->ack_timeout); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "expected_ack_cnt:", le32_to_cpu(tx->expected_ack_cnt), accum_tx->expected_ack_cnt, delta_tx->expected_ack_cnt, max_tx->expected_ack_cnt); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "actual_ack_cnt:", le32_to_cpu(tx->actual_ack_cnt), accum_tx->actual_ack_cnt, delta_tx->actual_ack_cnt, max_tx->actual_ack_cnt); ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); kfree(buf); return ret; } ssize_t il3945_ucode_general_stats_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct il_priv *il = file->private_data; int pos = 0; char *buf; int bufsz = sizeof(struct iwl39_stats_general) * 10 + 300; ssize_t ret; struct 
iwl39_stats_general *general, *accum_general; struct iwl39_stats_general *delta_general, *max_general; struct stats_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg; struct iwl39_stats_div *div, *accum_div, *delta_div, *max_div; if (!il_is_alive(il)) return -EAGAIN; buf = kzalloc(bufsz, GFP_KERNEL); if (!buf) { IL_ERR("Can not allocate Buffer\n"); return -ENOMEM; } /* * The statistic information display here is based on * the last stats notification from uCode * might not reflect the current uCode activity */ general = &il->_3945.stats.general; dbg = &il->_3945.stats.general.dbg; div = &il->_3945.stats.general.div; accum_general = &il->_3945.accum_stats.general; delta_general = &il->_3945.delta_stats.general; max_general = &il->_3945.max_delta.general; accum_dbg = &il->_3945.accum_stats.general.dbg; delta_dbg = &il->_3945.delta_stats.general.dbg; max_dbg = &il->_3945.max_delta.general.dbg; accum_div = &il->_3945.accum_stats.general.div; delta_div = &il->_3945.delta_stats.general.div; max_div = &il->_3945.max_delta.general.div; pos += il3945_stats_flag(il, buf, bufsz); pos += scnprintf(buf + pos, bufsz - pos, "%-32s current" "acumulative delta max\n", "Statistics_General:"); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "burst_check:", le32_to_cpu(dbg->burst_check), accum_dbg->burst_check, delta_dbg->burst_check, max_dbg->burst_check); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "burst_count:", le32_to_cpu(dbg->burst_count), accum_dbg->burst_count, delta_dbg->burst_count, max_dbg->burst_count); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "sleep_time:", le32_to_cpu(general->sleep_time), accum_general->sleep_time, delta_general->sleep_time, max_general->sleep_time); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "slots_out:", le32_to_cpu(general->slots_out), accum_general->slots_out, delta_general->slots_out, max_general->slots_out); pos += scnprintf(buf + pos, 
bufsz - pos, " %-30s %10u %10u %10u %10u\n", "slots_idle:", le32_to_cpu(general->slots_idle), accum_general->slots_idle, delta_general->slots_idle, max_general->slots_idle); pos += scnprintf(buf + pos, bufsz - pos, "ttl_timestamp:\t\t\t%u\n", le32_to_cpu(general->ttl_timestamp)); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "tx_on_a:", le32_to_cpu(div->tx_on_a), accum_div->tx_on_a, delta_div->tx_on_a, max_div->tx_on_a); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "tx_on_b:", le32_to_cpu(div->tx_on_b), accum_div->tx_on_b, delta_div->tx_on_b, max_div->tx_on_b); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "exec_time:", le32_to_cpu(div->exec_time), accum_div->exec_time, delta_div->exec_time, max_div->exec_time); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "probe_time:", le32_to_cpu(div->probe_time), accum_div->probe_time, delta_div->probe_time, max_div->probe_time); ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); kfree(buf); return ret; } const struct il_debugfs_ops il3945_debugfs_ops = { .rx_stats_read = il3945_ucode_rx_stats_read, .tx_stats_read = il3945_ucode_tx_stats_read, .general_stats_read = il3945_ucode_general_stats_read, };
gpl-2.0
TEAM-RAZOR-DEVICES/kernel_cyanogen_msm8916
arch/arm/mach-w90x900/irq.c
8822
4136
/* * linux/arch/arm/mach-w90x900/irq.c * * based on linux/arch/arm/plat-s3c24xx/irq.c by Ben Dooks * * Copyright (c) 2008 Nuvoton technology corporation * All rights reserved. * * Wan ZongShun <mcuos.com@gmail.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation;version 2 of the License. * */ #include <linux/init.h> #include <linux/module.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/ptrace.h> #include <linux/device.h> #include <linux/io.h> #include <asm/irq.h> #include <asm/mach/irq.h> #include <mach/hardware.h> #include <mach/regs-irq.h> #include "nuc9xx.h" struct group_irq { unsigned long gpen; unsigned int enabled; void (*enable)(struct group_irq *, int enable); }; static DEFINE_SPINLOCK(groupirq_lock); #define DEFINE_GROUP(_name, _ctrlbit, _num) \ struct group_irq group_##_name = { \ .enable = nuc900_group_enable, \ .gpen = ((1 << _num) - 1) << _ctrlbit, \ } static void nuc900_group_enable(struct group_irq *gpirq, int enable); static DEFINE_GROUP(nirq0, 0, 4); static DEFINE_GROUP(nirq1, 4, 4); static DEFINE_GROUP(usbh, 8, 2); static DEFINE_GROUP(ottimer, 16, 3); static DEFINE_GROUP(gdma, 20, 2); static DEFINE_GROUP(sc, 24, 2); static DEFINE_GROUP(i2c, 26, 2); static DEFINE_GROUP(ps2, 28, 2); static int group_irq_enable(struct group_irq *group_irq) { unsigned long flags; spin_lock_irqsave(&groupirq_lock, flags); if (group_irq->enabled++ == 0) (group_irq->enable)(group_irq, 1); spin_unlock_irqrestore(&groupirq_lock, flags); return 0; } static void group_irq_disable(struct group_irq *group_irq) { unsigned long flags; WARN_ON(group_irq->enabled == 0); spin_lock_irqsave(&groupirq_lock, flags); if (--group_irq->enabled == 0) (group_irq->enable)(group_irq, 0); spin_unlock_irqrestore(&groupirq_lock, flags); } static void nuc900_group_enable(struct group_irq *gpirq, int enable) { unsigned int groupen = gpirq->gpen; 
unsigned long regval; regval = __raw_readl(REG_AIC_GEN); if (enable) regval |= groupen; else regval &= ~groupen; __raw_writel(regval, REG_AIC_GEN); } static void nuc900_irq_mask(struct irq_data *d) { struct group_irq *group_irq; group_irq = NULL; __raw_writel(1 << d->irq, REG_AIC_MDCR); switch (d->irq) { case IRQ_GROUP0: group_irq = &group_nirq0; break; case IRQ_GROUP1: group_irq = &group_nirq1; break; case IRQ_USBH: group_irq = &group_usbh; break; case IRQ_T_INT_GROUP: group_irq = &group_ottimer; break; case IRQ_GDMAGROUP: group_irq = &group_gdma; break; case IRQ_SCGROUP: group_irq = &group_sc; break; case IRQ_I2CGROUP: group_irq = &group_i2c; break; case IRQ_P2SGROUP: group_irq = &group_ps2; break; } if (group_irq) group_irq_disable(group_irq); } /* * By the w90p910 spec,any irq,only write 1 * to REG_AIC_EOSCR for ACK */ static void nuc900_irq_ack(struct irq_data *d) { __raw_writel(0x01, REG_AIC_EOSCR); } static void nuc900_irq_unmask(struct irq_data *d) { struct group_irq *group_irq; group_irq = NULL; __raw_writel(1 << d->irq, REG_AIC_MECR); switch (d->irq) { case IRQ_GROUP0: group_irq = &group_nirq0; break; case IRQ_GROUP1: group_irq = &group_nirq1; break; case IRQ_USBH: group_irq = &group_usbh; break; case IRQ_T_INT_GROUP: group_irq = &group_ottimer; break; case IRQ_GDMAGROUP: group_irq = &group_gdma; break; case IRQ_SCGROUP: group_irq = &group_sc; break; case IRQ_I2CGROUP: group_irq = &group_i2c; break; case IRQ_P2SGROUP: group_irq = &group_ps2; break; } if (group_irq) group_irq_enable(group_irq); } static struct irq_chip nuc900_irq_chip = { .irq_ack = nuc900_irq_ack, .irq_mask = nuc900_irq_mask, .irq_unmask = nuc900_irq_unmask, }; void __init nuc900_init_irq(void) { int irqno; __raw_writel(0xFFFFFFFE, REG_AIC_MDCR); for (irqno = IRQ_WDT; irqno <= IRQ_ADC; irqno++) { irq_set_chip_and_handler(irqno, &nuc900_irq_chip, handle_level_irq); set_irq_flags(irqno, IRQF_VALID); } }
gpl-2.0
luca020400/android_kernel_motorola_msm8226
drivers/char/nvram.c
8822
17468
/* * CMOS/NV-RAM driver for Linux * * Copyright (C) 1997 Roman Hodek <Roman.Hodek@informatik.uni-erlangen.de> * idea by and with help from Richard Jelinek <rj@suse.de> * Portions copyright (c) 2001,2002 Sun Microsystems (thockin@sun.com) * * This driver allows you to access the contents of the non-volatile memory in * the mc146818rtc.h real-time clock. This chip is built into all PCs and into * many Atari machines. In the former it's called "CMOS-RAM", in the latter * "NVRAM" (NV stands for non-volatile). * * The data are supplied as a (seekable) character device, /dev/nvram. The * size of this file is dependent on the controller. The usual size is 114, * the number of freely available bytes in the memory (i.e., not used by the * RTC itself). * * Checksums over the NVRAM contents are managed by this driver. In case of a * bad checksum, reads and writes return -EIO. The checksum can be initialized * to a sane state either by ioctl(NVRAM_INIT) (clear whole NVRAM) or * ioctl(NVRAM_SETCKS) (doesn't change contents, just makes checksum valid * again; use with care!) * * This file also provides some functions for other parts of the kernel that * want to access the NVRAM: nvram_{read,write,check_checksum,set_checksum}. * Obviously this can be used only if this driver is always configured into * the kernel and is not a module. Since the functions are used by some Atari * drivers, this is the case on the Atari. * * * 1.1 Cesar Barros: SMP locking fixes * added changelog * 1.2 Erik Gilling: Cobalt Networks support * Tim Hockin: general cleanup, Cobalt support * 1.3 Wim Van Sebroeck: convert PRINT_PROC to seq_file */ #define NVRAM_VERSION "1.3" #include <linux/module.h> #include <linux/nvram.h> #define PC 1 #define ATARI 2 /* select machine configuration */ #if defined(CONFIG_ATARI) # define MACH ATARI #elif defined(__i386__) || defined(__x86_64__) || defined(__arm__) /* and ?? */ # define MACH PC #else # error Cannot build nvram driver for this machine configuration. 
#endif #if MACH == PC /* RTC in a PC */ #define CHECK_DRIVER_INIT() 1 /* On PCs, the checksum is built only over bytes 2..31 */ #define PC_CKS_RANGE_START 2 #define PC_CKS_RANGE_END 31 #define PC_CKS_LOC 32 #define NVRAM_BYTES (128-NVRAM_FIRST_BYTE) #define mach_check_checksum pc_check_checksum #define mach_set_checksum pc_set_checksum #define mach_proc_infos pc_proc_infos #endif #if MACH == ATARI /* Special parameters for RTC in Atari machines */ #include <asm/atarihw.h> #include <asm/atariints.h> #define RTC_PORT(x) (TT_RTC_BAS + 2*(x)) #define CHECK_DRIVER_INIT() (MACH_IS_ATARI && ATARIHW_PRESENT(TT_CLK)) #define NVRAM_BYTES 50 /* On Ataris, the checksum is over all bytes except the checksum bytes * themselves; these are at the very end */ #define ATARI_CKS_RANGE_START 0 #define ATARI_CKS_RANGE_END 47 #define ATARI_CKS_LOC 48 #define mach_check_checksum atari_check_checksum #define mach_set_checksum atari_set_checksum #define mach_proc_infos atari_proc_infos #endif /* Note that *all* calls to CMOS_READ and CMOS_WRITE must be done with * rtc_lock held. Due to the index-port/data-port design of the RTC, we * don't want two different things trying to get to it at once. (e.g. the * periodic 11 min sync from kernel/time/ntp.c vs. this driver.) 
*/ #include <linux/types.h> #include <linux/errno.h> #include <linux/miscdevice.h> #include <linux/ioport.h> #include <linux/fcntl.h> #include <linux/mc146818rtc.h> #include <linux/init.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/spinlock.h> #include <linux/io.h> #include <linux/uaccess.h> #include <linux/mutex.h> static DEFINE_MUTEX(nvram_mutex); static DEFINE_SPINLOCK(nvram_state_lock); static int nvram_open_cnt; /* #times opened */ static int nvram_open_mode; /* special open modes */ #define NVRAM_WRITE 1 /* opened for writing (exclusive) */ #define NVRAM_EXCL 2 /* opened with O_EXCL */ static int mach_check_checksum(void); static void mach_set_checksum(void); #ifdef CONFIG_PROC_FS static void mach_proc_infos(unsigned char *contents, struct seq_file *seq, void *offset); #endif /* * These functions are provided to be called internally or by other parts of * the kernel. It's up to the caller to ensure correct checksum before reading * or after writing (needs to be done only once). * * It is worth noting that these functions all access bytes of general * purpose memory in the NVRAM - that is to say, they all add the * NVRAM_FIRST_BYTE offset. Pass them offsets into NVRAM as if you did not * know about the RTC cruft. 
*/ unsigned char __nvram_read_byte(int i) { return CMOS_READ(NVRAM_FIRST_BYTE + i); } EXPORT_SYMBOL(__nvram_read_byte); unsigned char nvram_read_byte(int i) { unsigned long flags; unsigned char c; spin_lock_irqsave(&rtc_lock, flags); c = __nvram_read_byte(i); spin_unlock_irqrestore(&rtc_lock, flags); return c; } EXPORT_SYMBOL(nvram_read_byte); /* This races nicely with trying to read with checksum checking (nvram_read) */ void __nvram_write_byte(unsigned char c, int i) { CMOS_WRITE(c, NVRAM_FIRST_BYTE + i); } EXPORT_SYMBOL(__nvram_write_byte); void nvram_write_byte(unsigned char c, int i) { unsigned long flags; spin_lock_irqsave(&rtc_lock, flags); __nvram_write_byte(c, i); spin_unlock_irqrestore(&rtc_lock, flags); } EXPORT_SYMBOL(nvram_write_byte); int __nvram_check_checksum(void) { return mach_check_checksum(); } EXPORT_SYMBOL(__nvram_check_checksum); int nvram_check_checksum(void) { unsigned long flags; int rv; spin_lock_irqsave(&rtc_lock, flags); rv = __nvram_check_checksum(); spin_unlock_irqrestore(&rtc_lock, flags); return rv; } EXPORT_SYMBOL(nvram_check_checksum); static void __nvram_set_checksum(void) { mach_set_checksum(); } #if 0 void nvram_set_checksum(void) { unsigned long flags; spin_lock_irqsave(&rtc_lock, flags); __nvram_set_checksum(); spin_unlock_irqrestore(&rtc_lock, flags); } #endif /* 0 */ /* * The are the file operation function for user access to /dev/nvram */ static loff_t nvram_llseek(struct file *file, loff_t offset, int origin) { switch (origin) { case 0: /* nothing to do */ break; case 1: offset += file->f_pos; break; case 2: offset += NVRAM_BYTES; break; default: return -EINVAL; } return (offset >= 0) ? 
(file->f_pos = offset) : -EINVAL; } static ssize_t nvram_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { unsigned char contents[NVRAM_BYTES]; unsigned i = *ppos; unsigned char *tmp; spin_lock_irq(&rtc_lock); if (!__nvram_check_checksum()) goto checksum_err; for (tmp = contents; count-- > 0 && i < NVRAM_BYTES; ++i, ++tmp) *tmp = __nvram_read_byte(i); spin_unlock_irq(&rtc_lock); if (copy_to_user(buf, contents, tmp - contents)) return -EFAULT; *ppos = i; return tmp - contents; checksum_err: spin_unlock_irq(&rtc_lock); return -EIO; } static ssize_t nvram_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { unsigned char contents[NVRAM_BYTES]; unsigned i = *ppos; unsigned char *tmp; if (i >= NVRAM_BYTES) return 0; /* Past EOF */ if (count > NVRAM_BYTES - i) count = NVRAM_BYTES - i; if (count > NVRAM_BYTES) return -EFAULT; /* Can't happen, but prove it to gcc */ if (copy_from_user(contents, buf, count)) return -EFAULT; spin_lock_irq(&rtc_lock); if (!__nvram_check_checksum()) goto checksum_err; for (tmp = contents; count--; ++i, ++tmp) __nvram_write_byte(*tmp, i); __nvram_set_checksum(); spin_unlock_irq(&rtc_lock); *ppos = i; return tmp - contents; checksum_err: spin_unlock_irq(&rtc_lock); return -EIO; } static long nvram_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int i; switch (cmd) { case NVRAM_INIT: /* initialize NVRAM contents and checksum */ if (!capable(CAP_SYS_ADMIN)) return -EACCES; mutex_lock(&nvram_mutex); spin_lock_irq(&rtc_lock); for (i = 0; i < NVRAM_BYTES; ++i) __nvram_write_byte(0, i); __nvram_set_checksum(); spin_unlock_irq(&rtc_lock); mutex_unlock(&nvram_mutex); return 0; case NVRAM_SETCKS: /* just set checksum, contents unchanged (maybe useful after * checksum garbaged somehow...) 
*/ if (!capable(CAP_SYS_ADMIN)) return -EACCES; mutex_lock(&nvram_mutex); spin_lock_irq(&rtc_lock); __nvram_set_checksum(); spin_unlock_irq(&rtc_lock); mutex_unlock(&nvram_mutex); return 0; default: return -ENOTTY; } } static int nvram_open(struct inode *inode, struct file *file) { spin_lock(&nvram_state_lock); if ((nvram_open_cnt && (file->f_flags & O_EXCL)) || (nvram_open_mode & NVRAM_EXCL) || ((file->f_mode & FMODE_WRITE) && (nvram_open_mode & NVRAM_WRITE))) { spin_unlock(&nvram_state_lock); return -EBUSY; } if (file->f_flags & O_EXCL) nvram_open_mode |= NVRAM_EXCL; if (file->f_mode & FMODE_WRITE) nvram_open_mode |= NVRAM_WRITE; nvram_open_cnt++; spin_unlock(&nvram_state_lock); return 0; } static int nvram_release(struct inode *inode, struct file *file) { spin_lock(&nvram_state_lock); nvram_open_cnt--; /* if only one instance is open, clear the EXCL bit */ if (nvram_open_mode & NVRAM_EXCL) nvram_open_mode &= ~NVRAM_EXCL; if (file->f_mode & FMODE_WRITE) nvram_open_mode &= ~NVRAM_WRITE; spin_unlock(&nvram_state_lock); return 0; } #ifndef CONFIG_PROC_FS static int nvram_add_proc_fs(void) { return 0; } #else static int nvram_proc_read(struct seq_file *seq, void *offset) { unsigned char contents[NVRAM_BYTES]; int i = 0; spin_lock_irq(&rtc_lock); for (i = 0; i < NVRAM_BYTES; ++i) contents[i] = __nvram_read_byte(i); spin_unlock_irq(&rtc_lock); mach_proc_infos(contents, seq, offset); return 0; } static int nvram_proc_open(struct inode *inode, struct file *file) { return single_open(file, nvram_proc_read, NULL); } static const struct file_operations nvram_proc_fops = { .owner = THIS_MODULE, .open = nvram_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static int nvram_add_proc_fs(void) { if (!proc_create("driver/nvram", 0, NULL, &nvram_proc_fops)) return -ENOMEM; return 0; } #endif /* CONFIG_PROC_FS */ static const struct file_operations nvram_fops = { .owner = THIS_MODULE, .llseek = nvram_llseek, .read = nvram_read, .write = nvram_write, 
.unlocked_ioctl = nvram_ioctl, .open = nvram_open, .release = nvram_release, }; static struct miscdevice nvram_dev = { NVRAM_MINOR, "nvram", &nvram_fops }; static int __init nvram_init(void) { int ret; /* First test whether the driver should init at all */ if (!CHECK_DRIVER_INIT()) return -ENODEV; ret = misc_register(&nvram_dev); if (ret) { printk(KERN_ERR "nvram: can't misc_register on minor=%d\n", NVRAM_MINOR); goto out; } ret = nvram_add_proc_fs(); if (ret) { printk(KERN_ERR "nvram: can't create /proc/driver/nvram\n"); goto outmisc; } ret = 0; printk(KERN_INFO "Non-volatile memory driver v" NVRAM_VERSION "\n"); out: return ret; outmisc: misc_deregister(&nvram_dev); goto out; } static void __exit nvram_cleanup_module(void) { remove_proc_entry("driver/nvram", NULL); misc_deregister(&nvram_dev); } module_init(nvram_init); module_exit(nvram_cleanup_module); /* * Machine specific functions */ #if MACH == PC static int pc_check_checksum(void) { int i; unsigned short sum = 0; unsigned short expect; for (i = PC_CKS_RANGE_START; i <= PC_CKS_RANGE_END; ++i) sum += __nvram_read_byte(i); expect = __nvram_read_byte(PC_CKS_LOC)<<8 | __nvram_read_byte(PC_CKS_LOC+1); return (sum & 0xffff) == expect; } static void pc_set_checksum(void) { int i; unsigned short sum = 0; for (i = PC_CKS_RANGE_START; i <= PC_CKS_RANGE_END; ++i) sum += __nvram_read_byte(i); __nvram_write_byte(sum >> 8, PC_CKS_LOC); __nvram_write_byte(sum & 0xff, PC_CKS_LOC + 1); } #ifdef CONFIG_PROC_FS static char *floppy_types[] = { "none", "5.25'' 360k", "5.25'' 1.2M", "3.5'' 720k", "3.5'' 1.44M", "3.5'' 2.88M", "3.5'' 2.88M" }; static char *gfx_types[] = { "EGA, VGA, ... (with BIOS)", "CGA (40 cols)", "CGA (80 cols)", "monochrome", }; static void pc_proc_infos(unsigned char *nvram, struct seq_file *seq, void *offset) { int checksum; int type; spin_lock_irq(&rtc_lock); checksum = __nvram_check_checksum(); spin_unlock_irq(&rtc_lock); seq_printf(seq, "Checksum status: %svalid\n", checksum ? 
"" : "not "); seq_printf(seq, "# floppies : %d\n", (nvram[6] & 1) ? (nvram[6] >> 6) + 1 : 0); seq_printf(seq, "Floppy 0 type : "); type = nvram[2] >> 4; if (type < ARRAY_SIZE(floppy_types)) seq_printf(seq, "%s\n", floppy_types[type]); else seq_printf(seq, "%d (unknown)\n", type); seq_printf(seq, "Floppy 1 type : "); type = nvram[2] & 0x0f; if (type < ARRAY_SIZE(floppy_types)) seq_printf(seq, "%s\n", floppy_types[type]); else seq_printf(seq, "%d (unknown)\n", type); seq_printf(seq, "HD 0 type : "); type = nvram[4] >> 4; if (type) seq_printf(seq, "%02x\n", type == 0x0f ? nvram[11] : type); else seq_printf(seq, "none\n"); seq_printf(seq, "HD 1 type : "); type = nvram[4] & 0x0f; if (type) seq_printf(seq, "%02x\n", type == 0x0f ? nvram[12] : type); else seq_printf(seq, "none\n"); seq_printf(seq, "HD type 48 data: %d/%d/%d C/H/S, precomp %d, lz %d\n", nvram[18] | (nvram[19] << 8), nvram[20], nvram[25], nvram[21] | (nvram[22] << 8), nvram[23] | (nvram[24] << 8)); seq_printf(seq, "HD type 49 data: %d/%d/%d C/H/S, precomp %d, lz %d\n", nvram[39] | (nvram[40] << 8), nvram[41], nvram[46], nvram[42] | (nvram[43] << 8), nvram[44] | (nvram[45] << 8)); seq_printf(seq, "DOS base memory: %d kB\n", nvram[7] | (nvram[8] << 8)); seq_printf(seq, "Extended memory: %d kB (configured), %d kB (tested)\n", nvram[9] | (nvram[10] << 8), nvram[34] | (nvram[35] << 8)); seq_printf(seq, "Gfx adapter : %s\n", gfx_types[(nvram[6] >> 4) & 3]); seq_printf(seq, "FPU : %sinstalled\n", (nvram[6] & 2) ? 
"" : "not "); return; } #endif #endif /* MACH == PC */ #if MACH == ATARI static int atari_check_checksum(void) { int i; unsigned char sum = 0; for (i = ATARI_CKS_RANGE_START; i <= ATARI_CKS_RANGE_END; ++i) sum += __nvram_read_byte(i); return (__nvram_read_byte(ATARI_CKS_LOC) == (~sum & 0xff)) && (__nvram_read_byte(ATARI_CKS_LOC + 1) == (sum & 0xff)); } static void atari_set_checksum(void) { int i; unsigned char sum = 0; for (i = ATARI_CKS_RANGE_START; i <= ATARI_CKS_RANGE_END; ++i) sum += __nvram_read_byte(i); __nvram_write_byte(~sum, ATARI_CKS_LOC); __nvram_write_byte(sum, ATARI_CKS_LOC + 1); } #ifdef CONFIG_PROC_FS static struct { unsigned char val; char *name; } boot_prefs[] = { { 0x80, "TOS" }, { 0x40, "ASV" }, { 0x20, "NetBSD (?)" }, { 0x10, "Linux" }, { 0x00, "unspecified" } }; static char *languages[] = { "English (US)", "German", "French", "English (UK)", "Spanish", "Italian", "6 (undefined)", "Swiss (French)", "Swiss (German)" }; static char *dateformat[] = { "MM%cDD%cYY", "DD%cMM%cYY", "YY%cMM%cDD", "YY%cDD%cMM", "4 (undefined)", "5 (undefined)", "6 (undefined)", "7 (undefined)" }; static char *colors[] = { "2", "4", "16", "256", "65536", "??", "??", "??" }; static void atari_proc_infos(unsigned char *nvram, struct seq_file *seq, void *offset) { int checksum = nvram_check_checksum(); int i; unsigned vmode; seq_printf(seq, "Checksum status : %svalid\n", checksum ? "" : "not "); seq_printf(seq, "Boot preference : "); for (i = ARRAY_SIZE(boot_prefs) - 1; i >= 0; --i) { if (nvram[1] == boot_prefs[i].val) { seq_printf(seq, "%s\n", boot_prefs[i].name); break; } } if (i < 0) seq_printf(seq, "0x%02x (undefined)\n", nvram[1]); seq_printf(seq, "SCSI arbitration : %s\n", (nvram[16] & 0x80) ? 
"on" : "off"); seq_printf(seq, "SCSI host ID : "); if (nvram[16] & 0x80) seq_printf(seq, "%d\n", nvram[16] & 7); else seq_printf(seq, "n/a\n"); /* the following entries are defined only for the Falcon */ if ((atari_mch_cookie >> 16) != ATARI_MCH_FALCON) return; seq_printf(seq, "OS language : "); if (nvram[6] < ARRAY_SIZE(languages)) seq_printf(seq, "%s\n", languages[nvram[6]]); else seq_printf(seq, "%u (undefined)\n", nvram[6]); seq_printf(seq, "Keyboard language: "); if (nvram[7] < ARRAY_SIZE(languages)) seq_printf(seq, "%s\n", languages[nvram[7]]); else seq_printf(seq, "%u (undefined)\n", nvram[7]); seq_printf(seq, "Date format : "); seq_printf(seq, dateformat[nvram[8] & 7], nvram[9] ? nvram[9] : '/', nvram[9] ? nvram[9] : '/'); seq_printf(seq, ", %dh clock\n", nvram[8] & 16 ? 24 : 12); seq_printf(seq, "Boot delay : "); if (nvram[10] == 0) seq_printf(seq, "default"); else seq_printf(seq, "%ds%s\n", nvram[10], nvram[10] < 8 ? ", no memory test" : ""); vmode = (nvram[14] << 8) || nvram[15]; seq_printf(seq, "Video mode : %s colors, %d columns, %s %s monitor\n", colors[vmode & 7], vmode & 8 ? 80 : 40, vmode & 16 ? "VGA" : "TV", vmode & 32 ? "PAL" : "NTSC"); seq_printf(seq, " %soverscan, compat. mode %s%s\n", vmode & 64 ? "" : "no ", vmode & 128 ? "on" : "off", vmode & 256 ? (vmode & 16 ? ", line doubling" : ", half screen") : ""); return; } #endif #endif /* MACH == ATARI */ MODULE_LICENSE("GPL"); MODULE_ALIAS_MISCDEV(NVRAM_MINOR);
gpl-2.0
jbott/android_kernel_lge_hammerhead
fs/ocfs2/cluster/masklog.c
11126
3894
/* -*- mode: c; c-basic-offset: 8; -*- * vim: noexpandtab sw=8 ts=8 sts=0: * * Copyright (C) 2004, 2005 Oracle. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 021110-1307, USA. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/string.h> #include <asm/uaccess.h> #include "masklog.h" struct mlog_bits mlog_and_bits = MLOG_BITS_RHS(MLOG_INITIAL_AND_MASK); EXPORT_SYMBOL_GPL(mlog_and_bits); struct mlog_bits mlog_not_bits = MLOG_BITS_RHS(0); EXPORT_SYMBOL_GPL(mlog_not_bits); static ssize_t mlog_mask_show(u64 mask, char *buf) { char *state; if (__mlog_test_u64(mask, mlog_and_bits)) state = "allow"; else if (__mlog_test_u64(mask, mlog_not_bits)) state = "deny"; else state = "off"; return snprintf(buf, PAGE_SIZE, "%s\n", state); } static ssize_t mlog_mask_store(u64 mask, const char *buf, size_t count) { if (!strnicmp(buf, "allow", 5)) { __mlog_set_u64(mask, mlog_and_bits); __mlog_clear_u64(mask, mlog_not_bits); } else if (!strnicmp(buf, "deny", 4)) { __mlog_set_u64(mask, mlog_not_bits); __mlog_clear_u64(mask, mlog_and_bits); } else if (!strnicmp(buf, "off", 3)) { __mlog_clear_u64(mask, mlog_not_bits); __mlog_clear_u64(mask, mlog_and_bits); } else return -EINVAL; return count; } struct mlog_attribute { struct attribute attr; u64 mask; }; #define 
to_mlog_attr(_attr) container_of(_attr, struct mlog_attribute, attr) #define define_mask(_name) { \ .attr = { \ .name = #_name, \ .mode = S_IRUGO | S_IWUSR, \ }, \ .mask = ML_##_name, \ } static struct mlog_attribute mlog_attrs[MLOG_MAX_BITS] = { define_mask(TCP), define_mask(MSG), define_mask(SOCKET), define_mask(HEARTBEAT), define_mask(HB_BIO), define_mask(DLMFS), define_mask(DLM), define_mask(DLM_DOMAIN), define_mask(DLM_THREAD), define_mask(DLM_MASTER), define_mask(DLM_RECOVERY), define_mask(DLM_GLUE), define_mask(VOTE), define_mask(CONN), define_mask(QUORUM), define_mask(BASTS), define_mask(CLUSTER), define_mask(ERROR), define_mask(NOTICE), define_mask(KTHREAD), }; static struct attribute *mlog_attr_ptrs[MLOG_MAX_BITS] = {NULL, }; static ssize_t mlog_show(struct kobject *obj, struct attribute *attr, char *buf) { struct mlog_attribute *mlog_attr = to_mlog_attr(attr); return mlog_mask_show(mlog_attr->mask, buf); } static ssize_t mlog_store(struct kobject *obj, struct attribute *attr, const char *buf, size_t count) { struct mlog_attribute *mlog_attr = to_mlog_attr(attr); return mlog_mask_store(mlog_attr->mask, buf, count); } static const struct sysfs_ops mlog_attr_ops = { .show = mlog_show, .store = mlog_store, }; static struct kobj_type mlog_ktype = { .default_attrs = mlog_attr_ptrs, .sysfs_ops = &mlog_attr_ops, }; static struct kset mlog_kset = { .kobj = {.ktype = &mlog_ktype}, }; int mlog_sys_init(struct kset *o2cb_kset) { int i = 0; while (mlog_attrs[i].attr.mode) { mlog_attr_ptrs[i] = &mlog_attrs[i].attr; i++; } mlog_attr_ptrs[i] = NULL; kobject_set_name(&mlog_kset.kobj, "logmask"); mlog_kset.kobj.kset = o2cb_kset; return kset_register(&mlog_kset); } void mlog_sys_shutdown(void) { kset_unregister(&mlog_kset); }
gpl-2.0
santod/KK_sense_kernel_htc_m7vzw
mm/percpu.c
375
36048
/* * mm/percpu.c - percpu memory allocator * * Copyright (C) 2009 SUSE Linux Products GmbH * Copyright (C) 2009 Tejun Heo <tj@kernel.org> * * This file is released under the GPLv2. * * This is percpu allocator which can handle both static and dynamic * areas. Percpu areas are allocated in chunks. Each chunk is * consisted of boot-time determined number of units and the first * chunk is used for static percpu variables in the kernel image * (special boot time alloc/init handling necessary as these areas * need to be brought up before allocation services are running). * Unit grows as necessary and all units grow or shrink in unison. * When a chunk is filled up, another chunk is allocated. * * c0 c1 c2 * ------------------- ------------------- ------------ * | u0 | u1 | u2 | u3 | | u0 | u1 | u2 | u3 | | u0 | u1 | u * ------------------- ...... ------------------- .... ------------ * * Allocation is done in offset-size areas of single unit space. Ie, * an area of 512 bytes at 6k in c1 occupies 512 bytes at 6k of c1:u0, * c1:u1, c1:u2 and c1:u3. On UMA, units corresponds directly to * cpus. On NUMA, the mapping can be non-linear and even sparse. * Percpu access can be done by configuring percpu base registers * according to cpu to unit mapping and pcpu_unit_size. * * There are usually many small percpu allocations many of them being * as small as 4 bytes. The allocator organizes chunks into lists * according to free size and tries to allocate from the fullest one. * Each chunk keeps the maximum contiguous area size hint which is * guaranteed to be equal to or larger than the maximum contiguous * area in the chunk. This helps the allocator not to iterate the * chunk maps unnecessarily. * * Allocation state in each chunk is kept using an array of integers * on chunk->map. A positive value in the map represents a free * region and negative allocated. Allocation inside a chunk is done * by scanning this map sequentially and serving the first matching * entry. 
This is mostly copied from the percpu_modalloc() allocator. * Chunks can be determined from the address using the index field * in the page struct. The index field contains a pointer to the chunk. * * To use this allocator, arch code should do the followings. * * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate * regular address to percpu pointer and back if they need to be * different from the default * * - use pcpu_setup_first_chunk() during percpu area initialization to * setup the first chunk containing the kernel static percpu area */ #include <linux/bitmap.h> #include <linux/bootmem.h> #include <linux/err.h> #include <linux/list.h> #include <linux/log2.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/percpu.h> #include <linux/pfn.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/vmalloc.h> #include <linux/workqueue.h> #include <linux/kmemleak.h> #include <asm/cacheflush.h> #include <asm/sections.h> #include <asm/tlbflush.h> #include <asm/io.h> #define PCPU_SLOT_BASE_SHIFT 5 #define PCPU_DFL_MAP_ALLOC 16 #ifdef CONFIG_SMP #ifndef __addr_to_pcpu_ptr #define __addr_to_pcpu_ptr(addr) \ (void __percpu *)((unsigned long)(addr) - \ (unsigned long)pcpu_base_addr + \ (unsigned long)__per_cpu_start) #endif #ifndef __pcpu_ptr_to_addr #define __pcpu_ptr_to_addr(ptr) \ (void __force *)((unsigned long)(ptr) + \ (unsigned long)pcpu_base_addr - \ (unsigned long)__per_cpu_start) #endif #else #define __addr_to_pcpu_ptr(addr) (void __percpu *)(addr) #define __pcpu_ptr_to_addr(ptr) (void __force *)(ptr) #endif struct pcpu_chunk { struct list_head list; int free_size; int contig_hint; void *base_addr; int map_used; int map_alloc; int *map; void *data; bool immutable; unsigned long populated[]; }; static int pcpu_unit_pages __read_mostly; static int pcpu_unit_size __read_mostly; static int pcpu_nr_units __read_mostly; static int pcpu_atom_size __read_mostly; static int pcpu_nr_slots __read_mostly; 
static size_t pcpu_chunk_struct_size __read_mostly; static unsigned int pcpu_low_unit_cpu __read_mostly; static unsigned int pcpu_high_unit_cpu __read_mostly; void *pcpu_base_addr __read_mostly; EXPORT_SYMBOL_GPL(pcpu_base_addr); static const int *pcpu_unit_map __read_mostly; const unsigned long *pcpu_unit_offsets __read_mostly; static int pcpu_nr_groups __read_mostly; static const unsigned long *pcpu_group_offsets __read_mostly; static const size_t *pcpu_group_sizes __read_mostly; static struct pcpu_chunk *pcpu_first_chunk; static struct pcpu_chunk *pcpu_reserved_chunk; static int pcpu_reserved_chunk_limit; static DEFINE_MUTEX(pcpu_alloc_mutex); static DEFINE_SPINLOCK(pcpu_lock); static struct list_head *pcpu_slot __read_mostly; static void pcpu_reclaim(struct work_struct *work); static DECLARE_WORK(pcpu_reclaim_work, pcpu_reclaim); static bool pcpu_addr_in_first_chunk(void *addr) { void *first_start = pcpu_first_chunk->base_addr; return addr >= first_start && addr < first_start + pcpu_unit_size; } static bool pcpu_addr_in_reserved_chunk(void *addr) { void *first_start = pcpu_first_chunk->base_addr; return addr >= first_start && addr < first_start + pcpu_reserved_chunk_limit; } static int __pcpu_size_to_slot(int size) { int highbit = fls(size); return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1); } static int pcpu_size_to_slot(int size) { if (size == pcpu_unit_size) return pcpu_nr_slots - 1; return __pcpu_size_to_slot(size); } static int pcpu_chunk_slot(const struct pcpu_chunk *chunk) { if (chunk->free_size < sizeof(int) || chunk->contig_hint < sizeof(int)) return 0; return pcpu_size_to_slot(chunk->free_size); } static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu) { page->index = (unsigned long)pcpu; } static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page) { return (struct pcpu_chunk *)page->index; } static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx) { return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx; } 
/* virtual address of @page_idx'th page of @cpu's unit in @chunk */
static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
				     unsigned int cpu, int page_idx)
{
	return (unsigned long)chunk->base_addr + pcpu_unit_offsets[cpu] +
		(page_idx << PAGE_SHIFT);
}

/* advance (*rs, *re) to the next unpopulated region in [*rs, end) */
static void __maybe_unused pcpu_next_unpop(struct pcpu_chunk *chunk,
					   int *rs, int *re, int end)
{
	*rs = find_next_zero_bit(chunk->populated, end, *rs);
	*re = find_next_bit(chunk->populated, end, *rs + 1);
}

/* advance (*rs, *re) to the next populated region in [*rs, end) */
static void __maybe_unused pcpu_next_pop(struct pcpu_chunk *chunk,
					 int *rs, int *re, int end)
{
	*rs = find_next_bit(chunk->populated, end, *rs);
	*re = find_next_zero_bit(chunk->populated, end, *rs + 1);
}

/*
 * (Un)populated page region iterators.  Iterate over (un)populated
 * page regions between @start and @end in @chunk.  @rs and @re should
 * be integer variables and will be set to start and end page index of
 * the current region.
 */
#define pcpu_for_each_unpop_region(chunk, rs, re, start, end)		    \
	for ((rs) = (start), pcpu_next_unpop((chunk), &(rs), &(re), (end)); \
	     (rs) < (re);						    \
	     (rs) = (re) + 1, pcpu_next_unpop((chunk), &(rs), &(re), (end)))

#define pcpu_for_each_pop_region(chunk, rs, re, start, end)		    \
	for ((rs) = (start), pcpu_next_pop((chunk), &(rs), &(re), (end));   \
	     (rs) < (re);						    \
	     (rs) = (re) + 1, pcpu_next_pop((chunk), &(rs), &(re), (end)))

/*
 * Allocate zeroed memory for allocator metadata: kzalloc for up to a
 * page, vzalloc beyond that.  Must only be used once slab is up.
 */
static void *pcpu_mem_zalloc(size_t size)
{
	if (WARN_ON_ONCE(!slab_is_available()))
		return NULL;

	if (size <= PAGE_SIZE)
		return kzalloc(size, GFP_KERNEL);
	else
		return vzalloc(size);
}

/* counterpart of pcpu_mem_zalloc(); @size picks kfree vs vfree */
static void pcpu_mem_free(void *ptr, size_t size)
{
	if (size <= PAGE_SIZE)
		kfree(ptr);
	else
		vfree(ptr);
}

/*
 * Move @chunk to the slot matching its current free size.  Moved to
 * the head when growing, the tail when shrinking.  The reserved chunk
 * is never on a slot list and is left alone.  Caller holds pcpu_lock.
 */
static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
{
	int nslot = pcpu_chunk_slot(chunk);

	if (chunk != pcpu_reserved_chunk && oslot != nslot) {
		if (oslot < nslot)
			list_move(&chunk->list, &pcpu_slot[nslot]);
		else
			list_move_tail(&chunk->list, &pcpu_slot[nslot]);
	}
}

/*
 * Return 0 if @chunk's area map can take one more allocation (which
 * may split an area, hence the +2 margin); otherwise return the new
 * map size (a power-of-two multiple of PCPU_DFL_MAP_ALLOC) to extend to.
 */
static int pcpu_need_to_extend(struct pcpu_chunk *chunk)
{
	int new_alloc;

	if (chunk->map_alloc >= chunk->map_used + 2)
		return 0;

	new_alloc = PCPU_DFL_MAP_ALLOC;
	while (new_alloc < chunk->map_used + 2)
		new_alloc *= 2;

	return new_alloc;
}

/*
 * Grow @chunk's area map to @new_alloc entries.  Allocates outside
 * pcpu_lock, then swaps the map under the lock; bails out silently if
 * someone else already extended far enough.  Returns 0 or -ENOMEM.
 */
static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
{
	int *old = NULL, *new = NULL;
	size_t old_size = 0, new_size = new_alloc * sizeof(new[0]);
	unsigned long flags;

	new = pcpu_mem_zalloc(new_size);
	if (!new)
		return -ENOMEM;

	/* acquire pcpu_lock and switch to new area map */
	spin_lock_irqsave(&pcpu_lock, flags);

	if (new_alloc <= chunk->map_alloc)
		goto out_unlock;

	old_size = chunk->map_alloc * sizeof(chunk->map[0]);
	old = chunk->map;

	memcpy(new, old, old_size);

	chunk->map_alloc = new_alloc;
	chunk->map = new;
	new = NULL;

out_unlock:
	spin_unlock_irqrestore(&pcpu_lock, flags);

	/*
	 * pcpu_mem_free() tolerates NULL; exactly one of old/new is
	 * freed depending on whether the swap happened.
	 */
	pcpu_mem_free(old, old_size);
	pcpu_mem_free(new, new_size);

	return 0;
}

/*
 * Split map entry @i so that @head and/or @tail bytes become separate
 * free entries around the middle area.  Caller guarantees the map has
 * room (see pcpu_need_to_extend()) and holds pcpu_lock.
 */
static void pcpu_split_block(struct pcpu_chunk *chunk, int i,
			     int head, int tail)
{
	int nr_extra = !!head + !!tail;

	BUG_ON(chunk->map_alloc < chunk->map_used + nr_extra);

	/* insert new subblocks behind the existing block */
	memmove(&chunk->map[i + nr_extra], &chunk->map[i],
		sizeof(chunk->map[0]) * (chunk->map_used - i));
	chunk->map_used += nr_extra;

	if (head) {
		chunk->map[i + 1] = chunk->map[i] - head;
		chunk->map[i++] = head;
	}
	if (tail) {
		chunk->map[i++] -= tail;
		chunk->map[i] = tail;
	}
}

/*
 * Allocate @size bytes aligned to @align from @chunk.  Map entries
 * are byte counts, negated when in use.  Returns the unit offset on
 * success, -1 if the chunk can't fit the request (in which case
 * contig_hint is refreshed so the caller won't retry needlessly).
 * Caller holds pcpu_lock.
 */
static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align)
{
	int oslot = pcpu_chunk_slot(chunk);
	int max_contig = 0;
	int i, off;

	for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++])) {
		bool is_last = i + 1 == chunk->map_used;
		int head, tail;

		/* extra for alignment requirement */
		head = ALIGN(off, align) - off;
		BUG_ON(i == 0 && head != 0);

		if (chunk->map[i] < 0)
			continue;
		if (chunk->map[i] < head + size) {
			max_contig = max(chunk->map[i], max_contig);
			continue;
		}

		/*
		 * If head is small or the previous block is free,
		 * merge'em.  Note that 'small' is defined as smaller
		 * than sizeof(int), which is very small but isn't too
		 * uncommon for percpu allocations.
		 */
		if (head && (head < sizeof(int) || chunk->map[i - 1] > 0)) {
			if (chunk->map[i - 1] > 0)
				chunk->map[i - 1] += head;
			else {
				chunk->map[i - 1] -= head;
				chunk->free_size -= head;
			}
			chunk->map[i] -= head;
			off += head;
			head = 0;
		}

		/* if tail is small, just keep it around */
		tail = chunk->map[i] - head - size;
		if (tail < sizeof(int))
			tail = 0;

		/* split if warranted */
		if (head || tail) {
			pcpu_split_block(chunk, i, head, tail);
			if (head) {
				i++;
				off += head;
				max_contig = max(chunk->map[i - 1], max_contig);
			}
			if (tail)
				max_contig = max(chunk->map[i + 1], max_contig);
		}

		/* update hint and mark allocated */
		if (is_last)
			chunk->contig_hint = max_contig; /* fully scanned */
		else
			chunk->contig_hint = max(chunk->contig_hint,
						 max_contig);

		chunk->free_size -= chunk->map[i];
		chunk->map[i] = -chunk->map[i];

		pcpu_chunk_relocate(chunk, oslot);
		return off;
	}

	chunk->contig_hint = max_contig;	/* fully scanned */
	pcpu_chunk_relocate(chunk, oslot);

	/* tell the upper layer that this chunk has no matching area */
	return -1;
}

/*
 * Free the area starting at unit offset @freeme in @chunk, merging
 * with adjacent free entries.  Caller holds pcpu_lock.
 */
static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme)
{
	int oslot = pcpu_chunk_slot(chunk);
	int i, off;

	for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++]))
		if (off == freeme)
			break;
	BUG_ON(off != freeme);
	BUG_ON(chunk->map[i] > 0);

	chunk->map[i] = -chunk->map[i];
	chunk->free_size += chunk->map[i];

	/* merge with previous? */
	if (i > 0 && chunk->map[i - 1] >= 0) {
		chunk->map[i - 1] += chunk->map[i];
		chunk->map_used--;
		memmove(&chunk->map[i], &chunk->map[i + 1],
			(chunk->map_used - i) * sizeof(chunk->map[0]));
		i--;
	}
	/* merge with next? */
	if (i + 1 < chunk->map_used && chunk->map[i + 1] >= 0) {
		chunk->map[i] += chunk->map[i + 1];
		chunk->map_used--;
		memmove(&chunk->map[i + 1], &chunk->map[i + 2],
			(chunk->map_used - (i + 1)) * sizeof(chunk->map[0]));
	}

	chunk->contig_hint = max(chunk->map[i], chunk->contig_hint);
	pcpu_chunk_relocate(chunk, oslot);
}

/* allocate and initialize an empty chunk (whole unit free, one map entry) */
static struct pcpu_chunk *pcpu_alloc_chunk(void)
{
	struct pcpu_chunk *chunk;

	chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size);
	if (!chunk)
		return NULL;

	chunk->map = pcpu_mem_zalloc(PCPU_DFL_MAP_ALLOC *
						sizeof(chunk->map[0]));
	if (!chunk->map) {
		kfree(chunk);
		return NULL;
	}

	chunk->map_alloc = PCPU_DFL_MAP_ALLOC;
	chunk->map[chunk->map_used++] = pcpu_unit_size;

	INIT_LIST_HEAD(&chunk->list);
	chunk->free_size = pcpu_unit_size;
	chunk->contig_hint = pcpu_unit_size;

	return chunk;
}

/* free a chunk's area map and the chunk itself; tolerates NULL */
static void pcpu_free_chunk(struct pcpu_chunk *chunk)
{
	if (!chunk)
		return;
	pcpu_mem_free(chunk->map, chunk->map_alloc * sizeof(chunk->map[0]));
	kfree(chunk);
}

/*
 * Chunk management implementation hooks, provided by percpu-km.c or
 * percpu-vm.c included below depending on CONFIG_NEED_PER_CPU_KM.
 */
static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size);
static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size);
static struct pcpu_chunk *pcpu_create_chunk(void);
static void pcpu_destroy_chunk(struct pcpu_chunk *chunk);
static struct page *pcpu_addr_to_page(void *addr);
static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai);

#ifdef CONFIG_NEED_PER_CPU_KM
#include "percpu-km.c"
#else
#include "percpu-vm.c"
#endif

/*
 * Find the chunk containing @addr.  The first chunk is special-cased
 * (its pages may not carry chunk back-pointers); everything else is
 * looked up via the page's stored chunk pointer.  Note that the unit
 * offset of the *current* cpu is applied, so @addr must be a percpu
 * pointer already offset for some unit - the containing chunk is the
 * same for all units.
 */
static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
{
	/* is it in the first chunk? */
	if (pcpu_addr_in_first_chunk(addr)) {
		/* is it in the reserved area? */
		if (pcpu_addr_in_reserved_chunk(addr))
			return pcpu_reserved_chunk;
		return pcpu_first_chunk;
	}

	addr += pcpu_unit_offsets[raw_smp_processor_id()];
	return pcpu_get_page_chunk(pcpu_addr_to_page(addr));
}

/*
 * The allocator proper.  @reserved selects the reserved chunk (module
 * static percpu) vs the dynamic slot lists.  pcpu_lock is dropped and
 * re-acquired around map extension and chunk creation, with a restart
 * of the scan afterwards since the lists may have changed.  Returns a
 * percpu pointer or NULL (rate-limited warning via warn_limit).
 */
static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved)
{
	static int warn_limit = 10;
	struct pcpu_chunk *chunk;
	const char *err;
	int slot, off, new_alloc;
	unsigned long flags;
	void __percpu *ptr;

	if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
		WARN(true, "illegal size (%zu) or align (%zu) for "
		     "percpu allocation\n", size, align);
		return NULL;
	}

	mutex_lock(&pcpu_alloc_mutex);
	spin_lock_irqsave(&pcpu_lock, flags);

	/* serve reserved allocations from the reserved chunk if available */
	if (reserved && pcpu_reserved_chunk) {
		chunk = pcpu_reserved_chunk;

		if (size > chunk->contig_hint) {
			err = "alloc from reserved chunk failed";
			goto fail_unlock;
		}

		while ((new_alloc = pcpu_need_to_extend(chunk))) {
			spin_unlock_irqrestore(&pcpu_lock, flags);
			if (pcpu_extend_area_map(chunk, new_alloc) < 0) {
				err = "failed to extend area map of reserved chunk";
				goto fail_unlock_mutex;
			}
			spin_lock_irqsave(&pcpu_lock, flags);
		}

		off = pcpu_alloc_area(chunk, size, align);
		if (off >= 0)
			goto area_found;

		err = "alloc from reserved chunk failed";
		goto fail_unlock;
	}

restart:
	/* search through normal chunks, smallest sufficient slot first */
	for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) {
		list_for_each_entry(chunk, &pcpu_slot[slot], list) {
			if (size > chunk->contig_hint)
				continue;

			new_alloc = pcpu_need_to_extend(chunk);
			if (new_alloc) {
				spin_unlock_irqrestore(&pcpu_lock, flags);
				if (pcpu_extend_area_map(chunk,
							 new_alloc) < 0) {
					err = "failed to extend area map";
					goto fail_unlock_mutex;
				}
				spin_lock_irqsave(&pcpu_lock, flags);
				/*
				 * pcpu_lock has been dropped, need to
				 * restart cpu_slot list walking.
				 */
				goto restart;
			}

			off = pcpu_alloc_area(chunk, size, align);
			if (off >= 0)
				goto area_found;
		}
	}

	/* hmmm... no space left, create a new chunk */
	spin_unlock_irqrestore(&pcpu_lock, flags);

	chunk = pcpu_create_chunk();
	if (!chunk) {
		err = "failed to allocate new chunk";
		goto fail_unlock_mutex;
	}

	spin_lock_irqsave(&pcpu_lock, flags);
	pcpu_chunk_relocate(chunk, -1);
	goto restart;

area_found:
	spin_unlock_irqrestore(&pcpu_lock, flags);

	/* populate, map and clear the area */
	if (pcpu_populate_chunk(chunk, off, size)) {
		spin_lock_irqsave(&pcpu_lock, flags);
		pcpu_free_area(chunk, off);
		err = "failed to populate";
		goto fail_unlock;
	}

	mutex_unlock(&pcpu_alloc_mutex);

	/* return address relative to base address */
	ptr = __addr_to_pcpu_ptr(chunk->base_addr + off);
	kmemleak_alloc_percpu(ptr, size);
	return ptr;

fail_unlock:
	spin_unlock_irqrestore(&pcpu_lock, flags);
fail_unlock_mutex:
	mutex_unlock(&pcpu_alloc_mutex);
	if (warn_limit) {
		pr_warning("PERCPU: allocation failed, size=%zu align=%zu, "
			   "%s\n", size, align, err);
		dump_stack();
		if (!--warn_limit)
			pr_info("PERCPU: limit reached, disable warning\n");
	}
	return NULL;
}

/* allocate dynamic percpu area of @size bytes aligned at @align */
void __percpu *__alloc_percpu(size_t size, size_t align)
{
	return pcpu_alloc(size, align, false);
}
EXPORT_SYMBOL_GPL(__alloc_percpu);

/* allocate from the reserved (module static) percpu area */
void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
{
	return pcpu_alloc(size, align, true);
}

/*
 * Workqueue callback: release fully free chunks.  All but the first
 * entry on the fully-free slot are moved to a private list under the
 * locks, then depopulated and destroyed outside the spinlock (one
 * free chunk is kept around to serve the next allocation quickly).
 */
static void pcpu_reclaim(struct work_struct *work)
{
	LIST_HEAD(todo);
	struct list_head *head = &pcpu_slot[pcpu_nr_slots - 1];
	struct pcpu_chunk *chunk, *next;

	mutex_lock(&pcpu_alloc_mutex);
	spin_lock_irq(&pcpu_lock);

	list_for_each_entry_safe(chunk, next, head, list) {
		WARN_ON(chunk->immutable);

		/* spare the first one */
		if (chunk == list_first_entry(head, struct pcpu_chunk, list))
			continue;

		list_move(&chunk->list, &todo);
	}

	spin_unlock_irq(&pcpu_lock);

	list_for_each_entry_safe(chunk, next, &todo, list) {
		pcpu_depopulate_chunk(chunk, 0, pcpu_unit_size);
		pcpu_destroy_chunk(chunk);
	}

	mutex_unlock(&pcpu_alloc_mutex);
}

/*
 * Free percpu area @ptr.  If the containing chunk becomes fully free
 * and another fully-free chunk already exists, reclaim is scheduled.
 * NULL is a no-op.
 */
void free_percpu(void __percpu *ptr)
{
	void *addr;
	struct pcpu_chunk *chunk;
	unsigned long flags;
	int off;

	if (!ptr)
		return;

	kmemleak_free_percpu(ptr);

	addr = __pcpu_ptr_to_addr(ptr);

	spin_lock_irqsave(&pcpu_lock, flags);

	chunk = pcpu_chunk_addr_search(addr);
	off = addr - chunk->base_addr;

	pcpu_free_area(chunk, off);

	/* if there are more than one fully free chunks, wake up grim reaper */
	if (chunk->free_size == pcpu_unit_size) {
		struct pcpu_chunk *pos;

		list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
			if (pos != chunk) {
				schedule_work(&pcpu_reclaim_work);
				break;
			}
	}

	spin_unlock_irqrestore(&pcpu_lock, flags);
}
EXPORT_SYMBOL_GPL(free_percpu);

/*
 * Test whether @addr belongs to the kernel's static percpu area
 * (any cpu's copy).  Always false on UP.
 */
bool is_kernel_percpu_address(unsigned long addr)
{
#ifdef CONFIG_SMP
	const size_t static_size = __per_cpu_end - __per_cpu_start;
	void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		void *start = per_cpu_ptr(base, cpu);

		if ((void *)addr >= start && (void *)addr < start + static_size)
			return true;
	}
#endif
	/* on UP, can't distinguish from other static vars, always false */
	return false;
}

/*
 * Translate a percpu address (already offset for some cpu) to its
 * physical address.  The first chunk may not be vmalloc-backed, so a
 * cheap range check narrows down before the per-cpu scan; outside the
 * first chunk the page is found via pcpu_addr_to_page().
 */
phys_addr_t per_cpu_ptr_to_phys(void *addr)
{
	void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
	bool in_first_chunk = false;
	unsigned long first_low, first_high;
	unsigned int cpu;

	/*
	 * The following test on unit_low/high isn't strictly
	 * necessary but will speed up lookups of addresses which
	 * aren't in the first chunk.
	 */
	first_low = pcpu_chunk_addr(pcpu_first_chunk, pcpu_low_unit_cpu, 0);
	first_high = pcpu_chunk_addr(pcpu_first_chunk, pcpu_high_unit_cpu,
				     pcpu_unit_pages);
	if ((unsigned long)addr >= first_low &&
	    (unsigned long)addr < first_high) {
		for_each_possible_cpu(cpu) {
			void *start = per_cpu_ptr(base, cpu);

			if (addr >= start && addr < start + pcpu_unit_size) {
				in_first_chunk = true;
				break;
			}
		}
	}

	if (in_first_chunk) {
		if (!is_vmalloc_addr(addr))
			return __pa(addr);
		else
			return page_to_phys(vmalloc_to_page(addr)) +
			       offset_in_page(addr);
	} else
		return page_to_phys(pcpu_addr_to_page(addr)) +
		       offset_in_page(addr);
}

/*
 * Allocate a pcpu_alloc_info for @nr_groups groups with @nr_units
 * total units from bootmem.  All groups initially share one cpu_map
 * (entries set to NR_CPUS); callers repartition it.  Returns NULL on
 * allocation failure.
 */
struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
						      int nr_units)
{
	struct pcpu_alloc_info *ai;
	size_t base_size, ai_size;
	void *ptr;
	int unit;

	base_size = ALIGN(sizeof(*ai) + nr_groups * sizeof(ai->groups[0]),
			  __alignof__(ai->groups[0].cpu_map[0]));
	ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);

	ptr = alloc_bootmem_nopanic(PFN_ALIGN(ai_size));
	if (!ptr)
		return NULL;
	ai = ptr;
	ptr += base_size;

	ai->groups[0].cpu_map = ptr;

	for (unit = 0; unit < nr_units; unit++)
		ai->groups[0].cpu_map[unit] = NR_CPUS;

	ai->nr_groups = nr_groups;
	ai->__ai_size = PFN_ALIGN(ai_size);

	return ai;
}

/* release a pcpu_alloc_info back to bootmem */
void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
{
	free_bootmem(__pa(ai), ai->__ai_size);
}

/*
 * Print out @ai's layout at log level @lvl: sizes and the cpu -> unit
 * map per group, wrapped so roughly 60 columns fit per line.
 */
static void pcpu_dump_alloc_info(const char *lvl,
				 const struct pcpu_alloc_info *ai)
{
	int group_width = 1, cpu_width = 1, width;
	char empty_str[] = "--------";
	int alloc = 0, alloc_end = 0;
	int group, v;
	int upa, apl;

	v = ai->nr_groups;
	while (v /= 10)
		group_width++;

	v = num_possible_cpus();
	while (v /= 10)
		cpu_width++;
	empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0';

	upa = ai->alloc_size / ai->unit_size;
	width = upa * (cpu_width + 1) + group_width + 3;
	apl = rounddown_pow_of_two(max(60 / width, 1));

	printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu",
	       lvl, ai->static_size, ai->reserved_size, ai->dyn_size,
	       ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size);

	for (group = 0; group < ai->nr_groups; group++) {
		const struct pcpu_group_info *gi = &ai->groups[group];
		int unit = 0, unit_end = 0;

		BUG_ON(gi->nr_units % upa);
		for (alloc_end += gi->nr_units / upa;
		     alloc < alloc_end; alloc++) {
			if (!(alloc % apl)) {
				printk(KERN_CONT "\n");
				printk("%spcpu-alloc: ", lvl);
			}
			printk(KERN_CONT "[%0*d] ", group_width, group);

			for (unit_end += upa; unit < unit_end; unit++)
				if (gi->cpu_map[unit] != NR_CPUS)
					printk(KERN_CONT "%0*d ", cpu_width,
					       gi->cpu_map[unit]);
				else
					printk(KERN_CONT "%s ", empty_str);
		}
	}
	printk(KERN_CONT "\n");
}

/*
 * Initialize the first (static) percpu chunk from @ai and @base_addr.
 * Builds the cpu -> unit/offset maps, records the group geometry,
 * sets up the slot lists and creates the static chunk (and, when a
 * reserved region exists, a separate dynamic chunk).  Any layout
 * inconsistency is fatal (BUG).  Returns 0.
 */
int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
				  void *base_addr)
{
	static char cpus_buf[4096] __initdata;
	/* static maps for the first chunk(s); replaced in percpu_init_late() */
	static int smap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata;
	static int dmap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata;
	size_t dyn_size = ai->dyn_size;
	size_t size_sum = ai->static_size + ai->reserved_size + dyn_size;
	struct pcpu_chunk *schunk, *dchunk = NULL;
	unsigned long *group_offsets;
	size_t *group_sizes;
	unsigned long *unit_off;
	unsigned int cpu;
	int *unit_map;
	int group, unit, i;

	cpumask_scnprintf(cpus_buf, sizeof(cpus_buf), cpu_possible_mask);

#define PCPU_SETUP_BUG_ON(cond)	do {					\
	if (unlikely(cond)) {						\
		pr_emerg("PERCPU: failed to initialize, %s", #cond);	\
		pr_emerg("PERCPU: cpu_possible_mask=%s\n", cpus_buf);	\
		pcpu_dump_alloc_info(KERN_EMERG, ai);			\
		BUG();							\
	}								\
} while (0)

	/* sanity checks */
	PCPU_SETUP_BUG_ON(ai->nr_groups <= 0);
#ifdef CONFIG_SMP
	PCPU_SETUP_BUG_ON(!ai->static_size);
	PCPU_SETUP_BUG_ON((unsigned long)__per_cpu_start & ~PAGE_MASK);
#endif
	PCPU_SETUP_BUG_ON(!base_addr);
	PCPU_SETUP_BUG_ON((unsigned long)base_addr & ~PAGE_MASK);
	PCPU_SETUP_BUG_ON(ai->unit_size < size_sum);
	PCPU_SETUP_BUG_ON(ai->unit_size & ~PAGE_MASK);
	PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE);
	PCPU_SETUP_BUG_ON(ai->dyn_size < PERCPU_DYNAMIC_EARLY_SIZE);
	PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0);

	/* process group information and build config tables accordingly */
	group_offsets = alloc_bootmem(ai->nr_groups * sizeof(group_offsets[0]));
	group_sizes = alloc_bootmem(ai->nr_groups * sizeof(group_sizes[0]));
	unit_map = alloc_bootmem(nr_cpu_ids * sizeof(unit_map[0]));
	unit_off = alloc_bootmem(nr_cpu_ids * sizeof(unit_off[0]));

	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
		unit_map[cpu] = UINT_MAX;

	pcpu_low_unit_cpu = NR_CPUS;
	pcpu_high_unit_cpu = NR_CPUS;

	for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
		const struct pcpu_group_info *gi = &ai->groups[group];

		group_offsets[group] = gi->base_offset;
		group_sizes[group] = gi->nr_units * ai->unit_size;

		for (i = 0; i < gi->nr_units; i++) {
			cpu = gi->cpu_map[i];
			if (cpu == NR_CPUS)
				continue;

			PCPU_SETUP_BUG_ON(cpu > nr_cpu_ids);
			PCPU_SETUP_BUG_ON(!cpu_possible(cpu));
			PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX);

			unit_map[cpu] = unit + i;
			unit_off[cpu] = gi->base_offset + i * ai->unit_size;

			/* determine low/high unit_cpu */
			if (pcpu_low_unit_cpu == NR_CPUS ||
			    unit_off[cpu] < unit_off[pcpu_low_unit_cpu])
				pcpu_low_unit_cpu = cpu;
			if (pcpu_high_unit_cpu == NR_CPUS ||
			    unit_off[cpu] > unit_off[pcpu_high_unit_cpu])
				pcpu_high_unit_cpu = cpu;
		}
	}
	pcpu_nr_units = unit;

	for_each_possible_cpu(cpu)
		PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX);

	/* we're done parsing the input, undefine BUG macro and dump config */
#undef PCPU_SETUP_BUG_ON
	pcpu_dump_alloc_info(KERN_DEBUG, ai);

	pcpu_nr_groups = ai->nr_groups;
	pcpu_group_offsets = group_offsets;
	pcpu_group_sizes = group_sizes;
	pcpu_unit_map = unit_map;
	pcpu_unit_offsets = unit_off;

	/* determine basic parameters */
	pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT;
	pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
	pcpu_atom_size = ai->atom_size;
	pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) +
		BITS_TO_LONGS(pcpu_unit_pages) * sizeof(unsigned long);

	/*
	 * Allocate chunk slots.  The additional last slot is for
	 * empty chunks.
	 */
	pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2;
	pcpu_slot = alloc_bootmem(pcpu_nr_slots * sizeof(pcpu_slot[0]));
	for (i = 0; i < pcpu_nr_slots; i++)
		INIT_LIST_HEAD(&pcpu_slot[i]);

	/*
	 * Initialize static chunk.  If reserved_size is zero, the
	 * static chunk covers static area + dynamic allocation area
	 * in the first chunk.  If reserved_size is not zero, it
	 * covers static area + reserved area.
	 */
	schunk = alloc_bootmem(pcpu_chunk_struct_size);
	INIT_LIST_HEAD(&schunk->list);
	schunk->base_addr = base_addr;
	schunk->map = smap;
	schunk->map_alloc = ARRAY_SIZE(smap);
	schunk->immutable = true;
	bitmap_fill(schunk->populated, pcpu_unit_pages);

	if (ai->reserved_size) {
		schunk->free_size = ai->reserved_size;
		pcpu_reserved_chunk = schunk;
		pcpu_reserved_chunk_limit = ai->static_size + ai->reserved_size;
	} else {
		schunk->free_size = dyn_size;
		dyn_size = 0;			/* dynamic area covered */
	}
	schunk->contig_hint = schunk->free_size;

	schunk->map[schunk->map_used++] = -ai->static_size;
	if (schunk->free_size)
		schunk->map[schunk->map_used++] = schunk->free_size;

	/* init dynamic chunk if necessary */
	if (dyn_size) {
		dchunk = alloc_bootmem(pcpu_chunk_struct_size);
		INIT_LIST_HEAD(&dchunk->list);
		dchunk->base_addr = base_addr;
		dchunk->map = dmap;
		dchunk->map_alloc = ARRAY_SIZE(dmap);
		dchunk->immutable = true;
		bitmap_fill(dchunk->populated, pcpu_unit_pages);

		dchunk->contig_hint = dchunk->free_size = dyn_size;
		dchunk->map[dchunk->map_used++] = -pcpu_reserved_chunk_limit;
		dchunk->map[dchunk->map_used++] = dchunk->free_size;
	}

	/* link the first chunk in */
	pcpu_first_chunk = dchunk ?: schunk;
	pcpu_chunk_relocate(pcpu_first_chunk, -1);

	/* we're done */
	pcpu_base_addr = base_addr;
	return 0;
}

#ifdef CONFIG_SMP

/* names of the first-chunk allocators, for the percpu_alloc= parameter */
const char *pcpu_fc_names[PCPU_FC_NR] __initdata = {
	[PCPU_FC_AUTO]	= "auto",
	[PCPU_FC_EMBED]	= "embed",
	[PCPU_FC_PAGE]	= "page",
};

enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO;

/* early_param handler: select the first-chunk allocator by name */
static int __init percpu_alloc_setup(char *str)
{
	if (0)
		/* nada */;
#ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
	else if (!strcmp(str, "embed"))
		pcpu_chosen_fc = PCPU_FC_EMBED;
#endif
#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
	else if (!strcmp(str, "page"))
		pcpu_chosen_fc = PCPU_FC_PAGE;
#endif
	else
		pr_warning("PERCPU: unknown allocator %s specified\n", str);

	return 0;
}
early_param("percpu_alloc", percpu_alloc_setup);

/*
 * pcpu_embed_first_chunk() is used by the generic percpu setup.
 * Build it if needed by the arch config or the generic setup is used.
 */
#if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \
	!defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
#define BUILD_EMBED_FIRST_CHUNK
#endif

/* build pcpu_page_first_chunk() iff needed by the arch config */
#if defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK)
#define BUILD_PAGE_FIRST_CHUNK
#endif

/* pcpu_build_alloc_info() is used by both embed and page first chunk */
#if defined(BUILD_EMBED_FIRST_CHUNK) || defined(BUILD_PAGE_FIRST_CHUNK)
/*
 * Determine the first-chunk layout: group cpus by NUMA locality (via
 * @cpu_distance_fn, LOCAL_DISTANCE threshold), then choose units per
 * allocation (upa) balancing allocation count against wasted units
 * (at most 1/3 of possible cpus wasted).  Returns a filled
 * pcpu_alloc_info or ERR_PTR on failure.
 */
static struct pcpu_alloc_info * __init pcpu_build_alloc_info(
				size_t reserved_size, size_t dyn_size,
				size_t atom_size,
				pcpu_fc_cpu_distance_fn_t cpu_distance_fn)
{
	static int group_map[NR_CPUS] __initdata;
	static int group_cnt[NR_CPUS] __initdata;
	const size_t static_size = __per_cpu_end - __per_cpu_start;
	int nr_groups = 1, nr_units = 0;
	size_t size_sum, min_unit_size, alloc_size;
	int upa, max_upa, uninitialized_var(best_upa);	/* units_per_alloc */
	int last_allocs, group, unit;
	unsigned int cpu, tcpu;
	struct pcpu_alloc_info *ai;
	unsigned int *cpu_map;

	/* this function may be called multiple times */
	memset(group_map, 0, sizeof(group_map));
	memset(group_cnt, 0, sizeof(group_cnt));

	/* calculate size_sum and ensure dyn_size is enough for early alloc */
	size_sum = PFN_ALIGN(static_size + reserved_size +
			    max_t(size_t, dyn_size, PERCPU_DYNAMIC_EARLY_SIZE));
	dyn_size = size_sum - static_size - reserved_size;

	/*
	 * Determine min_unit_size, alloc_size and max_upa such that
	 * alloc_size is multiple of atom_size and is the smallest
	 * which can accommodate 4k aligned segments which are equal to
	 * or larger than min_unit_size.
	 */
	min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE);

	alloc_size = roundup(min_unit_size, atom_size);
	upa = alloc_size / min_unit_size;
	while (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK))
		upa--;
	max_upa = upa;

	/* group cpus according to their proximity */
	for_each_possible_cpu(cpu) {
		group = 0;
	next_group:
		for_each_possible_cpu(tcpu) {
			if (cpu == tcpu)
				break;
			if (group_map[tcpu] == group && cpu_distance_fn &&
			    (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE ||
			     cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) {
				group++;
				nr_groups = max(nr_groups, group + 1);
				goto next_group;
			}
		}
		group_map[cpu] = group;
		group_cnt[group]++;
	}

	/*
	 * Expand unit size until address space usage goes over 75%
	 * and then as much as possible without using more address
	 * space.
	 */
	last_allocs = INT_MAX;
	for (upa = max_upa; upa; upa--) {
		int allocs = 0, wasted = 0;

		if (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK))
			continue;

		for (group = 0; group < nr_groups; group++) {
			int this_allocs = DIV_ROUND_UP(group_cnt[group], upa);
			allocs += this_allocs;
			wasted += this_allocs * upa - group_cnt[group];
		}

		/*
		 * Don't accept if wastage is over 1/3.  The
		 * greater-than comparison ensures upa==1 always
		 * passes the following check.
		 */
		if (wasted > num_possible_cpus() / 3)
			continue;

		/* and then don't consume more memory */
		if (allocs > last_allocs)
			break;
		last_allocs = allocs;
		best_upa = upa;
	}
	upa = best_upa;

	/* allocate and fill alloc_info */
	for (group = 0; group < nr_groups; group++)
		nr_units += roundup(group_cnt[group], upa);

	ai = pcpu_alloc_alloc_info(nr_groups, nr_units);
	if (!ai)
		return ERR_PTR(-ENOMEM);
	cpu_map = ai->groups[0].cpu_map;

	for (group = 0; group < nr_groups; group++) {
		ai->groups[group].cpu_map = cpu_map;
		cpu_map += roundup(group_cnt[group], upa);
	}

	ai->static_size = static_size;
	ai->reserved_size = reserved_size;
	ai->dyn_size = dyn_size;
	ai->unit_size = alloc_size / upa;
	ai->atom_size = atom_size;
	ai->alloc_size = alloc_size;

	for (group = 0, unit = 0; group_cnt[group]; group++) {
		struct pcpu_group_info *gi = &ai->groups[group];

		/*
		 * Initialize base_offset as if all groups are located
		 * back-to-back.  The caller should update this to
		 * reflect actual allocation.
		 */
		gi->base_offset = unit * ai->unit_size;

		for_each_possible_cpu(cpu)
			if (group_map[cpu] == group)
				gi->cpu_map[gi->nr_units++] = cpu;
		gi->nr_units = roundup(gi->nr_units, upa);
		unit += gi->nr_units;
	}
	BUG_ON(unit != nr_units);

	return ai;
}
#endif /* BUILD_EMBED_FIRST_CHUNK || BUILD_PAGE_FIRST_CHUNK */

#if defined(BUILD_EMBED_FIRST_CHUNK)
/*
 * Set up the first chunk by embedding it in large pages allocated via
 * @alloc_fn per group; unused units and the area beyond static +
 * reserved + dynamic are returned via @free_fn.  Fails with -EINVAL
 * if the span of group allocations is too large for the vmalloc area
 * (when the page allocator is available as fallback).  Returns 0 on
 * success, -errno on failure.
 */
int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
				  size_t atom_size,
				  pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
				  pcpu_fc_alloc_fn_t alloc_fn,
				  pcpu_fc_free_fn_t free_fn)
{
	void *base = (void *)ULONG_MAX;
	void **areas = NULL;
	struct pcpu_alloc_info *ai;
	size_t size_sum, areas_size, max_distance;
	int group, i, rc;

	ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size,
				   cpu_distance_fn);
	if (IS_ERR(ai))
		return PTR_ERR(ai);

	size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
	areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *));

	areas = alloc_bootmem_nopanic(areas_size);
	if (!areas) {
		rc = -ENOMEM;
		goto out_free;
	}

	/* allocate, copy and determine base address */
	for (group = 0; group < ai->nr_groups; group++) {
		struct pcpu_group_info *gi = &ai->groups[group];
		unsigned int cpu = NR_CPUS;
		void *ptr;

		for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++)
			cpu = gi->cpu_map[i];
		BUG_ON(cpu == NR_CPUS);

		/* allocate space for the whole group */
		ptr = alloc_fn(cpu, gi->nr_units * ai->unit_size, atom_size);
		if (!ptr) {
			rc = -ENOMEM;
			goto out_free_areas;
		}
		/* kmemleak tracks the percpu allocations separately */
		kmemleak_free(ptr);
		areas[group] = ptr;

		base = min(ptr, base);
	}

	/*
	 * Copy data and free unused parts.  This should happen after all
	 * allocations are complete; otherwise, we may end up with
	 * overlapping groups.
	 */
	for (group = 0; group < ai->nr_groups; group++) {
		struct pcpu_group_info *gi = &ai->groups[group];
		void *ptr = areas[group];

		for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) {
			if (gi->cpu_map[i] == NR_CPUS) {
				/* unused unit, free whole */
				free_fn(ptr, ai->unit_size);
				continue;
			}
			/* copy and return the unused part */
			memcpy(ptr, __per_cpu_load, ai->static_size);
			free_fn(ptr + size_sum, ai->unit_size - size_sum);
		}
	}

	/* base address is now known, determine group base offsets */
	max_distance = 0;
	for (group = 0; group < ai->nr_groups; group++) {
		ai->groups[group].base_offset = areas[group] - base;
		max_distance = max_t(size_t, max_distance,
				     ai->groups[group].base_offset);
	}
	max_distance += ai->unit_size;

	/* warn if maximum distance is further than 75% of vmalloc space */
	if (max_distance > (VMALLOC_END - VMALLOC_START) * 3 / 4) {
		pr_warning("PERCPU: max_distance=0x%zx too large for vmalloc "
			   "space 0x%lx\n", max_distance,
			   (unsigned long)(VMALLOC_END - VMALLOC_START));
#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
		/* and fail if we have fallback */
		rc = -EINVAL;
		goto out_free;
#endif
	}

	pr_info("PERCPU: Embedded %zu pages/cpu @%p s%zu r%zu d%zu u%zu\n",
		PFN_DOWN(size_sum), base, ai->static_size, ai->reserved_size,
		ai->dyn_size, ai->unit_size);

	rc = pcpu_setup_first_chunk(ai, base);
	goto out_free;

out_free_areas:
	for (group = 0; group < ai->nr_groups; group++)
		free_fn(areas[group],
			ai->groups[group].nr_units * ai->unit_size);
out_free:
	pcpu_free_alloc_info(ai);
	if (areas)
		free_bootmem(__pa(areas), areas_size);
	return rc;
}
#endif /* BUILD_EMBED_FIRST_CHUNK */

#ifdef BUILD_PAGE_FIRST_CHUNK
/*
 * Set up the first chunk by mapping individually allocated pages into
 * a contiguous vmalloc area.  @populate_pte_fn must make each address
 * mappable before __pcpu_map_pages() is called; a mapping failure at
 * this stage is fatal (panic).  Returns 0 or -errno.
 */
int __init pcpu_page_first_chunk(size_t reserved_size,
				 pcpu_fc_alloc_fn_t alloc_fn,
				 pcpu_fc_free_fn_t free_fn,
				 pcpu_fc_populate_pte_fn_t populate_pte_fn)
{
	static struct vm_struct vm;
	struct pcpu_alloc_info *ai;
	char psize_str[16];
	int unit_pages;
	size_t pages_size;
	struct page **pages;
	int unit, i, j, rc;

	snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10);

	ai = pcpu_build_alloc_info(reserved_size, 0, PAGE_SIZE, NULL);
	if (IS_ERR(ai))
		return PTR_ERR(ai);
	BUG_ON(ai->nr_groups != 1);
	BUG_ON(ai->groups[0].nr_units != num_possible_cpus());

	unit_pages = ai->unit_size >> PAGE_SHIFT;

	/* unaligned allocations can't be freed, round up to page size */
	pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() *
			       sizeof(pages[0]));
	pages = alloc_bootmem(pages_size);

	/* allocate pages */
	j = 0;
	for (unit = 0; unit < num_possible_cpus(); unit++)
		for (i = 0; i < unit_pages; i++) {
			unsigned int cpu = ai->groups[0].cpu_map[unit];
			void *ptr;

			ptr = alloc_fn(cpu, PAGE_SIZE, PAGE_SIZE);
			if (!ptr) {
				pr_warning("PERCPU: failed to allocate %s page "
					   "for cpu%u\n", psize_str, cpu);
				goto enomem;
			}
			/* kmemleak tracks the percpu allocations separately */
			kmemleak_free(ptr);
			pages[j++] = virt_to_page(ptr);
		}

	/* allocate vm area, map the pages and copy static data */
	vm.flags = VM_ALLOC;
	vm.size = num_possible_cpus() * ai->unit_size;
	vm_area_register_early(&vm, PAGE_SIZE);

	for (unit = 0; unit < num_possible_cpus(); unit++) {
		unsigned long unit_addr =
			(unsigned long)vm.addr + unit * ai->unit_size;

		for (i = 0; i < unit_pages; i++)
			populate_pte_fn(unit_addr + (i << PAGE_SHIFT));

		/* pte already populated, the following shouldn't fail */
		rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages],
				      unit_pages);
		if (rc < 0)
			panic("failed to map percpu area, err=%d\n", rc);

		memcpy((void *)unit_addr, __per_cpu_load, ai->static_size);
	}

	/* we're ready, commit */
	pr_info("PERCPU: %d %s pages/cpu @%p s%zu r%zu d%zu\n",
		unit_pages, psize_str, vm.addr, ai->static_size,
		ai->reserved_size, ai->dyn_size);

	rc = pcpu_setup_first_chunk(ai, vm.addr);
	goto out_free_ar;

enomem:
	while (--j >= 0)
		free_fn(page_address(pages[j]), PAGE_SIZE);
	rc = -ENOMEM;
out_free_ar:
	free_bootmem(__pa(pages), pages_size);
	pcpu_free_alloc_info(ai);
	return rc;
}
#endif /* BUILD_PAGE_FIRST_CHUNK */

#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
/*
 * Generic SMP percpu area setup for archs without their own
 * setup_per_cpu_areas(): embed the first chunk using bootmem and
 * record the cpu offsets used by per_cpu()/this_cpu accessors.
 */
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);

static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size,
				       size_t align)
{
	return __alloc_bootmem_nopanic(size, align, __pa(MAX_DMA_ADDRESS));
}

static void __init pcpu_dfl_fc_free(void *ptr, size_t size)
{
	free_bootmem(__pa(ptr), size);
}

void __init setup_per_cpu_areas(void)
{
	unsigned long delta;
	unsigned int cpu;
	int rc;

	/*
	 * Always reserve area for module percpu variables.  That's
	 * what the legacy allocator did.
	 */
	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
				    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL,
				    pcpu_dfl_fc_alloc, pcpu_dfl_fc_free);
	if (rc < 0)
		panic("Failed to initialize percpu areas.");

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu)
		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
}
#endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */

#else	/* CONFIG_SMP */

/*
 * UP percpu area setup.  There's only one unit and no static area to
 * copy (percpu variables are regular variables on UP); a single
 * dynamic chunk of unit_size is set up so the dynamic allocator works.
 */
void __init setup_per_cpu_areas(void)
{
	const size_t unit_size =
		roundup_pow_of_two(max_t(size_t, PCPU_MIN_UNIT_SIZE,
					 PERCPU_DYNAMIC_RESERVE));
	struct pcpu_alloc_info *ai;
	void *fc;

	ai = pcpu_alloc_alloc_info(1, 1);
	fc = __alloc_bootmem(unit_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
	if (!ai || !fc)
		panic("Failed to allocate memory for percpu areas.");
	/* kmemleak tracks the percpu allocations separately */
	kmemleak_free(fc);

	ai->dyn_size = unit_size;
	ai->unit_size = unit_size;
	ai->atom_size = unit_size;
	ai->alloc_size = unit_size;
	ai->groups[0].nr_units = 1;
	ai->groups[0].cpu_map[0] = 0;

	if (pcpu_setup_first_chunk(ai, fc) < 0)
		panic("Failed to initialize percpu areas.");
}

#endif	/* CONFIG_SMP */

/*
 * Replace the first chunk's __initdata area maps with slab-allocated
 * copies once slab is up, so the maps can later be extended by
 * pcpu_extend_area_map().  Called during late init.
 */
void __init percpu_init_late(void)
{
	struct pcpu_chunk *target_chunks[] =
		{ pcpu_first_chunk, pcpu_reserved_chunk, NULL };
	struct pcpu_chunk *chunk;
	unsigned long flags;
	int i;

	for (i = 0; (chunk = target_chunks[i]); i++) {
		int *map;
		const size_t size = PERCPU_DYNAMIC_EARLY_SLOTS * sizeof(map[0]);

		BUILD_BUG_ON(size > PAGE_SIZE);

		map = pcpu_mem_zalloc(size);
		BUG_ON(!map);

		spin_lock_irqsave(&pcpu_lock, flags);
		memcpy(map, chunk->map, size);
		chunk->map = map;
		spin_unlock_irqrestore(&pcpu_lock, flags);
	}
}
gpl-2.0
kernelzilla/android-kernel
drivers/mmc/host/tifm_sd.c
887
29471
/*
 * tifm_sd.c - TI FlashMedia driver
 *
 * Copyright (C) 2006 Alex Dubov <oakad@yahoo.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Special thanks to Brad Campbell for extensive testing of this driver.
 *
 */

#include <linux/tifm.h>
#include <linux/mmc/host.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>
#include <asm/io.h>

#define DRIVER_NAME "tifm_sd"
#define DRIVER_VERSION "0.8"

/* module options: force PIO instead of DMA, and fix the data timeout */
static int no_dma = 0;
static int fixed_timeout = 0;
module_param(no_dma, bool, 0644);
module_param(fixed_timeout, bool, 0644);

/* Constants here are mostly from OMAP5912 datasheet */
/* host control / configuration register bits */
#define TIFM_MMCSD_RESET      0x0002
#define TIFM_MMCSD_CLKMASK    0x03ff
#define TIFM_MMCSD_POWER      0x0800
#define TIFM_MMCSD_4BBUS      0x8000
#define TIFM_MMCSD_RXDE       0x8000   /* rx dma enable */
#define TIFM_MMCSD_TXDE       0x0080   /* tx dma enable */
#define TIFM_MMCSD_BUFINT     0x0c00   /* set bits: AE, AF */
#define TIFM_MMCSD_DPE        0x0020   /* data timeout counted in kilocycles */
#define TIFM_MMCSD_INAB       0x0080   /* abort / initialize command */
#define TIFM_MMCSD_READ       0x8000

/* error bits in the status register; DTO/CTO are time-outs */
#define TIFM_MMCSD_ERRMASK    0x01e0   /* set bits: CCRC, CTO, DCRC, DTO */
#define TIFM_MMCSD_EOC        0x0001   /* end of command phase  */
#define TIFM_MMCSD_CD         0x0002   /* card detect           */
#define TIFM_MMCSD_CB         0x0004   /* card enter busy state */
#define TIFM_MMCSD_BRS        0x0008   /* block received/sent   */
#define TIFM_MMCSD_EOFB       0x0010   /* card exit busy state  */
#define TIFM_MMCSD_DTO        0x0020   /* data time-out         */
#define TIFM_MMCSD_DCRC       0x0040   /* data crc error        */
#define TIFM_MMCSD_CTO        0x0080   /* command time-out      */
#define TIFM_MMCSD_CCRC       0x0100   /* command crc error     */
#define TIFM_MMCSD_AF         0x0400   /* fifo almost full      */
#define TIFM_MMCSD_AE         0x0800   /* fifo almost empty     */
#define TIFM_MMCSD_OCRB       0x1000   /* OCR busy              */
#define TIFM_MMCSD_CIRQ       0x2000   /* card irq (cmd40/sdio) */
#define TIFM_MMCSD_CERR       0x4000   /* card status error     */

#define TIFM_MMCSD_ODTO       0x0040   /* open drain / extended timeout */
#define TIFM_MMCSD_CARD_RO    0x0200   /* card is read-only     */

#define TIFM_MMCSD_FIFO_SIZE  0x0020

/* response-format field of the command register */
#define TIFM_MMCSD_RSP_R0     0x0000
#define TIFM_MMCSD_RSP_R1     0x0100
#define TIFM_MMCSD_RSP_R2     0x0200
#define TIFM_MMCSD_RSP_R3     0x0300
#define TIFM_MMCSD_RSP_R4     0x0400
#define TIFM_MMCSD_RSP_R5     0x0500
#define TIFM_MMCSD_RSP_R6     0x0600
#define TIFM_MMCSD_RSP_BUSY   0x0800

/* command-class field of the command register */
#define TIFM_MMCSD_CMD_BC     0x0000
#define TIFM_MMCSD_CMD_BCR    0x1000
#define TIFM_MMCSD_CMD_AC     0x2000
#define TIFM_MMCSD_CMD_ADTC   0x3000

#define TIFM_MMCSD_MAX_BLOCK_SIZE  0x0800UL

/* bits of tifm_sd::cmd_flags tracking request progress */
enum {
	CMD_READY	= 0x0001,
	FIFO_READY	= 0x0002,
	BRS_READY	= 0x0004,
	SCMD_ACTIVE	= 0x0008,
	SCMD_READY	= 0x0010,
	CARD_BUSY	= 0x0020,
	DATA_CARRY	= 0x0040	/* one byte held over in bounce_buf_data[0] */
};

/* per-socket host state */
struct tifm_sd {
	struct tifm_dev       *dev;

	unsigned short        eject:1,
			      open_drain:1,
			      no_dma:1;
	unsigned short        cmd_flags;

	unsigned int          clk_freq;
	unsigned int          clk_div;
	unsigned long         timeout_jiffies;

	struct tasklet_struct finish_tasklet;
	struct timer_list     timer;
	struct mmc_request    *req;

	int                   sg_len;
	int                   sg_pos;
	unsigned int          block_pos;
	struct scatterlist    bounce_buf;
	unsigned char         bounce_buf_data[TIFM_MMCSD_MAX_BLOCK_SIZE];
};

/* for some reason, host won't respond correctly to readw/writew */
/*
 * Read @cnt bytes from the data FIFO into @pg at @off.  The FIFO is
 * read 32 bits at a time but only two bytes per read are consumed; an
 * odd byte count leaves the extra byte in bounce_buf_data[0] with
 * DATA_CARRY set, to be emitted first on the next call.
 */
static void tifm_sd_read_fifo(struct tifm_sd *host, struct page *pg,
			      unsigned int off, unsigned int cnt)
{
	struct tifm_dev *sock = host->dev;
	unsigned char *buf;
	unsigned int pos = 0, val;

	buf = kmap_atomic(pg, KM_BIO_DST_IRQ) + off;
	if (host->cmd_flags & DATA_CARRY) {
		/* flush the byte left over from the previous chunk */
		buf[pos++] = host->bounce_buf_data[0];
		host->cmd_flags &= ~DATA_CARRY;
	}

	while (pos < cnt) {
		val = readl(sock->addr + SOCK_MMCSD_DATA);
		buf[pos++] = val & 0xff;
		if (pos == cnt) {
			/* odd count: stash the high byte for next time */
			host->bounce_buf_data[0] = (val >> 8) & 0xff;
			host->cmd_flags |= DATA_CARRY;
			break;
		}
		buf[pos++] = (val >> 8) & 0xff;
	}
	kunmap_atomic(buf - off, KM_BIO_DST_IRQ);
}

/*
 * Mirror of tifm_sd_read_fifo() for writes: @cnt bytes from @pg are
 * pushed to the FIFO two at a time; an odd trailing byte is held in
 * bounce_buf_data[0] (DATA_CARRY) and paired with the first byte of
 * the next call.
 */
static void tifm_sd_write_fifo(struct tifm_sd *host, struct page *pg,
			       unsigned int off, unsigned int cnt)
{
	struct tifm_dev *sock = host->dev;
	unsigned char *buf;
	unsigned int pos = 0, val;

	buf = kmap_atomic(pg, KM_BIO_SRC_IRQ) + off;
	if (host->cmd_flags & DATA_CARRY) {
		/* complete the half-word left over from the previous chunk */
		val = host->bounce_buf_data[0] | ((buf[pos++] << 8) & 0xff00);
		writel(val, sock->addr + SOCK_MMCSD_DATA);
		host->cmd_flags &= ~DATA_CARRY;
	}

	while (pos < cnt) {
		val = buf[pos++];
		if (pos == cnt) {
			/* odd count: stash the low byte for next time */
			host->bounce_buf_data[0] = val & 0xff;
			host->cmd_flags |= DATA_CARRY;
			break;
		}
		val |= (buf[pos++] << 8) & 0xff00;
		writel(val, sock->addr + SOCK_MMCSD_DATA);
	}
	kunmap_atomic(buf - off, KM_BIO_SRC_IRQ);
}

/*
 * PIO transfer of up to 2 * FIFO_SIZE bytes between the FIFO and the
 * request's scatterlist, advancing host->sg_pos/block_pos.  On the
 * final write, a pending carry byte is flushed to the FIFO.  Page
 * crossings within an sg entry are handled via nth_page().
 */
static void tifm_sd_transfer_data(struct tifm_sd *host)
{
	struct mmc_data *r_data = host->req->cmd->data;
	struct scatterlist *sg = r_data->sg;
	unsigned int off, cnt, t_size = TIFM_MMCSD_FIFO_SIZE * 2;
	unsigned int p_off, p_cnt;
	struct page *pg;

	if (host->sg_pos == host->sg_len)
		return;
	while (t_size) {
		cnt = sg[host->sg_pos].length - host->block_pos;
		if (!cnt) {
			host->block_pos = 0;
			host->sg_pos++;
			if (host->sg_pos == host->sg_len) {
				/* end of data: flush any held write byte */
				if ((r_data->flags & MMC_DATA_WRITE)
				    && (host->cmd_flags & DATA_CARRY))
					writel(host->bounce_buf_data[0],
					       host->dev->addr
					       + SOCK_MMCSD_DATA);

				return;
			}
			cnt = sg[host->sg_pos].length;
		}
		off = sg[host->sg_pos].offset + host->block_pos;

		pg = nth_page(sg_page(&sg[host->sg_pos]), off >> PAGE_SHIFT);
		p_off = offset_in_page(off);
		p_cnt = PAGE_SIZE - p_off;
		p_cnt = min(p_cnt, cnt);
		p_cnt = min(p_cnt, t_size);

		if (r_data->flags & MMC_DATA_READ)
			tifm_sd_read_fifo(host, pg, p_off, p_cnt);
		else if (r_data->flags & MMC_DATA_WRITE)
			tifm_sd_write_fifo(host, pg, p_off, p_cnt);

		t_size -= p_cnt;
		host->block_pos += p_cnt;
	}
}

/* copy @count bytes between two pages using atomic kmaps (irq context) */
static void tifm_sd_copy_page(struct page *dst, unsigned int dst_off,
			      struct page *src, unsigned int src_off,
			      unsigned int count)
{
	unsigned char *src_buf = kmap_atomic(src, KM_BIO_SRC_IRQ) + src_off;
	unsigned char *dst_buf = kmap_atomic(dst, KM_BIO_DST_IRQ) + dst_off;

	memcpy(dst_buf, src_buf, count);
	kunmap_atomic(dst_buf - dst_off, KM_BIO_DST_IRQ);
	kunmap_atomic(src_buf - src_off, KM_BIO_SRC_IRQ);
}
static void tifm_sd_bounce_block(struct tifm_sd *host, struct mmc_data *r_data) { struct scatterlist *sg = r_data->sg; unsigned int t_size = r_data->blksz; unsigned int off, cnt; unsigned int p_off, p_cnt; struct page *pg; dev_dbg(&host->dev->dev, "bouncing block\n"); while (t_size) { cnt = sg[host->sg_pos].length - host->block_pos; if (!cnt) { host->block_pos = 0; host->sg_pos++; if (host->sg_pos == host->sg_len) return; cnt = sg[host->sg_pos].length; } off = sg[host->sg_pos].offset + host->block_pos; pg = nth_page(sg_page(&sg[host->sg_pos]), off >> PAGE_SHIFT); p_off = offset_in_page(off); p_cnt = PAGE_SIZE - p_off; p_cnt = min(p_cnt, cnt); p_cnt = min(p_cnt, t_size); if (r_data->flags & MMC_DATA_WRITE) tifm_sd_copy_page(sg_page(&host->bounce_buf), r_data->blksz - t_size, pg, p_off, p_cnt); else if (r_data->flags & MMC_DATA_READ) tifm_sd_copy_page(pg, p_off, sg_page(&host->bounce_buf), r_data->blksz - t_size, p_cnt); t_size -= p_cnt; host->block_pos += p_cnt; } } static int tifm_sd_set_dma_data(struct tifm_sd *host, struct mmc_data *r_data) { struct tifm_dev *sock = host->dev; unsigned int t_size = TIFM_DMA_TSIZE * r_data->blksz; unsigned int dma_len, dma_blk_cnt, dma_off; struct scatterlist *sg = NULL; unsigned long flags; if (host->sg_pos == host->sg_len) return 1; if (host->cmd_flags & DATA_CARRY) { host->cmd_flags &= ~DATA_CARRY; local_irq_save(flags); tifm_sd_bounce_block(host, r_data); local_irq_restore(flags); if (host->sg_pos == host->sg_len) return 1; } dma_len = sg_dma_len(&r_data->sg[host->sg_pos]) - host->block_pos; if (!dma_len) { host->block_pos = 0; host->sg_pos++; if (host->sg_pos == host->sg_len) return 1; dma_len = sg_dma_len(&r_data->sg[host->sg_pos]); } if (dma_len < t_size) { dma_blk_cnt = dma_len / r_data->blksz; dma_off = host->block_pos; host->block_pos += dma_blk_cnt * r_data->blksz; } else { dma_blk_cnt = TIFM_DMA_TSIZE; dma_off = host->block_pos; host->block_pos += t_size; } if (dma_blk_cnt) sg = &r_data->sg[host->sg_pos]; else if 
(dma_len) { if (r_data->flags & MMC_DATA_WRITE) { local_irq_save(flags); tifm_sd_bounce_block(host, r_data); local_irq_restore(flags); } else host->cmd_flags |= DATA_CARRY; sg = &host->bounce_buf; dma_off = 0; dma_blk_cnt = 1; } else return 1; dev_dbg(&sock->dev, "setting dma for %d blocks\n", dma_blk_cnt); writel(sg_dma_address(sg) + dma_off, sock->addr + SOCK_DMA_ADDRESS); if (r_data->flags & MMC_DATA_WRITE) writel((dma_blk_cnt << 8) | TIFM_DMA_TX | TIFM_DMA_EN, sock->addr + SOCK_DMA_CONTROL); else writel((dma_blk_cnt << 8) | TIFM_DMA_EN, sock->addr + SOCK_DMA_CONTROL); return 0; } static unsigned int tifm_sd_op_flags(struct mmc_command *cmd) { unsigned int rc = 0; switch (mmc_resp_type(cmd)) { case MMC_RSP_NONE: rc |= TIFM_MMCSD_RSP_R0; break; case MMC_RSP_R1B: rc |= TIFM_MMCSD_RSP_BUSY; // deliberate fall-through case MMC_RSP_R1: rc |= TIFM_MMCSD_RSP_R1; break; case MMC_RSP_R2: rc |= TIFM_MMCSD_RSP_R2; break; case MMC_RSP_R3: rc |= TIFM_MMCSD_RSP_R3; break; default: BUG(); } switch (mmc_cmd_type(cmd)) { case MMC_CMD_BC: rc |= TIFM_MMCSD_CMD_BC; break; case MMC_CMD_BCR: rc |= TIFM_MMCSD_CMD_BCR; break; case MMC_CMD_AC: rc |= TIFM_MMCSD_CMD_AC; break; case MMC_CMD_ADTC: rc |= TIFM_MMCSD_CMD_ADTC; break; default: BUG(); } return rc; } static void tifm_sd_exec(struct tifm_sd *host, struct mmc_command *cmd) { struct tifm_dev *sock = host->dev; unsigned int cmd_mask = tifm_sd_op_flags(cmd); if (host->open_drain) cmd_mask |= TIFM_MMCSD_ODTO; if (cmd->data && (cmd->data->flags & MMC_DATA_READ)) cmd_mask |= TIFM_MMCSD_READ; dev_dbg(&sock->dev, "executing opcode 0x%x, arg: 0x%x, mask: 0x%x\n", cmd->opcode, cmd->arg, cmd_mask); writel((cmd->arg >> 16) & 0xffff, sock->addr + SOCK_MMCSD_ARG_HIGH); writel(cmd->arg & 0xffff, sock->addr + SOCK_MMCSD_ARG_LOW); writel(cmd->opcode | cmd_mask, sock->addr + SOCK_MMCSD_COMMAND); } static void tifm_sd_fetch_resp(struct mmc_command *cmd, struct tifm_dev *sock) { cmd->resp[0] = (readl(sock->addr + SOCK_MMCSD_RESPONSE + 0x1c) << 16) | 
readl(sock->addr + SOCK_MMCSD_RESPONSE + 0x18); cmd->resp[1] = (readl(sock->addr + SOCK_MMCSD_RESPONSE + 0x14) << 16) | readl(sock->addr + SOCK_MMCSD_RESPONSE + 0x10); cmd->resp[2] = (readl(sock->addr + SOCK_MMCSD_RESPONSE + 0x0c) << 16) | readl(sock->addr + SOCK_MMCSD_RESPONSE + 0x08); cmd->resp[3] = (readl(sock->addr + SOCK_MMCSD_RESPONSE + 0x04) << 16) | readl(sock->addr + SOCK_MMCSD_RESPONSE + 0x00); } static void tifm_sd_check_status(struct tifm_sd *host) { struct tifm_dev *sock = host->dev; struct mmc_command *cmd = host->req->cmd; if (cmd->error) goto finish_request; if (!(host->cmd_flags & CMD_READY)) return; if (cmd->data) { if (cmd->data->error) { if ((host->cmd_flags & SCMD_ACTIVE) && !(host->cmd_flags & SCMD_READY)) return; goto finish_request; } if (!(host->cmd_flags & BRS_READY)) return; if (!(host->no_dma || (host->cmd_flags & FIFO_READY))) return; if (cmd->data->flags & MMC_DATA_WRITE) { if (host->req->stop) { if (!(host->cmd_flags & SCMD_ACTIVE)) { host->cmd_flags |= SCMD_ACTIVE; writel(TIFM_MMCSD_EOFB | readl(sock->addr + SOCK_MMCSD_INT_ENABLE), sock->addr + SOCK_MMCSD_INT_ENABLE); tifm_sd_exec(host, host->req->stop); return; } else { if (!(host->cmd_flags & SCMD_READY) || (host->cmd_flags & CARD_BUSY)) return; writel((~TIFM_MMCSD_EOFB) & readl(sock->addr + SOCK_MMCSD_INT_ENABLE), sock->addr + SOCK_MMCSD_INT_ENABLE); } } else { if (host->cmd_flags & CARD_BUSY) return; writel((~TIFM_MMCSD_EOFB) & readl(sock->addr + SOCK_MMCSD_INT_ENABLE), sock->addr + SOCK_MMCSD_INT_ENABLE); } } else { if (host->req->stop) { if (!(host->cmd_flags & SCMD_ACTIVE)) { host->cmd_flags |= SCMD_ACTIVE; tifm_sd_exec(host, host->req->stop); return; } else { if (!(host->cmd_flags & SCMD_READY)) return; } } } } finish_request: tasklet_schedule(&host->finish_tasklet); } /* Called from interrupt handler */ static void tifm_sd_data_event(struct tifm_dev *sock) { struct tifm_sd *host; unsigned int fifo_status = 0; struct mmc_data *r_data = NULL; spin_lock(&sock->lock); host = 
mmc_priv((struct mmc_host*)tifm_get_drvdata(sock)); fifo_status = readl(sock->addr + SOCK_DMA_FIFO_STATUS); dev_dbg(&sock->dev, "data event: fifo_status %x, flags %x\n", fifo_status, host->cmd_flags); if (host->req) { r_data = host->req->cmd->data; if (r_data && (fifo_status & TIFM_FIFO_READY)) { if (tifm_sd_set_dma_data(host, r_data)) { host->cmd_flags |= FIFO_READY; tifm_sd_check_status(host); } } } writel(fifo_status, sock->addr + SOCK_DMA_FIFO_STATUS); spin_unlock(&sock->lock); } /* Called from interrupt handler */ static void tifm_sd_card_event(struct tifm_dev *sock) { struct tifm_sd *host; unsigned int host_status = 0; int cmd_error = 0; struct mmc_command *cmd = NULL; unsigned long flags; spin_lock(&sock->lock); host = mmc_priv((struct mmc_host*)tifm_get_drvdata(sock)); host_status = readl(sock->addr + SOCK_MMCSD_STATUS); dev_dbg(&sock->dev, "host event: host_status %x, flags %x\n", host_status, host->cmd_flags); if (host->req) { cmd = host->req->cmd; if (host_status & TIFM_MMCSD_ERRMASK) { writel(host_status & TIFM_MMCSD_ERRMASK, sock->addr + SOCK_MMCSD_STATUS); if (host_status & TIFM_MMCSD_CTO) cmd_error = -ETIMEDOUT; else if (host_status & TIFM_MMCSD_CCRC) cmd_error = -EILSEQ; if (cmd->data) { if (host_status & TIFM_MMCSD_DTO) cmd->data->error = -ETIMEDOUT; else if (host_status & TIFM_MMCSD_DCRC) cmd->data->error = -EILSEQ; } writel(TIFM_FIFO_INT_SETALL, sock->addr + SOCK_DMA_FIFO_INT_ENABLE_CLEAR); writel(TIFM_DMA_RESET, sock->addr + SOCK_DMA_CONTROL); if (host->req->stop) { if (host->cmd_flags & SCMD_ACTIVE) { host->req->stop->error = cmd_error; host->cmd_flags |= SCMD_READY; } else { cmd->error = cmd_error; host->cmd_flags |= SCMD_ACTIVE; tifm_sd_exec(host, host->req->stop); goto done; } } else cmd->error = cmd_error; } else { if (host_status & (TIFM_MMCSD_EOC | TIFM_MMCSD_CERR)) { if (!(host->cmd_flags & CMD_READY)) { host->cmd_flags |= CMD_READY; tifm_sd_fetch_resp(cmd, sock); } else if (host->cmd_flags & SCMD_ACTIVE) { host->cmd_flags |= SCMD_READY; 
tifm_sd_fetch_resp(host->req->stop, sock); } } if (host_status & TIFM_MMCSD_BRS) host->cmd_flags |= BRS_READY; } if (host->no_dma && cmd->data) { if (host_status & TIFM_MMCSD_AE) writel(host_status & TIFM_MMCSD_AE, sock->addr + SOCK_MMCSD_STATUS); if (host_status & (TIFM_MMCSD_AE | TIFM_MMCSD_AF | TIFM_MMCSD_BRS)) { local_irq_save(flags); tifm_sd_transfer_data(host); local_irq_restore(flags); host_status &= ~TIFM_MMCSD_AE; } } if (host_status & TIFM_MMCSD_EOFB) host->cmd_flags &= ~CARD_BUSY; else if (host_status & TIFM_MMCSD_CB) host->cmd_flags |= CARD_BUSY; tifm_sd_check_status(host); } done: writel(host_status, sock->addr + SOCK_MMCSD_STATUS); spin_unlock(&sock->lock); } static void tifm_sd_set_data_timeout(struct tifm_sd *host, struct mmc_data *data) { struct tifm_dev *sock = host->dev; unsigned int data_timeout = data->timeout_clks; if (fixed_timeout) return; data_timeout += data->timeout_ns / ((1000000000UL / host->clk_freq) * host->clk_div); if (data_timeout < 0xffff) { writel(data_timeout, sock->addr + SOCK_MMCSD_DATA_TO); writel((~TIFM_MMCSD_DPE) & readl(sock->addr + SOCK_MMCSD_SDIO_MODE_CONFIG), sock->addr + SOCK_MMCSD_SDIO_MODE_CONFIG); } else { data_timeout = (data_timeout >> 10) + 1; if (data_timeout > 0xffff) data_timeout = 0; /* set to unlimited */ writel(data_timeout, sock->addr + SOCK_MMCSD_DATA_TO); writel(TIFM_MMCSD_DPE | readl(sock->addr + SOCK_MMCSD_SDIO_MODE_CONFIG), sock->addr + SOCK_MMCSD_SDIO_MODE_CONFIG); } } static void tifm_sd_request(struct mmc_host *mmc, struct mmc_request *mrq) { struct tifm_sd *host = mmc_priv(mmc); struct tifm_dev *sock = host->dev; unsigned long flags; struct mmc_data *r_data = mrq->cmd->data; spin_lock_irqsave(&sock->lock, flags); if (host->eject) { mrq->cmd->error = -ENOMEDIUM; goto err_out; } if (host->req) { printk(KERN_ERR "%s : unfinished request detected\n", dev_name(&sock->dev)); mrq->cmd->error = -ETIMEDOUT; goto err_out; } host->cmd_flags = 0; host->block_pos = 0; host->sg_pos = 0; if (mrq->data && 
!is_power_of_2(mrq->data->blksz)) host->no_dma = 1; else host->no_dma = no_dma ? 1 : 0; if (r_data) { tifm_sd_set_data_timeout(host, r_data); if ((r_data->flags & MMC_DATA_WRITE) && !mrq->stop) writel(TIFM_MMCSD_EOFB | readl(sock->addr + SOCK_MMCSD_INT_ENABLE), sock->addr + SOCK_MMCSD_INT_ENABLE); if (host->no_dma) { writel(TIFM_MMCSD_BUFINT | readl(sock->addr + SOCK_MMCSD_INT_ENABLE), sock->addr + SOCK_MMCSD_INT_ENABLE); writel(((TIFM_MMCSD_FIFO_SIZE - 1) << 8) | (TIFM_MMCSD_FIFO_SIZE - 1), sock->addr + SOCK_MMCSD_BUFFER_CONFIG); host->sg_len = r_data->sg_len; } else { sg_init_one(&host->bounce_buf, host->bounce_buf_data, r_data->blksz); if(1 != tifm_map_sg(sock, &host->bounce_buf, 1, r_data->flags & MMC_DATA_WRITE ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE)) { printk(KERN_ERR "%s : scatterlist map failed\n", dev_name(&sock->dev)); mrq->cmd->error = -ENOMEM; goto err_out; } host->sg_len = tifm_map_sg(sock, r_data->sg, r_data->sg_len, r_data->flags & MMC_DATA_WRITE ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE); if (host->sg_len < 1) { printk(KERN_ERR "%s : scatterlist map failed\n", dev_name(&sock->dev)); tifm_unmap_sg(sock, &host->bounce_buf, 1, r_data->flags & MMC_DATA_WRITE ? 
PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE); mrq->cmd->error = -ENOMEM; goto err_out; } writel(TIFM_FIFO_INT_SETALL, sock->addr + SOCK_DMA_FIFO_INT_ENABLE_CLEAR); writel(ilog2(r_data->blksz) - 2, sock->addr + SOCK_FIFO_PAGE_SIZE); writel(TIFM_FIFO_ENABLE, sock->addr + SOCK_FIFO_CONTROL); writel(TIFM_FIFO_INTMASK, sock->addr + SOCK_DMA_FIFO_INT_ENABLE_SET); if (r_data->flags & MMC_DATA_WRITE) writel(TIFM_MMCSD_TXDE, sock->addr + SOCK_MMCSD_BUFFER_CONFIG); else writel(TIFM_MMCSD_RXDE, sock->addr + SOCK_MMCSD_BUFFER_CONFIG); tifm_sd_set_dma_data(host, r_data); } writel(r_data->blocks - 1, sock->addr + SOCK_MMCSD_NUM_BLOCKS); writel(r_data->blksz - 1, sock->addr + SOCK_MMCSD_BLOCK_LEN); } host->req = mrq; mod_timer(&host->timer, jiffies + host->timeout_jiffies); writel(TIFM_CTRL_LED | readl(sock->addr + SOCK_CONTROL), sock->addr + SOCK_CONTROL); tifm_sd_exec(host, mrq->cmd); spin_unlock_irqrestore(&sock->lock, flags); return; err_out: spin_unlock_irqrestore(&sock->lock, flags); mmc_request_done(mmc, mrq); } static void tifm_sd_end_cmd(unsigned long data) { struct tifm_sd *host = (struct tifm_sd*)data; struct tifm_dev *sock = host->dev; struct mmc_host *mmc = tifm_get_drvdata(sock); struct mmc_request *mrq; struct mmc_data *r_data = NULL; unsigned long flags; spin_lock_irqsave(&sock->lock, flags); del_timer(&host->timer); mrq = host->req; host->req = NULL; if (!mrq) { printk(KERN_ERR " %s : no request to complete?\n", dev_name(&sock->dev)); spin_unlock_irqrestore(&sock->lock, flags); return; } r_data = mrq->cmd->data; if (r_data) { if (host->no_dma) { writel((~TIFM_MMCSD_BUFINT) & readl(sock->addr + SOCK_MMCSD_INT_ENABLE), sock->addr + SOCK_MMCSD_INT_ENABLE); } else { tifm_unmap_sg(sock, &host->bounce_buf, 1, (r_data->flags & MMC_DATA_WRITE) ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE); tifm_unmap_sg(sock, r_data->sg, r_data->sg_len, (r_data->flags & MMC_DATA_WRITE) ? 
PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE); } r_data->bytes_xfered = r_data->blocks - readl(sock->addr + SOCK_MMCSD_NUM_BLOCKS) - 1; r_data->bytes_xfered *= r_data->blksz; r_data->bytes_xfered += r_data->blksz - readl(sock->addr + SOCK_MMCSD_BLOCK_LEN) + 1; } writel((~TIFM_CTRL_LED) & readl(sock->addr + SOCK_CONTROL), sock->addr + SOCK_CONTROL); spin_unlock_irqrestore(&sock->lock, flags); mmc_request_done(mmc, mrq); } static void tifm_sd_abort(unsigned long data) { struct tifm_sd *host = (struct tifm_sd*)data; printk(KERN_ERR "%s : card failed to respond for a long period of time " "(%x, %x)\n", dev_name(&host->dev->dev), host->req->cmd->opcode, host->cmd_flags); tifm_eject(host->dev); } static void tifm_sd_ios(struct mmc_host *mmc, struct mmc_ios *ios) { struct tifm_sd *host = mmc_priv(mmc); struct tifm_dev *sock = host->dev; unsigned int clk_div1, clk_div2; unsigned long flags; spin_lock_irqsave(&sock->lock, flags); dev_dbg(&sock->dev, "ios: clock = %u, vdd = %x, bus_mode = %x, " "chip_select = %x, power_mode = %x, bus_width = %x\n", ios->clock, ios->vdd, ios->bus_mode, ios->chip_select, ios->power_mode, ios->bus_width); if (ios->bus_width == MMC_BUS_WIDTH_4) { writel(TIFM_MMCSD_4BBUS | readl(sock->addr + SOCK_MMCSD_CONFIG), sock->addr + SOCK_MMCSD_CONFIG); } else { writel((~TIFM_MMCSD_4BBUS) & readl(sock->addr + SOCK_MMCSD_CONFIG), sock->addr + SOCK_MMCSD_CONFIG); } if (ios->clock) { clk_div1 = 20000000 / ios->clock; if (!clk_div1) clk_div1 = 1; clk_div2 = 24000000 / ios->clock; if (!clk_div2) clk_div2 = 1; if ((20000000 / clk_div1) > ios->clock) clk_div1++; if ((24000000 / clk_div2) > ios->clock) clk_div2++; if ((20000000 / clk_div1) > (24000000 / clk_div2)) { host->clk_freq = 20000000; host->clk_div = clk_div1; writel((~TIFM_CTRL_FAST_CLK) & readl(sock->addr + SOCK_CONTROL), sock->addr + SOCK_CONTROL); } else { host->clk_freq = 24000000; host->clk_div = clk_div2; writel(TIFM_CTRL_FAST_CLK | readl(sock->addr + SOCK_CONTROL), sock->addr + SOCK_CONTROL); } } else { 
host->clk_div = 0; } host->clk_div &= TIFM_MMCSD_CLKMASK; writel(host->clk_div | ((~TIFM_MMCSD_CLKMASK) & readl(sock->addr + SOCK_MMCSD_CONFIG)), sock->addr + SOCK_MMCSD_CONFIG); host->open_drain = (ios->bus_mode == MMC_BUSMODE_OPENDRAIN); /* chip_select : maybe later */ //vdd //power is set before probe / after remove spin_unlock_irqrestore(&sock->lock, flags); } static int tifm_sd_ro(struct mmc_host *mmc) { int rc = 0; struct tifm_sd *host = mmc_priv(mmc); struct tifm_dev *sock = host->dev; unsigned long flags; spin_lock_irqsave(&sock->lock, flags); if (TIFM_MMCSD_CARD_RO & readl(sock->addr + SOCK_PRESENT_STATE)) rc = 1; spin_unlock_irqrestore(&sock->lock, flags); return rc; } static const struct mmc_host_ops tifm_sd_ops = { .request = tifm_sd_request, .set_ios = tifm_sd_ios, .get_ro = tifm_sd_ro }; static int tifm_sd_initialize_host(struct tifm_sd *host) { int rc; unsigned int host_status = 0; struct tifm_dev *sock = host->dev; writel(0, sock->addr + SOCK_MMCSD_INT_ENABLE); mmiowb(); host->clk_div = 61; host->clk_freq = 20000000; writel(TIFM_MMCSD_RESET, sock->addr + SOCK_MMCSD_SYSTEM_CONTROL); writel(host->clk_div | TIFM_MMCSD_POWER, sock->addr + SOCK_MMCSD_CONFIG); /* wait up to 0.51 sec for reset */ for (rc = 32; rc <= 256; rc <<= 1) { if (1 & readl(sock->addr + SOCK_MMCSD_SYSTEM_STATUS)) { rc = 0; break; } msleep(rc); } if (rc) { printk(KERN_ERR "%s : controller failed to reset\n", dev_name(&sock->dev)); return -ENODEV; } writel(0, sock->addr + SOCK_MMCSD_NUM_BLOCKS); writel(host->clk_div | TIFM_MMCSD_POWER, sock->addr + SOCK_MMCSD_CONFIG); writel(TIFM_MMCSD_RXDE, sock->addr + SOCK_MMCSD_BUFFER_CONFIG); // command timeout fixed to 64 clocks for now writel(64, sock->addr + SOCK_MMCSD_COMMAND_TO); writel(TIFM_MMCSD_INAB, sock->addr + SOCK_MMCSD_COMMAND); for (rc = 16; rc <= 64; rc <<= 1) { host_status = readl(sock->addr + SOCK_MMCSD_STATUS); writel(host_status, sock->addr + SOCK_MMCSD_STATUS); if (!(host_status & TIFM_MMCSD_ERRMASK) && (host_status & 
TIFM_MMCSD_EOC)) { rc = 0; break; } msleep(rc); } if (rc) { printk(KERN_ERR "%s : card not ready - probe failed on initialization\n", dev_name(&sock->dev)); return -ENODEV; } writel(TIFM_MMCSD_CERR | TIFM_MMCSD_BRS | TIFM_MMCSD_EOC | TIFM_MMCSD_ERRMASK, sock->addr + SOCK_MMCSD_INT_ENABLE); mmiowb(); return 0; } static int tifm_sd_probe(struct tifm_dev *sock) { struct mmc_host *mmc; struct tifm_sd *host; int rc = -EIO; if (!(TIFM_SOCK_STATE_OCCUPIED & readl(sock->addr + SOCK_PRESENT_STATE))) { printk(KERN_WARNING "%s : card gone, unexpectedly\n", dev_name(&sock->dev)); return rc; } mmc = mmc_alloc_host(sizeof(struct tifm_sd), &sock->dev); if (!mmc) return -ENOMEM; host = mmc_priv(mmc); tifm_set_drvdata(sock, mmc); host->dev = sock; host->timeout_jiffies = msecs_to_jiffies(1000); tasklet_init(&host->finish_tasklet, tifm_sd_end_cmd, (unsigned long)host); setup_timer(&host->timer, tifm_sd_abort, (unsigned long)host); mmc->ops = &tifm_sd_ops; mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; mmc->caps = MMC_CAP_4_BIT_DATA; mmc->f_min = 20000000 / 60; mmc->f_max = 24000000; mmc->max_blk_count = 2048; mmc->max_hw_segs = mmc->max_blk_count; mmc->max_blk_size = min(TIFM_MMCSD_MAX_BLOCK_SIZE, PAGE_SIZE); mmc->max_seg_size = mmc->max_blk_count * mmc->max_blk_size; mmc->max_req_size = mmc->max_seg_size; mmc->max_phys_segs = mmc->max_hw_segs; sock->card_event = tifm_sd_card_event; sock->data_event = tifm_sd_data_event; rc = tifm_sd_initialize_host(host); if (!rc) rc = mmc_add_host(mmc); if (!rc) return 0; mmc_free_host(mmc); return rc; } static void tifm_sd_remove(struct tifm_dev *sock) { struct mmc_host *mmc = tifm_get_drvdata(sock); struct tifm_sd *host = mmc_priv(mmc); unsigned long flags; spin_lock_irqsave(&sock->lock, flags); host->eject = 1; writel(0, sock->addr + SOCK_MMCSD_INT_ENABLE); mmiowb(); spin_unlock_irqrestore(&sock->lock, flags); tasklet_kill(&host->finish_tasklet); spin_lock_irqsave(&sock->lock, flags); if (host->req) { writel(TIFM_FIFO_INT_SETALL, sock->addr + 
SOCK_DMA_FIFO_INT_ENABLE_CLEAR); writel(0, sock->addr + SOCK_DMA_FIFO_INT_ENABLE_SET); host->req->cmd->error = -ENOMEDIUM; if (host->req->stop) host->req->stop->error = -ENOMEDIUM; tasklet_schedule(&host->finish_tasklet); } spin_unlock_irqrestore(&sock->lock, flags); mmc_remove_host(mmc); dev_dbg(&sock->dev, "after remove\n"); mmc_free_host(mmc); } #ifdef CONFIG_PM static int tifm_sd_suspend(struct tifm_dev *sock, pm_message_t state) { return mmc_suspend_host(tifm_get_drvdata(sock)); } static int tifm_sd_resume(struct tifm_dev *sock) { struct mmc_host *mmc = tifm_get_drvdata(sock); struct tifm_sd *host = mmc_priv(mmc); int rc; rc = tifm_sd_initialize_host(host); dev_dbg(&sock->dev, "resume initialize %d\n", rc); if (rc) host->eject = 1; else rc = mmc_resume_host(mmc); return rc; } #else #define tifm_sd_suspend NULL #define tifm_sd_resume NULL #endif /* CONFIG_PM */ static struct tifm_device_id tifm_sd_id_tbl[] = { { TIFM_TYPE_SD }, { } }; static struct tifm_driver tifm_sd_driver = { .driver = { .name = DRIVER_NAME, .owner = THIS_MODULE }, .id_table = tifm_sd_id_tbl, .probe = tifm_sd_probe, .remove = tifm_sd_remove, .suspend = tifm_sd_suspend, .resume = tifm_sd_resume }; static int __init tifm_sd_init(void) { return tifm_register_driver(&tifm_sd_driver); } static void __exit tifm_sd_exit(void) { tifm_unregister_driver(&tifm_sd_driver); } MODULE_AUTHOR("Alex Dubov"); MODULE_DESCRIPTION("TI FlashMedia SD driver"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(tifm, tifm_sd_id_tbl); MODULE_VERSION(DRIVER_VERSION); module_init(tifm_sd_init); module_exit(tifm_sd_exit);
gpl-2.0
NeverLEX/linux
net/netfilter/xt_connmark.c
1911
4464
/* * xt_connmark - Netfilter module to operate on connection marks * * Copyright (C) 2002,2004 MARA Systems AB <http://www.marasystems.com> * by Henrik Nordstrom <hno@marasystems.com> * Copyright © CC Computer Consultants GmbH, 2007 - 2008 * Jan Engelhardt <jengelh@medozas.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, see <http://www.gnu.org/licenses/>. */ #include <linux/module.h> #include <linux/skbuff.h> #include <net/netfilter/nf_conntrack.h> #include <net/netfilter/nf_conntrack_ecache.h> #include <linux/netfilter/x_tables.h> #include <linux/netfilter/xt_connmark.h> MODULE_AUTHOR("Henrik Nordstrom <hno@marasystems.com>"); MODULE_DESCRIPTION("Xtables: connection mark operations"); MODULE_LICENSE("GPL"); MODULE_ALIAS("ipt_CONNMARK"); MODULE_ALIAS("ip6t_CONNMARK"); MODULE_ALIAS("ipt_connmark"); MODULE_ALIAS("ip6t_connmark"); static unsigned int connmark_tg(struct sk_buff *skb, const struct xt_action_param *par) { const struct xt_connmark_tginfo1 *info = par->targinfo; enum ip_conntrack_info ctinfo; struct nf_conn *ct; u_int32_t newmark; ct = nf_ct_get(skb, &ctinfo); if (ct == NULL) return XT_CONTINUE; switch (info->mode) { case XT_CONNMARK_SET: newmark = (ct->mark & ~info->ctmask) ^ info->ctmark; if (ct->mark != newmark) { ct->mark = newmark; nf_conntrack_event_cache(IPCT_MARK, ct); } break; case XT_CONNMARK_SAVE: newmark = (ct->mark & ~info->ctmask) ^ (skb->mark & info->nfmask); if (ct->mark != newmark) { ct->mark = 
newmark; nf_conntrack_event_cache(IPCT_MARK, ct); } break; case XT_CONNMARK_RESTORE: newmark = (skb->mark & ~info->nfmask) ^ (ct->mark & info->ctmask); skb->mark = newmark; break; } return XT_CONTINUE; } static int connmark_tg_check(const struct xt_tgchk_param *par) { int ret; ret = nf_ct_l3proto_try_module_get(par->family); if (ret < 0) pr_info("cannot load conntrack support for proto=%u\n", par->family); return ret; } static void connmark_tg_destroy(const struct xt_tgdtor_param *par) { nf_ct_l3proto_module_put(par->family); } static bool connmark_mt(const struct sk_buff *skb, struct xt_action_param *par) { const struct xt_connmark_mtinfo1 *info = par->matchinfo; enum ip_conntrack_info ctinfo; const struct nf_conn *ct; ct = nf_ct_get(skb, &ctinfo); if (ct == NULL) return false; return ((ct->mark & info->mask) == info->mark) ^ info->invert; } static int connmark_mt_check(const struct xt_mtchk_param *par) { int ret; ret = nf_ct_l3proto_try_module_get(par->family); if (ret < 0) pr_info("cannot load conntrack support for proto=%u\n", par->family); return ret; } static void connmark_mt_destroy(const struct xt_mtdtor_param *par) { nf_ct_l3proto_module_put(par->family); } static struct xt_target connmark_tg_reg __read_mostly = { .name = "CONNMARK", .revision = 1, .family = NFPROTO_UNSPEC, .checkentry = connmark_tg_check, .target = connmark_tg, .targetsize = sizeof(struct xt_connmark_tginfo1), .destroy = connmark_tg_destroy, .me = THIS_MODULE, }; static struct xt_match connmark_mt_reg __read_mostly = { .name = "connmark", .revision = 1, .family = NFPROTO_UNSPEC, .checkentry = connmark_mt_check, .match = connmark_mt, .matchsize = sizeof(struct xt_connmark_mtinfo1), .destroy = connmark_mt_destroy, .me = THIS_MODULE, }; static int __init connmark_mt_init(void) { int ret; ret = xt_register_target(&connmark_tg_reg); if (ret < 0) return ret; ret = xt_register_match(&connmark_mt_reg); if (ret < 0) { xt_unregister_target(&connmark_tg_reg); return ret; } return 0; } static void 
__exit connmark_mt_exit(void) { xt_unregister_match(&connmark_mt_reg); xt_unregister_target(&connmark_tg_reg); } module_init(connmark_mt_init); module_exit(connmark_mt_exit);
gpl-2.0
cbolumar/android_kernel_samsung_msm8916
fs/minix/dir.c
2167
11450
/* * linux/fs/minix/dir.c * * Copyright (C) 1991, 1992 Linus Torvalds * * minix directory handling functions * * Updated to filesystem version 3 by Daniel Aragones */ #include "minix.h" #include <linux/buffer_head.h> #include <linux/highmem.h> #include <linux/swap.h> typedef struct minix_dir_entry minix_dirent; typedef struct minix3_dir_entry minix3_dirent; static int minix_readdir(struct file *, void *, filldir_t); const struct file_operations minix_dir_operations = { .llseek = generic_file_llseek, .read = generic_read_dir, .readdir = minix_readdir, .fsync = generic_file_fsync, }; static inline void dir_put_page(struct page *page) { kunmap(page); page_cache_release(page); } /* * Return the offset into page `page_nr' of the last valid * byte in that page, plus one. */ static unsigned minix_last_byte(struct inode *inode, unsigned long page_nr) { unsigned last_byte = PAGE_CACHE_SIZE; if (page_nr == (inode->i_size >> PAGE_CACHE_SHIFT)) last_byte = inode->i_size & (PAGE_CACHE_SIZE - 1); return last_byte; } static inline unsigned long dir_pages(struct inode *inode) { return (inode->i_size+PAGE_CACHE_SIZE-1)>>PAGE_CACHE_SHIFT; } static int dir_commit_chunk(struct page *page, loff_t pos, unsigned len) { struct address_space *mapping = page->mapping; struct inode *dir = mapping->host; int err = 0; block_write_end(NULL, mapping, pos, len, len, page, NULL); if (pos+len > dir->i_size) { i_size_write(dir, pos+len); mark_inode_dirty(dir); } if (IS_DIRSYNC(dir)) err = write_one_page(page, 1); else unlock_page(page); return err; } static struct page * dir_get_page(struct inode *dir, unsigned long n) { struct address_space *mapping = dir->i_mapping; struct page *page = read_mapping_page(mapping, n, NULL); if (!IS_ERR(page)) kmap(page); return page; } static inline void *minix_next_entry(void *de, struct minix_sb_info *sbi) { return (void*)((char*)de + sbi->s_dirsize); } static int minix_readdir(struct file * filp, void * dirent, filldir_t filldir) { unsigned long pos = 
filp->f_pos; struct inode *inode = file_inode(filp); struct super_block *sb = inode->i_sb; unsigned offset = pos & ~PAGE_CACHE_MASK; unsigned long n = pos >> PAGE_CACHE_SHIFT; unsigned long npages = dir_pages(inode); struct minix_sb_info *sbi = minix_sb(sb); unsigned chunk_size = sbi->s_dirsize; char *name; __u32 inumber; pos = (pos + chunk_size-1) & ~(chunk_size-1); if (pos >= inode->i_size) goto done; for ( ; n < npages; n++, offset = 0) { char *p, *kaddr, *limit; struct page *page = dir_get_page(inode, n); if (IS_ERR(page)) continue; kaddr = (char *)page_address(page); p = kaddr+offset; limit = kaddr + minix_last_byte(inode, n) - chunk_size; for ( ; p <= limit; p = minix_next_entry(p, sbi)) { if (sbi->s_version == MINIX_V3) { minix3_dirent *de3 = (minix3_dirent *)p; name = de3->name; inumber = de3->inode; } else { minix_dirent *de = (minix_dirent *)p; name = de->name; inumber = de->inode; } if (inumber) { int over; unsigned l = strnlen(name, sbi->s_namelen); offset = p - kaddr; over = filldir(dirent, name, l, (n << PAGE_CACHE_SHIFT) | offset, inumber, DT_UNKNOWN); if (over) { dir_put_page(page); goto done; } } } dir_put_page(page); } done: filp->f_pos = (n << PAGE_CACHE_SHIFT) | offset; return 0; } static inline int namecompare(int len, int maxlen, const char * name, const char * buffer) { if (len < maxlen && buffer[len]) return 0; return !memcmp(name, buffer, len); } /* * minix_find_entry() * * finds an entry in the specified directory with the wanted name. It * returns the cache buffer in which the entry was found, and the entry * itself (as a parameter - res_dir). It does NOT read the inode of the * entry - you'll have to do that yourself if you want to. 
*/ minix_dirent *minix_find_entry(struct dentry *dentry, struct page **res_page) { const char * name = dentry->d_name.name; int namelen = dentry->d_name.len; struct inode * dir = dentry->d_parent->d_inode; struct super_block * sb = dir->i_sb; struct minix_sb_info * sbi = minix_sb(sb); unsigned long n; unsigned long npages = dir_pages(dir); struct page *page = NULL; char *p; char *namx; __u32 inumber; *res_page = NULL; for (n = 0; n < npages; n++) { char *kaddr, *limit; page = dir_get_page(dir, n); if (IS_ERR(page)) continue; kaddr = (char*)page_address(page); limit = kaddr + minix_last_byte(dir, n) - sbi->s_dirsize; for (p = kaddr; p <= limit; p = minix_next_entry(p, sbi)) { if (sbi->s_version == MINIX_V3) { minix3_dirent *de3 = (minix3_dirent *)p; namx = de3->name; inumber = de3->inode; } else { minix_dirent *de = (minix_dirent *)p; namx = de->name; inumber = de->inode; } if (!inumber) continue; if (namecompare(namelen, sbi->s_namelen, name, namx)) goto found; } dir_put_page(page); } return NULL; found: *res_page = page; return (minix_dirent *)p; } int minix_add_link(struct dentry *dentry, struct inode *inode) { struct inode *dir = dentry->d_parent->d_inode; const char * name = dentry->d_name.name; int namelen = dentry->d_name.len; struct super_block * sb = dir->i_sb; struct minix_sb_info * sbi = minix_sb(sb); struct page *page = NULL; unsigned long npages = dir_pages(dir); unsigned long n; char *kaddr, *p; minix_dirent *de; minix3_dirent *de3; loff_t pos; int err; char *namx = NULL; __u32 inumber; /* * We take care of directory expansion in the same loop * This code plays outside i_size, so it locks the page * to protect that region. 
*/ for (n = 0; n <= npages; n++) { char *limit, *dir_end; page = dir_get_page(dir, n); err = PTR_ERR(page); if (IS_ERR(page)) goto out; lock_page(page); kaddr = (char*)page_address(page); dir_end = kaddr + minix_last_byte(dir, n); limit = kaddr + PAGE_CACHE_SIZE - sbi->s_dirsize; for (p = kaddr; p <= limit; p = minix_next_entry(p, sbi)) { de = (minix_dirent *)p; de3 = (minix3_dirent *)p; if (sbi->s_version == MINIX_V3) { namx = de3->name; inumber = de3->inode; } else { namx = de->name; inumber = de->inode; } if (p == dir_end) { /* We hit i_size */ if (sbi->s_version == MINIX_V3) de3->inode = 0; else de->inode = 0; goto got_it; } if (!inumber) goto got_it; err = -EEXIST; if (namecompare(namelen, sbi->s_namelen, name, namx)) goto out_unlock; } unlock_page(page); dir_put_page(page); } BUG(); return -EINVAL; got_it: pos = page_offset(page) + p - (char *)page_address(page); err = minix_prepare_chunk(page, pos, sbi->s_dirsize); if (err) goto out_unlock; memcpy (namx, name, namelen); if (sbi->s_version == MINIX_V3) { memset (namx + namelen, 0, sbi->s_dirsize - namelen - 4); de3->inode = inode->i_ino; } else { memset (namx + namelen, 0, sbi->s_dirsize - namelen - 2); de->inode = inode->i_ino; } err = dir_commit_chunk(page, pos, sbi->s_dirsize); dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC; mark_inode_dirty(dir); out_put: dir_put_page(page); out: return err; out_unlock: unlock_page(page); goto out_put; } int minix_delete_entry(struct minix_dir_entry *de, struct page *page) { struct inode *inode = page->mapping->host; char *kaddr = page_address(page); loff_t pos = page_offset(page) + (char*)de - kaddr; struct minix_sb_info *sbi = minix_sb(inode->i_sb); unsigned len = sbi->s_dirsize; int err; lock_page(page); err = minix_prepare_chunk(page, pos, len); if (err == 0) { if (sbi->s_version == MINIX_V3) ((minix3_dirent *) de)->inode = 0; else de->inode = 0; err = dir_commit_chunk(page, pos, len); } else { unlock_page(page); } dir_put_page(page); inode->i_ctime = inode->i_mtime = 
CURRENT_TIME_SEC; mark_inode_dirty(inode); return err; } int minix_make_empty(struct inode *inode, struct inode *dir) { struct page *page = grab_cache_page(inode->i_mapping, 0); struct minix_sb_info *sbi = minix_sb(inode->i_sb); char *kaddr; int err; if (!page) return -ENOMEM; err = minix_prepare_chunk(page, 0, 2 * sbi->s_dirsize); if (err) { unlock_page(page); goto fail; } kaddr = kmap_atomic(page); memset(kaddr, 0, PAGE_CACHE_SIZE); if (sbi->s_version == MINIX_V3) { minix3_dirent *de3 = (minix3_dirent *)kaddr; de3->inode = inode->i_ino; strcpy(de3->name, "."); de3 = minix_next_entry(de3, sbi); de3->inode = dir->i_ino; strcpy(de3->name, ".."); } else { minix_dirent *de = (minix_dirent *)kaddr; de->inode = inode->i_ino; strcpy(de->name, "."); de = minix_next_entry(de, sbi); de->inode = dir->i_ino; strcpy(de->name, ".."); } kunmap_atomic(kaddr); err = dir_commit_chunk(page, 0, 2 * sbi->s_dirsize); fail: page_cache_release(page); return err; } /* * routine to check that the specified directory is empty (for rmdir) */ int minix_empty_dir(struct inode * inode) { struct page *page = NULL; unsigned long i, npages = dir_pages(inode); struct minix_sb_info *sbi = minix_sb(inode->i_sb); char *name; __u32 inumber; for (i = 0; i < npages; i++) { char *p, *kaddr, *limit; page = dir_get_page(inode, i); if (IS_ERR(page)) continue; kaddr = (char *)page_address(page); limit = kaddr + minix_last_byte(inode, i) - sbi->s_dirsize; for (p = kaddr; p <= limit; p = minix_next_entry(p, sbi)) { if (sbi->s_version == MINIX_V3) { minix3_dirent *de3 = (minix3_dirent *)p; name = de3->name; inumber = de3->inode; } else { minix_dirent *de = (minix_dirent *)p; name = de->name; inumber = de->inode; } if (inumber != 0) { /* check for . and .. 
*/ if (name[0] != '.') goto not_empty; if (!name[1]) { if (inumber != inode->i_ino) goto not_empty; } else if (name[1] != '.') goto not_empty; else if (name[2]) goto not_empty; } } dir_put_page(page); } return 1; not_empty: dir_put_page(page); return 0; } /* Releases the page */ void minix_set_link(struct minix_dir_entry *de, struct page *page, struct inode *inode) { struct inode *dir = page->mapping->host; struct minix_sb_info *sbi = minix_sb(dir->i_sb); loff_t pos = page_offset(page) + (char *)de-(char*)page_address(page); int err; lock_page(page); err = minix_prepare_chunk(page, pos, sbi->s_dirsize); if (err == 0) { if (sbi->s_version == MINIX_V3) ((minix3_dirent *) de)->inode = inode->i_ino; else de->inode = inode->i_ino; err = dir_commit_chunk(page, pos, sbi->s_dirsize); } else { unlock_page(page); } dir_put_page(page); dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC; mark_inode_dirty(dir); } struct minix_dir_entry * minix_dotdot (struct inode *dir, struct page **p) { struct page *page = dir_get_page(dir, 0); struct minix_sb_info *sbi = minix_sb(dir->i_sb); struct minix_dir_entry *de = NULL; if (!IS_ERR(page)) { de = minix_next_entry(page_address(page), sbi); *p = page; } return de; } ino_t minix_inode_by_name(struct dentry *dentry) { struct page *page; struct minix_dir_entry *de = minix_find_entry(dentry, &page); ino_t res = 0; if (de) { struct address_space *mapping = page->mapping; struct inode *inode = mapping->host; struct minix_sb_info *sbi = minix_sb(inode->i_sb); if (sbi->s_version == MINIX_V3) res = ((minix3_dirent *) de)->inode; else res = de->inode; dir_put_page(page); } return res; }
gpl-2.0
motley-git/Kernel-Nexus7
drivers/gpu/drm/drm_memory.c
2679
4821
/** * \file drm_memory.c * Memory management wrappers for DRM * * \author Rickard E. (Rik) Faith <faith@valinux.com> * \author Gareth Hughes <gareth@valinux.com> */ /* * Created: Thu Feb 4 14:00:34 1999 by faith@valinux.com * * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. */ #include <linux/highmem.h> #include "drmP.h" /** * Called when "/proc/dri/%dev%/mem" is read. * * \param buf output buffer. * \param start start of output data. * \param offset requested start offset. * \param len requested number of bytes. * \param eof whether there is no more data to return. * \param data private data. * \return number of written bytes. * * No-op. 
*/ int drm_mem_info(char *buf, char **start, off_t offset, int len, int *eof, void *data) { return 0; } #if __OS_HAS_AGP static void *agp_remap(unsigned long offset, unsigned long size, struct drm_device * dev) { unsigned long i, num_pages = PAGE_ALIGN(size) / PAGE_SIZE; struct drm_agp_mem *agpmem; struct page **page_map; struct page **phys_page_map; void *addr; size = PAGE_ALIGN(size); #ifdef __alpha__ offset -= dev->hose->mem_space->start; #endif list_for_each_entry(agpmem, &dev->agp->memory, head) if (agpmem->bound <= offset && (agpmem->bound + (agpmem->pages << PAGE_SHIFT)) >= (offset + size)) break; if (&agpmem->head == &dev->agp->memory) return NULL; /* * OK, we're mapping AGP space on a chipset/platform on which memory accesses by * the CPU do not get remapped by the GART. We fix this by using the kernel's * page-table instead (that's probably faster anyhow...). */ /* note: use vmalloc() because num_pages could be large... */ page_map = vmalloc(num_pages * sizeof(struct page *)); if (!page_map) return NULL; phys_page_map = (agpmem->memory->pages + (offset - agpmem->bound) / PAGE_SIZE); for (i = 0; i < num_pages; ++i) page_map[i] = phys_page_map[i]; addr = vmap(page_map, num_pages, VM_IOREMAP, PAGE_AGP); vfree(page_map); return addr; } /** Wrapper around agp_free_memory() */ void drm_free_agp(DRM_AGP_MEM * handle, int pages) { agp_free_memory(handle); } EXPORT_SYMBOL(drm_free_agp); /** Wrapper around agp_bind_memory() */ int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start) { return agp_bind_memory(handle, start); } /** Wrapper around agp_unbind_memory() */ int drm_unbind_agp(DRM_AGP_MEM * handle) { return agp_unbind_memory(handle); } EXPORT_SYMBOL(drm_unbind_agp); #else /* __OS_HAS_AGP */ static inline void *agp_remap(unsigned long offset, unsigned long size, struct drm_device * dev) { return NULL; } #endif /* agp */ void drm_core_ioremap(struct drm_local_map *map, struct drm_device *dev) { if (drm_core_has_AGP(dev) && dev->agp && 
dev->agp->cant_use_aperture && map->type == _DRM_AGP) map->handle = agp_remap(map->offset, map->size, dev); else map->handle = ioremap(map->offset, map->size); } EXPORT_SYMBOL(drm_core_ioremap); void drm_core_ioremap_wc(struct drm_local_map *map, struct drm_device *dev) { if (drm_core_has_AGP(dev) && dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP) map->handle = agp_remap(map->offset, map->size, dev); else map->handle = ioremap_wc(map->offset, map->size); } EXPORT_SYMBOL(drm_core_ioremap_wc); void drm_core_ioremapfree(struct drm_local_map *map, struct drm_device *dev) { if (!map->handle || !map->size) return; if (drm_core_has_AGP(dev) && dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP) vunmap(map->handle); else iounmap(map->handle); } EXPORT_SYMBOL(drm_core_ioremapfree);
gpl-2.0
cooldroid/android_kernel_oneplus_msm8974
net/sunrpc/rpc_pipe.c
2679
29260
/* * net/sunrpc/rpc_pipe.c * * Userland/kernel interface for rpcauth_gss. * Code shamelessly plagiarized from fs/nfsd/nfsctl.c * and fs/sysfs/inode.c * * Copyright (c) 2002, Trond Myklebust <trond.myklebust@fys.uio.no> * */ #include <linux/module.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/pagemap.h> #include <linux/mount.h> #include <linux/namei.h> #include <linux/fsnotify.h> #include <linux/kernel.h> #include <linux/rcupdate.h> #include <asm/ioctls.h> #include <linux/poll.h> #include <linux/wait.h> #include <linux/seq_file.h> #include <linux/sunrpc/clnt.h> #include <linux/workqueue.h> #include <linux/sunrpc/rpc_pipe_fs.h> #include <linux/sunrpc/cache.h> #include <linux/nsproxy.h> #include <linux/notifier.h> #include "netns.h" #include "sunrpc.h" #define RPCDBG_FACILITY RPCDBG_DEBUG #define NET_NAME(net) ((net == &init_net) ? " (init_net)" : "") static struct file_system_type rpc_pipe_fs_type; static struct kmem_cache *rpc_inode_cachep __read_mostly; #define RPC_UPCALL_TIMEOUT (30*HZ) static BLOCKING_NOTIFIER_HEAD(rpc_pipefs_notifier_list); int rpc_pipefs_notifier_register(struct notifier_block *nb) { return blocking_notifier_chain_cond_register(&rpc_pipefs_notifier_list, nb); } EXPORT_SYMBOL_GPL(rpc_pipefs_notifier_register); void rpc_pipefs_notifier_unregister(struct notifier_block *nb) { blocking_notifier_chain_unregister(&rpc_pipefs_notifier_list, nb); } EXPORT_SYMBOL_GPL(rpc_pipefs_notifier_unregister); static void rpc_purge_list(wait_queue_head_t *waitq, struct list_head *head, void (*destroy_msg)(struct rpc_pipe_msg *), int err) { struct rpc_pipe_msg *msg; if (list_empty(head)) return; do { msg = list_entry(head->next, struct rpc_pipe_msg, list); list_del_init(&msg->list); msg->errno = err; destroy_msg(msg); } while (!list_empty(head)); wake_up(waitq); } static void rpc_timeout_upcall_queue(struct work_struct *work) { LIST_HEAD(free_list); struct rpc_pipe *pipe = container_of(work, struct rpc_pipe, queue_timeout.work); void 
(*destroy_msg)(struct rpc_pipe_msg *); struct dentry *dentry; spin_lock(&pipe->lock); destroy_msg = pipe->ops->destroy_msg; if (pipe->nreaders == 0) { list_splice_init(&pipe->pipe, &free_list); pipe->pipelen = 0; } dentry = dget(pipe->dentry); spin_unlock(&pipe->lock); if (dentry) { rpc_purge_list(&RPC_I(dentry->d_inode)->waitq, &free_list, destroy_msg, -ETIMEDOUT); dput(dentry); } } ssize_t rpc_pipe_generic_upcall(struct file *filp, struct rpc_pipe_msg *msg, char __user *dst, size_t buflen) { char *data = (char *)msg->data + msg->copied; size_t mlen = min(msg->len - msg->copied, buflen); unsigned long left; left = copy_to_user(dst, data, mlen); if (left == mlen) { msg->errno = -EFAULT; return -EFAULT; } mlen -= left; msg->copied += mlen; msg->errno = 0; return mlen; } EXPORT_SYMBOL_GPL(rpc_pipe_generic_upcall); /** * rpc_queue_upcall - queue an upcall message to userspace * @inode: inode of upcall pipe on which to queue given message * @msg: message to queue * * Call with an @inode created by rpc_mkpipe() to queue an upcall. * A userspace process may then later read the upcall by performing a * read on an open file for this inode. It is up to the caller to * initialize the fields of @msg (other than @msg->list) appropriately. 
*/ int rpc_queue_upcall(struct rpc_pipe *pipe, struct rpc_pipe_msg *msg) { int res = -EPIPE; struct dentry *dentry; spin_lock(&pipe->lock); if (pipe->nreaders) { list_add_tail(&msg->list, &pipe->pipe); pipe->pipelen += msg->len; res = 0; } else if (pipe->flags & RPC_PIPE_WAIT_FOR_OPEN) { if (list_empty(&pipe->pipe)) queue_delayed_work(rpciod_workqueue, &pipe->queue_timeout, RPC_UPCALL_TIMEOUT); list_add_tail(&msg->list, &pipe->pipe); pipe->pipelen += msg->len; res = 0; } dentry = dget(pipe->dentry); spin_unlock(&pipe->lock); if (dentry) { wake_up(&RPC_I(dentry->d_inode)->waitq); dput(dentry); } return res; } EXPORT_SYMBOL_GPL(rpc_queue_upcall); static inline void rpc_inode_setowner(struct inode *inode, void *private) { RPC_I(inode)->private = private; } static void rpc_close_pipes(struct inode *inode) { struct rpc_pipe *pipe = RPC_I(inode)->pipe; int need_release; LIST_HEAD(free_list); mutex_lock(&inode->i_mutex); spin_lock(&pipe->lock); need_release = pipe->nreaders != 0 || pipe->nwriters != 0; pipe->nreaders = 0; list_splice_init(&pipe->in_upcall, &free_list); list_splice_init(&pipe->pipe, &free_list); pipe->pipelen = 0; pipe->dentry = NULL; spin_unlock(&pipe->lock); rpc_purge_list(&RPC_I(inode)->waitq, &free_list, pipe->ops->destroy_msg, -EPIPE); pipe->nwriters = 0; if (need_release && pipe->ops->release_pipe) pipe->ops->release_pipe(inode); cancel_delayed_work_sync(&pipe->queue_timeout); rpc_inode_setowner(inode, NULL); RPC_I(inode)->pipe = NULL; mutex_unlock(&inode->i_mutex); } static struct inode * rpc_alloc_inode(struct super_block *sb) { struct rpc_inode *rpci; rpci = (struct rpc_inode *)kmem_cache_alloc(rpc_inode_cachep, GFP_KERNEL); if (!rpci) return NULL; return &rpci->vfs_inode; } static void rpc_i_callback(struct rcu_head *head) { struct inode *inode = container_of(head, struct inode, i_rcu); kmem_cache_free(rpc_inode_cachep, RPC_I(inode)); } static void rpc_destroy_inode(struct inode *inode) { call_rcu(&inode->i_rcu, rpc_i_callback); } static int 
rpc_pipe_open(struct inode *inode, struct file *filp) { struct rpc_pipe *pipe; int first_open; int res = -ENXIO; mutex_lock(&inode->i_mutex); pipe = RPC_I(inode)->pipe; if (pipe == NULL) goto out; first_open = pipe->nreaders == 0 && pipe->nwriters == 0; if (first_open && pipe->ops->open_pipe) { res = pipe->ops->open_pipe(inode); if (res) goto out; } if (filp->f_mode & FMODE_READ) pipe->nreaders++; if (filp->f_mode & FMODE_WRITE) pipe->nwriters++; res = 0; out: mutex_unlock(&inode->i_mutex); return res; } static int rpc_pipe_release(struct inode *inode, struct file *filp) { struct rpc_pipe *pipe; struct rpc_pipe_msg *msg; int last_close; mutex_lock(&inode->i_mutex); pipe = RPC_I(inode)->pipe; if (pipe == NULL) goto out; msg = filp->private_data; if (msg != NULL) { spin_lock(&pipe->lock); msg->errno = -EAGAIN; list_del_init(&msg->list); spin_unlock(&pipe->lock); pipe->ops->destroy_msg(msg); } if (filp->f_mode & FMODE_WRITE) pipe->nwriters --; if (filp->f_mode & FMODE_READ) { pipe->nreaders --; if (pipe->nreaders == 0) { LIST_HEAD(free_list); spin_lock(&pipe->lock); list_splice_init(&pipe->pipe, &free_list); pipe->pipelen = 0; spin_unlock(&pipe->lock); rpc_purge_list(&RPC_I(inode)->waitq, &free_list, pipe->ops->destroy_msg, -EAGAIN); } } last_close = pipe->nwriters == 0 && pipe->nreaders == 0; if (last_close && pipe->ops->release_pipe) pipe->ops->release_pipe(inode); out: mutex_unlock(&inode->i_mutex); return 0; } static ssize_t rpc_pipe_read(struct file *filp, char __user *buf, size_t len, loff_t *offset) { struct inode *inode = filp->f_path.dentry->d_inode; struct rpc_pipe *pipe; struct rpc_pipe_msg *msg; int res = 0; mutex_lock(&inode->i_mutex); pipe = RPC_I(inode)->pipe; if (pipe == NULL) { res = -EPIPE; goto out_unlock; } msg = filp->private_data; if (msg == NULL) { spin_lock(&pipe->lock); if (!list_empty(&pipe->pipe)) { msg = list_entry(pipe->pipe.next, struct rpc_pipe_msg, list); list_move(&msg->list, &pipe->in_upcall); pipe->pipelen -= msg->len; 
filp->private_data = msg; msg->copied = 0; } spin_unlock(&pipe->lock); if (msg == NULL) goto out_unlock; } /* NOTE: it is up to the callback to update msg->copied */ res = pipe->ops->upcall(filp, msg, buf, len); if (res < 0 || msg->len == msg->copied) { filp->private_data = NULL; spin_lock(&pipe->lock); list_del_init(&msg->list); spin_unlock(&pipe->lock); pipe->ops->destroy_msg(msg); } out_unlock: mutex_unlock(&inode->i_mutex); return res; } static ssize_t rpc_pipe_write(struct file *filp, const char __user *buf, size_t len, loff_t *offset) { struct inode *inode = filp->f_path.dentry->d_inode; int res; mutex_lock(&inode->i_mutex); res = -EPIPE; if (RPC_I(inode)->pipe != NULL) res = RPC_I(inode)->pipe->ops->downcall(filp, buf, len); mutex_unlock(&inode->i_mutex); return res; } static unsigned int rpc_pipe_poll(struct file *filp, struct poll_table_struct *wait) { struct inode *inode = filp->f_path.dentry->d_inode; struct rpc_inode *rpci = RPC_I(inode); unsigned int mask = POLLOUT | POLLWRNORM; poll_wait(filp, &rpci->waitq, wait); mutex_lock(&inode->i_mutex); if (rpci->pipe == NULL) mask |= POLLERR | POLLHUP; else if (filp->private_data || !list_empty(&rpci->pipe->pipe)) mask |= POLLIN | POLLRDNORM; mutex_unlock(&inode->i_mutex); return mask; } static long rpc_pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct inode *inode = filp->f_path.dentry->d_inode; struct rpc_pipe *pipe; int len; switch (cmd) { case FIONREAD: mutex_lock(&inode->i_mutex); pipe = RPC_I(inode)->pipe; if (pipe == NULL) { mutex_unlock(&inode->i_mutex); return -EPIPE; } spin_lock(&pipe->lock); len = pipe->pipelen; if (filp->private_data) { struct rpc_pipe_msg *msg; msg = filp->private_data; len += msg->len - msg->copied; } spin_unlock(&pipe->lock); mutex_unlock(&inode->i_mutex); return put_user(len, (int __user *)arg); default: return -EINVAL; } } static const struct file_operations rpc_pipe_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .read = rpc_pipe_read, .write = 
rpc_pipe_write, .poll = rpc_pipe_poll, .unlocked_ioctl = rpc_pipe_ioctl, .open = rpc_pipe_open, .release = rpc_pipe_release, }; static int rpc_show_info(struct seq_file *m, void *v) { struct rpc_clnt *clnt = m->private; rcu_read_lock(); seq_printf(m, "RPC server: %s\n", rcu_dereference(clnt->cl_xprt)->servername); seq_printf(m, "service: %s (%d) version %d\n", clnt->cl_protname, clnt->cl_prog, clnt->cl_vers); seq_printf(m, "address: %s\n", rpc_peeraddr2str(clnt, RPC_DISPLAY_ADDR)); seq_printf(m, "protocol: %s\n", rpc_peeraddr2str(clnt, RPC_DISPLAY_PROTO)); seq_printf(m, "port: %s\n", rpc_peeraddr2str(clnt, RPC_DISPLAY_PORT)); rcu_read_unlock(); return 0; } static int rpc_info_open(struct inode *inode, struct file *file) { struct rpc_clnt *clnt = NULL; int ret = single_open(file, rpc_show_info, NULL); if (!ret) { struct seq_file *m = file->private_data; spin_lock(&file->f_path.dentry->d_lock); if (!d_unhashed(file->f_path.dentry)) clnt = RPC_I(inode)->private; if (clnt != NULL && atomic_inc_not_zero(&clnt->cl_count)) { spin_unlock(&file->f_path.dentry->d_lock); m->private = clnt; } else { spin_unlock(&file->f_path.dentry->d_lock); single_release(inode, file); ret = -EINVAL; } } return ret; } static int rpc_info_release(struct inode *inode, struct file *file) { struct seq_file *m = file->private_data; struct rpc_clnt *clnt = (struct rpc_clnt *)m->private; if (clnt) rpc_release_client(clnt); return single_release(inode, file); } static const struct file_operations rpc_info_operations = { .owner = THIS_MODULE, .open = rpc_info_open, .read = seq_read, .llseek = seq_lseek, .release = rpc_info_release, }; /* * Description of fs contents. 
*/ struct rpc_filelist { const char *name; const struct file_operations *i_fop; umode_t mode; }; static int rpc_delete_dentry(const struct dentry *dentry) { return 1; } static const struct dentry_operations rpc_dentry_operations = { .d_delete = rpc_delete_dentry, }; static struct inode * rpc_get_inode(struct super_block *sb, umode_t mode) { struct inode *inode = new_inode(sb); if (!inode) return NULL; inode->i_ino = get_next_ino(); inode->i_mode = mode; inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; switch (mode & S_IFMT) { case S_IFDIR: inode->i_fop = &simple_dir_operations; inode->i_op = &simple_dir_inode_operations; inc_nlink(inode); default: break; } return inode; } static int __rpc_create_common(struct inode *dir, struct dentry *dentry, umode_t mode, const struct file_operations *i_fop, void *private) { struct inode *inode; d_drop(dentry); inode = rpc_get_inode(dir->i_sb, mode); if (!inode) goto out_err; inode->i_ino = iunique(dir->i_sb, 100); if (i_fop) inode->i_fop = i_fop; if (private) rpc_inode_setowner(inode, private); d_add(dentry, inode); return 0; out_err: printk(KERN_WARNING "%s: %s failed to allocate inode for dentry %s\n", __FILE__, __func__, dentry->d_name.name); dput(dentry); return -ENOMEM; } static int __rpc_create(struct inode *dir, struct dentry *dentry, umode_t mode, const struct file_operations *i_fop, void *private) { int err; err = __rpc_create_common(dir, dentry, S_IFREG | mode, i_fop, private); if (err) return err; fsnotify_create(dir, dentry); return 0; } static int __rpc_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode, const struct file_operations *i_fop, void *private) { int err; err = __rpc_create_common(dir, dentry, S_IFDIR | mode, i_fop, private); if (err) return err; inc_nlink(dir); fsnotify_mkdir(dir, dentry); return 0; } static void init_pipe(struct rpc_pipe *pipe) { pipe->nreaders = 0; pipe->nwriters = 0; INIT_LIST_HEAD(&pipe->in_upcall); INIT_LIST_HEAD(&pipe->in_downcall); 
INIT_LIST_HEAD(&pipe->pipe); pipe->pipelen = 0; INIT_DELAYED_WORK(&pipe->queue_timeout, rpc_timeout_upcall_queue); pipe->ops = NULL; spin_lock_init(&pipe->lock); pipe->dentry = NULL; } void rpc_destroy_pipe_data(struct rpc_pipe *pipe) { kfree(pipe); } EXPORT_SYMBOL_GPL(rpc_destroy_pipe_data); struct rpc_pipe *rpc_mkpipe_data(const struct rpc_pipe_ops *ops, int flags) { struct rpc_pipe *pipe; pipe = kzalloc(sizeof(struct rpc_pipe), GFP_KERNEL); if (!pipe) return ERR_PTR(-ENOMEM); init_pipe(pipe); pipe->ops = ops; pipe->flags = flags; return pipe; } EXPORT_SYMBOL_GPL(rpc_mkpipe_data); static int __rpc_mkpipe_dentry(struct inode *dir, struct dentry *dentry, umode_t mode, const struct file_operations *i_fop, void *private, struct rpc_pipe *pipe) { struct rpc_inode *rpci; int err; err = __rpc_create_common(dir, dentry, S_IFIFO | mode, i_fop, private); if (err) return err; rpci = RPC_I(dentry->d_inode); rpci->private = private; rpci->pipe = pipe; fsnotify_create(dir, dentry); return 0; } static int __rpc_rmdir(struct inode *dir, struct dentry *dentry) { int ret; dget(dentry); ret = simple_rmdir(dir, dentry); d_delete(dentry); dput(dentry); return ret; } int rpc_rmdir(struct dentry *dentry) { struct dentry *parent; struct inode *dir; int error; parent = dget_parent(dentry); dir = parent->d_inode; mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT); error = __rpc_rmdir(dir, dentry); mutex_unlock(&dir->i_mutex); dput(parent); return error; } EXPORT_SYMBOL_GPL(rpc_rmdir); static int __rpc_unlink(struct inode *dir, struct dentry *dentry) { int ret; dget(dentry); ret = simple_unlink(dir, dentry); d_delete(dentry); dput(dentry); return ret; } static int __rpc_rmpipe(struct inode *dir, struct dentry *dentry) { struct inode *inode = dentry->d_inode; rpc_close_pipes(inode); return __rpc_unlink(dir, dentry); } static struct dentry *__rpc_lookup_create_exclusive(struct dentry *parent, struct qstr *name) { struct dentry *dentry; dentry = d_lookup(parent, name); if (!dentry) { dentry = 
d_alloc(parent, name); if (!dentry) return ERR_PTR(-ENOMEM); } if (dentry->d_inode == NULL) { d_set_d_op(dentry, &rpc_dentry_operations); return dentry; } dput(dentry); return ERR_PTR(-EEXIST); } /* * FIXME: This probably has races. */ static void __rpc_depopulate(struct dentry *parent, const struct rpc_filelist *files, int start, int eof) { struct inode *dir = parent->d_inode; struct dentry *dentry; struct qstr name; int i; for (i = start; i < eof; i++) { name.name = files[i].name; name.len = strlen(files[i].name); name.hash = full_name_hash(name.name, name.len); dentry = d_lookup(parent, &name); if (dentry == NULL) continue; if (dentry->d_inode == NULL) goto next; switch (dentry->d_inode->i_mode & S_IFMT) { default: BUG(); case S_IFREG: __rpc_unlink(dir, dentry); break; case S_IFDIR: __rpc_rmdir(dir, dentry); } next: dput(dentry); } } static void rpc_depopulate(struct dentry *parent, const struct rpc_filelist *files, int start, int eof) { struct inode *dir = parent->d_inode; mutex_lock_nested(&dir->i_mutex, I_MUTEX_CHILD); __rpc_depopulate(parent, files, start, eof); mutex_unlock(&dir->i_mutex); } static int rpc_populate(struct dentry *parent, const struct rpc_filelist *files, int start, int eof, void *private) { struct inode *dir = parent->d_inode; struct dentry *dentry; int i, err; mutex_lock(&dir->i_mutex); for (i = start; i < eof; i++) { struct qstr q; q.name = files[i].name; q.len = strlen(files[i].name); q.hash = full_name_hash(q.name, q.len); dentry = __rpc_lookup_create_exclusive(parent, &q); err = PTR_ERR(dentry); if (IS_ERR(dentry)) goto out_bad; switch (files[i].mode & S_IFMT) { default: BUG(); case S_IFREG: err = __rpc_create(dir, dentry, files[i].mode, files[i].i_fop, private); break; case S_IFDIR: err = __rpc_mkdir(dir, dentry, files[i].mode, NULL, private); } if (err != 0) goto out_bad; } mutex_unlock(&dir->i_mutex); return 0; out_bad: __rpc_depopulate(parent, files, start, eof); mutex_unlock(&dir->i_mutex); printk(KERN_WARNING "%s: %s failed to 
populate directory %s\n", __FILE__, __func__, parent->d_name.name); return err; } static struct dentry *rpc_mkdir_populate(struct dentry *parent, struct qstr *name, umode_t mode, void *private, int (*populate)(struct dentry *, void *), void *args_populate) { struct dentry *dentry; struct inode *dir = parent->d_inode; int error; mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT); dentry = __rpc_lookup_create_exclusive(parent, name); if (IS_ERR(dentry)) goto out; error = __rpc_mkdir(dir, dentry, mode, NULL, private); if (error != 0) goto out_err; if (populate != NULL) { error = populate(dentry, args_populate); if (error) goto err_rmdir; } out: mutex_unlock(&dir->i_mutex); return dentry; err_rmdir: __rpc_rmdir(dir, dentry); out_err: dentry = ERR_PTR(error); goto out; } static int rpc_rmdir_depopulate(struct dentry *dentry, void (*depopulate)(struct dentry *)) { struct dentry *parent; struct inode *dir; int error; parent = dget_parent(dentry); dir = parent->d_inode; mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT); if (depopulate != NULL) depopulate(dentry); error = __rpc_rmdir(dir, dentry); mutex_unlock(&dir->i_mutex); dput(parent); return error; } /** * rpc_mkpipe - make an rpc_pipefs file for kernel<->userspace communication * @parent: dentry of directory to create new "pipe" in * @name: name of pipe * @private: private data to associate with the pipe, for the caller's use * @ops: operations defining the behavior of the pipe: upcall, downcall, * release_pipe, open_pipe, and destroy_msg. * @flags: rpc_pipe flags * * Data is made available for userspace to read by calls to * rpc_queue_upcall(). The actual reads will result in calls to * @ops->upcall, which will be called with the file pointer, * message, and userspace buffer to copy to. * * Writes can come at any time, and do not necessarily have to be * responses to upcalls. They will result in calls to @msg->downcall. 
* * The @private argument passed here will be available to all these methods * from the file pointer, via RPC_I(file->f_dentry->d_inode)->private. */ struct dentry *rpc_mkpipe_dentry(struct dentry *parent, const char *name, void *private, struct rpc_pipe *pipe) { struct dentry *dentry; struct inode *dir = parent->d_inode; umode_t umode = S_IFIFO | S_IRUSR | S_IWUSR; struct qstr q; int err; if (pipe->ops->upcall == NULL) umode &= ~S_IRUGO; if (pipe->ops->downcall == NULL) umode &= ~S_IWUGO; q.name = name; q.len = strlen(name); q.hash = full_name_hash(q.name, q.len), mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT); dentry = __rpc_lookup_create_exclusive(parent, &q); if (IS_ERR(dentry)) goto out; err = __rpc_mkpipe_dentry(dir, dentry, umode, &rpc_pipe_fops, private, pipe); if (err) goto out_err; out: mutex_unlock(&dir->i_mutex); return dentry; out_err: dentry = ERR_PTR(err); printk(KERN_WARNING "%s: %s() failed to create pipe %s/%s (errno = %d)\n", __FILE__, __func__, parent->d_name.name, name, err); goto out; } EXPORT_SYMBOL_GPL(rpc_mkpipe_dentry); /** * rpc_unlink - remove a pipe * @dentry: dentry for the pipe, as returned from rpc_mkpipe * * After this call, lookups will no longer find the pipe, and any * attempts to read or write using preexisting opens of the pipe will * return -EPIPE. 
*/ int rpc_unlink(struct dentry *dentry) { struct dentry *parent; struct inode *dir; int error = 0; parent = dget_parent(dentry); dir = parent->d_inode; mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT); error = __rpc_rmpipe(dir, dentry); mutex_unlock(&dir->i_mutex); dput(parent); return error; } EXPORT_SYMBOL_GPL(rpc_unlink); enum { RPCAUTH_info, RPCAUTH_EOF }; static const struct rpc_filelist authfiles[] = { [RPCAUTH_info] = { .name = "info", .i_fop = &rpc_info_operations, .mode = S_IFREG | S_IRUSR, }, }; static int rpc_clntdir_populate(struct dentry *dentry, void *private) { return rpc_populate(dentry, authfiles, RPCAUTH_info, RPCAUTH_EOF, private); } static void rpc_clntdir_depopulate(struct dentry *dentry) { rpc_depopulate(dentry, authfiles, RPCAUTH_info, RPCAUTH_EOF); } /** * rpc_create_client_dir - Create a new rpc_client directory in rpc_pipefs * @dentry: dentry from the rpc_pipefs root to the new directory * @name: &struct qstr for the name * @rpc_client: rpc client to associate with this directory * * This creates a directory at the given @path associated with * @rpc_clnt, which will contain a file named "info" with some basic * information about the client, together with any "pipes" that may * later be created using rpc_mkpipe(). 
*/ struct dentry *rpc_create_client_dir(struct dentry *dentry, struct qstr *name, struct rpc_clnt *rpc_client) { return rpc_mkdir_populate(dentry, name, S_IRUGO | S_IXUGO, NULL, rpc_clntdir_populate, rpc_client); } /** * rpc_remove_client_dir - Remove a directory created with rpc_create_client_dir() * @clnt: rpc client */ int rpc_remove_client_dir(struct dentry *dentry) { return rpc_rmdir_depopulate(dentry, rpc_clntdir_depopulate); } static const struct rpc_filelist cache_pipefs_files[3] = { [0] = { .name = "channel", .i_fop = &cache_file_operations_pipefs, .mode = S_IFREG|S_IRUSR|S_IWUSR, }, [1] = { .name = "content", .i_fop = &content_file_operations_pipefs, .mode = S_IFREG|S_IRUSR, }, [2] = { .name = "flush", .i_fop = &cache_flush_operations_pipefs, .mode = S_IFREG|S_IRUSR|S_IWUSR, }, }; static int rpc_cachedir_populate(struct dentry *dentry, void *private) { return rpc_populate(dentry, cache_pipefs_files, 0, 3, private); } static void rpc_cachedir_depopulate(struct dentry *dentry) { rpc_depopulate(dentry, cache_pipefs_files, 0, 3); } struct dentry *rpc_create_cache_dir(struct dentry *parent, struct qstr *name, umode_t umode, struct cache_detail *cd) { return rpc_mkdir_populate(parent, name, umode, NULL, rpc_cachedir_populate, cd); } void rpc_remove_cache_dir(struct dentry *dentry) { rpc_rmdir_depopulate(dentry, rpc_cachedir_depopulate); } /* * populate the filesystem */ static const struct super_operations s_ops = { .alloc_inode = rpc_alloc_inode, .destroy_inode = rpc_destroy_inode, .statfs = simple_statfs, }; #define RPCAUTH_GSSMAGIC 0x67596969 /* * We have a single directory with 1 node in it. 
*/ enum { RPCAUTH_lockd, RPCAUTH_mount, RPCAUTH_nfs, RPCAUTH_portmap, RPCAUTH_statd, RPCAUTH_nfsd4_cb, RPCAUTH_cache, RPCAUTH_nfsd, RPCAUTH_RootEOF }; static const struct rpc_filelist files[] = { [RPCAUTH_lockd] = { .name = "lockd", .mode = S_IFDIR | S_IRUGO | S_IXUGO, }, [RPCAUTH_mount] = { .name = "mount", .mode = S_IFDIR | S_IRUGO | S_IXUGO, }, [RPCAUTH_nfs] = { .name = "nfs", .mode = S_IFDIR | S_IRUGO | S_IXUGO, }, [RPCAUTH_portmap] = { .name = "portmap", .mode = S_IFDIR | S_IRUGO | S_IXUGO, }, [RPCAUTH_statd] = { .name = "statd", .mode = S_IFDIR | S_IRUGO | S_IXUGO, }, [RPCAUTH_nfsd4_cb] = { .name = "nfsd4_cb", .mode = S_IFDIR | S_IRUGO | S_IXUGO, }, [RPCAUTH_cache] = { .name = "cache", .mode = S_IFDIR | S_IRUGO | S_IXUGO, }, [RPCAUTH_nfsd] = { .name = "nfsd", .mode = S_IFDIR | S_IRUGO | S_IXUGO, }, }; /* * This call can be used only in RPC pipefs mount notification hooks. */ struct dentry *rpc_d_lookup_sb(const struct super_block *sb, const unsigned char *dir_name) { struct qstr dir = { .name = dir_name, .len = strlen(dir_name), .hash = full_name_hash(dir_name, strlen(dir_name)), }; return d_lookup(sb->s_root, &dir); } EXPORT_SYMBOL_GPL(rpc_d_lookup_sb); void rpc_pipefs_init_net(struct net *net) { struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); mutex_init(&sn->pipefs_sb_lock); } /* * This call will be used for per network namespace operations calls. * Note: Function will be returned with pipefs_sb_lock taken if superblock was * found. This lock have to be released by rpc_put_sb_net() when all operations * will be completed. 
*/ struct super_block *rpc_get_sb_net(const struct net *net) { struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); mutex_lock(&sn->pipefs_sb_lock); if (sn->pipefs_sb) return sn->pipefs_sb; mutex_unlock(&sn->pipefs_sb_lock); return NULL; } EXPORT_SYMBOL_GPL(rpc_get_sb_net); void rpc_put_sb_net(const struct net *net) { struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); BUG_ON(sn->pipefs_sb == NULL); mutex_unlock(&sn->pipefs_sb_lock); } EXPORT_SYMBOL_GPL(rpc_put_sb_net); static int rpc_fill_super(struct super_block *sb, void *data, int silent) { struct inode *inode; struct dentry *root; struct net *net = data; struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); int err; sb->s_blocksize = PAGE_CACHE_SIZE; sb->s_blocksize_bits = PAGE_CACHE_SHIFT; sb->s_magic = RPCAUTH_GSSMAGIC; sb->s_op = &s_ops; sb->s_time_gran = 1; inode = rpc_get_inode(sb, S_IFDIR | 0755); sb->s_root = root = d_make_root(inode); if (!root) return -ENOMEM; if (rpc_populate(root, files, RPCAUTH_lockd, RPCAUTH_RootEOF, NULL)) return -ENOMEM; dprintk("RPC: sending pipefs MOUNT notification for net %p%s\n", net, NET_NAME(net)); sn->pipefs_sb = sb; err = blocking_notifier_call_chain(&rpc_pipefs_notifier_list, RPC_PIPEFS_MOUNT, sb); if (err) goto err_depopulate; sb->s_fs_info = get_net(net); return 0; err_depopulate: blocking_notifier_call_chain(&rpc_pipefs_notifier_list, RPC_PIPEFS_UMOUNT, sb); sn->pipefs_sb = NULL; __rpc_depopulate(root, files, RPCAUTH_lockd, RPCAUTH_RootEOF); return err; } static struct dentry * rpc_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { return mount_ns(fs_type, flags, current->nsproxy->net_ns, rpc_fill_super); } static void rpc_kill_sb(struct super_block *sb) { struct net *net = sb->s_fs_info; struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); mutex_lock(&sn->pipefs_sb_lock); sn->pipefs_sb = NULL; mutex_unlock(&sn->pipefs_sb_lock); put_net(net); dprintk("RPC: sending pipefs UMOUNT notification for net %p%s\n", net, 
NET_NAME(net)); blocking_notifier_call_chain(&rpc_pipefs_notifier_list, RPC_PIPEFS_UMOUNT, sb); kill_litter_super(sb); } static struct file_system_type rpc_pipe_fs_type = { .owner = THIS_MODULE, .name = "rpc_pipefs", .mount = rpc_mount, .kill_sb = rpc_kill_sb, }; static void init_once(void *foo) { struct rpc_inode *rpci = (struct rpc_inode *) foo; inode_init_once(&rpci->vfs_inode); rpci->private = NULL; rpci->pipe = NULL; init_waitqueue_head(&rpci->waitq); } int register_rpc_pipefs(void) { int err; rpc_inode_cachep = kmem_cache_create("rpc_inode_cache", sizeof(struct rpc_inode), 0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT| SLAB_MEM_SPREAD), init_once); if (!rpc_inode_cachep) return -ENOMEM; err = rpc_clients_notifier_register(); if (err) goto err_notifier; err = register_filesystem(&rpc_pipe_fs_type); if (err) goto err_register; return 0; err_register: rpc_clients_notifier_unregister(); err_notifier: kmem_cache_destroy(rpc_inode_cachep); return err; } void unregister_rpc_pipefs(void) { rpc_clients_notifier_unregister(); kmem_cache_destroy(rpc_inode_cachep); unregister_filesystem(&rpc_pipe_fs_type); } /* Make 'mount -t rpc_pipefs ...' autoload this module. */ MODULE_ALIAS("rpc_pipefs");
gpl-2.0
CM-CHT/android_kernel_intel_cherrytrail
fs/nls/nls_cp737.c
2935
15503
/* * linux/fs/nls/nls_cp737.c * * Charset cp737 translation tables. * Generated automatically from the Unicode and charset * tables from the Unicode Organization (www.unicode.org). * The Unicode to charset table has only exact mappings. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/nls.h> #include <linux/errno.h> static const wchar_t charset2uni[256] = { /* 0x00*/ 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f, /* 0x10*/ 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x001f, /* 0x20*/ 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x002f, /* 0x30*/ 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037, 0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x003e, 0x003f, /* 0x40*/ 0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047, 0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x004f, /* 0x50*/ 0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057, 0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x005f, /* 0x60*/ 0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067, 0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x006f, /* 0x70*/ 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077, 0x0078, 0x0079, 0x007a, 0x007b, 0x007c, 0x007d, 0x007e, 0x007f, /* 0x80*/ 0x0391, 0x0392, 0x0393, 0x0394, 0x0395, 0x0396, 0x0397, 0x0398, 0x0399, 0x039a, 0x039b, 0x039c, 0x039d, 0x039e, 0x039f, 0x03a0, /* 0x90*/ 0x03a1, 0x03a3, 0x03a4, 0x03a5, 0x03a6, 0x03a7, 0x03a8, 0x03a9, 0x03b1, 0x03b2, 0x03b3, 0x03b4, 0x03b5, 0x03b6, 0x03b7, 0x03b8, /* 0xa0*/ 0x03b9, 0x03ba, 0x03bb, 0x03bc, 0x03bd, 0x03be, 0x03bf, 0x03c0, 0x03c1, 0x03c3, 0x03c2, 0x03c4, 0x03c5, 0x03c6, 0x03c7, 0x03c8, /* 0xb0*/ 0x2591, 0x2592, 0x2593, 0x2502, 0x2524, 0x2561, 0x2562, 0x2556, 
0x2555, 0x2563, 0x2551, 0x2557, 0x255d, 0x255c, 0x255b, 0x2510, /* 0xc0*/ 0x2514, 0x2534, 0x252c, 0x251c, 0x2500, 0x253c, 0x255e, 0x255f, 0x255a, 0x2554, 0x2569, 0x2566, 0x2560, 0x2550, 0x256c, 0x2567, /* 0xd0*/ 0x2568, 0x2564, 0x2565, 0x2559, 0x2558, 0x2552, 0x2553, 0x256b, 0x256a, 0x2518, 0x250c, 0x2588, 0x2584, 0x258c, 0x2590, 0x2580, /* 0xe0*/ 0x03c9, 0x03ac, 0x03ad, 0x03ae, 0x03ca, 0x03af, 0x03cc, 0x03cd, 0x03cb, 0x03ce, 0x0386, 0x0388, 0x0389, 0x038a, 0x038c, 0x038e, /* 0xf0*/ 0x038f, 0x00b1, 0x2265, 0x2264, 0x03aa, 0x03ab, 0x00f7, 0x2248, 0x00b0, 0x2219, 0x00b7, 0x221a, 0x207f, 0x00b2, 0x25a0, 0x00a0, }; static const unsigned char page00[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 
0xa0-0xa7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */ 0xf8, 0xf1, 0xfd, 0x00, 0x00, 0x00, 0x00, 0xfa, /* 0xb0-0xb7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf6, /* 0xf0-0xf7 */ }; static const unsigned char page03[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xea, 0x00, /* 0x80-0x87 */ 0xeb, 0xec, 0xed, 0x00, 0xee, 0x00, 0xef, 0xf0, /* 0x88-0x8f */ 0x00, 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, /* 0x90-0x97 */ 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, /* 0x98-0x9f */ 0x8f, 0x90, 0x00, 
0x91, 0x92, 0x93, 0x94, 0x95, /* 0xa0-0xa7 */ 0x96, 0x97, 0xf4, 0xf5, 0xe1, 0xe2, 0xe3, 0xe5, /* 0xa8-0xaf */ 0x00, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, /* 0xb0-0xb7 */ 0x9f, 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, /* 0xb8-0xbf */ 0xa7, 0xa8, 0xaa, 0xa9, 0xab, 0xac, 0xad, 0xae, /* 0xc0-0xc7 */ 0xaf, 0xe0, 0xe4, 0xe8, 0xe6, 0xe7, 0xe9, 0x00, /* 0xc8-0xcf */ }; static const unsigned char page20[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfc, /* 0x78-0x7f */ }; static const unsigned char page22[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0x00, 0xf9, 0xfb, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */ 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */ 0xf7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */ 0x00, 0x00, 0x00, 0x00, 0xf3, 0xf2, 0x00, 0x00, /* 0x60-0x67 */ }; static const unsigned char page25[256] = { 0xc4, 0x00, 0xb3, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0xda, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0xbf, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0xd9, 0x00, 0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0x00, 0x00, 0xb4, 0x00, 0x00, 0x00, /* 0x20-0x27 */ 0x00, 0x00, 0x00, 0x00, 0xc2, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x00, 0x00, 0x00, 0x00, 0xc1, 0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0x00, 0x00, 0x00, 0xc5, 0x00, 0x00, 0x00, /* 0x38-0x3f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */ 0xcd, 0xba, 0xd5, 0xd6, 0xc9, 0xb8, 0xb7, 0xbb, /* 0x50-0x57 */ 0xd4, 0xd3, 0xc8, 0xbe, 0xbd, 0xbc, 0xc6, 0xc7, /* 0x58-0x5f */ 0xcc, 0xb5, 0xb6, 0xb9, 0xd1, 0xd2, 0xcb, 0xcf, /* 0x60-0x67 */ 0xd0, 0xca, 0xd8, 0xd7, 0xce, 0x00, 0x00, 0x00, /* 0x68-0x6f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */ 0xdf, 0x00, 0x00, 0x00, 0xdc, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0xdb, 0x00, 0x00, 0x00, 0xdd, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0xde, 0xb0, 0xb1, 0xb2, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */ }; static const unsigned char *const page_uni2charset[256] = { page00, NULL, NULL, page03, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, page20, NULL, page22, NULL, NULL, 
page25, NULL, NULL, }; static const unsigned char charset2lower[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x40-0x47 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x48-0x4f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x50-0x57 */ 0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x80-0x87 */ 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0x88-0x8f */ 0xa8, 0xa9, 0xab, 0xac, 0xad, 0xae, 0xaf, 0xe0, /* 0x90-0x97 */ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */ 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */ 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */ 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */ 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */ 0xe8, 0xe9, 0xe1, 0xe2, 0xe3, 0xe5, 0xe6, 0xe7, /* 0xe8-0xef */ 0xe9, 
0xf1, 0xf2, 0xf3, 0xe4, 0xe8, 0xf6, 0xf7, /* 0xf0-0xf7 */ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */ }; static const unsigned char charset2upper[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */ 0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x98-0x9f */ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0xa0-0xa7 */ 0x90, 0x91, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, /* 0xa8-0xaf */ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */ 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */ 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */ 0x97, 0xea, 0xeb, 0xec, 0xf4, 
0xed, 0xee, 0xef, /* 0xe0-0xe7 */ 0xf5, 0xf0, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */ }; static int uni2char(wchar_t uni, unsigned char *out, int boundlen) { const unsigned char *uni2charset; unsigned char cl = uni & 0x00ff; unsigned char ch = (uni & 0xff00) >> 8; if (boundlen <= 0) return -ENAMETOOLONG; uni2charset = page_uni2charset[ch]; if (uni2charset && uni2charset[cl]) out[0] = uni2charset[cl]; else return -EINVAL; return 1; } static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni) { *uni = charset2uni[*rawstring]; if (*uni == 0x0000) return -EINVAL; return 1; } static struct nls_table table = { .charset = "cp737", .uni2char = uni2char, .char2uni = char2uni, .charset2lower = charset2lower, .charset2upper = charset2upper, }; static int __init init_nls_cp737(void) { return register_nls(&table); } static void __exit exit_nls_cp737(void) { unregister_nls(&table); } module_init(init_nls_cp737) module_exit(exit_nls_cp737) MODULE_LICENSE("Dual BSD/GPL");
gpl-2.0
TimofeyFox/S7270_kernel
arch/arm/mach-omap2/mux.c
3959
26653
/* * linux/arch/arm/mach-omap2/mux.c * * OMAP2, OMAP3 and OMAP4 pin multiplexing configurations * * Copyright (C) 2004 - 2010 Texas Instruments Inc. * Copyright (C) 2003 - 2008 Nokia Corporation * * Written by Tony Lindgren * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/io.h> #include <linux/list.h> #include <linux/slab.h> #include <linux/ctype.h> #include <linux/debugfs.h> #include <linux/seq_file.h> #include <linux/uaccess.h> #include <linux/irq.h> #include <linux/interrupt.h> #include <plat/omap_hwmod.h> #include "control.h" #include "mux.h" #include "prm.h" #define OMAP_MUX_BASE_OFFSET 0x30 /* Offset from CTRL_BASE */ #define OMAP_MUX_BASE_SZ 0x5ca struct omap_mux_entry { struct omap_mux mux; struct list_head node; }; static LIST_HEAD(mux_partitions); static DEFINE_MUTEX(muxmode_mutex); struct omap_mux_partition *omap_mux_get(const char *name) { struct omap_mux_partition *partition; list_for_each_entry(partition, &mux_partitions, node) { if (!strcmp(name, partition->name)) return partition; } return NULL; } u16 omap_mux_read(struct omap_mux_partition *partition, u16 reg) { if (partition->flags & OMAP_MUX_REG_8BIT) return __raw_readb(partition->base + reg); else return __raw_readw(partition->base + reg); } void omap_mux_write(struct omap_mux_partition 
*partition, u16 val, u16 reg) { if (partition->flags & OMAP_MUX_REG_8BIT) __raw_writeb(val, partition->base + reg); else __raw_writew(val, partition->base + reg); } void omap_mux_write_array(struct omap_mux_partition *partition, struct omap_board_mux *board_mux) { if (!board_mux) return; while (board_mux->reg_offset != OMAP_MUX_TERMINATOR) { omap_mux_write(partition, board_mux->value, board_mux->reg_offset); board_mux++; } } #ifdef CONFIG_OMAP_MUX static char *omap_mux_options; static int __init _omap_mux_init_gpio(struct omap_mux_partition *partition, int gpio, int val) { struct omap_mux_entry *e; struct omap_mux *gpio_mux = NULL; u16 old_mode; u16 mux_mode; int found = 0; struct list_head *muxmodes = &partition->muxmodes; if (!gpio) return -EINVAL; list_for_each_entry(e, muxmodes, node) { struct omap_mux *m = &e->mux; if (gpio == m->gpio) { gpio_mux = m; found++; } } if (found == 0) { pr_err("%s: Could not set gpio%i\n", __func__, gpio); return -ENODEV; } if (found > 1) { pr_info("%s: Multiple gpio paths (%d) for gpio%i\n", __func__, found, gpio); return -EINVAL; } old_mode = omap_mux_read(partition, gpio_mux->reg_offset); mux_mode = val & ~(OMAP_MUX_NR_MODES - 1); if (partition->flags & OMAP_MUX_GPIO_IN_MODE3) mux_mode |= OMAP_MUX_MODE3; else mux_mode |= OMAP_MUX_MODE4; pr_debug("%s: Setting signal %s.gpio%i 0x%04x -> 0x%04x\n", __func__, gpio_mux->muxnames[0], gpio, old_mode, mux_mode); omap_mux_write(partition, mux_mode, gpio_mux->reg_offset); return 0; } int __init omap_mux_init_gpio(int gpio, int val) { struct omap_mux_partition *partition; int ret; list_for_each_entry(partition, &mux_partitions, node) { ret = _omap_mux_init_gpio(partition, gpio, val); if (!ret) return ret; } return -ENODEV; } static int __init _omap_mux_get_by_name(struct omap_mux_partition *partition, const char *muxname, struct omap_mux **found_mux) { struct omap_mux *mux = NULL; struct omap_mux_entry *e; const char *mode_name; int found = 0, found_mode = 0, mode0_len = 0; struct 
list_head *muxmodes = &partition->muxmodes; mode_name = strchr(muxname, '.'); if (mode_name) { mode0_len = strlen(muxname) - strlen(mode_name); mode_name++; } else { mode_name = muxname; } list_for_each_entry(e, muxmodes, node) { char *m0_entry; int i; mux = &e->mux; m0_entry = mux->muxnames[0]; /* First check for full name in mode0.muxmode format */ if (mode0_len && strncmp(muxname, m0_entry, mode0_len)) continue; /* Then check for muxmode only */ for (i = 0; i < OMAP_MUX_NR_MODES; i++) { char *mode_cur = mux->muxnames[i]; if (!mode_cur) continue; if (!strcmp(mode_name, mode_cur)) { *found_mux = mux; found++; found_mode = i; } } } if (found == 1) { return found_mode; } if (found > 1) { pr_err("%s: Multiple signal paths (%i) for %s\n", __func__, found, muxname); return -EINVAL; } pr_err("%s: Could not find signal %s\n", __func__, muxname); return -ENODEV; } static int __init omap_mux_get_by_name(const char *muxname, struct omap_mux_partition **found_partition, struct omap_mux **found_mux) { struct omap_mux_partition *partition; list_for_each_entry(partition, &mux_partitions, node) { struct omap_mux *mux = NULL; int mux_mode = _omap_mux_get_by_name(partition, muxname, &mux); if (mux_mode < 0) continue; *found_partition = partition; *found_mux = mux; return mux_mode; } return -ENODEV; } int __init omap_mux_init_signal(const char *muxname, int val) { struct omap_mux_partition *partition = NULL; struct omap_mux *mux = NULL; u16 old_mode; int mux_mode; mux_mode = omap_mux_get_by_name(muxname, &partition, &mux); if (mux_mode < 0) return mux_mode; old_mode = omap_mux_read(partition, mux->reg_offset); mux_mode |= val; pr_debug("%s: Setting signal %s 0x%04x -> 0x%04x\n", __func__, muxname, old_mode, mux_mode); omap_mux_write(partition, mux_mode, mux->reg_offset); return 0; } struct omap_hwmod_mux_info * __init omap_hwmod_mux_init(struct omap_device_pad *bpads, int nr_pads) { struct omap_hwmod_mux_info *hmux; int i, nr_pads_dynamic = 0; if (!bpads || nr_pads < 1) return 
NULL; hmux = kzalloc(sizeof(struct omap_hwmod_mux_info), GFP_KERNEL); if (!hmux) goto err1; hmux->nr_pads = nr_pads; hmux->pads = kzalloc(sizeof(struct omap_device_pad) * nr_pads, GFP_KERNEL); if (!hmux->pads) goto err2; for (i = 0; i < hmux->nr_pads; i++) { struct omap_mux_partition *partition; struct omap_device_pad *bpad = &bpads[i], *pad = &hmux->pads[i]; struct omap_mux *mux; int mux_mode; mux_mode = omap_mux_get_by_name(bpad->name, &partition, &mux); if (mux_mode < 0) goto err3; if (!pad->partition) pad->partition = partition; if (!pad->mux) pad->mux = mux; pad->name = kzalloc(strlen(bpad->name) + 1, GFP_KERNEL); if (!pad->name) { int j; for (j = i - 1; j >= 0; j--) kfree(hmux->pads[j].name); goto err3; } strcpy(pad->name, bpad->name); pad->flags = bpad->flags; pad->enable = bpad->enable; pad->idle = bpad->idle; pad->off = bpad->off; if (pad->flags & (OMAP_DEVICE_PAD_REMUX | OMAP_DEVICE_PAD_WAKEUP)) nr_pads_dynamic++; pr_debug("%s: Initialized %s\n", __func__, pad->name); } if (!nr_pads_dynamic) return hmux; /* * Add pads that need dynamic muxing into a separate list */ hmux->nr_pads_dynamic = nr_pads_dynamic; hmux->pads_dynamic = kzalloc(sizeof(struct omap_device_pad *) * nr_pads_dynamic, GFP_KERNEL); if (!hmux->pads_dynamic) { pr_err("%s: Could not allocate dynamic pads\n", __func__); return hmux; } nr_pads_dynamic = 0; for (i = 0; i < hmux->nr_pads; i++) { struct omap_device_pad *pad = &hmux->pads[i]; if (pad->flags & (OMAP_DEVICE_PAD_REMUX | OMAP_DEVICE_PAD_WAKEUP)) { pr_debug("%s: pad %s tagged dynamic\n", __func__, pad->name); hmux->pads_dynamic[nr_pads_dynamic] = pad; nr_pads_dynamic++; } } return hmux; err3: kfree(hmux->pads); err2: kfree(hmux); err1: pr_err("%s: Could not allocate device mux entry\n", __func__); return NULL; } /** * omap_hwmod_mux_scan_wakeups - omap hwmod scan wakeup pads * @hmux: Pads for a hwmod * @mpu_irqs: MPU irq array for a hwmod * * Scans the wakeup status of pads for a single hwmod. 
If an irq * array is defined for this mux, the parser will call the registered * ISRs for corresponding pads, otherwise the parser will stop at the * first wakeup active pad and return. Returns true if there is a * pending and non-served wakeup event for the mux, otherwise false. */ static bool omap_hwmod_mux_scan_wakeups(struct omap_hwmod_mux_info *hmux, struct omap_hwmod_irq_info *mpu_irqs) { int i, irq; unsigned int val; u32 handled_irqs = 0; for (i = 0; i < hmux->nr_pads_dynamic; i++) { struct omap_device_pad *pad = hmux->pads_dynamic[i]; if (!(pad->flags & OMAP_DEVICE_PAD_WAKEUP) || !(pad->idle & OMAP_WAKEUP_EN)) continue; val = omap_mux_read(pad->partition, pad->mux->reg_offset); if (!(val & OMAP_WAKEUP_EVENT)) continue; if (!hmux->irqs) return true; irq = hmux->irqs[i]; /* make sure we only handle each irq once */ if (handled_irqs & 1 << irq) continue; handled_irqs |= 1 << irq; generic_handle_irq(mpu_irqs[irq].irq); } return false; } /** * _omap_hwmod_mux_handle_irq - Process wakeup events for a single hwmod * * Checks a single hwmod for every wakeup capable pad to see if there is an * active wakeup event. If this is the case, call the corresponding ISR. */ static int _omap_hwmod_mux_handle_irq(struct omap_hwmod *oh, void *data) { if (!oh->mux || !oh->mux->enabled) return 0; if (omap_hwmod_mux_scan_wakeups(oh->mux, oh->mpu_irqs)) generic_handle_irq(oh->mpu_irqs[0].irq); return 0; } /** * omap_hwmod_mux_handle_irq - Process pad wakeup irqs. * * Calls a function for each registered omap_hwmod to check * pad wakeup statuses. 
*/ static irqreturn_t omap_hwmod_mux_handle_irq(int irq, void *unused) { omap_hwmod_for_each(_omap_hwmod_mux_handle_irq, NULL); return IRQ_HANDLED; } /* Assumes the calling function takes care of locking */ void omap_hwmod_mux(struct omap_hwmod_mux_info *hmux, u8 state) { int i; /* Runtime idling of dynamic pads */ if (state == _HWMOD_STATE_IDLE && hmux->enabled) { for (i = 0; i < hmux->nr_pads_dynamic; i++) { struct omap_device_pad *pad = hmux->pads_dynamic[i]; int val = -EINVAL; val = pad->idle; omap_mux_write(pad->partition, val, pad->mux->reg_offset); } return; } /* Runtime enabling of dynamic pads */ if ((state == _HWMOD_STATE_ENABLED) && hmux->pads_dynamic && hmux->enabled) { for (i = 0; i < hmux->nr_pads_dynamic; i++) { struct omap_device_pad *pad = hmux->pads_dynamic[i]; int val = -EINVAL; val = pad->enable; omap_mux_write(pad->partition, val, pad->mux->reg_offset); } return; } /* Enabling or disabling of all pads */ for (i = 0; i < hmux->nr_pads; i++) { struct omap_device_pad *pad = &hmux->pads[i]; int flags, val = -EINVAL; flags = pad->flags; switch (state) { case _HWMOD_STATE_ENABLED: val = pad->enable; pr_debug("%s: Enabling %s %x\n", __func__, pad->name, val); break; case _HWMOD_STATE_DISABLED: /* Use safe mode unless OMAP_DEVICE_PAD_REMUX */ if (flags & OMAP_DEVICE_PAD_REMUX) val = pad->off; else val = OMAP_MUX_MODE7; pr_debug("%s: Disabling %s %x\n", __func__, pad->name, val); break; default: /* Nothing to be done */ break; }; if (val >= 0) { omap_mux_write(pad->partition, val, pad->mux->reg_offset); pad->flags = flags; } } if (state == _HWMOD_STATE_ENABLED) hmux->enabled = true; else hmux->enabled = false; } #ifdef CONFIG_DEBUG_FS #define OMAP_MUX_MAX_NR_FLAGS 10 #define OMAP_MUX_TEST_FLAG(val, mask) \ if (((val) & (mask)) == (mask)) { \ i++; \ flags[i] = #mask; \ } /* REVISIT: Add checking for non-optimal mux settings */ static inline void omap_mux_decode(struct seq_file *s, u16 val) { char *flags[OMAP_MUX_MAX_NR_FLAGS]; char 
mode[sizeof("OMAP_MUX_MODE") + 1]; int i = -1; sprintf(mode, "OMAP_MUX_MODE%d", val & 0x7); i++; flags[i] = mode; OMAP_MUX_TEST_FLAG(val, OMAP_PIN_OFF_WAKEUPENABLE); if (val & OMAP_OFF_EN) { if (!(val & OMAP_OFFOUT_EN)) { if (!(val & OMAP_OFF_PULL_UP)) { OMAP_MUX_TEST_FLAG(val, OMAP_PIN_OFF_INPUT_PULLDOWN); } else { OMAP_MUX_TEST_FLAG(val, OMAP_PIN_OFF_INPUT_PULLUP); } } else { if (!(val & OMAP_OFFOUT_VAL)) { OMAP_MUX_TEST_FLAG(val, OMAP_PIN_OFF_OUTPUT_LOW); } else { OMAP_MUX_TEST_FLAG(val, OMAP_PIN_OFF_OUTPUT_HIGH); } } } if (val & OMAP_INPUT_EN) { if (val & OMAP_PULL_ENA) { if (!(val & OMAP_PULL_UP)) { OMAP_MUX_TEST_FLAG(val, OMAP_PIN_INPUT_PULLDOWN); } else { OMAP_MUX_TEST_FLAG(val, OMAP_PIN_INPUT_PULLUP); } } else { OMAP_MUX_TEST_FLAG(val, OMAP_PIN_INPUT); } } else { i++; flags[i] = "OMAP_PIN_OUTPUT"; } do { seq_printf(s, "%s", flags[i]); if (i > 0) seq_printf(s, " | "); } while (i-- > 0); } #define OMAP_MUX_DEFNAME_LEN 32 static int omap_mux_dbg_board_show(struct seq_file *s, void *unused) { struct omap_mux_partition *partition = s->private; struct omap_mux_entry *e; u8 omap_gen = omap_rev() >> 28; list_for_each_entry(e, &partition->muxmodes, node) { struct omap_mux *m = &e->mux; char m0_def[OMAP_MUX_DEFNAME_LEN]; char *m0_name = m->muxnames[0]; u16 val; int i, mode; if (!m0_name) continue; /* REVISIT: Needs to be updated if mode0 names get longer */ for (i = 0; i < OMAP_MUX_DEFNAME_LEN; i++) { if (m0_name[i] == '\0') { m0_def[i] = m0_name[i]; break; } m0_def[i] = toupper(m0_name[i]); } val = omap_mux_read(partition, m->reg_offset); mode = val & OMAP_MUX_MODE7; if (mode != 0) seq_printf(s, "/* %s */\n", m->muxnames[mode]); /* * XXX: Might be revisited to support differences across * same OMAP generation. 
*/ seq_printf(s, "OMAP%d_MUX(%s, ", omap_gen, m0_def); omap_mux_decode(s, val); seq_printf(s, "),\n"); } return 0; } static int omap_mux_dbg_board_open(struct inode *inode, struct file *file) { return single_open(file, omap_mux_dbg_board_show, inode->i_private); } static const struct file_operations omap_mux_dbg_board_fops = { .open = omap_mux_dbg_board_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static struct omap_mux_partition *omap_mux_get_partition(struct omap_mux *mux) { struct omap_mux_partition *partition; list_for_each_entry(partition, &mux_partitions, node) { struct list_head *muxmodes = &partition->muxmodes; struct omap_mux_entry *e; list_for_each_entry(e, muxmodes, node) { struct omap_mux *m = &e->mux; if (m == mux) return partition; } } return NULL; } static int omap_mux_dbg_signal_show(struct seq_file *s, void *unused) { struct omap_mux *m = s->private; struct omap_mux_partition *partition; const char *none = "NA"; u16 val; int mode; partition = omap_mux_get_partition(m); if (!partition) return 0; val = omap_mux_read(partition, m->reg_offset); mode = val & OMAP_MUX_MODE7; seq_printf(s, "name: %s.%s (0x%08x/0x%03x = 0x%04x), b %s, t %s\n", m->muxnames[0], m->muxnames[mode], partition->phys + m->reg_offset, m->reg_offset, val, m->balls[0] ? m->balls[0] : none, m->balls[1] ? m->balls[1] : none); seq_printf(s, "mode: "); omap_mux_decode(s, val); seq_printf(s, "\n"); seq_printf(s, "signals: %s | %s | %s | %s | %s | %s | %s | %s\n", m->muxnames[0] ? m->muxnames[0] : none, m->muxnames[1] ? m->muxnames[1] : none, m->muxnames[2] ? m->muxnames[2] : none, m->muxnames[3] ? m->muxnames[3] : none, m->muxnames[4] ? m->muxnames[4] : none, m->muxnames[5] ? m->muxnames[5] : none, m->muxnames[6] ? m->muxnames[6] : none, m->muxnames[7] ? 
m->muxnames[7] : none); return 0; } #define OMAP_MUX_MAX_ARG_CHAR 7 static ssize_t omap_mux_dbg_signal_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { char buf[OMAP_MUX_MAX_ARG_CHAR]; struct seq_file *seqf; struct omap_mux *m; unsigned long val; int buf_size, ret; struct omap_mux_partition *partition; if (count > OMAP_MUX_MAX_ARG_CHAR) return -EINVAL; memset(buf, 0, sizeof(buf)); buf_size = min(count, sizeof(buf) - 1); if (copy_from_user(buf, user_buf, buf_size)) return -EFAULT; ret = strict_strtoul(buf, 0x10, &val); if (ret < 0) return ret; if (val > 0xffff) return -EINVAL; seqf = file->private_data; m = seqf->private; partition = omap_mux_get_partition(m); if (!partition) return -ENODEV; omap_mux_write(partition, (u16)val, m->reg_offset); *ppos += count; return count; } static int omap_mux_dbg_signal_open(struct inode *inode, struct file *file) { return single_open(file, omap_mux_dbg_signal_show, inode->i_private); } static const struct file_operations omap_mux_dbg_signal_fops = { .open = omap_mux_dbg_signal_open, .read = seq_read, .write = omap_mux_dbg_signal_write, .llseek = seq_lseek, .release = single_release, }; static struct dentry *mux_dbg_dir; static void __init omap_mux_dbg_create_entry( struct omap_mux_partition *partition, struct dentry *mux_dbg_dir) { struct omap_mux_entry *e; list_for_each_entry(e, &partition->muxmodes, node) { struct omap_mux *m = &e->mux; (void)debugfs_create_file(m->muxnames[0], S_IWUSR, mux_dbg_dir, m, &omap_mux_dbg_signal_fops); } } static void __init omap_mux_dbg_init(void) { struct omap_mux_partition *partition; static struct dentry *mux_dbg_board_dir; mux_dbg_dir = debugfs_create_dir("omap_mux", NULL); if (!mux_dbg_dir) return; mux_dbg_board_dir = debugfs_create_dir("board", mux_dbg_dir); if (!mux_dbg_board_dir) return; list_for_each_entry(partition, &mux_partitions, node) { omap_mux_dbg_create_entry(partition, mux_dbg_dir); (void)debugfs_create_file(partition->name, S_IRUGO, 
mux_dbg_board_dir, partition, &omap_mux_dbg_board_fops); } } #else static inline void omap_mux_dbg_init(void) { } #endif /* CONFIG_DEBUG_FS */ static void __init omap_mux_free_names(struct omap_mux *m) { int i; for (i = 0; i < OMAP_MUX_NR_MODES; i++) kfree(m->muxnames[i]); #ifdef CONFIG_DEBUG_FS for (i = 0; i < OMAP_MUX_NR_SIDES; i++) kfree(m->balls[i]); #endif } /* Free all data except for GPIO pins unless CONFIG_DEBUG_FS is set */ static int __init omap_mux_late_init(void) { struct omap_mux_partition *partition; int ret; list_for_each_entry(partition, &mux_partitions, node) { struct omap_mux_entry *e, *tmp; list_for_each_entry_safe(e, tmp, &partition->muxmodes, node) { struct omap_mux *m = &e->mux; u16 mode = omap_mux_read(partition, m->reg_offset); if (OMAP_MODE_GPIO(mode)) continue; #ifndef CONFIG_DEBUG_FS mutex_lock(&muxmode_mutex); list_del(&e->node); mutex_unlock(&muxmode_mutex); omap_mux_free_names(m); kfree(m); #endif } } ret = request_irq(omap_prcm_event_to_irq("io"), omap_hwmod_mux_handle_irq, IRQF_SHARED | IRQF_NO_SUSPEND, "hwmod_io", omap_mux_late_init); if (ret) pr_warning("mux: Failed to setup hwmod io irq %d\n", ret); omap_mux_dbg_init(); return 0; } late_initcall(omap_mux_late_init); static void __init omap_mux_package_fixup(struct omap_mux *p, struct omap_mux *superset) { while (p->reg_offset != OMAP_MUX_TERMINATOR) { struct omap_mux *s = superset; int found = 0; while (s->reg_offset != OMAP_MUX_TERMINATOR) { if (s->reg_offset == p->reg_offset) { *s = *p; found++; break; } s++; } if (!found) pr_err("%s: Unknown entry offset 0x%x\n", __func__, p->reg_offset); p++; } } #ifdef CONFIG_DEBUG_FS static void __init omap_mux_package_init_balls(struct omap_ball *b, struct omap_mux *superset) { while (b->reg_offset != OMAP_MUX_TERMINATOR) { struct omap_mux *s = superset; int found = 0; while (s->reg_offset != OMAP_MUX_TERMINATOR) { if (s->reg_offset == b->reg_offset) { s->balls[0] = b->balls[0]; s->balls[1] = b->balls[1]; found++; break; } s++; } if 
(!found) pr_err("%s: Unknown ball offset 0x%x\n", __func__, b->reg_offset); b++; } } #else /* CONFIG_DEBUG_FS */ static inline void omap_mux_package_init_balls(struct omap_ball *b, struct omap_mux *superset) { } #endif /* CONFIG_DEBUG_FS */ static int __init omap_mux_setup(char *options) { if (!options) return 0; omap_mux_options = options; return 1; } __setup("omap_mux=", omap_mux_setup); /* * Note that the omap_mux=some.signal1=0x1234,some.signal2=0x1234 * cmdline options only override the bootloader values. * During development, please enable CONFIG_DEBUG_FS, and use the * signal specific entries under debugfs. */ static void __init omap_mux_set_cmdline_signals(void) { char *options, *next_opt, *token; if (!omap_mux_options) return; options = kstrdup(omap_mux_options, GFP_KERNEL); if (!options) return; next_opt = options; while ((token = strsep(&next_opt, ",")) != NULL) { char *keyval, *name; unsigned long val; keyval = token; name = strsep(&keyval, "="); if (name) { int res; res = strict_strtoul(keyval, 0x10, &val); if (res < 0) continue; omap_mux_init_signal(name, (u16)val); } } kfree(options); } static int __init omap_mux_copy_names(struct omap_mux *src, struct omap_mux *dst) { int i; for (i = 0; i < OMAP_MUX_NR_MODES; i++) { if (src->muxnames[i]) { dst->muxnames[i] = kstrdup(src->muxnames[i], GFP_KERNEL); if (!dst->muxnames[i]) goto free; } } #ifdef CONFIG_DEBUG_FS for (i = 0; i < OMAP_MUX_NR_SIDES; i++) { if (src->balls[i]) { dst->balls[i] = kstrdup(src->balls[i], GFP_KERNEL); if (!dst->balls[i]) goto free; } } #endif return 0; free: omap_mux_free_names(dst); return -ENOMEM; } #endif /* CONFIG_OMAP_MUX */ static struct omap_mux *omap_mux_get_by_gpio( struct omap_mux_partition *partition, int gpio) { struct omap_mux_entry *e; struct omap_mux *ret = NULL; list_for_each_entry(e, &partition->muxmodes, node) { struct omap_mux *m = &e->mux; if (m->gpio == gpio) { ret = m; break; } } return ret; } /* Needed for dynamic muxing of GPIO pins for off-idle */ u16 
omap_mux_get_gpio(int gpio) { struct omap_mux_partition *partition; struct omap_mux *m = NULL; list_for_each_entry(partition, &mux_partitions, node) { m = omap_mux_get_by_gpio(partition, gpio); if (m) return omap_mux_read(partition, m->reg_offset); } if (!m || m->reg_offset == OMAP_MUX_TERMINATOR) pr_err("%s: Could not get gpio%i\n", __func__, gpio); return OMAP_MUX_TERMINATOR; } /* Needed for dynamic muxing of GPIO pins for off-idle */ void omap_mux_set_gpio(u16 val, int gpio) { struct omap_mux_partition *partition; struct omap_mux *m = NULL; list_for_each_entry(partition, &mux_partitions, node) { m = omap_mux_get_by_gpio(partition, gpio); if (m) { omap_mux_write(partition, val, m->reg_offset); return; } } if (!m || m->reg_offset == OMAP_MUX_TERMINATOR) pr_err("%s: Could not set gpio%i\n", __func__, gpio); } static struct omap_mux * __init omap_mux_list_add( struct omap_mux_partition *partition, struct omap_mux *src) { struct omap_mux_entry *entry; struct omap_mux *m; entry = kzalloc(sizeof(struct omap_mux_entry), GFP_KERNEL); if (!entry) return NULL; m = &entry->mux; entry->mux = *src; #ifdef CONFIG_OMAP_MUX if (omap_mux_copy_names(src, m)) { kfree(entry); return NULL; } #endif mutex_lock(&muxmode_mutex); list_add_tail(&entry->node, &partition->muxmodes); mutex_unlock(&muxmode_mutex); return m; } /* * Note if CONFIG_OMAP_MUX is not selected, we will only initialize * the GPIO to mux offset mapping that is needed for dynamic muxing * of GPIO pins for off-idle. 
*/ static void __init omap_mux_init_list(struct omap_mux_partition *partition, struct omap_mux *superset) { while (superset->reg_offset != OMAP_MUX_TERMINATOR) { struct omap_mux *entry; #ifdef CONFIG_OMAP_MUX if (!superset->muxnames || !superset->muxnames[0]) { superset++; continue; } #else /* Skip pins that are not muxed as GPIO by bootloader */ if (!OMAP_MODE_GPIO(omap_mux_read(partition, superset->reg_offset))) { superset++; continue; } #endif entry = omap_mux_list_add(partition, superset); if (!entry) { pr_err("%s: Could not add entry\n", __func__); return; } superset++; } } #ifdef CONFIG_OMAP_MUX static void omap_mux_init_package(struct omap_mux *superset, struct omap_mux *package_subset, struct omap_ball *package_balls) { if (package_subset) omap_mux_package_fixup(package_subset, superset); if (package_balls) omap_mux_package_init_balls(package_balls, superset); } static void __init omap_mux_init_signals(struct omap_mux_partition *partition, struct omap_board_mux *board_mux) { omap_mux_set_cmdline_signals(); omap_mux_write_array(partition, board_mux); } #else static void omap_mux_init_package(struct omap_mux *superset, struct omap_mux *package_subset, struct omap_ball *package_balls) { } static void __init omap_mux_init_signals(struct omap_mux_partition *partition, struct omap_board_mux *board_mux) { } #endif static u32 mux_partitions_cnt; int __init omap_mux_init(const char *name, u32 flags, u32 mux_pbase, u32 mux_size, struct omap_mux *superset, struct omap_mux *package_subset, struct omap_board_mux *board_mux, struct omap_ball *package_balls) { struct omap_mux_partition *partition; partition = kzalloc(sizeof(struct omap_mux_partition), GFP_KERNEL); if (!partition) return -ENOMEM; partition->name = name; partition->flags = flags; partition->size = mux_size; partition->phys = mux_pbase; partition->base = ioremap(mux_pbase, mux_size); if (!partition->base) { pr_err("%s: Could not ioremap mux partition at 0x%08x\n", __func__, partition->phys); 
kfree(partition); return -ENODEV; } INIT_LIST_HEAD(&partition->muxmodes); list_add_tail(&partition->node, &mux_partitions); mux_partitions_cnt++; pr_info("%s: Add partition: #%d: %s, flags: %x\n", __func__, mux_partitions_cnt, partition->name, partition->flags); omap_mux_init_package(superset, package_subset, package_balls); omap_mux_init_list(partition, superset); omap_mux_init_signals(partition, board_mux); return 0; }
gpl-2.0
somcom3x/android_kernel_motorola_msm8992
arch/arm/mach-ixp4xx/coyote-setup.c
4471
3411
/*
 * arch/arm/mach-ixp4xx/coyote-setup.c
 *
 * Board setup for ADI Engineering Coyote and IXDPG425 boards
 *
 * Copyright (C) 2003-2005 MontaVista Software, Inc.
 *
 * Author: Deepak Saxena <dsaxena@plexity.net>
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/serial.h>
#include <linux/tty.h>
#include <linux/serial_8250.h>

#include <asm/types.h>
#include <asm/setup.h>
#include <asm/memory.h>
#include <mach/hardware.h>
#include <asm/irq.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/flash.h>

/* CompactFlash/IDE window lives on expansion bus chip select 3 */
#define COYOTE_IDE_BASE_PHYS	IXP4XX_EXP_BUS_BASE(3)
#define COYOTE_IDE_BASE_VIRT	0xFFFE1000
#define COYOTE_IDE_REGION_SIZE	0x1000

#define COYOTE_IDE_DATA_PORT	0xFFFE10E0
#define COYOTE_IDE_CTRL_PORT	0xFFFE10FC
#define COYOTE_IDE_ERROR_PORT	0xFFFE10E2
/* IDE interrupt is routed through GPIO 5 */
#define IRQ_COYOTE_IDE		IRQ_IXP4XX_GPIO5

/* CFI NOR flash, 16 bits wide */
static struct flash_platform_data coyote_flash_data = {
	.map_name	= "cfi_probe",
	.width		= 2,
};

/* .start/.end are filled in at runtime by coyote_init() */
static struct resource coyote_flash_resource = {
	.flags		= IORESOURCE_MEM,
};

static struct platform_device coyote_flash = {
	.name		= "IXP4XX-Flash",
	.id		= 0,
	.dev		= {
		.platform_data = &coyote_flash_data,
	},
	.num_resources	= 1,
	.resource	= &coyote_flash_resource,
};

/*
 * Console UART. Coyote uses UART2; coyote_init() repoints this data
 * at UART1 when running on an IXDPG425.
 */
static struct resource coyote_uart_resource = {
	.start	= IXP4XX_UART2_BASE_PHYS,
	.end	= IXP4XX_UART2_BASE_PHYS + 0x0fff,
	.flags	= IORESOURCE_MEM,
};

static struct plat_serial8250_port coyote_uart_data[] = {
	{
		.mapbase	= IXP4XX_UART2_BASE_PHYS,
		.membase	= (char *)IXP4XX_UART2_BASE_VIRT + REG_OFFSET,
		.irq		= IRQ_IXP4XX_UART2,
		.flags		= UPF_BOOT_AUTOCONF | UPF_SKIP_TEST,
		.iotype		= UPIO_MEM,
		.regshift	= 2,
		.uartclk	= IXP4XX_UART_XTAL,
	},
	{ },	/* terminating entry */
};

static struct platform_device coyote_uart = {
	.name		= "serial8250",
	.id		= PLAT8250_DEV_PLATFORM,
	.dev		= {
		.platform_data	= coyote_uart_data,
	},
	.num_resources	= 1,
	.resource	= &coyote_uart_resource,
};

static struct platform_device *coyote_devices[] __initdata = {
	&coyote_flash,
	&coyote_uart
};

/* Board init: set up expansion bus windows and register the devices. */
static void __init coyote_init(void)
{
	ixp4xx_sys_init();

	/* 32MiB of flash on expansion bus chip select 0 */
	coyote_flash_resource.start = IXP4XX_EXP_BUS_BASE(0);
	coyote_flash_resource.end = IXP4XX_EXP_BUS_BASE(0) + SZ_32M - 1;

	/* allow writes to flash, and mirror CS0 timings onto CS1 */
	*IXP4XX_EXP_CS0 |= IXP4XX_FLASH_WRITABLE;
	*IXP4XX_EXP_CS1 = *IXP4XX_EXP_CS0;

	/* IXDPG425 has its console wired to UART1 instead of UART2 */
	if (machine_is_ixdpg425()) {
		coyote_uart_data[0].membase =
			(char*)(IXP4XX_UART1_BASE_VIRT + REG_OFFSET);
		coyote_uart_data[0].mapbase = IXP4XX_UART1_BASE_PHYS;
		coyote_uart_data[0].irq = IRQ_IXP4XX_UART1;
	}

	platform_add_devices(coyote_devices, ARRAY_SIZE(coyote_devices));
}

#ifdef CONFIG_ARCH_ADI_COYOTE
MACHINE_START(ADI_COYOTE, "ADI Engineering Coyote")
	/* Maintainer: MontaVista Software, Inc. */
	.map_io		= ixp4xx_map_io,
	.init_early	= ixp4xx_init_early,
	.init_irq	= ixp4xx_init_irq,
	.init_time	= ixp4xx_timer_init,
	.atag_offset	= 0x100,
	.init_machine	= coyote_init,
#if defined(CONFIG_PCI)
	.dma_zone_size	= SZ_64M,
#endif
	.restart	= ixp4xx_restart,
MACHINE_END
#endif

/*
 * IXDPG425 is identical to Coyote except for which serial port
 * is connected.
 */
#ifdef CONFIG_MACH_IXDPG425
MACHINE_START(IXDPG425, "Intel IXDPG425")
	/* Maintainer: MontaVista Software, Inc. */
	.map_io		= ixp4xx_map_io,
	.init_early	= ixp4xx_init_early,
	.init_irq	= ixp4xx_init_irq,
	.init_time	= ixp4xx_timer_init,
	.atag_offset	= 0x100,
	.init_machine	= coyote_init,
	.restart	= ixp4xx_restart,
MACHINE_END
#endif
gpl-2.0
micromys/linux-sunxi
arch/microblaze/kernel/setup.c
4471
6307
/*
 * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2007-2009 PetaLogix
 * Copyright (C) 2006 Atmark Techno, Inc.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/init.h>
#include <linux/string.h>
#include <linux/seq_file.h>
#include <linux/cpu.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/debugfs.h>

#include <asm/setup.h>
#include <asm/sections.h>
#include <asm/page.h>
#include <linux/io.h>
#include <linux/bug.h>
#include <linux/param.h>
#include <linux/pci.h>
#include <linux/cache.h>
#include <linux/of_platform.h>
#include <linux/dma-mapping.h>
#include <asm/cacheflush.h>
#include <asm/entry.h>
#include <asm/cpuinfo.h>

#include <asm/prom.h>
#include <asm/pgtable.h>

DEFINE_PER_CPU(unsigned int, KSP);	/* Saved kernel stack pointer */
DEFINE_PER_CPU(unsigned int, KM);	/* Kernel/user mode */
DEFINE_PER_CPU(unsigned int, ENTRY_SP);	/* Saved SP on kernel entry */
DEFINE_PER_CPU(unsigned int, R11_SAVE);	/* Temp variable for entry */
DEFINE_PER_CPU(unsigned int, CURRENT_SAVE);	/* Saved current pointer */

unsigned int boot_cpuid;
char cmd_line[COMMAND_LINE_SIZE];

/*
 * Late arch setup: unflatten the device tree, probe CPU features,
 * initialise caches and the memory map, and pick a console.
 */
void __init setup_arch(char **cmdline_p)
{
	*cmdline_p = cmd_line;

	console_verbose();

	unflatten_device_tree();

	setup_cpuinfo();

	microblaze_cache_init();

	setup_memory();

#ifdef CONFIG_EARLY_PRINTK
	/* remap early console to virtual address */
	remap_early_printk();
#endif

	xilinx_pci_init();

#if defined(CONFIG_SELFMOD_INTC) || defined(CONFIG_SELFMOD_TIMER)
	printk(KERN_NOTICE "Self modified code enable\n");
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_XILINX_CONSOLE)
	conswitchp = &xil_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
}

#ifdef CONFIG_MTD_UCLINUX
/* Handle both romfs and cramfs types, without generating unnecessary
   code (ie no point checking for CRAMFS if it's not even enabled) */
inline unsigned get_romfs_len(unsigned *addr)
{
#ifdef CONFIG_ROMFS_FS
	if (memcmp(&addr[0], "-rom1fs-", 8) == 0) /* romfs magic */
		return be32_to_cpu(addr[2]);
#endif

#ifdef CONFIG_CRAMFS
	if (addr[0] == le32_to_cpu(0x28cd3d45)) /* cramfs magic */
		return le32_to_cpu(addr[1]);
#endif
	return 0;
}
#endif	/* CONFIG_MTD_UCLINUX */

unsigned long kernel_tlb;

/*
 * Very early machine setup, entered from head.S with the MMU still
 * under boot control: relocate any attached ROMFS out of BSS, clear
 * BSS, capture the bootloader command line, copy the interrupt
 * vectors into block RAM and seed the per-CPU bookkeeping.
 */
void __init machine_early_init(const char *cmdline, unsigned int ram,
		unsigned int fdt, unsigned int msr, unsigned int tlb0,
		unsigned int tlb1)
{
	unsigned long *src, *dst;
	unsigned int offset = 0;

	/* If CONFIG_MTD_UCLINUX is defined, assume ROMFS is at the
	 * end of kernel. There are two position which we want to check.
	 * The first is __init_end and the second __bss_start.
	 */
#ifdef CONFIG_MTD_UCLINUX
	int romfs_size;
	unsigned int romfs_base;
	char *old_klimit = klimit;

	romfs_base = (ram ? ram : (unsigned int)&__init_end);
	romfs_size = PAGE_ALIGN(get_romfs_len((unsigned *)romfs_base));
	if (!romfs_size) {
		romfs_base = (unsigned int)&__bss_start;
		romfs_size = PAGE_ALIGN(get_romfs_len((unsigned *)romfs_base));
	}

	/* Move ROMFS out of BSS before clearing it */
	if (romfs_size > 0) {
		memmove(&_ebss, (int *)romfs_base, romfs_size);
		klimit += romfs_size;
	}
#endif

	/* clearing bss section */
	memset(__bss_start, 0, __bss_stop-__bss_start);
	memset(_ssbss, 0, _esbss-_ssbss);

	/* Copy command line passed from bootloader */
#ifndef CONFIG_CMDLINE_BOOL
	if (cmdline && cmdline[0] != '\0')
		strlcpy(cmd_line, cmdline, COMMAND_LINE_SIZE);
#endif

	lockdep_init();

	/* initialize device tree for usage in early_printk */
	early_init_devtree((void *)_fdt_start);

#ifdef CONFIG_EARLY_PRINTK
	setup_early_printk(NULL);
#endif

	/* setup kernel_tlb after BSS cleaning
	 * Maybe worth to move to asm code */
	kernel_tlb = tlb0 + tlb1;
	/* printk("TLB1 0x%08x, TLB0 0x%08x, tlb 0x%x\n", tlb0,
							tlb1, kernel_tlb); */

	printk("Ramdisk addr 0x%08x, ", ram);
	if (fdt)
		printk("FDT at 0x%08x\n", fdt);
	else
		printk("Compiled-in FDT at 0x%08x\n",
					(unsigned int)_fdt_start);

#ifdef CONFIG_MTD_UCLINUX
	printk("Found romfs @ 0x%08x (0x%08x)\n",
			romfs_base, romfs_size);
	printk("#### klimit %p ####\n", old_klimit);
	BUG_ON(romfs_size < 0); /* What else can we do? */

	printk("Moved 0x%08x bytes from 0x%08x to 0x%08x\n",
			romfs_size, romfs_base, (unsigned)&_ebss);

	printk("New klimit: 0x%08x\n", (unsigned)klimit);
#endif

	/* Warn if the kernel's MSR-instruction configuration disagrees
	 * with what the bootloader reported about the CPU. */
#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
	if (msr)
		printk("!!!Your kernel has setup MSR instruction but "
				"CPU don't have it %x\n", msr);
#else
	if (!msr)
		printk("!!!Your kernel not setup MSR instruction but "
				"CPU have it %x\n", msr);
#endif

	/* Do not copy reset vectors. offset = 0x2 means skip the first
	 * two instructions. dst is pointer to MB vectors which are placed
	 * in block ram. If you want to copy reset vector setup offset to 0x0 */
#if !CONFIG_MANUAL_RESET_VECTOR
	offset = 0x2;
#endif
	dst = (unsigned long *) (offset * sizeof(u32));
	for (src = __ivt_start + offset; src < __ivt_end; src++, dst++)
		*dst = *src;

	/* Initialize global data */
	per_cpu(KM, 0) = 0x1;	/* We start in kernel mode */
	per_cpu(CURRENT_SAVE, 0) = (unsigned long)current;
}

#ifdef CONFIG_DEBUG_FS
struct dentry *of_debugfs_root;

/* Create the arch-wide "microblaze" debugfs directory. */
static int microblaze_debugfs_init(void)
{
	of_debugfs_root = debugfs_create_dir("microblaze", NULL);

	return of_debugfs_root == NULL;
}
arch_initcall(microblaze_debugfs_init);

# ifdef CONFIG_MMU
/* Expose the tlb_skip tuning knob under the microblaze debugfs dir. */
static int __init debugfs_tlb(void)
{
	struct dentry *d;

	if (!of_debugfs_root)
		return -ENODEV;

	d = debugfs_create_u32("tlb_skip", S_IRUGO, of_debugfs_root,
			       &tlb_skip);
	if (!d)
		return -ENOMEM;

	/* Previously fell off the end of a non-void function here,
	 * handing an undefined value back to device_initcall. */
	return 0;
}
device_initcall(debugfs_tlb);
# endif
#endif

/*
 * Bus notifier: attach the default direct-DMA ops to every device
 * added to the platform bus.
 */
static int dflt_bus_notify(struct notifier_block *nb,
				unsigned long action, void *data)
{
	struct device *dev = data;

	/* We are only interested in device addition */
	if (action != BUS_NOTIFY_ADD_DEVICE)
		return 0;

	set_dma_ops(dev, &dma_direct_ops);

	return NOTIFY_DONE;
}

static struct notifier_block dflt_plat_bus_notifier = {
	.notifier_call = dflt_bus_notify,
	.priority = INT_MAX,
};

/* Register the platform-bus notifier early in boot. */
static int __init setup_bus_notifier(void)
{
	bus_register_notifier(&platform_bus_type, &dflt_plat_bus_notifier);

	return 0;
}

arch_initcall(setup_bus_notifier);
gpl-2.0
thicklizard/GPEweepingangel
drivers/video/backlight/vgg2432a4.c
4983
6383
/* drivers/video/backlight/vgg2432a4.c
 *
 * VGG2432A4 (ILI9320) LCD controller driver.
 *
 * Copyright 2007 Simtec Electronics
 *	http://armlinux.simtec.co.uk/
 *	Ben Dooks <ben@simtec.co.uk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/delay.h>
#include <linux/err.h>
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/lcd.h>
#include <linux/module.h>

#include <linux/spi/spi.h>

#include <video/ili9320.h>

#include "ili9320.h"

/* Device initialisation sequences */

/* First power-block pass: everything held at minimum drive levels. */
static struct ili9320_reg vgg_init1[] = {
	{
		.address = ILI9320_POWER1,
		.value	 = ILI9320_POWER1_AP(0) | ILI9320_POWER1_BT(0),
	}, {
		.address = ILI9320_POWER2,
		.value	 = (ILI9320_POWER2_VC(7) |
			    ILI9320_POWER2_DC0(0) | ILI9320_POWER2_DC1(0)),
	}, {
		.address = ILI9320_POWER3,
		.value	 = ILI9320_POWER3_VRH(0),
	}, {
		.address = ILI9320_POWER4,
		.value	 = ILI9320_POWER4_VREOUT(0),
	},
};

/* Second power-block pass: amplifiers and step-up circuits enabled. */
static struct ili9320_reg vgg_init2[] = {
	{
		.address = ILI9320_POWER1,
		.value	 = (ILI9320_POWER1_AP(3) | ILI9320_POWER1_APE |
			    ILI9320_POWER1_BT(7) | ILI9320_POWER1_SAP),
	}, {
		.address = ILI9320_POWER2,
		.value	 = ILI9320_POWER2_VC(7) | ILI9320_POWER2_DC0(3),
	}
};

/* Panel gamma correction curve (raw register values). */
static struct ili9320_reg vgg_gamma[] = {
	{
		.address = ILI9320_GAMMA1,
		.value	 = 0x0000,
	}, {
		.address = ILI9320_GAMMA2,
		.value	 = 0x0505,
	}, {
		.address = ILI9320_GAMMA3,
		.value	 = 0x0004,
	}, {
		.address = ILI9320_GAMMA4,
		.value	 = 0x0006,
	}, {
		.address = ILI9320_GAMMA5,
		.value	 = 0x0707,
	}, {
		.address = ILI9320_GAMMA6,
		.value	 = 0x0105,
	}, {
		.address = ILI9320_GAMMA7,
		.value	 = 0x0002,
	}, {
		.address = ILI9320_GAMMA8,
		.value	 = 0x0707,
	}, {
		.address = ILI9320_GAMMA9,
		.value	 = 0x0704,
	}, {
		.address = ILI9320_GAMMA10,
		.value	 = 0x807,
	}
};

/* Basic driver/entry-mode setup, written before the power sequence. */
static struct ili9320_reg vgg_init0[] = {
	[0]	= {
		/* set direction and scan mode gate */
		.address = ILI9320_DRIVER,
		.value	 = ILI9320_DRIVER_SS,
	}, {
		.address = ILI9320_DRIVEWAVE,
		.value	 = (ILI9320_DRIVEWAVE_MUSTSET |
			    ILI9320_DRIVEWAVE_EOR | ILI9320_DRIVEWAVE_BC),
	}, {
		.address = ILI9320_ENTRYMODE,
		.value	 = ILI9320_ENTRYMODE_ID(3) | ILI9320_ENTRYMODE_BGR,
	}, {
		.address = ILI9320_RESIZING,
		.value	 = 0x0,
	},
};

/*
 * Run the full VGG2432A4 power-up and configuration sequence.
 * The register write order and the mdelay() pauses between the power
 * blocks follow the panel's datasheet sequence; do not reorder them.
 * Returns 0 on success or the first failed write's error code.
 */
static int vgg2432a4_lcd_init(struct ili9320 *lcd,
			      struct ili9320_platdata *cfg)
{
	unsigned int addr;
	int ret;

	/* Set VCore before anything else (VGG243237-6UFLWA) */
	ret = ili9320_write(lcd, 0x00e5, 0x8000);
	if (ret)
		goto err_initial;

	/* Start the oscillator up before we can do anything else. */
	ret = ili9320_write(lcd, ILI9320_OSCILATION, ILI9320_OSCILATION_OSC);
	if (ret)
		goto err_initial;

	/* must wait at least 10ms after starting */
	mdelay(15);

	ret = ili9320_write_regs(lcd, vgg_init0, ARRAY_SIZE(vgg_init0));
	if (ret != 0)
		goto err_initial;

	ili9320_write(lcd, ILI9320_DISPLAY2, cfg->display2);
	ili9320_write(lcd, ILI9320_DISPLAY3, cfg->display3);
	ili9320_write(lcd, ILI9320_DISPLAY4, cfg->display4);

	ili9320_write(lcd, ILI9320_RGB_IF1, cfg->rgb_if1);
	ili9320_write(lcd, ILI9320_FRAMEMAKER, 0x0);
	ili9320_write(lcd, ILI9320_RGB_IF2, cfg->rgb_if2);

	ret = ili9320_write_regs(lcd, vgg_init1, ARRAY_SIZE(vgg_init1));
	if (ret != 0)
		goto err_vgg;

	mdelay(300);

	ret = ili9320_write_regs(lcd, vgg_init2, ARRAY_SIZE(vgg_init2));
	if (ret != 0)
		goto err_vgg2;

	mdelay(100);

	ili9320_write(lcd, ILI9320_POWER3, 0x13c);

	mdelay(100);

	ili9320_write(lcd, ILI9320_POWER4, 0x1c00);
	ili9320_write(lcd, ILI9320_POWER7, 0x000e);

	mdelay(100);

	ili9320_write(lcd, ILI9320_GRAM_HORIZ_ADDR, 0x00);
	ili9320_write(lcd, ILI9320_GRAM_VERT_ADD, 0x00);

	ret = ili9320_write_regs(lcd, vgg_gamma, ARRAY_SIZE(vgg_gamma));
	if (ret != 0)
		goto err_vgg3;

	/* window the GRAM to the configured panel size */
	ili9320_write(lcd, ILI9320_HORIZ_START, 0x0);
	ili9320_write(lcd, ILI9320_HORIZ_END, cfg->hsize - 1);
	ili9320_write(lcd, ILI9320_VERT_START, 0x0);
	ili9320_write(lcd, ILI9320_VERT_END, cfg->vsize - 1);

	ili9320_write(lcd, ILI9320_DRIVER2,
		      ILI9320_DRIVER2_NL(((cfg->vsize - 240) / 8) + 0x1D));

	ili9320_write(lcd, ILI9320_BASE_IMAGE, 0x1);
	ili9320_write(lcd, ILI9320_VERT_SCROLL, 0x00);

	/* clear all partial-display registers */
	for (addr = ILI9320_PARTIAL1_POSITION; addr <= ILI9320_PARTIAL2_END;
	     addr++) {
		ili9320_write(lcd, addr, 0x0);
	}

	ili9320_write(lcd, ILI9320_INTERFACE1, 0x10);
	ili9320_write(lcd, ILI9320_INTERFACE2, cfg->interface2);
	ili9320_write(lcd, ILI9320_INTERFACE3, cfg->interface3);
	ili9320_write(lcd, ILI9320_INTERFACE4, cfg->interface4);
	ili9320_write(lcd, ILI9320_INTERFACE5, cfg->interface5);
	ili9320_write(lcd, ILI9320_INTERFACE6, cfg->interface6);

	/* remember display1 so resume/power paths can restore it */
	lcd->display1 = (ILI9320_DISPLAY1_D(3) | ILI9320_DISPLAY1_DTE |
			 ILI9320_DISPLAY1_GON | ILI9320_DISPLAY1_BASEE |
			 0x40);

	ili9320_write(lcd, ILI9320_DISPLAY1, lcd->display1);

	return 0;

	/* all failures simply propagate the first error code */
 err_vgg3:
 err_vgg2:
 err_vgg:
 err_initial:
	return ret;
}

#ifdef CONFIG_PM
/* Delegate PM callbacks to the shared ili9320 core. */
static int vgg2432a4_suspend(struct spi_device *spi, pm_message_t state)
{
	return ili9320_suspend(dev_get_drvdata(&spi->dev), state);
}

static int vgg2432a4_resume(struct spi_device *spi)
{
	return ili9320_resume(dev_get_drvdata(&spi->dev));
}
#else
#define vgg2432a4_suspend	NULL
#define vgg2432a4_resume	NULL
#endif

/* Panel-specific hooks handed to the ili9320 core driver. */
static struct ili9320_client vgg2432a4_client = {
	.name	= "VGG2432A4",
	.init	= vgg2432a4_lcd_init,
};

/* Device probe */

static int __devinit vgg2432a4_probe(struct spi_device *spi)
{
	int ret;

	ret = ili9320_probe_spi(spi, &vgg2432a4_client);
	if (ret != 0) {
		dev_err(&spi->dev, "failed to initialise ili9320\n");
		return ret;
	}

	return 0;
}

static int __devexit vgg2432a4_remove(struct spi_device *spi)
{
	return ili9320_remove(dev_get_drvdata(&spi->dev));
}

static void vgg2432a4_shutdown(struct spi_device *spi)
{
	ili9320_shutdown(dev_get_drvdata(&spi->dev));
}

static struct spi_driver vgg2432a4_driver = {
	.driver = {
		.name		= "VGG2432A4",
		.owner		= THIS_MODULE,
	},
	.probe		= vgg2432a4_probe,
	.remove		= __devexit_p(vgg2432a4_remove),
	.shutdown	= vgg2432a4_shutdown,
	.suspend	= vgg2432a4_suspend,
	.resume		= vgg2432a4_resume,
};

module_spi_driver(vgg2432a4_driver);

MODULE_AUTHOR("Ben Dooks <ben-linux@fluff.org>");
MODULE_DESCRIPTION("VGG2432A4 LCD Driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("spi:VGG2432A4");
gpl-2.0
GuneetAtwal/kernel_klte
drivers/net/ethernet/mellanox/mlx4/qp.c
4983
13812
/* * Copyright (c) 2004 Topspin Communications. All rights reserved. * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved. * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved. * Copyright (c) 2004 Voltaire, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #include <linux/gfp.h> #include <linux/export.h> #include <linux/init.h> #include <linux/mlx4/cmd.h> #include <linux/mlx4/qp.h> #include "mlx4.h" #include "icm.h" void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type) { struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; struct mlx4_qp *qp; spin_lock(&qp_table->lock); qp = __mlx4_qp_lookup(dev, qpn); if (qp) atomic_inc(&qp->refcount); spin_unlock(&qp_table->lock); if (!qp) { mlx4_dbg(dev, "Async event for none existent QP %08x\n", qpn); return; } qp->event(qp, event_type); if (atomic_dec_and_test(&qp->refcount)) complete(&qp->free); } static int is_qp0(struct mlx4_dev *dev, struct mlx4_qp *qp) { return qp->qpn >= dev->caps.sqp_start && qp->qpn <= dev->caps.sqp_start + 1; } static int __mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt, enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state, struct mlx4_qp_context *context, enum mlx4_qp_optpar optpar, int sqd_event, struct mlx4_qp *qp, int native) { static const u16 op[MLX4_QP_NUM_STATE][MLX4_QP_NUM_STATE] = { [MLX4_QP_STATE_RST] = { [MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP, [MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP, [MLX4_QP_STATE_INIT] = MLX4_CMD_RST2INIT_QP, }, [MLX4_QP_STATE_INIT] = { [MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP, [MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP, [MLX4_QP_STATE_INIT] = MLX4_CMD_INIT2INIT_QP, [MLX4_QP_STATE_RTR] = MLX4_CMD_INIT2RTR_QP, }, [MLX4_QP_STATE_RTR] = { [MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP, [MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP, [MLX4_QP_STATE_RTS] = MLX4_CMD_RTR2RTS_QP, }, [MLX4_QP_STATE_RTS] = { [MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP, [MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP, [MLX4_QP_STATE_RTS] = MLX4_CMD_RTS2RTS_QP, [MLX4_QP_STATE_SQD] = MLX4_CMD_RTS2SQD_QP, }, [MLX4_QP_STATE_SQD] = { [MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP, [MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP, [MLX4_QP_STATE_RTS] = MLX4_CMD_SQD2RTS_QP, [MLX4_QP_STATE_SQD] = MLX4_CMD_SQD2SQD_QP, }, [MLX4_QP_STATE_SQER] = { [MLX4_QP_STATE_RST] = 
MLX4_CMD_2RST_QP, [MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP, [MLX4_QP_STATE_RTS] = MLX4_CMD_SQERR2RTS_QP, }, [MLX4_QP_STATE_ERR] = { [MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP, [MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP, } }; struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_cmd_mailbox *mailbox; int ret = 0; u8 port; if (cur_state >= MLX4_QP_NUM_STATE || new_state >= MLX4_QP_NUM_STATE || !op[cur_state][new_state]) return -EINVAL; if (op[cur_state][new_state] == MLX4_CMD_2RST_QP) { ret = mlx4_cmd(dev, 0, qp->qpn, 2, MLX4_CMD_2RST_QP, MLX4_CMD_TIME_CLASS_A, native); if (mlx4_is_master(dev) && cur_state != MLX4_QP_STATE_ERR && cur_state != MLX4_QP_STATE_RST && is_qp0(dev, qp)) { port = (qp->qpn & 1) + 1; priv->mfunc.master.qp0_state[port].qp0_active = 0; } return ret; } mailbox = mlx4_alloc_cmd_mailbox(dev); if (IS_ERR(mailbox)) return PTR_ERR(mailbox); if (cur_state == MLX4_QP_STATE_RST && new_state == MLX4_QP_STATE_INIT) { u64 mtt_addr = mlx4_mtt_addr(dev, mtt); context->mtt_base_addr_h = mtt_addr >> 32; context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff); context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT; } *(__be32 *) mailbox->buf = cpu_to_be32(optpar); memcpy(mailbox->buf + 8, context, sizeof *context); ((struct mlx4_qp_context *) (mailbox->buf + 8))->local_qpn = cpu_to_be32(qp->qpn); ret = mlx4_cmd(dev, mailbox->dma, qp->qpn | (!!sqd_event << 31), new_state == MLX4_QP_STATE_RST ? 
2 : 0, op[cur_state][new_state], MLX4_CMD_TIME_CLASS_C, native); mlx4_free_cmd_mailbox(dev, mailbox); return ret; } int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt, enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state, struct mlx4_qp_context *context, enum mlx4_qp_optpar optpar, int sqd_event, struct mlx4_qp *qp) { return __mlx4_qp_modify(dev, mtt, cur_state, new_state, context, optpar, sqd_event, qp, 0); } EXPORT_SYMBOL_GPL(mlx4_qp_modify); int __mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_qp_table *qp_table = &priv->qp_table; *base = mlx4_bitmap_alloc_range(&qp_table->bitmap, cnt, align); if (*base == -1) return -ENOMEM; return 0; } int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base) { u64 in_param; u64 out_param; int err; if (mlx4_is_mfunc(dev)) { set_param_l(&in_param, cnt); set_param_h(&in_param, align); err = mlx4_cmd_imm(dev, in_param, &out_param, RES_QP, RES_OP_RESERVE, MLX4_CMD_ALLOC_RES, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); if (err) return err; *base = get_param_l(&out_param); return 0; } return __mlx4_qp_reserve_range(dev, cnt, align, base); } EXPORT_SYMBOL_GPL(mlx4_qp_reserve_range); void __mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_qp_table *qp_table = &priv->qp_table; if (mlx4_is_qp_reserved(dev, (u32) base_qpn)) return; mlx4_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt); } void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt) { u64 in_param; int err; if (mlx4_is_mfunc(dev)) { set_param_l(&in_param, base_qpn); set_param_h(&in_param, cnt); err = mlx4_cmd(dev, in_param, RES_QP, RES_OP_RESERVE, MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); if (err) { mlx4_warn(dev, "Failed to release qp range" " base:%d cnt:%d\n", base_qpn, cnt); } } else __mlx4_qp_release_range(dev, base_qpn, cnt); } 
EXPORT_SYMBOL_GPL(mlx4_qp_release_range); int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_qp_table *qp_table = &priv->qp_table; int err; err = mlx4_table_get(dev, &qp_table->qp_table, qpn); if (err) goto err_out; err = mlx4_table_get(dev, &qp_table->auxc_table, qpn); if (err) goto err_put_qp; err = mlx4_table_get(dev, &qp_table->altc_table, qpn); if (err) goto err_put_auxc; err = mlx4_table_get(dev, &qp_table->rdmarc_table, qpn); if (err) goto err_put_altc; err = mlx4_table_get(dev, &qp_table->cmpt_table, qpn); if (err) goto err_put_rdmarc; return 0; err_put_rdmarc: mlx4_table_put(dev, &qp_table->rdmarc_table, qpn); err_put_altc: mlx4_table_put(dev, &qp_table->altc_table, qpn); err_put_auxc: mlx4_table_put(dev, &qp_table->auxc_table, qpn); err_put_qp: mlx4_table_put(dev, &qp_table->qp_table, qpn); err_out: return err; } static int mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn) { u64 param; if (mlx4_is_mfunc(dev)) { set_param_l(&param, qpn); return mlx4_cmd_imm(dev, param, &param, RES_QP, RES_OP_MAP_ICM, MLX4_CMD_ALLOC_RES, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); } return __mlx4_qp_alloc_icm(dev, qpn); } void __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_qp_table *qp_table = &priv->qp_table; mlx4_table_put(dev, &qp_table->cmpt_table, qpn); mlx4_table_put(dev, &qp_table->rdmarc_table, qpn); mlx4_table_put(dev, &qp_table->altc_table, qpn); mlx4_table_put(dev, &qp_table->auxc_table, qpn); mlx4_table_put(dev, &qp_table->qp_table, qpn); } static void mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn) { u64 in_param; if (mlx4_is_mfunc(dev)) { set_param_l(&in_param, qpn); if (mlx4_cmd(dev, in_param, RES_QP, RES_OP_MAP_ICM, MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED)) mlx4_warn(dev, "Failed to free icm of qp:%d\n", qpn); } else __mlx4_qp_free_icm(dev, qpn); } int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp) { struct 
mlx4_priv *priv = mlx4_priv(dev); struct mlx4_qp_table *qp_table = &priv->qp_table; int err; if (!qpn) return -EINVAL; qp->qpn = qpn; err = mlx4_qp_alloc_icm(dev, qpn); if (err) return err; spin_lock_irq(&qp_table->lock); err = radix_tree_insert(&dev->qp_table_tree, qp->qpn & (dev->caps.num_qps - 1), qp); spin_unlock_irq(&qp_table->lock); if (err) goto err_icm; atomic_set(&qp->refcount, 1); init_completion(&qp->free); return 0; err_icm: mlx4_qp_free_icm(dev, qpn); return err; } EXPORT_SYMBOL_GPL(mlx4_qp_alloc); void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp) { struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; unsigned long flags; spin_lock_irqsave(&qp_table->lock, flags); radix_tree_delete(&dev->qp_table_tree, qp->qpn & (dev->caps.num_qps - 1)); spin_unlock_irqrestore(&qp_table->lock, flags); } EXPORT_SYMBOL_GPL(mlx4_qp_remove); void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp) { if (atomic_dec_and_test(&qp->refcount)) complete(&qp->free); wait_for_completion(&qp->free); mlx4_qp_free_icm(dev, qp->qpn); } EXPORT_SYMBOL_GPL(mlx4_qp_free); static int mlx4_CONF_SPECIAL_QP(struct mlx4_dev *dev, u32 base_qpn) { return mlx4_cmd(dev, 0, base_qpn, 0, MLX4_CMD_CONF_SPECIAL_QP, MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); } int mlx4_init_qp_table(struct mlx4_dev *dev) { struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; int err; int reserved_from_top = 0; spin_lock_init(&qp_table->lock); INIT_RADIX_TREE(&dev->qp_table_tree, GFP_ATOMIC); if (mlx4_is_slave(dev)) return 0; /* * We reserve 2 extra QPs per port for the special QPs. The * block of special QPs must be aligned to a multiple of 8, so * round up. * * We also reserve the MSB of the 24-bit QP number to indicate * that a QP is an XRC QP. 
*/ dev->caps.sqp_start = ALIGN(dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 8); { int sort[MLX4_NUM_QP_REGION]; int i, j, tmp; int last_base = dev->caps.num_qps; for (i = 1; i < MLX4_NUM_QP_REGION; ++i) sort[i] = i; for (i = MLX4_NUM_QP_REGION; i > 0; --i) { for (j = 2; j < i; ++j) { if (dev->caps.reserved_qps_cnt[sort[j]] > dev->caps.reserved_qps_cnt[sort[j - 1]]) { tmp = sort[j]; sort[j] = sort[j - 1]; sort[j - 1] = tmp; } } } for (i = 1; i < MLX4_NUM_QP_REGION; ++i) { last_base -= dev->caps.reserved_qps_cnt[sort[i]]; dev->caps.reserved_qps_base[sort[i]] = last_base; reserved_from_top += dev->caps.reserved_qps_cnt[sort[i]]; } } err = mlx4_bitmap_init(&qp_table->bitmap, dev->caps.num_qps, (1 << 23) - 1, dev->caps.sqp_start + 8, reserved_from_top); if (err) return err; return mlx4_CONF_SPECIAL_QP(dev, dev->caps.sqp_start); } void mlx4_cleanup_qp_table(struct mlx4_dev *dev) { if (mlx4_is_slave(dev)) return; mlx4_CONF_SPECIAL_QP(dev, 0); mlx4_bitmap_cleanup(&mlx4_priv(dev)->qp_table.bitmap); } int mlx4_qp_query(struct mlx4_dev *dev, struct mlx4_qp *qp, struct mlx4_qp_context *context) { struct mlx4_cmd_mailbox *mailbox; int err; mailbox = mlx4_alloc_cmd_mailbox(dev); if (IS_ERR(mailbox)) return PTR_ERR(mailbox); err = mlx4_cmd_box(dev, 0, mailbox->dma, qp->qpn, 0, MLX4_CMD_QUERY_QP, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); if (!err) memcpy(context, mailbox->buf + 8, sizeof *context); mlx4_free_cmd_mailbox(dev, mailbox); return err; } EXPORT_SYMBOL_GPL(mlx4_qp_query); int mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt, struct mlx4_qp_context *context, struct mlx4_qp *qp, enum mlx4_qp_state *qp_state) { int err; int i; enum mlx4_qp_state states[] = { MLX4_QP_STATE_RST, MLX4_QP_STATE_INIT, MLX4_QP_STATE_RTR, MLX4_QP_STATE_RTS }; for (i = 0; i < ARRAY_SIZE(states) - 1; i++) { context->flags &= cpu_to_be32(~(0xf << 28)); context->flags |= cpu_to_be32(states[i + 1] << 28); err = mlx4_qp_modify(dev, mtt, states[i], states[i + 1], context, 0, 0, qp); if (err) 
{ mlx4_err(dev, "Failed to bring QP to state: " "%d with error: %d\n", states[i + 1], err); return err; } *qp_state = states[i + 1]; } return 0; } EXPORT_SYMBOL_GPL(mlx4_qp_to_ready);
gpl-2.0
jimsmith80/android_kernel_zte_warplte
drivers/net/ethernet/mellanox/mlx4/qp.c
4983
13812
/* * Copyright (c) 2004 Topspin Communications. All rights reserved. * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved. * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved. * Copyright (c) 2004 Voltaire, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #include <linux/gfp.h> #include <linux/export.h> #include <linux/init.h> #include <linux/mlx4/cmd.h> #include <linux/mlx4/qp.h> #include "mlx4.h" #include "icm.h" void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type) { struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; struct mlx4_qp *qp; spin_lock(&qp_table->lock); qp = __mlx4_qp_lookup(dev, qpn); if (qp) atomic_inc(&qp->refcount); spin_unlock(&qp_table->lock); if (!qp) { mlx4_dbg(dev, "Async event for none existent QP %08x\n", qpn); return; } qp->event(qp, event_type); if (atomic_dec_and_test(&qp->refcount)) complete(&qp->free); } static int is_qp0(struct mlx4_dev *dev, struct mlx4_qp *qp) { return qp->qpn >= dev->caps.sqp_start && qp->qpn <= dev->caps.sqp_start + 1; } static int __mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt, enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state, struct mlx4_qp_context *context, enum mlx4_qp_optpar optpar, int sqd_event, struct mlx4_qp *qp, int native) { static const u16 op[MLX4_QP_NUM_STATE][MLX4_QP_NUM_STATE] = { [MLX4_QP_STATE_RST] = { [MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP, [MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP, [MLX4_QP_STATE_INIT] = MLX4_CMD_RST2INIT_QP, }, [MLX4_QP_STATE_INIT] = { [MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP, [MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP, [MLX4_QP_STATE_INIT] = MLX4_CMD_INIT2INIT_QP, [MLX4_QP_STATE_RTR] = MLX4_CMD_INIT2RTR_QP, }, [MLX4_QP_STATE_RTR] = { [MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP, [MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP, [MLX4_QP_STATE_RTS] = MLX4_CMD_RTR2RTS_QP, }, [MLX4_QP_STATE_RTS] = { [MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP, [MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP, [MLX4_QP_STATE_RTS] = MLX4_CMD_RTS2RTS_QP, [MLX4_QP_STATE_SQD] = MLX4_CMD_RTS2SQD_QP, }, [MLX4_QP_STATE_SQD] = { [MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP, [MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP, [MLX4_QP_STATE_RTS] = MLX4_CMD_SQD2RTS_QP, [MLX4_QP_STATE_SQD] = MLX4_CMD_SQD2SQD_QP, }, [MLX4_QP_STATE_SQER] = { [MLX4_QP_STATE_RST] = 
MLX4_CMD_2RST_QP, [MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP, [MLX4_QP_STATE_RTS] = MLX4_CMD_SQERR2RTS_QP, }, [MLX4_QP_STATE_ERR] = { [MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP, [MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP, } }; struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_cmd_mailbox *mailbox; int ret = 0; u8 port; if (cur_state >= MLX4_QP_NUM_STATE || new_state >= MLX4_QP_NUM_STATE || !op[cur_state][new_state]) return -EINVAL; if (op[cur_state][new_state] == MLX4_CMD_2RST_QP) { ret = mlx4_cmd(dev, 0, qp->qpn, 2, MLX4_CMD_2RST_QP, MLX4_CMD_TIME_CLASS_A, native); if (mlx4_is_master(dev) && cur_state != MLX4_QP_STATE_ERR && cur_state != MLX4_QP_STATE_RST && is_qp0(dev, qp)) { port = (qp->qpn & 1) + 1; priv->mfunc.master.qp0_state[port].qp0_active = 0; } return ret; } mailbox = mlx4_alloc_cmd_mailbox(dev); if (IS_ERR(mailbox)) return PTR_ERR(mailbox); if (cur_state == MLX4_QP_STATE_RST && new_state == MLX4_QP_STATE_INIT) { u64 mtt_addr = mlx4_mtt_addr(dev, mtt); context->mtt_base_addr_h = mtt_addr >> 32; context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff); context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT; } *(__be32 *) mailbox->buf = cpu_to_be32(optpar); memcpy(mailbox->buf + 8, context, sizeof *context); ((struct mlx4_qp_context *) (mailbox->buf + 8))->local_qpn = cpu_to_be32(qp->qpn); ret = mlx4_cmd(dev, mailbox->dma, qp->qpn | (!!sqd_event << 31), new_state == MLX4_QP_STATE_RST ? 
2 : 0, op[cur_state][new_state], MLX4_CMD_TIME_CLASS_C, native); mlx4_free_cmd_mailbox(dev, mailbox); return ret; } int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt, enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state, struct mlx4_qp_context *context, enum mlx4_qp_optpar optpar, int sqd_event, struct mlx4_qp *qp) { return __mlx4_qp_modify(dev, mtt, cur_state, new_state, context, optpar, sqd_event, qp, 0); } EXPORT_SYMBOL_GPL(mlx4_qp_modify); int __mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_qp_table *qp_table = &priv->qp_table; *base = mlx4_bitmap_alloc_range(&qp_table->bitmap, cnt, align); if (*base == -1) return -ENOMEM; return 0; } int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base) { u64 in_param; u64 out_param; int err; if (mlx4_is_mfunc(dev)) { set_param_l(&in_param, cnt); set_param_h(&in_param, align); err = mlx4_cmd_imm(dev, in_param, &out_param, RES_QP, RES_OP_RESERVE, MLX4_CMD_ALLOC_RES, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); if (err) return err; *base = get_param_l(&out_param); return 0; } return __mlx4_qp_reserve_range(dev, cnt, align, base); } EXPORT_SYMBOL_GPL(mlx4_qp_reserve_range); void __mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_qp_table *qp_table = &priv->qp_table; if (mlx4_is_qp_reserved(dev, (u32) base_qpn)) return; mlx4_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt); } void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt) { u64 in_param; int err; if (mlx4_is_mfunc(dev)) { set_param_l(&in_param, base_qpn); set_param_h(&in_param, cnt); err = mlx4_cmd(dev, in_param, RES_QP, RES_OP_RESERVE, MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); if (err) { mlx4_warn(dev, "Failed to release qp range" " base:%d cnt:%d\n", base_qpn, cnt); } } else __mlx4_qp_release_range(dev, base_qpn, cnt); } 
EXPORT_SYMBOL_GPL(mlx4_qp_release_range); int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_qp_table *qp_table = &priv->qp_table; int err; err = mlx4_table_get(dev, &qp_table->qp_table, qpn); if (err) goto err_out; err = mlx4_table_get(dev, &qp_table->auxc_table, qpn); if (err) goto err_put_qp; err = mlx4_table_get(dev, &qp_table->altc_table, qpn); if (err) goto err_put_auxc; err = mlx4_table_get(dev, &qp_table->rdmarc_table, qpn); if (err) goto err_put_altc; err = mlx4_table_get(dev, &qp_table->cmpt_table, qpn); if (err) goto err_put_rdmarc; return 0; err_put_rdmarc: mlx4_table_put(dev, &qp_table->rdmarc_table, qpn); err_put_altc: mlx4_table_put(dev, &qp_table->altc_table, qpn); err_put_auxc: mlx4_table_put(dev, &qp_table->auxc_table, qpn); err_put_qp: mlx4_table_put(dev, &qp_table->qp_table, qpn); err_out: return err; } static int mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn) { u64 param; if (mlx4_is_mfunc(dev)) { set_param_l(&param, qpn); return mlx4_cmd_imm(dev, param, &param, RES_QP, RES_OP_MAP_ICM, MLX4_CMD_ALLOC_RES, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); } return __mlx4_qp_alloc_icm(dev, qpn); } void __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_qp_table *qp_table = &priv->qp_table; mlx4_table_put(dev, &qp_table->cmpt_table, qpn); mlx4_table_put(dev, &qp_table->rdmarc_table, qpn); mlx4_table_put(dev, &qp_table->altc_table, qpn); mlx4_table_put(dev, &qp_table->auxc_table, qpn); mlx4_table_put(dev, &qp_table->qp_table, qpn); } static void mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn) { u64 in_param; if (mlx4_is_mfunc(dev)) { set_param_l(&in_param, qpn); if (mlx4_cmd(dev, in_param, RES_QP, RES_OP_MAP_ICM, MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED)) mlx4_warn(dev, "Failed to free icm of qp:%d\n", qpn); } else __mlx4_qp_free_icm(dev, qpn); } int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp) { struct 
mlx4_priv *priv = mlx4_priv(dev); struct mlx4_qp_table *qp_table = &priv->qp_table; int err; if (!qpn) return -EINVAL; qp->qpn = qpn; err = mlx4_qp_alloc_icm(dev, qpn); if (err) return err; spin_lock_irq(&qp_table->lock); err = radix_tree_insert(&dev->qp_table_tree, qp->qpn & (dev->caps.num_qps - 1), qp); spin_unlock_irq(&qp_table->lock); if (err) goto err_icm; atomic_set(&qp->refcount, 1); init_completion(&qp->free); return 0; err_icm: mlx4_qp_free_icm(dev, qpn); return err; } EXPORT_SYMBOL_GPL(mlx4_qp_alloc); void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp) { struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; unsigned long flags; spin_lock_irqsave(&qp_table->lock, flags); radix_tree_delete(&dev->qp_table_tree, qp->qpn & (dev->caps.num_qps - 1)); spin_unlock_irqrestore(&qp_table->lock, flags); } EXPORT_SYMBOL_GPL(mlx4_qp_remove); void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp) { if (atomic_dec_and_test(&qp->refcount)) complete(&qp->free); wait_for_completion(&qp->free); mlx4_qp_free_icm(dev, qp->qpn); } EXPORT_SYMBOL_GPL(mlx4_qp_free); static int mlx4_CONF_SPECIAL_QP(struct mlx4_dev *dev, u32 base_qpn) { return mlx4_cmd(dev, 0, base_qpn, 0, MLX4_CMD_CONF_SPECIAL_QP, MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); } int mlx4_init_qp_table(struct mlx4_dev *dev) { struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; int err; int reserved_from_top = 0; spin_lock_init(&qp_table->lock); INIT_RADIX_TREE(&dev->qp_table_tree, GFP_ATOMIC); if (mlx4_is_slave(dev)) return 0; /* * We reserve 2 extra QPs per port for the special QPs. The * block of special QPs must be aligned to a multiple of 8, so * round up. * * We also reserve the MSB of the 24-bit QP number to indicate * that a QP is an XRC QP. 
*/ dev->caps.sqp_start = ALIGN(dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 8); { int sort[MLX4_NUM_QP_REGION]; int i, j, tmp; int last_base = dev->caps.num_qps; for (i = 1; i < MLX4_NUM_QP_REGION; ++i) sort[i] = i; for (i = MLX4_NUM_QP_REGION; i > 0; --i) { for (j = 2; j < i; ++j) { if (dev->caps.reserved_qps_cnt[sort[j]] > dev->caps.reserved_qps_cnt[sort[j - 1]]) { tmp = sort[j]; sort[j] = sort[j - 1]; sort[j - 1] = tmp; } } } for (i = 1; i < MLX4_NUM_QP_REGION; ++i) { last_base -= dev->caps.reserved_qps_cnt[sort[i]]; dev->caps.reserved_qps_base[sort[i]] = last_base; reserved_from_top += dev->caps.reserved_qps_cnt[sort[i]]; } } err = mlx4_bitmap_init(&qp_table->bitmap, dev->caps.num_qps, (1 << 23) - 1, dev->caps.sqp_start + 8, reserved_from_top); if (err) return err; return mlx4_CONF_SPECIAL_QP(dev, dev->caps.sqp_start); } void mlx4_cleanup_qp_table(struct mlx4_dev *dev) { if (mlx4_is_slave(dev)) return; mlx4_CONF_SPECIAL_QP(dev, 0); mlx4_bitmap_cleanup(&mlx4_priv(dev)->qp_table.bitmap); } int mlx4_qp_query(struct mlx4_dev *dev, struct mlx4_qp *qp, struct mlx4_qp_context *context) { struct mlx4_cmd_mailbox *mailbox; int err; mailbox = mlx4_alloc_cmd_mailbox(dev); if (IS_ERR(mailbox)) return PTR_ERR(mailbox); err = mlx4_cmd_box(dev, 0, mailbox->dma, qp->qpn, 0, MLX4_CMD_QUERY_QP, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); if (!err) memcpy(context, mailbox->buf + 8, sizeof *context); mlx4_free_cmd_mailbox(dev, mailbox); return err; } EXPORT_SYMBOL_GPL(mlx4_qp_query); int mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt, struct mlx4_qp_context *context, struct mlx4_qp *qp, enum mlx4_qp_state *qp_state) { int err; int i; enum mlx4_qp_state states[] = { MLX4_QP_STATE_RST, MLX4_QP_STATE_INIT, MLX4_QP_STATE_RTR, MLX4_QP_STATE_RTS }; for (i = 0; i < ARRAY_SIZE(states) - 1; i++) { context->flags &= cpu_to_be32(~(0xf << 28)); context->flags |= cpu_to_be32(states[i + 1] << 28); err = mlx4_qp_modify(dev, mtt, states[i], states[i + 1], context, 0, 0, qp); if (err) 
{ mlx4_err(dev, "Failed to bring QP to state: " "%d with error: %d\n", states[i + 1], err); return err; } *qp_state = states[i + 1]; } return 0; } EXPORT_SYMBOL_GPL(mlx4_qp_to_ready);
gpl-2.0
AOSPXS/kernel_sony_msm8x60
drivers/media/video/pvrusb2/pvrusb2-i2c-core.c
5239
20177
/* * * * Copyright (C) 2005 Mike Isely <isely@pobox.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/i2c.h> #include <linux/module.h> #include <media/ir-kbd-i2c.h> #include "pvrusb2-i2c-core.h" #include "pvrusb2-hdw-internal.h" #include "pvrusb2-debug.h" #include "pvrusb2-fx2-cmd.h" #include "pvrusb2.h" #define trace_i2c(...) pvr2_trace(PVR2_TRACE_I2C,__VA_ARGS__) /* This module attempts to implement a compliant I2C adapter for the pvrusb2 device. */ static unsigned int i2c_scan; module_param(i2c_scan, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(i2c_scan,"scan i2c bus at insmod time"); static int ir_mode[PVR_NUM] = { [0 ... 
PVR_NUM-1] = 1 }; module_param_array(ir_mode, int, NULL, 0444); MODULE_PARM_DESC(ir_mode,"specify: 0=disable IR reception, 1=normal IR"); static int pvr2_disable_ir_video; module_param_named(disable_autoload_ir_video, pvr2_disable_ir_video, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(disable_autoload_ir_video, "1=do not try to autoload ir_video IR receiver"); static int pvr2_i2c_write(struct pvr2_hdw *hdw, /* Context */ u8 i2c_addr, /* I2C address we're talking to */ u8 *data, /* Data to write */ u16 length) /* Size of data to write */ { /* Return value - default 0 means success */ int ret; if (!data) length = 0; if (length > (sizeof(hdw->cmd_buffer) - 3)) { pvr2_trace(PVR2_TRACE_ERROR_LEGS, "Killing an I2C write to %u that is too large" " (desired=%u limit=%u)", i2c_addr, length,(unsigned int)(sizeof(hdw->cmd_buffer) - 3)); return -ENOTSUPP; } LOCK_TAKE(hdw->ctl_lock); /* Clear the command buffer (likely to be paranoia) */ memset(hdw->cmd_buffer, 0, sizeof(hdw->cmd_buffer)); /* Set up command buffer for an I2C write */ hdw->cmd_buffer[0] = FX2CMD_I2C_WRITE; /* write prefix */ hdw->cmd_buffer[1] = i2c_addr; /* i2c addr of chip */ hdw->cmd_buffer[2] = length; /* length of what follows */ if (length) memcpy(hdw->cmd_buffer + 3, data, length); /* Do the operation */ ret = pvr2_send_request(hdw, hdw->cmd_buffer, length + 3, hdw->cmd_buffer, 1); if (!ret) { if (hdw->cmd_buffer[0] != 8) { ret = -EIO; if (hdw->cmd_buffer[0] != 7) { trace_i2c("unexpected status" " from i2_write[%d]: %d", i2c_addr,hdw->cmd_buffer[0]); } } } LOCK_GIVE(hdw->ctl_lock); return ret; } static int pvr2_i2c_read(struct pvr2_hdw *hdw, /* Context */ u8 i2c_addr, /* I2C address we're talking to */ u8 *data, /* Data to write */ u16 dlen, /* Size of data to write */ u8 *res, /* Where to put data we read */ u16 rlen) /* Amount of data to read */ { /* Return value - default 0 means success */ int ret; if (!data) dlen = 0; if (dlen > (sizeof(hdw->cmd_buffer) - 4)) { pvr2_trace(PVR2_TRACE_ERROR_LEGS, "Killing 
an I2C read to %u that has wlen too large" " (desired=%u limit=%u)", i2c_addr, dlen,(unsigned int)(sizeof(hdw->cmd_buffer) - 4)); return -ENOTSUPP; } if (res && (rlen > (sizeof(hdw->cmd_buffer) - 1))) { pvr2_trace(PVR2_TRACE_ERROR_LEGS, "Killing an I2C read to %u that has rlen too large" " (desired=%u limit=%u)", i2c_addr, rlen,(unsigned int)(sizeof(hdw->cmd_buffer) - 1)); return -ENOTSUPP; } LOCK_TAKE(hdw->ctl_lock); /* Clear the command buffer (likely to be paranoia) */ memset(hdw->cmd_buffer, 0, sizeof(hdw->cmd_buffer)); /* Set up command buffer for an I2C write followed by a read */ hdw->cmd_buffer[0] = FX2CMD_I2C_READ; /* read prefix */ hdw->cmd_buffer[1] = dlen; /* arg length */ hdw->cmd_buffer[2] = rlen; /* answer length. Device will send one more byte (status). */ hdw->cmd_buffer[3] = i2c_addr; /* i2c addr of chip */ if (dlen) memcpy(hdw->cmd_buffer + 4, data, dlen); /* Do the operation */ ret = pvr2_send_request(hdw, hdw->cmd_buffer, 4 + dlen, hdw->cmd_buffer, rlen + 1); if (!ret) { if (hdw->cmd_buffer[0] != 8) { ret = -EIO; if (hdw->cmd_buffer[0] != 7) { trace_i2c("unexpected status" " from i2_read[%d]: %d", i2c_addr,hdw->cmd_buffer[0]); } } } /* Copy back the result */ if (res && rlen) { if (ret) { /* Error, just blank out the return buffer */ memset(res, 0, rlen); } else { memcpy(res, hdw->cmd_buffer + 1, rlen); } } LOCK_GIVE(hdw->ctl_lock); return ret; } /* This is the common low level entry point for doing I2C operations to the hardware. */ static int pvr2_i2c_basic_op(struct pvr2_hdw *hdw, u8 i2c_addr, u8 *wdata, u16 wlen, u8 *rdata, u16 rlen) { if (!rdata) rlen = 0; if (!wdata) wlen = 0; if (rlen || !wlen) { return pvr2_i2c_read(hdw,i2c_addr,wdata,wlen,rdata,rlen); } else { return pvr2_i2c_write(hdw,i2c_addr,wdata,wlen); } } /* This is a special entry point for cases of I2C transaction attempts to the IR receiver. 
The implementation here simulates the IR receiver by issuing a command to the FX2 firmware and using that response to return what the real I2C receiver would have returned. We use this for 24xxx devices, where the IR receiver chip has been removed and replaced with FX2 related logic. */ static int i2c_24xxx_ir(struct pvr2_hdw *hdw, u8 i2c_addr,u8 *wdata,u16 wlen,u8 *rdata,u16 rlen) { u8 dat[4]; unsigned int stat; if (!(rlen || wlen)) { /* This is a probe attempt. Just let it succeed. */ return 0; } /* We don't understand this kind of transaction */ if ((wlen != 0) || (rlen == 0)) return -EIO; if (rlen < 3) { /* Mike Isely <isely@pobox.com> Appears to be a probe attempt from lirc. Just fill in zeroes and return. If we try instead to do the full transaction here, then bad things seem to happen within the lirc driver module (version 0.8.0-7 sources from Debian, when run under vanilla 2.6.17.6 kernel) - and I don't have the patience to chase it down. */ if (rlen > 0) rdata[0] = 0; if (rlen > 1) rdata[1] = 0; return 0; } /* Issue a command to the FX2 to read the IR receiver. */ LOCK_TAKE(hdw->ctl_lock); do { hdw->cmd_buffer[0] = FX2CMD_GET_IR_CODE; stat = pvr2_send_request(hdw, hdw->cmd_buffer,1, hdw->cmd_buffer,4); dat[0] = hdw->cmd_buffer[0]; dat[1] = hdw->cmd_buffer[1]; dat[2] = hdw->cmd_buffer[2]; dat[3] = hdw->cmd_buffer[3]; } while (0); LOCK_GIVE(hdw->ctl_lock); /* Give up if that operation failed. */ if (stat != 0) return stat; /* Mangle the results into something that looks like the real IR receiver. */ rdata[2] = 0xc1; if (dat[0] != 1) { /* No code received. */ rdata[0] = 0; rdata[1] = 0; } else { u16 val; /* Mash the FX2 firmware-provided IR code into something that the normal i2c chip-level driver expects. 
*/ val = dat[1]; val <<= 8; val |= dat[2]; val >>= 1; val &= ~0x0003; val |= 0x8000; rdata[0] = (val >> 8) & 0xffu; rdata[1] = val & 0xffu; } return 0; } /* This is a special entry point that is entered if an I2C operation is attempted to a wm8775 chip on model 24xxx hardware. Autodetect of this part doesn't work, but we know it is really there. So let's look for the autodetect attempt and just return success if we see that. */ static int i2c_hack_wm8775(struct pvr2_hdw *hdw, u8 i2c_addr,u8 *wdata,u16 wlen,u8 *rdata,u16 rlen) { if (!(rlen || wlen)) { // This is a probe attempt. Just let it succeed. return 0; } return pvr2_i2c_basic_op(hdw,i2c_addr,wdata,wlen,rdata,rlen); } /* This is an entry point designed to always fail any attempt to perform a transfer. We use this to cause certain I2C addresses to not be probed. */ static int i2c_black_hole(struct pvr2_hdw *hdw, u8 i2c_addr,u8 *wdata,u16 wlen,u8 *rdata,u16 rlen) { return -EIO; } /* This is a special entry point that is entered if an I2C operation is attempted to a cx25840 chip on model 24xxx hardware. This chip can sometimes wedge itself. Worse still, when this happens msp3400 can falsely detect this part and then the system gets hosed up after msp3400 gets confused and dies. What we want to do here is try to keep msp3400 away and also try to notice if the chip is wedged and send a warning to the system log. */ static int i2c_hack_cx25840(struct pvr2_hdw *hdw, u8 i2c_addr,u8 *wdata,u16 wlen,u8 *rdata,u16 rlen) { int ret; unsigned int subaddr; u8 wbuf[2]; int state = hdw->i2c_cx25840_hack_state; if (!(rlen || wlen)) { // Probe attempt - always just succeed and don't bother the // hardware (this helps to make the state machine further // down somewhat easier). return 0; } if (state == 3) { return pvr2_i2c_basic_op(hdw,i2c_addr,wdata,wlen,rdata,rlen); } /* We're looking for the exact pattern where the revision register is being read. The cx25840 module will always look at the revision register first. 
Any other pattern of access therefore has to be a probe attempt from somebody else so we'll reject it. Normally we could just let each client just probe the part anyway, but when the cx25840 is wedged, msp3400 will get a false positive and that just screws things up... */ if (wlen == 0) { switch (state) { case 1: subaddr = 0x0100; break; case 2: subaddr = 0x0101; break; default: goto fail; } } else if (wlen == 2) { subaddr = (wdata[0] << 8) | wdata[1]; switch (subaddr) { case 0x0100: state = 1; break; case 0x0101: state = 2; break; default: goto fail; } } else { goto fail; } if (!rlen) goto success; state = 0; if (rlen != 1) goto fail; /* If we get to here then we have a legitimate read for one of the two revision bytes, so pass it through. */ wbuf[0] = subaddr >> 8; wbuf[1] = subaddr; ret = pvr2_i2c_basic_op(hdw,i2c_addr,wbuf,2,rdata,rlen); if ((ret != 0) || (*rdata == 0x04) || (*rdata == 0x0a)) { pvr2_trace(PVR2_TRACE_ERROR_LEGS, "WARNING: Detected a wedged cx25840 chip;" " the device will not work."); pvr2_trace(PVR2_TRACE_ERROR_LEGS, "WARNING: Try power cycling the pvrusb2 device."); pvr2_trace(PVR2_TRACE_ERROR_LEGS, "WARNING: Disabling further access to the device" " to prevent other foul-ups."); // This blocks all further communication with the part. hdw->i2c_func[0x44] = NULL; pvr2_hdw_render_useless(hdw); goto fail; } /* Success! */ pvr2_trace(PVR2_TRACE_CHIPS,"cx25840 appears to be OK."); state = 3; success: hdw->i2c_cx25840_hack_state = state; return 0; fail: hdw->i2c_cx25840_hack_state = state; return -EIO; } /* This is a very, very limited I2C adapter implementation. We can only support what we actually know will work on the device... 
*/ static int pvr2_i2c_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg msgs[], int num) { int ret = -ENOTSUPP; pvr2_i2c_func funcp = NULL; struct pvr2_hdw *hdw = (struct pvr2_hdw *)(i2c_adap->algo_data); if (!num) { ret = -EINVAL; goto done; } if (msgs[0].addr < PVR2_I2C_FUNC_CNT) { funcp = hdw->i2c_func[msgs[0].addr]; } if (!funcp) { ret = -EIO; goto done; } if (num == 1) { if (msgs[0].flags & I2C_M_RD) { /* Simple read */ u16 tcnt,bcnt,offs; if (!msgs[0].len) { /* Length == 0 read. This is a probe. */ if (funcp(hdw,msgs[0].addr,NULL,0,NULL,0)) { ret = -EIO; goto done; } ret = 1; goto done; } /* If the read is short enough we'll do the whole thing atomically. Otherwise we have no choice but to break apart the reads. */ tcnt = msgs[0].len; offs = 0; while (tcnt) { bcnt = tcnt; if (bcnt > sizeof(hdw->cmd_buffer)-1) { bcnt = sizeof(hdw->cmd_buffer)-1; } if (funcp(hdw,msgs[0].addr,NULL,0, msgs[0].buf+offs,bcnt)) { ret = -EIO; goto done; } offs += bcnt; tcnt -= bcnt; } ret = 1; goto done; } else { /* Simple write */ ret = 1; if (funcp(hdw,msgs[0].addr, msgs[0].buf,msgs[0].len,NULL,0)) { ret = -EIO; } goto done; } } else if (num == 2) { if (msgs[0].addr != msgs[1].addr) { trace_i2c("i2c refusing 2 phase transfer with" " conflicting target addresses"); ret = -ENOTSUPP; goto done; } if ((!((msgs[0].flags & I2C_M_RD))) && (msgs[1].flags & I2C_M_RD)) { u16 tcnt,bcnt,wcnt,offs; /* Write followed by atomic read. If the read portion is short enough we'll do the whole thing atomically. Otherwise we have no choice but to break apart the reads. 
*/ tcnt = msgs[1].len; wcnt = msgs[0].len; offs = 0; while (tcnt || wcnt) { bcnt = tcnt; if (bcnt > sizeof(hdw->cmd_buffer)-1) { bcnt = sizeof(hdw->cmd_buffer)-1; } if (funcp(hdw,msgs[0].addr, msgs[0].buf,wcnt, msgs[1].buf+offs,bcnt)) { ret = -EIO; goto done; } offs += bcnt; tcnt -= bcnt; wcnt = 0; } ret = 2; goto done; } else { trace_i2c("i2c refusing complex transfer" " read0=%d read1=%d", (msgs[0].flags & I2C_M_RD), (msgs[1].flags & I2C_M_RD)); } } else { trace_i2c("i2c refusing %d phase transfer",num); } done: if (pvrusb2_debug & PVR2_TRACE_I2C_TRAF) { unsigned int idx,offs,cnt; for (idx = 0; idx < num; idx++) { cnt = msgs[idx].len; printk(KERN_INFO "pvrusb2 i2c xfer %u/%u:" " addr=0x%x len=%d %s", idx+1,num, msgs[idx].addr, cnt, (msgs[idx].flags & I2C_M_RD ? "read" : "write")); if ((ret > 0) || !(msgs[idx].flags & I2C_M_RD)) { if (cnt > 8) cnt = 8; printk(" ["); for (offs = 0; offs < (cnt>8?8:cnt); offs++) { if (offs) printk(" "); printk("%02x",msgs[idx].buf[offs]); } if (offs < cnt) printk(" ..."); printk("]"); } if (idx+1 == num) { printk(" result=%d",ret); } printk("\n"); } if (!num) { printk(KERN_INFO "pvrusb2 i2c xfer null transfer result=%d\n", ret); } } return ret; } static u32 pvr2_i2c_functionality(struct i2c_adapter *adap) { return I2C_FUNC_SMBUS_EMUL | I2C_FUNC_I2C; } static struct i2c_algorithm pvr2_i2c_algo_template = { .master_xfer = pvr2_i2c_xfer, .functionality = pvr2_i2c_functionality, }; static struct i2c_adapter pvr2_i2c_adap_template = { .owner = THIS_MODULE, .class = 0, }; /* Return true if device exists at given address */ static int do_i2c_probe(struct pvr2_hdw *hdw, int addr) { struct i2c_msg msg[1]; int rc; msg[0].addr = 0; msg[0].flags = I2C_M_RD; msg[0].len = 0; msg[0].buf = NULL; msg[0].addr = addr; rc = i2c_transfer(&hdw->i2c_adap, msg, ARRAY_SIZE(msg)); return rc == 1; } static void do_i2c_scan(struct pvr2_hdw *hdw) { int i; printk(KERN_INFO "%s: i2c scan beginning\n", hdw->name); for (i = 0; i < 128; i++) { if (do_i2c_probe(hdw, 
i)) { printk(KERN_INFO "%s: i2c scan: found device @ 0x%x\n", hdw->name, i); } } printk(KERN_INFO "%s: i2c scan done.\n", hdw->name); } static void pvr2_i2c_register_ir(struct pvr2_hdw *hdw) { struct i2c_board_info info; struct IR_i2c_init_data *init_data = &hdw->ir_init_data; if (pvr2_disable_ir_video) { pvr2_trace(PVR2_TRACE_INFO, "Automatic binding of ir_video has been disabled."); return; } memset(&info, 0, sizeof(struct i2c_board_info)); switch (hdw->ir_scheme_active) { case PVR2_IR_SCHEME_24XXX: /* FX2-controlled IR */ case PVR2_IR_SCHEME_29XXX: /* Original 29xxx device */ init_data->ir_codes = RC_MAP_HAUPPAUGE; init_data->internal_get_key_func = IR_KBD_GET_KEY_HAUP; init_data->type = RC_TYPE_RC5; init_data->name = hdw->hdw_desc->description; init_data->polling_interval = 100; /* ms From ir-kbd-i2c */ /* IR Receiver */ info.addr = 0x18; info.platform_data = init_data; strlcpy(info.type, "ir_video", I2C_NAME_SIZE); pvr2_trace(PVR2_TRACE_INFO, "Binding %s to i2c address 0x%02x.", info.type, info.addr); i2c_new_device(&hdw->i2c_adap, &info); break; case PVR2_IR_SCHEME_ZILOG: /* HVR-1950 style */ case PVR2_IR_SCHEME_24XXX_MCE: /* 24xxx MCE device */ init_data->ir_codes = RC_MAP_HAUPPAUGE; init_data->internal_get_key_func = IR_KBD_GET_KEY_HAUP_XVR; init_data->type = RC_TYPE_RC5; init_data->name = hdw->hdw_desc->description; /* IR Receiver */ info.addr = 0x71; info.platform_data = init_data; strlcpy(info.type, "ir_rx_z8f0811_haup", I2C_NAME_SIZE); pvr2_trace(PVR2_TRACE_INFO, "Binding %s to i2c address 0x%02x.", info.type, info.addr); i2c_new_device(&hdw->i2c_adap, &info); /* IR Trasmitter */ info.addr = 0x70; info.platform_data = init_data; strlcpy(info.type, "ir_tx_z8f0811_haup", I2C_NAME_SIZE); pvr2_trace(PVR2_TRACE_INFO, "Binding %s to i2c address 0x%02x.", info.type, info.addr); i2c_new_device(&hdw->i2c_adap, &info); break; default: /* The device either doesn't support I2C-based IR or we don't know (yet) how to operate IR on the device. 
*/ break; } } void pvr2_i2c_core_init(struct pvr2_hdw *hdw) { unsigned int idx; /* The default action for all possible I2C addresses is just to do the transfer normally. */ for (idx = 0; idx < PVR2_I2C_FUNC_CNT; idx++) { hdw->i2c_func[idx] = pvr2_i2c_basic_op; } /* However, deal with various special cases for 24xxx hardware. */ if (ir_mode[hdw->unit_number] == 0) { printk(KERN_INFO "%s: IR disabled\n",hdw->name); hdw->i2c_func[0x18] = i2c_black_hole; } else if (ir_mode[hdw->unit_number] == 1) { if (hdw->ir_scheme_active == PVR2_IR_SCHEME_24XXX) { /* Set up translation so that our IR looks like a 29xxx device */ hdw->i2c_func[0x18] = i2c_24xxx_ir; } } if (hdw->hdw_desc->flag_has_cx25840) { hdw->i2c_func[0x44] = i2c_hack_cx25840; } if (hdw->hdw_desc->flag_has_wm8775) { hdw->i2c_func[0x1b] = i2c_hack_wm8775; } // Configure the adapter and set up everything else related to it. memcpy(&hdw->i2c_adap,&pvr2_i2c_adap_template,sizeof(hdw->i2c_adap)); memcpy(&hdw->i2c_algo,&pvr2_i2c_algo_template,sizeof(hdw->i2c_algo)); strlcpy(hdw->i2c_adap.name,hdw->name,sizeof(hdw->i2c_adap.name)); hdw->i2c_adap.dev.parent = &hdw->usb_dev->dev; hdw->i2c_adap.algo = &hdw->i2c_algo; hdw->i2c_adap.algo_data = hdw; hdw->i2c_linked = !0; i2c_set_adapdata(&hdw->i2c_adap, &hdw->v4l2_dev); i2c_add_adapter(&hdw->i2c_adap); if (hdw->i2c_func[0x18] == i2c_24xxx_ir) { /* Probe for a different type of IR receiver on this device. This is really the only way to differentiate older 24xxx devices from 24xxx variants that include an IR blaster. If the IR blaster is present, the IR receiver is part of that chip and thus we must disable the emulated IR receiver. */ if (do_i2c_probe(hdw, 0x71)) { pvr2_trace(PVR2_TRACE_INFO, "Device has newer IR hardware;" " disabling unneeded virtual IR device"); hdw->i2c_func[0x18] = NULL; /* Remember that this is a different device... 
*/ hdw->ir_scheme_active = PVR2_IR_SCHEME_24XXX_MCE; } } if (i2c_scan) do_i2c_scan(hdw); pvr2_i2c_register_ir(hdw); } void pvr2_i2c_core_done(struct pvr2_hdw *hdw) { if (hdw->i2c_linked) { i2c_del_adapter(&hdw->i2c_adap); hdw->i2c_linked = 0; } } /* Stuff for Emacs to see, in order to encourage consistent editing style: *** Local Variables: *** *** mode: c *** *** fill-column: 75 *** *** tab-width: 8 *** *** c-basic-offset: 8 *** *** End: *** */
gpl-2.0
Colonel-Corn/kernel_htc_msm8974
arch/powerpc/sysdev/fsl_85xx_cache_sram.c
9079
4394
/*
 * Copyright 2009-2010 Freescale Semiconductor, Inc.
 *
 * Simple memory allocator abstraction for QorIQ (P1/P2) based Cache-SRAM
 *
 * Author: Vivek Mahajan <vivek.mahajan@freescale.com>
 *
 * This file is derived from the original work done
 * by Sylvain Munaut for the Bestcomm SRAM allocator.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/of_platform.h>
#include <asm/pgtable.h>
#include <asm/fsl_85xx_cache_sram.h>

#include "fsl_85xx_cache_ctlr.h"

/* Singleton describing the one cache-SRAM region; NULL until
 * instantiate_cache_sram() succeeds, reset to NULL on remove. */
struct mpc85xx_cache_sram *cache_sram;

/*
 * Allocate @size bytes from the cache-SRAM remote heap, aligned to @align.
 *
 * @size:  number of bytes wanted; must be non-zero and fit in the SRAM.
 * @phys:  out parameter - receives the physical address of the allocation.
 * @align: required alignment; must be a power of two and > 1.
 *
 * Returns the kernel-virtual address of the allocation, or NULL on any
 * failure (driver not initialized, bad arguments, or heap exhausted).
 */
void *mpc85xx_cache_sram_alloc(unsigned int size,
			       phys_addr_t *phys, unsigned int align)
{
	unsigned long offset;
	unsigned long flags;

	/* Driver not (yet) initialized - nothing to allocate from. */
	if (unlikely(cache_sram == NULL))
		return NULL;

	if (!size || (size > cache_sram->size) || (align > cache_sram->size)) {
		pr_err("%s(): size(=%x) or align(=%x) zero or too big\n",
			__func__, size, align);
		return NULL;
	}

	/* align must be a power of two strictly greater than 1
	 * (this also rejects align == 0). */
	if ((align & (align - 1)) || align <= 1) {
		pr_err("%s(): align(=%x) must be power of two and >1\n",
			__func__, align);
		return NULL;
	}

	/* The remote heap itself is not locked; serialize with our lock. */
	spin_lock_irqsave(&cache_sram->lock, flags);
	offset = rh_alloc_align(cache_sram->rh, size, align, NULL);
	spin_unlock_irqrestore(&cache_sram->lock, flags);

	/* rh_alloc_align() returns a negative errno encoded in the value. */
	if (IS_ERR_VALUE(offset))
		return NULL;

	*phys = cache_sram->base_phys + offset;

	return (unsigned char *)cache_sram->base_virt + offset;
}
EXPORT_SYMBOL(mpc85xx_cache_sram_alloc);

/*
 * Return a block previously handed out by mpc85xx_cache_sram_alloc() to
 * the remote heap.  @ptr must be a non-NULL value obtained from the
 * allocator; the heap offset is recovered from the virtual base.
 */
void mpc85xx_cache_sram_free(void *ptr)
{
	unsigned long flags;

	BUG_ON(!ptr);

	spin_lock_irqsave(&cache_sram->lock, flags);
	rh_free(cache_sram->rh, ptr - cache_sram->base_virt);
	spin_unlock_irqrestore(&cache_sram->lock, flags);
}
EXPORT_SYMBOL(mpc85xx_cache_sram_free);

/*
 * One-time setup: claim the SRAM physical window, map it, and build the
 * remote heap that backs the allocator.
 *
 * @dev:         platform device used only for error/info logging.
 * @sram_params: physical offset and size of the cache-SRAM window.
 *
 * Returns 0 on success; -EBUSY if already initialized, -ENOMEM/-ENXIO or
 * the rh_create() error on failure.  All partially-acquired resources are
 * unwound on the error paths (unmap -> release region -> free struct).
 */
int __init instantiate_cache_sram(struct platform_device *dev,
		struct sram_parameters sram_params)
{
	int ret = 0;

	if (cache_sram) {
		dev_err(&dev->dev, "Already initialized cache-sram\n");
		return -EBUSY;
	}

	cache_sram = kzalloc(sizeof(struct mpc85xx_cache_sram), GFP_KERNEL);
	if (!cache_sram) {
		dev_err(&dev->dev, "Out of memory for cache_sram structure\n");
		return -ENOMEM;
	}

	cache_sram->base_phys = sram_params.sram_offset;
	cache_sram->size = sram_params.sram_size;

	if (!request_mem_region(cache_sram->base_phys, cache_sram->size,
						"fsl_85xx_cache_sram")) {
		dev_err(&dev->dev, "%s: request memory failed\n",
				dev->dev.of_node->full_name);
		ret = -ENXIO;
		goto out_free;
	}

	/* Map the window cacheable with the coherency bit set
	 * (NOTE(review): assumes the SRAM must be M-bit coherent on
	 * these parts - confirm against the e500 core manual). */
	cache_sram->base_virt = ioremap_prot(cache_sram->base_phys,
				cache_sram->size, _PAGE_COHERENT | PAGE_KERNEL);
	if (!cache_sram->base_virt) {
		dev_err(&dev->dev, "%s: ioremap_prot failed\n",
				dev->dev.of_node->full_name);
		ret = -ENOMEM;
		goto out_release;
	}

	/* Heap bookkeeping granularity is sizeof(unsigned int) bytes. */
	cache_sram->rh = rh_create(sizeof(unsigned int));
	if (IS_ERR(cache_sram->rh)) {
		dev_err(&dev->dev, "%s: Unable to create remote heap\n",
				dev->dev.of_node->full_name);
		ret = PTR_ERR(cache_sram->rh);
		goto out_unmap;
	}

	/* Hand the whole window to the heap and arm the allocator lock. */
	rh_attach_region(cache_sram->rh, 0, cache_sram->size);
	spin_lock_init(&cache_sram->lock);

	dev_info(&dev->dev, "[base:0x%llx, size:0x%x] configured and loaded\n",
		(unsigned long long)cache_sram->base_phys, cache_sram->size);

	return 0;

out_unmap:
	iounmap(cache_sram->base_virt);

out_release:
	release_mem_region(cache_sram->base_phys, cache_sram->size);

out_free:
	kfree(cache_sram);

	return ret;
}

/*
 * Tear down everything instantiate_cache_sram() built, in reverse order,
 * and clear the singleton so a later instantiate can run again.
 * Must only be called after successful initialization (BUG otherwise).
 */
void remove_cache_sram(struct platform_device *dev)
{
	BUG_ON(!cache_sram);

	rh_detach_region(cache_sram->rh, 0, cache_sram->size);
	rh_destroy(cache_sram->rh);

	iounmap(cache_sram->base_virt);
	release_mem_region(cache_sram->base_phys, cache_sram->size);

	kfree(cache_sram);
	cache_sram = NULL;

	dev_info(&dev->dev, "MPC85xx Cache-SRAM driver unloaded\n");
}
gpl-2.0
HSAFoundation/HSA-Drivers-Linux-AMD
src/kernel/drivers/isdn/hysdn/boardergo.c
9847
16195
/* $Id: boardergo.c,v 1.5.6.7 2001/11/06 21:58:19 kai Exp $
 *
 * Linux driver for HYSDN cards, specific routines for ergo type boards.
 *
 * Author    Werner Cornelius (werner@titro.de) for Hypercope GmbH
 * Copyright 1999 by Werner Cornelius (werner@titro.de)
 *
 * This software may be used and distributed according to the terms
 * of the GNU General Public License, incorporated herein by reference.
 *
 * As all Linux supported cards Champ2, Ergo and Metro2/4 use the same
 * DPRAM interface and layout with only minor differences all related
 * stuff is done here, not in separate modules.
 *
 */

#include <linux/signal.h>
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <asm/io.h>

#include "hysdn_defs.h"
#include "boardergo.h"

/* Thin wrappers so the body reads like the original DOS-era sources. */
#define byteout(addr, val) outb(val, addr)
#define bytein(addr) inb(addr)

/***************************************************/
/* The cards interrupt handler. Called from system */
/***************************************************/
static irqreturn_t
ergo_interrupt(int intno, void *dev_id)
{
	hysdn_card *card = dev_id;	/* parameter from irq */
	tErgDpram *dpr;
	unsigned long flags;
	unsigned char volatile b;

	if (!card)
		return IRQ_NONE;	/* error -> spurious interrupt */
	if (!card->irq_enabled)
		return IRQ_NONE;	/* other device interrupting or irq switched off */

	spin_lock_irqsave(&card->hysdn_lock, flags);	/* no further irqs allowed */

	if (!(bytein(card->iobase + PCI9050_INTR_REG) & PCI9050_INTR_REG_STAT1)) {
		spin_unlock_irqrestore(&card->hysdn_lock, flags);	/* restore old state */
		return IRQ_NONE;	/* no interrupt requested by E1 */
	}
	/* clear any pending ints on the board; these volatile reads are the
	 * acknowledge cycle, the accumulated value in 'b' is never used */
	dpr = card->dpram;
	b = dpr->ToPcInt;	/* clear for ergo */
	b |= dpr->ToPcIntMetro;	/* same for metro */
	b |= dpr->ToHyInt;	/* and for champ */

	/* start kernel task immediately after leaving all interrupts;
	 * skip the schedule if the bottom half already owns the hardware */
	if (!card->hw_lock)
		schedule_work(&card->irq_queue);
	spin_unlock_irqrestore(&card->hysdn_lock, flags);
	return IRQ_HANDLED;
}				/* ergo_interrupt */

/******************************************************************************/
/* ergo_irq_bh will be called as part of the kernel clearing its shared work  */
/* queue sometime after a call to schedule_work has been made passing our     */
/* work_struct. This task is the only one handling data transfer from or to   */
/* the card after booting. The task may be queued from everywhere             */
/* (interrupts included).                                                     */
/******************************************************************************/
static void
ergo_irq_bh(struct work_struct *ugli_api)
{
	hysdn_card *card = container_of(ugli_api, hysdn_card, irq_queue);
	tErgDpram *dpr;
	int again;
	unsigned long flags;

	if (card->state != CARD_STATE_RUN)
		return;		/* invalid call */

	dpr = card->dpram;	/* point to DPRAM */

	spin_lock_irqsave(&card->hysdn_lock, flags);
	if (card->hw_lock) {
		spin_unlock_irqrestore(&card->hysdn_lock, flags);	/* hardware currently unavailable */
		return;
	}
	card->hw_lock = 1;	/* we now lock the hardware */

	/* keep pumping tx and rx until neither direction makes progress */
	do {
		again = 0;	/* assume loop not to be repeated */

		if (!dpr->ToHyFlag) {
			/* we are able to send a buffer */
			if (hysdn_sched_tx(card, dpr->ToHyBuf,
					   &dpr->ToHySize, &dpr->ToHyChannel,
					   ERG_TO_HY_BUF_SIZE)) {
				dpr->ToHyFlag = 1;	/* enable tx */
				again = 1;	/* restart loop */
			}
		}		/* we are able to send a buffer */

		if (dpr->ToPcFlag) {
			/* a message has arrived for us, handle it */
			if (hysdn_sched_rx(card, dpr->ToPcBuf, dpr->ToPcSize,
					   dpr->ToPcChannel)) {
				dpr->ToPcFlag = 0;	/* we worked the data */
				again = 1;	/* restart loop */
			}
		}		/* a message has arrived for us */

		if (again) {
			dpr->ToHyInt = 1;
			dpr->ToPcInt = 1;	/* interrupt to E1 for all cards */
		} else
			card->hw_lock = 0;	/* free hardware again */
	} while (again);	/* until nothing more to do */

	spin_unlock_irqrestore(&card->hysdn_lock, flags);
}				/* ergo_irq_bh */

/*********************************************************/
/* stop the card (hardware reset) and disable interrupts */
/*********************************************************/
static void
ergo_stopcard(hysdn_card *card)
{
	unsigned long flags;
	unsigned char val;

	hysdn_net_release(card);	/* first release the net device if existing */
#ifdef CONFIG_HYSDN_CAPI
	hycapi_capi_stop(card);
#endif /* CONFIG_HYSDN_CAPI */
	spin_lock_irqsave(&card->hysdn_lock, flags);
	val = bytein(card->iobase + PCI9050_INTR_REG);	/* get actual value */
	val &= ~(PCI9050_INTR_REG_ENPCI | PCI9050_INTR_REG_EN1);	/* mask irq */
	byteout(card->iobase + PCI9050_INTR_REG, val);
	card->irq_enabled = 0;
	byteout(card->iobase + PCI9050_USER_IO, PCI9050_E1_RESET);	/* reset E1 processor */
	card->state = CARD_STATE_UNUSED;
	card->err_log_state = ERRLOG_STATE_OFF;	/* currently no log active */
	spin_unlock_irqrestore(&card->hysdn_lock, flags);
}				/* ergo_stopcard */

/**************************************************************************/
/* enable or disable the cards error log. The event is queued if possible */
/**************************************************************************/
static void
ergo_set_errlog_state(hysdn_card *card, int on)
{
	unsigned long flags;

	if (card->state != CARD_STATE_RUN) {
		card->err_log_state = ERRLOG_STATE_OFF;	/* must be off */
		return;
	}
	spin_lock_irqsave(&card->hysdn_lock, flags);

	/* already in the requested steady state -> nothing to queue */
	if (((card->err_log_state == ERRLOG_STATE_OFF) && !on) ||
	    ((card->err_log_state == ERRLOG_STATE_ON) && on)) {
		spin_unlock_irqrestore(&card->hysdn_lock, flags);
		return;		/* nothing to do */
	}
	if (on)
		card->err_log_state = ERRLOG_STATE_START;	/* request start */
	else
		card->err_log_state = ERRLOG_STATE_STOP;	/* request stop */

	spin_unlock_irqrestore(&card->hysdn_lock, flags);
	schedule_work(&card->irq_queue);	/* bottom half performs the request */
}				/* ergo_set_errlog_state */

/******************************************/
/* test the cards RAM and return 0 if ok. */
/******************************************/
static const char TestText[36] = "This Message is filler, why read it";

static int
ergo_testram(hysdn_card *card)
{
	tErgDpram *dpr = card->dpram;

	memset(dpr->TrapTable, 0, sizeof(dpr->TrapTable));	/* clear all Traps */
	dpr->ToHyInt = 1;	/* E1 INTR state forced */

	/* write the pattern to the end of the ToHy buffer and read it back */
	memcpy(&dpr->ToHyBuf[ERG_TO_HY_BUF_SIZE - sizeof(TestText)], TestText,
	       sizeof(TestText));
	if (memcmp(&dpr->ToHyBuf[ERG_TO_HY_BUF_SIZE - sizeof(TestText)], TestText,
		   sizeof(TestText)))
		return (-1);

	/* same check for the ToPc buffer */
	memcpy(&dpr->ToPcBuf[ERG_TO_PC_BUF_SIZE - sizeof(TestText)], TestText,
	       sizeof(TestText));
	if (memcmp(&dpr->ToPcBuf[ERG_TO_PC_BUF_SIZE - sizeof(TestText)], TestText,
		   sizeof(TestText)))
		return (-1);

	return (0);
}				/* ergo_testram */

/*****************************************************************************/
/* this function is intended to write stage 1 boot image to the cards buffer */
/* this is done in two steps. First the 1024 hi-words are written (offs=0),  */
/* then the 1024 lo-bytes are written. The remaining DPRAM is cleared, the   */
/* PCI-write-buffers flushed and the card is taken out of reset.             */
/* The function then waits for a reaction of the E1 processor or a timeout.  */
/* Negative return values are interpreted as errors.                         */
/*****************************************************************************/
static int
ergo_writebootimg(struct HYSDN_CARD *card, unsigned char *buf,
		  unsigned long offs)
{
	unsigned char *dst;
	tErgDpram *dpram;
	int cnt = (BOOT_IMG_SIZE >> 2);	/* number of words to move and swap (byte order!) */

	if (card->debug_flags & LOG_POF_CARD)
		hysdn_addlog(card, "ERGO: write bootldr offs=0x%lx ", offs);

	dst = card->dpram;	/* pointer to start of DPRAM */
	dst += (offs + ERG_DPRAM_FILL_SIZE);	/* offset in the DPRAM */
	/* copy byte-swapped words, leaving a 2-byte gap per longword for
	 * the other half of the two-pass image write */
	while (cnt--) {
		*dst++ = *(buf + 1);	/* high byte */
		*dst++ = *buf;	/* low byte */
		dst += 2;	/* point to next longword */
		buf += 2;	/* buffer only filled with words */
	}

	/* if low words (offs = 2) have been written, clear the rest of the DPRAM, */
	/* flush the PCI-write-buffer and take the E1 out of reset */
	if (offs) {
		memset(card->dpram, 0, ERG_DPRAM_FILL_SIZE);	/* fill the DPRAM still not cleared */
		dpram = card->dpram;	/* get pointer to dpram structure */
		dpram->ToHyNoDpramErrLog = 0xFF;	/* write a dpram register */
		while (!dpram->ToHyNoDpramErrLog);	/* reread volatile register to flush PCI */

		byteout(card->iobase + PCI9050_USER_IO, PCI9050_E1_RUN);	/* start E1 processor */
		/* the interrupts are still masked */

		msleep_interruptible(20);	/* Timeout 20ms */

		/* the E1 announces itself by setting up the boot spooler */
		if (((tDpramBootSpooler *) card->dpram)->Len != DPRAM_SPOOLER_DATA_SIZE) {
			if (card->debug_flags & LOG_POF_CARD)
				hysdn_addlog(card, "ERGO: write bootldr no answer");
			return (-ERR_BOOTIMG_FAIL);
		}
	}			/* start_boot_img */
	return (0);		/* successful */
}				/* ergo_writebootimg */

/********************************************************************************/
/* ergo_writebootseq writes the buffer containing len bytes to the E1 processor */
/* using the boot spool mechanism. If everything works fine 0 is returned. In   */
/* case of errors a negative error value is returned.                           */
/********************************************************************************/
static int
ergo_writebootseq(struct HYSDN_CARD *card, unsigned char *buf, int len)
{
	tDpramBootSpooler *sp = (tDpramBootSpooler *) card->dpram;
	unsigned char *dst;
	unsigned char buflen;
	int nr_write;
	unsigned char tmp_rdptr;
	unsigned char wr_mirror;
	int i;

	if (card->debug_flags & LOG_POF_CARD)
		hysdn_addlog(card, "ERGO: write boot seq len=%d ", len);

	dst = sp->Data;		/* point to data in spool structure */
	buflen = sp->Len;	/* maximum len of spooled data */
	wr_mirror = sp->WrPtr;	/* only once read */

	/* try until all bytes written or error */
	i = 0x1000;		/* timeout value */
	while (len) {

		/* first determine the number of bytes that may be buffered;
		 * the read pointer is sampled twice until two consecutive
		 * reads agree, since the E1 updates it concurrently */
		do {
			tmp_rdptr = sp->RdPtr;	/* first read the pointer */
			i--;	/* decrement timeout */
		} while (i && (tmp_rdptr != sp->RdPtr));	/* wait for stable pointer */

		if (!i) {
			if (card->debug_flags & LOG_POF_CARD)
				hysdn_addlog(card, "ERGO: write boot seq timeout");
			return (-ERR_BOOTSEQ_FAIL);	/* value not stable -> timeout */
		}
		/* ring-buffer free space; one slot is kept empty to tell
		 * full from empty */
		if ((nr_write = tmp_rdptr - wr_mirror - 1) < 0)
			nr_write += buflen;	/* now we got number of free bytes - 1 in buffer */

		if (!nr_write)
			continue;	/* no free bytes in buffer */

		if (nr_write > len)
			nr_write = len;	/* limit if last few bytes */
		i = 0x1000;	/* reset timeout value */

		/* now we know how many bytes we may put in the buffer */
		len -= nr_write;	/* we safely could adjust len before output */
		while (nr_write--) {
			*(dst + wr_mirror) = *buf++;	/* output one byte */
			if (++wr_mirror >= buflen)
				wr_mirror = 0;
			sp->WrPtr = wr_mirror;	/* announce the next byte to E1 */
		}		/* while (nr_write) */

	}			/* while (len) */
	return (0);
}				/* ergo_writebootseq */

/***********************************************************************************/
/* ergo_waitpofready waits for a maximum of 10 seconds for the completition of the */
/* boot process. If the process has been successful 0 is returned otherwise a      */
/* negative error code is returned.                                               */
/***********************************************************************************/
static int
ergo_waitpofready(struct HYSDN_CARD *card)
{
	tErgDpram *dpr = card->dpram;	/* pointer to DPRAM structure */
	int timecnt = 10000 / 50;	/* timeout is 10 secs max. */
	unsigned long flags;
	int msg_size;
	int i;

	if (card->debug_flags & LOG_POF_CARD)
		hysdn_addlog(card, "ERGO: waiting for pof ready");
	while (timecnt--) {
		/* wait until timeout */

		if (dpr->ToPcFlag) {
			/* data has arrived */

			/* the first message must be the SysReady magic on the
			 * system channel, within the expected size bounds */
			if ((dpr->ToPcChannel != CHAN_SYSTEM) ||
			    (dpr->ToPcSize < MIN_RDY_MSG_SIZE) ||
			    (dpr->ToPcSize > MAX_RDY_MSG_SIZE) ||
			    ((*(unsigned long *) dpr->ToPcBuf) != RDY_MAGIC))
				break;	/* an error occurred */

			/* Check for additional data delivered during SysReady */
			msg_size = dpr->ToPcSize - RDY_MAGIC_SIZE;
			if (msg_size > 0)
				if (EvalSysrTokData(card, dpr->ToPcBuf + RDY_MAGIC_SIZE, msg_size))
					break;

			if (card->debug_flags & LOG_POF_RECORD)
				hysdn_addlog(card, "ERGO: pof boot success");
			spin_lock_irqsave(&card->hysdn_lock, flags);

			card->state = CARD_STATE_RUN;	/* now card is running */
			/* enable the cards interrupt */
			byteout(card->iobase + PCI9050_INTR_REG,
				bytein(card->iobase + PCI9050_INTR_REG) |
				(PCI9050_INTR_REG_ENPCI | PCI9050_INTR_REG_EN1));
			card->irq_enabled = 1;	/* we are ready to receive interrupts */

			dpr->ToPcFlag = 0;	/* reset data indicator */
			dpr->ToHyInt = 1;
			dpr->ToPcInt = 1;	/* interrupt to E1 for all cards */
			spin_unlock_irqrestore(&card->hysdn_lock, flags);

			if ((hynet_enable & (1 << card->myid)) &&
			    (i = hysdn_net_create(card))) {
				ergo_stopcard(card);
				card->state = CARD_STATE_BOOTERR;
				return (i);
			}
#ifdef CONFIG_HYSDN_CAPI
			if ((i = hycapi_capi_create(card))) {
				printk(KERN_WARNING "HYSDN: failed to create capi-interface.\n");
			}
#endif /* CONFIG_HYSDN_CAPI */
			return (0);	/* success */
		}		/* data has arrived */
		msleep_interruptible(50);	/* Timeout 50ms */
	}			/* wait until timeout */

	if (card->debug_flags & LOG_POF_CARD)
		hysdn_addlog(card, "ERGO: pof boot ready timeout");
	return (-ERR_POF_TIMEOUT);
}				/* ergo_waitpofready */

/************************************************************************************/
/* release the cards hardware. Before releasing do a interrupt disable and hardware */
/* reset. Also unmap dpram.                                                         */
/* Use only during module release.                                                  */
/************************************************************************************/
static void
ergo_releasehardware(hysdn_card *card)
{
	ergo_stopcard(card);	/* first stop the card if not already done */
	free_irq(card->irq, card);	/* release interrupt */
	release_region(card->iobase + PCI9050_INTR_REG, 1);	/* release all io ports */
	release_region(card->iobase + PCI9050_USER_IO, 1);
	iounmap(card->dpram);
	card->dpram = NULL;	/* release shared mem */
}				/* ergo_releasehardware */

/*********************************************************************************/
/* acquire the needed hardware ports and map dpram. If an error occurs a nonzero */
/* value is returned.                                                            */
/* Use only during module init.                                                  */
/*********************************************************************************/
int
ergo_inithardware(hysdn_card *card)
{
	if (!request_region(card->iobase + PCI9050_INTR_REG, 1, "HYSDN"))
		return (-1);
	if (!request_region(card->iobase + PCI9050_USER_IO, 1, "HYSDN")) {
		release_region(card->iobase + PCI9050_INTR_REG, 1);
		return (-1);	/* ports already in use */
	}
	card->memend = card->membase + ERG_DPRAM_PAGE_SIZE - 1;
	if (!(card->dpram = ioremap(card->membase, ERG_DPRAM_PAGE_SIZE))) {
		release_region(card->iobase + PCI9050_INTR_REG, 1);
		release_region(card->iobase + PCI9050_USER_IO, 1);
		return (-1);
	}

	ergo_stopcard(card);	/* disable interrupts */
	if (request_irq(card->irq, ergo_interrupt, IRQF_SHARED, "HYSDN", card)) {
		ergo_releasehardware(card);	/* return the acquired hardware */
		return (-1);
	}
	/* success, now setup the function pointers */
	card->stopcard = ergo_stopcard;
	card->releasehardware = ergo_releasehardware;
	card->testram = ergo_testram;
	card->writebootimg = ergo_writebootimg;
	card->writebootseq = ergo_writebootseq;
	card->waitpofready = ergo_waitpofready;
	card->set_errlog_state = ergo_set_errlog_state;
	INIT_WORK(&card->irq_queue, ergo_irq_bh);
	spin_lock_init(&card->hysdn_lock);

	return (0);
}				/* ergo_inithardware */
gpl-2.0
arter97-temasek-i9300/android_kernel_samsung_smdk4412
arch/mips/kernel/smp-up.c
11127
1695
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2006, 07 by Ralf Baechle (ralf@linux-mips.org) * * Symmetric Uniprocessor (TM) Support */ #include <linux/kernel.h> #include <linux/sched.h> /* * Send inter-processor interrupt */ static void up_send_ipi_single(int cpu, unsigned int action) { panic(KERN_ERR "%s called", __func__); } static inline void up_send_ipi_mask(const struct cpumask *mask, unsigned int action) { panic(KERN_ERR "%s called", __func__); } /* * After we've done initial boot, this function is called to allow the * board code to clean up state, if needed */ static void __cpuinit up_init_secondary(void) { } static void __cpuinit up_smp_finish(void) { } /* Hook for after all CPUs are online */ static void up_cpus_done(void) { } /* * Firmware CPU startup hook */ static void __cpuinit up_boot_secondary(int cpu, struct task_struct *idle) { } static void __init up_smp_setup(void) { } static void __init up_prepare_cpus(unsigned int max_cpus) { } #ifdef CONFIG_HOTPLUG_CPU static int up_cpu_disable(void) { return -ENOSYS; } static void up_cpu_die(unsigned int cpu) { BUG(); } #endif struct plat_smp_ops up_smp_ops = { .send_ipi_single = up_send_ipi_single, .send_ipi_mask = up_send_ipi_mask, .init_secondary = up_init_secondary, .smp_finish = up_smp_finish, .cpus_done = up_cpus_done, .boot_secondary = up_boot_secondary, .smp_setup = up_smp_setup, .prepare_cpus = up_prepare_cpus, #ifdef CONFIG_HOTPLUG_CPU .cpu_disable = up_cpu_disable, .cpu_die = up_cpu_die, #endif };
gpl-2.0
micropi/a20-b2g-kernel
drivers/pnp/isapnp/compat.c
14711
2196
/* * compat.c - A series of functions to make it easier to convert drivers that use * the old isapnp APIs. If possible use the new APIs instead. * * Copyright 2002 Adam Belay <ambx1@neo.rr.com> */ #include <linux/module.h> #include <linux/isapnp.h> #include <linux/string.h> static void pnp_convert_id(char *buf, unsigned short vendor, unsigned short device) { sprintf(buf, "%c%c%c%x%x%x%x", 'A' + ((vendor >> 2) & 0x3f) - 1, 'A' + (((vendor & 3) << 3) | ((vendor >> 13) & 7)) - 1, 'A' + ((vendor >> 8) & 0x1f) - 1, (device >> 4) & 0x0f, device & 0x0f, (device >> 12) & 0x0f, (device >> 8) & 0x0f); } struct pnp_card *pnp_find_card(unsigned short vendor, unsigned short device, struct pnp_card *from) { char id[8]; char any[8]; struct list_head *list; pnp_convert_id(id, vendor, device); pnp_convert_id(any, ISAPNP_ANY_ID, ISAPNP_ANY_ID); list = from ? from->global_list.next : pnp_cards.next; while (list != &pnp_cards) { struct pnp_card *card = global_to_pnp_card(list); if (compare_pnp_id(card->id, id) || (memcmp(id, any, 7) == 0)) return card; list = list->next; } return NULL; } struct pnp_dev *pnp_find_dev(struct pnp_card *card, unsigned short vendor, unsigned short function, struct pnp_dev *from) { char id[8]; char any[8]; pnp_convert_id(id, vendor, function); pnp_convert_id(any, ISAPNP_ANY_ID, ISAPNP_ANY_ID); if (card == NULL) { /* look for a logical device from all cards */ struct list_head *list; list = pnp_global.next; if (from) list = from->global_list.next; while (list != &pnp_global) { struct pnp_dev *dev = global_to_pnp_dev(list); if (compare_pnp_id(dev->id, id) || (memcmp(id, any, 7) == 0)) return dev; list = list->next; } } else { struct list_head *list; list = card->devices.next; if (from) { list = from->card_list.next; if (from->card != card) /* something is wrong */ return NULL; } while (list != &card->devices) { struct pnp_dev *dev = card_to_pnp_dev(list); if (compare_pnp_id(dev->id, id)) return dev; list = list->next; } } return NULL; } 
EXPORT_SYMBOL(pnp_find_card); EXPORT_SYMBOL(pnp_find_dev);
gpl-2.0
jcadduono/android_kernel_oneplus_msm8996
drivers/net/ethernet/intel/igb/igb_ethtool.c
120
87825
/* Intel(R) Gigabit Ethernet Linux driver * Copyright(c) 2007-2014 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, see <http://www.gnu.org/licenses/>. * * The full GNU General Public License is included in this distribution in * the file called "COPYING". * * Contact Information: * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 */ /* ethtool support for igb */ #include <linux/vmalloc.h> #include <linux/netdevice.h> #include <linux/pci.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/if_ether.h> #include <linux/ethtool.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/pm_runtime.h> #include <linux/highmem.h> #include <linux/mdio.h> #include "igb.h" struct igb_stats { char stat_string[ETH_GSTRING_LEN]; int sizeof_stat; int stat_offset; }; #define IGB_STAT(_name, _stat) { \ .stat_string = _name, \ .sizeof_stat = FIELD_SIZEOF(struct igb_adapter, _stat), \ .stat_offset = offsetof(struct igb_adapter, _stat) \ } static const struct igb_stats igb_gstrings_stats[] = { IGB_STAT("rx_packets", stats.gprc), IGB_STAT("tx_packets", stats.gptc), IGB_STAT("rx_bytes", stats.gorc), IGB_STAT("tx_bytes", stats.gotc), IGB_STAT("rx_broadcast", stats.bprc), IGB_STAT("tx_broadcast", stats.bptc), IGB_STAT("rx_multicast", stats.mprc), IGB_STAT("tx_multicast", stats.mptc), IGB_STAT("multicast", stats.mprc), IGB_STAT("collisions", stats.colc), 
IGB_STAT("rx_crc_errors", stats.crcerrs), IGB_STAT("rx_no_buffer_count", stats.rnbc), IGB_STAT("rx_missed_errors", stats.mpc), IGB_STAT("tx_aborted_errors", stats.ecol), IGB_STAT("tx_carrier_errors", stats.tncrs), IGB_STAT("tx_window_errors", stats.latecol), IGB_STAT("tx_abort_late_coll", stats.latecol), IGB_STAT("tx_deferred_ok", stats.dc), IGB_STAT("tx_single_coll_ok", stats.scc), IGB_STAT("tx_multi_coll_ok", stats.mcc), IGB_STAT("tx_timeout_count", tx_timeout_count), IGB_STAT("rx_long_length_errors", stats.roc), IGB_STAT("rx_short_length_errors", stats.ruc), IGB_STAT("rx_align_errors", stats.algnerrc), IGB_STAT("tx_tcp_seg_good", stats.tsctc), IGB_STAT("tx_tcp_seg_failed", stats.tsctfc), IGB_STAT("rx_flow_control_xon", stats.xonrxc), IGB_STAT("rx_flow_control_xoff", stats.xoffrxc), IGB_STAT("tx_flow_control_xon", stats.xontxc), IGB_STAT("tx_flow_control_xoff", stats.xofftxc), IGB_STAT("rx_long_byte_count", stats.gorc), IGB_STAT("tx_dma_out_of_sync", stats.doosync), IGB_STAT("tx_smbus", stats.mgptc), IGB_STAT("rx_smbus", stats.mgprc), IGB_STAT("dropped_smbus", stats.mgpdc), IGB_STAT("os2bmc_rx_by_bmc", stats.o2bgptc), IGB_STAT("os2bmc_tx_by_bmc", stats.b2ospc), IGB_STAT("os2bmc_tx_by_host", stats.o2bspc), IGB_STAT("os2bmc_rx_by_host", stats.b2ogprc), IGB_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts), IGB_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared), }; #define IGB_NETDEV_STAT(_net_stat) { \ .stat_string = __stringify(_net_stat), \ .sizeof_stat = FIELD_SIZEOF(struct rtnl_link_stats64, _net_stat), \ .stat_offset = offsetof(struct rtnl_link_stats64, _net_stat) \ } static const struct igb_stats igb_gstrings_net_stats[] = { IGB_NETDEV_STAT(rx_errors), IGB_NETDEV_STAT(tx_errors), IGB_NETDEV_STAT(tx_dropped), IGB_NETDEV_STAT(rx_length_errors), IGB_NETDEV_STAT(rx_over_errors), IGB_NETDEV_STAT(rx_frame_errors), IGB_NETDEV_STAT(rx_fifo_errors), IGB_NETDEV_STAT(tx_fifo_errors), IGB_NETDEV_STAT(tx_heartbeat_errors) }; #define IGB_GLOBAL_STATS_LEN \ 
	(sizeof(igb_gstrings_stats) / sizeof(struct igb_stats))
/* Count of netdev-level (rtnl_link_stats64) entries exported via ethtool */
#define IGB_NETDEV_STATS_LEN \
	(sizeof(igb_gstrings_net_stats) / sizeof(struct igb_stats))
/* Per-RX-queue stat count is derived from the stats struct itself */
#define IGB_RX_QUEUE_STATS_LEN \
	(sizeof(struct igb_rx_queue_stats) / sizeof(u64))
#define IGB_TX_QUEUE_STATS_LEN 3 /* packets, bytes, restart_queue */
/* NOTE: expands netdev_priv(netdev) twice — only valid where a local
 * `netdev` is in scope at the macro use site.
 */
#define IGB_QUEUE_STATS_LEN \
	((((struct igb_adapter *)netdev_priv(netdev))->num_rx_queues * \
	  IGB_RX_QUEUE_STATS_LEN) + \
	 (((struct igb_adapter *)netdev_priv(netdev))->num_tx_queues * \
	  IGB_TX_QUEUE_STATS_LEN))
#define IGB_STATS_LEN \
	(IGB_GLOBAL_STATS_LEN + IGB_NETDEV_STATS_LEN + IGB_QUEUE_STATS_LEN)

/* Self-test names reported through ETH_SS_TEST; order must match the
 * order results are written by the self-test implementation.
 */
static const char igb_gstrings_test[][ETH_GSTRING_LEN] = {
	"Register test (offline)", "Eeprom test (offline)",
	"Interrupt test (offline)", "Loopback test (offline)",
	"Link test (on/offline)"
};
#define IGB_TEST_LEN (sizeof(igb_gstrings_test) / ETH_GSTRING_LEN)

/* igb_get_settings - ethtool .get_settings handler.
 * Reports supported/advertised link modes, current speed/duplex and
 * MDI-X state based on the PHY media type and the E1000_STATUS register.
 * Returns 0 (always succeeds).
 */
static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
	struct e1000_sfp_flags *eth_flags = &dev_spec->eth_flags;
	u32 status;
	u32 speed;

	status = rd32(E1000_STATUS);
	if (hw->phy.media_type == e1000_media_type_copper) {
		/* Copper: full 10/100/1000BASE-T capability set */
		ecmd->supported = (SUPPORTED_10baseT_Half |
				   SUPPORTED_10baseT_Full |
				   SUPPORTED_100baseT_Half |
				   SUPPORTED_100baseT_Full |
				   SUPPORTED_1000baseT_Full|
				   SUPPORTED_Autoneg |
				   SUPPORTED_TP |
				   SUPPORTED_Pause);
		ecmd->advertising = ADVERTISED_TP;

		if (hw->mac.autoneg == 1) {
			ecmd->advertising |= ADVERTISED_Autoneg;
			/* the e1000 autoneg seems to match ethtool nicely */
			ecmd->advertising |= hw->phy.autoneg_advertised;
		}

		ecmd->port = PORT_TP;
		ecmd->phy_address = hw->phy.addr;
		ecmd->transceiver = XCVR_INTERNAL;
	} else {
		/* Fiber / SerDes / backplane media */
		ecmd->supported = (SUPPORTED_FIBRE |
				   SUPPORTED_1000baseKX_Full |
				   SUPPORTED_Autoneg |
				   SUPPORTED_Pause);
		ecmd->advertising = (ADVERTISED_FIBRE |
				     ADVERTISED_1000baseKX_Full);
		if (hw->mac.type == e1000_i354) {
			/* i354 2.5G backplane SKU: swap 1000baseKX for
			 * 2500baseX unless the 2.5G capability is
			 * overridden in E1000_STATUS.
			 */
			if ((hw->device_id ==
			     E1000_DEV_ID_I354_BACKPLANE_2_5GBPS) &&
			    !(status & E1000_STATUS_2P5_SKU_OVER)) {
				ecmd->supported |= SUPPORTED_2500baseX_Full;
				ecmd->supported &= ~SUPPORTED_1000baseKX_Full;
				ecmd->advertising |= ADVERTISED_2500baseX_Full;
				ecmd->advertising &= ~ADVERTISED_1000baseKX_Full;
			}
		}
		if (eth_flags->e100_base_fx) {
			ecmd->supported |= SUPPORTED_100baseT_Full;
			ecmd->advertising |= ADVERTISED_100baseT_Full;
		}
		if (hw->mac.autoneg == 1)
			ecmd->advertising |= ADVERTISED_Autoneg;

		ecmd->port = PORT_FIBRE;
		ecmd->transceiver = XCVR_EXTERNAL;
	}
	/* With autoneg off, pause advertisement is meaningless */
	if (hw->mac.autoneg != 1)
		ecmd->advertising &= ~(ADVERTISED_Pause |
				       ADVERTISED_Asym_Pause);

	/* Map requested flow-control mode onto pause advertisement bits */
	switch (hw->fc.requested_mode) {
	case e1000_fc_full:
		ecmd->advertising |= ADVERTISED_Pause;
		break;
	case e1000_fc_rx_pause:
		ecmd->advertising |= (ADVERTISED_Pause |
				      ADVERTISED_Asym_Pause);
		break;
	case e1000_fc_tx_pause:
		ecmd->advertising |= ADVERTISED_Asym_Pause;
		break;
	default:
		ecmd->advertising &= ~(ADVERTISED_Pause |
				       ADVERTISED_Asym_Pause);
	}
	if (status & E1000_STATUS_LU) {
		/* Link up: decode speed from STATUS; 2.5G takes priority
		 * when the SKU supports it and is not overridden.
		 */
		if ((status & E1000_STATUS_2P5_SKU) &&
		    !(status & E1000_STATUS_2P5_SKU_OVER)) {
			speed = SPEED_2500;
		} else if (status & E1000_STATUS_SPEED_1000) {
			speed = SPEED_1000;
		} else if (status & E1000_STATUS_SPEED_100) {
			speed = SPEED_100;
		} else {
			speed = SPEED_10;
		}
		/* Non-copper media is always full duplex */
		if ((status & E1000_STATUS_FD) ||
		    hw->phy.media_type != e1000_media_type_copper)
			ecmd->duplex = DUPLEX_FULL;
		else
			ecmd->duplex = DUPLEX_HALF;
	} else {
		speed = SPEED_UNKNOWN;
		ecmd->duplex = DUPLEX_UNKNOWN;
	}
	ethtool_cmd_speed_set(ecmd, speed);
	if ((hw->phy.media_type == e1000_media_type_fiber) ||
	    hw->mac.autoneg)
		ecmd->autoneg = AUTONEG_ENABLE;
	else
		ecmd->autoneg = AUTONEG_DISABLE;

	/* MDI-X => 2; MDI =>1; Invalid =>0 */
	if (hw->phy.media_type == e1000_media_type_copper)
		ecmd->eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X :
						      ETH_TP_MDI;
	else
		ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID;

	/* AUTO_ALL_MODES (internal "auto") is reported as ETH_TP_MDI_AUTO */
	if (hw->phy.mdix == AUTO_ALL_MODES)
		ecmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
	else
		ecmd->eth_tp_mdix_ctrl = hw->phy.mdix;

	return 0;
}

/* igb_set_settings - ethtool .set_settings handler.
 * Applies autoneg/speed/duplex/MDI-X requests, then resets the link.
 * Returns 0 on success or a negative errno.
 * (Function continues past this block boundary.)
 */
static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	/* When SoL/IDER sessions are active, autoneg/speed/duplex
	 * cannot be changed
	 */
	if (igb_check_reset_block(hw)) {
		dev_err(&adapter->pdev->dev,
			"Cannot change link characteristics when SoL/IDER is active.\n");
		return -EINVAL;
	}

	/* MDI setting is only allowed when autoneg enabled because
	 * some hardware doesn't allow MDI setting when speed or
	 * duplex is forced.
	 */
	if (ecmd->eth_tp_mdix_ctrl) {
		if (hw->phy.media_type != e1000_media_type_copper)
			return -EOPNOTSUPP;

		if ((ecmd->eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) &&
		    (ecmd->autoneg != AUTONEG_ENABLE)) {
			dev_err(&adapter->pdev->dev, "forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n");
			return -EINVAL;
		}
	}

	/* Serialize against other reset paths */
	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
		usleep_range(1000, 2000);

	if (ecmd->autoneg == AUTONEG_ENABLE) {
		hw->mac.autoneg = 1;
		if (hw->phy.media_type == e1000_media_type_fiber) {
			hw->phy.autoneg_advertised = ecmd->advertising |
						     ADVERTISED_FIBRE |
						     ADVERTISED_Autoneg;
			/* Fiber: restrict advertisement to the current
			 * link speed when one is already established.
			 */
			switch (adapter->link_speed) {
			case SPEED_2500:
				hw->phy.autoneg_advertised =
					ADVERTISED_2500baseX_Full;
				break;
			case SPEED_1000:
				hw->phy.autoneg_advertised =
					ADVERTISED_1000baseT_Full;
				break;
			case SPEED_100:
				hw->phy.autoneg_advertised =
					ADVERTISED_100baseT_Full;
				break;
			default:
				break;
			}
		} else {
			hw->phy.autoneg_advertised = ecmd->advertising |
						     ADVERTISED_TP |
						     ADVERTISED_Autoneg;
		}
		ecmd->advertising = hw->phy.autoneg_advertised;
		if (adapter->fc_autoneg)
			hw->fc.requested_mode = e1000_fc_default;
	} else {
		u32 speed = ethtool_cmd_speed(ecmd);
		/* calling this overrides forced MDI setting */
		if (igb_set_spd_dplx(adapter, speed, ecmd->duplex)) {
			/* forced speed/duplex rejected by hw layer */
			clear_bit(__IGB_RESETTING, &adapter->state);
			return -EINVAL;
		}
	}

	/* MDI-X => 2; MDI => 1; Auto => 3 */
	if (ecmd->eth_tp_mdix_ctrl) {
		/* fix up the value for auto (3 => 0) as zero is mapped
		 * internally to auto
		 */
		if (ecmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO)
			hw->phy.mdix = AUTO_ALL_MODES;
		else
			hw->phy.mdix = ecmd->eth_tp_mdix_ctrl;
	}

	/* reset the link */
	if (netif_running(adapter->netdev)) {
		igb_down(adapter);
		igb_up(adapter);
	} else
		igb_reset(adapter);

	clear_bit(__IGB_RESETTING, &adapter->state);
	return 0;
}

/* igb_get_link - ethtool .get_link handler.
 * Returns nonzero when link is up (per igb_has_link()).
 */
static u32 igb_get_link(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_mac_info *mac = &adapter->hw.mac;

	/* If the link is not reported up to netdev, interrupts are disabled,
	 * and so the physical link state may have changed since we last
	 * looked. Set get_link_status to make sure that the true link
	 * state is interrogated, rather than pulling a cached and possibly
	 * stale link state from the driver.
	 */
	if (!netif_carrier_ok(netdev))
		mac->get_link_status = 1;

	return igb_has_link(adapter);
}

/* igb_get_pauseparam - ethtool .get_pauseparam handler.
 * Reports flow-control autoneg state and the currently active
 * rx/tx pause configuration from hw->fc.current_mode.
 */
static void igb_get_pauseparam(struct net_device *netdev,
			       struct ethtool_pauseparam *pause)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	pause->autoneg =
		(adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE);

	if (hw->fc.current_mode == e1000_fc_rx_pause)
		pause->rx_pause = 1;
	else if (hw->fc.current_mode == e1000_fc_tx_pause)
		pause->tx_pause = 1;
	else if (hw->fc.current_mode == e1000_fc_full) {
		pause->rx_pause = 1;
		pause->tx_pause = 1;
	}
}

/* igb_set_pauseparam - ethtool .set_pauseparam handler.
 * Either re-enables flow-control autonegotiation (via a reset / link
 * re-init) or forces the requested rx/tx pause mode directly.
 * Returns 0 on success or a negative errno.
 */
static int igb_set_pauseparam(struct net_device *netdev,
			      struct ethtool_pauseparam *pause)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int retval = 0;

	/* 100basefx does not support setting link flow control */
	if (hw->dev_spec._82575.eth_flags.e100_base_fx)
		return -EINVAL;

	adapter->fc_autoneg = pause->autoneg;

	/* Serialize against other reset paths */
	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
		usleep_range(1000, 2000);

	if (adapter->fc_autoneg == AUTONEG_ENABLE) {
		hw->fc.requested_mode = e1000_fc_default;
		if (netif_running(adapter->netdev)) {
			igb_down(adapter);
			igb_up(adapter);
		} else {
			igb_reset(adapter);
		}
	} else {
		/* Forced mode: translate the rx/tx flags to an fc mode */
		if (pause->rx_pause && pause->tx_pause)
			hw->fc.requested_mode = e1000_fc_full;
		else if (pause->rx_pause && !pause->tx_pause)
			hw->fc.requested_mode = e1000_fc_rx_pause;
		else if (!pause->rx_pause && pause->tx_pause)
			hw->fc.requested_mode = e1000_fc_tx_pause;
		else if (!pause->rx_pause && !pause->tx_pause)
			hw->fc.requested_mode = e1000_fc_none;

		hw->fc.current_mode = hw->fc.requested_mode;

		/* Copper forces MAC flow control directly; other media
		 * re-runs link setup.
		 */
		retval = ((hw->phy.media_type == e1000_media_type_copper) ?
			  igb_force_mac_fc(hw) : igb_setup_link(hw));
	}

	clear_bit(__IGB_RESETTING, &adapter->state);
	return retval;
}

/* igb_get_msglevel - ethtool .get_msglevel handler (driver log mask) */
static u32 igb_get_msglevel(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	return adapter->msg_enable;
}

/* igb_set_msglevel - ethtool .set_msglevel handler (driver log mask) */
static void igb_set_msglevel(struct net_device *netdev, u32 data)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	adapter->msg_enable = data;
}

/* igb_get_regs_len - size in bytes of the register dump buffer */
static int igb_get_regs_len(struct net_device *netdev)
{
#define IGB_REGS_LEN 739
	return IGB_REGS_LEN * sizeof(u32);
}

/* igb_get_regs - ethtool .get_regs handler.
 * Snapshots device registers (and a subset of software statistics)
 * into the caller-supplied buffer at fixed indices.
 * (Function continues past this block boundary.)
 */
static void igb_get_regs(struct net_device *netdev,
			 struct ethtool_regs *regs, void *p)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 *regs_buff = p;
	u8 i;

	memset(p, 0, IGB_REGS_LEN * sizeof(u32));

	regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id;

	/* General Registers */
	regs_buff[0] = rd32(E1000_CTRL);
	regs_buff[1] = rd32(E1000_STATUS);
	regs_buff[2] = rd32(E1000_CTRL_EXT);
	regs_buff[3] = rd32(E1000_MDIC);
	regs_buff[4] = rd32(E1000_SCTL);
	regs_buff[5] = rd32(E1000_CONNSW);
	regs_buff[6] = rd32(E1000_VET);
	regs_buff[7] = rd32(E1000_LEDCTL);
	regs_buff[8] = rd32(E1000_PBA);
	regs_buff[9] = rd32(E1000_PBS);
	regs_buff[10] = rd32(E1000_FRTIMER);
	regs_buff[11] = rd32(E1000_TCPTIMER);

	/* NVM Register */
	regs_buff[12] = rd32(E1000_EECD);

	/* Interrupt */
	/* Reading EICS for EICR because they read the
	 * same but EICS does not clear on read
	 */
	regs_buff[13] = rd32(E1000_EICS);
	regs_buff[14] = rd32(E1000_EICS);
	regs_buff[15] = rd32(E1000_EIMS);
	regs_buff[16] = rd32(E1000_EIMC);
	regs_buff[17] = rd32(E1000_EIAC);
	regs_buff[18] = rd32(E1000_EIAM);
	/* Reading ICS for ICR because they read the
	 * same but ICS does not clear on read
	 */
	regs_buff[19] = rd32(E1000_ICS);
	regs_buff[20] = rd32(E1000_ICS);
	regs_buff[21] = rd32(E1000_IMS);
	regs_buff[22] = rd32(E1000_IMC);
	regs_buff[23] = rd32(E1000_IAC);
	regs_buff[24] = rd32(E1000_IAM);
	regs_buff[25] = rd32(E1000_IMIRVP);

	/* Flow Control */
	regs_buff[26] = rd32(E1000_FCAL);
regs_buff[27] = rd32(E1000_FCAH); regs_buff[28] = rd32(E1000_FCTTV); regs_buff[29] = rd32(E1000_FCRTL); regs_buff[30] = rd32(E1000_FCRTH); regs_buff[31] = rd32(E1000_FCRTV); /* Receive */ regs_buff[32] = rd32(E1000_RCTL); regs_buff[33] = rd32(E1000_RXCSUM); regs_buff[34] = rd32(E1000_RLPML); regs_buff[35] = rd32(E1000_RFCTL); regs_buff[36] = rd32(E1000_MRQC); regs_buff[37] = rd32(E1000_VT_CTL); /* Transmit */ regs_buff[38] = rd32(E1000_TCTL); regs_buff[39] = rd32(E1000_TCTL_EXT); regs_buff[40] = rd32(E1000_TIPG); regs_buff[41] = rd32(E1000_DTXCTL); /* Wake Up */ regs_buff[42] = rd32(E1000_WUC); regs_buff[43] = rd32(E1000_WUFC); regs_buff[44] = rd32(E1000_WUS); regs_buff[45] = rd32(E1000_IPAV); regs_buff[46] = rd32(E1000_WUPL); /* MAC */ regs_buff[47] = rd32(E1000_PCS_CFG0); regs_buff[48] = rd32(E1000_PCS_LCTL); regs_buff[49] = rd32(E1000_PCS_LSTAT); regs_buff[50] = rd32(E1000_PCS_ANADV); regs_buff[51] = rd32(E1000_PCS_LPAB); regs_buff[52] = rd32(E1000_PCS_NPTX); regs_buff[53] = rd32(E1000_PCS_LPABNP); /* Statistics */ regs_buff[54] = adapter->stats.crcerrs; regs_buff[55] = adapter->stats.algnerrc; regs_buff[56] = adapter->stats.symerrs; regs_buff[57] = adapter->stats.rxerrc; regs_buff[58] = adapter->stats.mpc; regs_buff[59] = adapter->stats.scc; regs_buff[60] = adapter->stats.ecol; regs_buff[61] = adapter->stats.mcc; regs_buff[62] = adapter->stats.latecol; regs_buff[63] = adapter->stats.colc; regs_buff[64] = adapter->stats.dc; regs_buff[65] = adapter->stats.tncrs; regs_buff[66] = adapter->stats.sec; regs_buff[67] = adapter->stats.htdpmc; regs_buff[68] = adapter->stats.rlec; regs_buff[69] = adapter->stats.xonrxc; regs_buff[70] = adapter->stats.xontxc; regs_buff[71] = adapter->stats.xoffrxc; regs_buff[72] = adapter->stats.xofftxc; regs_buff[73] = adapter->stats.fcruc; regs_buff[74] = adapter->stats.prc64; regs_buff[75] = adapter->stats.prc127; regs_buff[76] = adapter->stats.prc255; regs_buff[77] = adapter->stats.prc511; regs_buff[78] = adapter->stats.prc1023; 
regs_buff[79] = adapter->stats.prc1522; regs_buff[80] = adapter->stats.gprc; regs_buff[81] = adapter->stats.bprc; regs_buff[82] = adapter->stats.mprc; regs_buff[83] = adapter->stats.gptc; regs_buff[84] = adapter->stats.gorc; regs_buff[86] = adapter->stats.gotc; regs_buff[88] = adapter->stats.rnbc; regs_buff[89] = adapter->stats.ruc; regs_buff[90] = adapter->stats.rfc; regs_buff[91] = adapter->stats.roc; regs_buff[92] = adapter->stats.rjc; regs_buff[93] = adapter->stats.mgprc; regs_buff[94] = adapter->stats.mgpdc; regs_buff[95] = adapter->stats.mgptc; regs_buff[96] = adapter->stats.tor; regs_buff[98] = adapter->stats.tot; regs_buff[100] = adapter->stats.tpr; regs_buff[101] = adapter->stats.tpt; regs_buff[102] = adapter->stats.ptc64; regs_buff[103] = adapter->stats.ptc127; regs_buff[104] = adapter->stats.ptc255; regs_buff[105] = adapter->stats.ptc511; regs_buff[106] = adapter->stats.ptc1023; regs_buff[107] = adapter->stats.ptc1522; regs_buff[108] = adapter->stats.mptc; regs_buff[109] = adapter->stats.bptc; regs_buff[110] = adapter->stats.tsctc; regs_buff[111] = adapter->stats.iac; regs_buff[112] = adapter->stats.rpthc; regs_buff[113] = adapter->stats.hgptc; regs_buff[114] = adapter->stats.hgorc; regs_buff[116] = adapter->stats.hgotc; regs_buff[118] = adapter->stats.lenerrs; regs_buff[119] = adapter->stats.scvpc; regs_buff[120] = adapter->stats.hrmpc; for (i = 0; i < 4; i++) regs_buff[121 + i] = rd32(E1000_SRRCTL(i)); for (i = 0; i < 4; i++) regs_buff[125 + i] = rd32(E1000_PSRTYPE(i)); for (i = 0; i < 4; i++) regs_buff[129 + i] = rd32(E1000_RDBAL(i)); for (i = 0; i < 4; i++) regs_buff[133 + i] = rd32(E1000_RDBAH(i)); for (i = 0; i < 4; i++) regs_buff[137 + i] = rd32(E1000_RDLEN(i)); for (i = 0; i < 4; i++) regs_buff[141 + i] = rd32(E1000_RDH(i)); for (i = 0; i < 4; i++) regs_buff[145 + i] = rd32(E1000_RDT(i)); for (i = 0; i < 4; i++) regs_buff[149 + i] = rd32(E1000_RXDCTL(i)); for (i = 0; i < 10; i++) regs_buff[153 + i] = rd32(E1000_EITR(i)); for (i = 0; i < 8; i++) 
regs_buff[163 + i] = rd32(E1000_IMIR(i)); for (i = 0; i < 8; i++) regs_buff[171 + i] = rd32(E1000_IMIREXT(i)); for (i = 0; i < 16; i++) regs_buff[179 + i] = rd32(E1000_RAL(i)); for (i = 0; i < 16; i++) regs_buff[195 + i] = rd32(E1000_RAH(i)); for (i = 0; i < 4; i++) regs_buff[211 + i] = rd32(E1000_TDBAL(i)); for (i = 0; i < 4; i++) regs_buff[215 + i] = rd32(E1000_TDBAH(i)); for (i = 0; i < 4; i++) regs_buff[219 + i] = rd32(E1000_TDLEN(i)); for (i = 0; i < 4; i++) regs_buff[223 + i] = rd32(E1000_TDH(i)); for (i = 0; i < 4; i++) regs_buff[227 + i] = rd32(E1000_TDT(i)); for (i = 0; i < 4; i++) regs_buff[231 + i] = rd32(E1000_TXDCTL(i)); for (i = 0; i < 4; i++) regs_buff[235 + i] = rd32(E1000_TDWBAL(i)); for (i = 0; i < 4; i++) regs_buff[239 + i] = rd32(E1000_TDWBAH(i)); for (i = 0; i < 4; i++) regs_buff[243 + i] = rd32(E1000_DCA_TXCTRL(i)); for (i = 0; i < 4; i++) regs_buff[247 + i] = rd32(E1000_IP4AT_REG(i)); for (i = 0; i < 4; i++) regs_buff[251 + i] = rd32(E1000_IP6AT_REG(i)); for (i = 0; i < 32; i++) regs_buff[255 + i] = rd32(E1000_WUPM_REG(i)); for (i = 0; i < 128; i++) regs_buff[287 + i] = rd32(E1000_FFMT_REG(i)); for (i = 0; i < 128; i++) regs_buff[415 + i] = rd32(E1000_FFVT_REG(i)); for (i = 0; i < 4; i++) regs_buff[543 + i] = rd32(E1000_FFLT_REG(i)); regs_buff[547] = rd32(E1000_TDFH); regs_buff[548] = rd32(E1000_TDFT); regs_buff[549] = rd32(E1000_TDFHS); regs_buff[550] = rd32(E1000_TDFPC); if (hw->mac.type > e1000_82580) { regs_buff[551] = adapter->stats.o2bgptc; regs_buff[552] = adapter->stats.b2ospc; regs_buff[553] = adapter->stats.o2bspc; regs_buff[554] = adapter->stats.b2ogprc; } if (hw->mac.type != e1000_82576) return; for (i = 0; i < 12; i++) regs_buff[555 + i] = rd32(E1000_SRRCTL(i + 4)); for (i = 0; i < 4; i++) regs_buff[567 + i] = rd32(E1000_PSRTYPE(i + 4)); for (i = 0; i < 12; i++) regs_buff[571 + i] = rd32(E1000_RDBAL(i + 4)); for (i = 0; i < 12; i++) regs_buff[583 + i] = rd32(E1000_RDBAH(i + 4)); for (i = 0; i < 12; i++) regs_buff[595 + i] = 
rd32(E1000_RDLEN(i + 4)); for (i = 0; i < 12; i++) regs_buff[607 + i] = rd32(E1000_RDH(i + 4)); for (i = 0; i < 12; i++) regs_buff[619 + i] = rd32(E1000_RDT(i + 4)); for (i = 0; i < 12; i++) regs_buff[631 + i] = rd32(E1000_RXDCTL(i + 4)); for (i = 0; i < 12; i++) regs_buff[643 + i] = rd32(E1000_TDBAL(i + 4)); for (i = 0; i < 12; i++) regs_buff[655 + i] = rd32(E1000_TDBAH(i + 4)); for (i = 0; i < 12; i++) regs_buff[667 + i] = rd32(E1000_TDLEN(i + 4)); for (i = 0; i < 12; i++) regs_buff[679 + i] = rd32(E1000_TDH(i + 4)); for (i = 0; i < 12; i++) regs_buff[691 + i] = rd32(E1000_TDT(i + 4)); for (i = 0; i < 12; i++) regs_buff[703 + i] = rd32(E1000_TXDCTL(i + 4)); for (i = 0; i < 12; i++) regs_buff[715 + i] = rd32(E1000_TDWBAL(i + 4)); for (i = 0; i < 12; i++) regs_buff[727 + i] = rd32(E1000_TDWBAH(i + 4)); } static int igb_get_eeprom_len(struct net_device *netdev) { struct igb_adapter *adapter = netdev_priv(netdev); return adapter->hw.nvm.word_size * 2; } static int igb_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom, u8 *bytes) { struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; u16 *eeprom_buff; int first_word, last_word; int ret_val = 0; u16 i; if (eeprom->len == 0) return -EINVAL; eeprom->magic = hw->vendor_id | (hw->device_id << 16); first_word = eeprom->offset >> 1; last_word = (eeprom->offset + eeprom->len - 1) >> 1; eeprom_buff = kmalloc(sizeof(u16) * (last_word - first_word + 1), GFP_KERNEL); if (!eeprom_buff) return -ENOMEM; if (hw->nvm.type == e1000_nvm_eeprom_spi) ret_val = hw->nvm.ops.read(hw, first_word, last_word - first_word + 1, eeprom_buff); else { for (i = 0; i < last_word - first_word + 1; i++) { ret_val = hw->nvm.ops.read(hw, first_word + i, 1, &eeprom_buff[i]); if (ret_val) break; } } /* Device's eeprom is always little-endian, word addressable */ for (i = 0; i < last_word - first_word + 1; i++) le16_to_cpus(&eeprom_buff[i]); memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), 
	       eeprom->len);
	kfree(eeprom_buff);

	return ret_val;
}

/* igb_set_eeprom - ethtool .set_eeprom handler.
 * Writes @eeprom->len bytes from @bytes into the NVM at @eeprom->offset,
 * doing read/modify/write on unaligned first/last words, then updates
 * the NVM checksum and the cached firmware version string.
 * Returns 0 on success or a negative errno.
 */
static int igb_set_eeprom(struct net_device *netdev,
			  struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u16 *eeprom_buff;
	void *ptr;
	int max_len, first_word, last_word, ret_val = 0;
	u16 i;

	if (eeprom->len == 0)
		return -EOPNOTSUPP;

	/* Flashless i210/i211 parts have no writable NVM image */
	if ((hw->mac.type >= e1000_i210) &&
	    !igb_get_flash_presence_i210(hw)) {
		return -EOPNOTSUPP;
	}

	/* Magic must match the value reported by igb_get_eeprom */
	if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
		return -EFAULT;

	max_len = hw->nvm.word_size * 2;

	first_word = eeprom->offset >> 1;
	last_word = (eeprom->offset + eeprom->len - 1) >> 1;
	eeprom_buff = kmalloc(max_len, GFP_KERNEL);
	if (!eeprom_buff)
		return -ENOMEM;

	ptr = (void *)eeprom_buff;

	if (eeprom->offset & 1) {
		/* need read/modify/write of first changed EEPROM word
		 * only the second byte of the word is being modified
		 */
		ret_val = hw->nvm.ops.read(hw, first_word, 1,
					   &eeprom_buff[0]);
		ptr++;
	}
	if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) {
		/* need read/modify/write of last changed EEPROM word
		 * only the first byte of the word is being modified
		 */
		ret_val = hw->nvm.ops.read(hw, last_word, 1,
					   &eeprom_buff[last_word - first_word]);
	}

	/* Device's eeprom is always little-endian, word addressable */
	for (i = 0; i < last_word - first_word + 1; i++)
		le16_to_cpus(&eeprom_buff[i]);

	memcpy(ptr, bytes, eeprom->len);

	for (i = 0; i < last_word - first_word + 1; i++)
		eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]);

	ret_val = hw->nvm.ops.write(hw, first_word,
				    last_word - first_word + 1, eeprom_buff);

	/* Update the checksum if nvm write succeeded */
	if (ret_val == 0)
		hw->nvm.ops.update(hw);

	igb_set_fw_version(adapter);
	kfree(eeprom_buff);
	return ret_val;
}

/* igb_get_drvinfo - ethtool .get_drvinfo handler.
 * Fills in driver name/version, firmware version, bus info and the
 * lengths of the stats/test/regdump/eeprom dumps.
 */
static void igb_get_drvinfo(struct net_device *netdev,
			    struct ethtool_drvinfo *drvinfo)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	strlcpy(drvinfo->driver, igb_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, igb_driver_version,
		sizeof(drvinfo->version));

	/* EEPROM image version # is reported as firmware version # for
	 * 82575 controllers
	 */
	strlcpy(drvinfo->fw_version, adapter->fw_version,
		sizeof(drvinfo->fw_version));
	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
		sizeof(drvinfo->bus_info));
	drvinfo->n_stats = IGB_STATS_LEN;
	drvinfo->testinfo_len = IGB_TEST_LEN;
	drvinfo->regdump_len = igb_get_regs_len(netdev);
	drvinfo->eedump_len = igb_get_eeprom_len(netdev);
}

/* igb_get_ringparam - ethtool .get_ringparam handler.
 * Reports current and maximum TX/RX descriptor ring sizes.
 */
static void igb_get_ringparam(struct net_device *netdev,
			      struct ethtool_ringparam *ring)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	ring->rx_max_pending = IGB_MAX_RXD;
	ring->tx_max_pending = IGB_MAX_TXD;
	ring->rx_pending = adapter->rx_ring_count;
	ring->tx_pending = adapter->tx_ring_count;
}

/* igb_set_ringparam - ethtool .set_ringparam handler.
 * Clamps and aligns the requested descriptor counts, then resizes the
 * TX/RX rings (reallocating resources if the interface is running).
 * Returns 0 on success or a negative errno.
 * (Function continues past this block boundary.)
 */
static int igb_set_ringparam(struct net_device *netdev,
			     struct ethtool_ringparam *ring)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct igb_ring *temp_ring;
	int i, err = 0;
	u16 new_rx_count, new_tx_count;

	/* mini/jumbo rings are not supported by this hardware */
	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	/* Clamp to [IGB_MIN_*, IGB_MAX_*] and round up to the required
	 * descriptor multiple.
	 */
	new_rx_count = min_t(u32, ring->rx_pending, IGB_MAX_RXD);
	new_rx_count = max_t(u16, new_rx_count, IGB_MIN_RXD);
	new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE);

	new_tx_count = min_t(u32, ring->tx_pending, IGB_MAX_TXD);
	new_tx_count = max_t(u16, new_tx_count, IGB_MIN_TXD);
	new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE);

	if ((new_tx_count == adapter->tx_ring_count) &&
	    (new_rx_count == adapter->rx_ring_count)) {
		/* nothing to do */
		return 0;
	}

	/* Serialize against other reset paths */
	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
		usleep_range(1000, 2000);

	if (!netif_running(adapter->netdev)) {
		/* Interface down: just record the new counts; resources
		 * are allocated on the next open.
		 */
		for (i = 0; i < adapter->num_tx_queues; i++)
			adapter->tx_ring[i]->count = new_tx_count;
		for (i = 0; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i]->count = new_rx_count;
		adapter->tx_ring_count = new_tx_count;
		adapter->rx_ring_count = new_rx_count;
		goto clear_reset;
	}

	if (adapter->num_tx_queues > adapter->num_rx_queues)
		temp_ring =
vmalloc(adapter->num_tx_queues * sizeof(struct igb_ring)); else temp_ring = vmalloc(adapter->num_rx_queues * sizeof(struct igb_ring)); if (!temp_ring) { err = -ENOMEM; goto clear_reset; } igb_down(adapter); /* We can't just free everything and then setup again, * because the ISRs in MSI-X mode get passed pointers * to the Tx and Rx ring structs. */ if (new_tx_count != adapter->tx_ring_count) { for (i = 0; i < adapter->num_tx_queues; i++) { memcpy(&temp_ring[i], adapter->tx_ring[i], sizeof(struct igb_ring)); temp_ring[i].count = new_tx_count; err = igb_setup_tx_resources(&temp_ring[i]); if (err) { while (i) { i--; igb_free_tx_resources(&temp_ring[i]); } goto err_setup; } } for (i = 0; i < adapter->num_tx_queues; i++) { igb_free_tx_resources(adapter->tx_ring[i]); memcpy(adapter->tx_ring[i], &temp_ring[i], sizeof(struct igb_ring)); } adapter->tx_ring_count = new_tx_count; } if (new_rx_count != adapter->rx_ring_count) { for (i = 0; i < adapter->num_rx_queues; i++) { memcpy(&temp_ring[i], adapter->rx_ring[i], sizeof(struct igb_ring)); temp_ring[i].count = new_rx_count; err = igb_setup_rx_resources(&temp_ring[i]); if (err) { while (i) { i--; igb_free_rx_resources(&temp_ring[i]); } goto err_setup; } } for (i = 0; i < adapter->num_rx_queues; i++) { igb_free_rx_resources(adapter->rx_ring[i]); memcpy(adapter->rx_ring[i], &temp_ring[i], sizeof(struct igb_ring)); } adapter->rx_ring_count = new_rx_count; } err_setup: igb_up(adapter); vfree(temp_ring); clear_reset: clear_bit(__IGB_RESETTING, &adapter->state); return err; } /* ethtool register test data */ struct igb_reg_test { u16 reg; u16 reg_offset; u16 array_len; u16 test_type; u32 mask; u32 write; }; /* In the hardware, registers are laid out either singly, in arrays * spaced 0x100 bytes apart, or in contiguous tables. We assume * most tests take place on arrays or single registers (handled * as a single-element array) and special-case the tables. * Table tests are always pattern tests. 
* * We also make provision for some required setup steps by specifying * registers to be written without any read-back testing. */ #define PATTERN_TEST 1 #define SET_READ_TEST 2 #define WRITE_NO_TEST 3 #define TABLE32_TEST 4 #define TABLE64_TEST_LO 5 #define TABLE64_TEST_HI 6 /* i210 reg test */ static struct igb_reg_test reg_test_i210[] = { { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, /* RDH is read-only for i210, only test RDT. */ { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 }, { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB }, { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF }, { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, { E1000_RA, 0, 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, { E1000_RA, 0, 16, TABLE64_TEST_HI, 0x900FFFFF, 0xFFFFFFFF }, { E1000_MTA, 0, 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, { 0, 0, 0, 0, 0 } }; /* i350 reg test */ static struct igb_reg_test reg_test_i350[] = { { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, { E1000_FCT, 0x100, 
1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFF0000, 0xFFFF0000 }, { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, { E1000_RDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, { E1000_RDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, { E1000_RDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, /* RDH is read-only for i350, only test RDT. */ { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, { E1000_RDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 }, { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, { E1000_TDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, { E1000_TDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, { E1000_TDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, { E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB }, { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF }, { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, { E1000_RA, 0, 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, { E1000_RA, 0, 16, TABLE64_TEST_HI, 0xC3FFFFFF, 0xFFFFFFFF }, { E1000_RA2, 0, 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, { E1000_RA2, 0, 16, TABLE64_TEST_HI, 0xC3FFFFFF, 0xFFFFFFFF }, { E1000_MTA, 0, 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, { 0, 0, 
0, 0 } }; /* 82580 reg test */ static struct igb_reg_test reg_test_82580[] = { { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, { E1000_RDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, { E1000_RDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, { E1000_RDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, /* RDH is read-only for 82580, only test RDT. */ { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, { E1000_RDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 }, { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, { E1000_TDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, { E1000_TDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, { E1000_TDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, { E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB }, { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF }, { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, { E1000_RA, 0, 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, { E1000_RA, 0, 16, 
TABLE64_TEST_HI, 0x83FFFFFF, 0xFFFFFFFF }, { E1000_RA2, 0, 8, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, { E1000_RA2, 0, 8, TABLE64_TEST_HI, 0x83FFFFFF, 0xFFFFFFFF }, { E1000_MTA, 0, 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, { 0, 0, 0, 0 } }; /* 82576 reg test */ static struct igb_reg_test reg_test_82576[] = { { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, { E1000_RDBAL(4), 0x40, 12, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, { E1000_RDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, { E1000_RDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, /* Enable all RX queues before testing. */ { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE }, { E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE }, /* RDH is read-only for 82576, only test RDT. 
*/ { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, { E1000_RDT(4), 0x40, 12, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 }, { E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0, 0 }, { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 }, { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, { E1000_TDBAL(4), 0x40, 12, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, { E1000_TDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, { E1000_TDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB }, { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF }, { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, { E1000_RA, 0, 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, { E1000_RA, 0, 16, TABLE64_TEST_HI, 0x83FFFFFF, 0xFFFFFFFF }, { E1000_RA2, 0, 8, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, { E1000_RA2, 0, 8, TABLE64_TEST_HI, 0x83FFFFFF, 0xFFFFFFFF }, { E1000_MTA, 0, 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, { 0, 0, 0, 0 } }; /* 82575 register test */ static struct igb_reg_test reg_test_82575[] = { { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, /* Enable all four RX queues before 
testing. */
	/* Tail of the 82575 register self-test table (the table head lies
	 * earlier in the file).  Entry layout:
	 * { reg, offset between array elements, array_len, test_type,
	 *   mask, write } */
	{ E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE },
	/* RDH is read-only for 82575, only test RDT. */
	{ E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
	{ E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 },
	{ E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
	{ E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
	{ E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
	{ E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
	{ E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
	{ E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB3FE, 0x003FFFFB },
	{ E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB3FE, 0xFFFFFFFF },
	{ E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
	{ E1000_TXCW, 0x100, 1, PATTERN_TEST, 0xC000FFFF, 0x0000FFFF },
	{ E1000_RA, 0, 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
	{ E1000_RA, 0, 16, TABLE64_TEST_HI, 0x800FFFFF, 0xFFFFFFFF },
	{ E1000_MTA, 0, 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ 0, 0, 0, 0 }
};

/* Write four bit patterns (each masked by @write) to @reg, read each one
 * back through @mask and compare.  On mismatch, record the failing
 * register offset in @data and return true; return false on success. */
static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data,
			     int reg, u32 mask, u32 write)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 pat, val;
	static const u32 _test[] = {
		0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};

	for (pat = 0; pat < ARRAY_SIZE(_test); pat++) {
		wr32(reg, (_test[pat] & write));
		val = rd32(reg) & mask;
		if (val != (_test[pat] & write & mask)) {
			dev_err(&adapter->pdev->dev,
				"pattern test reg %04X failed: got 0x%08X expected 0x%08X\n",
				reg, val, (_test[pat] & write & mask));
			*data = reg;
			return true;
		}
	}
	return false;
}

/* Write @write (masked) to @reg once and verify the value sticks when
 * read back through @mask.  On mismatch, record the failing register
 * offset in @data and return true; return false on success. */
static bool reg_set_and_check(struct igb_adapter *adapter, u64 *data,
			      int reg, u32 mask, u32 write)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 val;

	wr32(reg, write & mask);
	val = rd32(reg);
	if ((write & mask) != (val & mask)) {
		dev_err(&adapter->pdev->dev,
			"set/check reg %04X test failed: got 0x%08X expected 0x%08X\n",
			reg, (val & mask), (write & mask));
		*data = reg;
		return true;
	}
	return false;
}

/* Helper macros for igb_reg_test(): run one sub-test and return 1
 * (failure) from the *caller* on error.  They rely on 'adapter' and
 * 'data' being in scope at the call site. */
#define REG_PATTERN_TEST(reg, mask, write) \
	do { \
		if (reg_pattern_test(adapter, data, reg, mask, write)) \
			return 1; \
	} while (0)

#define REG_SET_AND_CHECK(reg, mask, write) \
	do { \
		if (reg_set_and_check(adapter, data, reg, mask, write)) \
			return 1; \
	} while (0)

/* Ethtool offline 'register test': pick the MAC-specific test table,
 * exercise the STATUS register's toggleable bits, then walk the table.
 * Returns 0 on success; 1 on failure with the failing register (or 1
 * for the STATUS test) stored in @data. */
static int igb_reg_test(struct igb_adapter *adapter, u64 *data)
{
	struct e1000_hw *hw = &adapter->hw;
	struct igb_reg_test *test;
	u32 value, before, after;
	u32 i, toggle;

	/* 'toggle' is the set of STATUS bits expected to be writable
	 * (toggleable) on this MAC type */
	switch (adapter->hw.mac.type) {
	case e1000_i350:
	case e1000_i354:
		test = reg_test_i350;
		toggle = 0x7FEFF3FF;
		break;
	case e1000_i210:
	case e1000_i211:
		test = reg_test_i210;
		toggle = 0x7FEFF3FF;
		break;
	case e1000_82580:
		test = reg_test_82580;
		toggle = 0x7FEFF3FF;
		break;
	case e1000_82576:
		test = reg_test_82576;
		toggle = 0x7FFFF3FF;
		break;
	default:
		test = reg_test_82575;
		toggle = 0x7FFFF3FF;
		break;
	}

	/* Because the status register is such a special case,
	 * we handle it separately from the rest of the register
	 * tests.  Some bits are read-only, some toggle, and some
	 * are writable on newer MACs.
	 */
	before = rd32(E1000_STATUS);
	value = (rd32(E1000_STATUS) & toggle);
	wr32(E1000_STATUS, toggle);
	after = rd32(E1000_STATUS) & toggle;
	if (value != after) {
		dev_err(&adapter->pdev->dev,
			"failed STATUS register test got: 0x%08X expected: 0x%08X\n",
			after, value);
		*data = 1;
		return 1;
	}
	/* restore previous status */
	wr32(E1000_STATUS, before);

	/* Perform the remainder of the register test, looping through
	 * the test table until we either fail or reach the null entry.
	 */
	while (test->reg) {
		for (i = 0; i < test->array_len; i++) {
			switch (test->test_type) {
			case PATTERN_TEST:
				REG_PATTERN_TEST(test->reg +
						(i * test->reg_offset),
						test->mask,
						test->write);
				break;
			case SET_READ_TEST:
				REG_SET_AND_CHECK(test->reg +
						(i * test->reg_offset),
						test->mask,
						test->write);
				break;
			case WRITE_NO_TEST:
				/* write-only register: just program it,
				 * nothing to read back */
				writel(test->write,
				       (adapter->hw.hw_addr + test->reg) +
				       (i * test->reg_offset));
				break;
			case TABLE32_TEST:
				REG_PATTERN_TEST(test->reg + (i * 4),
						test->mask,
						test->write);
				break;
			case TABLE64_TEST_LO:
				REG_PATTERN_TEST(test->reg + (i * 8),
						test->mask,
						test->write);
				break;
			case TABLE64_TEST_HI:
				REG_PATTERN_TEST((test->reg + 4) + (i * 8),
						test->mask,
						test->write);
				break;
			}
		}
		test++;
	}
	*data = 0;
	return 0;
}

/* Ethtool offline 'eeprom test': checksum-validate the NVM.  Flashless
 * i210/i211 parts are skipped.  Returns 0 on success; on failure 2 is
 * returned and also stored in @data. */
static int igb_eeprom_test(struct igb_adapter *adapter, u64 *data)
{
	struct e1000_hw *hw = &adapter->hw;

	*data = 0;

	/* Validate eeprom on all parts but flashless */
	switch (hw->mac.type) {
	case e1000_i210:
	case e1000_i211:
		if (igb_get_flash_presence_i210(hw)) {
			if (adapter->hw.nvm.ops.validate(&adapter->hw) < 0)
				*data = 2;
		}
		break;
	default:
		if (adapter->hw.nvm.ops.validate(&adapter->hw) < 0)
			*data = 2;
		break;
	}

	return *data;
}

/* IRQ handler installed only for the duration of igb_intr_test():
 * accumulate the interrupt cause bits so the test can inspect which
 * interrupts actually fired. */
static irqreturn_t igb_test_intr(int irq, void *data)
{
	struct igb_adapter *adapter = (struct igb_adapter *) data;
	struct e1000_hw *hw = &adapter->hw;

	adapter->test_icr |= rd32(E1000_ICR);

	return IRQ_HANDLED;
}

/* Ethtool offline 'interrupt test': hook a temporary handler, then for
 * each writable ICS bit force the interrupt with it masked/unmasked and
 * verify it is (or is not) posted.  Failure code stored in @data. */
static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 mask, ics_mask, i = 0, shared_int = true;
	u32 irq = adapter->pdev->irq;

	*data = 0;

	/* Hook up test interrupt handler just for this test */
	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
		if (request_irq(adapter->msix_entries[0].vector,
				igb_test_intr, 0, netdev->name, adapter)) {
			*data = 1;
			return -1;
		}
	} else if (adapter->flags & IGB_FLAG_HAS_MSI) {
		shared_int = false;
		if (request_irq(irq,
				igb_test_intr, 0, netdev->name, adapter)) {
			*data = 1;
			return -1;
		}
	} else if
(!request_irq(irq, igb_test_intr, IRQF_PROBE_SHARED,
		     netdev->name, adapter)) {
		/* legacy IRQ probed as exclusive — we own the line */
		shared_int = false;
	} else if (request_irq(irq, igb_test_intr, IRQF_SHARED,
		 netdev->name, adapter)) {
		*data = 1;
		return -1;
	}
	dev_info(&adapter->pdev->dev, "testing %s interrupt\n",
		 (shared_int ? "shared" : "unshared"));

	/* Disable all the interrupts */
	wr32(E1000_IMC, ~0);
	wrfl();
	usleep_range(10000, 11000);

	/* Define all writable bits for ICS */
	switch (hw->mac.type) {
	case e1000_82575:
		ics_mask = 0x37F47EDD;
		break;
	case e1000_82576:
		ics_mask = 0x77D4FBFD;
		break;
	case e1000_82580:
		ics_mask = 0x77DCFED5;
		break;
	case e1000_i350:
	case e1000_i354:
	case e1000_i210:
	case e1000_i211:
		ics_mask = 0x77DCFED5;
		break;
	default:
		ics_mask = 0x7FFFFFFF;
		break;
	}

	/* Test each interrupt */
	for (; i < 31; i++) {
		/* Interrupt to test */
		mask = 1 << i;

		if (!(mask & ics_mask))
			continue;

		if (!shared_int) {
			/* Disable the interrupt to be reported in
			 * the cause register and then force the same
			 * interrupt and see if one gets posted.  If
			 * an interrupt was posted to the bus, the
			 * test failed.
			 */
			adapter->test_icr = 0;

			/* Flush any pending interrupts */
			wr32(E1000_ICR, ~0);

			wr32(E1000_IMC, mask);
			wr32(E1000_ICS, mask);
			wrfl();
			usleep_range(10000, 11000);

			if (adapter->test_icr & mask) {
				*data = 3;
				break;
			}
		}

		/* Enable the interrupt to be reported in
		 * the cause register and then force the same
		 * interrupt and see if one gets posted.  If
		 * an interrupt was not posted to the bus, the
		 * test failed.
		 */
		adapter->test_icr = 0;

		/* Flush any pending interrupts */
		wr32(E1000_ICR, ~0);

		wr32(E1000_IMS, mask);
		wr32(E1000_ICS, mask);
		wrfl();
		usleep_range(10000, 11000);

		if (!(adapter->test_icr & mask)) {
			*data = 4;
			break;
		}

		if (!shared_int) {
			/* Disable the other interrupts to be reported in
			 * the cause register and then force the other
			 * interrupts and see if any get posted.  If
			 * an interrupt was posted to the bus, the
			 * test failed.
			 */
			adapter->test_icr = 0;

			/* Flush any pending interrupts */
			wr32(E1000_ICR, ~0);

			wr32(E1000_IMC, ~mask);
			wr32(E1000_ICS, ~mask);
			wrfl();
			usleep_range(10000, 11000);

			if (adapter->test_icr & mask) {
				*data = 5;
				break;
			}
		}
	}

	/* Disable all the interrupts */
	wr32(E1000_IMC, ~0);
	wrfl();
	usleep_range(10000, 11000);

	/* Unhook test interrupt handler */
	if (adapter->flags & IGB_FLAG_HAS_MSIX)
		free_irq(adapter->msix_entries[0].vector, adapter);
	else
		free_irq(irq, adapter);

	return *data;
}

/* Release the dedicated Tx/Rx rings used by the loopback test. */
static void igb_free_desc_rings(struct igb_adapter *adapter)
{
	igb_free_tx_resources(&adapter->test_tx_ring);
	igb_free_rx_resources(&adapter->test_rx_ring);
}

/* Allocate and configure one Tx and one Rx ring for the loopback test.
 * Returns 0 on success or a non-zero diagnostic code (1 = Tx alloc
 * failed, 3 = Rx alloc failed); partial allocations are freed. */
static int igb_setup_desc_rings(struct igb_adapter *adapter)
{
	struct igb_ring *tx_ring = &adapter->test_tx_ring;
	struct igb_ring *rx_ring = &adapter->test_rx_ring;
	struct e1000_hw *hw = &adapter->hw;
	int ret_val;

	/* Setup Tx descriptor ring and Tx buffers */
	tx_ring->count = IGB_DEFAULT_TXD;
	tx_ring->dev = &adapter->pdev->dev;
	tx_ring->netdev = adapter->netdev;
	tx_ring->reg_idx = adapter->vfs_allocated_count;

	if (igb_setup_tx_resources(tx_ring)) {
		ret_val = 1;
		goto err_nomem;
	}

	igb_setup_tctl(adapter);
	igb_configure_tx_ring(adapter, tx_ring);

	/* Setup Rx descriptor ring and Rx buffers */
	rx_ring->count = IGB_DEFAULT_RXD;
	rx_ring->dev = &adapter->pdev->dev;
	rx_ring->netdev = adapter->netdev;
	rx_ring->reg_idx = adapter->vfs_allocated_count;

	if (igb_setup_rx_resources(rx_ring)) {
		ret_val = 3;
		goto err_nomem;
	}

	/* set the default queue to queue 0 of PF */
	wr32(E1000_MRQC, adapter->vfs_allocated_count << 3);

	/* enable receive ring */
	igb_setup_rctl(adapter);
	igb_configure_rx_ring(adapter, rx_ring);

	igb_alloc_rx_buffers(rx_ring, igb_desc_unused(rx_ring));

	return 0;

err_nomem:
	igb_free_desc_rings(adapter);
	return ret_val;
}

/* Disable the PHY receiver via (undocumented) PHY registers 29/30 so a
 * reconnected cable does not trigger autoneg during the test. */
static void igb_phy_disable_receiver(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* Write out to PHY registers 29 and 30 to disable the Receiver. */
	igb_write_phy_reg(hw, 29, 0x001F);
	igb_write_phy_reg(hw, 30, 0x8FFC);
	igb_write_phy_reg(hw, 29, 0x001A);
	igb_write_phy_reg(hw, 30, 0x8FF0);
}

/* Put the internal PHY into loopback at forced 1000/full and program
 * the MAC to the matching forced speed/duplex.  Always returns 0. */
static int igb_integrated_phy_loopback(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_reg = 0;

	hw->mac.autoneg = false;

	if (hw->phy.type == e1000_phy_m88) {
		if (hw->phy.id != I210_I_PHY_ID) {
			/* Auto-MDI/MDIX Off */
			igb_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 0x0808);
			/* reset to update Auto-MDI/MDIX */
			igb_write_phy_reg(hw, PHY_CONTROL, 0x9140);
			/* autoneg off */
			igb_write_phy_reg(hw, PHY_CONTROL, 0x8140);
		} else {
			/* force 1000, set loopback */
			igb_write_phy_reg(hw, I347AT4_PAGE_SELECT, 0);
			igb_write_phy_reg(hw, PHY_CONTROL, 0x4140);
		}
	} else if (hw->phy.type == e1000_phy_82580) {
		/* enable MII loopback */
		igb_write_phy_reg(hw, I82580_PHY_LBK_CTRL, 0x8041);
	}

	/* add small delay to avoid loopback test failure */
	msleep(50);

	/* force 1000, set loopback */
	igb_write_phy_reg(hw, PHY_CONTROL, 0x4140);

	/* Now set up the MAC to the same speed/duplex as the PHY. */
	ctrl_reg = rd32(E1000_CTRL);
	ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
	ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
		     E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
		     E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */
		     E1000_CTRL_FD |	 /* Force Duplex to FULL */
		     E1000_CTRL_SLU);	 /* Set link up enable bit */

	if (hw->phy.type == e1000_phy_m88)
		ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */

	wr32(E1000_CTRL, ctrl_reg);

	/* Disable the receiver on the PHY so when a cable is plugged in, the
	 * PHY does not begin to autoneg when a cable is reconnected to the NIC.
	 */
	if (hw->phy.type == e1000_phy_m88)
		igb_phy_disable_receiver(adapter);

	mdelay(500);
	return 0;
}

/* Thin wrapper kept for symmetry with the serdes path in
 * igb_setup_loopback_test(). */
static int igb_set_phy_loopback(struct igb_adapter *adapter)
{
	return igb_integrated_phy_loopback(adapter);
}

/* Configure loopback for the test: serdes/SGMII links get MAC-level
 * transceiver loopback with forced PCS link; copper links fall back to
 * PHY loopback.  Returns 0 on success. */
static int igb_setup_loopback_test(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 reg;

	reg = rd32(E1000_CTRL_EXT);

	/* use CTRL_EXT to identify link type as SGMII can appear as copper */
	if (reg & E1000_CTRL_EXT_LINK_MODE_MASK) {
		if ((hw->device_id == E1000_DEV_ID_DH89XXCC_SGMII) ||
		    (hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) ||
		    (hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) ||
		    (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP) ||
		    (hw->device_id == E1000_DEV_ID_I354_SGMII) ||
		    (hw->device_id == E1000_DEV_ID_I354_BACKPLANE_2_5GBPS)) {
			/* Enable DH89xxCC MPHY for near end loopback */
			reg = rd32(E1000_MPHY_ADDR_CTL);
			reg = (reg & E1000_MPHY_ADDR_CTL_OFFSET_MASK) |
			E1000_MPHY_PCS_CLK_REG_OFFSET;
			wr32(E1000_MPHY_ADDR_CTL, reg);

			reg = rd32(E1000_MPHY_DATA);
			reg |= E1000_MPHY_PCS_CLK_REG_DIGINELBEN;
			wr32(E1000_MPHY_DATA, reg);
		}

		reg = rd32(E1000_RCTL);
		reg |= E1000_RCTL_LBM_TCVR;
		wr32(E1000_RCTL, reg);

		wr32(E1000_SCTL, E1000_ENABLE_SERDES_LOOPBACK);

		reg = rd32(E1000_CTRL);
		reg &= ~(E1000_CTRL_RFCE |
			 E1000_CTRL_TFCE |
			 E1000_CTRL_LRST);
		reg |= E1000_CTRL_SLU |
		       E1000_CTRL_FD;
		wr32(E1000_CTRL, reg);

		/* Unset switch control to serdes energy detect */
		reg = rd32(E1000_CONNSW);
		reg &= ~E1000_CONNSW_ENRGSRC;
		wr32(E1000_CONNSW, reg);

		/* Unset sigdetect for SERDES loopback on
		 * 82580 and newer devices.
		 */
		if (hw->mac.type >= e1000_82580) {
			reg = rd32(E1000_PCS_CFG0);
			reg |= E1000_PCS_CFG_IGN_SD;
			wr32(E1000_PCS_CFG0, reg);
		}

		/* Set PCS register for forced speed */
		reg = rd32(E1000_PCS_LCTL);
		reg &= ~E1000_PCS_LCTL_AN_ENABLE;     /* Disable Autoneg*/
		reg |= E1000_PCS_LCTL_FLV_LINK_UP |   /* Force link up */
		       E1000_PCS_LCTL_FSV_1000 |      /* Force 1000    */
		       E1000_PCS_LCTL_FDV_FULL |      /* SerDes Full duplex */
		       E1000_PCS_LCTL_FSD |           /* Force Speed */
		       E1000_PCS_LCTL_FORCE_LINK;     /* Force Link */
		wr32(E1000_PCS_LCTL, reg);

		return 0;
	}

	return igb_set_phy_loopback(adapter);
}

/* Undo both the MAC-level and PHY-level loopback configuration set up
 * by igb_setup_loopback_test()/igb_set_phy_loopback(). */
static void igb_loopback_cleanup(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl;
	u16 phy_reg;

	/* NOTE(review): unlike igb_setup_loopback_test(), this list omits
	 * E1000_DEV_ID_I354_BACKPLANE_2_5GBPS — confirm whether the MPHY
	 * near-end-loopback bit also needs clearing on that device. */
	if ((hw->device_id == E1000_DEV_ID_DH89XXCC_SGMII) ||
	    (hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) ||
	    (hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) ||
	    (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP) ||
	    (hw->device_id == E1000_DEV_ID_I354_SGMII)) {
		u32 reg;

		/* Disable near end loopback on DH89xxCC */
		reg = rd32(E1000_MPHY_ADDR_CTL);
		reg = (reg & E1000_MPHY_ADDR_CTL_OFFSET_MASK) |
		E1000_MPHY_PCS_CLK_REG_OFFSET;
		wr32(E1000_MPHY_ADDR_CTL, reg);

		reg = rd32(E1000_MPHY_DATA);
		reg &= ~E1000_MPHY_PCS_CLK_REG_DIGINELBEN;
		wr32(E1000_MPHY_DATA, reg);
	}

	rctl = rd32(E1000_RCTL);
	rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
	wr32(E1000_RCTL, rctl);

	hw->mac.autoneg = true;
	igb_read_phy_reg(hw, PHY_CONTROL, &phy_reg);
	if (phy_reg & MII_CR_LOOPBACK) {
		phy_reg &= ~MII_CR_LOOPBACK;
		igb_write_phy_reg(hw, PHY_CONTROL, phy_reg);
		igb_phy_sw_reset(hw);
	}
}

/* Fill the test skb with a recognizable pattern: 0xFF in the first
 * half, 0xAA in the second, plus 0xBE/0xAF marker bytes that
 * igb_check_lbtest_frame() looks for. */
static void igb_create_lbtest_frame(struct sk_buff *skb,
				    unsigned int frame_size)
{
	memset(skb->data, 0xFF, frame_size);
	frame_size /= 2;
	memset(&skb->data[frame_size], 0xAA, frame_size - 1);
	memset(&skb->data[frame_size + 10], 0xBE, 1);
	memset(&skb->data[frame_size + 12], 0xAF, 1);
}

/* Verify a received loopback buffer carries the pattern written by
 * igb_create_lbtest_frame().  Returns true (match) or false. */
static int igb_check_lbtest_frame(struct igb_rx_buffer *rx_buffer,
				  unsigned int frame_size)
{
	unsigned char *data;
	bool match = true;

	frame_size >>= 1;

	data =
kmap(rx_buffer->page);

	/* spot-check the marker bytes rather than the whole frame */
	if (data[3] != 0xFF ||
	    data[frame_size + 10] != 0xBE ||
	    data[frame_size + 12] != 0xAF)
		match = false;

	kunmap(rx_buffer->page);

	return match;
}

/* Walk completed Rx descriptors on the test rings, count the frames
 * that match the loopback pattern, release the corresponding Tx
 * buffers, and re-arm the Rx ring.  Returns the good-frame count. */
static int igb_clean_test_rings(struct igb_ring *rx_ring,
				struct igb_ring *tx_ring,
				unsigned int size)
{
	union e1000_adv_rx_desc *rx_desc;
	struct igb_rx_buffer *rx_buffer_info;
	struct igb_tx_buffer *tx_buffer_info;
	u16 rx_ntc, tx_ntc, count = 0;

	/* initialize next to clean and descriptor values */
	rx_ntc = rx_ring->next_to_clean;
	tx_ntc = tx_ring->next_to_clean;
	rx_desc = IGB_RX_DESC(rx_ring, rx_ntc);

	while (igb_test_staterr(rx_desc, E1000_RXD_STAT_DD)) {
		/* check Rx buffer */
		rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc];

		/* sync Rx buffer for CPU read */
		dma_sync_single_for_cpu(rx_ring->dev,
					rx_buffer_info->dma,
					IGB_RX_BUFSZ,
					DMA_FROM_DEVICE);

		/* verify contents of skb */
		if (igb_check_lbtest_frame(rx_buffer_info, size))
			count++;

		/* sync Rx buffer for device write */
		dma_sync_single_for_device(rx_ring->dev,
					   rx_buffer_info->dma,
					   IGB_RX_BUFSZ,
					   DMA_FROM_DEVICE);

		/* unmap buffer on Tx side */
		tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc];
		igb_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);

		/* increment Rx/Tx next to clean counters */
		rx_ntc++;
		if (rx_ntc == rx_ring->count)
			rx_ntc = 0;
		tx_ntc++;
		if (tx_ntc == tx_ring->count)
			tx_ntc = 0;

		/* fetch next descriptor */
		rx_desc = IGB_RX_DESC(rx_ring, rx_ntc);
	}

	netdev_tx_reset_queue(txring_txq(tx_ring));

	/* re-map buffers to ring, store next to clean values */
	igb_alloc_rx_buffers(rx_ring, count);
	rx_ring->next_to_clean = rx_ntc;
	tx_ring->next_to_clean = tx_ntc;

	return count;
}

/* Drive the loopback test: transmit batches of 64 pattern frames and
 * verify all 64 come back intact each round.  Returns 0 on success or
 * a diagnostic code (11 = alloc, 12 = Tx, 13 = Rx failure). */
static int igb_run_loopback_test(struct igb_adapter *adapter)
{
	struct igb_ring *tx_ring = &adapter->test_tx_ring;
	struct igb_ring *rx_ring = &adapter->test_rx_ring;
	u16 i, j, lc, good_cnt;
	int ret_val = 0;
	unsigned int size = IGB_RX_HDR_LEN;
	netdev_tx_t tx_ret_val;
	struct sk_buff *skb;

	/* allocate test skb */
	skb = alloc_skb(size, GFP_KERNEL);
	if (!skb)
		return 11;

	/* place data into test skb */
	igb_create_lbtest_frame(skb, size);
	skb_put(skb, size);

	/* Calculate the loop count based on the largest descriptor ring
	 * The idea is to wrap the largest ring a number of times using 64
	 * send/receive pairs during each loop
	 */

	if (rx_ring->count <= tx_ring->count)
		lc = ((tx_ring->count / 64) * 2) + 1;
	else
		lc = ((rx_ring->count / 64) * 2) + 1;

	for (j = 0; j <= lc; j++) { /* loop count loop */
		/* reset count of good packets */
		good_cnt = 0;

		/* place 64 packets on the transmit queue*/
		for (i = 0; i < 64; i++) {
			skb_get(skb);
			tx_ret_val = igb_xmit_frame_ring(skb, tx_ring);
			if (tx_ret_val == NETDEV_TX_OK)
				good_cnt++;
		}

		if (good_cnt != 64) {
			ret_val = 12;
			break;
		}

		/* allow 200 milliseconds for packets to go from Tx to Rx */
		msleep(200);

		good_cnt = igb_clean_test_rings(rx_ring, tx_ring, size);
		if (good_cnt != 64) {
			ret_val = 13;
			break;
		}
	} /* end loop count loop */

	/* free the original skb */
	kfree_skb(skb);

	return ret_val;
}

/* Ethtool offline 'loopback test' wrapper: set up dedicated rings and
 * loopback mode, run the test, and always tear everything down again.
 * Result (0 = pass) is stored in @data and returned. */
static int igb_loopback_test(struct igb_adapter *adapter, u64 *data)
{
	/* PHY loopback cannot be performed if SoL/IDER
	 * sessions are active
	 */
	if (igb_check_reset_block(&adapter->hw)) {
		dev_err(&adapter->pdev->dev,
			"Cannot do PHY loopback test when SoL/IDER is active.\n");
		*data = 0;
		goto out;
	}

	if (adapter->hw.mac.type == e1000_i354) {
		dev_info(&adapter->pdev->dev,
			 "Loopback test not supported on i354.\n");
		*data = 0;
		goto out;
	}
	*data = igb_setup_desc_rings(adapter);
	if (*data)
		goto out;
	*data = igb_setup_loopback_test(adapter);
	if (*data)
		goto err_loopback;
	*data = igb_run_loopback_test(adapter);
	igb_loopback_cleanup(adapter);

err_loopback:
	igb_free_desc_rings(adapter);
out:
	return *data;
}

/* Ethtool 'link test': poll for link (serdes links retried for up to
 * ~75 s; copper waits 5 s for autoneg).  0 = link up, 1 = no link;
 * the result is stored in @data and returned. */
static int igb_link_test(struct igb_adapter *adapter, u64 *data)
{
	struct e1000_hw *hw = &adapter->hw;
	*data = 0;
	if (hw->phy.media_type == e1000_media_type_internal_serdes) {
		int i = 0;

		hw->mac.serdes_has_link = false;

		/* On some blade server designs, link establishment
		 * could take as long as 2-3 minutes
		 */
		do {
			hw->mac.ops.check_for_link(&adapter->hw);
			if (hw->mac.serdes_has_link)
				return *data;
			msleep(20);
		} while (i++ < 3750);

		*data = 1;
	} else {
		hw->mac.ops.check_for_link(&adapter->hw);
		if (hw->mac.autoneg)
			msleep(5000);

		if (!(rd32(E1000_STATUS) & E1000_STATUS_LU))
			*data = 1;
	}
	return *data;
}

/* Ethtool .self_test entry point: run either the full offline suite
 * (link, register, eeprom, interrupt, loopback) or the online link
 * test, recording per-test results in @data and failure flags in
 * @eth_test->flags. */
static void igb_diag_test(struct net_device *netdev,
			  struct ethtool_test *eth_test, u64 *data)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	u16 autoneg_advertised;
	u8 forced_speed_duplex, autoneg;
	bool if_running = netif_running(netdev);

	set_bit(__IGB_TESTING, &adapter->state);

	/* can't do offline tests on media switching devices */
	if (adapter->hw.dev_spec._82575.mas_capable)
		eth_test->flags &= ~ETH_TEST_FL_OFFLINE;
	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
		/* Offline tests */

		/* save speed, duplex, autoneg settings */
		autoneg_advertised = adapter->hw.phy.autoneg_advertised;
		forced_speed_duplex = adapter->hw.mac.forced_speed_duplex;
		autoneg = adapter->hw.mac.autoneg;

		dev_info(&adapter->pdev->dev, "offline testing starting\n");

		/* power up link for link test */
		igb_power_up_link(adapter);

		/* Link test performed before hardware reset so autoneg doesn't
		 * interfere with test result
		 */
		if (igb_link_test(adapter, &data[4]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		if (if_running)
			/* indicate we're in test mode */
			dev_close(netdev);
		else
			igb_reset(adapter);

		if (igb_reg_test(adapter, &data[0]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		igb_reset(adapter);
		if (igb_eeprom_test(adapter, &data[1]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		igb_reset(adapter);
		if (igb_intr_test(adapter, &data[2]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		igb_reset(adapter);
		/* power up link for loopback test */
		igb_power_up_link(adapter);
		if (igb_loopback_test(adapter, &data[3]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		/* restore speed, duplex, autoneg settings */
		adapter->hw.phy.autoneg_advertised = autoneg_advertised;
		adapter->hw.mac.forced_speed_duplex = forced_speed_duplex;
		adapter->hw.mac.autoneg =
autoneg;

		/* force this routine to wait until autoneg complete/timeout */
		adapter->hw.phy.autoneg_wait_to_complete = true;
		igb_reset(adapter);
		adapter->hw.phy.autoneg_wait_to_complete = false;

		clear_bit(__IGB_TESTING, &adapter->state);
		if (if_running)
			dev_open(netdev);
	} else {
		dev_info(&adapter->pdev->dev, "online testing starting\n");

		/* PHY is powered down when interface is down */
		if (if_running && igb_link_test(adapter, &data[4]))
			eth_test->flags |= ETH_TEST_FL_FAILED;
		else
			data[4] = 0;

		/* Online tests aren't run; pass by default */
		data[0] = 0;
		data[1] = 0;
		data[2] = 0;
		data[3] = 0;

		clear_bit(__IGB_TESTING, &adapter->state);
	}
	msleep_interruptible(4 * 1000);
}

/* Ethtool .get_wol: report supported wake options and translate the
 * adapter's WUFC bits into WAKE_* flags. */
static void igb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	wol->wolopts = 0;

	if (!(adapter->flags & IGB_FLAG_WOL_SUPPORTED))
		return;

	wol->supported = WAKE_UCAST | WAKE_MCAST |
			 WAKE_BCAST | WAKE_MAGIC |
			 WAKE_PHY;

	/* apply any specific unsupported masks here */
	switch (adapter->hw.device_id) {
	default:
		break;
	}

	if (adapter->wol & E1000_WUFC_EX)
		wol->wolopts |= WAKE_UCAST;
	if (adapter->wol & E1000_WUFC_MC)
		wol->wolopts |= WAKE_MCAST;
	if (adapter->wol & E1000_WUFC_BC)
		wol->wolopts |= WAKE_BCAST;
	if (adapter->wol & E1000_WUFC_MAG)
		wol->wolopts |= WAKE_MAGIC;
	if (adapter->wol & E1000_WUFC_LNKC)
		wol->wolopts |= WAKE_PHY;
}

/* Ethtool .set_wol: translate WAKE_* flags back into WUFC bits and
 * program PM wakeup.  Rejects unsupported options with -EOPNOTSUPP. */
static int igb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE))
		return -EOPNOTSUPP;

	if (!(adapter->flags & IGB_FLAG_WOL_SUPPORTED))
		return wol->wolopts ? -EOPNOTSUPP : 0;

	/* these settings will always override what we currently have */
	adapter->wol = 0;

	if (wol->wolopts & WAKE_UCAST)
		adapter->wol |= E1000_WUFC_EX;
	if (wol->wolopts & WAKE_MCAST)
		adapter->wol |= E1000_WUFC_MC;
	if (wol->wolopts & WAKE_BCAST)
		adapter->wol |= E1000_WUFC_BC;
	if (wol->wolopts & WAKE_MAGIC)
		adapter->wol |= E1000_WUFC_MAG;
	if (wol->wolopts & WAKE_PHY)
		adapter->wol |= E1000_WUFC_LNKC;
	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	return 0;
}

/* bit defines for adapter->led_status */
#define IGB_LED_ON		0

/* Ethtool .set_phys_id: drive the identify-LED state machine. */
static int igb_set_phys_id(struct net_device *netdev,
			   enum ethtool_phys_id_state state)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		igb_blink_led(hw);
		return 2;
	case ETHTOOL_ID_ON:
		igb_blink_led(hw);
		break;
	case ETHTOOL_ID_OFF:
		igb_led_off(hw);
		break;
	case ETHTOOL_ID_INACTIVE:
		igb_led_off(hw);
		clear_bit(IGB_LED_ON, &adapter->led_status);
		igb_cleanup_led(hw);
		break;
	}

	return 0;
}

/* Ethtool .set_coalesce: validate and store the Rx/Tx interrupt
 * throttle settings (values 1-3 are special ITR modes), then push the
 * new ITR to every queue vector. */
static int igb_set_coalesce(struct net_device *netdev,
			    struct ethtool_coalesce *ec)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	int i;

	if ((ec->rx_coalesce_usecs > IGB_MAX_ITR_USECS) ||
	    ((ec->rx_coalesce_usecs > 3) &&
	     (ec->rx_coalesce_usecs < IGB_MIN_ITR_USECS)) ||
	    (ec->rx_coalesce_usecs == 2))
		return -EINVAL;

	if ((ec->tx_coalesce_usecs > IGB_MAX_ITR_USECS) ||
	    ((ec->tx_coalesce_usecs > 3) &&
	     (ec->tx_coalesce_usecs < IGB_MIN_ITR_USECS)) ||
	    (ec->tx_coalesce_usecs == 2))
		return -EINVAL;

	/* with paired queues Tx shares the Rx setting */
	if ((adapter->flags & IGB_FLAG_QUEUE_PAIRS) && ec->tx_coalesce_usecs)
		return -EINVAL;

	/* If ITR is disabled, disable DMAC */
	if (ec->rx_coalesce_usecs == 0) {
		if (adapter->flags & IGB_FLAG_DMAC)
			adapter->flags &= ~IGB_FLAG_DMAC;
	}

	/* convert to rate of irq's per second */
	if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3)
		adapter->rx_itr_setting = ec->rx_coalesce_usecs;
	else
		adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2;

	/* convert to rate of irq's per second */
	if (adapter->flags & IGB_FLAG_QUEUE_PAIRS)
		adapter->tx_itr_setting = adapter->rx_itr_setting;
	else if (ec->tx_coalesce_usecs && ec->tx_coalesce_usecs <= 3)
		adapter->tx_itr_setting = ec->tx_coalesce_usecs;
	else
		adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2;

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		q_vector->tx.work_limit = adapter->tx_work_limit;
		if (q_vector->rx.ring)
			q_vector->itr_val = adapter->rx_itr_setting;
		else
			q_vector->itr_val = adapter->tx_itr_setting;
		if (q_vector->itr_val && q_vector->itr_val <= 3)
			q_vector->itr_val = IGB_START_ITR;
		q_vector->set_itr = 1;
	}

	return 0;
}

/* Ethtool .get_coalesce: report the stored ITR settings, converting
 * back from register units (Tx omitted when queues are paired). */
static int igb_get_coalesce(struct net_device *netdev,
			    struct ethtool_coalesce *ec)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (adapter->rx_itr_setting <= 3)
		ec->rx_coalesce_usecs = adapter->rx_itr_setting;
	else
		ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;

	if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS)) {
		if (adapter->tx_itr_setting <= 3)
			ec->tx_coalesce_usecs = adapter->tx_itr_setting;
		else
			ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2;
	}

	return 0;
}

/* Ethtool .nway_reset: restart the link by reinitializing the device
 * (only meaningful while the interface is running). */
static int igb_nway_reset(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	if (netif_running(netdev))
		igb_reinit_locked(adapter);
	return 0;
}

/* Ethtool .get_sset_count: number of strings per string set. */
static int igb_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return IGB_STATS_LEN;
	case ETH_SS_TEST:
		return IGB_TEST_LEN;
	default:
		return -ENOTSUPP;
	}
}

/* Ethtool .get_ethtool_stats: copy global, netdev, and per-queue
 * counters into @data (layout must match igb_get_strings()), using the
 * u64 seqcount retry loops for torn-read-safe per-ring stats. */
static void igb_get_ethtool_stats(struct net_device *netdev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct rtnl_link_stats64 *net_stats = &adapter->stats64;
	unsigned int start;
	struct igb_ring *ring;
	int i, j;
	char *p;

	spin_lock(&adapter->stats64_lock);
	igb_update_stats(adapter, net_stats);

	for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) {
		p = (char *)adapter + igb_gstrings_stats[i].stat_offset;
		data[i] = (igb_gstrings_stats[i].sizeof_stat ==
			sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
	}
	for (j = 0; j < IGB_NETDEV_STATS_LEN; j++, i++) {
		p = (char *)net_stats + igb_gstrings_net_stats[j].stat_offset;
		data[i] = (igb_gstrings_net_stats[j].sizeof_stat ==
			sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
	}
	for (j = 0; j < adapter->num_tx_queues; j++) {
		u64	restart2;

		ring = adapter->tx_ring[j];
		do {
			start = u64_stats_fetch_begin_irq(&ring->tx_syncp);
			data[i]   = ring->tx_stats.packets;
			data[i+1] = ring->tx_stats.bytes;
			data[i+2] = ring->tx_stats.restart_queue;
		} while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start));
		do {
			start = u64_stats_fetch_begin_irq(&ring->tx_syncp2);
			restart2  = ring->tx_stats.restart_queue2;
		} while (u64_stats_fetch_retry_irq(&ring->tx_syncp2, start));
		data[i+2] += restart2;

		i += IGB_TX_QUEUE_STATS_LEN;
	}
	for (j = 0; j < adapter->num_rx_queues; j++) {
		ring = adapter->rx_ring[j];
		do {
			start = u64_stats_fetch_begin_irq(&ring->rx_syncp);
			data[i]   = ring->rx_stats.packets;
			data[i+1] = ring->rx_stats.bytes;
			data[i+2] = ring->rx_stats.drops;
			data[i+3] = ring->rx_stats.csum_err;
			data[i+4] = ring->rx_stats.alloc_failed;
		} while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
		i += IGB_RX_QUEUE_STATS_LEN;
	}
	spin_unlock(&adapter->stats64_lock);
}

/* Ethtool .get_strings: emit the test names or the statistic names
 * (ordering must match igb_get_ethtool_stats()). */
static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_TEST:
		memcpy(data, *igb_gstrings_test,
			IGB_TEST_LEN*ETH_GSTRING_LEN);
		break;
	case ETH_SS_STATS:
		for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) {
			memcpy(p, igb_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < IGB_NETDEV_STATS_LEN; i++) {
			memcpy(p, igb_gstrings_net_stats[i].stat_string,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < adapter->num_tx_queues; i++) {
			sprintf(p, "tx_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "tx_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "tx_queue_%u_restart", i);
			p += ETH_GSTRING_LEN;
		}
for (i = 0; i < adapter->num_rx_queues; i++) {
			/* per-Rx-queue counter names */
			sprintf(p, "rx_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rx_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rx_queue_%u_drops", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rx_queue_%u_csum_err", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rx_queue_%u_alloc_failed", i);
			p += ETH_GSTRING_LEN;
		}
		/* BUG_ON(p - data != IGB_STATS_LEN * ETH_GSTRING_LEN); */
		break;
	}
}

/* Ethtool .get_ts_info: report timestamping capabilities per MAC type
 * (82575 is software-only; 82576+ add hardware timestamping, with the
 * all-packets Rx filter only on 82580 and newer). */
static int igb_get_ts_info(struct net_device *dev,
			   struct ethtool_ts_info *info)
{
	struct igb_adapter *adapter = netdev_priv(dev);

	if (adapter->ptp_clock)
		info->phc_index = ptp_clock_index(adapter->ptp_clock);
	else
		info->phc_index = -1;

	switch (adapter->hw.mac.type) {
	case e1000_82575:
		info->so_timestamping =
			SOF_TIMESTAMPING_TX_SOFTWARE |
			SOF_TIMESTAMPING_RX_SOFTWARE |
			SOF_TIMESTAMPING_SOFTWARE;
		return 0;
	case e1000_82576:
	case e1000_82580:
	case e1000_i350:
	case e1000_i354:
	case e1000_i210:
	case e1000_i211:
		info->so_timestamping =
			SOF_TIMESTAMPING_TX_SOFTWARE |
			SOF_TIMESTAMPING_RX_SOFTWARE |
			SOF_TIMESTAMPING_SOFTWARE |
			SOF_TIMESTAMPING_TX_HARDWARE |
			SOF_TIMESTAMPING_RX_HARDWARE |
			SOF_TIMESTAMPING_RAW_HARDWARE;

		info->tx_types =
			(1 << HWTSTAMP_TX_OFF) |
			(1 << HWTSTAMP_TX_ON);

		info->rx_filters = 1 << HWTSTAMP_FILTER_NONE;

		/* 82576 does not support timestamping all packets. */
		if (adapter->hw.mac.type >= e1000_82580)
			info->rx_filters |= 1 << HWTSTAMP_FILTER_ALL;
		else
			info->rx_filters |=
				(1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
				(1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
				(1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
				(1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
				(1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
				(1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
				(1 << HWTSTAMP_FILTER_PTP_V2_EVENT);

		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

/* ETHTOOL_GRXFH helper: report which header fields feed the RSS hash
 * for @cmd->flow_type (case fall-through is intentional: the L4-port
 * bits accumulate on top of the src/dst IP bits). */
static int igb_get_rss_hash_opts(struct igb_adapter *adapter,
				 struct ethtool_rxnfc *cmd)
{
	cmd->data = 0;

	/* Report default options for RSS on igb */
	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* Fall through */
	case UDP_V4_FLOW:
		if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP)
			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* Fall through */
	case SCTP_V4_FLOW:
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case IPV4_FLOW:
		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	case TCP_V6_FLOW:
		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* Fall through */
	case UDP_V6_FLOW:
		if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP)
			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* Fall through */
	case SCTP_V6_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case IPV6_FLOW:
		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/* Ethtool .get_rxnfc: dispatch the supported sub-commands (ring count
 * query and RSS hash-field query). */
static int igb_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			 u32 *rule_locs)
{
	struct igb_adapter *adapter = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = adapter->num_rx_queues;
		ret = 0;
		break;
	case ETHTOOL_GRXFH:
		ret = igb_get_rss_hash_opts(adapter, cmd);
		break;
	default:
		break;
	}

	return ret;
}

#define UDP_RSS_FLAGS (IGB_FLAG_RSS_FIELD_IPV4_UDP | \
		       IGB_FLAG_RSS_FIELD_IPV6_UDP)

/* ETHTOOL_SRXFH helper: validate the requested RSS hash fields for
 * @nfc->flow_type (only the UDP L4-port bits are actually tunable) and
 * reprogram MRQC when the UDP-RSS flags change. */
static int igb_set_rss_hash_opt(struct igb_adapter *adapter,
				struct ethtool_rxnfc *nfc)
{
	u32 flags = adapter->flags;

	/* RSS does not support anything other than hashing
	 * to queues on src and dst IPs and ports
	 */
	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		/* TCP hashing is fixed: IPs + ports are mandatory */
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST) ||
		    !(nfc->data & RXH_L4_B_0_1) ||
		    !(nfc->data & RXH_L4_B_2_3))
			return -EINVAL;
		break;
	case UDP_V4_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST))
			return -EINVAL;
		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			flags &= ~IGB_FLAG_RSS_FIELD_IPV4_UDP;
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			flags |= IGB_FLAG_RSS_FIELD_IPV4_UDP;
			break;
		default:
			return -EINVAL;
		}
		break;
	case UDP_V6_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST))
			return -EINVAL;
		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			flags &= ~IGB_FLAG_RSS_FIELD_IPV6_UDP;
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			flags |= IGB_FLAG_RSS_FIELD_IPV6_UDP;
			break;
		default:
			return -EINVAL;
		}
		break;
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case SCTP_V4_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case SCTP_V6_FLOW:
		/* these flow types hash on IPs only — no port bits allowed */
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST) ||
		    (nfc->data & RXH_L4_B_0_1) ||
		    (nfc->data & RXH_L4_B_2_3))
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	/* if we changed something we need to update flags */
	if (flags != adapter->flags) {
		struct e1000_hw *hw = &adapter->hw;
		u32 mrqc = rd32(E1000_MRQC);

		if ((flags & UDP_RSS_FLAGS) &&
		    !(adapter->flags & UDP_RSS_FLAGS))
			dev_err(&adapter->pdev->dev,
				"enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n");

		adapter->flags = flags;

		/* Perform hash on these packet types */
		mrqc |= E1000_MRQC_RSS_FIELD_IPV4 |
			E1000_MRQC_RSS_FIELD_IPV4_TCP |
			E1000_MRQC_RSS_FIELD_IPV6 |
			E1000_MRQC_RSS_FIELD_IPV6_TCP;

		mrqc &= ~(E1000_MRQC_RSS_FIELD_IPV4_UDP |
			  E1000_MRQC_RSS_FIELD_IPV6_UDP);

		if (flags & IGB_FLAG_RSS_FIELD_IPV4_UDP)
			mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;

		if (flags & IGB_FLAG_RSS_FIELD_IPV6_UDP)
			mrqc |=
E1000_MRQC_RSS_FIELD_IPV6_UDP;

		wr32(E1000_MRQC, mrqc);
	}

	return 0;
}

/* Ethtool .set_rxnfc: dispatch the supported sub-command (RSS
 * hash-field update). */
static int igb_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	struct igb_adapter *adapter = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_SRXFH:
		ret = igb_set_rss_hash_opt(adapter, cmd);
		break;
	default:
		break;
	}

	return ret;
}

/* Ethtool .get_eee: report Energy Efficient Ethernet state, link
 * partner advertisement, and LPI status.  Only supported on copper
 * i350 and newer parts. */
static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ret_val;
	u16 phy_data;

	if ((hw->mac.type < e1000_i350) ||
	    (hw->phy.media_type != e1000_media_type_copper))
		return -EOPNOTSUPP;

	edata->supported = (SUPPORTED_1000baseT_Full |
			    SUPPORTED_100baseT_Full);
	if (!hw->dev_spec._82575.eee_disable)
		edata->advertised =
			mmd_eee_adv_to_ethtool_adv_t(adapter->eee_advert);

	/* The IPCNFG and EEER registers are not supported on I354. */
	if (hw->mac.type == e1000_i354) {
		igb_get_eee_status_i354(hw, (bool *)&edata->eee_active);
	} else {
		u32 eeer;

		eeer = rd32(E1000_EEER);

		/* EEE status on negotiated link */
		if (eeer & E1000_EEER_EEE_NEG)
			edata->eee_active = true;

		if (eeer & E1000_EEER_TX_LPI_EN)
			edata->tx_lpi_enabled = true;
	}

	/* EEE Link Partner Advertised */
	switch (hw->mac.type) {
	case e1000_i350:
		ret_val = igb_read_emi_reg(hw, E1000_EEE_LP_ADV_ADDR_I350,
					   &phy_data);
		if (ret_val)
			return -ENODATA;

		edata->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(phy_data);
		break;
	case e1000_i354:
	case e1000_i210:
	case e1000_i211:
		ret_val = igb_read_xmdio_reg(hw, E1000_EEE_LP_ADV_ADDR_I210,
					     E1000_EEE_LP_ADV_DEV_I210,
					     &phy_data);
		if (ret_val)
			return -ENODATA;

		edata->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(phy_data);
		break;
	default:
		break;
	}

	edata->eee_enabled = !hw->dev_spec._82575.eee_disable;

	if ((hw->mac.type == e1000_i354) &&
	    (edata->eee_enabled))
		edata->tx_lpi_enabled = true;

	/* Report correct negotiated EEE status for devices that
	 * wrongly report EEE at half-duplex
	 */
	if (adapter->link_duplex == HALF_DUPLEX) {
		edata->eee_enabled = false;
		edata->eee_active = false;
		edata->tx_lpi_enabled = false;
		edata->advertised &= ~edata->advertised;
	}

	return 0;
}

/* Ethtool .set_eee: validate the requested EEE configuration against
 * the current state, update the advertisement, and reset the link if
 * the enable state changed.  Copper i350+ only. */
static int igb_set_eee(struct net_device *netdev,
		       struct ethtool_eee *edata)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct ethtool_eee eee_curr;
	bool adv1g_eee = true, adv100m_eee = true;
	s32 ret_val;

	if ((hw->mac.type < e1000_i350) ||
	    (hw->phy.media_type != e1000_media_type_copper))
		return -EOPNOTSUPP;

	memset(&eee_curr, 0, sizeof(struct ethtool_eee));

	ret_val = igb_get_eee(netdev, &eee_curr);
	if (ret_val)
		return ret_val;

	if (eee_curr.eee_enabled) {
		if (eee_curr.tx_lpi_enabled != edata->tx_lpi_enabled) {
			dev_err(&adapter->pdev->dev,
				"Setting EEE tx-lpi is not supported\n");
			return -EINVAL;
		}

		/* Tx LPI timer is not implemented currently */
		if (edata->tx_lpi_timer) {
			dev_err(&adapter->pdev->dev,
				"Setting EEE Tx LPI timer is not supported\n");
			return -EINVAL;
		}

		if (!edata->advertised || (edata->advertised &
		    ~(ADVERTISE_100_FULL | ADVERTISE_1000_FULL))) {
			dev_err(&adapter->pdev->dev,
				"EEE Advertisement supports only 100Tx and/or 100T full duplex\n");
			return -EINVAL;
		}
		adv100m_eee = !!(edata->advertised & ADVERTISE_100_FULL);
		adv1g_eee = !!(edata->advertised & ADVERTISE_1000_FULL);

	} else if (!edata->eee_enabled) {
		dev_err(&adapter->pdev->dev,
			"Setting EEE options are not supported with EEE disabled\n");
		return -EINVAL;
	}

	adapter->eee_advert = ethtool_adv_to_mmd_eee_adv_t(edata->advertised);
	if (hw->dev_spec._82575.eee_disable != !edata->eee_enabled) {
		hw->dev_spec._82575.eee_disable = !edata->eee_enabled;
		adapter->flags |= IGB_FLAG_EEE;

		/* reset link */
		if (netif_running(netdev))
			igb_reinit_locked(adapter);
		else
			igb_reset(adapter);
	}

	if (hw->mac.type == e1000_i354)
		ret_val = igb_set_eee_i354(hw, adv1g_eee, adv100m_eee);
	else
		ret_val = igb_set_eee_i350(hw, adv1g_eee, adv100m_eee);

	if (ret_val) {
		dev_err(&adapter->pdev->dev,
			"Problem setting EEE advertisement options\n");
		return -EINVAL;
	}

	return 0;
}

/* Ethtool .get_module_info: probe the SFP module over I2C and report
 * whether it speaks SFF-8079 only or a revision of SFF-8472. */
static int igb_get_module_info(struct net_device *netdev,
			       struct ethtool_modinfo *modinfo)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 status = 0;
	u16 sff8472_rev, addr_mode;
	bool page_swap = false;

	if ((hw->phy.media_type == e1000_media_type_copper) ||
	    (hw->phy.media_type == e1000_media_type_unknown))
		return -EOPNOTSUPP;

	/* Check whether we support SFF-8472 or not */
	status = igb_read_phy_reg_i2c(hw, IGB_SFF_8472_COMP, &sff8472_rev);
	if (status)
		return -EIO;

	/* addressing mode is not supported */
	status = igb_read_phy_reg_i2c(hw, IGB_SFF_8472_SWAP, &addr_mode);
	if (status)
		return -EIO;

	/* addressing mode is not supported */
	if ((addr_mode & 0xFF) & IGB_SFF_ADDRESSING_MODE) {
		hw_dbg("Address change required to access page 0xA2, but not supported. Please report the module type to the driver maintainers.\n");
		page_swap = true;
	}

	if ((sff8472_rev & 0xFF) == IGB_SFF_8472_UNSUP || page_swap) {
		/* We have an SFP, but it does not support SFF-8472 */
		modinfo->type = ETH_MODULE_SFF_8079;
		modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
	} else {
		/* We have an SFP which supports a revision of SFF-8472 */
		modinfo->type = ETH_MODULE_SFF_8472;
		modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
	}

	return 0;
}

/* Ethtool .get_module_eeprom: read the requested SFP EEPROM range over
 * I2C, one big-endian word at a time, and copy the byte-aligned slice
 * into @data. */
static int igb_get_module_eeprom(struct net_device *netdev,
				 struct ethtool_eeprom *ee, u8 *data)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 status = 0;
	u16 *dataword;
	u16 first_word, last_word;
	int i = 0;

	if (ee->len == 0)
		return -EINVAL;

	first_word = ee->offset >> 1;
	last_word = (ee->offset + ee->len - 1) >> 1;

	dataword = kmalloc(sizeof(u16) * (last_word - first_word + 1),
			   GFP_KERNEL);
	if (!dataword)
		return -ENOMEM;

	/* Read EEPROM block, SFF-8079/SFF-8472, word at a time */
	for (i = 0; i < last_word - first_word + 1; i++) {
		status = igb_read_phy_reg_i2c(hw, first_word + i, &dataword[i]);
		if (status) {
			/* Error occurred while reading module */
			kfree(dataword);
			return -EIO;
		}

		be16_to_cpus(&dataword[i]);
	}

	memcpy(data, (u8 *)dataword + (ee->offset & 1), ee->len);
	kfree(dataword);

	return 0;
}

/* Ethtool .begin: keep the device awake for the duration of the
 * ethtool operation (paired with igb_ethtool_complete()). */
static int igb_ethtool_begin(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	pm_runtime_get_sync(&adapter->pdev->dev);
	return 0;
}

/* Ethtool .complete: drop the runtime-PM reference taken in
 * igb_ethtool_begin(). */
static void igb_ethtool_complete(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	pm_runtime_put(&adapter->pdev->dev);
}

/* Ethtool .get_rxfh_indir_size: size of the RSS indirection table. */
static u32 igb_get_rxfh_indir_size(struct net_device *netdev)
{
	return IGB_RETA_SIZE;
}

/* Ethtool .get_rxfh: copy the cached RSS indirection table to user
 * space (no hash key support here). */
static int igb_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	int i;

	for (i = 0; i < IGB_RETA_SIZE; i++)
		indir[i] = adapter->rss_indir_tbl[i];

	return 0;
}

/* Write the cached RSS indirection table to the RETA registers, four
 * byte-wide entries per 32-bit register, shifted per MAC type. */
void igb_write_rss_indir_tbl(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 reg = E1000_RETA(0);
	u32 shift = 0;
	int i = 0;

	switch (hw->mac.type) {
	case e1000_82575:
		shift = 6;
		break;
	case e1000_82576:
		/* 82576 supports 2 RSS queues for SR-IOV */
		if (adapter->vfs_allocated_count)
			shift = 3;
		break;
	default:
		break;
	}

	while (i < IGB_RETA_SIZE) {
		u32 val = 0;
		int j;

		/* pack four table entries into one register, low byte first */
		for (j = 3; j >= 0; j--) {
			val <<= 8;
			val |= adapter->rss_indir_tbl[i + j];
		}

		wr32(reg, val << shift);
		reg += 4;
		i += 4;
	}
}

/* Ethtool .set_rxfh: validate and install a new RSS indirection table
 * (continues past this chunk). */
static int igb_set_rxfh(struct net_device *netdev, const u32 *indir,
			const u8 *key)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int i;
	u32 num_queues;

	num_queues = adapter->rss_queues;

	switch (hw->mac.type) {
	case e1000_82576:
		/* 82576 supports 2 RSS queues for SR-IOV */
		if (adapter->vfs_allocated_count)
			num_queues = 2;
		break;
	default:
		break;
	}

	/* Verify user input.
*/ for (i = 0; i < IGB_RETA_SIZE; i++) if (indir[i] >= num_queues) return -EINVAL; for (i = 0; i < IGB_RETA_SIZE; i++) adapter->rss_indir_tbl[i] = indir[i]; igb_write_rss_indir_tbl(adapter); return 0; } static unsigned int igb_max_channels(struct igb_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; unsigned int max_combined = 0; switch (hw->mac.type) { case e1000_i211: max_combined = IGB_MAX_RX_QUEUES_I211; break; case e1000_82575: case e1000_i210: max_combined = IGB_MAX_RX_QUEUES_82575; break; case e1000_i350: if (!!adapter->vfs_allocated_count) { max_combined = 1; break; } /* fall through */ case e1000_82576: if (!!adapter->vfs_allocated_count) { max_combined = 2; break; } /* fall through */ case e1000_82580: case e1000_i354: default: max_combined = IGB_MAX_RX_QUEUES; break; } return max_combined; } static void igb_get_channels(struct net_device *netdev, struct ethtool_channels *ch) { struct igb_adapter *adapter = netdev_priv(netdev); /* Report maximum channels */ ch->max_combined = igb_max_channels(adapter); /* Report info for other vector */ if (adapter->flags & IGB_FLAG_HAS_MSIX) { ch->max_other = NON_Q_VECTORS; ch->other_count = NON_Q_VECTORS; } ch->combined_count = adapter->rss_queues; } static int igb_set_channels(struct net_device *netdev, struct ethtool_channels *ch) { struct igb_adapter *adapter = netdev_priv(netdev); unsigned int count = ch->combined_count; unsigned int max_combined = 0; /* Verify they are not requesting separate vectors */ if (!count || ch->rx_count || ch->tx_count) return -EINVAL; /* Verify other_count is valid and has not been changed */ if (ch->other_count != NON_Q_VECTORS) return -EINVAL; /* Verify the number of channels doesn't exceed hw limits */ max_combined = igb_max_channels(adapter); if (count > max_combined) return -EINVAL; if (count != adapter->rss_queues) { adapter->rss_queues = count; igb_set_flag_queue_pairs(adapter, max_combined); /* Hardware has to reinitialize queues and interrupts to * match the new 
configuration. */ return igb_reinit_queues(adapter); } return 0; } static const struct ethtool_ops igb_ethtool_ops = { .get_settings = igb_get_settings, .set_settings = igb_set_settings, .get_drvinfo = igb_get_drvinfo, .get_regs_len = igb_get_regs_len, .get_regs = igb_get_regs, .get_wol = igb_get_wol, .set_wol = igb_set_wol, .get_msglevel = igb_get_msglevel, .set_msglevel = igb_set_msglevel, .nway_reset = igb_nway_reset, .get_link = igb_get_link, .get_eeprom_len = igb_get_eeprom_len, .get_eeprom = igb_get_eeprom, .set_eeprom = igb_set_eeprom, .get_ringparam = igb_get_ringparam, .set_ringparam = igb_set_ringparam, .get_pauseparam = igb_get_pauseparam, .set_pauseparam = igb_set_pauseparam, .self_test = igb_diag_test, .get_strings = igb_get_strings, .set_phys_id = igb_set_phys_id, .get_sset_count = igb_get_sset_count, .get_ethtool_stats = igb_get_ethtool_stats, .get_coalesce = igb_get_coalesce, .set_coalesce = igb_set_coalesce, .get_ts_info = igb_get_ts_info, .get_rxnfc = igb_get_rxnfc, .set_rxnfc = igb_set_rxnfc, .get_eee = igb_get_eee, .set_eee = igb_set_eee, .get_module_info = igb_get_module_info, .get_module_eeprom = igb_get_module_eeprom, .get_rxfh_indir_size = igb_get_rxfh_indir_size, .get_rxfh = igb_get_rxfh, .set_rxfh = igb_set_rxfh, .get_channels = igb_get_channels, .set_channels = igb_set_channels, .begin = igb_ethtool_begin, .complete = igb_ethtool_complete, }; void igb_set_ethtool_ops(struct net_device *netdev) { netdev->ethtool_ops = &igb_ethtool_ops; }
gpl-2.0
SlimRoms/kernel_nvidia_shieldtablet
kernel/exit.c
120
43840
/* * linux/kernel/exit.c * * Copyright (C) 1991, 1992 Linus Torvalds */ #include <linux/mm.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/capability.h> #include <linux/completion.h> #include <linux/personality.h> #include <linux/tty.h> #include <linux/iocontext.h> #include <linux/key.h> #include <linux/security.h> #include <linux/cpu.h> #include <linux/acct.h> #include <linux/tsacct_kern.h> #include <linux/file.h> #include <linux/fdtable.h> #include <linux/freezer.h> #include <linux/binfmts.h> #include <linux/nsproxy.h> #include <linux/pid_namespace.h> #include <linux/ptrace.h> #include <linux/profile.h> #include <linux/mount.h> #include <linux/proc_fs.h> #include <linux/kthread.h> #include <linux/mempolicy.h> #include <linux/taskstats_kern.h> #include <linux/delayacct.h> #include <linux/cgroup.h> #include <linux/syscalls.h> #include <linux/signal.h> #include <linux/posix-timers.h> #include <linux/cn_proc.h> #include <linux/mutex.h> #include <linux/futex.h> #include <linux/pipe_fs_i.h> #include <linux/audit.h> /* for audit_free() */ #include <linux/resource.h> #include <linux/blkdev.h> #include <linux/task_io_accounting_ops.h> #include <linux/tracehook.h> #include <linux/fs_struct.h> #include <linux/init_task.h> #include <linux/perf_event.h> #include <trace/events/sched.h> #include <linux/hw_breakpoint.h> #include <linux/oom.h> #include <linux/writeback.h> #include <linux/shm.h> #include <asm/uaccess.h> #include <asm/unistd.h> #include <asm/pgtable.h> #include <asm/mmu_context.h> static void exit_mm(struct task_struct * tsk); static void __unhash_process(struct task_struct *p, bool group_dead) { nr_threads--; detach_pid(p, PIDTYPE_PID); if (group_dead) { detach_pid(p, PIDTYPE_PGID); detach_pid(p, PIDTYPE_SID); list_del_rcu(&p->tasks); list_del_init(&p->sibling); __this_cpu_dec(process_counts); } list_del_rcu(&p->thread_group); } /* * This function expects the tasklist_lock write-locked. 
*/ static void __exit_signal(struct task_struct *tsk) { struct signal_struct *sig = tsk->signal; bool group_dead = thread_group_leader(tsk); struct sighand_struct *sighand; struct tty_struct *uninitialized_var(tty); cputime_t utime, stime; sighand = rcu_dereference_check(tsk->sighand, lockdep_tasklist_lock_is_held()); spin_lock(&sighand->siglock); posix_cpu_timers_exit(tsk); if (group_dead) { posix_cpu_timers_exit_group(tsk); tty = sig->tty; sig->tty = NULL; } else { /* * This can only happen if the caller is de_thread(). * FIXME: this is the temporary hack, we should teach * posix-cpu-timers to handle this case correctly. */ if (unlikely(has_group_leader_pid(tsk))) posix_cpu_timers_exit_group(tsk); /* * If there is any task waiting for the group exit * then notify it: */ if (sig->notify_count > 0 && !--sig->notify_count) wake_up_process(sig->group_exit_task); if (tsk == sig->curr_target) sig->curr_target = next_thread(tsk); /* * Accumulate here the counters for all threads but the * group leader as they die, so they can be added into * the process-wide totals when those are taken. * The group leader stays around as a zombie as long * as there are other threads. When it gets reaped, * the exit.c code will add its counts into these totals. * We won't ever get here for the group leader, since it * will have been the last reference on the signal_struct. */ task_cputime(tsk, &utime, &stime); sig->utime += utime; sig->stime += stime; sig->gtime += task_gtime(tsk); sig->min_flt += tsk->min_flt; sig->maj_flt += tsk->maj_flt; sig->nvcsw += tsk->nvcsw; sig->nivcsw += tsk->nivcsw; sig->inblock += task_io_get_inblock(tsk); sig->oublock += task_io_get_oublock(tsk); task_io_accounting_add(&sig->ioac, &tsk->ioac); sig->sum_sched_runtime += tsk->se.sum_exec_runtime; } sig->nr_threads--; __unhash_process(tsk, group_dead); /* * Do this under ->siglock, we can race with another thread * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals. 
*/ flush_sigqueue(&tsk->pending); tsk->sighand = NULL; spin_unlock(&sighand->siglock); __cleanup_sighand(sighand); clear_tsk_thread_flag(tsk,TIF_SIGPENDING); if (group_dead) { flush_sigqueue(&sig->shared_pending); tty_kref_put(tty); } } static void delayed_put_task_struct(struct rcu_head *rhp) { struct task_struct *tsk = container_of(rhp, struct task_struct, rcu); perf_event_delayed_put(tsk); trace_sched_process_free(tsk); put_task_struct(tsk); } void release_task(struct task_struct * p) { struct task_struct *leader; int zap_leader; repeat: /* don't need to get the RCU readlock here - the process is dead and * can't be modifying its own credentials. But shut RCU-lockdep up */ rcu_read_lock(); atomic_dec(&__task_cred(p)->user->processes); rcu_read_unlock(); proc_flush_task(p); write_lock_irq(&tasklist_lock); ptrace_release_task(p); __exit_signal(p); /* * If we are the last non-leader member of the thread * group, and the leader is zombie, then notify the * group leader's parent process. (if it wants notification.) */ zap_leader = 0; leader = p->group_leader; if (leader != p && thread_group_empty(leader) && leader->exit_state == EXIT_ZOMBIE) { /* * If we were the last child thread and the leader has * exited already, and the leader's parent ignores SIGCHLD, * then we are the one who should release the leader. */ zap_leader = do_notify_parent(leader, leader->exit_signal); if (zap_leader) leader->exit_state = EXIT_DEAD; } write_unlock_irq(&tasklist_lock); release_thread(p); call_rcu(&p->rcu, delayed_put_task_struct); p = leader; if (unlikely(zap_leader)) goto repeat; } /* * This checks not only the pgrp, but falls back on the pid if no * satisfactory pgrp is found. I dunno - gdb doesn't work correctly * without this... * * The caller must hold rcu lock or the tasklist lock. 
*/ struct pid *session_of_pgrp(struct pid *pgrp) { struct task_struct *p; struct pid *sid = NULL; p = pid_task(pgrp, PIDTYPE_PGID); if (p == NULL) p = pid_task(pgrp, PIDTYPE_PID); if (p != NULL) sid = task_session(p); return sid; } /* * Determine if a process group is "orphaned", according to the POSIX * definition in 2.2.2.52. Orphaned process groups are not to be affected * by terminal-generated stop signals. Newly orphaned process groups are * to receive a SIGHUP and a SIGCONT. * * "I ask you, have you ever known what it is to be an orphan?" */ static int will_become_orphaned_pgrp(struct pid *pgrp, struct task_struct *ignored_task) { struct task_struct *p; do_each_pid_task(pgrp, PIDTYPE_PGID, p) { if ((p == ignored_task) || (p->exit_state && thread_group_empty(p)) || is_global_init(p->real_parent)) continue; if (task_pgrp(p->real_parent) != pgrp && task_session(p->real_parent) == task_session(p)) return 0; } while_each_pid_task(pgrp, PIDTYPE_PGID, p); return 1; } int is_current_pgrp_orphaned(void) { int retval; read_lock(&tasklist_lock); retval = will_become_orphaned_pgrp(task_pgrp(current), NULL); read_unlock(&tasklist_lock); return retval; } static bool has_stopped_jobs(struct pid *pgrp) { struct task_struct *p; do_each_pid_task(pgrp, PIDTYPE_PGID, p) { if (p->signal->flags & SIGNAL_STOP_STOPPED) return true; } while_each_pid_task(pgrp, PIDTYPE_PGID, p); return false; } /* * Check to see if any process groups have become orphaned as * a result of our exiting, and if they have any stopped jobs, * send them a SIGHUP and then a SIGCONT. (POSIX 3.2.2.2) */ static void kill_orphaned_pgrp(struct task_struct *tsk, struct task_struct *parent) { struct pid *pgrp = task_pgrp(tsk); struct task_struct *ignored_task = tsk; if (!parent) /* exit: our father is in a different pgrp than * we are and we were the only connection outside. */ parent = tsk->real_parent; else /* reparent: our child is in a different pgrp than * we are, and it was the only connection outside. 
*/ ignored_task = NULL; if (task_pgrp(parent) != pgrp && task_session(parent) == task_session(tsk) && will_become_orphaned_pgrp(pgrp, ignored_task) && has_stopped_jobs(pgrp)) { __kill_pgrp_info(SIGHUP, SEND_SIG_PRIV, pgrp); __kill_pgrp_info(SIGCONT, SEND_SIG_PRIV, pgrp); } } void __set_special_pids(struct pid *pid) { struct task_struct *curr = current->group_leader; if (task_session(curr) != pid) change_pid(curr, PIDTYPE_SID, pid); if (task_pgrp(curr) != pid) change_pid(curr, PIDTYPE_PGID, pid); } /* * Let kernel threads use this to say that they allow a certain signal. * Must not be used if kthread was cloned with CLONE_SIGHAND. */ int allow_signal(int sig) { if (!valid_signal(sig) || sig < 1) return -EINVAL; spin_lock_irq(&current->sighand->siglock); /* This is only needed for daemonize()'ed kthreads */ sigdelset(&current->blocked, sig); /* * Kernel threads handle their own signals. Let the signal code * know it'll be handled, so that they don't get converted to * SIGKILL or just silently dropped. */ current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2; recalc_sigpending(); spin_unlock_irq(&current->sighand->siglock); return 0; } EXPORT_SYMBOL(allow_signal); int disallow_signal(int sig) { if (!valid_signal(sig) || sig < 1) return -EINVAL; spin_lock_irq(&current->sighand->siglock); current->sighand->action[(sig)-1].sa.sa_handler = SIG_IGN; recalc_sigpending(); spin_unlock_irq(&current->sighand->siglock); return 0; } EXPORT_SYMBOL(disallow_signal); #ifdef CONFIG_MM_OWNER /* * A task is exiting. If it owned this mm, find a new owner for the mm. */ void mm_update_next_owner(struct mm_struct *mm) { struct task_struct *c, *g, *p = current; retry: /* * If the exiting or execing task is not the owner, it's * someone else's problem. */ if (mm->owner != p) return; /* * The current owner is exiting/execing and there are no other * candidates. Do not leave the mm pointing to a possibly * freed task structure. 
*/ if (atomic_read(&mm->mm_users) <= 1) { mm->owner = NULL; return; } read_lock(&tasklist_lock); /* * Search in the children */ list_for_each_entry(c, &p->children, sibling) { if (c->mm == mm) goto assign_new_owner; } /* * Search in the siblings */ list_for_each_entry(c, &p->real_parent->children, sibling) { if (c->mm == mm) goto assign_new_owner; } /* * Search through everything else. We should not get * here often */ do_each_thread(g, c) { if (c->mm == mm) goto assign_new_owner; } while_each_thread(g, c); read_unlock(&tasklist_lock); /* * We found no owner yet mm_users > 1: this implies that we are * most likely racing with swapoff (try_to_unuse()) or /proc or * ptrace or page migration (get_task_mm()). Mark owner as NULL. */ mm->owner = NULL; return; assign_new_owner: BUG_ON(c == p); get_task_struct(c); /* * The task_lock protects c->mm from changing. * We always want mm->owner->mm == mm */ task_lock(c); /* * Delay read_unlock() till we have the task_lock() * to ensure that c does not slip away underneath us */ read_unlock(&tasklist_lock); if (c->mm != mm) { task_unlock(c); put_task_struct(c); goto retry; } mm->owner = c; task_unlock(c); put_task_struct(c); } #endif /* CONFIG_MM_OWNER */ /* * Turn us into a lazy TLB process if we * aren't already.. */ static void exit_mm(struct task_struct * tsk) { struct mm_struct *mm = tsk->mm; struct core_state *core_state; mm_release(tsk, mm); if (!mm) return; sync_mm_rss(mm); /* * Serialize with any possible pending coredump. * We must hold mmap_sem around checking core_state * and clearing tsk->mm. The core-inducing thread * will increment ->nr_threads for each thread in the * group with ->mm != NULL. */ down_read(&mm->mmap_sem); core_state = mm->core_state; if (core_state) { struct core_thread self; up_read(&mm->mmap_sem); self.task = tsk; self.next = xchg(&core_state->dumper.next, &self); /* * Implies mb(), the result of xchg() must be visible * to core_state->dumper. 
*/ if (atomic_dec_and_test(&core_state->nr_threads)) complete(&core_state->startup); for (;;) { set_task_state(tsk, TASK_UNINTERRUPTIBLE); if (!self.task) /* see coredump_finish() */ break; freezable_schedule(); } __set_task_state(tsk, TASK_RUNNING); down_read(&mm->mmap_sem); } atomic_inc(&mm->mm_count); BUG_ON(mm != tsk->active_mm); /* more a memory barrier than a real lock */ task_lock(tsk); tsk->mm = NULL; up_read(&mm->mmap_sem); enter_lazy_tlb(mm, current); task_unlock(tsk); mm_update_next_owner(mm); mmput(mm); } /* * When we die, we re-parent all our children, and try to: * 1. give them to another thread in our thread group, if such a member exists * 2. give it to the first ancestor process which prctl'd itself as a * child_subreaper for its children (like a service manager) * 3. give it to the init process (PID 1) in our pid namespace */ static struct task_struct *find_new_reaper(struct task_struct *father) __releases(&tasklist_lock) __acquires(&tasklist_lock) { struct pid_namespace *pid_ns = task_active_pid_ns(father); struct task_struct *thread; thread = father; while_each_thread(father, thread) { if (thread->flags & PF_EXITING) continue; if (unlikely(pid_ns->child_reaper == father)) pid_ns->child_reaper = thread; return thread; } if (unlikely(pid_ns->child_reaper == father)) { write_unlock_irq(&tasklist_lock); if (unlikely(pid_ns == &init_pid_ns)) { panic("Attempted to kill init! exitcode=0x%08x\n", father->signal->group_exit_code ?: father->exit_code); } zap_pid_ns_processes(pid_ns); write_lock_irq(&tasklist_lock); } else if (father->signal->has_child_subreaper) { struct task_struct *reaper; /* * Find the first ancestor marked as child_subreaper. * Note that the code below checks same_thread_group(reaper, * pid_ns->child_reaper). This is what we need to DTRT in a * PID namespace. 
However we still need the check above, see * http://marc.info/?l=linux-kernel&m=131385460420380 */ for (reaper = father->real_parent; reaper != &init_task; reaper = reaper->real_parent) { if (same_thread_group(reaper, pid_ns->child_reaper)) break; if (!reaper->signal->is_child_subreaper) continue; thread = reaper; do { if (!(thread->flags & PF_EXITING)) return reaper; } while_each_thread(reaper, thread); } } return pid_ns->child_reaper; } /* * Any that need to be release_task'd are put on the @dead list. */ static void reparent_leader(struct task_struct *father, struct task_struct *p, struct list_head *dead) { list_move_tail(&p->sibling, &p->real_parent->children); if (p->exit_state == EXIT_DEAD) return; /* * If this is a threaded reparent there is no need to * notify anyone anything has happened. */ if (same_thread_group(p->real_parent, father)) return; /* We don't want people slaying init. */ p->exit_signal = SIGCHLD; /* If it has exited notify the new parent about this child's death. */ if (!p->ptrace && p->exit_state == EXIT_ZOMBIE && thread_group_empty(p)) { if (do_notify_parent(p, p->exit_signal)) { p->exit_state = EXIT_DEAD; list_move_tail(&p->sibling, dead); } } kill_orphaned_pgrp(p, father); } static void forget_original_parent(struct task_struct *father) { struct task_struct *p, *n, *reaper; LIST_HEAD(dead_children); write_lock_irq(&tasklist_lock); /* * Note that exit_ptrace() and find_new_reaper() might * drop tasklist_lock and reacquire it. 
*/ exit_ptrace(father); reaper = find_new_reaper(father); list_for_each_entry_safe(p, n, &father->children, sibling) { struct task_struct *t = p; do { t->real_parent = reaper; if (t->parent == father) { BUG_ON(t->ptrace); t->parent = t->real_parent; } if (t->pdeath_signal) group_send_sig_info(t->pdeath_signal, SEND_SIG_NOINFO, t); } while_each_thread(p, t); reparent_leader(father, p, &dead_children); } write_unlock_irq(&tasklist_lock); BUG_ON(!list_empty(&father->children)); list_for_each_entry_safe(p, n, &dead_children, sibling) { list_del_init(&p->sibling); release_task(p); } } /* * Send signals to all our closest relatives so that they know * to properly mourn us.. */ static void exit_notify(struct task_struct *tsk, int group_dead) { bool autoreap; /* * This does two things: * * A. Make init inherit all the child processes * B. Check to see if any process groups have become orphaned * as a result of our exiting, and if they have any stopped * jobs, send them a SIGHUP and then a SIGCONT. (POSIX 3.2.2.2) */ forget_original_parent(tsk); write_lock_irq(&tasklist_lock); if (group_dead) kill_orphaned_pgrp(tsk->group_leader, NULL); if (unlikely(tsk->ptrace)) { int sig = thread_group_leader(tsk) && thread_group_empty(tsk) && !ptrace_reparented(tsk) ? tsk->exit_signal : SIGCHLD; autoreap = do_notify_parent(tsk, sig); } else if (thread_group_leader(tsk)) { autoreap = thread_group_empty(tsk) && do_notify_parent(tsk, tsk->exit_signal); } else { autoreap = true; } tsk->exit_state = autoreap ? 
EXIT_DEAD : EXIT_ZOMBIE; /* mt-exec, de_thread() is waiting for group leader */ if (unlikely(tsk->signal->notify_count < 0)) wake_up_process(tsk->signal->group_exit_task); write_unlock_irq(&tasklist_lock); /* If the process is dead, release it - nobody will wait for it */ if (autoreap) release_task(tsk); } #ifdef CONFIG_DEBUG_STACK_USAGE static void check_stack_usage(void) { static DEFINE_SPINLOCK(low_water_lock); static int lowest_to_date = THREAD_SIZE; unsigned long free; free = stack_not_used(current); if (free >= lowest_to_date) return; spin_lock(&low_water_lock); if (free < lowest_to_date) { printk(KERN_WARNING "%s (%d) used greatest stack depth: " "%lu bytes left\n", current->comm, task_pid_nr(current), free); lowest_to_date = free; } spin_unlock(&low_water_lock); } #else static inline void check_stack_usage(void) {} #endif void do_exit(long code) { struct task_struct *tsk = current; int group_dead; profile_task_exit(tsk); WARN_ON(blk_needs_flush_plug(tsk)); if (unlikely(in_interrupt())) panic("Aiee, killing interrupt handler!"); if (unlikely(!tsk->pid)) panic("Attempted to kill the idle task!"); /* * If do_exit is called because this processes oopsed, it's possible * that get_fs() was left as KERNEL_DS, so reset it to USER_DS before * continuing. Amongst other possible reasons, this is to prevent * mm_release()->clear_child_tid() from writing to a user-controlled * kernel address. */ set_fs(USER_DS); ptrace_event(PTRACE_EVENT_EXIT, code); validate_creds_for_do_exit(tsk); /* * We're taking recursive faults here in do_exit. Safest is to just * leave this task alone and wait for reboot. */ if (unlikely(tsk->flags & PF_EXITING)) { printk(KERN_ALERT "Fixing recursive fault but reboot is needed!\n"); /* * We can do this unlocked here. The futex code uses * this flag just to verify whether the pi state * cleanup has been done or not. In the worst case it * loops once more. We pretend that the cleanup was * done as there is no way to return. 
Either the * OWNER_DIED bit is set by now or we push the blocked * task into the wait for ever nirwana as well. */ tsk->flags |= PF_EXITPIDONE; set_current_state(TASK_UNINTERRUPTIBLE); schedule(); } exit_signals(tsk); /* sets PF_EXITING */ /* * tsk->flags are checked in the futex code to protect against * an exiting task cleaning up the robust pi futexes. */ smp_mb(); raw_spin_unlock_wait(&tsk->pi_lock); if (unlikely(in_atomic())) printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n", current->comm, task_pid_nr(current), preempt_count()); acct_update_integrals(tsk); /* sync mm's RSS info before statistics gathering */ if (tsk->mm) sync_mm_rss(tsk->mm); group_dead = atomic_dec_and_test(&tsk->signal->live); if (group_dead) { hrtimer_cancel(&tsk->signal->real_timer); exit_itimers(tsk->signal); if (tsk->mm) setmax_mm_hiwater_rss(&tsk->signal->maxrss, tsk->mm); } acct_collect(code, group_dead); if (group_dead) tty_audit_exit(); audit_free(tsk); tsk->exit_code = code; taskstats_exit(tsk, group_dead); exit_mm(tsk); if (group_dead) acct_process(); trace_sched_process_exit(tsk); exit_sem(tsk); exit_shm(tsk); exit_files(tsk); exit_fs(tsk); exit_task_namespaces(tsk); exit_task_work(tsk); check_stack_usage(); exit_thread(); /* * Flush inherited counters to the parent - before the parent * gets woken up by child-exit notifications. 
* * because of cgroup mode, must be called before cgroup_exit() */ perf_event_exit_task(tsk); cgroup_exit(tsk, 1); if (group_dead) disassociate_ctty(1); module_put(task_thread_info(tsk)->exec_domain->module); proc_exit_connector(tsk); /* * FIXME: do that only when needed, using sched_exit tracepoint */ ptrace_put_breakpoints(tsk); exit_notify(tsk, group_dead); #ifdef CONFIG_NUMA task_lock(tsk); mpol_put(tsk->mempolicy); tsk->mempolicy = NULL; task_unlock(tsk); #endif #ifdef CONFIG_FUTEX if (unlikely(current->pi_state_cache)) kfree(current->pi_state_cache); #endif /* * Make sure we are holding no locks: */ debug_check_no_locks_held(); /* * We can do this unlocked here. The futex code uses this flag * just to verify whether the pi state cleanup has been done * or not. In the worst case it loops once more. */ tsk->flags |= PF_EXITPIDONE; if (tsk->io_context) exit_io_context(tsk); if (tsk->splice_pipe) free_pipe_info(tsk->splice_pipe); if (tsk->task_frag.page) put_page(tsk->task_frag.page); validate_creds_for_do_exit(tsk); preempt_disable(); if (tsk->nr_dirtied) __this_cpu_add(dirty_throttle_leaks, tsk->nr_dirtied); exit_rcu(); /* * The setting of TASK_RUNNING by try_to_wake_up() may be delayed * when the following two conditions become true. * - There is race condition of mmap_sem (It is acquired by * exit_mm()), and * - SMI occurs before setting TASK_RUNINNG. * (or hypervisor of virtual machine switches to other guest) * As a result, we may become TASK_RUNNING after becoming TASK_DEAD * * To avoid it, we have to wait for releasing tsk->pi_lock which * is held by try_to_wake_up() */ smp_mb(); raw_spin_unlock_wait(&tsk->pi_lock); /* causes final put_task_struct in finish_task_switch(). */ tsk->state = TASK_DEAD; tsk->flags |= PF_NOFREEZE; /* tell freezer to ignore us */ schedule(); BUG(); /* Avoid "noreturn function does return". 
*/ for (;;) cpu_relax(); /* For when BUG is null */ } EXPORT_SYMBOL_GPL(do_exit); void complete_and_exit(struct completion *comp, long code) { if (comp) complete(comp); do_exit(code); } EXPORT_SYMBOL(complete_and_exit); SYSCALL_DEFINE1(exit, int, error_code) { do_exit((error_code&0xff)<<8); } /* * Take down every thread in the group. This is called by fatal signals * as well as by sys_exit_group (below). */ void do_group_exit(int exit_code) { struct signal_struct *sig = current->signal; BUG_ON(exit_code & 0x80); /* core dumps don't get here */ if (signal_group_exit(sig)) exit_code = sig->group_exit_code; else if (!thread_group_empty(current)) { struct sighand_struct *const sighand = current->sighand; spin_lock_irq(&sighand->siglock); if (signal_group_exit(sig)) /* Another thread got here before we took the lock. */ exit_code = sig->group_exit_code; else { sig->group_exit_code = exit_code; sig->flags = SIGNAL_GROUP_EXIT; zap_other_threads(current); } spin_unlock_irq(&sighand->siglock); } do_exit(exit_code); /* NOTREACHED */ } /* * this kills every thread in the thread group. Note that any externally * wait4()-ing process will get the correct exit code - even if this * thread is not the thread group leader. 
*/ SYSCALL_DEFINE1(exit_group, int, error_code) { do_group_exit((error_code & 0xff) << 8); /* NOTREACHED */ return 0; } struct wait_opts { enum pid_type wo_type; int wo_flags; struct pid *wo_pid; struct siginfo __user *wo_info; int __user *wo_stat; struct rusage __user *wo_rusage; wait_queue_t child_wait; int notask_error; }; static inline struct pid *task_pid_type(struct task_struct *task, enum pid_type type) { if (type != PIDTYPE_PID) task = task->group_leader; return task->pids[type].pid; } static int eligible_pid(struct wait_opts *wo, struct task_struct *p) { return wo->wo_type == PIDTYPE_MAX || task_pid_type(p, wo->wo_type) == wo->wo_pid; } static int eligible_child(struct wait_opts *wo, struct task_struct *p) { if (!eligible_pid(wo, p)) return 0; /* Wait for all children (clone and not) if __WALL is set; * otherwise, wait for clone children *only* if __WCLONE is * set; otherwise, wait for non-clone children *only*. (Note: * A "clone" child here is one that reports to its parent * using a signal other than SIGCHLD.) */ if (((p->exit_signal != SIGCHLD) ^ !!(wo->wo_flags & __WCLONE)) && !(wo->wo_flags & __WALL)) return 0; return 1; } static int wait_noreap_copyout(struct wait_opts *wo, struct task_struct *p, pid_t pid, uid_t uid, int why, int status) { struct siginfo __user *infop; int retval = wo->wo_rusage ? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0; put_task_struct(p); infop = wo->wo_info; if (infop) { if (!retval) retval = put_user(SIGCHLD, &infop->si_signo); if (!retval) retval = put_user(0, &infop->si_errno); if (!retval) retval = put_user((short)why, &infop->si_code); if (!retval) retval = put_user(pid, &infop->si_pid); if (!retval) retval = put_user(uid, &infop->si_uid); if (!retval) retval = put_user(status, &infop->si_status); } if (!retval) retval = pid; return retval; } /* * Handle sys_wait4 work for one task in state EXIT_ZOMBIE. We hold * read_lock(&tasklist_lock) on entry. 
If we return zero, we still hold * the lock and this task is uninteresting. If we return nonzero, we have * released the lock and the system call should return. */ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p) { unsigned long state; int retval, status, traced; pid_t pid = task_pid_vnr(p); uid_t uid = from_kuid_munged(current_user_ns(), task_uid(p)); struct siginfo __user *infop; if (!likely(wo->wo_flags & WEXITED)) return 0; if (unlikely(wo->wo_flags & WNOWAIT)) { int exit_code = p->exit_code; int why; get_task_struct(p); read_unlock(&tasklist_lock); if ((exit_code & 0x7f) == 0) { why = CLD_EXITED; status = exit_code >> 8; } else { why = (exit_code & 0x80) ? CLD_DUMPED : CLD_KILLED; status = exit_code & 0x7f; } return wait_noreap_copyout(wo, p, pid, uid, why, status); } /* * Try to move the task's state to DEAD * only one thread is allowed to do this: */ state = xchg(&p->exit_state, EXIT_DEAD); if (state != EXIT_ZOMBIE) { BUG_ON(state != EXIT_DEAD); return 0; } traced = ptrace_reparented(p); /* * It can be ptraced but not reparented, check * thread_group_leader() to filter out sub-threads. */ if (likely(!traced) && thread_group_leader(p)) { struct signal_struct *psig; struct signal_struct *sig; unsigned long maxrss; cputime_t tgutime, tgstime; /* * The resource counters for the group leader are in its * own task_struct. Those for dead threads in the group * are in its signal_struct, as are those for the child * processes it has previously reaped. All these * accumulate in the parent's signal_struct c* fields. * * We don't bother to take a lock here to protect these * p->signal fields, because they are only touched by * __exit_signal, which runs with tasklist_lock * write-locked anyway, and so is excluded here. We do * need to protect the access to parent->signal fields, * as other threads in the parent group can be right * here reaping other children at the same time. 
* * We use thread_group_cputime_adjusted() to get times for the thread * group, which consolidates times for all threads in the * group including the group leader. */ thread_group_cputime_adjusted(p, &tgutime, &tgstime); spin_lock_irq(&p->real_parent->sighand->siglock); psig = p->real_parent->signal; sig = p->signal; psig->cutime += tgutime + sig->cutime; psig->cstime += tgstime + sig->cstime; psig->cgtime += task_gtime(p) + sig->gtime + sig->cgtime; psig->cmin_flt += p->min_flt + sig->min_flt + sig->cmin_flt; psig->cmaj_flt += p->maj_flt + sig->maj_flt + sig->cmaj_flt; psig->cnvcsw += p->nvcsw + sig->nvcsw + sig->cnvcsw; psig->cnivcsw += p->nivcsw + sig->nivcsw + sig->cnivcsw; psig->cinblock += task_io_get_inblock(p) + sig->inblock + sig->cinblock; psig->coublock += task_io_get_oublock(p) + sig->oublock + sig->coublock; maxrss = max(sig->maxrss, sig->cmaxrss); if (psig->cmaxrss < maxrss) psig->cmaxrss = maxrss; task_io_accounting_add(&psig->ioac, &p->ioac); task_io_accounting_add(&psig->ioac, &sig->ioac); spin_unlock_irq(&p->real_parent->sighand->siglock); } /* * Now we are sure this task is interesting, and no other * thread can reap it because we set its state to EXIT_DEAD. */ read_unlock(&tasklist_lock); retval = wo->wo_rusage ? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0; status = (p->signal->flags & SIGNAL_GROUP_EXIT) ? p->signal->group_exit_code : p->exit_code; if (!retval && wo->wo_stat) retval = put_user(status, wo->wo_stat); infop = wo->wo_info; if (!retval && infop) retval = put_user(SIGCHLD, &infop->si_signo); if (!retval && infop) retval = put_user(0, &infop->si_errno); if (!retval && infop) { int why; if ((status & 0x7f) == 0) { why = CLD_EXITED; status >>= 8; } else { why = (status & 0x80) ? 
CLD_DUMPED : CLD_KILLED; status &= 0x7f; } retval = put_user((short)why, &infop->si_code); if (!retval) retval = put_user(status, &infop->si_status); } if (!retval && infop) retval = put_user(pid, &infop->si_pid); if (!retval && infop) retval = put_user(uid, &infop->si_uid); if (!retval) retval = pid; if (traced) { write_lock_irq(&tasklist_lock); /* We dropped tasklist, ptracer could die and untrace */ ptrace_unlink(p); /* * If this is not a sub-thread, notify the parent. * If parent wants a zombie, don't release it now. */ if (thread_group_leader(p) && !do_notify_parent(p, p->exit_signal)) { p->exit_state = EXIT_ZOMBIE; p = NULL; } write_unlock_irq(&tasklist_lock); } if (p != NULL) release_task(p); return retval; } static int *task_stopped_code(struct task_struct *p, bool ptrace) { if (ptrace) { if (task_is_stopped_or_traced(p) && !(p->jobctl & JOBCTL_LISTENING)) return &p->exit_code; } else { if (p->signal->flags & SIGNAL_STOP_STOPPED) return &p->signal->group_exit_code; } return NULL; } /** * wait_task_stopped - Wait for %TASK_STOPPED or %TASK_TRACED * @wo: wait options * @ptrace: is the wait for ptrace * @p: task to wait for * * Handle sys_wait4() work for %p in state %TASK_STOPPED or %TASK_TRACED. * * CONTEXT: * read_lock(&tasklist_lock), which is released if return value is * non-zero. Also, grabs and releases @p->sighand->siglock. * * RETURNS: * 0 if wait condition didn't exist and search for other wait conditions * should continue. Non-zero return, -errno on failure and @p's pid on * success, implies that tasklist_lock is released and wait condition * search should terminate. */ static int wait_task_stopped(struct wait_opts *wo, int ptrace, struct task_struct *p) { struct siginfo __user *infop; int retval, exit_code, *p_code, why; uid_t uid = 0; /* unneeded, required by compiler */ pid_t pid; /* * Traditionally we see ptrace'd stopped tasks regardless of options. 
*/ if (!ptrace && !(wo->wo_flags & WUNTRACED)) return 0; if (!task_stopped_code(p, ptrace)) return 0; exit_code = 0; spin_lock_irq(&p->sighand->siglock); p_code = task_stopped_code(p, ptrace); if (unlikely(!p_code)) goto unlock_sig; exit_code = *p_code; if (!exit_code) goto unlock_sig; if (!unlikely(wo->wo_flags & WNOWAIT)) *p_code = 0; uid = from_kuid_munged(current_user_ns(), task_uid(p)); unlock_sig: spin_unlock_irq(&p->sighand->siglock); if (!exit_code) return 0; /* * Now we are pretty sure this task is interesting. * Make sure it doesn't get reaped out from under us while we * give up the lock and then examine it below. We don't want to * keep holding onto the tasklist_lock while we call getrusage and * possibly take page faults for user memory. */ get_task_struct(p); pid = task_pid_vnr(p); why = ptrace ? CLD_TRAPPED : CLD_STOPPED; read_unlock(&tasklist_lock); if (unlikely(wo->wo_flags & WNOWAIT)) return wait_noreap_copyout(wo, p, pid, uid, why, exit_code); retval = wo->wo_rusage ? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0; if (!retval && wo->wo_stat) retval = put_user((exit_code << 8) | 0x7f, wo->wo_stat); infop = wo->wo_info; if (!retval && infop) retval = put_user(SIGCHLD, &infop->si_signo); if (!retval && infop) retval = put_user(0, &infop->si_errno); if (!retval && infop) retval = put_user((short)why, &infop->si_code); if (!retval && infop) retval = put_user(exit_code, &infop->si_status); if (!retval && infop) retval = put_user(pid, &infop->si_pid); if (!retval && infop) retval = put_user(uid, &infop->si_uid); if (!retval) retval = pid; put_task_struct(p); BUG_ON(!retval); return retval; } /* * Handle do_wait work for one task in a live, non-stopped state. * read_lock(&tasklist_lock) on entry. If we return zero, we still hold * the lock and this task is uninteresting. If we return nonzero, we have * released the lock and the system call should return. 
*/ static int wait_task_continued(struct wait_opts *wo, struct task_struct *p) { int retval; pid_t pid; uid_t uid; if (!unlikely(wo->wo_flags & WCONTINUED)) return 0; if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) return 0; spin_lock_irq(&p->sighand->siglock); /* Re-check with the lock held. */ if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) { spin_unlock_irq(&p->sighand->siglock); return 0; } if (!unlikely(wo->wo_flags & WNOWAIT)) p->signal->flags &= ~SIGNAL_STOP_CONTINUED; uid = from_kuid_munged(current_user_ns(), task_uid(p)); spin_unlock_irq(&p->sighand->siglock); pid = task_pid_vnr(p); get_task_struct(p); read_unlock(&tasklist_lock); if (!wo->wo_info) { retval = wo->wo_rusage ? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0; put_task_struct(p); if (!retval && wo->wo_stat) retval = put_user(0xffff, wo->wo_stat); if (!retval) retval = pid; } else { retval = wait_noreap_copyout(wo, p, pid, uid, CLD_CONTINUED, SIGCONT); BUG_ON(retval == 0); } return retval; } /* * Consider @p for a wait by @parent. * * -ECHILD should be in ->notask_error before the first call. * Returns nonzero for a final return, when we have unlocked tasklist_lock. * Returns zero if the search for a child should continue; * then ->notask_error is 0 if @p is an eligible child, * or another error from security_task_wait(), or still -ECHILD. */ static int wait_consider_task(struct wait_opts *wo, int ptrace, struct task_struct *p) { int ret = eligible_child(wo, p); if (!ret) return ret; ret = security_task_wait(p); if (unlikely(ret < 0)) { /* * If we have not yet seen any eligible child, * then let this error code replace -ECHILD. * A permission error will give the user a clue * to look for security policy problems, rather * than for mysterious wait bugs. 
*/ if (wo->notask_error) wo->notask_error = ret; return 0; } /* dead body doesn't have much to contribute */ if (unlikely(p->exit_state == EXIT_DEAD)) { /* * But do not ignore this task until the tracer does * wait_task_zombie()->do_notify_parent(). */ if (likely(!ptrace) && unlikely(ptrace_reparented(p))) wo->notask_error = 0; return 0; } /* slay zombie? */ if (p->exit_state == EXIT_ZOMBIE) { /* * A zombie ptracee is only visible to its ptracer. * Notification and reaping will be cascaded to the real * parent when the ptracer detaches. */ if (likely(!ptrace) && unlikely(p->ptrace)) { /* it will become visible, clear notask_error */ wo->notask_error = 0; return 0; } /* we don't reap group leaders with subthreads */ if (!delay_group_leader(p)) return wait_task_zombie(wo, p); /* * Allow access to stopped/continued state via zombie by * falling through. Clearing of notask_error is complex. * * When !@ptrace: * * If WEXITED is set, notask_error should naturally be * cleared. If not, subset of WSTOPPED|WCONTINUED is set, * so, if there are live subthreads, there are events to * wait for. If all subthreads are dead, it's still safe * to clear - this function will be called again in finite * amount time once all the subthreads are released and * will then return without clearing. * * When @ptrace: * * Stopped state is per-task and thus can't change once the * target task dies. Only continued and exited can happen. * Clear notask_error if WCONTINUED | WEXITED. */ if (likely(!ptrace) || (wo->wo_flags & (WCONTINUED | WEXITED))) wo->notask_error = 0; } else { /* * If @p is ptraced by a task in its real parent's group, * hide group stop/continued state when looking at @p as * the real parent; otherwise, a single stop can be * reported twice as group and ptrace stops. * * If a ptracer wants to distinguish the two events for its * own children, it should create a separate process which * takes the role of real parent. 
*/ if (likely(!ptrace) && p->ptrace && !ptrace_reparented(p)) return 0; /* * @p is alive and it's gonna stop, continue or exit, so * there always is something to wait for. */ wo->notask_error = 0; } /* * Wait for stopped. Depending on @ptrace, different stopped state * is used and the two don't interact with each other. */ ret = wait_task_stopped(wo, ptrace, p); if (ret) return ret; /* * Wait for continued. There's only one continued state and the * ptracer can consume it which can confuse the real parent. Don't * use WCONTINUED from ptracer. You don't need or want it. */ return wait_task_continued(wo, p); } /* * Do the work of do_wait() for one thread in the group, @tsk. * * -ECHILD should be in ->notask_error before the first call. * Returns nonzero for a final return, when we have unlocked tasklist_lock. * Returns zero if the search for a child should continue; then * ->notask_error is 0 if there were any eligible children, * or another error from security_task_wait(), or still -ECHILD. 
*/ static int do_wait_thread(struct wait_opts *wo, struct task_struct *tsk) { struct task_struct *p; list_for_each_entry(p, &tsk->children, sibling) { int ret = wait_consider_task(wo, 0, p); if (ret) return ret; } return 0; } static int ptrace_do_wait(struct wait_opts *wo, struct task_struct *tsk) { struct task_struct *p; list_for_each_entry(p, &tsk->ptraced, ptrace_entry) { int ret = wait_consider_task(wo, 1, p); if (ret) return ret; } return 0; } static int child_wait_callback(wait_queue_t *wait, unsigned mode, int sync, void *key) { struct wait_opts *wo = container_of(wait, struct wait_opts, child_wait); struct task_struct *p = key; if (!eligible_pid(wo, p)) return 0; if ((wo->wo_flags & __WNOTHREAD) && wait->private != p->parent) return 0; return default_wake_function(wait, mode, sync, key); } void __wake_up_parent(struct task_struct *p, struct task_struct *parent) { __wake_up_sync_key(&parent->signal->wait_chldexit, TASK_INTERRUPTIBLE, 1, p); } static long do_wait(struct wait_opts *wo) { struct task_struct *tsk; int retval; trace_sched_process_wait(wo->wo_pid); init_waitqueue_func_entry(&wo->child_wait, child_wait_callback); wo->child_wait.private = current; add_wait_queue(&current->signal->wait_chldexit, &wo->child_wait); repeat: /* * If there is nothing that can match our critiera just get out. * We will clear ->notask_error to zero if we see any child that * might later match our criteria, even if we are not able to reap * it yet. 
*/ wo->notask_error = -ECHILD; if ((wo->wo_type < PIDTYPE_MAX) && (!wo->wo_pid || hlist_empty(&wo->wo_pid->tasks[wo->wo_type]))) goto notask; set_current_state(TASK_INTERRUPTIBLE); read_lock(&tasklist_lock); tsk = current; do { retval = do_wait_thread(wo, tsk); if (retval) goto end; retval = ptrace_do_wait(wo, tsk); if (retval) goto end; if (wo->wo_flags & __WNOTHREAD) break; } while_each_thread(current, tsk); read_unlock(&tasklist_lock); notask: retval = wo->notask_error; if (!retval && !(wo->wo_flags & WNOHANG)) { retval = -ERESTARTSYS; if (!signal_pending(current)) { schedule(); goto repeat; } } end: __set_current_state(TASK_RUNNING); remove_wait_queue(&current->signal->wait_chldexit, &wo->child_wait); return retval; } SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *, infop, int, options, struct rusage __user *, ru) { struct wait_opts wo; struct pid *pid = NULL; enum pid_type type; long ret; if (options & ~(WNOHANG|WNOWAIT|WEXITED|WSTOPPED|WCONTINUED)) return -EINVAL; if (!(options & (WEXITED|WSTOPPED|WCONTINUED))) return -EINVAL; switch (which) { case P_ALL: type = PIDTYPE_MAX; break; case P_PID: type = PIDTYPE_PID; if (upid <= 0) return -EINVAL; break; case P_PGID: type = PIDTYPE_PGID; if (upid <= 0) return -EINVAL; break; default: return -EINVAL; } if (type < PIDTYPE_MAX) pid = find_get_pid(upid); wo.wo_type = type; wo.wo_pid = pid; wo.wo_flags = options; wo.wo_info = infop; wo.wo_stat = NULL; wo.wo_rusage = ru; ret = do_wait(&wo); if (ret > 0) { ret = 0; } else if (infop) { /* * For a WNOHANG return, clear out all the fields * we would set so the user can easily tell the * difference. 
*/ if (!ret) ret = put_user(0, &infop->si_signo); if (!ret) ret = put_user(0, &infop->si_errno); if (!ret) ret = put_user(0, &infop->si_code); if (!ret) ret = put_user(0, &infop->si_pid); if (!ret) ret = put_user(0, &infop->si_uid); if (!ret) ret = put_user(0, &infop->si_status); } put_pid(pid); return ret; } SYSCALL_DEFINE4(wait4, pid_t, upid, int __user *, stat_addr, int, options, struct rusage __user *, ru) { struct wait_opts wo; struct pid *pid = NULL; enum pid_type type; long ret; if (options & ~(WNOHANG|WUNTRACED|WCONTINUED| __WNOTHREAD|__WCLONE|__WALL)) return -EINVAL; if (upid == -1) type = PIDTYPE_MAX; else if (upid < 0) { type = PIDTYPE_PGID; pid = find_get_pid(-upid); } else if (upid == 0) { type = PIDTYPE_PGID; pid = get_task_pid(current, PIDTYPE_PGID); } else /* upid > 0 */ { type = PIDTYPE_PID; pid = find_get_pid(upid); } wo.wo_type = type; wo.wo_pid = pid; wo.wo_flags = options | WEXITED; wo.wo_info = NULL; wo.wo_stat = stat_addr; wo.wo_rusage = ru; ret = do_wait(&wo); put_pid(pid); return ret; } #ifdef __ARCH_WANT_SYS_WAITPID /* * sys_waitpid() remains for compatibility. waitpid() should be * implemented by calling sys_wait4() from libc.a. */ SYSCALL_DEFINE3(waitpid, pid_t, pid, int __user *, stat_addr, int, options) { return sys_wait4(pid, stat_addr, options, NULL); } #endif
gpl-2.0
knirps99/ffmz
target/linux/ubicom32/files/arch/ubicom32/mach-common/usb_tio.c
120
8282
/* * arch/ubicom32/mach-common/usb_tio.c * Linux side Ubicom USB TIO driver * * (C) Copyright 2009, Ubicom, Inc. * * This file is part of the Ubicom32 Linux Kernel Port. * * The Ubicom32 Linux Kernel Port is free software: you can redistribute * it and/or modify it under the terms of the GNU General Public License * as published by the Free Software Foundation, either version 2 of the * License, or (at your option) any later version. * * The Ubicom32 Linux Kernel Port is distributed in the hope that it * will be useful, but WITHOUT ANY WARRANTY; without even the implied * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with the Ubicom32 Linux Kernel Port. If not, * see <http://www.gnu.org/licenses/>. * * Ubicom32 implementation derived from (with many thanks): * arch/m68knommu * arch/blackfin * arch/parisc */ #include <linux/module.h> #include <linux/spinlock.h> #include <linux/slab.h> #include <asm/devtree.h> #include "usb_tio.h" #ifdef CONFIG_SMP static DEFINE_SPINLOCK(tio_lock); #define USB_TIO_LOCK(lock, flag) spin_lock_irqsave(lock, flag) #define USB_TIO_UNLOCK(lock, flag) spin_unlock_irqrestore(lock, flag) #define USB_TIO_LOCK_ISLOCKED(lock) spin_try_lock(lock) #else #define USB_TIO_LOCK(lock, flag) local_irq_save(flag) #define USB_TIO_UNLOCK(lock, flag) local_irq_restore(flag) #endif spinlock_t usb_tio_lock; /* * usb_tio_set_hrt_interrupt() */ static inline void usb_tio_set_hrt_interrupt(void) { ubicom32_set_interrupt(usb_node->dn.sendirq); } static inline void usb_tio_wait_hrt(void) { while (unlikely(usb_node->pdesc)); } #if defined(USB_TIO_DEBUG) static void usb_tio_request_verify_magic(volatile struct usb_tio_request *req) { BUG_ON(req->magic != USB_TIO_REQUEST_MAGIC2); } static void usb_tio_request_clear_magic(volatile struct usb_tio_request *req) { req->magic = 0; } #endif static void 
usb_tio_request_set_magic(volatile struct usb_tio_request *req) { req->magic = USB_TIO_REQUEST_MAGIC1; } /* * usb_tio_commit_request() */ static inline void usb_tio_commit_request(volatile struct usb_tio_request *request) { wmb(); usb_node->pdesc = request; /* * next thing to do is alway checking if (usb_node->pdesc == NULL) * to see if the request is done, so add a mb() here */ mb(); usb_tio_set_hrt_interrupt(); } /* * usb_tio_read_u16() * Synchronously read 16 bits. */ u8_t usb_tio_read_u16(u32_t address, u16_t *data) { volatile struct usb_tio_request *tio_req = &usb_node->request; unsigned long flag; /* * Wait for any previous request to complete and then make this request. */ USB_TIO_LOCK(&tio_lock, flag); usb_tio_wait_hrt(); /* * Fill in the request. */ tio_req->address = address; tio_req->cmd = USB_TIO_READ16_SYNC; USB_TIO_REQUEST_SET_MAGIC(tio_req); usb_tio_commit_request(tio_req); /* * Wait for the result to show up. */ usb_tio_wait_hrt(); USB_TIO_REQUEST_VERIFY_MAGIC(tio_req); *data = (u16_t)tio_req->data; USB_TIO_REQUEST_CLEAR_MAGIC(tio_req); USB_TIO_UNLOCK(&tio_lock, flag); return USB_TIO_OK; } /* * usb_tio_read_u8() * Synchronously read 16 bits. */ u8_t usb_tio_read_u8(u32_t address, u8_t *data) { volatile struct usb_tio_request *tio_req = &usb_node->request; unsigned long flag; /* * Wait for any previous request to complete and then make this request. */ USB_TIO_LOCK(&tio_lock, flag); usb_tio_wait_hrt(); /* * Fill in the request. */ tio_req->address = address; tio_req->cmd = USB_TIO_READ8_SYNC; USB_TIO_REQUEST_SET_MAGIC(tio_req); /* * commit the request */ usb_tio_commit_request(tio_req); /* * Wait for the result to show up. */ usb_tio_wait_hrt(); USB_TIO_REQUEST_VERIFY_MAGIC(tio_req); *data = (u8_t)tio_req->data; USB_TIO_REQUEST_CLEAR_MAGIC(tio_req); USB_TIO_UNLOCK(&tio_lock, flag); return USB_TIO_OK; } /* * usb_tio_write_u16() * Asynchronously write 16 bits. 
*/ u8_t usb_tio_write_u16(u32_t address, u16_t data) { volatile struct usb_tio_request *tio_req = &usb_node->request; unsigned long flag; /* * Wait for any previous write or pending read to complete. */ USB_TIO_LOCK(&tio_lock, flag); usb_tio_wait_hrt(); tio_req->address = address; tio_req->data = data; tio_req->cmd = USB_TIO_WRITE16_ASYNC; USB_TIO_REQUEST_SET_MAGIC(tio_req); /* * commit the request */ usb_tio_commit_request(tio_req); USB_TIO_UNLOCK(&tio_lock, flag); return USB_TIO_OK; } /* * usb_tio_write_u8() * Asynchronously write 8 bits. */ u8_t usb_tio_write_u8(u32_t address, u8_t data) { volatile struct usb_tio_request *tio_req = &usb_node->request; unsigned long flag; /* * Wait for any previous write or pending read to complete. */ USB_TIO_LOCK(&tio_lock, flag); usb_tio_wait_hrt(); tio_req->address = address; tio_req->data = data; tio_req->cmd = USB_TIO_WRITE8_ASYNC; USB_TIO_REQUEST_SET_MAGIC(tio_req); /* * commit the request */ usb_tio_commit_request(tio_req); USB_TIO_UNLOCK(&tio_lock, flag); return USB_TIO_OK; } /* * usb_tio_read_fifo() * Synchronously read FIFO. */ u8_t usb_tio_read_fifo(u32_t address, u32_t buffer, u32_t bytes) { volatile struct usb_tio_request *tio_req = &usb_node->request; unsigned long flag; /* * Wait for any previous request to complete and then make this request. */ USB_TIO_LOCK(&tio_lock, flag); usb_tio_wait_hrt(); /* * Fill in the request. */ tio_req->address = address; tio_req->cmd = USB_TIO_READ_FIFO_SYNC; tio_req->buffer = buffer; tio_req->transfer_length = bytes; USB_TIO_REQUEST_SET_MAGIC(tio_req); /* * commit the request */ usb_tio_commit_request(tio_req); /* * Wait for the result to show up. */ usb_tio_wait_hrt(); USB_TIO_REQUEST_VERIFY_MAGIC(tio_req); USB_TIO_REQUEST_CLEAR_MAGIC(tio_req); USB_TIO_UNLOCK(&tio_lock, flag); return USB_TIO_OK; } /* * usb_tio_write_fifo() * Synchronously write 32 bits. 
*/ u8_t usb_tio_write_fifo(u32_t address, u32_t buffer, u32_t bytes) { volatile struct usb_tio_request *tio_req = &usb_node->request; unsigned long flag; USB_TIO_LOCK(&tio_lock, flag); usb_tio_wait_hrt(); tio_req->address = address; tio_req->buffer = buffer; tio_req->cmd = USB_TIO_WRITE_FIFO_SYNC; tio_req->transfer_length = bytes; USB_TIO_REQUEST_SET_MAGIC(tio_req); /* * commit the request */ usb_tio_commit_request(tio_req); /* * Wait for the result to show up. */ usb_tio_wait_hrt(); USB_TIO_REQUEST_VERIFY_MAGIC(tio_req); USB_TIO_REQUEST_CLEAR_MAGIC(tio_req); USB_TIO_UNLOCK(&tio_lock, flag); return USB_TIO_OK; } /* * usb_tio_write_fifo_async() * Asynchronously write 32 bits. */ u8_t usb_tio_write_fifo_async(u32_t address, u32_t buffer, u32_t bytes) { volatile struct usb_tio_request *tio_req = &usb_node->request; unsigned long flag; USB_TIO_LOCK(&tio_lock, flag); usb_tio_wait_hrt(); tio_req->address = address; /* * Is it necessary to make a local copy of the buffer? Any chance the URB is aborted before TIO finished the FIFO write? 
*/ tio_req->buffer = buffer; tio_req->cmd = USB_TIO_WRITE_FIFO_SYNC; tio_req->transfer_length = bytes; USB_TIO_REQUEST_SET_MAGIC(tio_req); /* * commit the request */ usb_tio_commit_request(tio_req); USB_TIO_UNLOCK(&tio_lock, flag); return USB_TIO_OK; } /* * usb_tio_read_int_status() * read and clear the interrupt status registers */ void usb_tio_read_int_status(u8_t *int_usb, u16_t *int_tx, u16_t *int_rx) { /* * clear the interrupt must be syncronized with the TIO thread to prevent the racing condiiton * that TIO thread try to set it at same time */ asm volatile ( "1: bset (%0), (%0), #0 \n\t" \ " jmpne.f 1b \n\t" \ : : "a" (&usb_node->usb_vp_control) : "memory", "cc" ); *int_usb = usb_node->usb_vp_hw_int_usb; *int_tx = cpu_to_le16(usb_node->usb_vp_hw_int_tx); *int_rx = cpu_to_le16(usb_node->usb_vp_hw_int_rx); //printk(KERN_INFO "int read %x, %x, %x\n", *int_usb, *int_tx, *int_rx); /* * The interrupt status register is read-clean, so clear it now */ usb_node->usb_vp_hw_int_usb = 0; usb_node->usb_vp_hw_int_tx = 0; usb_node->usb_vp_hw_int_rx = 0; /* * release the lock bit */ usb_node->usb_vp_control &= 0xfffe; }
gpl-2.0
windxixi/ef39s_kernel
drivers/char/nwflash.c
888
13926
/* * Flash memory interface rev.5 driver for the Intel * Flash chips used on the NetWinder. * * 20/08/2000 RMK use __ioremap to map flash into virtual memory * make a few more places use "volatile" * 22/05/2001 RMK - Lock read against write * - merge printk level changes (with mods) from Alan Cox. * - use *ppos as the file position, not file->f_pos. * - fix check for out of range pos and r/w size * * Please note that we are tampering with the only flash chip in the * machine, which contains the bootup code. We therefore have the * power to convert these machines into doorstops... */ #include <linux/module.h> #include <linux/types.h> #include <linux/fs.h> #include <linux/errno.h> #include <linux/mm.h> #include <linux/delay.h> #include <linux/proc_fs.h> #include <linux/miscdevice.h> #include <linux/spinlock.h> #include <linux/rwsem.h> #include <linux/init.h> #include <linux/smp_lock.h> #include <linux/mutex.h> #include <linux/jiffies.h> #include <asm/hardware/dec21285.h> #include <asm/io.h> #include <asm/leds.h> #include <asm/mach-types.h> #include <asm/system.h> #include <asm/uaccess.h> /*****************************************************************************/ #include <asm/nwflash.h> #define NWFLASH_VERSION "6.4" static void kick_open(void); static int get_flash_id(void); static int erase_block(int nBlock); static int write_block(unsigned long p, const char __user *buf, int count); #define KFLASH_SIZE 1024*1024 //1 Meg #define KFLASH_SIZE4 4*1024*1024 //4 Meg #define KFLASH_ID 0x89A6 //Intel flash #define KFLASH_ID4 0xB0D4 //Intel flash 4Meg static int flashdebug; //if set - we will display progress msgs static int gbWriteEnable; static int gbWriteBase64Enable; static volatile unsigned char *FLASH_BASE; static int gbFlashSize = KFLASH_SIZE; static DEFINE_MUTEX(nwflash_mutex); static int get_flash_id(void) { volatile unsigned int c1, c2; /* * try to get flash chip ID */ kick_open(); c2 = inb(0x80); *(volatile unsigned char *) (FLASH_BASE + 0x8000) = 0x90; 
udelay(15); c1 = *(volatile unsigned char *) FLASH_BASE; c2 = inb(0x80); /* * on 4 Meg flash the second byte is actually at offset 2... */ if (c1 == 0xB0) c2 = *(volatile unsigned char *) (FLASH_BASE + 2); else c2 = *(volatile unsigned char *) (FLASH_BASE + 1); c2 += (c1 << 8); /* * set it back to read mode */ *(volatile unsigned char *) (FLASH_BASE + 0x8000) = 0xFF; if (c2 == KFLASH_ID4) gbFlashSize = KFLASH_SIZE4; return c2; } static long flash_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) { lock_kernel(); switch (cmd) { case CMD_WRITE_DISABLE: gbWriteBase64Enable = 0; gbWriteEnable = 0; break; case CMD_WRITE_ENABLE: gbWriteEnable = 1; break; case CMD_WRITE_BASE64K_ENABLE: gbWriteBase64Enable = 1; break; default: gbWriteBase64Enable = 0; gbWriteEnable = 0; unlock_kernel(); return -EINVAL; } unlock_kernel(); return 0; } static ssize_t flash_read(struct file *file, char __user *buf, size_t size, loff_t *ppos) { ssize_t ret; if (flashdebug) printk(KERN_DEBUG "flash_read: flash_read: offset=0x%llx, " "buffer=%p, count=0x%zx.\n", *ppos, buf, size); /* * We now lock against reads and writes. --rmk */ if (mutex_lock_interruptible(&nwflash_mutex)) return -ERESTARTSYS; ret = simple_read_from_buffer(buf, size, ppos, (void *)FLASH_BASE, gbFlashSize); mutex_unlock(&nwflash_mutex); return ret; } static ssize_t flash_write(struct file *file, const char __user *buf, size_t size, loff_t * ppos) { unsigned long p = *ppos; unsigned int count = size; int written; int nBlock, temp, rc; int i, j; if (flashdebug) printk("flash_write: offset=0x%lX, buffer=0x%p, count=0x%X.\n", p, buf, count); if (!gbWriteEnable) return -EINVAL; if (p < 64 * 1024 && (!gbWriteBase64Enable)) return -EINVAL; /* * check for out of range pos or count */ if (p >= gbFlashSize) return count ? -ENXIO : 0; if (count > gbFlashSize - p) count = gbFlashSize - p; if (!access_ok(VERIFY_READ, buf, count)) return -EFAULT; /* * We now lock against reads and writes. 
--rmk */ if (mutex_lock_interruptible(&nwflash_mutex)) return -ERESTARTSYS; written = 0; leds_event(led_claim); leds_event(led_green_on); nBlock = (int) p >> 16; //block # of 64K bytes /* * # of 64K blocks to erase and write */ temp = ((int) (p + count) >> 16) - nBlock + 1; /* * write ends at exactly 64k boundary? */ if (((int) (p + count) & 0xFFFF) == 0) temp -= 1; if (flashdebug) printk(KERN_DEBUG "flash_write: writing %d block(s) " "starting at %d.\n", temp, nBlock); for (; temp; temp--, nBlock++) { if (flashdebug) printk(KERN_DEBUG "flash_write: erasing block %d.\n", nBlock); /* * first we have to erase the block(s), where we will write... */ i = 0; j = 0; RetryBlock: do { rc = erase_block(nBlock); i++; } while (rc && i < 10); if (rc) { printk(KERN_ERR "flash_write: erase error %x\n", rc); break; } if (flashdebug) printk(KERN_DEBUG "flash_write: writing offset %lX, " "from buf %p, bytes left %X.\n", p, buf, count - written); /* * write_block will limit write to space left in this block */ rc = write_block(p, buf, count - written); j++; /* * if somehow write verify failed? Can't happen?? */ if (!rc) { /* * retry up to 10 times */ if (j < 10) goto RetryBlock; else /* * else quit with error... */ rc = -1; } if (rc < 0) { printk(KERN_ERR "flash_write: write error %X\n", rc); break; } p += rc; buf += rc; written += rc; *ppos += rc; if (flashdebug) printk(KERN_DEBUG "flash_write: written 0x%X bytes OK.\n", written); } /* * restore reg on exit */ leds_event(led_release); mutex_unlock(&nwflash_mutex); return written; } /* * The memory devices use the full 32/64 bits of the offset, and so we cannot * check against negative addresses: they are ok. The return value is weird, * though, in that case (0). * * also note that seeking relative to the "end of file" isn't supported: * it has no meaning, so it returns -EINVAL. 
*/ static loff_t flash_llseek(struct file *file, loff_t offset, int orig) { loff_t ret; lock_kernel(); if (flashdebug) printk(KERN_DEBUG "flash_llseek: offset=0x%X, orig=0x%X.\n", (unsigned int) offset, orig); switch (orig) { case 0: if (offset < 0) { ret = -EINVAL; break; } if ((unsigned int) offset > gbFlashSize) { ret = -EINVAL; break; } file->f_pos = (unsigned int) offset; ret = file->f_pos; break; case 1: if ((file->f_pos + offset) > gbFlashSize) { ret = -EINVAL; break; } if ((file->f_pos + offset) < 0) { ret = -EINVAL; break; } file->f_pos += offset; ret = file->f_pos; break; default: ret = -EINVAL; } unlock_kernel(); return ret; } /* * assume that main Write routine did the parameter checking... * so just go ahead and erase, what requested! */ static int erase_block(int nBlock) { volatile unsigned int c1; volatile unsigned char *pWritePtr; unsigned long timeout; int temp, temp1; /* * orange LED == erase */ leds_event(led_amber_on); /* * reset footbridge to the correct offset 0 (...0..3) */ *CSR_ROMWRITEREG = 0; /* * dummy ROM read */ c1 = *(volatile unsigned char *) (FLASH_BASE + 0x8000); kick_open(); /* * reset status if old errors */ *(volatile unsigned char *) (FLASH_BASE + 0x8000) = 0x50; /* * erase a block... * aim at the middle of a current block... 
*/ pWritePtr = (unsigned char *) ((unsigned int) (FLASH_BASE + 0x8000 + (nBlock << 16))); /* * dummy read */ c1 = *pWritePtr; kick_open(); /* * erase */ *(volatile unsigned char *) pWritePtr = 0x20; /* * confirm */ *(volatile unsigned char *) pWritePtr = 0xD0; /* * wait 10 ms */ msleep(10); /* * wait while erasing in process (up to 10 sec) */ timeout = jiffies + 10 * HZ; c1 = 0; while (!(c1 & 0x80) && time_before(jiffies, timeout)) { msleep(10); /* * read any address */ c1 = *(volatile unsigned char *) (pWritePtr); // printk("Flash_erase: status=%X.\n",c1); } /* * set flash for normal read access */ kick_open(); // *(volatile unsigned char*)(FLASH_BASE+0x8000) = 0xFF; *(volatile unsigned char *) pWritePtr = 0xFF; //back to normal operation /* * check if erase errors were reported */ if (c1 & 0x20) { printk(KERN_ERR "flash_erase: err at %p\n", pWritePtr); /* * reset error */ *(volatile unsigned char *) (FLASH_BASE + 0x8000) = 0x50; return -2; } /* * just to make sure - verify if erased OK... */ msleep(10); pWritePtr = (unsigned char *) ((unsigned int) (FLASH_BASE + (nBlock << 16))); for (temp = 0; temp < 16 * 1024; temp++, pWritePtr += 4) { if ((temp1 = *(volatile unsigned int *) pWritePtr) != 0xFFFFFFFF) { printk(KERN_ERR "flash_erase: verify err at %p = %X\n", pWritePtr, temp1); return -1; } } return 0; } /* * write_block will limit number of bytes written to the space in this block */ static int write_block(unsigned long p, const char __user *buf, int count) { volatile unsigned int c1; volatile unsigned int c2; unsigned char *pWritePtr; unsigned int uAddress; unsigned int offset; unsigned long timeout; unsigned long timeout1; /* * red LED == write */ leds_event(led_amber_off); leds_event(led_red_on); pWritePtr = (unsigned char *) ((unsigned int) (FLASH_BASE + p)); /* * check if write will end in this block.... 
*/ offset = p & 0xFFFF; if (offset + count > 0x10000) count = 0x10000 - offset; /* * wait up to 30 sec for this block */ timeout = jiffies + 30 * HZ; for (offset = 0; offset < count; offset++, pWritePtr++) { uAddress = (unsigned int) pWritePtr; uAddress &= 0xFFFFFFFC; if (__get_user(c2, buf + offset)) return -EFAULT; WriteRetry: /* * dummy read */ c1 = *(volatile unsigned char *) (FLASH_BASE + 0x8000); /* * kick open the write gate */ kick_open(); /* * program footbridge to the correct offset...0..3 */ *CSR_ROMWRITEREG = (unsigned int) pWritePtr & 3; /* * write cmd */ *(volatile unsigned char *) (uAddress) = 0x40; /* * data to write */ *(volatile unsigned char *) (uAddress) = c2; /* * get status */ *(volatile unsigned char *) (FLASH_BASE + 0x10000) = 0x70; c1 = 0; /* * wait up to 1 sec for this byte */ timeout1 = jiffies + 1 * HZ; /* * while not ready... */ while (!(c1 & 0x80) && time_before(jiffies, timeout1)) c1 = *(volatile unsigned char *) (FLASH_BASE + 0x8000); /* * if timeout getting status */ if (time_after_eq(jiffies, timeout1)) { kick_open(); /* * reset err */ *(volatile unsigned char *) (FLASH_BASE + 0x8000) = 0x50; goto WriteRetry; } /* * switch on read access, as a default flash operation mode */ kick_open(); /* * read access */ *(volatile unsigned char *) (FLASH_BASE + 0x8000) = 0xFF; /* * if hardware reports an error writing, and not timeout - * reset the chip and retry */ if (c1 & 0x10) { kick_open(); /* * reset err */ *(volatile unsigned char *) (FLASH_BASE + 0x8000) = 0x50; /* * before timeout? 
*/ if (time_before(jiffies, timeout)) { if (flashdebug) printk(KERN_DEBUG "write_block: Retrying write at 0x%X)n", pWritePtr - FLASH_BASE); /* * no LED == waiting */ leds_event(led_amber_off); /* * wait couple ms */ msleep(10); /* * red LED == write */ leds_event(led_red_on); goto WriteRetry; } else { printk(KERN_ERR "write_block: timeout at 0x%X\n", pWritePtr - FLASH_BASE); /* * return error -2 */ return -2; } } } /* * green LED == read/verify */ leds_event(led_amber_off); leds_event(led_green_on); msleep(10); pWritePtr = (unsigned char *) ((unsigned int) (FLASH_BASE + p)); for (offset = 0; offset < count; offset++) { char c, c1; if (__get_user(c, buf)) return -EFAULT; buf++; if ((c1 = *pWritePtr++) != c) { printk(KERN_ERR "write_block: verify error at 0x%X (%02X!=%02X)\n", pWritePtr - FLASH_BASE, c1, c); return 0; } } return count; } static void kick_open(void) { unsigned long flags; /* * we want to write a bit pattern XXX1 to Xilinx to enable * the write gate, which will be open for about the next 2ms. */ spin_lock_irqsave(&nw_gpio_lock, flags); nw_cpld_modify(CPLD_FLASH_WR_ENABLE, CPLD_FLASH_WR_ENABLE); spin_unlock_irqrestore(&nw_gpio_lock, flags); /* * let the ISA bus to catch on... 
*/ udelay(25); } static const struct file_operations flash_fops = { .owner = THIS_MODULE, .llseek = flash_llseek, .read = flash_read, .write = flash_write, .unlocked_ioctl = flash_ioctl, }; static struct miscdevice flash_miscdev = { FLASH_MINOR, "nwflash", &flash_fops }; static int __init nwflash_init(void) { int ret = -ENODEV; if (machine_is_netwinder()) { int id; FLASH_BASE = ioremap(DC21285_FLASH, KFLASH_SIZE4); if (!FLASH_BASE) goto out; id = get_flash_id(); if ((id != KFLASH_ID) && (id != KFLASH_ID4)) { ret = -ENXIO; iounmap((void *)FLASH_BASE); printk("Flash: incorrect ID 0x%04X.\n", id); goto out; } printk("Flash ROM driver v.%s, flash device ID 0x%04X, size %d Mb.\n", NWFLASH_VERSION, id, gbFlashSize / (1024 * 1024)); ret = misc_register(&flash_miscdev); if (ret < 0) { iounmap((void *)FLASH_BASE); } } out: return ret; } static void __exit nwflash_exit(void) { misc_deregister(&flash_miscdev); iounmap((void *)FLASH_BASE); } MODULE_LICENSE("GPL"); module_param(flashdebug, bool, 0644); module_init(nwflash_init); module_exit(nwflash_exit);
gpl-2.0
ngxson/android_kernel_sony_msm8x27
drivers/staging/prima/CORE/SME/src/ccm/ccmLogDump.c
1400
2599
/* * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. * * Previously licensed under the ISC license by Qualcomm Atheros, Inc. * * * Permission to use, copy, modify, and/or distribute this software for * any purpose with or without fee is hereby granted, provided that the * above copyright notice and this permission notice appear in all * copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR * PERFORMANCE OF THIS SOFTWARE. */ /* * Copyright (c) 2012, The Linux Foundation. All rights reserved. * * Previously licensed under the ISC license by Qualcomm Atheros, Inc. * * * Permission to use, copy, modify, and/or distribute this software for * any purpose with or without fee is hereby granted, provided that the * above copyright notice and this permission notice appear in all * copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR * PERFORMANCE OF THIS SOFTWARE. */ /*============================================================================ ccmLogDump.c Implements the dump commands specific to the ccm module. Copyright (c) 2007 QUALCOMM Incorporated. All Rights Reserved. 
Qualcomm Confidential and Proprietary ============================================================================*/ #include "aniGlobal.h" #include "logDump.h" #if defined(ANI_LOGDUMP) static tDumpFuncEntry ccmMenuDumpTable[] = { {0, "CCM (861-870)", NULL}, //{861, "CCM: CCM testing ", dump_ccm} }; void ccmDumpInit(tHalHandle hHal) { logDumpRegisterTable( (tpAniSirGlobal) hHal, &ccmMenuDumpTable[0], sizeof(ccmMenuDumpTable)/sizeof(ccmMenuDumpTable[0]) ); } #endif //#if defined(ANI_LOGDUMP)
gpl-2.0
JooJooBee666/android_kernel_motorola_omap4-common
arch/mips/kernel/perf_event.c
2424
14191
/* * Linux performance counter support for MIPS. * * Copyright (C) 2010 MIPS Technologies, Inc. * Author: Deng-Cheng Zhu * * This code is based on the implementation for ARM, which is in turn * based on the sparc64 perf event code and the x86 code. Performance * counter access is based on the MIPS Oprofile code. And the callchain * support references the code of MIPS stacktrace.c. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/cpumask.h> #include <linux/interrupt.h> #include <linux/smp.h> #include <linux/kernel.h> #include <linux/perf_event.h> #include <linux/uaccess.h> #include <asm/irq.h> #include <asm/irq_regs.h> #include <asm/stacktrace.h> #include <asm/time.h> /* For perf_irq */ /* These are for 32bit counters. For 64bit ones, define them accordingly. */ #define MAX_PERIOD ((1ULL << 32) - 1) #define VALID_COUNT 0x7fffffff #define TOTAL_BITS 32 #define HIGHEST_BIT 31 #define MIPS_MAX_HWEVENTS 4 struct cpu_hw_events { /* Array of events on this cpu. */ struct perf_event *events[MIPS_MAX_HWEVENTS]; /* * Set the bit (indexed by the counter number) when the counter * is used for an event. */ unsigned long used_mask[BITS_TO_LONGS(MIPS_MAX_HWEVENTS)]; /* * The borrowed MSB for the performance counter. A MIPS performance * counter uses its bit 31 (for 32bit counters) or bit 63 (for 64bit * counters) as a factor of determining whether a counter overflow * should be signaled. So here we use a separate MSB for each * counter to make things easy. */ unsigned long msbs[BITS_TO_LONGS(MIPS_MAX_HWEVENTS)]; /* * Software copy of the control register for each performance counter. * MIPS CPUs vary in performance counters. They use this differently, * and even may not use it. 
*/ unsigned int saved_ctrl[MIPS_MAX_HWEVENTS]; }; DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .saved_ctrl = {0}, }; /* The description of MIPS performance events. */ struct mips_perf_event { unsigned int event_id; /* * MIPS performance counters are indexed starting from 0. * CNTR_EVEN indicates the indexes of the counters to be used are * even numbers. */ unsigned int cntr_mask; #define CNTR_EVEN 0x55555555 #define CNTR_ODD 0xaaaaaaaa #ifdef CONFIG_MIPS_MT_SMP enum { T = 0, V = 1, P = 2, } range; #else #define T #define V #define P #endif }; static struct mips_perf_event raw_event; static DEFINE_MUTEX(raw_event_mutex); #define UNSUPPORTED_PERF_EVENT_ID 0xffffffff #define C(x) PERF_COUNT_HW_CACHE_##x struct mips_pmu { const char *name; int irq; irqreturn_t (*handle_irq)(int irq, void *dev); int (*handle_shared_irq)(void); void (*start)(void); void (*stop)(void); int (*alloc_counter)(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc); u64 (*read_counter)(unsigned int idx); void (*write_counter)(unsigned int idx, u64 val); void (*enable_event)(struct hw_perf_event *evt, int idx); void (*disable_event)(int idx); const struct mips_perf_event *(*map_raw_event)(u64 config); const struct mips_perf_event (*general_event_map)[PERF_COUNT_HW_MAX]; const struct mips_perf_event (*cache_event_map) [PERF_COUNT_HW_CACHE_MAX] [PERF_COUNT_HW_CACHE_OP_MAX] [PERF_COUNT_HW_CACHE_RESULT_MAX]; unsigned int num_counters; }; static const struct mips_pmu *mipspmu; static int mipspmu_event_set_period(struct perf_event *event, struct hw_perf_event *hwc, int idx) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); s64 left = local64_read(&hwc->period_left); s64 period = hwc->sample_period; int ret = 0; u64 uleft; unsigned long flags; if (unlikely(left <= -period)) { left = period; local64_set(&hwc->period_left, left); hwc->last_period = period; ret = 1; } if (unlikely(left <= 0)) { left += period; local64_set(&hwc->period_left, left); hwc->last_period = period; ret = 
1; } if (left > (s64)MAX_PERIOD) left = MAX_PERIOD; local64_set(&hwc->prev_count, (u64)-left); local_irq_save(flags); uleft = (u64)(-left) & MAX_PERIOD; uleft > VALID_COUNT ? set_bit(idx, cpuc->msbs) : clear_bit(idx, cpuc->msbs); mipspmu->write_counter(idx, (u64)(-left) & VALID_COUNT); local_irq_restore(flags); perf_event_update_userpage(event); return ret; } static void mipspmu_event_update(struct perf_event *event, struct hw_perf_event *hwc, int idx) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); unsigned long flags; int shift = 64 - TOTAL_BITS; s64 prev_raw_count, new_raw_count; u64 delta; again: prev_raw_count = local64_read(&hwc->prev_count); local_irq_save(flags); /* Make the counter value be a "real" one. */ new_raw_count = mipspmu->read_counter(idx); if (new_raw_count & (test_bit(idx, cpuc->msbs) << HIGHEST_BIT)) { new_raw_count &= VALID_COUNT; clear_bit(idx, cpuc->msbs); } else new_raw_count |= (test_bit(idx, cpuc->msbs) << HIGHEST_BIT); local_irq_restore(flags); if (local64_cmpxchg(&hwc->prev_count, prev_raw_count, new_raw_count) != prev_raw_count) goto again; delta = (new_raw_count << shift) - (prev_raw_count << shift); delta >>= shift; local64_add(delta, &event->count); local64_sub(delta, &hwc->period_left); return; } static void mipspmu_start(struct perf_event *event, int flags) { struct hw_perf_event *hwc = &event->hw; if (!mipspmu) return; if (flags & PERF_EF_RELOAD) WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE)); hwc->state = 0; /* Set the period for the event. */ mipspmu_event_set_period(event, hwc, hwc->idx); /* Enable the event. */ mipspmu->enable_event(hwc, hwc->idx); } static void mipspmu_stop(struct perf_event *event, int flags) { struct hw_perf_event *hwc = &event->hw; if (!mipspmu) return; if (!(hwc->state & PERF_HES_STOPPED)) { /* We are working on a local event. 
*/ mipspmu->disable_event(hwc->idx); barrier(); mipspmu_event_update(event, hwc, hwc->idx); hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; } } static int mipspmu_add(struct perf_event *event, int flags) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); struct hw_perf_event *hwc = &event->hw; int idx; int err = 0; perf_pmu_disable(event->pmu); /* To look for a free counter for this event. */ idx = mipspmu->alloc_counter(cpuc, hwc); if (idx < 0) { err = idx; goto out; } /* * If there is an event in the counter we are going to use then * make sure it is disabled. */ event->hw.idx = idx; mipspmu->disable_event(idx); cpuc->events[idx] = event; hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; if (flags & PERF_EF_START) mipspmu_start(event, PERF_EF_RELOAD); /* Propagate our changes to the userspace mapping. */ perf_event_update_userpage(event); out: perf_pmu_enable(event->pmu); return err; } static void mipspmu_del(struct perf_event *event, int flags) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); struct hw_perf_event *hwc = &event->hw; int idx = hwc->idx; WARN_ON(idx < 0 || idx >= mipspmu->num_counters); mipspmu_stop(event, PERF_EF_UPDATE); cpuc->events[idx] = NULL; clear_bit(idx, cpuc->used_mask); perf_event_update_userpage(event); } static void mipspmu_read(struct perf_event *event) { struct hw_perf_event *hwc = &event->hw; /* Don't read disabled counters! */ if (hwc->idx < 0) return; mipspmu_event_update(event, hwc, hwc->idx); } static void mipspmu_enable(struct pmu *pmu) { if (mipspmu) mipspmu->start(); } static void mipspmu_disable(struct pmu *pmu) { if (mipspmu) mipspmu->stop(); } static atomic_t active_events = ATOMIC_INIT(0); static DEFINE_MUTEX(pmu_reserve_mutex); static int (*save_perf_irq)(void); static int mipspmu_get_irq(void) { int err; if (mipspmu->irq >= 0) { /* Request my own irq handler. 
*/ err = request_irq(mipspmu->irq, mipspmu->handle_irq, IRQF_DISABLED | IRQF_NOBALANCING, "mips_perf_pmu", NULL); if (err) { pr_warning("Unable to request IRQ%d for MIPS " "performance counters!\n", mipspmu->irq); } } else if (cp0_perfcount_irq < 0) { /* * We are sharing the irq number with the timer interrupt. */ save_perf_irq = perf_irq; perf_irq = mipspmu->handle_shared_irq; err = 0; } else { pr_warning("The platform hasn't properly defined its " "interrupt controller.\n"); err = -ENOENT; } return err; } static void mipspmu_free_irq(void) { if (mipspmu->irq >= 0) free_irq(mipspmu->irq, NULL); else if (cp0_perfcount_irq < 0) perf_irq = save_perf_irq; } /* * mipsxx/rm9000/loongson2 have different performance counters, they have * specific low-level init routines. */ static void reset_counters(void *arg); static int __hw_perf_event_init(struct perf_event *event); static void hw_perf_event_destroy(struct perf_event *event) { if (atomic_dec_and_mutex_lock(&active_events, &pmu_reserve_mutex)) { /* * We must not call the destroy function with interrupts * disabled. 
*/ on_each_cpu(reset_counters, (void *)(long)mipspmu->num_counters, 1); mipspmu_free_irq(); mutex_unlock(&pmu_reserve_mutex); } } static int mipspmu_event_init(struct perf_event *event) { int err = 0; switch (event->attr.type) { case PERF_TYPE_RAW: case PERF_TYPE_HARDWARE: case PERF_TYPE_HW_CACHE: break; default: return -ENOENT; } if (!mipspmu || event->cpu >= nr_cpumask_bits || (event->cpu >= 0 && !cpu_online(event->cpu))) return -ENODEV; if (!atomic_inc_not_zero(&active_events)) { if (atomic_read(&active_events) > MIPS_MAX_HWEVENTS) { atomic_dec(&active_events); return -ENOSPC; } mutex_lock(&pmu_reserve_mutex); if (atomic_read(&active_events) == 0) err = mipspmu_get_irq(); if (!err) atomic_inc(&active_events); mutex_unlock(&pmu_reserve_mutex); } if (err) return err; err = __hw_perf_event_init(event); if (err) hw_perf_event_destroy(event); return err; } static struct pmu pmu = { .pmu_enable = mipspmu_enable, .pmu_disable = mipspmu_disable, .event_init = mipspmu_event_init, .add = mipspmu_add, .del = mipspmu_del, .start = mipspmu_start, .stop = mipspmu_stop, .read = mipspmu_read, }; static inline unsigned int mipspmu_perf_event_encode(const struct mips_perf_event *pev) { /* * Top 8 bits for range, next 16 bits for cntr_mask, lowest 8 bits for * event_id. */ #ifdef CONFIG_MIPS_MT_SMP return ((unsigned int)pev->range << 24) | (pev->cntr_mask & 0xffff00) | (pev->event_id & 0xff); #else return (pev->cntr_mask & 0xffff00) | (pev->event_id & 0xff); #endif } static const struct mips_perf_event * mipspmu_map_general_event(int idx) { const struct mips_perf_event *pev; pev = ((*mipspmu->general_event_map)[idx].event_id == UNSUPPORTED_PERF_EVENT_ID ? 
ERR_PTR(-EOPNOTSUPP) : &(*mipspmu->general_event_map)[idx]); return pev; } static const struct mips_perf_event * mipspmu_map_cache_event(u64 config) { unsigned int cache_type, cache_op, cache_result; const struct mips_perf_event *pev; cache_type = (config >> 0) & 0xff; if (cache_type >= PERF_COUNT_HW_CACHE_MAX) return ERR_PTR(-EINVAL); cache_op = (config >> 8) & 0xff; if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX) return ERR_PTR(-EINVAL); cache_result = (config >> 16) & 0xff; if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX) return ERR_PTR(-EINVAL); pev = &((*mipspmu->cache_event_map) [cache_type] [cache_op] [cache_result]); if (pev->event_id == UNSUPPORTED_PERF_EVENT_ID) return ERR_PTR(-EOPNOTSUPP); return pev; } static int validate_event(struct cpu_hw_events *cpuc, struct perf_event *event) { struct hw_perf_event fake_hwc = event->hw; /* Allow mixed event group. So return 1 to pass validation. */ if (event->pmu != &pmu || event->state <= PERF_EVENT_STATE_OFF) return 1; return mipspmu->alloc_counter(cpuc, &fake_hwc) >= 0; } static int validate_group(struct perf_event *event) { struct perf_event *sibling, *leader = event->group_leader; struct cpu_hw_events fake_cpuc; memset(&fake_cpuc, 0, sizeof(fake_cpuc)); if (!validate_event(&fake_cpuc, leader)) return -ENOSPC; list_for_each_entry(sibling, &leader->sibling_list, group_entry) { if (!validate_event(&fake_cpuc, sibling)) return -ENOSPC; } if (!validate_event(&fake_cpuc, event)) return -ENOSPC; return 0; } /* This is needed by specific irq handlers in perf_event_*.c */ static void handle_associated_event(struct cpu_hw_events *cpuc, int idx, struct perf_sample_data *data, struct pt_regs *regs) { struct perf_event *event = cpuc->events[idx]; struct hw_perf_event *hwc = &event->hw; mipspmu_event_update(event, hwc, idx); data->period = event->hw.last_period; if (!mipspmu_event_set_period(event, hwc, idx)) return; if (perf_event_overflow(event, 0, data, regs)) mipspmu->disable_event(idx); } #include "perf_event_mipsxx.c" 
/* Callchain handling code. */ /* * Leave userspace callchain empty for now. When we find a way to trace * the user stack callchains, we add here. */ void perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs) { } static void save_raw_perf_callchain(struct perf_callchain_entry *entry, unsigned long reg29) { unsigned long *sp = (unsigned long *)reg29; unsigned long addr; while (!kstack_end(sp)) { addr = *sp++; if (__kernel_text_address(addr)) { perf_callchain_store(entry, addr); if (entry->nr >= PERF_MAX_STACK_DEPTH) break; } } } void perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs) { unsigned long sp = regs->regs[29]; #ifdef CONFIG_KALLSYMS unsigned long ra = regs->regs[31]; unsigned long pc = regs->cp0_epc; if (raw_show_trace || !__kernel_text_address(pc)) { unsigned long stack_page = (unsigned long)task_stack_page(current); if (stack_page && sp >= stack_page && sp <= stack_page + THREAD_SIZE - 32) save_raw_perf_callchain(entry, sp); return; } do { perf_callchain_store(entry, pc); if (entry->nr >= PERF_MAX_STACK_DEPTH) break; pc = unwind_stack(current, &sp, pc, &ra); } while (pc); #else save_raw_perf_callchain(entry, sp); #endif }
gpl-2.0
ptmr3/Fpg_Kernel
arch/arm/mach-omap1/board-palmz71.c
2424
8282
/* * linux/arch/arm/mach-omap1/board-palmz71.c * * Modified from board-generic.c * * Support for the Palm Zire71 PDA. * * Original version : Laurent Gonzalez * * Modified for zire71 : Marek Vasut * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/delay.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/notifier.h> #include <linux/clk.h> #include <linux/irq.h> #include <linux/input.h> #include <linux/interrupt.h> #include <linux/mtd/mtd.h> #include <linux/mtd/partitions.h> #include <linux/mtd/physmap.h> #include <mach/hardware.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <mach/gpio.h> #include <plat/flash.h> #include <plat/mux.h> #include <plat/usb.h> #include <plat/dma.h> #include <plat/tc.h> #include <plat/board.h> #include <plat/irda.h> #include <plat/keypad.h> #include <plat/common.h> #include <plat/omap-alsa.h> #include <linux/spi/spi.h> #include <linux/spi/ads7846.h> #define PALMZ71_USBDETECT_GPIO 0 #define PALMZ71_PENIRQ_GPIO 6 #define PALMZ71_MMC_WP_GPIO 8 #define PALMZ71_HDQ_GPIO 11 #define PALMZ71_HOTSYNC_GPIO OMAP_MPUIO(1) #define PALMZ71_CABLE_GPIO OMAP_MPUIO(2) #define PALMZ71_SLIDER_GPIO OMAP_MPUIO(3) #define PALMZ71_MMC_IN_GPIO OMAP_MPUIO(4) static void __init omap_palmz71_init_irq(void) { omap1_init_common_hw(); omap_init_irq(); } static const unsigned int palmz71_keymap[] = { KEY(0, 0, KEY_F1), KEY(1, 0, KEY_F2), KEY(2, 0, KEY_F3), KEY(3, 0, KEY_F4), KEY(4, 0, KEY_POWER), KEY(0, 1, KEY_LEFT), KEY(1, 1, KEY_DOWN), KEY(2, 1, KEY_UP), KEY(3, 1, KEY_RIGHT), KEY(4, 1, KEY_ENTER), KEY(0, 2, KEY_CAMERA), }; static const struct matrix_keymap_data palmz71_keymap_data = { .keymap = palmz71_keymap, .keymap_size = ARRAY_SIZE(palmz71_keymap), }; static struct omap_kp_platform_data palmz71_kp_data = { .rows = 8, 
.cols = 8, .keymap_data = &palmz71_keymap_data, .rep = true, .delay = 80, }; static struct resource palmz71_kp_resources[] = { [0] = { .start = INT_KEYBOARD, .end = INT_KEYBOARD, .flags = IORESOURCE_IRQ, }, }; static struct platform_device palmz71_kp_device = { .name = "omap-keypad", .id = -1, .dev = { .platform_data = &palmz71_kp_data, }, .num_resources = ARRAY_SIZE(palmz71_kp_resources), .resource = palmz71_kp_resources, }; static struct mtd_partition palmz71_rom_partitions[] = { /* PalmOS "Small ROM", contains the bootloader and the debugger */ { .name = "smallrom", .offset = 0, .size = 0xa000, .mask_flags = MTD_WRITEABLE, }, /* PalmOS "Big ROM", a filesystem with all the OS code and data */ { .name = "bigrom", .offset = SZ_128K, /* * 0x5f0000 bytes big in the multi-language ("EFIGS") version, * 0x7b0000 bytes in the English-only ("enUS") version. */ .size = 0x7b0000, .mask_flags = MTD_WRITEABLE, }, }; static struct physmap_flash_data palmz71_rom_data = { .width = 2, .set_vpp = omap1_set_vpp, .parts = palmz71_rom_partitions, .nr_parts = ARRAY_SIZE(palmz71_rom_partitions), }; static struct resource palmz71_rom_resource = { .start = OMAP_CS0_PHYS, .end = OMAP_CS0_PHYS + SZ_8M - 1, .flags = IORESOURCE_MEM, }; static struct platform_device palmz71_rom_device = { .name = "physmap-flash", .id = -1, .dev = { .platform_data = &palmz71_rom_data, }, .num_resources = 1, .resource = &palmz71_rom_resource, }; static struct platform_device palmz71_lcd_device = { .name = "lcd_palmz71", .id = -1, }; static struct omap_irda_config palmz71_irda_config = { .transceiver_cap = IR_SIRMODE, .rx_channel = OMAP_DMA_UART3_RX, .tx_channel = OMAP_DMA_UART3_TX, .dest_start = UART3_THR, .src_start = UART3_RHR, .tx_trigger = 0, .rx_trigger = 0, }; static struct resource palmz71_irda_resources[] = { [0] = { .start = INT_UART3, .end = INT_UART3, .flags = IORESOURCE_IRQ, }, }; static struct platform_device palmz71_irda_device = { .name = "omapirda", .id = -1, .dev = { .platform_data = 
&palmz71_irda_config, }, .num_resources = ARRAY_SIZE(palmz71_irda_resources), .resource = palmz71_irda_resources, }; static struct platform_device palmz71_spi_device = { .name = "spi_palmz71", .id = -1, }; static struct omap_backlight_config palmz71_backlight_config = { .default_intensity = 0xa0, }; static struct platform_device palmz71_backlight_device = { .name = "omap-bl", .id = -1, .dev = { .platform_data = &palmz71_backlight_config, }, }; static struct platform_device *devices[] __initdata = { &palmz71_rom_device, &palmz71_kp_device, &palmz71_lcd_device, &palmz71_irda_device, &palmz71_spi_device, &palmz71_backlight_device, }; static int palmz71_get_pendown_state(void) { return !gpio_get_value(PALMZ71_PENIRQ_GPIO); } static const struct ads7846_platform_data palmz71_ts_info = { .model = 7846, .vref_delay_usecs = 100, /* internal, no capacitor */ .x_plate_ohms = 419, .y_plate_ohms = 486, .get_pendown_state = palmz71_get_pendown_state, }; static struct spi_board_info __initdata palmz71_boardinfo[] = { { /* MicroWire (bus 2) CS0 has an ads7846e */ .modalias = "ads7846", .platform_data = &palmz71_ts_info, .irq = OMAP_GPIO_IRQ(PALMZ71_PENIRQ_GPIO), .max_speed_hz = 120000 /* max sample rate at 3V */ * 26 /* command + data + overhead */, .bus_num = 2, .chip_select = 0, } }; static struct omap_usb_config palmz71_usb_config __initdata = { .register_dev = 1, /* Mini-B only receptacle */ .hmc_mode = 0, .pins[0] = 2, }; static struct omap_lcd_config palmz71_lcd_config __initdata = { .ctrl_name = "internal", }; static struct omap_board_config_kernel palmz71_config[] __initdata = { {OMAP_TAG_LCD, &palmz71_lcd_config}, }; static irqreturn_t palmz71_powercable(int irq, void *dev_id) { if (gpio_get_value(PALMZ71_USBDETECT_GPIO)) { printk(KERN_INFO "PM: Power cable connected\n"); irq_set_irq_type(gpio_to_irq(PALMZ71_USBDETECT_GPIO), IRQ_TYPE_EDGE_FALLING); } else { printk(KERN_INFO "PM: Power cable disconnected\n"); irq_set_irq_type(gpio_to_irq(PALMZ71_USBDETECT_GPIO), 
IRQ_TYPE_EDGE_RISING); } return IRQ_HANDLED; } static void __init omap_mpu_wdt_mode(int mode) { if (mode) omap_writew(0x8000, OMAP_WDT_TIMER_MODE); else { omap_writew(0x00f5, OMAP_WDT_TIMER_MODE); omap_writew(0x00a0, OMAP_WDT_TIMER_MODE); } } static void __init palmz71_gpio_setup(int early) { if (early) { /* Only set GPIO1 so we have a working serial */ gpio_direction_output(1, 1); } else { /* Set MMC/SD host WP pin as input */ if (gpio_request(PALMZ71_MMC_WP_GPIO, "MMC WP") < 0) { printk(KERN_ERR "Could not reserve WP GPIO!\n"); return; } gpio_direction_input(PALMZ71_MMC_WP_GPIO); /* Monitor the Power-cable-connected signal */ if (gpio_request(PALMZ71_USBDETECT_GPIO, "USB detect") < 0) { printk(KERN_ERR "Could not reserve cable signal GPIO!\n"); return; } gpio_direction_input(PALMZ71_USBDETECT_GPIO); if (request_irq(gpio_to_irq(PALMZ71_USBDETECT_GPIO), palmz71_powercable, IRQF_SAMPLE_RANDOM, "palmz71-cable", 0)) printk(KERN_ERR "IRQ request for power cable failed!\n"); palmz71_powercable(gpio_to_irq(PALMZ71_USBDETECT_GPIO), 0); } } static void __init omap_palmz71_init(void) { /* mux pins for uarts */ omap_cfg_reg(UART1_TX); omap_cfg_reg(UART1_RTS); omap_cfg_reg(UART2_TX); omap_cfg_reg(UART2_RTS); omap_cfg_reg(UART3_TX); omap_cfg_reg(UART3_RX); palmz71_gpio_setup(1); omap_mpu_wdt_mode(0); omap_board_config = palmz71_config; omap_board_config_size = ARRAY_SIZE(palmz71_config); platform_add_devices(devices, ARRAY_SIZE(devices)); spi_register_board_info(palmz71_boardinfo, ARRAY_SIZE(palmz71_boardinfo)); omap1_usb_init(&palmz71_usb_config); omap_serial_init(); omap_register_i2c_bus(1, 100, NULL, 0); palmz71_gpio_setup(0); } static void __init omap_palmz71_map_io(void) { omap1_map_common_io(); } MACHINE_START(OMAP_PALMZ71, "OMAP310 based Palm Zire71") .boot_params = 0x10000100, .map_io = omap_palmz71_map_io, .reserve = omap_reserve, .init_irq = omap_palmz71_init_irq, .init_machine = omap_palmz71_init, .timer = &omap_timer, MACHINE_END
gpl-2.0
cattleprod/XCeLL-V69-2
drivers/hid/hid-chicony.c
3192
2187
/* * HID driver for some chicony "special" devices * * Copyright (c) 1999 Andreas Gal * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz> * Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc * Copyright (c) 2006-2007 Jiri Kosina * Copyright (c) 2007 Paul Walmsley * Copyright (c) 2008 Jiri Slaby */ /* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. */ #include <linux/device.h> #include <linux/input.h> #include <linux/hid.h> #include <linux/module.h> #include "hid-ids.h" #define ch_map_key_clear(c) hid_map_usage_clear(hi, usage, bit, max, \ EV_KEY, (c)) static int ch_input_mapping(struct hid_device *hdev, struct hid_input *hi, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max) { if ((usage->hid & HID_USAGE_PAGE) != HID_UP_MSVENDOR) return 0; set_bit(EV_REP, hi->input->evbit); switch (usage->hid & HID_USAGE) { case 0xff01: ch_map_key_clear(BTN_1); break; case 0xff02: ch_map_key_clear(BTN_2); break; case 0xff03: ch_map_key_clear(BTN_3); break; case 0xff04: ch_map_key_clear(BTN_4); break; case 0xff05: ch_map_key_clear(BTN_5); break; case 0xff06: ch_map_key_clear(BTN_6); break; case 0xff07: ch_map_key_clear(BTN_7); break; case 0xff08: ch_map_key_clear(BTN_8); break; case 0xff09: ch_map_key_clear(BTN_9); break; case 0xff0a: ch_map_key_clear(BTN_A); break; case 0xff0b: ch_map_key_clear(BTN_B); break; default: return 0; } return 1; } static const struct hid_device_id ch_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_TACTICAL_PAD) }, { } }; MODULE_DEVICE_TABLE(hid, ch_devices); static struct hid_driver ch_driver = { .name = "chicony", .id_table = ch_devices, .input_mapping = ch_input_mapping, }; static int __init ch_init(void) { return hid_register_driver(&ch_driver); } static void __exit 
ch_exit(void) { hid_unregister_driver(&ch_driver); } module_init(ch_init); module_exit(ch_exit); MODULE_LICENSE("GPL");
gpl-2.0
Epirex/android_kernel_samsung_golden
drivers/rtc/rtc-s35390a.c
4216
7775
/* * Seiko Instruments S-35390A RTC Driver * * Copyright (c) 2007 Byron Bradley * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/module.h> #include <linux/rtc.h> #include <linux/i2c.h> #include <linux/bitrev.h> #include <linux/bcd.h> #include <linux/slab.h> #define S35390A_CMD_STATUS1 0 #define S35390A_CMD_STATUS2 1 #define S35390A_CMD_TIME1 2 #define S35390A_BYTE_YEAR 0 #define S35390A_BYTE_MONTH 1 #define S35390A_BYTE_DAY 2 #define S35390A_BYTE_WDAY 3 #define S35390A_BYTE_HOURS 4 #define S35390A_BYTE_MINS 5 #define S35390A_BYTE_SECS 6 #define S35390A_FLAG_POC 0x01 #define S35390A_FLAG_BLD 0x02 #define S35390A_FLAG_24H 0x40 #define S35390A_FLAG_RESET 0x80 #define S35390A_FLAG_TEST 0x01 static const struct i2c_device_id s35390a_id[] = { { "s35390a", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, s35390a_id); struct s35390a { struct i2c_client *client[8]; struct rtc_device *rtc; int twentyfourhour; }; static int s35390a_set_reg(struct s35390a *s35390a, int reg, char *buf, int len) { struct i2c_client *client = s35390a->client[reg]; struct i2c_msg msg[] = { { client->addr, 0, len, buf }, }; if ((i2c_transfer(client->adapter, msg, 1)) != 1) return -EIO; return 0; } static int s35390a_get_reg(struct s35390a *s35390a, int reg, char *buf, int len) { struct i2c_client *client = s35390a->client[reg]; struct i2c_msg msg[] = { { client->addr, I2C_M_RD, len, buf }, }; if ((i2c_transfer(client->adapter, msg, 1)) != 1) return -EIO; return 0; } static int s35390a_reset(struct s35390a *s35390a) { char buf[1]; if (s35390a_get_reg(s35390a, S35390A_CMD_STATUS1, buf, sizeof(buf)) < 0) return -EIO; if (!(buf[0] & (S35390A_FLAG_POC | S35390A_FLAG_BLD))) return 0; buf[0] |= (S35390A_FLAG_RESET | S35390A_FLAG_24H); buf[0] &= 0xf0; return s35390a_set_reg(s35390a, 
S35390A_CMD_STATUS1, buf, sizeof(buf)); } static int s35390a_disable_test_mode(struct s35390a *s35390a) { char buf[1]; if (s35390a_get_reg(s35390a, S35390A_CMD_STATUS2, buf, sizeof(buf)) < 0) return -EIO; if (!(buf[0] & S35390A_FLAG_TEST)) return 0; buf[0] &= ~S35390A_FLAG_TEST; return s35390a_set_reg(s35390a, S35390A_CMD_STATUS2, buf, sizeof(buf)); } static char s35390a_hr2reg(struct s35390a *s35390a, int hour) { if (s35390a->twentyfourhour) return bin2bcd(hour); if (hour < 12) return bin2bcd(hour); return 0x40 | bin2bcd(hour - 12); } static int s35390a_reg2hr(struct s35390a *s35390a, char reg) { unsigned hour; if (s35390a->twentyfourhour) return bcd2bin(reg & 0x3f); hour = bcd2bin(reg & 0x3f); if (reg & 0x40) hour += 12; return hour; } static int s35390a_set_datetime(struct i2c_client *client, struct rtc_time *tm) { struct s35390a *s35390a = i2c_get_clientdata(client); int i, err; char buf[7]; dev_dbg(&client->dev, "%s: tm is secs=%d, mins=%d, hours=%d mday=%d, " "mon=%d, year=%d, wday=%d\n", __func__, tm->tm_sec, tm->tm_min, tm->tm_hour, tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday); buf[S35390A_BYTE_YEAR] = bin2bcd(tm->tm_year - 100); buf[S35390A_BYTE_MONTH] = bin2bcd(tm->tm_mon + 1); buf[S35390A_BYTE_DAY] = bin2bcd(tm->tm_mday); buf[S35390A_BYTE_WDAY] = bin2bcd(tm->tm_wday); buf[S35390A_BYTE_HOURS] = s35390a_hr2reg(s35390a, tm->tm_hour); buf[S35390A_BYTE_MINS] = bin2bcd(tm->tm_min); buf[S35390A_BYTE_SECS] = bin2bcd(tm->tm_sec); /* This chip expects the bits of each byte to be in reverse order */ for (i = 0; i < 7; ++i) buf[i] = bitrev8(buf[i]); err = s35390a_set_reg(s35390a, S35390A_CMD_TIME1, buf, sizeof(buf)); return err; } static int s35390a_get_datetime(struct i2c_client *client, struct rtc_time *tm) { struct s35390a *s35390a = i2c_get_clientdata(client); char buf[7]; int i, err; err = s35390a_get_reg(s35390a, S35390A_CMD_TIME1, buf, sizeof(buf)); if (err < 0) return err; /* This chip returns the bits of each byte in reverse order */ for (i = 0; i < 
7; ++i) buf[i] = bitrev8(buf[i]); tm->tm_sec = bcd2bin(buf[S35390A_BYTE_SECS]); tm->tm_min = bcd2bin(buf[S35390A_BYTE_MINS]); tm->tm_hour = s35390a_reg2hr(s35390a, buf[S35390A_BYTE_HOURS]); tm->tm_wday = bcd2bin(buf[S35390A_BYTE_WDAY]); tm->tm_mday = bcd2bin(buf[S35390A_BYTE_DAY]); tm->tm_mon = bcd2bin(buf[S35390A_BYTE_MONTH]) - 1; tm->tm_year = bcd2bin(buf[S35390A_BYTE_YEAR]) + 100; dev_dbg(&client->dev, "%s: tm is secs=%d, mins=%d, hours=%d, mday=%d, " "mon=%d, year=%d, wday=%d\n", __func__, tm->tm_sec, tm->tm_min, tm->tm_hour, tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday); return rtc_valid_tm(tm); } static int s35390a_rtc_read_time(struct device *dev, struct rtc_time *tm) { return s35390a_get_datetime(to_i2c_client(dev), tm); } static int s35390a_rtc_set_time(struct device *dev, struct rtc_time *tm) { return s35390a_set_datetime(to_i2c_client(dev), tm); } static const struct rtc_class_ops s35390a_rtc_ops = { .read_time = s35390a_rtc_read_time, .set_time = s35390a_rtc_set_time, }; static struct i2c_driver s35390a_driver; static int s35390a_probe(struct i2c_client *client, const struct i2c_device_id *id) { int err; unsigned int i; struct s35390a *s35390a; struct rtc_time tm; char buf[1]; if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { err = -ENODEV; goto exit; } s35390a = kzalloc(sizeof(struct s35390a), GFP_KERNEL); if (!s35390a) { err = -ENOMEM; goto exit; } s35390a->client[0] = client; i2c_set_clientdata(client, s35390a); /* This chip uses multiple addresses, use dummy devices for them */ for (i = 1; i < 8; ++i) { s35390a->client[i] = i2c_new_dummy(client->adapter, client->addr + i); if (!s35390a->client[i]) { dev_err(&client->dev, "Address %02x unavailable\n", client->addr + i); err = -EBUSY; goto exit_dummy; } } err = s35390a_reset(s35390a); if (err < 0) { dev_err(&client->dev, "error resetting chip\n"); goto exit_dummy; } err = s35390a_disable_test_mode(s35390a); if (err < 0) { dev_err(&client->dev, "error disabling test mode\n"); goto 
exit_dummy; } err = s35390a_get_reg(s35390a, S35390A_CMD_STATUS1, buf, sizeof(buf)); if (err < 0) { dev_err(&client->dev, "error checking 12/24 hour mode\n"); goto exit_dummy; } if (buf[0] & S35390A_FLAG_24H) s35390a->twentyfourhour = 1; else s35390a->twentyfourhour = 0; if (s35390a_get_datetime(client, &tm) < 0) dev_warn(&client->dev, "clock needs to be set\n"); s35390a->rtc = rtc_device_register(s35390a_driver.driver.name, &client->dev, &s35390a_rtc_ops, THIS_MODULE); if (IS_ERR(s35390a->rtc)) { err = PTR_ERR(s35390a->rtc); goto exit_dummy; } return 0; exit_dummy: for (i = 1; i < 8; ++i) if (s35390a->client[i]) i2c_unregister_device(s35390a->client[i]); kfree(s35390a); exit: return err; } static int s35390a_remove(struct i2c_client *client) { unsigned int i; struct s35390a *s35390a = i2c_get_clientdata(client); for (i = 1; i < 8; ++i) if (s35390a->client[i]) i2c_unregister_device(s35390a->client[i]); rtc_device_unregister(s35390a->rtc); kfree(s35390a); return 0; } static struct i2c_driver s35390a_driver = { .driver = { .name = "rtc-s35390a", }, .probe = s35390a_probe, .remove = s35390a_remove, .id_table = s35390a_id, }; static int __init s35390a_rtc_init(void) { return i2c_add_driver(&s35390a_driver); } static void __exit s35390a_rtc_exit(void) { i2c_del_driver(&s35390a_driver); } MODULE_AUTHOR("Byron Bradley <byron.bbradley@gmail.com>"); MODULE_DESCRIPTION("S35390A RTC driver"); MODULE_LICENSE("GPL"); module_init(s35390a_rtc_init); module_exit(s35390a_rtc_exit);
gpl-2.0
voidz777/android_kernel_samsung_tuna
arch/arm/mach-footbridge/dc21285.c
4216
9374
/* * linux/arch/arm/kernel/dec21285.c: PCI functions for DC21285 * * Copyright (C) 1998-2001 Russell King * Copyright (C) 1998-2000 Phil Blundell * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/pci.h> #include <linux/interrupt.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/ioport.h> #include <linux/irq.h> #include <linux/io.h> #include <linux/spinlock.h> #include <asm/irq.h> #include <asm/system.h> #include <asm/mach/pci.h> #include <asm/hardware/dec21285.h> #define MAX_SLOTS 21 #define PCICMD_ABORT ((PCI_STATUS_REC_MASTER_ABORT| \ PCI_STATUS_REC_TARGET_ABORT)<<16) #define PCICMD_ERROR_BITS ((PCI_STATUS_DETECTED_PARITY | \ PCI_STATUS_REC_MASTER_ABORT | \ PCI_STATUS_REC_TARGET_ABORT | \ PCI_STATUS_PARITY) << 16) extern int setup_arm_irq(int, struct irqaction *); extern void pcibios_report_status(u_int status_mask, int warn); static unsigned long dc21285_base_address(struct pci_bus *bus, unsigned int devfn) { unsigned long addr = 0; if (bus->number == 0) { if (PCI_SLOT(devfn) == 0) /* * For devfn 0, point at the 21285 */ addr = ARMCSR_BASE; else { devfn -= 1 << 3; if (devfn < PCI_DEVFN(MAX_SLOTS, 0)) addr = PCICFG0_BASE | 0xc00000 | (devfn << 8); } } else addr = PCICFG1_BASE | (bus->number << 16) | (devfn << 8); return addr; } static int dc21285_read_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *value) { unsigned long addr = dc21285_base_address(bus, devfn); u32 v = 0xffffffff; if (addr) switch (size) { case 1: asm("ldrb %0, [%1, %2]" : "=r" (v) : "r" (addr), "r" (where) : "cc"); break; case 2: asm("ldrh %0, [%1, %2]" : "=r" (v) : "r" (addr), "r" (where) : "cc"); break; case 4: asm("ldr %0, [%1, %2]" : "=r" (v) : "r" (addr), "r" (where) : "cc"); break; } *value = v; v = *CSR_PCICMD; if (v & PCICMD_ABORT) { *CSR_PCICMD 
= v & (0xffff|PCICMD_ABORT); return -1; } return PCIBIOS_SUCCESSFUL; } static int dc21285_write_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 value) { unsigned long addr = dc21285_base_address(bus, devfn); u32 v; if (addr) switch (size) { case 1: asm("strb %0, [%1, %2]" : : "r" (value), "r" (addr), "r" (where) : "cc"); break; case 2: asm("strh %0, [%1, %2]" : : "r" (value), "r" (addr), "r" (where) : "cc"); break; case 4: asm("str %0, [%1, %2]" : : "r" (value), "r" (addr), "r" (where) : "cc"); break; } v = *CSR_PCICMD; if (v & PCICMD_ABORT) { *CSR_PCICMD = v & (0xffff|PCICMD_ABORT); return -1; } return PCIBIOS_SUCCESSFUL; } static struct pci_ops dc21285_ops = { .read = dc21285_read_config, .write = dc21285_write_config, }; static struct timer_list serr_timer; static struct timer_list perr_timer; static void dc21285_enable_error(unsigned long __data) { switch (__data) { case IRQ_PCI_SERR: del_timer(&serr_timer); break; case IRQ_PCI_PERR: del_timer(&perr_timer); break; } enable_irq(__data); } /* * Warn on PCI errors. 
*/ static irqreturn_t dc21285_abort_irq(int irq, void *dev_id) { unsigned int cmd; unsigned int status; cmd = *CSR_PCICMD; status = cmd >> 16; cmd = cmd & 0xffff; if (status & PCI_STATUS_REC_MASTER_ABORT) { printk(KERN_DEBUG "PCI: master abort, pc=0x%08lx\n", instruction_pointer(get_irq_regs())); cmd |= PCI_STATUS_REC_MASTER_ABORT << 16; } if (status & PCI_STATUS_REC_TARGET_ABORT) { printk(KERN_DEBUG "PCI: target abort: "); pcibios_report_status(PCI_STATUS_REC_MASTER_ABORT | PCI_STATUS_SIG_TARGET_ABORT | PCI_STATUS_REC_TARGET_ABORT, 1); printk("\n"); cmd |= PCI_STATUS_REC_TARGET_ABORT << 16; } *CSR_PCICMD = cmd; return IRQ_HANDLED; } static irqreturn_t dc21285_serr_irq(int irq, void *dev_id) { struct timer_list *timer = dev_id; unsigned int cntl; printk(KERN_DEBUG "PCI: system error received: "); pcibios_report_status(PCI_STATUS_SIG_SYSTEM_ERROR, 1); printk("\n"); cntl = *CSR_SA110_CNTL & 0xffffdf07; *CSR_SA110_CNTL = cntl | SA110_CNTL_RXSERR; /* * back off this interrupt */ disable_irq(irq); timer->expires = jiffies + HZ; add_timer(timer); return IRQ_HANDLED; } static irqreturn_t dc21285_discard_irq(int irq, void *dev_id) { printk(KERN_DEBUG "PCI: discard timer expired\n"); *CSR_SA110_CNTL &= 0xffffde07; return IRQ_HANDLED; } static irqreturn_t dc21285_dparity_irq(int irq, void *dev_id) { unsigned int cmd; printk(KERN_DEBUG "PCI: data parity error detected: "); pcibios_report_status(PCI_STATUS_PARITY | PCI_STATUS_DETECTED_PARITY, 1); printk("\n"); cmd = *CSR_PCICMD & 0xffff; *CSR_PCICMD = cmd | 1 << 24; return IRQ_HANDLED; } static irqreturn_t dc21285_parity_irq(int irq, void *dev_id) { struct timer_list *timer = dev_id; unsigned int cmd; printk(KERN_DEBUG "PCI: parity error detected: "); pcibios_report_status(PCI_STATUS_PARITY | PCI_STATUS_DETECTED_PARITY, 1); printk("\n"); cmd = *CSR_PCICMD & 0xffff; *CSR_PCICMD = cmd | 1 << 31; /* * back off this interrupt */ disable_irq(irq); timer->expires = jiffies + HZ; add_timer(timer); return IRQ_HANDLED; } int __init 
dc21285_setup(int nr, struct pci_sys_data *sys) { struct resource *res; if (nr || !footbridge_cfn_mode()) return 0; res = kzalloc(sizeof(struct resource) * 2, GFP_KERNEL); if (!res) { printk("out of memory for root bus resources"); return 0; } res[0].flags = IORESOURCE_MEM; res[0].name = "Footbridge non-prefetch"; res[1].flags = IORESOURCE_MEM | IORESOURCE_PREFETCH; res[1].name = "Footbridge prefetch"; allocate_resource(&iomem_resource, &res[1], 0x20000000, 0xa0000000, 0xffffffff, 0x20000000, NULL, NULL); allocate_resource(&iomem_resource, &res[0], 0x40000000, 0x80000000, 0xffffffff, 0x40000000, NULL, NULL); sys->resource[0] = &ioport_resource; sys->resource[1] = &res[0]; sys->resource[2] = &res[1]; sys->mem_offset = DC21285_PCI_MEM; return 1; } struct pci_bus * __init dc21285_scan_bus(int nr, struct pci_sys_data *sys) { return pci_scan_bus(0, &dc21285_ops, sys); } #define dc21285_request_irq(_a, _b, _c, _d, _e) \ WARN_ON(request_irq(_a, _b, _c, _d, _e) < 0) void __init dc21285_preinit(void) { unsigned int mem_size, mem_mask; int cfn_mode; mem_size = (unsigned int)high_memory - PAGE_OFFSET; for (mem_mask = 0x00100000; mem_mask < 0x10000000; mem_mask <<= 1) if (mem_mask >= mem_size) break; /* * These registers need to be set up whether we're the * central function or not. */ *CSR_SDRAMBASEMASK = (mem_mask - 1) & 0x0ffc0000; *CSR_SDRAMBASEOFFSET = 0; *CSR_ROMBASEMASK = 0x80000000; *CSR_CSRBASEMASK = 0; *CSR_CSRBASEOFFSET = 0; *CSR_PCIADDR_EXTN = 0; cfn_mode = __footbridge_cfn_mode(); printk(KERN_INFO "PCI: DC21285 footbridge, revision %02lX, in " "%s mode\n", *CSR_CLASSREV & 0xff, cfn_mode ? "central function" : "addin"); if (footbridge_cfn_mode()) { /* * Clear any existing errors - we aren't * interested in historical data... 
*/ *CSR_SA110_CNTL = (*CSR_SA110_CNTL & 0xffffde07) | SA110_CNTL_RXSERR; *CSR_PCICMD = (*CSR_PCICMD & 0xffff) | PCICMD_ERROR_BITS; } init_timer(&serr_timer); init_timer(&perr_timer); serr_timer.data = IRQ_PCI_SERR; serr_timer.function = dc21285_enable_error; perr_timer.data = IRQ_PCI_PERR; perr_timer.function = dc21285_enable_error; /* * We don't care if these fail. */ dc21285_request_irq(IRQ_PCI_SERR, dc21285_serr_irq, IRQF_DISABLED, "PCI system error", &serr_timer); dc21285_request_irq(IRQ_PCI_PERR, dc21285_parity_irq, IRQF_DISABLED, "PCI parity error", &perr_timer); dc21285_request_irq(IRQ_PCI_ABORT, dc21285_abort_irq, IRQF_DISABLED, "PCI abort", NULL); dc21285_request_irq(IRQ_DISCARD_TIMER, dc21285_discard_irq, IRQF_DISABLED, "Discard timer", NULL); dc21285_request_irq(IRQ_PCI_DPERR, dc21285_dparity_irq, IRQF_DISABLED, "PCI data parity", NULL); if (cfn_mode) { static struct resource csrio; csrio.flags = IORESOURCE_IO; csrio.name = "Footbridge"; allocate_resource(&ioport_resource, &csrio, 128, 0xff00, 0xffff, 128, NULL, NULL); /* * Map our SDRAM at a known address in PCI space, just in case * the firmware had other ideas. Using a nonzero base is * necessary, since some VGA cards forcefully use PCI addresses * in the range 0x000a0000 to 0x000c0000. (eg, S3 cards). */ *CSR_PCICSRBASE = 0xf4000000; *CSR_PCICSRIOBASE = csrio.start; *CSR_PCISDRAMBASE = __virt_to_bus(PAGE_OFFSET); *CSR_PCIROMBASE = 0; *CSR_PCICMD = PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE | PCICMD_ERROR_BITS; } else if (footbridge_cfn_mode() != 0) { /* * If we are not compiled to accept "add-in" mode, then * we are using a constant virt_to_bus translation which * can not hope to cater for the way the host BIOS has * set up the machine. */ panic("PCI: this kernel is compiled for central " "function mode only"); } } void __init dc21285_postinit(void) { register_isa_ports(DC21285_PCI_MEM, DC21285_PCI_IO, 0); }
gpl-2.0
eagleeyetom/android_kernel_mtk_mt6572
arch/arm/mach-pxa/magician.c
4728
18174
/* * Support for HTC Magician PDA phones: * i-mate JAM, O2 Xda mini, Orange SPV M500, Qtek s100, Qtek s110 * and T-Mobile MDA Compact. * * Copyright (c) 2006-2007 Philipp Zabel * * Based on hx4700.c, spitz.c and others. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/delay.h> #include <linux/gpio.h> #include <linux/gpio_keys.h> #include <linux/input.h> #include <linux/mfd/htc-egpio.h> #include <linux/mfd/htc-pasic3.h> #include <linux/mtd/physmap.h> #include <linux/pda_power.h> #include <linux/pwm_backlight.h> #include <linux/regulator/driver.h> #include <linux/regulator/gpio-regulator.h> #include <linux/regulator/machine.h> #include <linux/usb/gpio_vbus.h> #include <linux/i2c/pxa-i2c.h> #include <mach/hardware.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/system_info.h> #include <mach/pxa27x.h> #include <mach/magician.h> #include <mach/pxafb.h> #include <mach/mmc.h> #include <mach/irda.h> #include <mach/ohci.h> #include "devices.h" #include "generic.h" static unsigned long magician_pin_config[] __initdata = { /* SDRAM and Static Memory I/O Signals */ GPIO20_nSDCS_2, GPIO21_nSDCS_3, GPIO15_nCS_1, GPIO78_nCS_2, /* PASIC3 */ GPIO79_nCS_3, /* EGPIO CPLD */ GPIO80_nCS_4, GPIO33_nCS_5, /* I2C */ GPIO117_I2C_SCL, GPIO118_I2C_SDA, /* PWM 0 */ GPIO16_PWM0_OUT, /* I2S */ GPIO28_I2S_BITCLK_OUT, GPIO29_I2S_SDATA_IN, GPIO31_I2S_SYNC, GPIO113_I2S_SYSCLK, /* SSP 1 */ GPIO23_SSP1_SCLK, GPIO24_SSP1_SFRM, GPIO25_SSP1_TXD, /* SSP 2 */ GPIO19_SSP2_SCLK, GPIO14_SSP2_SFRM, GPIO89_SSP2_TXD, GPIO88_SSP2_RXD, /* MMC */ GPIO32_MMC_CLK, GPIO92_MMC_DAT_0, GPIO109_MMC_DAT_1, GPIO110_MMC_DAT_2, GPIO111_MMC_DAT_3, GPIO112_MMC_CMD, /* LCD */ GPIOxx_LCD_TFT_16BPP, /* QCI */ GPIO12_CIF_DD_7, GPIO17_CIF_DD_6, GPIO50_CIF_DD_3, 
GPIO51_CIF_DD_2, GPIO52_CIF_DD_4, GPIO53_CIF_MCLK, GPIO54_CIF_PCLK, GPIO55_CIF_DD_1, GPIO81_CIF_DD_0, GPIO82_CIF_DD_5, GPIO84_CIF_FV, GPIO85_CIF_LV, /* Magician specific input GPIOs */ GPIO9_GPIO, /* unknown */ GPIO10_GPIO, /* GSM_IRQ */ GPIO13_GPIO, /* CPLD_IRQ */ GPIO107_GPIO, /* DS1WM_IRQ */ GPIO108_GPIO, /* GSM_READY */ GPIO115_GPIO, /* nPEN_IRQ */ /* I2C */ GPIO117_I2C_SCL, GPIO118_I2C_SDA, }; /* * IRDA */ static struct pxaficp_platform_data magician_ficp_info = { .gpio_pwdown = GPIO83_MAGICIAN_nIR_EN, .transceiver_cap = IR_SIRMODE | IR_OFF, }; /* * GPIO Keys */ #define INIT_KEY(_code, _gpio, _desc) \ { \ .code = KEY_##_code, \ .gpio = _gpio, \ .desc = _desc, \ .type = EV_KEY, \ .wakeup = 1, \ } static struct gpio_keys_button magician_button_table[] = { INIT_KEY(POWER, GPIO0_MAGICIAN_KEY_POWER, "Power button"), INIT_KEY(ESC, GPIO37_MAGICIAN_KEY_HANGUP, "Hangup button"), INIT_KEY(F10, GPIO38_MAGICIAN_KEY_CONTACTS, "Contacts button"), INIT_KEY(CALENDAR, GPIO90_MAGICIAN_KEY_CALENDAR, "Calendar button"), INIT_KEY(CAMERA, GPIO91_MAGICIAN_KEY_CAMERA, "Camera button"), INIT_KEY(UP, GPIO93_MAGICIAN_KEY_UP, "Up button"), INIT_KEY(DOWN, GPIO94_MAGICIAN_KEY_DOWN, "Down button"), INIT_KEY(LEFT, GPIO95_MAGICIAN_KEY_LEFT, "Left button"), INIT_KEY(RIGHT, GPIO96_MAGICIAN_KEY_RIGHT, "Right button"), INIT_KEY(KPENTER, GPIO97_MAGICIAN_KEY_ENTER, "Action button"), INIT_KEY(RECORD, GPIO98_MAGICIAN_KEY_RECORD, "Record button"), INIT_KEY(VOLUMEUP, GPIO100_MAGICIAN_KEY_VOL_UP, "Volume up"), INIT_KEY(VOLUMEDOWN, GPIO101_MAGICIAN_KEY_VOL_DOWN, "Volume down"), INIT_KEY(PHONE, GPIO102_MAGICIAN_KEY_PHONE, "Phone button"), INIT_KEY(PLAY, GPIO99_MAGICIAN_HEADPHONE_IN, "Headset button"), }; static struct gpio_keys_platform_data gpio_keys_data = { .buttons = magician_button_table, .nbuttons = ARRAY_SIZE(magician_button_table), }; static struct platform_device gpio_keys = { .name = "gpio-keys", .dev = { .platform_data = &gpio_keys_data, }, .id = -1, }; /* * EGPIO (Xilinx CPLD) * * 7 32-bit 
aligned 8-bit registers: 3x output, 1x irq, 3x input */ static struct resource egpio_resources[] = { [0] = { .start = PXA_CS3_PHYS, .end = PXA_CS3_PHYS + 0x20 - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = PXA_GPIO_TO_IRQ(GPIO13_MAGICIAN_CPLD_IRQ), .end = PXA_GPIO_TO_IRQ(GPIO13_MAGICIAN_CPLD_IRQ), .flags = IORESOURCE_IRQ, }, }; static struct htc_egpio_chip egpio_chips[] = { [0] = { .reg_start = 0, .gpio_base = MAGICIAN_EGPIO(0, 0), .num_gpios = 24, .direction = HTC_EGPIO_OUTPUT, .initial_values = 0x40, /* EGPIO_MAGICIAN_GSM_RESET */ }, [1] = { .reg_start = 4, .gpio_base = MAGICIAN_EGPIO(4, 0), .num_gpios = 24, .direction = HTC_EGPIO_INPUT, }, }; static struct htc_egpio_platform_data egpio_info = { .reg_width = 8, .bus_width = 32, .irq_base = IRQ_BOARD_START, .num_irqs = 4, .ack_register = 3, .chip = egpio_chips, .num_chips = ARRAY_SIZE(egpio_chips), }; static struct platform_device egpio = { .name = "htc-egpio", .id = -1, .resource = egpio_resources, .num_resources = ARRAY_SIZE(egpio_resources), .dev = { .platform_data = &egpio_info, }, }; /* * LCD - Toppoly TD028STEB1 or Samsung LTP280QV */ static struct pxafb_mode_info toppoly_modes[] = { { .pixclock = 96153, .bpp = 16, .xres = 240, .yres = 320, .hsync_len = 11, .vsync_len = 3, .left_margin = 19, .upper_margin = 2, .right_margin = 10, .lower_margin = 2, .sync = 0, }, }; static struct pxafb_mode_info samsung_modes[] = { { .pixclock = 96153, .bpp = 16, .xres = 240, .yres = 320, .hsync_len = 8, .vsync_len = 4, .left_margin = 9, .upper_margin = 4, .right_margin = 9, .lower_margin = 4, .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, }, }; static void toppoly_lcd_power(int on, struct fb_var_screeninfo *si) { pr_debug("Toppoly LCD power\n"); if (on) { pr_debug("on\n"); gpio_set_value(EGPIO_MAGICIAN_TOPPOLY_POWER, 1); gpio_set_value(GPIO106_MAGICIAN_LCD_POWER_3, 1); udelay(2000); gpio_set_value(EGPIO_MAGICIAN_LCD_POWER, 1); udelay(2000); /* FIXME: enable LCDC here */ udelay(2000); 
gpio_set_value(GPIO104_MAGICIAN_LCD_POWER_1, 1); udelay(2000); gpio_set_value(GPIO105_MAGICIAN_LCD_POWER_2, 1); } else { pr_debug("off\n"); msleep(15); gpio_set_value(GPIO105_MAGICIAN_LCD_POWER_2, 0); udelay(500); gpio_set_value(GPIO104_MAGICIAN_LCD_POWER_1, 0); udelay(1000); gpio_set_value(GPIO106_MAGICIAN_LCD_POWER_3, 0); gpio_set_value(EGPIO_MAGICIAN_LCD_POWER, 0); } } static void samsung_lcd_power(int on, struct fb_var_screeninfo *si) { pr_debug("Samsung LCD power\n"); if (on) { pr_debug("on\n"); if (system_rev < 3) gpio_set_value(GPIO75_MAGICIAN_SAMSUNG_POWER, 1); else gpio_set_value(EGPIO_MAGICIAN_LCD_POWER, 1); mdelay(10); gpio_set_value(GPIO106_MAGICIAN_LCD_POWER_3, 1); mdelay(10); gpio_set_value(GPIO104_MAGICIAN_LCD_POWER_1, 1); mdelay(30); gpio_set_value(GPIO105_MAGICIAN_LCD_POWER_2, 1); mdelay(10); } else { pr_debug("off\n"); mdelay(10); gpio_set_value(GPIO105_MAGICIAN_LCD_POWER_2, 0); mdelay(30); gpio_set_value(GPIO104_MAGICIAN_LCD_POWER_1, 0); mdelay(10); gpio_set_value(GPIO106_MAGICIAN_LCD_POWER_3, 0); mdelay(10); if (system_rev < 3) gpio_set_value(GPIO75_MAGICIAN_SAMSUNG_POWER, 0); else gpio_set_value(EGPIO_MAGICIAN_LCD_POWER, 0); } } static struct pxafb_mach_info toppoly_info = { .modes = toppoly_modes, .num_modes = 1, .fixed_modes = 1, .lcd_conn = LCD_COLOR_TFT_16BPP, .pxafb_lcd_power = toppoly_lcd_power, }; static struct pxafb_mach_info samsung_info = { .modes = samsung_modes, .num_modes = 1, .fixed_modes = 1, .lcd_conn = LCD_COLOR_TFT_16BPP | LCD_PCLK_EDGE_FALL |\ LCD_ALTERNATE_MAPPING, .pxafb_lcd_power = samsung_lcd_power, }; /* * Backlight */ static struct gpio magician_bl_gpios[] = { { EGPIO_MAGICIAN_BL_POWER, GPIOF_DIR_OUT, "Backlight power" }, { EGPIO_MAGICIAN_BL_POWER2, GPIOF_DIR_OUT, "Backlight power 2" }, }; static int magician_backlight_init(struct device *dev) { return gpio_request_array(ARRAY_AND_SIZE(magician_bl_gpios)); } static int magician_backlight_notify(struct device *dev, int brightness) { 
gpio_set_value(EGPIO_MAGICIAN_BL_POWER, brightness); if (brightness >= 200) { gpio_set_value(EGPIO_MAGICIAN_BL_POWER2, 1); return brightness - 72; } else { gpio_set_value(EGPIO_MAGICIAN_BL_POWER2, 0); return brightness; } } static void magician_backlight_exit(struct device *dev) { gpio_free_array(ARRAY_AND_SIZE(magician_bl_gpios)); } static struct platform_pwm_backlight_data backlight_data = { .pwm_id = 0, .max_brightness = 272, .dft_brightness = 100, .pwm_period_ns = 30923, .init = magician_backlight_init, .notify = magician_backlight_notify, .exit = magician_backlight_exit, }; static struct platform_device backlight = { .name = "pwm-backlight", .id = -1, .dev = { .parent = &pxa27x_device_pwm0.dev, .platform_data = &backlight_data, }, }; /* * LEDs */ static struct gpio_led gpio_leds[] = { { .name = "magician::vibra", .default_trigger = "none", .gpio = GPIO22_MAGICIAN_VIBRA_EN, }, { .name = "magician::phone_bl", .default_trigger = "backlight", .gpio = GPIO103_MAGICIAN_LED_KP, }, }; static struct gpio_led_platform_data gpio_led_info = { .leds = gpio_leds, .num_leds = ARRAY_SIZE(gpio_leds), }; static struct platform_device leds_gpio = { .name = "leds-gpio", .id = -1, .dev = { .platform_data = &gpio_led_info, }, }; static struct pasic3_led pasic3_leds[] = { { .led = { .name = "magician:red", .default_trigger = "ds2760-battery.0-charging", }, .hw_num = 0, .bit2 = PASIC3_BIT2_LED0, .mask = PASIC3_MASK_LED0, }, { .led = { .name = "magician:green", .default_trigger = "ds2760-battery.0-charging-or-full", }, .hw_num = 1, .bit2 = PASIC3_BIT2_LED1, .mask = PASIC3_MASK_LED1, }, { .led = { .name = "magician:blue", .default_trigger = "bluetooth", }, .hw_num = 2, .bit2 = PASIC3_BIT2_LED2, .mask = PASIC3_MASK_LED2, }, }; static struct pasic3_leds_machinfo pasic3_leds_info = { .num_leds = ARRAY_SIZE(pasic3_leds), .power_gpio = EGPIO_MAGICIAN_LED_POWER, .leds = pasic3_leds, }; /* * PASIC3 with DS1WM */ static struct resource pasic3_resources[] = { [0] = { .start = PXA_CS2_PHYS, .end 
= PXA_CS2_PHYS + 0x1b, .flags = IORESOURCE_MEM, }, /* No IRQ handler in the PASIC3, DS1WM needs an external IRQ */ [1] = { .start = PXA_GPIO_TO_IRQ(GPIO107_MAGICIAN_DS1WM_IRQ), .end = PXA_GPIO_TO_IRQ(GPIO107_MAGICIAN_DS1WM_IRQ), .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE, } }; static struct pasic3_platform_data pasic3_platform_data = { .led_pdata = &pasic3_leds_info, .clock_rate = 4000000, }; static struct platform_device pasic3 = { .name = "pasic3", .id = -1, .num_resources = ARRAY_SIZE(pasic3_resources), .resource = pasic3_resources, .dev = { .platform_data = &pasic3_platform_data, }, }; /* * USB "Transceiver" */ static struct resource gpio_vbus_resource = { .flags = IORESOURCE_IRQ, .start = IRQ_MAGICIAN_VBUS, .end = IRQ_MAGICIAN_VBUS, }; static struct gpio_vbus_mach_info gpio_vbus_info = { .gpio_pullup = GPIO27_MAGICIAN_USBC_PUEN, .gpio_vbus = EGPIO_MAGICIAN_CABLE_STATE_USB, }; static struct platform_device gpio_vbus = { .name = "gpio-vbus", .id = -1, .num_resources = 1, .resource = &gpio_vbus_resource, .dev = { .platform_data = &gpio_vbus_info, }, }; /* * External power */ static int power_supply_init(struct device *dev) { return gpio_request(EGPIO_MAGICIAN_CABLE_STATE_AC, "CABLE_STATE_AC"); } static int magician_is_ac_online(void) { return gpio_get_value(EGPIO_MAGICIAN_CABLE_STATE_AC); } static void power_supply_exit(struct device *dev) { gpio_free(EGPIO_MAGICIAN_CABLE_STATE_AC); } static char *magician_supplicants[] = { "ds2760-battery.0", "backup-battery" }; static struct pda_power_pdata power_supply_info = { .init = power_supply_init, .is_ac_online = magician_is_ac_online, .exit = power_supply_exit, .supplied_to = magician_supplicants, .num_supplicants = ARRAY_SIZE(magician_supplicants), }; static struct resource power_supply_resources[] = { [0] = { .name = "ac", .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE | IORESOURCE_IRQ_LOWEDGE, .start = IRQ_MAGICIAN_VBUS, .end = IRQ_MAGICIAN_VBUS, }, [1] = { .name = "usb", .flags = IORESOURCE_IRQ | 
IORESOURCE_IRQ_HIGHEDGE | IORESOURCE_IRQ_LOWEDGE, .start = IRQ_MAGICIAN_VBUS, .end = IRQ_MAGICIAN_VBUS, }, }; static struct platform_device power_supply = { .name = "pda-power", .id = -1, .dev = { .platform_data = &power_supply_info, }, .resource = power_supply_resources, .num_resources = ARRAY_SIZE(power_supply_resources), }; /* * Battery charger */ static struct regulator_consumer_supply bq24022_consumers[] = { { .supply = "vbus_draw", }, { .supply = "ac_draw", }, }; static struct regulator_init_data bq24022_init_data = { .constraints = { .max_uA = 500000, .valid_ops_mask = REGULATOR_CHANGE_CURRENT | REGULATOR_CHANGE_STATUS, }, .num_consumer_supplies = ARRAY_SIZE(bq24022_consumers), .consumer_supplies = bq24022_consumers, }; static struct gpio bq24022_gpios[] = { { EGPIO_MAGICIAN_BQ24022_ISET2, GPIOF_OUT_INIT_LOW, "bq24022_iset2" }, }; static struct gpio_regulator_state bq24022_states[] = { { .value = 100000, .gpios = (0 << 0) }, { .value = 500000, .gpios = (1 << 0) }, }; static struct gpio_regulator_config bq24022_info = { .supply_name = "bq24022", .enable_gpio = GPIO30_MAGICIAN_BQ24022_nCHARGE_EN, .enable_high = 0, .enabled_at_boot = 0, .gpios = bq24022_gpios, .nr_gpios = ARRAY_SIZE(bq24022_gpios), .states = bq24022_states, .nr_states = ARRAY_SIZE(bq24022_states), .type = REGULATOR_CURRENT, .init_data = &bq24022_init_data, }; static struct platform_device bq24022 = { .name = "gpio-regulator", .id = -1, .dev = { .platform_data = &bq24022_info, }, }; /* * MMC/SD */ static int magician_mci_init(struct device *dev, irq_handler_t detect_irq, void *data) { return request_irq(IRQ_MAGICIAN_SD, detect_irq, IRQF_DISABLED | IRQF_SAMPLE_RANDOM, "mmc card detect", data); } static void magician_mci_exit(struct device *dev, void *data) { free_irq(IRQ_MAGICIAN_SD, data); } static struct pxamci_platform_data magician_mci_info = { .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34, .init = magician_mci_init, .exit = magician_mci_exit, .gpio_card_detect = -1, .gpio_card_ro = 
EGPIO_MAGICIAN_nSD_READONLY, .gpio_card_ro_invert = 1, .gpio_power = EGPIO_MAGICIAN_SD_POWER, }; /* * USB OHCI */ static struct pxaohci_platform_data magician_ohci_info = { .port_mode = PMM_PERPORT_MODE, .flags = ENABLE_PORT1 | ENABLE_PORT3 | POWER_CONTROL_LOW, .power_budget = 0, }; /* * StrataFlash */ static void magician_set_vpp(struct platform_device *pdev, int vpp) { gpio_set_value(EGPIO_MAGICIAN_FLASH_VPP, vpp); } static struct resource strataflash_resource = { .start = PXA_CS0_PHYS, .end = PXA_CS0_PHYS + SZ_64M - 1, .flags = IORESOURCE_MEM, }; static struct physmap_flash_data strataflash_data = { .width = 4, .set_vpp = magician_set_vpp, }; static struct platform_device strataflash = { .name = "physmap-flash", .id = -1, .resource = &strataflash_resource, .num_resources = 1, .dev = { .platform_data = &strataflash_data, }, }; /* * I2C */ static struct i2c_pxa_platform_data i2c_info = { .fast_mode = 1, }; /* * Platform devices */ static struct platform_device *devices[] __initdata = { &gpio_keys, &egpio, &backlight, &pasic3, &bq24022, &gpio_vbus, &power_supply, &strataflash, &leds_gpio, }; static struct gpio magician_global_gpios[] = { { GPIO13_MAGICIAN_CPLD_IRQ, GPIOF_IN, "CPLD_IRQ" }, { GPIO107_MAGICIAN_DS1WM_IRQ, GPIOF_IN, "DS1WM_IRQ" }, { GPIO104_MAGICIAN_LCD_POWER_1, GPIOF_OUT_INIT_LOW, "LCD power 1" }, { GPIO105_MAGICIAN_LCD_POWER_2, GPIOF_OUT_INIT_LOW, "LCD power 2" }, { GPIO106_MAGICIAN_LCD_POWER_3, GPIOF_OUT_INIT_LOW, "LCD power 3" }, { GPIO83_MAGICIAN_nIR_EN, GPIOF_OUT_INIT_HIGH, "nIR_EN" }, }; static void __init magician_init(void) { void __iomem *cpld; int lcd_select; int err; pxa2xx_mfp_config(ARRAY_AND_SIZE(magician_pin_config)); err = gpio_request_array(ARRAY_AND_SIZE(magician_global_gpios)); if (err) pr_err("magician: Failed to request GPIOs: %d\n", err); pxa_set_ffuart_info(NULL); pxa_set_btuart_info(NULL); pxa_set_stuart_info(NULL); platform_add_devices(ARRAY_AND_SIZE(devices)); pxa_set_ficp_info(&magician_ficp_info); 
pxa27x_set_i2c_power_info(NULL); pxa_set_i2c_info(&i2c_info); pxa_set_mci_info(&magician_mci_info); pxa_set_ohci_info(&magician_ohci_info); /* Check LCD type we have */ cpld = ioremap_nocache(PXA_CS3_PHYS, 0x1000); if (cpld) { u8 board_id = __raw_readb(cpld+0x14); iounmap(cpld); system_rev = board_id & 0x7; lcd_select = board_id & 0x8; pr_info("LCD type: %s\n", lcd_select ? "Samsung" : "Toppoly"); if (lcd_select && (system_rev < 3)) gpio_request_one(GPIO75_MAGICIAN_SAMSUNG_POWER, GPIOF_OUT_INIT_LOW, "SAMSUNG_POWER"); pxa_set_fb_info(NULL, lcd_select ? &samsung_info : &toppoly_info); } else pr_err("LCD detection: CPLD mapping failed\n"); } MACHINE_START(MAGICIAN, "HTC Magician") .atag_offset = 0x100, .map_io = pxa27x_map_io, .nr_irqs = MAGICIAN_NR_IRQS, .init_irq = pxa27x_init_irq, .handle_irq = pxa27x_handle_irq, .init_machine = magician_init, .timer = &pxa_timer, .restart = pxa_restart, MACHINE_END
gpl-2.0
MoKee/android_kernel_zte_nx511j
arch/um/drivers/vde_kern.c
4728
3090
/* * Copyright (C) 2007 Luca Bigliardi (shammash@artha.org). * Licensed under the GPL. * * Transport usage: * ethN=vde,<vde_switch>,<mac addr>,<port>,<group>,<mode>,<description> * */ #include <linux/init.h> #include <linux/netdevice.h> #include <net_kern.h> #include <net_user.h> #include "vde.h" static void vde_init(struct net_device *dev, void *data) { struct vde_init *init = data; struct uml_net_private *pri; struct vde_data *vpri; pri = netdev_priv(dev); vpri = (struct vde_data *) pri->user; vpri->vde_switch = init->vde_switch; vpri->descr = init->descr ? init->descr : "UML vde_transport"; vpri->args = NULL; vpri->conn = NULL; vpri->dev = dev; printk("vde backend - %s, ", vpri->vde_switch ? vpri->vde_switch : "(default socket)"); vde_init_libstuff(vpri, init); printk("\n"); } static int vde_read(int fd, struct sk_buff *skb, struct uml_net_private *lp) { struct vde_data *pri = (struct vde_data *) &lp->user; if (pri->conn != NULL) return vde_user_read(pri->conn, skb_mac_header(skb), skb->dev->mtu + ETH_HEADER_OTHER); printk(KERN_ERR "vde_read - we have no VDECONN to read from"); return -EBADF; } static int vde_write(int fd, struct sk_buff *skb, struct uml_net_private *lp) { struct vde_data *pri = (struct vde_data *) &lp->user; if (pri->conn != NULL) return vde_user_write((void *)pri->conn, skb->data, skb->len); printk(KERN_ERR "vde_write - we have no VDECONN to write to"); return -EBADF; } static const struct net_kern_info vde_kern_info = { .init = vde_init, .protocol = eth_protocol, .read = vde_read, .write = vde_write, }; static int vde_setup(char *str, char **mac_out, void *data) { struct vde_init *init = data; char *remain, *port_str = NULL, *mode_str = NULL, *last; *init = ((struct vde_init) { .vde_switch = NULL, .descr = NULL, .port = 0, .group = NULL, .mode = 0 }); remain = split_if_spec(str, &init->vde_switch, mac_out, &port_str, &init->group, &mode_str, &init->descr, NULL); if (remain != NULL) printk(KERN_WARNING "vde_setup - Ignoring extra data :" 
"'%s'\n", remain); if (port_str != NULL) { init->port = simple_strtoul(port_str, &last, 10); if ((*last != '\0') || (last == port_str)) { printk(KERN_ERR "vde_setup - Bad port : '%s'\n", port_str); return 0; } } if (mode_str != NULL) { init->mode = simple_strtoul(mode_str, &last, 8); if ((*last != '\0') || (last == mode_str)) { printk(KERN_ERR "vde_setup - Bad mode : '%s'\n", mode_str); return 0; } } printk(KERN_INFO "Configured vde device: %s\n", init->vde_switch ? init->vde_switch : "(default socket)"); return 1; } static struct transport vde_transport = { .list = LIST_HEAD_INIT(vde_transport.list), .name = "vde", .setup = vde_setup, .user = &vde_user_info, .kern = &vde_kern_info, .private_size = sizeof(struct vde_data), .setup_size = sizeof(struct vde_init), }; static int register_vde(void) { register_transport(&vde_transport); return 0; } late_initcall(register_vde);
gpl-2.0
SlimRoms/kernel_lge_v500
drivers/net/ethernet/8390/zorro8390.c
4984
12915
/* * Amiga Linux/m68k and Linux/PPC Zorro NS8390 Ethernet Driver * * (C) Copyright 1998-2000 by some Elitist 680x0 Users(TM) * * --------------------------------------------------------------------------- * * This program is based on all the other NE2000 drivers for Linux * * --------------------------------------------------------------------------- * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of the Linux * distribution for more details. * * --------------------------------------------------------------------------- * * The Ariadne II and X-Surf are Zorro-II boards containing Realtek RTL8019AS * Ethernet Controllers. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/zorro.h> #include <linux/jiffies.h> #include <asm/irq.h> #include <asm/amigaints.h> #include <asm/amigahw.h> #define EI_SHIFT(x) (ei_local->reg_offset[x]) #define ei_inb(port) in_8(port) #define ei_outb(val, port) out_8(port, val) #define ei_inb_p(port) in_8(port) #define ei_outb_p(val, port) out_8(port, val) static const char version[] = "8390.c:v1.10cvs 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n"; #include "lib8390.c" #define DRV_NAME "zorro8390" #define NE_BASE (dev->base_addr) #define NE_CMD (0x00 * 2) #define NE_DATAPORT (0x10 * 2) /* NatSemi-defined port window offset */ #define NE_RESET (0x1f * 2) /* Issue a read to reset, * a write to clear. 
*/ #define NE_IO_EXTENT (0x20 * 2) #define NE_EN0_ISR (0x07 * 2) #define NE_EN0_DCFG (0x0e * 2) #define NE_EN0_RSARLO (0x08 * 2) #define NE_EN0_RSARHI (0x09 * 2) #define NE_EN0_RCNTLO (0x0a * 2) #define NE_EN0_RXCR (0x0c * 2) #define NE_EN0_TXCR (0x0d * 2) #define NE_EN0_RCNTHI (0x0b * 2) #define NE_EN0_IMR (0x0f * 2) #define NESM_START_PG 0x40 /* First page of TX buffer */ #define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */ #define WORDSWAP(a) ((((a) >> 8) & 0xff) | ((a) << 8)) static struct card_info { zorro_id id; const char *name; unsigned int offset; } cards[] __devinitdata = { { ZORRO_PROD_VILLAGE_TRONIC_ARIADNE2, "Ariadne II", 0x0600 }, { ZORRO_PROD_INDIVIDUAL_COMPUTERS_X_SURF, "X-Surf", 0x8600 }, }; /* Hard reset the card. This used to pause for the same period that a * 8390 reset command required, but that shouldn't be necessary. */ static void zorro8390_reset_8390(struct net_device *dev) { unsigned long reset_start_time = jiffies; if (ei_debug > 1) netdev_dbg(dev, "resetting - t=%ld...\n", jiffies); z_writeb(z_readb(NE_BASE + NE_RESET), NE_BASE + NE_RESET); ei_status.txing = 0; ei_status.dmaing = 0; /* This check _should_not_ be necessary, omit eventually. */ while ((z_readb(NE_BASE + NE_EN0_ISR) & ENISR_RESET) == 0) if (time_after(jiffies, reset_start_time + 2 * HZ / 100)) { netdev_warn(dev, "%s: did not complete\n", __func__); break; } z_writeb(ENISR_RESET, NE_BASE + NE_EN0_ISR); /* Ack intr */ } /* Grab the 8390 specific header. Similar to the block_input routine, but * we don't need to be concerned with ring wrap as the header will be at * the start of a page, so we optimize accordingly. */ static void zorro8390_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page) { int nic_base = dev->base_addr; int cnt; short *ptrs; /* This *shouldn't* happen. 
* If it does, it's the last thing you'll see */ if (ei_status.dmaing) { netdev_err(dev, "%s: DMAing conflict [DMAstat:%d][irqlock:%d]\n", __func__, ei_status.dmaing, ei_status.irqlock); return; } ei_status.dmaing |= 0x01; z_writeb(E8390_NODMA + E8390_PAGE0 + E8390_START, nic_base + NE_CMD); z_writeb(ENISR_RDC, nic_base + NE_EN0_ISR); z_writeb(sizeof(struct e8390_pkt_hdr), nic_base + NE_EN0_RCNTLO); z_writeb(0, nic_base + NE_EN0_RCNTHI); z_writeb(0, nic_base + NE_EN0_RSARLO); /* On page boundary */ z_writeb(ring_page, nic_base + NE_EN0_RSARHI); z_writeb(E8390_RREAD+E8390_START, nic_base + NE_CMD); ptrs = (short *)hdr; for (cnt = 0; cnt < sizeof(struct e8390_pkt_hdr) >> 1; cnt++) *ptrs++ = z_readw(NE_BASE + NE_DATAPORT); z_writeb(ENISR_RDC, nic_base + NE_EN0_ISR); /* Ack intr */ hdr->count = WORDSWAP(hdr->count); ei_status.dmaing &= ~0x01; } /* Block input and output, similar to the Crynwr packet driver. * If you are porting to a new ethercard, look at the packet driver source * for hints. The NEx000 doesn't share the on-board packet memory -- * you have to put the packet out through the "remote DMA" dataport * using z_writeb. */ static void zorro8390_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset) { int nic_base = dev->base_addr; char *buf = skb->data; short *ptrs; int cnt; /* This *shouldn't* happen. 
* If it does, it's the last thing you'll see */ if (ei_status.dmaing) { netdev_err(dev, "%s: DMAing conflict [DMAstat:%d][irqlock:%d]\n", __func__, ei_status.dmaing, ei_status.irqlock); return; } ei_status.dmaing |= 0x01; z_writeb(E8390_NODMA + E8390_PAGE0 + E8390_START, nic_base + NE_CMD); z_writeb(ENISR_RDC, nic_base + NE_EN0_ISR); z_writeb(count & 0xff, nic_base + NE_EN0_RCNTLO); z_writeb(count >> 8, nic_base + NE_EN0_RCNTHI); z_writeb(ring_offset & 0xff, nic_base + NE_EN0_RSARLO); z_writeb(ring_offset >> 8, nic_base + NE_EN0_RSARHI); z_writeb(E8390_RREAD+E8390_START, nic_base + NE_CMD); ptrs = (short *)buf; for (cnt = 0; cnt < count >> 1; cnt++) *ptrs++ = z_readw(NE_BASE + NE_DATAPORT); if (count & 0x01) buf[count - 1] = z_readb(NE_BASE + NE_DATAPORT); z_writeb(ENISR_RDC, nic_base + NE_EN0_ISR); /* Ack intr */ ei_status.dmaing &= ~0x01; } static void zorro8390_block_output(struct net_device *dev, int count, const unsigned char *buf, const int start_page) { int nic_base = NE_BASE; unsigned long dma_start; short *ptrs; int cnt; /* Round the count up for word writes. Do we need to do this? * What effect will an odd byte count have on the 8390? * I should check someday. */ if (count & 0x01) count++; /* This *shouldn't* happen. * If it does, it's the last thing you'll see */ if (ei_status.dmaing) { netdev_err(dev, "%s: DMAing conflict [DMAstat:%d][irqlock:%d]\n", __func__, ei_status.dmaing, ei_status.irqlock); return; } ei_status.dmaing |= 0x01; /* We should already be in page 0, but to be safe... */ z_writeb(E8390_PAGE0+E8390_START+E8390_NODMA, nic_base + NE_CMD); z_writeb(ENISR_RDC, nic_base + NE_EN0_ISR); /* Now the normal output. 
*/ z_writeb(count & 0xff, nic_base + NE_EN0_RCNTLO); z_writeb(count >> 8, nic_base + NE_EN0_RCNTHI); z_writeb(0x00, nic_base + NE_EN0_RSARLO); z_writeb(start_page, nic_base + NE_EN0_RSARHI); z_writeb(E8390_RWRITE + E8390_START, nic_base + NE_CMD); ptrs = (short *)buf; for (cnt = 0; cnt < count >> 1; cnt++) z_writew(*ptrs++, NE_BASE + NE_DATAPORT); dma_start = jiffies; while ((z_readb(NE_BASE + NE_EN0_ISR) & ENISR_RDC) == 0) if (time_after(jiffies, dma_start + 2 * HZ / 100)) { /* 20ms */ netdev_err(dev, "timeout waiting for Tx RDC\n"); zorro8390_reset_8390(dev); __NS8390_init(dev, 1); break; } z_writeb(ENISR_RDC, nic_base + NE_EN0_ISR); /* Ack intr */ ei_status.dmaing &= ~0x01; } static int zorro8390_open(struct net_device *dev) { __ei_open(dev); return 0; } static int zorro8390_close(struct net_device *dev) { if (ei_debug > 1) netdev_dbg(dev, "Shutting down ethercard\n"); __ei_close(dev); return 0; } static void __devexit zorro8390_remove_one(struct zorro_dev *z) { struct net_device *dev = zorro_get_drvdata(z); unregister_netdev(dev); free_irq(IRQ_AMIGA_PORTS, dev); release_mem_region(ZTWO_PADDR(dev->base_addr), NE_IO_EXTENT * 2); free_netdev(dev); } static struct zorro_device_id zorro8390_zorro_tbl[] __devinitdata = { { ZORRO_PROD_VILLAGE_TRONIC_ARIADNE2, }, { ZORRO_PROD_INDIVIDUAL_COMPUTERS_X_SURF, }, { 0 } }; MODULE_DEVICE_TABLE(zorro, zorro8390_zorro_tbl); static const struct net_device_ops zorro8390_netdev_ops = { .ndo_open = zorro8390_open, .ndo_stop = zorro8390_close, .ndo_start_xmit = __ei_start_xmit, .ndo_tx_timeout = __ei_tx_timeout, .ndo_get_stats = __ei_get_stats, .ndo_set_rx_mode = __ei_set_multicast_list, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, .ndo_change_mtu = eth_change_mtu, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = __ei_poll, #endif }; static int __devinit zorro8390_init(struct net_device *dev, unsigned long board, const char *name, unsigned long ioaddr) { int i; int err; unsigned char 
SA_prom[32]; int start_page, stop_page; static u32 zorro8390_offsets[16] = { 0x00, 0x02, 0x04, 0x06, 0x08, 0x0a, 0x0c, 0x0e, 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e, }; /* Reset card. Who knows what dain-bramaged state it was left in. */ { unsigned long reset_start_time = jiffies; z_writeb(z_readb(ioaddr + NE_RESET), ioaddr + NE_RESET); while ((z_readb(ioaddr + NE_EN0_ISR) & ENISR_RESET) == 0) if (time_after(jiffies, reset_start_time + 2 * HZ / 100)) { netdev_warn(dev, "not found (no reset ack)\n"); return -ENODEV; } z_writeb(0xff, ioaddr + NE_EN0_ISR); /* Ack all intr. */ } /* Read the 16 bytes of station address PROM. * We must first initialize registers, * similar to NS8390_init(eifdev, 0). * We can't reliably read the SAPROM address without this. * (I learned the hard way!). */ { static const struct { u32 value; u32 offset; } program_seq[] = { {E8390_NODMA + E8390_PAGE0 + E8390_STOP, NE_CMD}, /* Select page 0 */ {0x48, NE_EN0_DCFG}, /* 0x48: Set byte-wide access */ {0x00, NE_EN0_RCNTLO}, /* Clear the count regs */ {0x00, NE_EN0_RCNTHI}, {0x00, NE_EN0_IMR}, /* Mask completion irq */ {0xFF, NE_EN0_ISR}, {E8390_RXOFF, NE_EN0_RXCR}, /* 0x20 Set to monitor */ {E8390_TXOFF, NE_EN0_TXCR}, /* 0x02 and loopback mode */ {32, NE_EN0_RCNTLO}, {0x00, NE_EN0_RCNTHI}, {0x00, NE_EN0_RSARLO}, /* DMA starting at 0x0000 */ {0x00, NE_EN0_RSARHI}, {E8390_RREAD + E8390_START, NE_CMD}, }; for (i = 0; i < ARRAY_SIZE(program_seq); i++) z_writeb(program_seq[i].value, ioaddr + program_seq[i].offset); } for (i = 0; i < 16; i++) { SA_prom[i] = z_readb(ioaddr + NE_DATAPORT); (void)z_readb(ioaddr + NE_DATAPORT); } /* We must set the 8390 for word mode. 
*/ z_writeb(0x49, ioaddr + NE_EN0_DCFG); start_page = NESM_START_PG; stop_page = NESM_STOP_PG; dev->base_addr = ioaddr; dev->irq = IRQ_AMIGA_PORTS; /* Install the Interrupt handler */ i = request_irq(IRQ_AMIGA_PORTS, __ei_interrupt, IRQF_SHARED, DRV_NAME, dev); if (i) return i; for (i = 0; i < ETH_ALEN; i++) dev->dev_addr[i] = SA_prom[i]; pr_debug("Found ethernet address: %pM\n", dev->dev_addr); ei_status.name = name; ei_status.tx_start_page = start_page; ei_status.stop_page = stop_page; ei_status.word16 = 1; ei_status.rx_start_page = start_page + TX_PAGES; ei_status.reset_8390 = zorro8390_reset_8390; ei_status.block_input = zorro8390_block_input; ei_status.block_output = zorro8390_block_output; ei_status.get_8390_hdr = zorro8390_get_8390_hdr; ei_status.reg_offset = zorro8390_offsets; dev->netdev_ops = &zorro8390_netdev_ops; __NS8390_init(dev, 0); err = register_netdev(dev); if (err) { free_irq(IRQ_AMIGA_PORTS, dev); return err; } netdev_info(dev, "%s at 0x%08lx, Ethernet Address %pM\n", name, board, dev->dev_addr); return 0; } static int __devinit zorro8390_init_one(struct zorro_dev *z, const struct zorro_device_id *ent) { struct net_device *dev; unsigned long board, ioaddr; int err, i; for (i = ARRAY_SIZE(cards) - 1; i >= 0; i--) if (z->id == cards[i].id) break; if (i < 0) return -ENODEV; board = z->resource.start; ioaddr = board + cards[i].offset; dev = ____alloc_ei_netdev(0); if (!dev) return -ENOMEM; if (!request_mem_region(ioaddr, NE_IO_EXTENT * 2, DRV_NAME)) { free_netdev(dev); return -EBUSY; } err = zorro8390_init(dev, board, cards[i].name, ZTWO_VADDR(ioaddr)); if (err) { release_mem_region(ioaddr, NE_IO_EXTENT * 2); free_netdev(dev); return err; } zorro_set_drvdata(z, dev); return 0; } static struct zorro_driver zorro8390_driver = { .name = "zorro8390", .id_table = zorro8390_zorro_tbl, .probe = zorro8390_init_one, .remove = __devexit_p(zorro8390_remove_one), }; static int __init zorro8390_init_module(void) { return zorro_register_driver(&zorro8390_driver); 
} static void __exit zorro8390_cleanup_module(void) { zorro_unregister_driver(&zorro8390_driver); } module_init(zorro8390_init_module); module_exit(zorro8390_cleanup_module); MODULE_LICENSE("GPL");
gpl-2.0
HardbitCoded/linux-3.4-rtws
drivers/isdn/hisax/hfc_sx.c
4984
44319
/* $Id: hfc_sx.c,v 1.12.2.5 2004/02/11 13:21:33 keil Exp $ * * level driver for Cologne Chip Designs hfc-s+/sp based cards * * Author Werner Cornelius * based on existing driver for CCD HFC PCI cards * Copyright by Werner Cornelius <werner@isdn4linux.de> * * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference. * */ #include <linux/init.h> #include "hisax.h" #include "hfc_sx.h" #include "isdnl1.h" #include <linux/interrupt.h> #include <linux/isapnp.h> #include <linux/slab.h> static const char *hfcsx_revision = "$Revision: 1.12.2.5 $"; /***************************************/ /* IRQ-table for CCDs demo board */ /* IRQs 6,5,10,11,12,15 are supported */ /***************************************/ /* Teles 16.3c Vendor Id TAG2620, Version 1.0, Vendor version 2.1 * * Thanks to Uwe Wisniewski * * ISA-SLOT Signal PIN * B25 IRQ3 92 IRQ_G * B23 IRQ5 94 IRQ_A * B4 IRQ2/9 95 IRQ_B * D3 IRQ10 96 IRQ_C * D4 IRQ11 97 IRQ_D * D5 IRQ12 98 IRQ_E * D6 IRQ15 99 IRQ_F */ #undef CCD_DEMO_BOARD #ifdef CCD_DEMO_BOARD static u_char ccd_sp_irqtab[16] = { 0, 0, 0, 0, 0, 2, 1, 0, 0, 0, 3, 4, 5, 0, 0, 6 }; #else /* Teles 16.3c */ static u_char ccd_sp_irqtab[16] = { 0, 0, 0, 7, 0, 1, 0, 0, 0, 2, 3, 4, 5, 0, 0, 6 }; #endif #define NT_T1_COUNT 20 /* number of 3.125ms interrupts for G2 timeout */ #define byteout(addr, val) outb(val, addr) #define bytein(addr) inb(addr) /******************************/ /* In/Out access to registers */ /******************************/ static inline void Write_hfc(struct IsdnCardState *cs, u_char regnum, u_char val) { byteout(cs->hw.hfcsx.base + 1, regnum); byteout(cs->hw.hfcsx.base, val); } static inline u_char Read_hfc(struct IsdnCardState *cs, u_char regnum) { u_char ret; byteout(cs->hw.hfcsx.base + 1, regnum); ret = bytein(cs->hw.hfcsx.base); return (ret); } /**************************************************/ /* select a fifo and remember which one for reuse */ 
/**************************************************/ static void fifo_select(struct IsdnCardState *cs, u_char fifo) { if (fifo == cs->hw.hfcsx.last_fifo) return; /* still valid */ byteout(cs->hw.hfcsx.base + 1, HFCSX_FIF_SEL); byteout(cs->hw.hfcsx.base, fifo); while (bytein(cs->hw.hfcsx.base + 1) & 1); /* wait for busy */ udelay(4); byteout(cs->hw.hfcsx.base, fifo); while (bytein(cs->hw.hfcsx.base + 1) & 1); /* wait for busy */ } /******************************************/ /* reset the specified fifo to defaults. */ /* If its a send fifo init needed markers */ /******************************************/ static void reset_fifo(struct IsdnCardState *cs, u_char fifo) { fifo_select(cs, fifo); /* first select the fifo */ byteout(cs->hw.hfcsx.base + 1, HFCSX_CIRM); byteout(cs->hw.hfcsx.base, cs->hw.hfcsx.cirm | 0x80); /* reset cmd */ udelay(1); while (bytein(cs->hw.hfcsx.base + 1) & 1); /* wait for busy */ } /*************************************************************/ /* write_fifo writes the skb contents to the desired fifo */ /* if no space is available or an error occurs 0 is returned */ /* the skb is not released in any way. 
*/ /*************************************************************/ static int write_fifo(struct IsdnCardState *cs, struct sk_buff *skb, u_char fifo, int trans_max) { unsigned short *msp; int fifo_size, count, z1, z2; u_char f_msk, f1, f2, *src; if (skb->len <= 0) return (0); if (fifo & 1) return (0); /* no write fifo */ fifo_select(cs, fifo); if (fifo & 4) { fifo_size = D_FIFO_SIZE; /* D-channel */ f_msk = MAX_D_FRAMES; if (trans_max) return (0); /* only HDLC */ } else { fifo_size = cs->hw.hfcsx.b_fifo_size; /* B-channel */ f_msk = MAX_B_FRAMES; } z1 = Read_hfc(cs, HFCSX_FIF_Z1H); z1 = ((z1 << 8) | Read_hfc(cs, HFCSX_FIF_Z1L)); /* Check for transparent mode */ if (trans_max) { z2 = Read_hfc(cs, HFCSX_FIF_Z2H); z2 = ((z2 << 8) | Read_hfc(cs, HFCSX_FIF_Z2L)); count = z2 - z1; if (count <= 0) count += fifo_size; /* free bytes */ if (count < skb->len + 1) return (0); /* no room */ count = fifo_size - count; /* bytes still not send */ if (count > 2 * trans_max) return (0); /* delay to long */ count = skb->len; src = skb->data; while (count--) Write_hfc(cs, HFCSX_FIF_DWR, *src++); return (1); /* success */ } msp = ((struct hfcsx_extra *)(cs->hw.hfcsx.extra))->marker; msp += (((fifo >> 1) & 3) * (MAX_B_FRAMES + 1)); f1 = Read_hfc(cs, HFCSX_FIF_F1) & f_msk; f2 = Read_hfc(cs, HFCSX_FIF_F2) & f_msk; count = f1 - f2; /* frame count actually buffered */ if (count < 0) count += (f_msk + 1); /* if wrap around */ if (count > f_msk - 1) { if (cs->debug & L1_DEB_ISAC_FIFO) debugl1(cs, "hfcsx_write_fifo %d more as %d frames", fifo, f_msk - 1); return (0); } *(msp + f1) = z1; /* remember marker */ if (cs->debug & L1_DEB_ISAC_FIFO) debugl1(cs, "hfcsx_write_fifo %d f1(%x) f2(%x) z1(f1)(%x)", fifo, f1, f2, z1); /* now determine free bytes in FIFO buffer */ count = *(msp + f2) - z1; if (count <= 0) count += fifo_size; /* count now contains available bytes */ if (cs->debug & L1_DEB_ISAC_FIFO) debugl1(cs, "hfcsx_write_fifo %d count(%u/%d)", fifo, skb->len, count); if (count < skb->len) { 
if (cs->debug & L1_DEB_ISAC_FIFO) debugl1(cs, "hfcsx_write_fifo %d no fifo mem", fifo); return (0); } count = skb->len; /* get frame len */ src = skb->data; /* source pointer */ while (count--) Write_hfc(cs, HFCSX_FIF_DWR, *src++); Read_hfc(cs, HFCSX_FIF_INCF1); /* increment F1 */ udelay(1); while (bytein(cs->hw.hfcsx.base + 1) & 1); /* wait for busy */ return (1); } /***************************************************************/ /* read_fifo reads data to an skb from the desired fifo */ /* if no data is available or an error occurs NULL is returned */ /* the skb is not released in any way. */ /***************************************************************/ static struct sk_buff * read_fifo(struct IsdnCardState *cs, u_char fifo, int trans_max) { int fifo_size, count, z1, z2; u_char f_msk, f1, f2, *dst; struct sk_buff *skb; if (!(fifo & 1)) return (NULL); /* no read fifo */ fifo_select(cs, fifo); if (fifo & 4) { fifo_size = D_FIFO_SIZE; /* D-channel */ f_msk = MAX_D_FRAMES; if (trans_max) return (NULL); /* only hdlc */ } else { fifo_size = cs->hw.hfcsx.b_fifo_size; /* B-channel */ f_msk = MAX_B_FRAMES; } /* transparent mode */ if (trans_max) { z1 = Read_hfc(cs, HFCSX_FIF_Z1H); z1 = ((z1 << 8) | Read_hfc(cs, HFCSX_FIF_Z1L)); z2 = Read_hfc(cs, HFCSX_FIF_Z2H); z2 = ((z2 << 8) | Read_hfc(cs, HFCSX_FIF_Z2L)); /* now determine bytes in actual FIFO buffer */ count = z1 - z2; if (count <= 0) count += fifo_size; /* count now contains buffered bytes */ count++; if (count > trans_max) count = trans_max; /* limit length */ skb = dev_alloc_skb(count); if (skb) { dst = skb_put(skb, count); while (count--) *dst++ = Read_hfc(cs, HFCSX_FIF_DRD); return skb; } else return NULL; /* no memory */ } do { f1 = Read_hfc(cs, HFCSX_FIF_F1) & f_msk; f2 = Read_hfc(cs, HFCSX_FIF_F2) & f_msk; if (f1 == f2) return (NULL); /* no frame available */ z1 = Read_hfc(cs, HFCSX_FIF_Z1H); z1 = ((z1 << 8) | Read_hfc(cs, HFCSX_FIF_Z1L)); z2 = Read_hfc(cs, HFCSX_FIF_Z2H); z2 = ((z2 << 8) | Read_hfc(cs, 
HFCSX_FIF_Z2L)); if (cs->debug & L1_DEB_ISAC_FIFO) debugl1(cs, "hfcsx_read_fifo %d f1(%x) f2(%x) z1(f2)(%x) z2(f2)(%x)", fifo, f1, f2, z1, z2); /* now determine bytes in actual FIFO buffer */ count = z1 - z2; if (count <= 0) count += fifo_size; /* count now contains buffered bytes */ count++; if (cs->debug & L1_DEB_ISAC_FIFO) debugl1(cs, "hfcsx_read_fifo %d count %u)", fifo, count); if ((count > fifo_size) || (count < 4)) { if (cs->debug & L1_DEB_WARN) debugl1(cs, "hfcsx_read_fifo %d paket inv. len %d ", fifo , count); while (count) { count--; /* empty fifo */ Read_hfc(cs, HFCSX_FIF_DRD); } skb = NULL; } else if ((skb = dev_alloc_skb(count - 3))) { count -= 3; dst = skb_put(skb, count); while (count--) *dst++ = Read_hfc(cs, HFCSX_FIF_DRD); Read_hfc(cs, HFCSX_FIF_DRD); /* CRC 1 */ Read_hfc(cs, HFCSX_FIF_DRD); /* CRC 2 */ if (Read_hfc(cs, HFCSX_FIF_DRD)) { dev_kfree_skb_irq(skb); if (cs->debug & L1_DEB_ISAC_FIFO) debugl1(cs, "hfcsx_read_fifo %d crc error", fifo); skb = NULL; } } else { printk(KERN_WARNING "HFC-SX: receive out of memory\n"); return (NULL); } Read_hfc(cs, HFCSX_FIF_INCF2); /* increment F2 */ udelay(1); while (bytein(cs->hw.hfcsx.base + 1) & 1); /* wait for busy */ udelay(1); } while (!skb); /* retry in case of crc error */ return (skb); } /******************************************/ /* free hardware resources used by driver */ /******************************************/ static void release_io_hfcsx(struct IsdnCardState *cs) { cs->hw.hfcsx.int_m2 = 0; /* interrupt output off ! 
*/ Write_hfc(cs, HFCSX_INT_M2, cs->hw.hfcsx.int_m2); Write_hfc(cs, HFCSX_CIRM, HFCSX_RESET); /* Reset On */ msleep(30); /* Timeout 30ms */ Write_hfc(cs, HFCSX_CIRM, 0); /* Reset Off */ del_timer(&cs->hw.hfcsx.timer); release_region(cs->hw.hfcsx.base, 2); /* release IO-Block */ kfree(cs->hw.hfcsx.extra); cs->hw.hfcsx.extra = NULL; } /**********************************************************/ /* set_fifo_size determines the size of the RAM and FIFOs */ /* returning 0 -> need to reset the chip again. */ /**********************************************************/ static int set_fifo_size(struct IsdnCardState *cs) { if (cs->hw.hfcsx.b_fifo_size) return (1); /* already determined */ if ((cs->hw.hfcsx.chip >> 4) == 9) { cs->hw.hfcsx.b_fifo_size = B_FIFO_SIZE_32K; return (1); } cs->hw.hfcsx.b_fifo_size = B_FIFO_SIZE_8K; cs->hw.hfcsx.cirm |= 0x10; /* only 8K of ram */ return (0); } /********************************************************************************/ /* function called to reset the HFC SX chip. A complete software reset of chip */ /* and fifos is done. */ /********************************************************************************/ static void reset_hfcsx(struct IsdnCardState *cs) { cs->hw.hfcsx.int_m2 = 0; /* interrupt output off ! 
*/ Write_hfc(cs, HFCSX_INT_M2, cs->hw.hfcsx.int_m2); printk(KERN_INFO "HFC_SX: resetting card\n"); while (1) { Write_hfc(cs, HFCSX_CIRM, HFCSX_RESET | cs->hw.hfcsx.cirm); /* Reset */ mdelay(30); Write_hfc(cs, HFCSX_CIRM, cs->hw.hfcsx.cirm); /* Reset Off */ mdelay(20); if (Read_hfc(cs, HFCSX_STATUS) & 2) printk(KERN_WARNING "HFC-SX init bit busy\n"); cs->hw.hfcsx.last_fifo = 0xff; /* invalidate */ if (!set_fifo_size(cs)) continue; break; } cs->hw.hfcsx.trm = 0 + HFCSX_BTRANS_THRESMASK; /* no echo connect , threshold */ Write_hfc(cs, HFCSX_TRM, cs->hw.hfcsx.trm); Write_hfc(cs, HFCSX_CLKDEL, 0x0e); /* ST-Bit delay for TE-Mode */ cs->hw.hfcsx.sctrl_e = HFCSX_AUTO_AWAKE; Write_hfc(cs, HFCSX_SCTRL_E, cs->hw.hfcsx.sctrl_e); /* S/T Auto awake */ cs->hw.hfcsx.bswapped = 0; /* no exchange */ cs->hw.hfcsx.nt_mode = 0; /* we are in TE mode */ cs->hw.hfcsx.ctmt = HFCSX_TIM3_125 | HFCSX_AUTO_TIMER; Write_hfc(cs, HFCSX_CTMT, cs->hw.hfcsx.ctmt); cs->hw.hfcsx.int_m1 = HFCSX_INTS_DTRANS | HFCSX_INTS_DREC | HFCSX_INTS_L1STATE | HFCSX_INTS_TIMER; Write_hfc(cs, HFCSX_INT_M1, cs->hw.hfcsx.int_m1); /* Clear already pending ints */ if (Read_hfc(cs, HFCSX_INT_S1)); Write_hfc(cs, HFCSX_STATES, HFCSX_LOAD_STATE | 2); /* HFC ST 2 */ udelay(10); Write_hfc(cs, HFCSX_STATES, 2); /* HFC ST 2 */ cs->hw.hfcsx.mst_m = HFCSX_MASTER; /* HFC Master Mode */ Write_hfc(cs, HFCSX_MST_MODE, cs->hw.hfcsx.mst_m); cs->hw.hfcsx.sctrl = 0x40; /* set tx_lo mode, error in datasheet ! 
*/ Write_hfc(cs, HFCSX_SCTRL, cs->hw.hfcsx.sctrl); cs->hw.hfcsx.sctrl_r = 0; Write_hfc(cs, HFCSX_SCTRL_R, cs->hw.hfcsx.sctrl_r); /* Init GCI/IOM2 in master mode */ /* Slots 0 and 1 are set for B-chan 1 and 2 */ /* D- and monitor/CI channel are not enabled */ /* STIO1 is used as output for data, B1+B2 from ST->IOM+HFC */ /* STIO2 is used as data input, B1+B2 from IOM->ST */ /* ST B-channel send disabled -> continuous 1s */ /* The IOM slots are always enabled */ cs->hw.hfcsx.conn = 0x36; /* set data flow directions */ Write_hfc(cs, HFCSX_CONNECT, cs->hw.hfcsx.conn); Write_hfc(cs, HFCSX_B1_SSL, 0x80); /* B1-Slot 0 STIO1 out enabled */ Write_hfc(cs, HFCSX_B2_SSL, 0x81); /* B2-Slot 1 STIO1 out enabled */ Write_hfc(cs, HFCSX_B1_RSL, 0x80); /* B1-Slot 0 STIO2 in enabled */ Write_hfc(cs, HFCSX_B2_RSL, 0x81); /* B2-Slot 1 STIO2 in enabled */ /* Finally enable IRQ output */ cs->hw.hfcsx.int_m2 = HFCSX_IRQ_ENABLE; Write_hfc(cs, HFCSX_INT_M2, cs->hw.hfcsx.int_m2); if (Read_hfc(cs, HFCSX_INT_S2)); } /***************************************************/ /* Timer function called when kernel timer expires */ /***************************************************/ static void hfcsx_Timer(struct IsdnCardState *cs) { cs->hw.hfcsx.timer.expires = jiffies + 75; /* WD RESET */ /* WriteReg(cs, HFCD_DATA, HFCD_CTMT, cs->hw.hfcsx.ctmt | 0x80); add_timer(&cs->hw.hfcsx.timer); */ } /************************************************/ /* select a b-channel entry matching and active */ /************************************************/ static struct BCState * Sel_BCS(struct IsdnCardState *cs, int channel) { if (cs->bcs[0].mode && (cs->bcs[0].channel == channel)) return (&cs->bcs[0]); else if (cs->bcs[1].mode && (cs->bcs[1].channel == channel)) return (&cs->bcs[1]); else return (NULL); } /*******************************/ /* D-channel receive procedure */ /*******************************/ static int receive_dmsg(struct IsdnCardState *cs) { struct sk_buff *skb; int count = 5; if 
(test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) { debugl1(cs, "rec_dmsg blocked"); return (1); } do { skb = read_fifo(cs, HFCSX_SEL_D_RX, 0); if (skb) { skb_queue_tail(&cs->rq, skb); schedule_event(cs, D_RCVBUFREADY); } } while (--count && skb); test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags); return (1); } /**********************************/ /* B-channel main receive routine */ /**********************************/ static void main_rec_hfcsx(struct BCState *bcs) { struct IsdnCardState *cs = bcs->cs; int count = 5; struct sk_buff *skb; Begin: count--; if (test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) { debugl1(cs, "rec_data %d blocked", bcs->channel); return; } skb = read_fifo(cs, ((bcs->channel) && (!cs->hw.hfcsx.bswapped)) ? HFCSX_SEL_B2_RX : HFCSX_SEL_B1_RX, (bcs->mode == L1_MODE_TRANS) ? HFCSX_BTRANS_THRESHOLD : 0); if (skb) { skb_queue_tail(&bcs->rqueue, skb); schedule_event(bcs, B_RCVBUFREADY); } test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags); if (count && skb) goto Begin; return; } /**************************/ /* D-channel send routine */ /**************************/ static void hfcsx_fill_dfifo(struct IsdnCardState *cs) { if (!cs->tx_skb) return; if (cs->tx_skb->len <= 0) return; if (write_fifo(cs, cs->tx_skb, HFCSX_SEL_D_TX, 0)) { dev_kfree_skb_any(cs->tx_skb); cs->tx_skb = NULL; } return; } /**************************/ /* B-channel send routine */ /**************************/ static void hfcsx_fill_fifo(struct BCState *bcs) { struct IsdnCardState *cs = bcs->cs; if (!bcs->tx_skb) return; if (bcs->tx_skb->len <= 0) return; if (write_fifo(cs, bcs->tx_skb, ((bcs->channel) && (!cs->hw.hfcsx.bswapped)) ? HFCSX_SEL_B2_TX : HFCSX_SEL_B1_TX, (bcs->mode == L1_MODE_TRANS) ? 
HFCSX_BTRANS_THRESHOLD : 0)) { bcs->tx_cnt -= bcs->tx_skb->len; if (test_bit(FLG_LLI_L1WAKEUP, &bcs->st->lli.flag) && (PACKET_NOACK != bcs->tx_skb->pkt_type)) { u_long flags; spin_lock_irqsave(&bcs->aclock, flags); bcs->ackcnt += bcs->tx_skb->len; spin_unlock_irqrestore(&bcs->aclock, flags); schedule_event(bcs, B_ACKPENDING); } dev_kfree_skb_any(bcs->tx_skb); bcs->tx_skb = NULL; test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag); } } /**********************************************/ /* D-channel l1 state call for leased NT-mode */ /**********************************************/ static void dch_nt_l2l1(struct PStack *st, int pr, void *arg) { struct IsdnCardState *cs = (struct IsdnCardState *) st->l1.hardware; switch (pr) { case (PH_DATA | REQUEST): case (PH_PULL | REQUEST): case (PH_PULL | INDICATION): st->l1.l1hw(st, pr, arg); break; case (PH_ACTIVATE | REQUEST): st->l1.l1l2(st, PH_ACTIVATE | CONFIRM, NULL); break; case (PH_TESTLOOP | REQUEST): if (1 & (long) arg) debugl1(cs, "PH_TEST_LOOP B1"); if (2 & (long) arg) debugl1(cs, "PH_TEST_LOOP B2"); if (!(3 & (long) arg)) debugl1(cs, "PH_TEST_LOOP DISABLED"); st->l1.l1hw(st, HW_TESTLOOP | REQUEST, arg); break; default: if (cs->debug) debugl1(cs, "dch_nt_l2l1 msg %04X unhandled", pr); break; } } /***********************/ /* set/reset echo mode */ /***********************/ static int hfcsx_auxcmd(struct IsdnCardState *cs, isdn_ctrl *ic) { unsigned long flags; int i = *(unsigned int *) ic->parm.num; if ((ic->arg == 98) && (!(cs->hw.hfcsx.int_m1 & (HFCSX_INTS_B2TRANS + HFCSX_INTS_B2REC + HFCSX_INTS_B1TRANS + HFCSX_INTS_B1REC)))) { spin_lock_irqsave(&cs->lock, flags); Write_hfc(cs, HFCSX_STATES, HFCSX_LOAD_STATE | 0); /* HFC ST G0 */ udelay(10); cs->hw.hfcsx.sctrl |= SCTRL_MODE_NT; Write_hfc(cs, HFCSX_SCTRL, cs->hw.hfcsx.sctrl); /* set NT-mode */ udelay(10); Write_hfc(cs, HFCSX_STATES, HFCSX_LOAD_STATE | 1); /* HFC ST G1 */ udelay(10); Write_hfc(cs, HFCSX_STATES, 1 | HFCSX_ACTIVATE | HFCSX_DO_ACTION); cs->dc.hfcsx.ph_state = 1; 
cs->hw.hfcsx.nt_mode = 1; cs->hw.hfcsx.nt_timer = 0; spin_unlock_irqrestore(&cs->lock, flags); cs->stlist->l2.l2l1 = dch_nt_l2l1; debugl1(cs, "NT mode activated"); return (0); } if ((cs->chanlimit > 1) || (cs->hw.hfcsx.bswapped) || (cs->hw.hfcsx.nt_mode) || (ic->arg != 12)) return (-EINVAL); if (i) { cs->logecho = 1; cs->hw.hfcsx.trm |= 0x20; /* enable echo chan */ cs->hw.hfcsx.int_m1 |= HFCSX_INTS_B2REC; /* reset Channel !!!!! */ } else { cs->logecho = 0; cs->hw.hfcsx.trm &= ~0x20; /* disable echo chan */ cs->hw.hfcsx.int_m1 &= ~HFCSX_INTS_B2REC; } cs->hw.hfcsx.sctrl_r &= ~SCTRL_B2_ENA; cs->hw.hfcsx.sctrl &= ~SCTRL_B2_ENA; cs->hw.hfcsx.conn |= 0x10; /* B2-IOM -> B2-ST */ cs->hw.hfcsx.ctmt &= ~2; spin_lock_irqsave(&cs->lock, flags); Write_hfc(cs, HFCSX_CTMT, cs->hw.hfcsx.ctmt); Write_hfc(cs, HFCSX_SCTRL_R, cs->hw.hfcsx.sctrl_r); Write_hfc(cs, HFCSX_SCTRL, cs->hw.hfcsx.sctrl); Write_hfc(cs, HFCSX_CONNECT, cs->hw.hfcsx.conn); Write_hfc(cs, HFCSX_TRM, cs->hw.hfcsx.trm); Write_hfc(cs, HFCSX_INT_M1, cs->hw.hfcsx.int_m1); spin_unlock_irqrestore(&cs->lock, flags); return (0); } /* hfcsx_auxcmd */ /*****************************/ /* E-channel receive routine */ /*****************************/ static void receive_emsg(struct IsdnCardState *cs) { int count = 5; u_char *ptr; struct sk_buff *skb; if (test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) { debugl1(cs, "echo_rec_data blocked"); return; } do { skb = read_fifo(cs, HFCSX_SEL_B2_RX, 0); if (skb) { if (cs->debug & DEB_DLOG_HEX) { ptr = cs->dlog; if ((skb->len) < MAX_DLOG_SPACE / 3 - 10) { *ptr++ = 'E'; *ptr++ = 'C'; *ptr++ = 'H'; *ptr++ = 'O'; *ptr++ = ':'; ptr += QuickHex(ptr, skb->data, skb->len); ptr--; *ptr++ = '\n'; *ptr = 0; HiSax_putstatus(cs, NULL, cs->dlog); } else HiSax_putstatus(cs, "LogEcho: ", "warning Frame too big (%d)", skb->len); } dev_kfree_skb_any(skb); } } while (--count && skb); test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags); return; } /* receive_emsg */ /*********************/ /* Interrupt 
handler */ /*********************/ static irqreturn_t hfcsx_interrupt(int intno, void *dev_id) { struct IsdnCardState *cs = dev_id; u_char exval; struct BCState *bcs; int count = 15; u_long flags; u_char val, stat; if (!(cs->hw.hfcsx.int_m2 & 0x08)) return IRQ_NONE; /* not initialised */ spin_lock_irqsave(&cs->lock, flags); if (HFCSX_ANYINT & (stat = Read_hfc(cs, HFCSX_STATUS))) { val = Read_hfc(cs, HFCSX_INT_S1); if (cs->debug & L1_DEB_ISAC) debugl1(cs, "HFC-SX: stat(%02x) s1(%02x)", stat, val); } else { spin_unlock_irqrestore(&cs->lock, flags); return IRQ_NONE; } if (cs->debug & L1_DEB_ISAC) debugl1(cs, "HFC-SX irq %x %s", val, test_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags) ? "locked" : "unlocked"); val &= cs->hw.hfcsx.int_m1; if (val & 0x40) { /* state machine irq */ exval = Read_hfc(cs, HFCSX_STATES) & 0xf; if (cs->debug & L1_DEB_ISAC) debugl1(cs, "ph_state chg %d->%d", cs->dc.hfcsx.ph_state, exval); cs->dc.hfcsx.ph_state = exval; schedule_event(cs, D_L1STATECHANGE); val &= ~0x40; } if (val & 0x80) { /* timer irq */ if (cs->hw.hfcsx.nt_mode) { if ((--cs->hw.hfcsx.nt_timer) < 0) schedule_event(cs, D_L1STATECHANGE); } val &= ~0x80; Write_hfc(cs, HFCSX_CTMT, cs->hw.hfcsx.ctmt | HFCSX_CLTIMER); } while (val) { if (test_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) { cs->hw.hfcsx.int_s1 |= val; spin_unlock_irqrestore(&cs->lock, flags); return IRQ_HANDLED; } if (cs->hw.hfcsx.int_s1 & 0x18) { exval = val; val = cs->hw.hfcsx.int_s1; cs->hw.hfcsx.int_s1 = exval; } if (val & 0x08) { if (!(bcs = Sel_BCS(cs, cs->hw.hfcsx.bswapped ? 1 : 0))) { if (cs->debug) debugl1(cs, "hfcsx spurious 0x08 IRQ"); } else main_rec_hfcsx(bcs); } if (val & 0x10) { if (cs->logecho) receive_emsg(cs); else if (!(bcs = Sel_BCS(cs, 1))) { if (cs->debug) debugl1(cs, "hfcsx spurious 0x10 IRQ"); } else main_rec_hfcsx(bcs); } if (val & 0x01) { if (!(bcs = Sel_BCS(cs, cs->hw.hfcsx.bswapped ? 
1 : 0))) { if (cs->debug) debugl1(cs, "hfcsx spurious 0x01 IRQ"); } else { if (bcs->tx_skb) { if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) { hfcsx_fill_fifo(bcs); test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags); } else debugl1(cs, "fill_data %d blocked", bcs->channel); } else { if ((bcs->tx_skb = skb_dequeue(&bcs->squeue))) { if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) { hfcsx_fill_fifo(bcs); test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags); } else debugl1(cs, "fill_data %d blocked", bcs->channel); } else { schedule_event(bcs, B_XMTBUFREADY); } } } } if (val & 0x02) { if (!(bcs = Sel_BCS(cs, 1))) { if (cs->debug) debugl1(cs, "hfcsx spurious 0x02 IRQ"); } else { if (bcs->tx_skb) { if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) { hfcsx_fill_fifo(bcs); test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags); } else debugl1(cs, "fill_data %d blocked", bcs->channel); } else { if ((bcs->tx_skb = skb_dequeue(&bcs->squeue))) { if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) { hfcsx_fill_fifo(bcs); test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags); } else debugl1(cs, "fill_data %d blocked", bcs->channel); } else { schedule_event(bcs, B_XMTBUFREADY); } } } } if (val & 0x20) { /* receive dframe */ receive_dmsg(cs); } if (val & 0x04) { /* dframe transmitted */ if (test_and_clear_bit(FLG_DBUSY_TIMER, &cs->HW_Flags)) del_timer(&cs->dbusytimer); if (test_and_clear_bit(FLG_L1_DBUSY, &cs->HW_Flags)) schedule_event(cs, D_CLEARBUSY); if (cs->tx_skb) { if (cs->tx_skb->len) { if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) { hfcsx_fill_dfifo(cs); test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags); } else { debugl1(cs, "hfcsx_fill_dfifo irq blocked"); } goto afterXPR; } else { dev_kfree_skb_irq(cs->tx_skb); cs->tx_cnt = 0; cs->tx_skb = NULL; } } if ((cs->tx_skb = skb_dequeue(&cs->sq))) { cs->tx_cnt = 0; if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) { hfcsx_fill_dfifo(cs); test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags); } else { 
debugl1(cs, "hfcsx_fill_dfifo irq blocked"); } } else schedule_event(cs, D_XMTBUFREADY); } afterXPR: if (cs->hw.hfcsx.int_s1 && count--) { val = cs->hw.hfcsx.int_s1; cs->hw.hfcsx.int_s1 = 0; if (cs->debug & L1_DEB_ISAC) debugl1(cs, "HFC-SX irq %x loop %d", val, 15 - count); } else val = 0; } spin_unlock_irqrestore(&cs->lock, flags); return IRQ_HANDLED; } /********************************************************************/ /* timer callback for D-chan busy resolution. Currently no function */ /********************************************************************/ static void hfcsx_dbusy_timer(struct IsdnCardState *cs) { } /*************************************/ /* Layer 1 D-channel hardware access */ /*************************************/ static void HFCSX_l1hw(struct PStack *st, int pr, void *arg) { struct IsdnCardState *cs = (struct IsdnCardState *) st->l1.hardware; struct sk_buff *skb = arg; u_long flags; switch (pr) { case (PH_DATA | REQUEST): if (cs->debug & DEB_DLOG_HEX) LogFrame(cs, skb->data, skb->len); if (cs->debug & DEB_DLOG_VERBOSE) dlogframe(cs, skb, 0); spin_lock_irqsave(&cs->lock, flags); if (cs->tx_skb) { skb_queue_tail(&cs->sq, skb); #ifdef L2FRAME_DEBUG /* psa */ if (cs->debug & L1_DEB_LAPD) Logl2Frame(cs, skb, "PH_DATA Queued", 0); #endif } else { cs->tx_skb = skb; cs->tx_cnt = 0; #ifdef L2FRAME_DEBUG /* psa */ if (cs->debug & L1_DEB_LAPD) Logl2Frame(cs, skb, "PH_DATA", 0); #endif if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) { hfcsx_fill_dfifo(cs); test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags); } else debugl1(cs, "hfcsx_fill_dfifo blocked"); } spin_unlock_irqrestore(&cs->lock, flags); break; case (PH_PULL | INDICATION): spin_lock_irqsave(&cs->lock, flags); if (cs->tx_skb) { if (cs->debug & L1_DEB_WARN) debugl1(cs, " l2l1 tx_skb exist this shouldn't happen"); skb_queue_tail(&cs->sq, skb); spin_unlock_irqrestore(&cs->lock, flags); break; } if (cs->debug & DEB_DLOG_HEX) LogFrame(cs, skb->data, skb->len); if (cs->debug & 
DEB_DLOG_VERBOSE) dlogframe(cs, skb, 0); cs->tx_skb = skb; cs->tx_cnt = 0; #ifdef L2FRAME_DEBUG /* psa */ if (cs->debug & L1_DEB_LAPD) Logl2Frame(cs, skb, "PH_DATA_PULLED", 0); #endif if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) { hfcsx_fill_dfifo(cs); test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags); } else debugl1(cs, "hfcsx_fill_dfifo blocked"); spin_unlock_irqrestore(&cs->lock, flags); break; case (PH_PULL | REQUEST): #ifdef L2FRAME_DEBUG /* psa */ if (cs->debug & L1_DEB_LAPD) debugl1(cs, "-> PH_REQUEST_PULL"); #endif if (!cs->tx_skb) { test_and_clear_bit(FLG_L1_PULL_REQ, &st->l1.Flags); st->l1.l1l2(st, PH_PULL | CONFIRM, NULL); } else test_and_set_bit(FLG_L1_PULL_REQ, &st->l1.Flags); break; case (HW_RESET | REQUEST): spin_lock_irqsave(&cs->lock, flags); Write_hfc(cs, HFCSX_STATES, HFCSX_LOAD_STATE | 3); /* HFC ST 3 */ udelay(6); Write_hfc(cs, HFCSX_STATES, 3); /* HFC ST 2 */ cs->hw.hfcsx.mst_m |= HFCSX_MASTER; Write_hfc(cs, HFCSX_MST_MODE, cs->hw.hfcsx.mst_m); Write_hfc(cs, HFCSX_STATES, HFCSX_ACTIVATE | HFCSX_DO_ACTION); spin_unlock_irqrestore(&cs->lock, flags); l1_msg(cs, HW_POWERUP | CONFIRM, NULL); break; case (HW_ENABLE | REQUEST): spin_lock_irqsave(&cs->lock, flags); Write_hfc(cs, HFCSX_STATES, HFCSX_ACTIVATE | HFCSX_DO_ACTION); spin_unlock_irqrestore(&cs->lock, flags); break; case (HW_DEACTIVATE | REQUEST): spin_lock_irqsave(&cs->lock, flags); cs->hw.hfcsx.mst_m &= ~HFCSX_MASTER; Write_hfc(cs, HFCSX_MST_MODE, cs->hw.hfcsx.mst_m); spin_unlock_irqrestore(&cs->lock, flags); break; case (HW_INFO3 | REQUEST): spin_lock_irqsave(&cs->lock, flags); cs->hw.hfcsx.mst_m |= HFCSX_MASTER; Write_hfc(cs, HFCSX_MST_MODE, cs->hw.hfcsx.mst_m); spin_unlock_irqrestore(&cs->lock, flags); break; case (HW_TESTLOOP | REQUEST): spin_lock_irqsave(&cs->lock, flags); switch ((long) arg) { case (1): Write_hfc(cs, HFCSX_B1_SSL, 0x80); /* tx slot */ Write_hfc(cs, HFCSX_B1_RSL, 0x80); /* rx slot */ cs->hw.hfcsx.conn = (cs->hw.hfcsx.conn & ~7) | 1; Write_hfc(cs, 
HFCSX_CONNECT, cs->hw.hfcsx.conn); break; case (2): Write_hfc(cs, HFCSX_B2_SSL, 0x81); /* tx slot */ Write_hfc(cs, HFCSX_B2_RSL, 0x81); /* rx slot */ cs->hw.hfcsx.conn = (cs->hw.hfcsx.conn & ~0x38) | 0x08; Write_hfc(cs, HFCSX_CONNECT, cs->hw.hfcsx.conn); break; default: spin_unlock_irqrestore(&cs->lock, flags); if (cs->debug & L1_DEB_WARN) debugl1(cs, "hfcsx_l1hw loop invalid %4lx", (unsigned long)arg); return; } cs->hw.hfcsx.trm |= 0x80; /* enable IOM-loop */ Write_hfc(cs, HFCSX_TRM, cs->hw.hfcsx.trm); spin_unlock_irqrestore(&cs->lock, flags); break; default: if (cs->debug & L1_DEB_WARN) debugl1(cs, "hfcsx_l1hw unknown pr %4x", pr); break; } } /***********************************************/ /* called during init setting l1 stack pointer */ /***********************************************/ static void setstack_hfcsx(struct PStack *st, struct IsdnCardState *cs) { st->l1.l1hw = HFCSX_l1hw; } /**************************************/ /* send B-channel data if not blocked */ /**************************************/ static void hfcsx_send_data(struct BCState *bcs) { struct IsdnCardState *cs = bcs->cs; if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) { hfcsx_fill_fifo(bcs); test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags); } else debugl1(cs, "send_data %d blocked", bcs->channel); } /***************************************************************/ /* activate/deactivate hardware for selected channels and mode */ /***************************************************************/ static void mode_hfcsx(struct BCState *bcs, int mode, int bc) { struct IsdnCardState *cs = bcs->cs; int fifo2; if (cs->debug & L1_DEB_HSCX) debugl1(cs, "HFCSX bchannel mode %d bchan %d/%d", mode, bc, bcs->channel); bcs->mode = mode; bcs->channel = bc; fifo2 = bc; if (cs->chanlimit > 1) { cs->hw.hfcsx.bswapped = 0; /* B1 and B2 normal mode */ cs->hw.hfcsx.sctrl_e &= ~0x80; } else { if (bc) { if (mode != L1_MODE_NULL) { cs->hw.hfcsx.bswapped = 1; /* B1 and B2 exchanged */ cs->hw.hfcsx.sctrl_e 
|= 0x80; } else { cs->hw.hfcsx.bswapped = 0; /* B1 and B2 normal mode */ cs->hw.hfcsx.sctrl_e &= ~0x80; } fifo2 = 0; } else { cs->hw.hfcsx.bswapped = 0; /* B1 and B2 normal mode */ cs->hw.hfcsx.sctrl_e &= ~0x80; } } switch (mode) { case (L1_MODE_NULL): if (bc) { cs->hw.hfcsx.sctrl &= ~SCTRL_B2_ENA; cs->hw.hfcsx.sctrl_r &= ~SCTRL_B2_ENA; } else { cs->hw.hfcsx.sctrl &= ~SCTRL_B1_ENA; cs->hw.hfcsx.sctrl_r &= ~SCTRL_B1_ENA; } if (fifo2) { cs->hw.hfcsx.int_m1 &= ~(HFCSX_INTS_B2TRANS + HFCSX_INTS_B2REC); } else { cs->hw.hfcsx.int_m1 &= ~(HFCSX_INTS_B1TRANS + HFCSX_INTS_B1REC); } break; case (L1_MODE_TRANS): if (bc) { cs->hw.hfcsx.sctrl |= SCTRL_B2_ENA; cs->hw.hfcsx.sctrl_r |= SCTRL_B2_ENA; } else { cs->hw.hfcsx.sctrl |= SCTRL_B1_ENA; cs->hw.hfcsx.sctrl_r |= SCTRL_B1_ENA; } if (fifo2) { cs->hw.hfcsx.int_m1 |= (HFCSX_INTS_B2TRANS + HFCSX_INTS_B2REC); cs->hw.hfcsx.ctmt |= 2; cs->hw.hfcsx.conn &= ~0x18; } else { cs->hw.hfcsx.int_m1 |= (HFCSX_INTS_B1TRANS + HFCSX_INTS_B1REC); cs->hw.hfcsx.ctmt |= 1; cs->hw.hfcsx.conn &= ~0x03; } break; case (L1_MODE_HDLC): if (bc) { cs->hw.hfcsx.sctrl |= SCTRL_B2_ENA; cs->hw.hfcsx.sctrl_r |= SCTRL_B2_ENA; } else { cs->hw.hfcsx.sctrl |= SCTRL_B1_ENA; cs->hw.hfcsx.sctrl_r |= SCTRL_B1_ENA; } if (fifo2) { cs->hw.hfcsx.int_m1 |= (HFCSX_INTS_B2TRANS + HFCSX_INTS_B2REC); cs->hw.hfcsx.ctmt &= ~2; cs->hw.hfcsx.conn &= ~0x18; } else { cs->hw.hfcsx.int_m1 |= (HFCSX_INTS_B1TRANS + HFCSX_INTS_B1REC); cs->hw.hfcsx.ctmt &= ~1; cs->hw.hfcsx.conn &= ~0x03; } break; case (L1_MODE_EXTRN): if (bc) { cs->hw.hfcsx.conn |= 0x10; cs->hw.hfcsx.sctrl |= SCTRL_B2_ENA; cs->hw.hfcsx.sctrl_r |= SCTRL_B2_ENA; cs->hw.hfcsx.int_m1 &= ~(HFCSX_INTS_B2TRANS + HFCSX_INTS_B2REC); } else { cs->hw.hfcsx.conn |= 0x02; cs->hw.hfcsx.sctrl |= SCTRL_B1_ENA; cs->hw.hfcsx.sctrl_r |= SCTRL_B1_ENA; cs->hw.hfcsx.int_m1 &= ~(HFCSX_INTS_B1TRANS + HFCSX_INTS_B1REC); } break; } Write_hfc(cs, HFCSX_SCTRL_E, cs->hw.hfcsx.sctrl_e); Write_hfc(cs, HFCSX_INT_M1, cs->hw.hfcsx.int_m1); Write_hfc(cs, 
HFCSX_SCTRL, cs->hw.hfcsx.sctrl); Write_hfc(cs, HFCSX_SCTRL_R, cs->hw.hfcsx.sctrl_r); Write_hfc(cs, HFCSX_CTMT, cs->hw.hfcsx.ctmt); Write_hfc(cs, HFCSX_CONNECT, cs->hw.hfcsx.conn); if (mode != L1_MODE_EXTRN) { reset_fifo(cs, fifo2 ? HFCSX_SEL_B2_RX : HFCSX_SEL_B1_RX); reset_fifo(cs, fifo2 ? HFCSX_SEL_B2_TX : HFCSX_SEL_B1_TX); } } /******************************/ /* Layer2 -> Layer 1 Transfer */ /******************************/ static void hfcsx_l2l1(struct PStack *st, int pr, void *arg) { struct BCState *bcs = st->l1.bcs; struct sk_buff *skb = arg; u_long flags; switch (pr) { case (PH_DATA | REQUEST): spin_lock_irqsave(&bcs->cs->lock, flags); if (bcs->tx_skb) { skb_queue_tail(&bcs->squeue, skb); } else { bcs->tx_skb = skb; // test_and_set_bit(BC_FLG_BUSY, &bcs->Flag); bcs->cs->BC_Send_Data(bcs); } spin_unlock_irqrestore(&bcs->cs->lock, flags); break; case (PH_PULL | INDICATION): spin_lock_irqsave(&bcs->cs->lock, flags); if (bcs->tx_skb) { printk(KERN_WARNING "hfc_l2l1: this shouldn't happen\n"); } else { // test_and_set_bit(BC_FLG_BUSY, &bcs->Flag); bcs->tx_skb = skb; bcs->cs->BC_Send_Data(bcs); } spin_unlock_irqrestore(&bcs->cs->lock, flags); break; case (PH_PULL | REQUEST): if (!bcs->tx_skb) { test_and_clear_bit(FLG_L1_PULL_REQ, &st->l1.Flags); st->l1.l1l2(st, PH_PULL | CONFIRM, NULL); } else test_and_set_bit(FLG_L1_PULL_REQ, &st->l1.Flags); break; case (PH_ACTIVATE | REQUEST): spin_lock_irqsave(&bcs->cs->lock, flags); test_and_set_bit(BC_FLG_ACTIV, &bcs->Flag); mode_hfcsx(bcs, st->l1.mode, st->l1.bc); spin_unlock_irqrestore(&bcs->cs->lock, flags); l1_msg_b(st, pr, arg); break; case (PH_DEACTIVATE | REQUEST): l1_msg_b(st, pr, arg); break; case (PH_DEACTIVATE | CONFIRM): spin_lock_irqsave(&bcs->cs->lock, flags); test_and_clear_bit(BC_FLG_ACTIV, &bcs->Flag); test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag); mode_hfcsx(bcs, 0, st->l1.bc); spin_unlock_irqrestore(&bcs->cs->lock, flags); st->l1.l1l2(st, PH_DEACTIVATE | CONFIRM, NULL); break; } } 
/******************************************/ /* deactivate B-channel access and queues */ /******************************************/ static void close_hfcsx(struct BCState *bcs) { mode_hfcsx(bcs, 0, bcs->channel); if (test_and_clear_bit(BC_FLG_INIT, &bcs->Flag)) { skb_queue_purge(&bcs->rqueue); skb_queue_purge(&bcs->squeue); if (bcs->tx_skb) { dev_kfree_skb_any(bcs->tx_skb); bcs->tx_skb = NULL; test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag); } } } /*************************************/ /* init B-channel queues and control */ /*************************************/ static int open_hfcsxstate(struct IsdnCardState *cs, struct BCState *bcs) { if (!test_and_set_bit(BC_FLG_INIT, &bcs->Flag)) { skb_queue_head_init(&bcs->rqueue); skb_queue_head_init(&bcs->squeue); } bcs->tx_skb = NULL; test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag); bcs->event = 0; bcs->tx_cnt = 0; return (0); } /*********************************/ /* inits the stack for B-channel */ /*********************************/ static int setstack_2b(struct PStack *st, struct BCState *bcs) { bcs->channel = st->l1.bc; if (open_hfcsxstate(st->l1.hardware, bcs)) return (-1); st->l1.bcs = bcs; st->l2.l2l1 = hfcsx_l2l1; setstack_manager(st); bcs->st = st; setstack_l1_B(st); return (0); } /***************************/ /* handle L1 state changes */ /***************************/ static void hfcsx_bh(struct work_struct *work) { struct IsdnCardState *cs = container_of(work, struct IsdnCardState, tqueue); u_long flags; if (test_and_clear_bit(D_L1STATECHANGE, &cs->event)) { if (!cs->hw.hfcsx.nt_mode) switch (cs->dc.hfcsx.ph_state) { case (0): l1_msg(cs, HW_RESET | INDICATION, NULL); break; case (3): l1_msg(cs, HW_DEACTIVATE | INDICATION, NULL); break; case (8): l1_msg(cs, HW_RSYNC | INDICATION, NULL); break; case (6): l1_msg(cs, HW_INFO2 | INDICATION, NULL); break; case (7): l1_msg(cs, HW_INFO4_P8 | INDICATION, NULL); break; default: break; } else { switch (cs->dc.hfcsx.ph_state) { case (2): spin_lock_irqsave(&cs->lock, flags); if 
(cs->hw.hfcsx.nt_timer < 0) { cs->hw.hfcsx.nt_timer = 0; cs->hw.hfcsx.int_m1 &= ~HFCSX_INTS_TIMER; Write_hfc(cs, HFCSX_INT_M1, cs->hw.hfcsx.int_m1); /* Clear already pending ints */ if (Read_hfc(cs, HFCSX_INT_S1)); Write_hfc(cs, HFCSX_STATES, 4 | HFCSX_LOAD_STATE); udelay(10); Write_hfc(cs, HFCSX_STATES, 4); cs->dc.hfcsx.ph_state = 4; } else { cs->hw.hfcsx.int_m1 |= HFCSX_INTS_TIMER; Write_hfc(cs, HFCSX_INT_M1, cs->hw.hfcsx.int_m1); cs->hw.hfcsx.ctmt &= ~HFCSX_AUTO_TIMER; cs->hw.hfcsx.ctmt |= HFCSX_TIM3_125; Write_hfc(cs, HFCSX_CTMT, cs->hw.hfcsx.ctmt | HFCSX_CLTIMER); Write_hfc(cs, HFCSX_CTMT, cs->hw.hfcsx.ctmt | HFCSX_CLTIMER); cs->hw.hfcsx.nt_timer = NT_T1_COUNT; Write_hfc(cs, HFCSX_STATES, 2 | HFCSX_NT_G2_G3); /* allow G2 -> G3 transition */ } spin_unlock_irqrestore(&cs->lock, flags); break; case (1): case (3): case (4): spin_lock_irqsave(&cs->lock, flags); cs->hw.hfcsx.nt_timer = 0; cs->hw.hfcsx.int_m1 &= ~HFCSX_INTS_TIMER; Write_hfc(cs, HFCSX_INT_M1, cs->hw.hfcsx.int_m1); spin_unlock_irqrestore(&cs->lock, flags); break; default: break; } } } if (test_and_clear_bit(D_RCVBUFREADY, &cs->event)) DChannel_proc_rcv(cs); if (test_and_clear_bit(D_XMTBUFREADY, &cs->event)) DChannel_proc_xmt(cs); } /********************************/ /* called for card init message */ /********************************/ static void inithfcsx(struct IsdnCardState *cs) { cs->setstack_d = setstack_hfcsx; cs->BC_Send_Data = &hfcsx_send_data; cs->bcs[0].BC_SetStack = setstack_2b; cs->bcs[1].BC_SetStack = setstack_2b; cs->bcs[0].BC_Close = close_hfcsx; cs->bcs[1].BC_Close = close_hfcsx; mode_hfcsx(cs->bcs, 0, 0); mode_hfcsx(cs->bcs + 1, 0, 1); } /*******************************************/ /* handle card messages from control layer */ /*******************************************/ static int hfcsx_card_msg(struct IsdnCardState *cs, int mt, void *arg) { u_long flags; if (cs->debug & L1_DEB_ISAC) debugl1(cs, "HFCSX: card_msg %x", mt); switch (mt) { case CARD_RESET: spin_lock_irqsave(&cs->lock, 
flags); reset_hfcsx(cs); spin_unlock_irqrestore(&cs->lock, flags); return (0); case CARD_RELEASE: release_io_hfcsx(cs); return (0); case CARD_INIT: spin_lock_irqsave(&cs->lock, flags); inithfcsx(cs); spin_unlock_irqrestore(&cs->lock, flags); msleep(80); /* Timeout 80ms */ /* now switch timer interrupt off */ spin_lock_irqsave(&cs->lock, flags); cs->hw.hfcsx.int_m1 &= ~HFCSX_INTS_TIMER; Write_hfc(cs, HFCSX_INT_M1, cs->hw.hfcsx.int_m1); /* reinit mode reg */ Write_hfc(cs, HFCSX_MST_MODE, cs->hw.hfcsx.mst_m); spin_unlock_irqrestore(&cs->lock, flags); return (0); case CARD_TEST: return (0); } return (0); } #ifdef __ISAPNP__ static struct isapnp_device_id hfc_ids[] __devinitdata = { { ISAPNP_VENDOR('T', 'A', 'G'), ISAPNP_FUNCTION(0x2620), ISAPNP_VENDOR('T', 'A', 'G'), ISAPNP_FUNCTION(0x2620), (unsigned long) "Teles 16.3c2" }, { 0, } }; static struct isapnp_device_id *ipid __devinitdata = &hfc_ids[0]; static struct pnp_card *pnp_c __devinitdata = NULL; #endif int __devinit setup_hfcsx(struct IsdnCard *card) { struct IsdnCardState *cs = card->cs; char tmp[64]; strcpy(tmp, hfcsx_revision); printk(KERN_INFO "HiSax: HFC-SX driver Rev. 
%s\n", HiSax_getrev(tmp)); #ifdef __ISAPNP__ if (!card->para[1] && isapnp_present()) { struct pnp_dev *pnp_d; while (ipid->card_vendor) { if ((pnp_c = pnp_find_card(ipid->card_vendor, ipid->card_device, pnp_c))) { pnp_d = NULL; if ((pnp_d = pnp_find_dev(pnp_c, ipid->vendor, ipid->function, pnp_d))) { int err; printk(KERN_INFO "HiSax: %s detected\n", (char *)ipid->driver_data); pnp_disable_dev(pnp_d); err = pnp_activate_dev(pnp_d); if (err < 0) { printk(KERN_WARNING "%s: pnp_activate_dev ret(%d)\n", __func__, err); return (0); } card->para[1] = pnp_port_start(pnp_d, 0); card->para[0] = pnp_irq(pnp_d, 0); if (!card->para[0] || !card->para[1]) { printk(KERN_ERR "HFC PnP:some resources are missing %ld/%lx\n", card->para[0], card->para[1]); pnp_disable_dev(pnp_d); return (0); } break; } else { printk(KERN_ERR "HFC PnP: PnP error card found, no device\n"); } } ipid++; pnp_c = NULL; } if (!ipid->card_vendor) { printk(KERN_INFO "HFC PnP: no ISAPnP card found\n"); return (0); } } #endif cs->hw.hfcsx.base = card->para[1] & 0xfffe; cs->irq = card->para[0]; cs->hw.hfcsx.int_s1 = 0; cs->dc.hfcsx.ph_state = 0; cs->hw.hfcsx.fifo = 255; if ((cs->typ == ISDN_CTYPE_HFC_SX) || (cs->typ == ISDN_CTYPE_HFC_SP_PCMCIA)) { if ((!cs->hw.hfcsx.base) || !request_region(cs->hw.hfcsx.base, 2, "HFCSX isdn")) { printk(KERN_WARNING "HiSax: HFC-SX io-base %#lx already in use\n", cs->hw.hfcsx.base); return (0); } byteout(cs->hw.hfcsx.base, cs->hw.hfcsx.base & 0xFF); byteout(cs->hw.hfcsx.base + 1, ((cs->hw.hfcsx.base >> 8) & 3) | 0x54); udelay(10); cs->hw.hfcsx.chip = Read_hfc(cs, HFCSX_CHIP_ID); switch (cs->hw.hfcsx.chip >> 4) { case 1: tmp[0] = '+'; break; case 9: tmp[0] = 'P'; break; default: printk(KERN_WARNING "HFC-SX: invalid chip id 0x%x\n", cs->hw.hfcsx.chip >> 4); release_region(cs->hw.hfcsx.base, 2); return (0); } if (!ccd_sp_irqtab[cs->irq & 0xF]) { printk(KERN_WARNING "HFC_SX: invalid irq %d specified\n", cs->irq & 0xF); release_region(cs->hw.hfcsx.base, 2); return (0); } if 
(!(cs->hw.hfcsx.extra = (void *) kmalloc(sizeof(struct hfcsx_extra), GFP_ATOMIC))) { release_region(cs->hw.hfcsx.base, 2); printk(KERN_WARNING "HFC-SX: unable to allocate memory\n"); return (0); } printk(KERN_INFO "HFC-S%c chip detected at base 0x%x IRQ %d HZ %d\n", tmp[0], (u_int) cs->hw.hfcsx.base, cs->irq, HZ); cs->hw.hfcsx.int_m2 = 0; /* disable alle interrupts */ cs->hw.hfcsx.int_m1 = 0; Write_hfc(cs, HFCSX_INT_M1, cs->hw.hfcsx.int_m1); Write_hfc(cs, HFCSX_INT_M2, cs->hw.hfcsx.int_m2); } else return (0); /* no valid card type */ cs->dbusytimer.function = (void *) hfcsx_dbusy_timer; cs->dbusytimer.data = (long) cs; init_timer(&cs->dbusytimer); INIT_WORK(&cs->tqueue, hfcsx_bh); cs->readisac = NULL; cs->writeisac = NULL; cs->readisacfifo = NULL; cs->writeisacfifo = NULL; cs->BC_Read_Reg = NULL; cs->BC_Write_Reg = NULL; cs->irq_func = &hfcsx_interrupt; cs->hw.hfcsx.timer.function = (void *) hfcsx_Timer; cs->hw.hfcsx.timer.data = (long) cs; cs->hw.hfcsx.b_fifo_size = 0; /* fifo size still unknown */ cs->hw.hfcsx.cirm = ccd_sp_irqtab[cs->irq & 0xF]; /* RAM not evaluated */ init_timer(&cs->hw.hfcsx.timer); reset_hfcsx(cs); cs->cardmsg = &hfcsx_card_msg; cs->auxcmd = &hfcsx_auxcmd; return (1); }
gpl-2.0
NamelessRom/android_kernel_lge_hammerhead
drivers/scsi/pcmcia/nsp_cs.c
5240
46656
/*====================================================================== NinjaSCSI-3 / NinjaSCSI-32Bi PCMCIA SCSI host adapter card driver By: YOKOTA Hiroshi <yokota@netlab.is.tsukuba.ac.jp> Ver.2.8 Support 32bit MMIO mode Support Synchronous Data Transfer Request (SDTR) mode Ver.2.0 Support 32bit PIO mode Ver.1.1.2 Fix for scatter list buffer exceeds Ver.1.1 Support scatter list Ver.0.1 Initial version This software may be used and distributed according to the terms of the GNU General Public License. ======================================================================*/ /*********************************************************************** This driver is for these PCcards. I-O DATA PCSC-F (Workbit NinjaSCSI-3) "WBT", "NinjaSCSI-3", "R1.0" I-O DATA CBSC-II (Workbit NinjaSCSI-32Bi in 16bit mode) "IO DATA", "CBSC16 ", "1" ***********************************************************************/ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/timer.h> #include <linux/ioport.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/major.h> #include <linux/blkdev.h> #include <linux/stat.h> #include <asm/io.h> #include <asm/irq.h> #include <../drivers/scsi/scsi.h> #include <scsi/scsi_host.h> #include <scsi/scsi.h> #include <scsi/scsi_ioctl.h> #include <pcmcia/cistpl.h> #include <pcmcia/cisreg.h> #include <pcmcia/ds.h> #include "nsp_cs.h" MODULE_AUTHOR("YOKOTA Hiroshi <yokota@netlab.is.tsukuba.ac.jp>"); MODULE_DESCRIPTION("WorkBit NinjaSCSI-3 / NinjaSCSI-32Bi(16bit) PCMCIA SCSI host adapter module"); MODULE_SUPPORTED_DEVICE("sd,sr,sg,st"); #ifdef MODULE_LICENSE MODULE_LICENSE("GPL"); #endif #include "nsp_io.h" /*====================================================================*/ /* Parameters that can be set with 'insmod' */ static int nsp_burst_mode = BURST_MEM32; module_param(nsp_burst_mode, int, 0); MODULE_PARM_DESC(nsp_burst_mode, "Burst transfer mode 
(0=io8, 1=io32, 2=mem32(default))"); /* Release IO ports after configuration? */ static bool free_ports = 0; module_param(free_ports, bool, 0); MODULE_PARM_DESC(free_ports, "Release IO ports after configuration? (default: 0 (=no))"); static struct scsi_host_template nsp_driver_template = { .proc_name = "nsp_cs", .proc_info = nsp_proc_info, .name = "WorkBit NinjaSCSI-3/32Bi(16bit)", .info = nsp_info, .queuecommand = nsp_queuecommand, /* .eh_abort_handler = nsp_eh_abort,*/ .eh_bus_reset_handler = nsp_eh_bus_reset, .eh_host_reset_handler = nsp_eh_host_reset, .can_queue = 1, .this_id = NSP_INITIATOR_ID, .sg_tablesize = SG_ALL, .cmd_per_lun = 1, .use_clustering = DISABLE_CLUSTERING, }; static nsp_hw_data nsp_data_base; /* attach <-> detect glue */ /* * debug, error print */ #ifndef NSP_DEBUG # define NSP_DEBUG_MASK 0x000000 # define nsp_msg(type, args...) nsp_cs_message("", 0, (type), args) # define nsp_dbg(mask, args...) /* */ #else # define NSP_DEBUG_MASK 0xffffff # define nsp_msg(type, args...) \ nsp_cs_message (__func__, __LINE__, (type), args) # define nsp_dbg(mask, args...) 
\ nsp_cs_dmessage(__func__, __LINE__, (mask), args) #endif #define NSP_DEBUG_QUEUECOMMAND BIT(0) #define NSP_DEBUG_REGISTER BIT(1) #define NSP_DEBUG_AUTOSCSI BIT(2) #define NSP_DEBUG_INTR BIT(3) #define NSP_DEBUG_SGLIST BIT(4) #define NSP_DEBUG_BUSFREE BIT(5) #define NSP_DEBUG_CDB_CONTENTS BIT(6) #define NSP_DEBUG_RESELECTION BIT(7) #define NSP_DEBUG_MSGINOCCUR BIT(8) #define NSP_DEBUG_EEPROM BIT(9) #define NSP_DEBUG_MSGOUTOCCUR BIT(10) #define NSP_DEBUG_BUSRESET BIT(11) #define NSP_DEBUG_RESTART BIT(12) #define NSP_DEBUG_SYNC BIT(13) #define NSP_DEBUG_WAIT BIT(14) #define NSP_DEBUG_TARGETFLAG BIT(15) #define NSP_DEBUG_PROC BIT(16) #define NSP_DEBUG_INIT BIT(17) #define NSP_DEBUG_DATA_IO BIT(18) #define NSP_SPECIAL_PRINT_REGISTER BIT(20) #define NSP_DEBUG_BUF_LEN 150 static inline void nsp_inc_resid(struct scsi_cmnd *SCpnt, int residInc) { scsi_set_resid(SCpnt, scsi_get_resid(SCpnt) + residInc); } static void nsp_cs_message(const char *func, int line, char *type, char *fmt, ...) { va_list args; char buf[NSP_DEBUG_BUF_LEN]; va_start(args, fmt); vsnprintf(buf, sizeof(buf), fmt, args); va_end(args); #ifndef NSP_DEBUG printk("%snsp_cs: %s\n", type, buf); #else printk("%snsp_cs: %s (%d): %s\n", type, func, line, buf); #endif } #ifdef NSP_DEBUG static void nsp_cs_dmessage(const char *func, int line, int mask, char *fmt, ...) { va_list args; char buf[NSP_DEBUG_BUF_LEN]; va_start(args, fmt); vsnprintf(buf, sizeof(buf), fmt, args); va_end(args); if (mask & NSP_DEBUG_MASK) { printk("nsp_cs-debug: 0x%x %s (%d): %s\n", mask, func, line, buf); } } #endif /***********************************************************/ /*==================================================== * Clenaup parameters and call done() functions. * You must be set SCpnt->result before call this function. 
*/ static void nsp_scsi_done(struct scsi_cmnd *SCpnt) { nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata; data->CurrentSC = NULL; SCpnt->scsi_done(SCpnt); } static int nsp_queuecommand_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) { #ifdef NSP_DEBUG /*unsigned int host_id = SCpnt->device->host->this_id;*/ /*unsigned int base = SCpnt->device->host->io_port;*/ unsigned char target = scmd_id(SCpnt); #endif nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata; nsp_dbg(NSP_DEBUG_QUEUECOMMAND, "SCpnt=0x%p target=%d lun=%d sglist=0x%p bufflen=%d sg_count=%d", SCpnt, target, SCpnt->device->lun, scsi_sglist(SCpnt), scsi_bufflen(SCpnt), scsi_sg_count(SCpnt)); //nsp_dbg(NSP_DEBUG_QUEUECOMMAND, "before CurrentSC=0x%p", data->CurrentSC); SCpnt->scsi_done = done; if (data->CurrentSC != NULL) { nsp_msg(KERN_DEBUG, "CurrentSC!=NULL this can't be happen"); SCpnt->result = DID_BAD_TARGET << 16; nsp_scsi_done(SCpnt); return 0; } #if 0 /* XXX: pcmcia-cs generates SCSI command with "scsi_info" utility. This makes kernel crash when suspending... */ if (data->ScsiInfo->stop != 0) { nsp_msg(KERN_INFO, "suspending device. 
reject command."); SCpnt->result = DID_BAD_TARGET << 16; nsp_scsi_done(SCpnt); return SCSI_MLQUEUE_HOST_BUSY; } #endif show_command(SCpnt); data->CurrentSC = SCpnt; SCpnt->SCp.Status = CHECK_CONDITION; SCpnt->SCp.Message = 0; SCpnt->SCp.have_data_in = IO_UNKNOWN; SCpnt->SCp.sent_command = 0; SCpnt->SCp.phase = PH_UNDETERMINED; scsi_set_resid(SCpnt, scsi_bufflen(SCpnt)); /* setup scratch area SCp.ptr : buffer pointer SCp.this_residual : buffer length SCp.buffer : next buffer SCp.buffers_residual : left buffers in list SCp.phase : current state of the command */ if (scsi_bufflen(SCpnt)) { SCpnt->SCp.buffer = scsi_sglist(SCpnt); SCpnt->SCp.ptr = BUFFER_ADDR; SCpnt->SCp.this_residual = SCpnt->SCp.buffer->length; SCpnt->SCp.buffers_residual = scsi_sg_count(SCpnt) - 1; } else { SCpnt->SCp.ptr = NULL; SCpnt->SCp.this_residual = 0; SCpnt->SCp.buffer = NULL; SCpnt->SCp.buffers_residual = 0; } if (nsphw_start_selection(SCpnt) == FALSE) { nsp_dbg(NSP_DEBUG_QUEUECOMMAND, "selection fail"); SCpnt->result = DID_BUS_BUSY << 16; nsp_scsi_done(SCpnt); return 0; } //nsp_dbg(NSP_DEBUG_QUEUECOMMAND, "out"); #ifdef NSP_DEBUG data->CmdId++; #endif return 0; } static DEF_SCSI_QCMD(nsp_queuecommand) /* * setup PIO FIFO transfer mode and enable/disable to data out */ static void nsp_setup_fifo(nsp_hw_data *data, int enabled) { unsigned int base = data->BaseAddress; unsigned char transfer_mode_reg; //nsp_dbg(NSP_DEBUG_DATA_IO, "enabled=%d", enabled); if (enabled != FALSE) { transfer_mode_reg = TRANSFER_GO | BRAIND; } else { transfer_mode_reg = 0; } transfer_mode_reg |= data->TransferMode; nsp_index_write(base, TRANSFERMODE, transfer_mode_reg); } static void nsphw_init_sync(nsp_hw_data *data) { sync_data tmp_sync = { .SyncNegotiation = SYNC_NOT_YET, .SyncPeriod = 0, .SyncOffset = 0 }; int i; /* setup sync data */ for ( i = 0; i < ARRAY_SIZE(data->Sync); i++ ) { data->Sync[i] = tmp_sync; } } /* * Initialize Ninja hardware */ static int nsphw_init(nsp_hw_data *data) { unsigned int base = 
data->BaseAddress; nsp_dbg(NSP_DEBUG_INIT, "in base=0x%x", base); data->ScsiClockDiv = CLOCK_40M | FAST_20; data->CurrentSC = NULL; data->FifoCount = 0; data->TransferMode = MODE_IO8; nsphw_init_sync(data); /* block all interrupts */ nsp_write(base, IRQCONTROL, IRQCONTROL_ALLMASK); /* setup SCSI interface */ nsp_write(base, IFSELECT, IF_IFSEL); nsp_index_write(base, SCSIIRQMODE, 0); nsp_index_write(base, TRANSFERMODE, MODE_IO8); nsp_index_write(base, CLOCKDIV, data->ScsiClockDiv); nsp_index_write(base, PARITYCTRL, 0); nsp_index_write(base, POINTERCLR, POINTER_CLEAR | ACK_COUNTER_CLEAR | REQ_COUNTER_CLEAR | HOST_COUNTER_CLEAR); /* setup fifo asic */ nsp_write(base, IFSELECT, IF_REGSEL); nsp_index_write(base, TERMPWRCTRL, 0); if ((nsp_index_read(base, OTHERCONTROL) & TPWR_SENSE) == 0) { nsp_msg(KERN_INFO, "terminator power on"); nsp_index_write(base, TERMPWRCTRL, POWER_ON); } nsp_index_write(base, TIMERCOUNT, 0); nsp_index_write(base, TIMERCOUNT, 0); /* requires 2 times!! */ nsp_index_write(base, SYNCREG, 0); nsp_index_write(base, ACKWIDTH, 0); /* enable interrupts and ack them */ nsp_index_write(base, SCSIIRQMODE, SCSI_PHASE_CHANGE_EI | RESELECT_EI | SCSI_RESET_IRQ_EI ); nsp_write(base, IRQCONTROL, IRQCONTROL_ALLCLEAR); nsp_setup_fifo(data, FALSE); return TRUE; } /* * Start selection phase */ static int nsphw_start_selection(struct scsi_cmnd *SCpnt) { unsigned int host_id = SCpnt->device->host->this_id; unsigned int base = SCpnt->device->host->io_port; unsigned char target = scmd_id(SCpnt); nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata; int time_out; unsigned char phase, arbit; //nsp_dbg(NSP_DEBUG_RESELECTION, "in"); phase = nsp_index_read(base, SCSIBUSMON); if(phase != BUSMON_BUS_FREE) { //nsp_dbg(NSP_DEBUG_RESELECTION, "bus busy"); return FALSE; } /* start arbitration */ //nsp_dbg(NSP_DEBUG_RESELECTION, "start arbit"); SCpnt->SCp.phase = PH_ARBSTART; nsp_index_write(base, SETARBIT, ARBIT_GO); time_out = 1000; do { /* XXX: what a stupid chip! 
*/ arbit = nsp_index_read(base, ARBITSTATUS); //nsp_dbg(NSP_DEBUG_RESELECTION, "arbit=%d, wait_count=%d", arbit, wait_count); udelay(1); /* hold 1.2us */ } while((arbit & (ARBIT_WIN | ARBIT_FAIL)) == 0 && (time_out-- != 0)); if (!(arbit & ARBIT_WIN)) { //nsp_dbg(NSP_DEBUG_RESELECTION, "arbit fail"); nsp_index_write(base, SETARBIT, ARBIT_FLAG_CLEAR); return FALSE; } /* assert select line */ //nsp_dbg(NSP_DEBUG_RESELECTION, "assert SEL line"); SCpnt->SCp.phase = PH_SELSTART; udelay(3); /* wait 2.4us */ nsp_index_write(base, SCSIDATALATCH, BIT(host_id) | BIT(target)); nsp_index_write(base, SCSIBUSCTRL, SCSI_SEL | SCSI_BSY | SCSI_ATN); udelay(2); /* wait >1.2us */ nsp_index_write(base, SCSIBUSCTRL, SCSI_SEL | SCSI_BSY | SCSI_DATAOUT_ENB | SCSI_ATN); nsp_index_write(base, SETARBIT, ARBIT_FLAG_CLEAR); /*udelay(1);*/ /* wait >90ns */ nsp_index_write(base, SCSIBUSCTRL, SCSI_SEL | SCSI_DATAOUT_ENB | SCSI_ATN); /* check selection timeout */ nsp_start_timer(SCpnt, 1000/51); data->SelectionTimeOut = 1; return TRUE; } struct nsp_sync_table { unsigned int min_period; unsigned int max_period; unsigned int chip_period; unsigned int ack_width; }; static struct nsp_sync_table nsp_sync_table_40M[] = { {0x0c, 0x0c, 0x1, 0}, /* 20MB 50ns*/ {0x19, 0x19, 0x3, 1}, /* 10MB 100ns*/ {0x1a, 0x25, 0x5, 2}, /* 7.5MB 150ns*/ {0x26, 0x32, 0x7, 3}, /* 5MB 200ns*/ { 0, 0, 0, 0}, }; static struct nsp_sync_table nsp_sync_table_20M[] = { {0x19, 0x19, 0x1, 0}, /* 10MB 100ns*/ {0x1a, 0x25, 0x2, 0}, /* 7.5MB 150ns*/ {0x26, 0x32, 0x3, 1}, /* 5MB 200ns*/ { 0, 0, 0, 0}, }; /* * setup synchronous data transfer mode */ static int nsp_analyze_sdtr(struct scsi_cmnd *SCpnt) { unsigned char target = scmd_id(SCpnt); // unsigned char lun = SCpnt->device->lun; nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata; sync_data *sync = &(data->Sync[target]); struct nsp_sync_table *sync_table; unsigned int period, offset; int i; nsp_dbg(NSP_DEBUG_SYNC, "in"); period = sync->SyncPeriod; offset = 
sync->SyncOffset; nsp_dbg(NSP_DEBUG_SYNC, "period=0x%x, offset=0x%x", period, offset); if ((data->ScsiClockDiv & (BIT(0)|BIT(1))) == CLOCK_20M) { sync_table = nsp_sync_table_20M; } else { sync_table = nsp_sync_table_40M; } for ( i = 0; sync_table->max_period != 0; i++, sync_table++) { if ( period >= sync_table->min_period && period <= sync_table->max_period ) { break; } } if (period != 0 && sync_table->max_period == 0) { /* * No proper period/offset found */ nsp_dbg(NSP_DEBUG_SYNC, "no proper period/offset"); sync->SyncPeriod = 0; sync->SyncOffset = 0; sync->SyncRegister = 0; sync->AckWidth = 0; return FALSE; } sync->SyncRegister = (sync_table->chip_period << SYNCREG_PERIOD_SHIFT) | (offset & SYNCREG_OFFSET_MASK); sync->AckWidth = sync_table->ack_width; nsp_dbg(NSP_DEBUG_SYNC, "sync_reg=0x%x, ack_width=0x%x", sync->SyncRegister, sync->AckWidth); return TRUE; } /* * start ninja hardware timer */ static void nsp_start_timer(struct scsi_cmnd *SCpnt, int time) { unsigned int base = SCpnt->device->host->io_port; nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata; //nsp_dbg(NSP_DEBUG_INTR, "in SCpnt=0x%p, time=%d", SCpnt, time); data->TimerCount = time; nsp_index_write(base, TIMERCOUNT, time); } /* * wait for bus phase change */ static int nsp_negate_signal(struct scsi_cmnd *SCpnt, unsigned char mask, char *str) { unsigned int base = SCpnt->device->host->io_port; unsigned char reg; int time_out; //nsp_dbg(NSP_DEBUG_INTR, "in"); time_out = 100; do { reg = nsp_index_read(base, SCSIBUSMON); if (reg == 0xff) { break; } } while ((--time_out != 0) && (reg & mask) != 0); if (time_out == 0) { nsp_msg(KERN_DEBUG, " %s signal off timeut", str); } return 0; } /* * expect Ninja Irq */ static int nsp_expect_signal(struct scsi_cmnd *SCpnt, unsigned char current_phase, unsigned char mask) { unsigned int base = SCpnt->device->host->io_port; int time_out; unsigned char phase, i_src; //nsp_dbg(NSP_DEBUG_INTR, "current_phase=0x%x, mask=0x%x", current_phase, mask); time_out = 
100; do { phase = nsp_index_read(base, SCSIBUSMON); if (phase == 0xff) { //nsp_dbg(NSP_DEBUG_INTR, "ret -1"); return -1; } i_src = nsp_read(base, IRQSTATUS); if (i_src & IRQSTATUS_SCSI) { //nsp_dbg(NSP_DEBUG_INTR, "ret 0 found scsi signal"); return 0; } if ((phase & mask) != 0 && (phase & BUSMON_PHASE_MASK) == current_phase) { //nsp_dbg(NSP_DEBUG_INTR, "ret 1 phase=0x%x", phase); return 1; } } while(time_out-- != 0); //nsp_dbg(NSP_DEBUG_INTR, "timeout"); return -1; } /* * transfer SCSI message */ static int nsp_xfer(struct scsi_cmnd *SCpnt, int phase) { unsigned int base = SCpnt->device->host->io_port; nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata; char *buf = data->MsgBuffer; int len = min(MSGBUF_SIZE, data->MsgLen); int ptr; int ret; //nsp_dbg(NSP_DEBUG_DATA_IO, "in"); for (ptr = 0; len > 0; len--, ptr++) { ret = nsp_expect_signal(SCpnt, phase, BUSMON_REQ); if (ret <= 0) { nsp_dbg(NSP_DEBUG_DATA_IO, "xfer quit"); return 0; } /* if last byte, negate ATN */ if (len == 1 && SCpnt->SCp.phase == PH_MSG_OUT) { nsp_index_write(base, SCSIBUSCTRL, AUTODIRECTION | ACKENB); } /* read & write message */ if (phase & BUSMON_IO) { nsp_dbg(NSP_DEBUG_DATA_IO, "read msg"); buf[ptr] = nsp_index_read(base, SCSIDATAWITHACK); } else { nsp_dbg(NSP_DEBUG_DATA_IO, "write msg"); nsp_index_write(base, SCSIDATAWITHACK, buf[ptr]); } nsp_negate_signal(SCpnt, BUSMON_ACK, "xfer<ack>"); } return len; } /* * get extra SCSI data from fifo */ static int nsp_dataphase_bypass(struct scsi_cmnd *SCpnt) { nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata; unsigned int count; //nsp_dbg(NSP_DEBUG_DATA_IO, "in"); if (SCpnt->SCp.have_data_in != IO_IN) { return 0; } count = nsp_fifo_count(SCpnt); if (data->FifoCount == count) { //nsp_dbg(NSP_DEBUG_DATA_IO, "not use bypass quirk"); return 0; } /* * XXX: NSP_QUIRK * data phase skip only occures in case of SCSI_LOW_READ */ nsp_dbg(NSP_DEBUG_DATA_IO, "use bypass quirk"); SCpnt->SCp.phase = PH_DATA; nsp_pio_read(SCpnt); 
nsp_setup_fifo(data, FALSE); return 0; } /* * accept reselection */ static int nsp_reselected(struct scsi_cmnd *SCpnt) { unsigned int base = SCpnt->device->host->io_port; unsigned int host_id = SCpnt->device->host->this_id; //nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata; unsigned char bus_reg; unsigned char id_reg, tmp; int target; nsp_dbg(NSP_DEBUG_RESELECTION, "in"); id_reg = nsp_index_read(base, RESELECTID); tmp = id_reg & (~BIT(host_id)); target = 0; while(tmp != 0) { if (tmp & BIT(0)) { break; } tmp >>= 1; target++; } if (scmd_id(SCpnt) != target) { nsp_msg(KERN_ERR, "XXX: reselect ID must be %d in this implementation.", target); } nsp_negate_signal(SCpnt, BUSMON_SEL, "reselect<SEL>"); nsp_nexus(SCpnt); bus_reg = nsp_index_read(base, SCSIBUSCTRL) & ~(SCSI_BSY | SCSI_ATN); nsp_index_write(base, SCSIBUSCTRL, bus_reg); nsp_index_write(base, SCSIBUSCTRL, bus_reg | AUTODIRECTION | ACKENB); return TRUE; } /* * count how many data transferd */ static int nsp_fifo_count(struct scsi_cmnd *SCpnt) { unsigned int base = SCpnt->device->host->io_port; unsigned int count; unsigned int l, m, h, dummy; nsp_index_write(base, POINTERCLR, POINTER_CLEAR | ACK_COUNTER); l = nsp_index_read(base, TRANSFERCOUNT); m = nsp_index_read(base, TRANSFERCOUNT); h = nsp_index_read(base, TRANSFERCOUNT); dummy = nsp_index_read(base, TRANSFERCOUNT); /* required this! 
*/ count = (h << 16) | (m << 8) | (l << 0); //nsp_dbg(NSP_DEBUG_DATA_IO, "count=0x%x", count); return count; } /* fifo size */ #define RFIFO_CRIT 64 #define WFIFO_CRIT 64 /* * read data in DATA IN phase */ static void nsp_pio_read(struct scsi_cmnd *SCpnt) { unsigned int base = SCpnt->device->host->io_port; unsigned long mmio_base = SCpnt->device->host->base; nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata; long time_out; int ocount, res; unsigned char stat, fifo_stat; ocount = data->FifoCount; nsp_dbg(NSP_DEBUG_DATA_IO, "in SCpnt=0x%p resid=%d ocount=%d ptr=0x%p this_residual=%d buffers=0x%p nbuf=%d", SCpnt, scsi_get_resid(SCpnt), ocount, SCpnt->SCp.ptr, SCpnt->SCp.this_residual, SCpnt->SCp.buffer, SCpnt->SCp.buffers_residual); time_out = 1000; while ((time_out-- != 0) && (SCpnt->SCp.this_residual > 0 || SCpnt->SCp.buffers_residual > 0 ) ) { stat = nsp_index_read(base, SCSIBUSMON); stat &= BUSMON_PHASE_MASK; res = nsp_fifo_count(SCpnt) - ocount; //nsp_dbg(NSP_DEBUG_DATA_IO, "ptr=0x%p this=0x%x ocount=0x%x res=0x%x", SCpnt->SCp.ptr, SCpnt->SCp.this_residual, ocount, res); if (res == 0) { /* if some data available ? */ if (stat == BUSPHASE_DATA_IN) { /* phase changed? 
*/ //nsp_dbg(NSP_DEBUG_DATA_IO, " wait for data this=%d", SCpnt->SCp.this_residual); continue; } else { nsp_dbg(NSP_DEBUG_DATA_IO, "phase changed stat=0x%x", stat); break; } } fifo_stat = nsp_read(base, FIFOSTATUS); if ((fifo_stat & FIFOSTATUS_FULL_EMPTY) == 0 && stat == BUSPHASE_DATA_IN) { continue; } res = min(res, SCpnt->SCp.this_residual); switch (data->TransferMode) { case MODE_IO32: res &= ~(BIT(1)|BIT(0)); /* align 4 */ nsp_fifo32_read(base, SCpnt->SCp.ptr, res >> 2); break; case MODE_IO8: nsp_fifo8_read (base, SCpnt->SCp.ptr, res ); break; case MODE_MEM32: res &= ~(BIT(1)|BIT(0)); /* align 4 */ nsp_mmio_fifo32_read(mmio_base, SCpnt->SCp.ptr, res >> 2); break; default: nsp_dbg(NSP_DEBUG_DATA_IO, "unknown read mode"); return; } nsp_inc_resid(SCpnt, -res); SCpnt->SCp.ptr += res; SCpnt->SCp.this_residual -= res; ocount += res; //nsp_dbg(NSP_DEBUG_DATA_IO, "ptr=0x%p this_residual=0x%x ocount=0x%x", SCpnt->SCp.ptr, SCpnt->SCp.this_residual, ocount); /* go to next scatter list if available */ if (SCpnt->SCp.this_residual == 0 && SCpnt->SCp.buffers_residual != 0 ) { //nsp_dbg(NSP_DEBUG_DATA_IO, "scatterlist next timeout=%d", time_out); SCpnt->SCp.buffers_residual--; SCpnt->SCp.buffer++; SCpnt->SCp.ptr = BUFFER_ADDR; SCpnt->SCp.this_residual = SCpnt->SCp.buffer->length; time_out = 1000; //nsp_dbg(NSP_DEBUG_DATA_IO, "page: 0x%p, off: 0x%x", SCpnt->SCp.buffer->page, SCpnt->SCp.buffer->offset); } } data->FifoCount = ocount; if (time_out < 0) { nsp_msg(KERN_DEBUG, "pio read timeout resid=%d this_residual=%d buffers_residual=%d", scsi_get_resid(SCpnt), SCpnt->SCp.this_residual, SCpnt->SCp.buffers_residual); } nsp_dbg(NSP_DEBUG_DATA_IO, "read ocount=0x%x", ocount); nsp_dbg(NSP_DEBUG_DATA_IO, "r cmd=%d resid=0x%x\n", data->CmdId, scsi_get_resid(SCpnt)); } /* * write data in DATA OUT phase */ static void nsp_pio_write(struct scsi_cmnd *SCpnt) { unsigned int base = SCpnt->device->host->io_port; unsigned long mmio_base = SCpnt->device->host->base; nsp_hw_data *data = 
(nsp_hw_data *)SCpnt->device->host->hostdata; int time_out; int ocount, res; unsigned char stat; ocount = data->FifoCount; nsp_dbg(NSP_DEBUG_DATA_IO, "in fifocount=%d ptr=0x%p this_residual=%d buffers=0x%p nbuf=%d resid=0x%x", data->FifoCount, SCpnt->SCp.ptr, SCpnt->SCp.this_residual, SCpnt->SCp.buffer, SCpnt->SCp.buffers_residual, scsi_get_resid(SCpnt)); time_out = 1000; while ((time_out-- != 0) && (SCpnt->SCp.this_residual > 0 || SCpnt->SCp.buffers_residual > 0)) { stat = nsp_index_read(base, SCSIBUSMON); stat &= BUSMON_PHASE_MASK; if (stat != BUSPHASE_DATA_OUT) { res = ocount - nsp_fifo_count(SCpnt); nsp_dbg(NSP_DEBUG_DATA_IO, "phase changed stat=0x%x, res=%d\n", stat, res); /* Put back pointer */ nsp_inc_resid(SCpnt, res); SCpnt->SCp.ptr -= res; SCpnt->SCp.this_residual += res; ocount -= res; break; } res = ocount - nsp_fifo_count(SCpnt); if (res > 0) { /* write all data? */ nsp_dbg(NSP_DEBUG_DATA_IO, "wait for all data out. ocount=0x%x res=%d", ocount, res); continue; } res = min(SCpnt->SCp.this_residual, WFIFO_CRIT); //nsp_dbg(NSP_DEBUG_DATA_IO, "ptr=0x%p this=0x%x res=0x%x", SCpnt->SCp.ptr, SCpnt->SCp.this_residual, res); switch (data->TransferMode) { case MODE_IO32: res &= ~(BIT(1)|BIT(0)); /* align 4 */ nsp_fifo32_write(base, SCpnt->SCp.ptr, res >> 2); break; case MODE_IO8: nsp_fifo8_write (base, SCpnt->SCp.ptr, res ); break; case MODE_MEM32: res &= ~(BIT(1)|BIT(0)); /* align 4 */ nsp_mmio_fifo32_write(mmio_base, SCpnt->SCp.ptr, res >> 2); break; default: nsp_dbg(NSP_DEBUG_DATA_IO, "unknown write mode"); break; } nsp_inc_resid(SCpnt, -res); SCpnt->SCp.ptr += res; SCpnt->SCp.this_residual -= res; ocount += res; /* go to next scatter list if available */ if (SCpnt->SCp.this_residual == 0 && SCpnt->SCp.buffers_residual != 0 ) { //nsp_dbg(NSP_DEBUG_DATA_IO, "scatterlist next"); SCpnt->SCp.buffers_residual--; SCpnt->SCp.buffer++; SCpnt->SCp.ptr = BUFFER_ADDR; SCpnt->SCp.this_residual = SCpnt->SCp.buffer->length; time_out = 1000; } } data->FifoCount = ocount; if 
(time_out < 0) { nsp_msg(KERN_DEBUG, "pio write timeout resid=0x%x", scsi_get_resid(SCpnt)); } nsp_dbg(NSP_DEBUG_DATA_IO, "write ocount=0x%x", ocount); nsp_dbg(NSP_DEBUG_DATA_IO, "w cmd=%d resid=0x%x\n", data->CmdId, scsi_get_resid(SCpnt)); } #undef RFIFO_CRIT #undef WFIFO_CRIT /* * setup synchronous/asynchronous data transfer mode */ static int nsp_nexus(struct scsi_cmnd *SCpnt) { unsigned int base = SCpnt->device->host->io_port; unsigned char target = scmd_id(SCpnt); // unsigned char lun = SCpnt->device->lun; nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata; sync_data *sync = &(data->Sync[target]); //nsp_dbg(NSP_DEBUG_DATA_IO, "in SCpnt=0x%p", SCpnt); /* setup synch transfer registers */ nsp_index_write(base, SYNCREG, sync->SyncRegister); nsp_index_write(base, ACKWIDTH, sync->AckWidth); if (scsi_get_resid(SCpnt) % 4 != 0 || scsi_get_resid(SCpnt) <= PAGE_SIZE ) { data->TransferMode = MODE_IO8; } else if (nsp_burst_mode == BURST_MEM32) { data->TransferMode = MODE_MEM32; } else if (nsp_burst_mode == BURST_IO32) { data->TransferMode = MODE_IO32; } else { data->TransferMode = MODE_IO8; } /* setup pdma fifo */ nsp_setup_fifo(data, TRUE); /* clear ack counter */ data->FifoCount = 0; nsp_index_write(base, POINTERCLR, POINTER_CLEAR | ACK_COUNTER_CLEAR | REQ_COUNTER_CLEAR | HOST_COUNTER_CLEAR); return 0; } #include "nsp_message.c" /* * interrupt handler */ static irqreturn_t nspintr(int irq, void *dev_id) { unsigned int base; unsigned char irq_status, irq_phase, phase; struct scsi_cmnd *tmpSC; unsigned char target, lun; unsigned int *sync_neg; int i, tmp; nsp_hw_data *data; //nsp_dbg(NSP_DEBUG_INTR, "dev_id=0x%p", dev_id); //nsp_dbg(NSP_DEBUG_INTR, "host=0x%p", ((scsi_info_t *)dev_id)->host); if ( dev_id != NULL && ((scsi_info_t *)dev_id)->host != NULL ) { scsi_info_t *info = (scsi_info_t *)dev_id; data = (nsp_hw_data *)info->host->hostdata; } else { nsp_dbg(NSP_DEBUG_INTR, "host data wrong"); return IRQ_NONE; } //nsp_dbg(NSP_DEBUG_INTR, 
"&nsp_data_base=0x%p, dev_id=0x%p", &nsp_data_base, dev_id); base = data->BaseAddress; //nsp_dbg(NSP_DEBUG_INTR, "base=0x%x", base); /* * interrupt check */ nsp_write(base, IRQCONTROL, IRQCONTROL_IRQDISABLE); irq_status = nsp_read(base, IRQSTATUS); //nsp_dbg(NSP_DEBUG_INTR, "irq_status=0x%x", irq_status); if ((irq_status == 0xff) || ((irq_status & IRQSTATUS_MASK) == 0)) { nsp_write(base, IRQCONTROL, 0); //nsp_dbg(NSP_DEBUG_INTR, "no irq/shared irq"); return IRQ_NONE; } /* XXX: IMPORTANT * Do not read an irq_phase register if no scsi phase interrupt. * Unless, you should lose a scsi phase interrupt. */ phase = nsp_index_read(base, SCSIBUSMON); if((irq_status & IRQSTATUS_SCSI) != 0) { irq_phase = nsp_index_read(base, IRQPHASESENCE); } else { irq_phase = 0; } //nsp_dbg(NSP_DEBUG_INTR, "irq_phase=0x%x", irq_phase); /* * timer interrupt handler (scsi vs timer interrupts) */ //nsp_dbg(NSP_DEBUG_INTR, "timercount=%d", data->TimerCount); if (data->TimerCount != 0) { //nsp_dbg(NSP_DEBUG_INTR, "stop timer"); nsp_index_write(base, TIMERCOUNT, 0); nsp_index_write(base, TIMERCOUNT, 0); data->TimerCount = 0; } if ((irq_status & IRQSTATUS_MASK) == IRQSTATUS_TIMER && data->SelectionTimeOut == 0) { //nsp_dbg(NSP_DEBUG_INTR, "timer start"); nsp_write(base, IRQCONTROL, IRQCONTROL_TIMER_CLEAR); return IRQ_HANDLED; } nsp_write(base, IRQCONTROL, IRQCONTROL_TIMER_CLEAR | IRQCONTROL_FIFO_CLEAR); if ((irq_status & IRQSTATUS_SCSI) && (irq_phase & SCSI_RESET_IRQ)) { nsp_msg(KERN_ERR, "bus reset (power off?)"); nsphw_init(data); nsp_bus_reset(data); if(data->CurrentSC != NULL) { tmpSC = data->CurrentSC; tmpSC->result = (DID_RESET << 16) | ((tmpSC->SCp.Message & 0xff) << 8) | ((tmpSC->SCp.Status & 0xff) << 0); nsp_scsi_done(tmpSC); } return IRQ_HANDLED; } if (data->CurrentSC == NULL) { nsp_msg(KERN_ERR, "CurrentSC==NULL irq_status=0x%x phase=0x%x irq_phase=0x%x this can't be happen. 
reset everything", irq_status, phase, irq_phase); nsphw_init(data); nsp_bus_reset(data); return IRQ_HANDLED; } tmpSC = data->CurrentSC; target = tmpSC->device->id; lun = tmpSC->device->lun; sync_neg = &(data->Sync[target].SyncNegotiation); /* * parse hardware SCSI irq reasons register */ if (irq_status & IRQSTATUS_SCSI) { if (irq_phase & RESELECT_IRQ) { nsp_dbg(NSP_DEBUG_INTR, "reselect"); nsp_write(base, IRQCONTROL, IRQCONTROL_RESELECT_CLEAR); if (nsp_reselected(tmpSC) != FALSE) { return IRQ_HANDLED; } } if ((irq_phase & (PHASE_CHANGE_IRQ | LATCHED_BUS_FREE)) == 0) { return IRQ_HANDLED; } } //show_phase(tmpSC); switch(tmpSC->SCp.phase) { case PH_SELSTART: // *sync_neg = SYNC_NOT_YET; if ((phase & BUSMON_BSY) == 0) { //nsp_dbg(NSP_DEBUG_INTR, "selection count=%d", data->SelectionTimeOut); if (data->SelectionTimeOut >= NSP_SELTIMEOUT) { nsp_dbg(NSP_DEBUG_INTR, "selection time out"); data->SelectionTimeOut = 0; nsp_index_write(base, SCSIBUSCTRL, 0); tmpSC->result = DID_TIME_OUT << 16; nsp_scsi_done(tmpSC); return IRQ_HANDLED; } data->SelectionTimeOut += 1; nsp_start_timer(tmpSC, 1000/51); return IRQ_HANDLED; } /* attention assert */ //nsp_dbg(NSP_DEBUG_INTR, "attention assert"); data->SelectionTimeOut = 0; tmpSC->SCp.phase = PH_SELECTED; nsp_index_write(base, SCSIBUSCTRL, SCSI_ATN); udelay(1); nsp_index_write(base, SCSIBUSCTRL, SCSI_ATN | AUTODIRECTION | ACKENB); return IRQ_HANDLED; break; case PH_RESELECT: //nsp_dbg(NSP_DEBUG_INTR, "phase reselect"); // *sync_neg = SYNC_NOT_YET; if ((phase & BUSMON_PHASE_MASK) != BUSPHASE_MESSAGE_IN) { tmpSC->result = DID_ABORT << 16; nsp_scsi_done(tmpSC); return IRQ_HANDLED; } /* fall thru */ default: if ((irq_status & (IRQSTATUS_SCSI | IRQSTATUS_FIFO)) == 0) { return IRQ_HANDLED; } break; } /* * SCSI sequencer */ //nsp_dbg(NSP_DEBUG_INTR, "start scsi seq"); /* normal disconnect */ if (((tmpSC->SCp.phase == PH_MSG_IN) || (tmpSC->SCp.phase == PH_MSG_OUT)) && (irq_phase & LATCHED_BUS_FREE) != 0 ) { nsp_dbg(NSP_DEBUG_INTR, "normal 
disconnect irq_status=0x%x, phase=0x%x, irq_phase=0x%x", irq_status, phase, irq_phase); //*sync_neg = SYNC_NOT_YET; if ((tmpSC->SCp.Message == MSG_COMMAND_COMPLETE)) { /* all command complete and return status */ tmpSC->result = (DID_OK << 16) | ((tmpSC->SCp.Message & 0xff) << 8) | ((tmpSC->SCp.Status & 0xff) << 0); nsp_dbg(NSP_DEBUG_INTR, "command complete result=0x%x", tmpSC->result); nsp_scsi_done(tmpSC); return IRQ_HANDLED; } return IRQ_HANDLED; } /* check unexpected bus free state */ if (phase == 0) { nsp_msg(KERN_DEBUG, "unexpected bus free. irq_status=0x%x, phase=0x%x, irq_phase=0x%x", irq_status, phase, irq_phase); *sync_neg = SYNC_NG; tmpSC->result = DID_ERROR << 16; nsp_scsi_done(tmpSC); return IRQ_HANDLED; } switch (phase & BUSMON_PHASE_MASK) { case BUSPHASE_COMMAND: nsp_dbg(NSP_DEBUG_INTR, "BUSPHASE_COMMAND"); if ((phase & BUSMON_REQ) == 0) { nsp_dbg(NSP_DEBUG_INTR, "REQ == 0"); return IRQ_HANDLED; } tmpSC->SCp.phase = PH_COMMAND; nsp_nexus(tmpSC); /* write scsi command */ nsp_dbg(NSP_DEBUG_INTR, "cmd_len=%d", tmpSC->cmd_len); nsp_index_write(base, COMMANDCTRL, CLEAR_COMMAND_POINTER); for (i = 0; i < tmpSC->cmd_len; i++) { nsp_index_write(base, COMMANDDATA, tmpSC->cmnd[i]); } nsp_index_write(base, COMMANDCTRL, CLEAR_COMMAND_POINTER | AUTO_COMMAND_GO); break; case BUSPHASE_DATA_OUT: nsp_dbg(NSP_DEBUG_INTR, "BUSPHASE_DATA_OUT"); tmpSC->SCp.phase = PH_DATA; tmpSC->SCp.have_data_in = IO_OUT; nsp_pio_write(tmpSC); break; case BUSPHASE_DATA_IN: nsp_dbg(NSP_DEBUG_INTR, "BUSPHASE_DATA_IN"); tmpSC->SCp.phase = PH_DATA; tmpSC->SCp.have_data_in = IO_IN; nsp_pio_read(tmpSC); break; case BUSPHASE_STATUS: nsp_dataphase_bypass(tmpSC); nsp_dbg(NSP_DEBUG_INTR, "BUSPHASE_STATUS"); tmpSC->SCp.phase = PH_STATUS; tmpSC->SCp.Status = nsp_index_read(base, SCSIDATAWITHACK); nsp_dbg(NSP_DEBUG_INTR, "message=0x%x status=0x%x", tmpSC->SCp.Message, tmpSC->SCp.Status); break; case BUSPHASE_MESSAGE_OUT: nsp_dbg(NSP_DEBUG_INTR, "BUSPHASE_MESSAGE_OUT"); if ((phase & BUSMON_REQ) == 0) 
{ goto timer_out; } tmpSC->SCp.phase = PH_MSG_OUT; //*sync_neg = SYNC_NOT_YET; data->MsgLen = i = 0; data->MsgBuffer[i] = IDENTIFY(TRUE, lun); i++; if (*sync_neg == SYNC_NOT_YET) { data->Sync[target].SyncPeriod = 0; data->Sync[target].SyncOffset = 0; /**/ data->MsgBuffer[i] = MSG_EXTENDED; i++; data->MsgBuffer[i] = 3; i++; data->MsgBuffer[i] = MSG_EXT_SDTR; i++; data->MsgBuffer[i] = 0x0c; i++; data->MsgBuffer[i] = 15; i++; /**/ } data->MsgLen = i; nsp_analyze_sdtr(tmpSC); show_message(data); nsp_message_out(tmpSC); break; case BUSPHASE_MESSAGE_IN: nsp_dataphase_bypass(tmpSC); nsp_dbg(NSP_DEBUG_INTR, "BUSPHASE_MESSAGE_IN"); if ((phase & BUSMON_REQ) == 0) { goto timer_out; } tmpSC->SCp.phase = PH_MSG_IN; nsp_message_in(tmpSC); /**/ if (*sync_neg == SYNC_NOT_YET) { //nsp_dbg(NSP_DEBUG_INTR, "sync target=%d,lun=%d",target,lun); if (data->MsgLen >= 5 && data->MsgBuffer[0] == MSG_EXTENDED && data->MsgBuffer[1] == 3 && data->MsgBuffer[2] == MSG_EXT_SDTR ) { data->Sync[target].SyncPeriod = data->MsgBuffer[3]; data->Sync[target].SyncOffset = data->MsgBuffer[4]; //nsp_dbg(NSP_DEBUG_INTR, "sync ok, %d %d", data->MsgBuffer[3], data->MsgBuffer[4]); *sync_neg = SYNC_OK; } else { data->Sync[target].SyncPeriod = 0; data->Sync[target].SyncOffset = 0; *sync_neg = SYNC_NG; } nsp_analyze_sdtr(tmpSC); } /**/ /* search last messeage byte */ tmp = -1; for (i = 0; i < data->MsgLen; i++) { tmp = data->MsgBuffer[i]; if (data->MsgBuffer[i] == MSG_EXTENDED) { i += (1 + data->MsgBuffer[i+1]); } } tmpSC->SCp.Message = tmp; nsp_dbg(NSP_DEBUG_INTR, "message=0x%x len=%d", tmpSC->SCp.Message, data->MsgLen); show_message(data); break; case BUSPHASE_SELECT: default: nsp_dbg(NSP_DEBUG_INTR, "BUSPHASE other"); break; } //nsp_dbg(NSP_DEBUG_INTR, "out"); return IRQ_HANDLED; timer_out: nsp_start_timer(tmpSC, 1000/102); return IRQ_HANDLED; } #ifdef NSP_DEBUG #include "nsp_debug.c" #endif /* NSP_DEBUG */ /*----------------------------------------------------------------*/ /* look for ninja3 card and init if 
found */ /*----------------------------------------------------------------*/ static struct Scsi_Host *nsp_detect(struct scsi_host_template *sht) { struct Scsi_Host *host; /* registered host structure */ nsp_hw_data *data_b = &nsp_data_base, *data; nsp_dbg(NSP_DEBUG_INIT, "this_id=%d", sht->this_id); host = scsi_host_alloc(&nsp_driver_template, sizeof(nsp_hw_data)); if (host == NULL) { nsp_dbg(NSP_DEBUG_INIT, "host failed"); return NULL; } memcpy(host->hostdata, data_b, sizeof(nsp_hw_data)); data = (nsp_hw_data *)host->hostdata; data->ScsiInfo->host = host; #ifdef NSP_DEBUG data->CmdId = 0; #endif nsp_dbg(NSP_DEBUG_INIT, "irq=%d,%d", data_b->IrqNumber, ((nsp_hw_data *)host->hostdata)->IrqNumber); host->unique_id = data->BaseAddress; host->io_port = data->BaseAddress; host->n_io_port = data->NumAddress; host->irq = data->IrqNumber; host->base = data->MmioAddress; spin_lock_init(&(data->Lock)); snprintf(data->nspinfo, sizeof(data->nspinfo), "NinjaSCSI-3/32Bi Driver $Revision: 1.23 $ IO:0x%04lx-0x%04lx MMIO(virt addr):0x%04lx IRQ:%02d", host->io_port, host->io_port + host->n_io_port - 1, host->base, host->irq); sht->name = data->nspinfo; nsp_dbg(NSP_DEBUG_INIT, "end"); return host; /* detect done. */ } /*----------------------------------------------------------------*/ /* return info string */ /*----------------------------------------------------------------*/ static const char *nsp_info(struct Scsi_Host *shpnt) { nsp_hw_data *data = (nsp_hw_data *)shpnt->hostdata; return data->nspinfo; } #undef SPRINTF #define SPRINTF(args...) 
\ do { \ if(length > (pos - buffer)) { \ pos += snprintf(pos, length - (pos - buffer) + 1, ## args); \ nsp_dbg(NSP_DEBUG_PROC, "buffer=0x%p pos=0x%p length=%d %d\n", buffer, pos, length, length - (pos - buffer));\ } \ } while(0) static int nsp_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset, int length, int inout) { int id; char *pos = buffer; int thislength; int speed; unsigned long flags; nsp_hw_data *data; int hostno; if (inout) { return -EINVAL; } hostno = host->host_no; data = (nsp_hw_data *)host->hostdata; SPRINTF("NinjaSCSI status\n\n"); SPRINTF("Driver version: $Revision: 1.23 $\n"); SPRINTF("SCSI host No.: %d\n", hostno); SPRINTF("IRQ: %d\n", host->irq); SPRINTF("IO: 0x%lx-0x%lx\n", host->io_port, host->io_port + host->n_io_port - 1); SPRINTF("MMIO(virtual address): 0x%lx-0x%lx\n", host->base, host->base + data->MmioLength - 1); SPRINTF("sg_tablesize: %d\n", host->sg_tablesize); SPRINTF("burst transfer mode: "); switch (nsp_burst_mode) { case BURST_IO8: SPRINTF("io8"); break; case BURST_IO32: SPRINTF("io32"); break; case BURST_MEM32: SPRINTF("mem32"); break; default: SPRINTF("???"); break; } SPRINTF("\n"); spin_lock_irqsave(&(data->Lock), flags); SPRINTF("CurrentSC: 0x%p\n\n", data->CurrentSC); spin_unlock_irqrestore(&(data->Lock), flags); SPRINTF("SDTR status\n"); for(id = 0; id < ARRAY_SIZE(data->Sync); id++) { SPRINTF("id %d: ", id); if (id == host->this_id) { SPRINTF("----- NinjaSCSI-3 host adapter\n"); continue; } switch(data->Sync[id].SyncNegotiation) { case SYNC_OK: SPRINTF(" sync"); break; case SYNC_NG: SPRINTF("async"); break; case SYNC_NOT_YET: SPRINTF(" none"); break; default: SPRINTF("?????"); break; } if (data->Sync[id].SyncPeriod != 0) { speed = 1000000 / (data->Sync[id].SyncPeriod * 4); SPRINTF(" transfer %d.%dMB/s, offset %d", speed / 1000, speed % 1000, data->Sync[id].SyncOffset ); } SPRINTF("\n"); } thislength = pos - (buffer + offset); if(thislength < 0) { *start = NULL; return 0; } thislength = min(thislength, 
length); *start = buffer + offset; return thislength; } #undef SPRINTF /*---------------------------------------------------------------*/ /* error handler */ /*---------------------------------------------------------------*/ /* static int nsp_eh_abort(struct scsi_cmnd *SCpnt) { nsp_dbg(NSP_DEBUG_BUSRESET, "SCpnt=0x%p", SCpnt); return nsp_eh_bus_reset(SCpnt); }*/ static int nsp_bus_reset(nsp_hw_data *data) { unsigned int base = data->BaseAddress; int i; nsp_write(base, IRQCONTROL, IRQCONTROL_ALLMASK); nsp_index_write(base, SCSIBUSCTRL, SCSI_RST); mdelay(100); /* 100ms */ nsp_index_write(base, SCSIBUSCTRL, 0); for(i = 0; i < 5; i++) { nsp_index_read(base, IRQPHASESENCE); /* dummy read */ } nsphw_init_sync(data); nsp_write(base, IRQCONTROL, IRQCONTROL_ALLCLEAR); return SUCCESS; } static int nsp_eh_bus_reset(struct scsi_cmnd *SCpnt) { nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata; nsp_dbg(NSP_DEBUG_BUSRESET, "SCpnt=0x%p", SCpnt); return nsp_bus_reset(data); } static int nsp_eh_host_reset(struct scsi_cmnd *SCpnt) { nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata; nsp_dbg(NSP_DEBUG_BUSRESET, "in"); nsphw_init(data); return SUCCESS; } /********************************************************************** PCMCIA functions **********************************************************************/ static int nsp_cs_probe(struct pcmcia_device *link) { scsi_info_t *info; nsp_hw_data *data = &nsp_data_base; int ret; nsp_dbg(NSP_DEBUG_INIT, "in"); /* Create new SCSI device */ info = kzalloc(sizeof(*info), GFP_KERNEL); if (info == NULL) { return -ENOMEM; } info->p_dev = link; link->priv = info; data->ScsiInfo = info; nsp_dbg(NSP_DEBUG_INIT, "info=0x%p", info); ret = nsp_cs_config(link); nsp_dbg(NSP_DEBUG_INIT, "link=0x%p", link); return ret; } /* nsp_cs_attach */ static void nsp_cs_detach(struct pcmcia_device *link) { nsp_dbg(NSP_DEBUG_INIT, "in, link=0x%p", link); ((scsi_info_t *)link->priv)->stop = 1; nsp_cs_release(link); kfree(link->priv); 
link->priv = NULL; } /* nsp_cs_detach */ static int nsp_cs_config_check(struct pcmcia_device *p_dev, void *priv_data) { nsp_hw_data *data = priv_data; if (p_dev->config_index == 0) return -ENODEV; /* This reserves IO space but doesn't actually enable it */ if (pcmcia_request_io(p_dev) != 0) goto next_entry; if (resource_size(p_dev->resource[2])) { p_dev->resource[2]->flags |= (WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_CM | WIN_ENABLE); if (p_dev->resource[2]->end < 0x1000) p_dev->resource[2]->end = 0x1000; if (pcmcia_request_window(p_dev, p_dev->resource[2], 0) != 0) goto next_entry; if (pcmcia_map_mem_page(p_dev, p_dev->resource[2], p_dev->card_addr) != 0) goto next_entry; data->MmioAddress = (unsigned long) ioremap_nocache(p_dev->resource[2]->start, resource_size(p_dev->resource[2])); data->MmioLength = resource_size(p_dev->resource[2]); } /* If we got this far, we're cool! */ return 0; next_entry: nsp_dbg(NSP_DEBUG_INIT, "next"); pcmcia_disable_device(p_dev); return -ENODEV; } static int nsp_cs_config(struct pcmcia_device *link) { int ret; scsi_info_t *info = link->priv; struct Scsi_Host *host; nsp_hw_data *data = &nsp_data_base; nsp_dbg(NSP_DEBUG_INIT, "in"); link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_CHECK_VCC | CONF_AUTO_SET_VPP | CONF_AUTO_AUDIO | CONF_AUTO_SET_IOMEM | CONF_AUTO_SET_IO; ret = pcmcia_loop_config(link, nsp_cs_config_check, data); if (ret) goto cs_failed; if (pcmcia_request_irq(link, nspintr)) goto cs_failed; ret = pcmcia_enable_device(link); if (ret) goto cs_failed; if (free_ports) { if (link->resource[0]) { release_region(link->resource[0]->start, resource_size(link->resource[0])); } if (link->resource[1]) { release_region(link->resource[1]->start, resource_size(link->resource[1])); } } /* Set port and IRQ */ data->BaseAddress = link->resource[0]->start; data->NumAddress = resource_size(link->resource[0]); data->IrqNumber = link->irq; nsp_dbg(NSP_DEBUG_INIT, "I/O[0x%x+0x%x] IRQ %d", data->BaseAddress, data->NumAddress, data->IrqNumber); 
if(nsphw_init(data) == FALSE) { goto cs_failed; } host = nsp_detect(&nsp_driver_template); if (host == NULL) { nsp_dbg(NSP_DEBUG_INIT, "detect failed"); goto cs_failed; } ret = scsi_add_host (host, NULL); if (ret) goto cs_failed; scsi_scan_host(host); info->host = host; return 0; cs_failed: nsp_dbg(NSP_DEBUG_INIT, "config fail"); nsp_cs_release(link); return -ENODEV; } /* nsp_cs_config */ static void nsp_cs_release(struct pcmcia_device *link) { scsi_info_t *info = link->priv; nsp_hw_data *data = NULL; if (info->host == NULL) { nsp_msg(KERN_DEBUG, "unexpected card release call."); } else { data = (nsp_hw_data *)info->host->hostdata; } nsp_dbg(NSP_DEBUG_INIT, "link=0x%p", link); /* Unlink the device chain */ if (info->host != NULL) { scsi_remove_host(info->host); } if (resource_size(link->resource[2])) { if (data != NULL) { iounmap((void *)(data->MmioAddress)); } } pcmcia_disable_device(link); if (info->host != NULL) { scsi_host_put(info->host); } } /* nsp_cs_release */ static int nsp_cs_suspend(struct pcmcia_device *link) { scsi_info_t *info = link->priv; nsp_hw_data *data; nsp_dbg(NSP_DEBUG_INIT, "event: suspend"); if (info->host != NULL) { nsp_msg(KERN_INFO, "clear SDTR status"); data = (nsp_hw_data *)info->host->hostdata; nsphw_init_sync(data); } info->stop = 1; return 0; } static int nsp_cs_resume(struct pcmcia_device *link) { scsi_info_t *info = link->priv; nsp_hw_data *data; nsp_dbg(NSP_DEBUG_INIT, "event: resume"); info->stop = 0; if (info->host != NULL) { nsp_msg(KERN_INFO, "reset host and bus"); data = (nsp_hw_data *)info->host->hostdata; nsphw_init (data); nsp_bus_reset(data); } return 0; } /*======================================================================* * module entry point *====================================================================*/ static const struct pcmcia_device_id nsp_cs_ids[] = { PCMCIA_DEVICE_PROD_ID123("IO DATA", "CBSC16 ", "1", 0x547e66dc, 0x0d63a3fd, 0x51de003a), PCMCIA_DEVICE_PROD_ID123("KME ", "SCSI-CARD-001", "1", 
0x534c02bc, 0x52008408, 0x51de003a), PCMCIA_DEVICE_PROD_ID123("KME ", "SCSI-CARD-002", "1", 0x534c02bc, 0xcb09d5b2, 0x51de003a), PCMCIA_DEVICE_PROD_ID123("KME ", "SCSI-CARD-003", "1", 0x534c02bc, 0xbc0ee524, 0x51de003a), PCMCIA_DEVICE_PROD_ID123("KME ", "SCSI-CARD-004", "1", 0x534c02bc, 0x226a7087, 0x51de003a), PCMCIA_DEVICE_PROD_ID123("WBT", "NinjaSCSI-3", "R1.0", 0xc7ba805f, 0xfdc7c97d, 0x6973710e), PCMCIA_DEVICE_PROD_ID123("WORKBIT", "UltraNinja-16", "1", 0x28191418, 0xb70f4b09, 0x51de003a), PCMCIA_DEVICE_NULL }; MODULE_DEVICE_TABLE(pcmcia, nsp_cs_ids); static struct pcmcia_driver nsp_driver = { .owner = THIS_MODULE, .name = "nsp_cs", .probe = nsp_cs_probe, .remove = nsp_cs_detach, .id_table = nsp_cs_ids, .suspend = nsp_cs_suspend, .resume = nsp_cs_resume, }; static int __init nsp_cs_init(void) { return pcmcia_register_driver(&nsp_driver); } static void __exit nsp_cs_exit(void) { pcmcia_unregister_driver(&nsp_driver); } module_init(nsp_cs_init) module_exit(nsp_cs_exit) /* end */
gpl-2.0
LibiSC/tab10test
drivers/net/ethernet/cisco/enic/enic_dev.c
5240
5924
/* * Copyright 2011 Cisco Systems, Inc. All rights reserved. * * This program is free software; you may redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * */ #include <linux/pci.h> #include <linux/etherdevice.h> #include "vnic_dev.h" #include "vnic_vic.h" #include "enic_res.h" #include "enic.h" #include "enic_dev.h" int enic_dev_fw_info(struct enic *enic, struct vnic_devcmd_fw_info **fw_info) { int err; spin_lock(&enic->devcmd_lock); err = vnic_dev_fw_info(enic->vdev, fw_info); spin_unlock(&enic->devcmd_lock); return err; } int enic_dev_stats_dump(struct enic *enic, struct vnic_stats **vstats) { int err; spin_lock(&enic->devcmd_lock); err = vnic_dev_stats_dump(enic->vdev, vstats); spin_unlock(&enic->devcmd_lock); return err; } int enic_dev_add_station_addr(struct enic *enic) { int err; if (!is_valid_ether_addr(enic->netdev->dev_addr)) return -EADDRNOTAVAIL; spin_lock(&enic->devcmd_lock); err = vnic_dev_add_addr(enic->vdev, enic->netdev->dev_addr); spin_unlock(&enic->devcmd_lock); return err; } int enic_dev_del_station_addr(struct enic *enic) { int err; if (!is_valid_ether_addr(enic->netdev->dev_addr)) return -EADDRNOTAVAIL; spin_lock(&enic->devcmd_lock); err = vnic_dev_del_addr(enic->vdev, enic->netdev->dev_addr); spin_unlock(&enic->devcmd_lock); return err; } int enic_dev_packet_filter(struct enic *enic, int directed, int multicast, int broadcast, int promisc, int allmulti) { int err; 
spin_lock(&enic->devcmd_lock); err = vnic_dev_packet_filter(enic->vdev, directed, multicast, broadcast, promisc, allmulti); spin_unlock(&enic->devcmd_lock); return err; } int enic_dev_add_addr(struct enic *enic, u8 *addr) { int err; spin_lock(&enic->devcmd_lock); err = vnic_dev_add_addr(enic->vdev, addr); spin_unlock(&enic->devcmd_lock); return err; } int enic_dev_del_addr(struct enic *enic, u8 *addr) { int err; spin_lock(&enic->devcmd_lock); err = vnic_dev_del_addr(enic->vdev, addr); spin_unlock(&enic->devcmd_lock); return err; } int enic_dev_notify_unset(struct enic *enic) { int err; spin_lock(&enic->devcmd_lock); err = vnic_dev_notify_unset(enic->vdev); spin_unlock(&enic->devcmd_lock); return err; } int enic_dev_hang_notify(struct enic *enic) { int err; spin_lock(&enic->devcmd_lock); err = vnic_dev_hang_notify(enic->vdev); spin_unlock(&enic->devcmd_lock); return err; } int enic_dev_set_ig_vlan_rewrite_mode(struct enic *enic) { int err; spin_lock(&enic->devcmd_lock); err = vnic_dev_set_ig_vlan_rewrite_mode(enic->vdev, IG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN); spin_unlock(&enic->devcmd_lock); return err; } int enic_dev_enable(struct enic *enic) { int err; spin_lock(&enic->devcmd_lock); err = vnic_dev_enable_wait(enic->vdev); spin_unlock(&enic->devcmd_lock); return err; } int enic_dev_disable(struct enic *enic) { int err; spin_lock(&enic->devcmd_lock); err = vnic_dev_disable(enic->vdev); spin_unlock(&enic->devcmd_lock); return err; } int enic_dev_intr_coal_timer_info(struct enic *enic) { int err; spin_lock(&enic->devcmd_lock); err = vnic_dev_intr_coal_timer_info(enic->vdev); spin_unlock(&enic->devcmd_lock); return err; } int enic_vnic_dev_deinit(struct enic *enic) { int err; spin_lock(&enic->devcmd_lock); err = vnic_dev_deinit(enic->vdev); spin_unlock(&enic->devcmd_lock); return err; } int enic_dev_init_prov2(struct enic *enic, struct vic_provinfo *vp) { int err; spin_lock(&enic->devcmd_lock); err = vnic_dev_init_prov2(enic->vdev, (u8 *)vp, 
vic_provinfo_size(vp)); spin_unlock(&enic->devcmd_lock); return err; } int enic_dev_deinit_done(struct enic *enic, int *status) { int err; spin_lock(&enic->devcmd_lock); err = vnic_dev_deinit_done(enic->vdev, status); spin_unlock(&enic->devcmd_lock); return err; } /* rtnl lock is held */ int enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid) { struct enic *enic = netdev_priv(netdev); int err; spin_lock(&enic->devcmd_lock); err = enic_add_vlan(enic, vid); spin_unlock(&enic->devcmd_lock); return err; } /* rtnl lock is held */ int enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) { struct enic *enic = netdev_priv(netdev); int err; spin_lock(&enic->devcmd_lock); err = enic_del_vlan(enic, vid); spin_unlock(&enic->devcmd_lock); return err; } int enic_dev_enable2(struct enic *enic, int active) { int err; spin_lock(&enic->devcmd_lock); err = vnic_dev_enable2(enic->vdev, active); spin_unlock(&enic->devcmd_lock); return err; } int enic_dev_enable2_done(struct enic *enic, int *status) { int err; spin_lock(&enic->devcmd_lock); err = vnic_dev_enable2_done(enic->vdev, status); spin_unlock(&enic->devcmd_lock); return err; } int enic_dev_status_to_errno(int devcmd_status) { switch (devcmd_status) { case ERR_SUCCESS: return 0; case ERR_EINVAL: return -EINVAL; case ERR_EFAULT: return -EFAULT; case ERR_EPERM: return -EPERM; case ERR_EBUSY: return -EBUSY; case ERR_ECMDUNKNOWN: case ERR_ENOTSUPPORTED: return -EOPNOTSUPP; case ERR_EBADSTATE: return -EINVAL; case ERR_ENOMEM: return -ENOMEM; case ERR_ETIMEDOUT: return -ETIMEDOUT; case ERR_ELINKDOWN: return -ENETDOWN; case ERR_EINPROGRESS: return -EINPROGRESS; case ERR_EMAXRES: default: return (devcmd_status < 0) ? devcmd_status : -1; } }
gpl-2.0
LegacyHuawei/android_kernel_huawei_msm7x30
drivers/cpufreq/freq_table.c
6520
6165
/* * linux/drivers/cpufreq/freq_table.c * * Copyright (C) 2002 - 2003 Dominik Brodowski * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/cpufreq.h> /********************************************************************* * FREQUENCY TABLE HELPERS * *********************************************************************/ int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy, struct cpufreq_frequency_table *table) { unsigned int min_freq = ~0; unsigned int max_freq = 0; unsigned int i; for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) { unsigned int freq = table[i].frequency; if (freq == CPUFREQ_ENTRY_INVALID) { pr_debug("table entry %u is invalid, skipping\n", i); continue; } pr_debug("table entry %u: %u kHz, %u index\n", i, freq, table[i].index); if (freq < min_freq) min_freq = freq; if (freq > max_freq) max_freq = freq; } policy->min = policy->cpuinfo.min_freq = min_freq; policy->max = policy->cpuinfo.max_freq = max_freq; if (policy->min == ~0) return -EINVAL; else return 0; } EXPORT_SYMBOL_GPL(cpufreq_frequency_table_cpuinfo); int cpufreq_frequency_table_verify(struct cpufreq_policy *policy, struct cpufreq_frequency_table *table) { unsigned int next_larger = ~0; unsigned int i; unsigned int count = 0; pr_debug("request for verification of policy (%u - %u kHz) for cpu %u\n", policy->min, policy->max, policy->cpu); if (!cpu_online(policy->cpu)) return -EINVAL; cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, policy->cpuinfo.max_freq); for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) { unsigned int freq = table[i].frequency; if (freq == CPUFREQ_ENTRY_INVALID) continue; if ((freq >= policy->min) && (freq <= policy->max)) count++; else if ((next_larger > freq) && (freq > policy->max)) next_larger 
= freq; } if (!count) policy->max = next_larger; cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, policy->cpuinfo.max_freq); pr_debug("verification lead to (%u - %u kHz) for cpu %u\n", policy->min, policy->max, policy->cpu); return 0; } EXPORT_SYMBOL_GPL(cpufreq_frequency_table_verify); int cpufreq_frequency_table_target(struct cpufreq_policy *policy, struct cpufreq_frequency_table *table, unsigned int target_freq, unsigned int relation, unsigned int *index) { struct cpufreq_frequency_table optimal = { .index = ~0, .frequency = 0, }; struct cpufreq_frequency_table suboptimal = { .index = ~0, .frequency = 0, }; unsigned int i; pr_debug("request for target %u kHz (relation: %u) for cpu %u\n", target_freq, relation, policy->cpu); switch (relation) { case CPUFREQ_RELATION_H: suboptimal.frequency = ~0; break; case CPUFREQ_RELATION_L: optimal.frequency = ~0; break; } if (!cpu_online(policy->cpu)) return -EINVAL; for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) { unsigned int freq = table[i].frequency; if (freq == CPUFREQ_ENTRY_INVALID) continue; if ((freq < policy->min) || (freq > policy->max)) continue; switch (relation) { case CPUFREQ_RELATION_H: if (freq <= target_freq) { if (freq >= optimal.frequency) { optimal.frequency = freq; optimal.index = i; } } else { if (freq <= suboptimal.frequency) { suboptimal.frequency = freq; suboptimal.index = i; } } break; case CPUFREQ_RELATION_L: if (freq >= target_freq) { if (freq <= optimal.frequency) { optimal.frequency = freq; optimal.index = i; } } else { if (freq >= suboptimal.frequency) { suboptimal.frequency = freq; suboptimal.index = i; } } break; } } if (optimal.index > i) { if (suboptimal.index > i) return -EINVAL; *index = suboptimal.index; } else *index = optimal.index; pr_debug("target is %u (%u kHz, %u)\n", *index, table[*index].frequency, table[*index].index); return 0; } EXPORT_SYMBOL_GPL(cpufreq_frequency_table_target); static DEFINE_PER_CPU(struct cpufreq_frequency_table *, 
cpufreq_show_table); /** * show_available_freqs - show available frequencies for the specified CPU */ static ssize_t show_available_freqs(struct cpufreq_policy *policy, char *buf) { unsigned int i = 0; unsigned int cpu = policy->cpu; ssize_t count = 0; struct cpufreq_frequency_table *table; if (!per_cpu(cpufreq_show_table, cpu)) return -ENODEV; table = per_cpu(cpufreq_show_table, cpu); for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) { if (table[i].frequency == CPUFREQ_ENTRY_INVALID) continue; count += sprintf(&buf[count], "%d ", table[i].frequency); } count += sprintf(&buf[count], "\n"); return count; } struct freq_attr cpufreq_freq_attr_scaling_available_freqs = { .attr = { .name = "scaling_available_frequencies", .mode = 0444, }, .show = show_available_freqs, }; EXPORT_SYMBOL_GPL(cpufreq_freq_attr_scaling_available_freqs); /* * if you use these, you must assure that the frequency table is valid * all the time between get_attr and put_attr! */ void cpufreq_frequency_table_get_attr(struct cpufreq_frequency_table *table, unsigned int cpu) { pr_debug("setting show_table for cpu %u to %p\n", cpu, table); per_cpu(cpufreq_show_table, cpu) = table; } EXPORT_SYMBOL_GPL(cpufreq_frequency_table_get_attr); void cpufreq_frequency_table_put_attr(unsigned int cpu) { pr_debug("clearing show_table for cpu %u\n", cpu); per_cpu(cpufreq_show_table, cpu) = NULL; } EXPORT_SYMBOL_GPL(cpufreq_frequency_table_put_attr); struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu) { return per_cpu(cpufreq_show_table, cpu); } EXPORT_SYMBOL_GPL(cpufreq_frequency_get_table); MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>"); MODULE_DESCRIPTION("CPUfreq frequency table helpers"); MODULE_LICENSE("GPL");
gpl-2.0
charles1018/kernel_blu_spark
fs/ntfs/aops.c
7032
49127
/** * aops.c - NTFS kernel address space operations and page cache handling. * Part of the Linux-NTFS project. * * Copyright (c) 2001-2007 Anton Altaparmakov * Copyright (c) 2002 Richard Russon * * This program/include file is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program/include file is distributed in the hope that it will be * useful, but WITHOUT ANY WARRANTY; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program (in the main directory of the Linux-NTFS * distribution in the file COPYING); if not, write to the Free Software * Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/errno.h> #include <linux/fs.h> #include <linux/gfp.h> #include <linux/mm.h> #include <linux/pagemap.h> #include <linux/swap.h> #include <linux/buffer_head.h> #include <linux/writeback.h> #include <linux/bit_spinlock.h> #include "aops.h" #include "attrib.h" #include "debug.h" #include "inode.h" #include "mft.h" #include "runlist.h" #include "types.h" #include "ntfs.h" /** * ntfs_end_buffer_async_read - async io completion for reading attributes * @bh: buffer head on which io is completed * @uptodate: whether @bh is now uptodate or not * * Asynchronous I/O completion handler for reading pages belonging to the * attribute address space of an inode. The inodes can either be files or * directories or they can be fake inodes describing some attribute. * * If NInoMstProtected(), perform the post read mst fixups when all IO on the * page has been completed and mark the page uptodate or set the error bit on * the page. 
To determine the size of the records that need fixing up, we * cheat a little bit by setting the index_block_size in ntfs_inode to the ntfs * record size, and index_block_size_bits, to the log(base 2) of the ntfs * record size. */ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate) { unsigned long flags; struct buffer_head *first, *tmp; struct page *page; struct inode *vi; ntfs_inode *ni; int page_uptodate = 1; page = bh->b_page; vi = page->mapping->host; ni = NTFS_I(vi); if (likely(uptodate)) { loff_t i_size; s64 file_ofs, init_size; set_buffer_uptodate(bh); file_ofs = ((s64)page->index << PAGE_CACHE_SHIFT) + bh_offset(bh); read_lock_irqsave(&ni->size_lock, flags); init_size = ni->initialized_size; i_size = i_size_read(vi); read_unlock_irqrestore(&ni->size_lock, flags); if (unlikely(init_size > i_size)) { /* Race with shrinking truncate. */ init_size = i_size; } /* Check for the current buffer head overflowing. */ if (unlikely(file_ofs + bh->b_size > init_size)) { int ofs; void *kaddr; ofs = 0; if (file_ofs < init_size) ofs = init_size - file_ofs; local_irq_save(flags); kaddr = kmap_atomic(page); memset(kaddr + bh_offset(bh) + ofs, 0, bh->b_size - ofs); flush_dcache_page(page); kunmap_atomic(kaddr); local_irq_restore(flags); } } else { clear_buffer_uptodate(bh); SetPageError(page); ntfs_error(ni->vol->sb, "Buffer I/O error, logical block " "0x%llx.", (unsigned long long)bh->b_blocknr); } first = page_buffers(page); local_irq_save(flags); bit_spin_lock(BH_Uptodate_Lock, &first->b_state); clear_buffer_async_read(bh); unlock_buffer(bh); tmp = bh; do { if (!buffer_uptodate(tmp)) page_uptodate = 0; if (buffer_async_read(tmp)) { if (likely(buffer_locked(tmp))) goto still_busy; /* Async buffers must be locked. 
*/ BUG(); } tmp = tmp->b_this_page; } while (tmp != bh); bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); local_irq_restore(flags); /* * If none of the buffers had errors then we can set the page uptodate, * but we first have to perform the post read mst fixups, if the * attribute is mst protected, i.e. if NInoMstProteced(ni) is true. * Note we ignore fixup errors as those are detected when * map_mft_record() is called which gives us per record granularity * rather than per page granularity. */ if (!NInoMstProtected(ni)) { if (likely(page_uptodate && !PageError(page))) SetPageUptodate(page); } else { u8 *kaddr; unsigned int i, recs; u32 rec_size; rec_size = ni->itype.index.block_size; recs = PAGE_CACHE_SIZE / rec_size; /* Should have been verified before we got here... */ BUG_ON(!recs); local_irq_save(flags); kaddr = kmap_atomic(page); for (i = 0; i < recs; i++) post_read_mst_fixup((NTFS_RECORD*)(kaddr + i * rec_size), rec_size); kunmap_atomic(kaddr); local_irq_restore(flags); flush_dcache_page(page); if (likely(page_uptodate && !PageError(page))) SetPageUptodate(page); } unlock_page(page); return; still_busy: bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); local_irq_restore(flags); return; } /** * ntfs_read_block - fill a @page of an address space with data * @page: page cache page to fill with data * * Fill the page @page of the address space belonging to the @page->host inode. * We read each buffer asynchronously and when all buffers are read in, our io * completion handler ntfs_end_buffer_read_async(), if required, automatically * applies the mst fixups to the page before finally marking it uptodate and * unlocking it. * * We only enforce allocated_size limit because i_size is checked for in * generic_file_read(). * * Return 0 on success and -errno on error. * * Contains an adapted version of fs/buffer.c::block_read_full_page(). 
*/ static int ntfs_read_block(struct page *page) { loff_t i_size; VCN vcn; LCN lcn; s64 init_size; struct inode *vi; ntfs_inode *ni; ntfs_volume *vol; runlist_element *rl; struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE]; sector_t iblock, lblock, zblock; unsigned long flags; unsigned int blocksize, vcn_ofs; int i, nr; unsigned char blocksize_bits; vi = page->mapping->host; ni = NTFS_I(vi); vol = ni->vol; /* $MFT/$DATA must have its complete runlist in memory at all times. */ BUG_ON(!ni->runlist.rl && !ni->mft_no && !NInoAttr(ni)); blocksize = vol->sb->s_blocksize; blocksize_bits = vol->sb->s_blocksize_bits; if (!page_has_buffers(page)) { create_empty_buffers(page, blocksize, 0); if (unlikely(!page_has_buffers(page))) { unlock_page(page); return -ENOMEM; } } bh = head = page_buffers(page); BUG_ON(!bh); /* * We may be racing with truncate. To avoid some of the problems we * now take a snapshot of the various sizes and use those for the whole * of the function. In case of an extending truncate it just means we * may leave some buffers unmapped which are now allocated. This is * not a problem since these buffers will just get mapped when a write * occurs. In case of a shrinking truncate, we will detect this later * on due to the runlist being incomplete and if the page is being * fully truncated, truncate will throw it away as soon as we unlock * it so no need to worry what we do with it. */ iblock = (s64)page->index << (PAGE_CACHE_SHIFT - blocksize_bits); read_lock_irqsave(&ni->size_lock, flags); lblock = (ni->allocated_size + blocksize - 1) >> blocksize_bits; init_size = ni->initialized_size; i_size = i_size_read(vi); read_unlock_irqrestore(&ni->size_lock, flags); if (unlikely(init_size > i_size)) { /* Race with shrinking truncate. */ init_size = i_size; } zblock = (init_size + blocksize - 1) >> blocksize_bits; /* Loop through all the buffers in the page. 
*/ rl = NULL; nr = i = 0; do { int err = 0; if (unlikely(buffer_uptodate(bh))) continue; if (unlikely(buffer_mapped(bh))) { arr[nr++] = bh; continue; } bh->b_bdev = vol->sb->s_bdev; /* Is the block within the allowed limits? */ if (iblock < lblock) { bool is_retry = false; /* Convert iblock into corresponding vcn and offset. */ vcn = (VCN)iblock << blocksize_bits >> vol->cluster_size_bits; vcn_ofs = ((VCN)iblock << blocksize_bits) & vol->cluster_size_mask; if (!rl) { lock_retry_remap: down_read(&ni->runlist.lock); rl = ni->runlist.rl; } if (likely(rl != NULL)) { /* Seek to element containing target vcn. */ while (rl->length && rl[1].vcn <= vcn) rl++; lcn = ntfs_rl_vcn_to_lcn(rl, vcn); } else lcn = LCN_RL_NOT_MAPPED; /* Successful remap. */ if (lcn >= 0) { /* Setup buffer head to correct block. */ bh->b_blocknr = ((lcn << vol->cluster_size_bits) + vcn_ofs) >> blocksize_bits; set_buffer_mapped(bh); /* Only read initialized data blocks. */ if (iblock < zblock) { arr[nr++] = bh; continue; } /* Fully non-initialized data block, zero it. */ goto handle_zblock; } /* It is a hole, need to zero it. */ if (lcn == LCN_HOLE) goto handle_hole; /* If first try and runlist unmapped, map and retry. */ if (!is_retry && lcn == LCN_RL_NOT_MAPPED) { is_retry = true; /* * Attempt to map runlist, dropping lock for * the duration. */ up_read(&ni->runlist.lock); err = ntfs_map_runlist(ni, vcn); if (likely(!err)) goto lock_retry_remap; rl = NULL; } else if (!rl) up_read(&ni->runlist.lock); /* * If buffer is outside the runlist, treat it as a * hole. This can happen due to concurrent truncate * for example. */ if (err == -ENOENT || lcn == LCN_ENOENT) { err = 0; goto handle_hole; } /* Hard error, zero out region. 
*/ if (!err) err = -EIO; bh->b_blocknr = -1; SetPageError(page); ntfs_error(vol->sb, "Failed to read from inode 0x%lx, " "attribute type 0x%x, vcn 0x%llx, " "offset 0x%x because its location on " "disk could not be determined%s " "(error code %i).", ni->mft_no, ni->type, (unsigned long long)vcn, vcn_ofs, is_retry ? " even after " "retrying" : "", err); } /* * Either iblock was outside lblock limits or * ntfs_rl_vcn_to_lcn() returned error. Just zero that portion * of the page and set the buffer uptodate. */ handle_hole: bh->b_blocknr = -1UL; clear_buffer_mapped(bh); handle_zblock: zero_user(page, i * blocksize, blocksize); if (likely(!err)) set_buffer_uptodate(bh); } while (i++, iblock++, (bh = bh->b_this_page) != head); /* Release the lock if we took it. */ if (rl) up_read(&ni->runlist.lock); /* Check we have at least one buffer ready for i/o. */ if (nr) { struct buffer_head *tbh; /* Lock the buffers. */ for (i = 0; i < nr; i++) { tbh = arr[i]; lock_buffer(tbh); tbh->b_end_io = ntfs_end_buffer_async_read; set_buffer_async_read(tbh); } /* Finally, start i/o on the buffers. */ for (i = 0; i < nr; i++) { tbh = arr[i]; if (likely(!buffer_uptodate(tbh))) submit_bh(READ, tbh); else ntfs_end_buffer_async_read(tbh, 1); } return 0; } /* No i/o was scheduled on any of the buffers. */ if (likely(!PageError(page))) SetPageUptodate(page); else /* Signal synchronous i/o error. */ nr = -EIO; unlock_page(page); return nr; } /** * ntfs_readpage - fill a @page of a @file with data from the device * @file: open file to which the page @page belongs or NULL * @page: page cache page to fill with data * * For non-resident attributes, ntfs_readpage() fills the @page of the open * file @file by calling the ntfs version of the generic block_read_full_page() * function, ntfs_read_block(), which in turn creates and reads in the buffers * associated with the page asynchronously. 
* * For resident attributes, OTOH, ntfs_readpage() fills @page by copying the * data from the mft record (which at this stage is most likely in memory) and * fills the remainder with zeroes. Thus, in this case, I/O is synchronous, as * even if the mft record is not cached at this point in time, we need to wait * for it to be read in before we can do the copy. * * Return 0 on success and -errno on error. */ static int ntfs_readpage(struct file *file, struct page *page) { loff_t i_size; struct inode *vi; ntfs_inode *ni, *base_ni; u8 *addr; ntfs_attr_search_ctx *ctx; MFT_RECORD *mrec; unsigned long flags; u32 attr_len; int err = 0; retry_readpage: BUG_ON(!PageLocked(page)); vi = page->mapping->host; i_size = i_size_read(vi); /* Is the page fully outside i_size? (truncate in progress) */ if (unlikely(page->index >= (i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT)) { zero_user(page, 0, PAGE_CACHE_SIZE); ntfs_debug("Read outside i_size - truncated?"); goto done; } /* * This can potentially happen because we clear PageUptodate() during * ntfs_writepage() of MstProtected() attributes. */ if (PageUptodate(page)) { unlock_page(page); return 0; } ni = NTFS_I(vi); /* * Only $DATA attributes can be encrypted and only unnamed $DATA * attributes can be compressed. Index root can have the flags set but * this means to create compressed/encrypted files, not that the * attribute is compressed/encrypted. Note we need to check for * AT_INDEX_ALLOCATION since this is the type of both directory and * index inodes. */ if (ni->type != AT_INDEX_ALLOCATION) { /* If attribute is encrypted, deny access, just like NT4. */ if (NInoEncrypted(ni)) { BUG_ON(ni->type != AT_DATA); err = -EACCES; goto err_out; } /* Compressed data streams are handled in compress.c. 
*/ if (NInoNonResident(ni) && NInoCompressed(ni)) { BUG_ON(ni->type != AT_DATA); BUG_ON(ni->name_len); return ntfs_read_compressed_block(page); } } /* NInoNonResident() == NInoIndexAllocPresent() */ if (NInoNonResident(ni)) { /* Normal, non-resident data stream. */ return ntfs_read_block(page); } /* * Attribute is resident, implying it is not compressed or encrypted. * This also means the attribute is smaller than an mft record and * hence smaller than a page, so can simply zero out any pages with * index above 0. Note the attribute can actually be marked compressed * but if it is resident the actual data is not compressed so we are * ok to ignore the compressed flag here. */ if (unlikely(page->index > 0)) { zero_user(page, 0, PAGE_CACHE_SIZE); goto done; } if (!NInoAttr(ni)) base_ni = ni; else base_ni = ni->ext.base_ntfs_ino; /* Map, pin, and lock the mft record. */ mrec = map_mft_record(base_ni); if (IS_ERR(mrec)) { err = PTR_ERR(mrec); goto err_out; } /* * If a parallel write made the attribute non-resident, drop the mft * record and retry the readpage. */ if (unlikely(NInoNonResident(ni))) { unmap_mft_record(base_ni); goto retry_readpage; } ctx = ntfs_attr_get_search_ctx(base_ni, mrec); if (unlikely(!ctx)) { err = -ENOMEM; goto unm_err_out; } err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len, CASE_SENSITIVE, 0, NULL, 0, ctx); if (unlikely(err)) goto put_unm_err_out; attr_len = le32_to_cpu(ctx->attr->data.resident.value_length); read_lock_irqsave(&ni->size_lock, flags); if (unlikely(attr_len > ni->initialized_size)) attr_len = ni->initialized_size; i_size = i_size_read(vi); read_unlock_irqrestore(&ni->size_lock, flags); if (unlikely(attr_len > i_size)) { /* Race with shrinking truncate. */ attr_len = i_size; } addr = kmap_atomic(page); /* Copy the data to the page. */ memcpy(addr, (u8*)ctx->attr + le16_to_cpu(ctx->attr->data.resident.value_offset), attr_len); /* Zero the remainder of the page. 
*/ memset(addr + attr_len, 0, PAGE_CACHE_SIZE - attr_len); flush_dcache_page(page); kunmap_atomic(addr); put_unm_err_out: ntfs_attr_put_search_ctx(ctx); unm_err_out: unmap_mft_record(base_ni); done: SetPageUptodate(page); err_out: unlock_page(page); return err; } #ifdef NTFS_RW /** * ntfs_write_block - write a @page to the backing store * @page: page cache page to write out * @wbc: writeback control structure * * This function is for writing pages belonging to non-resident, non-mst * protected attributes to their backing store. * * For a page with buffers, map and write the dirty buffers asynchronously * under page writeback. For a page without buffers, create buffers for the * page, then proceed as above. * * If a page doesn't have buffers the page dirty state is definitive. If a page * does have buffers, the page dirty state is just a hint, and the buffer dirty * state is definitive. (A hint which has rules: dirty buffers against a clean * page is illegal. Other combinations are legal and need to be handled. In * particular a dirty page containing clean buffers for example.) * * Return 0 on success and -errno on error. * * Based on ntfs_read_block() and __block_write_full_page(). 
*/ static int ntfs_write_block(struct page *page, struct writeback_control *wbc) { VCN vcn; LCN lcn; s64 initialized_size; loff_t i_size; sector_t block, dblock, iblock; struct inode *vi; ntfs_inode *ni; ntfs_volume *vol; runlist_element *rl; struct buffer_head *bh, *head; unsigned long flags; unsigned int blocksize, vcn_ofs; int err; bool need_end_writeback; unsigned char blocksize_bits; vi = page->mapping->host; ni = NTFS_I(vi); vol = ni->vol; ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, page index " "0x%lx.", ni->mft_no, ni->type, page->index); BUG_ON(!NInoNonResident(ni)); BUG_ON(NInoMstProtected(ni)); blocksize = vol->sb->s_blocksize; blocksize_bits = vol->sb->s_blocksize_bits; if (!page_has_buffers(page)) { BUG_ON(!PageUptodate(page)); create_empty_buffers(page, blocksize, (1 << BH_Uptodate) | (1 << BH_Dirty)); if (unlikely(!page_has_buffers(page))) { ntfs_warning(vol->sb, "Error allocating page " "buffers. Redirtying page so we try " "again later."); /* * Put the page back on mapping->dirty_pages, but leave * its buffers' dirty state as-is. */ redirty_page_for_writepage(wbc, page); unlock_page(page); return 0; } } bh = head = page_buffers(page); BUG_ON(!bh); /* NOTE: Different naming scheme to ntfs_read_block()! */ /* The first block in the page. */ block = (s64)page->index << (PAGE_CACHE_SHIFT - blocksize_bits); read_lock_irqsave(&ni->size_lock, flags); i_size = i_size_read(vi); initialized_size = ni->initialized_size; read_unlock_irqrestore(&ni->size_lock, flags); /* The first out of bounds block for the data size. */ dblock = (i_size + blocksize - 1) >> blocksize_bits; /* The last (fully or partially) initialized block. */ iblock = initialized_size >> blocksize_bits; /* * Be very careful. We have no exclusion from __set_page_dirty_buffers * here, and the (potentially unmapped) buffers may become dirty at * any time. If a buffer becomes dirty here after we've inspected it * then we just miss that fact, and the page stays dirty. 
* * Buffers outside i_size may be dirtied by __set_page_dirty_buffers; * handle that here by just cleaning them. */ /* * Loop through all the buffers in the page, mapping all the dirty * buffers to disk addresses and handling any aliases from the * underlying block device's mapping. */ rl = NULL; err = 0; do { bool is_retry = false; if (unlikely(block >= dblock)) { /* * Mapped buffers outside i_size will occur, because * this page can be outside i_size when there is a * truncate in progress. The contents of such buffers * were zeroed by ntfs_writepage(). * * FIXME: What about the small race window where * ntfs_writepage() has not done any clearing because * the page was within i_size but before we get here, * vmtruncate() modifies i_size? */ clear_buffer_dirty(bh); set_buffer_uptodate(bh); continue; } /* Clean buffers are not written out, so no need to map them. */ if (!buffer_dirty(bh)) continue; /* Make sure we have enough initialized size. */ if (unlikely((block >= iblock) && (initialized_size < i_size))) { /* * If this page is fully outside initialized size, zero * out all pages between the current initialized size * and the current page. Just use ntfs_readpage() to do * the zeroing transparently. */ if (block > iblock) { // TODO: // For each page do: // - read_cache_page() // Again for each page do: // - wait_on_page_locked() // - Check (PageUptodate(page) && // !PageError(page)) // Update initialized size in the attribute and // in the inode. // Again, for each page do: // __set_page_dirty_buffers(); // page_cache_release() // We don't need to wait on the writes. // Update iblock. } /* * The current page straddles initialized size. Zero * all non-uptodate buffers and set them uptodate (and * dirty?). Note, there aren't any non-uptodate buffers * if the page is uptodate. * FIXME: For an uptodate page, the buffers may need to * be written out because they were not initialized on * disk before. 
*/ if (!PageUptodate(page)) { // TODO: // Zero any non-uptodate buffers up to i_size. // Set them uptodate and dirty. } // TODO: // Update initialized size in the attribute and in the // inode (up to i_size). // Update iblock. // FIXME: This is inefficient. Try to batch the two // size changes to happen in one go. ntfs_error(vol->sb, "Writing beyond initialized size " "is not supported yet. Sorry."); err = -EOPNOTSUPP; break; // Do NOT set_buffer_new() BUT DO clear buffer range // outside write request range. // set_buffer_uptodate() on complete buffers as well as // set_buffer_dirty(). } /* No need to map buffers that are already mapped. */ if (buffer_mapped(bh)) continue; /* Unmapped, dirty buffer. Need to map it. */ bh->b_bdev = vol->sb->s_bdev; /* Convert block into corresponding vcn and offset. */ vcn = (VCN)block << blocksize_bits; vcn_ofs = vcn & vol->cluster_size_mask; vcn >>= vol->cluster_size_bits; if (!rl) { lock_retry_remap: down_read(&ni->runlist.lock); rl = ni->runlist.rl; } if (likely(rl != NULL)) { /* Seek to element containing target vcn. */ while (rl->length && rl[1].vcn <= vcn) rl++; lcn = ntfs_rl_vcn_to_lcn(rl, vcn); } else lcn = LCN_RL_NOT_MAPPED; /* Successful remap. */ if (lcn >= 0) { /* Setup buffer head to point to correct block. */ bh->b_blocknr = ((lcn << vol->cluster_size_bits) + vcn_ofs) >> blocksize_bits; set_buffer_mapped(bh); continue; } /* It is a hole, need to instantiate it. */ if (lcn == LCN_HOLE) { u8 *kaddr; unsigned long *bpos, *bend; /* Check if the buffer is zero. */ kaddr = kmap_atomic(page); bpos = (unsigned long *)(kaddr + bh_offset(bh)); bend = (unsigned long *)((u8*)bpos + blocksize); do { if (unlikely(*bpos)) break; } while (likely(++bpos < bend)); kunmap_atomic(kaddr); if (bpos == bend) { /* * Buffer is zero and sparse, no need to write * it. */ bh->b_blocknr = -1; clear_buffer_dirty(bh); continue; } // TODO: Instantiate the hole. 
// clear_buffer_new(bh); // unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr); ntfs_error(vol->sb, "Writing into sparse regions is " "not supported yet. Sorry."); err = -EOPNOTSUPP; break; } /* If first try and runlist unmapped, map and retry. */ if (!is_retry && lcn == LCN_RL_NOT_MAPPED) { is_retry = true; /* * Attempt to map runlist, dropping lock for * the duration. */ up_read(&ni->runlist.lock); err = ntfs_map_runlist(ni, vcn); if (likely(!err)) goto lock_retry_remap; rl = NULL; } else if (!rl) up_read(&ni->runlist.lock); /* * If buffer is outside the runlist, truncate has cut it out * of the runlist. Just clean and clear the buffer and set it * uptodate so it can get discarded by the VM. */ if (err == -ENOENT || lcn == LCN_ENOENT) { bh->b_blocknr = -1; clear_buffer_dirty(bh); zero_user(page, bh_offset(bh), blocksize); set_buffer_uptodate(bh); err = 0; continue; } /* Failed to map the buffer, even after retrying. */ if (!err) err = -EIO; bh->b_blocknr = -1; ntfs_error(vol->sb, "Failed to write to inode 0x%lx, " "attribute type 0x%x, vcn 0x%llx, offset 0x%x " "because its location on disk could not be " "determined%s (error code %i).", ni->mft_no, ni->type, (unsigned long long)vcn, vcn_ofs, is_retry ? " even after " "retrying" : "", err); break; } while (block++, (bh = bh->b_this_page) != head); /* Release the lock if we took it. */ if (rl) up_read(&ni->runlist.lock); /* For the error case, need to reset bh to the beginning. */ bh = head; /* Just an optimization, so ->readpage() is not called later. */ if (unlikely(!PageUptodate(page))) { int uptodate = 1; do { if (!buffer_uptodate(bh)) { uptodate = 0; bh = head; break; } } while ((bh = bh->b_this_page) != head); if (uptodate) SetPageUptodate(page); } /* Setup all mapped, dirty buffers for async write i/o. 
*/ do { if (buffer_mapped(bh) && buffer_dirty(bh)) { lock_buffer(bh); if (test_clear_buffer_dirty(bh)) { BUG_ON(!buffer_uptodate(bh)); mark_buffer_async_write(bh); } else unlock_buffer(bh); } else if (unlikely(err)) { /* * For the error case. The buffer may have been set * dirty during attachment to a dirty page. */ if (err != -ENOMEM) clear_buffer_dirty(bh); } } while ((bh = bh->b_this_page) != head); if (unlikely(err)) { // TODO: Remove the -EOPNOTSUPP check later on... if (unlikely(err == -EOPNOTSUPP)) err = 0; else if (err == -ENOMEM) { ntfs_warning(vol->sb, "Error allocating memory. " "Redirtying page so we try again " "later."); /* * Put the page back on mapping->dirty_pages, but * leave its buffer's dirty state as-is. */ redirty_page_for_writepage(wbc, page); err = 0; } else SetPageError(page); } BUG_ON(PageWriteback(page)); set_page_writeback(page); /* Keeps try_to_free_buffers() away. */ /* Submit the prepared buffers for i/o. */ need_end_writeback = true; do { struct buffer_head *next = bh->b_this_page; if (buffer_async_write(bh)) { submit_bh(WRITE, bh); need_end_writeback = false; } bh = next; } while (bh != head); unlock_page(page); /* If no i/o was started, need to end_page_writeback(). */ if (unlikely(need_end_writeback)) end_page_writeback(page); ntfs_debug("Done."); return err; } /** * ntfs_write_mst_block - write a @page to the backing store * @page: page cache page to write out * @wbc: writeback control structure * * This function is for writing pages belonging to non-resident, mst protected * attributes to their backing store. The only supported attributes are index * allocation and $MFT/$DATA. Both directory inodes and index inodes are * supported for the index allocation case. * * The page must remain locked for the duration of the write because we apply * the mst fixups, write, and then undo the fixups, so if we were to unlock the * page before undoing the fixups, any other user of the page will see the * page contents as corrupt. 
* * We clear the page uptodate flag for the duration of the function to ensure * exclusion for the $MFT/$DATA case against someone mapping an mft record we * are about to apply the mst fixups to. * * Return 0 on success and -errno on error. * * Based on ntfs_write_block(), ntfs_mft_writepage(), and * write_mft_record_nolock(). */ static int ntfs_write_mst_block(struct page *page, struct writeback_control *wbc) { sector_t block, dblock, rec_block; struct inode *vi = page->mapping->host; ntfs_inode *ni = NTFS_I(vi); ntfs_volume *vol = ni->vol; u8 *kaddr; unsigned int rec_size = ni->itype.index.block_size; ntfs_inode *locked_nis[PAGE_CACHE_SIZE / rec_size]; struct buffer_head *bh, *head, *tbh, *rec_start_bh; struct buffer_head *bhs[MAX_BUF_PER_PAGE]; runlist_element *rl; int i, nr_locked_nis, nr_recs, nr_bhs, max_bhs, bhs_per_rec, err, err2; unsigned bh_size, rec_size_bits; bool sync, is_mft, page_is_dirty, rec_is_dirty; unsigned char bh_size_bits; ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, page index " "0x%lx.", vi->i_ino, ni->type, page->index); BUG_ON(!NInoNonResident(ni)); BUG_ON(!NInoMstProtected(ni)); is_mft = (S_ISREG(vi->i_mode) && !vi->i_ino); /* * NOTE: ntfs_write_mst_block() would be called for $MFTMirr if a page * in its page cache were to be marked dirty. However this should * never happen with the current driver and considering we do not * handle this case here we do want to BUG(), at least for now. */ BUG_ON(!(is_mft || S_ISDIR(vi->i_mode) || (NInoAttr(ni) && ni->type == AT_INDEX_ALLOCATION))); bh_size = vol->sb->s_blocksize; bh_size_bits = vol->sb->s_blocksize_bits; max_bhs = PAGE_CACHE_SIZE / bh_size; BUG_ON(!max_bhs); BUG_ON(max_bhs > MAX_BUF_PER_PAGE); /* Were we called for sync purposes? */ sync = (wbc->sync_mode == WB_SYNC_ALL); /* Make sure we have mapped buffers. 
*/ bh = head = page_buffers(page); BUG_ON(!bh); rec_size_bits = ni->itype.index.block_size_bits; BUG_ON(!(PAGE_CACHE_SIZE >> rec_size_bits)); bhs_per_rec = rec_size >> bh_size_bits; BUG_ON(!bhs_per_rec); /* The first block in the page. */ rec_block = block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bh_size_bits); /* The first out of bounds block for the data size. */ dblock = (i_size_read(vi) + bh_size - 1) >> bh_size_bits; rl = NULL; err = err2 = nr_bhs = nr_recs = nr_locked_nis = 0; page_is_dirty = rec_is_dirty = false; rec_start_bh = NULL; do { bool is_retry = false; if (likely(block < rec_block)) { if (unlikely(block >= dblock)) { clear_buffer_dirty(bh); set_buffer_uptodate(bh); continue; } /* * This block is not the first one in the record. We * ignore the buffer's dirty state because we could * have raced with a parallel mark_ntfs_record_dirty(). */ if (!rec_is_dirty) continue; if (unlikely(err2)) { if (err2 != -ENOMEM) clear_buffer_dirty(bh); continue; } } else /* if (block == rec_block) */ { BUG_ON(block > rec_block); /* This block is the first one in the record. */ rec_block += bhs_per_rec; err2 = 0; if (unlikely(block >= dblock)) { clear_buffer_dirty(bh); continue; } if (!buffer_dirty(bh)) { /* Clean records are not written out. */ rec_is_dirty = false; continue; } rec_is_dirty = true; rec_start_bh = bh; } /* Need to map the buffer if it is not mapped already. */ if (unlikely(!buffer_mapped(bh))) { VCN vcn; LCN lcn; unsigned int vcn_ofs; bh->b_bdev = vol->sb->s_bdev; /* Obtain the vcn and offset of the current block. */ vcn = (VCN)block << bh_size_bits; vcn_ofs = vcn & vol->cluster_size_mask; vcn >>= vol->cluster_size_bits; if (!rl) { lock_retry_remap: down_read(&ni->runlist.lock); rl = ni->runlist.rl; } if (likely(rl != NULL)) { /* Seek to element containing target vcn. */ while (rl->length && rl[1].vcn <= vcn) rl++; lcn = ntfs_rl_vcn_to_lcn(rl, vcn); } else lcn = LCN_RL_NOT_MAPPED; /* Successful remap. 
*/ if (likely(lcn >= 0)) { /* Setup buffer head to correct block. */ bh->b_blocknr = ((lcn << vol->cluster_size_bits) + vcn_ofs) >> bh_size_bits; set_buffer_mapped(bh); } else { /* * Remap failed. Retry to map the runlist once * unless we are working on $MFT which always * has the whole of its runlist in memory. */ if (!is_mft && !is_retry && lcn == LCN_RL_NOT_MAPPED) { is_retry = true; /* * Attempt to map runlist, dropping * lock for the duration. */ up_read(&ni->runlist.lock); err2 = ntfs_map_runlist(ni, vcn); if (likely(!err2)) goto lock_retry_remap; if (err2 == -ENOMEM) page_is_dirty = true; lcn = err2; } else { err2 = -EIO; if (!rl) up_read(&ni->runlist.lock); } /* Hard error. Abort writing this record. */ if (!err || err == -ENOMEM) err = err2; bh->b_blocknr = -1; ntfs_error(vol->sb, "Cannot write ntfs record " "0x%llx (inode 0x%lx, " "attribute type 0x%x) because " "its location on disk could " "not be determined (error " "code %lli).", (long long)block << bh_size_bits >> vol->mft_record_size_bits, ni->mft_no, ni->type, (long long)lcn); /* * If this is not the first buffer, remove the * buffers in this record from the list of * buffers to write and clear their dirty bit * if not error -ENOMEM. */ if (rec_start_bh != bh) { while (bhs[--nr_bhs] != rec_start_bh) ; if (err2 != -ENOMEM) { do { clear_buffer_dirty( rec_start_bh); } while ((rec_start_bh = rec_start_bh-> b_this_page) != bh); } } continue; } } BUG_ON(!buffer_uptodate(bh)); BUG_ON(nr_bhs >= max_bhs); bhs[nr_bhs++] = bh; } while (block++, (bh = bh->b_this_page) != head); if (unlikely(rl)) up_read(&ni->runlist.lock); /* If there were no dirty buffers, we are done. */ if (!nr_bhs) goto done; /* Map the page so we can access its contents. */ kaddr = kmap(page); /* Clear the page uptodate flag whilst the mst fixups are applied. */ BUG_ON(!PageUptodate(page)); ClearPageUptodate(page); for (i = 0; i < nr_bhs; i++) { unsigned int ofs; /* Skip buffers which are not at the beginning of records. 
*/ if (i % bhs_per_rec) continue; tbh = bhs[i]; ofs = bh_offset(tbh); if (is_mft) { ntfs_inode *tni; unsigned long mft_no; /* Get the mft record number. */ mft_no = (((s64)page->index << PAGE_CACHE_SHIFT) + ofs) >> rec_size_bits; /* Check whether to write this mft record. */ tni = NULL; if (!ntfs_may_write_mft_record(vol, mft_no, (MFT_RECORD*)(kaddr + ofs), &tni)) { /* * The record should not be written. This * means we need to redirty the page before * returning. */ page_is_dirty = true; /* * Remove the buffers in this mft record from * the list of buffers to write. */ do { bhs[i] = NULL; } while (++i % bhs_per_rec); continue; } /* * The record should be written. If a locked ntfs * inode was returned, add it to the array of locked * ntfs inodes. */ if (tni) locked_nis[nr_locked_nis++] = tni; } /* Apply the mst protection fixups. */ err2 = pre_write_mst_fixup((NTFS_RECORD*)(kaddr + ofs), rec_size); if (unlikely(err2)) { if (!err || err == -ENOMEM) err = -EIO; ntfs_error(vol->sb, "Failed to apply mst fixups " "(inode 0x%lx, attribute type 0x%x, " "page index 0x%lx, page offset 0x%x)!" " Unmount and run chkdsk.", vi->i_ino, ni->type, page->index, ofs); /* * Mark all the buffers in this record clean as we do * not want to write corrupt data to disk. */ do { clear_buffer_dirty(bhs[i]); bhs[i] = NULL; } while (++i % bhs_per_rec); continue; } nr_recs++; } /* If no records are to be written out, we are done. */ if (!nr_recs) goto unm_done; flush_dcache_page(page); /* Lock buffers and start synchronous write i/o on them. */ for (i = 0; i < nr_bhs; i++) { tbh = bhs[i]; if (!tbh) continue; if (!trylock_buffer(tbh)) BUG(); /* The buffer dirty state is now irrelevant, just clean it. */ clear_buffer_dirty(tbh); BUG_ON(!buffer_uptodate(tbh)); BUG_ON(!buffer_mapped(tbh)); get_bh(tbh); tbh->b_end_io = end_buffer_write_sync; submit_bh(WRITE, tbh); } /* Synchronize the mft mirror now if not @sync. */ if (is_mft && !sync) goto do_mirror; do_wait: /* Wait on i/o completion of buffers. 
*/ for (i = 0; i < nr_bhs; i++) { tbh = bhs[i]; if (!tbh) continue; wait_on_buffer(tbh); if (unlikely(!buffer_uptodate(tbh))) { ntfs_error(vol->sb, "I/O error while writing ntfs " "record buffer (inode 0x%lx, " "attribute type 0x%x, page index " "0x%lx, page offset 0x%lx)! Unmount " "and run chkdsk.", vi->i_ino, ni->type, page->index, bh_offset(tbh)); if (!err || err == -ENOMEM) err = -EIO; /* * Set the buffer uptodate so the page and buffer * states do not become out of sync. */ set_buffer_uptodate(tbh); } } /* If @sync, now synchronize the mft mirror. */ if (is_mft && sync) { do_mirror: for (i = 0; i < nr_bhs; i++) { unsigned long mft_no; unsigned int ofs; /* * Skip buffers which are not at the beginning of * records. */ if (i % bhs_per_rec) continue; tbh = bhs[i]; /* Skip removed buffers (and hence records). */ if (!tbh) continue; ofs = bh_offset(tbh); /* Get the mft record number. */ mft_no = (((s64)page->index << PAGE_CACHE_SHIFT) + ofs) >> rec_size_bits; if (mft_no < vol->mftmirr_size) ntfs_sync_mft_mirror(vol, mft_no, (MFT_RECORD*)(kaddr + ofs), sync); } if (!sync) goto do_wait; } /* Remove the mst protection fixups again. */ for (i = 0; i < nr_bhs; i++) { if (!(i % bhs_per_rec)) { tbh = bhs[i]; if (!tbh) continue; post_write_mst_fixup((NTFS_RECORD*)(kaddr + bh_offset(tbh))); } } flush_dcache_page(page); unm_done: /* Unlock any locked inodes. */ while (nr_locked_nis-- > 0) { ntfs_inode *tni, *base_tni; tni = locked_nis[nr_locked_nis]; /* Get the base inode. */ mutex_lock(&tni->extent_lock); if (tni->nr_extents >= 0) base_tni = tni; else { base_tni = tni->ext.base_ntfs_ino; BUG_ON(!base_tni); } mutex_unlock(&tni->extent_lock); ntfs_debug("Unlocking %s inode 0x%lx.", tni == base_tni ? "base" : "extent", tni->mft_no); mutex_unlock(&tni->mrec_lock); atomic_dec(&tni->count); iput(VFS_I(base_tni)); } SetPageUptodate(page); kunmap(page); done: if (unlikely(err && err != -ENOMEM)) { /* * Set page error if there is only one ntfs record in the page. 
* Otherwise we would loose per-record granularity. */ if (ni->itype.index.block_size == PAGE_CACHE_SIZE) SetPageError(page); NVolSetErrors(vol); } if (page_is_dirty) { ntfs_debug("Page still contains one or more dirty ntfs " "records. Redirtying the page starting at " "record 0x%lx.", page->index << (PAGE_CACHE_SHIFT - rec_size_bits)); redirty_page_for_writepage(wbc, page); unlock_page(page); } else { /* * Keep the VM happy. This must be done otherwise the * radix-tree tag PAGECACHE_TAG_DIRTY remains set even though * the page is clean. */ BUG_ON(PageWriteback(page)); set_page_writeback(page); unlock_page(page); end_page_writeback(page); } if (likely(!err)) ntfs_debug("Done."); return err; } /** * ntfs_writepage - write a @page to the backing store * @page: page cache page to write out * @wbc: writeback control structure * * This is called from the VM when it wants to have a dirty ntfs page cache * page cleaned. The VM has already locked the page and marked it clean. * * For non-resident attributes, ntfs_writepage() writes the @page by calling * the ntfs version of the generic block_write_full_page() function, * ntfs_write_block(), which in turn if necessary creates and writes the * buffers associated with the page asynchronously. * * For resident attributes, OTOH, ntfs_writepage() writes the @page by copying * the data to the mft record (which at this stage is most likely in memory). * The mft record is then marked dirty and written out asynchronously via the * vfs inode dirty code path for the inode the mft record belongs to or via the * vm page dirty code path for the page the mft record is in. * * Based on ntfs_readpage() and fs/buffer.c::block_write_full_page(). * * Return 0 on success and -errno on error. 
*/ static int ntfs_writepage(struct page *page, struct writeback_control *wbc) { loff_t i_size; struct inode *vi = page->mapping->host; ntfs_inode *base_ni = NULL, *ni = NTFS_I(vi); char *addr; ntfs_attr_search_ctx *ctx = NULL; MFT_RECORD *m = NULL; u32 attr_len; int err; retry_writepage: BUG_ON(!PageLocked(page)); i_size = i_size_read(vi); /* Is the page fully outside i_size? (truncate in progress) */ if (unlikely(page->index >= (i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT)) { /* * The page may have dirty, unmapped buffers. Make them * freeable here, so the page does not leak. */ block_invalidatepage(page, 0); unlock_page(page); ntfs_debug("Write outside i_size - truncated?"); return 0; } /* * Only $DATA attributes can be encrypted and only unnamed $DATA * attributes can be compressed. Index root can have the flags set but * this means to create compressed/encrypted files, not that the * attribute is compressed/encrypted. Note we need to check for * AT_INDEX_ALLOCATION since this is the type of both directory and * index inodes. */ if (ni->type != AT_INDEX_ALLOCATION) { /* If file is encrypted, deny access, just like NT4. */ if (NInoEncrypted(ni)) { unlock_page(page); BUG_ON(ni->type != AT_DATA); ntfs_debug("Denying write access to encrypted file."); return -EACCES; } /* Compressed data streams are handled in compress.c. */ if (NInoNonResident(ni) && NInoCompressed(ni)) { BUG_ON(ni->type != AT_DATA); BUG_ON(ni->name_len); // TODO: Implement and replace this with // return ntfs_write_compressed_block(page); unlock_page(page); ntfs_error(vi->i_sb, "Writing to compressed files is " "not supported yet. Sorry."); return -EOPNOTSUPP; } // TODO: Implement and remove this check. if (NInoNonResident(ni) && NInoSparse(ni)) { unlock_page(page); ntfs_error(vi->i_sb, "Writing to sparse files is not " "supported yet. 
Sorry."); return -EOPNOTSUPP; } } /* NInoNonResident() == NInoIndexAllocPresent() */ if (NInoNonResident(ni)) { /* We have to zero every time due to mmap-at-end-of-file. */ if (page->index >= (i_size >> PAGE_CACHE_SHIFT)) { /* The page straddles i_size. */ unsigned int ofs = i_size & ~PAGE_CACHE_MASK; zero_user_segment(page, ofs, PAGE_CACHE_SIZE); } /* Handle mst protected attributes. */ if (NInoMstProtected(ni)) return ntfs_write_mst_block(page, wbc); /* Normal, non-resident data stream. */ return ntfs_write_block(page, wbc); } /* * Attribute is resident, implying it is not compressed, encrypted, or * mst protected. This also means the attribute is smaller than an mft * record and hence smaller than a page, so can simply return error on * any pages with index above 0. Note the attribute can actually be * marked compressed but if it is resident the actual data is not * compressed so we are ok to ignore the compressed flag here. */ BUG_ON(page_has_buffers(page)); BUG_ON(!PageUptodate(page)); if (unlikely(page->index > 0)) { ntfs_error(vi->i_sb, "BUG()! page->index (0x%lx) > 0. " "Aborting write.", page->index); BUG_ON(PageWriteback(page)); set_page_writeback(page); unlock_page(page); end_page_writeback(page); return -EIO; } if (!NInoAttr(ni)) base_ni = ni; else base_ni = ni->ext.base_ntfs_ino; /* Map, pin, and lock the mft record. */ m = map_mft_record(base_ni); if (IS_ERR(m)) { err = PTR_ERR(m); m = NULL; ctx = NULL; goto err_out; } /* * If a parallel write made the attribute non-resident, drop the mft * record and retry the writepage. */ if (unlikely(NInoNonResident(ni))) { unmap_mft_record(base_ni); goto retry_writepage; } ctx = ntfs_attr_get_search_ctx(base_ni, m); if (unlikely(!ctx)) { err = -ENOMEM; goto err_out; } err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len, CASE_SENSITIVE, 0, NULL, 0, ctx); if (unlikely(err)) goto err_out; /* * Keep the VM happy. 
This must be done otherwise the radix-tree tag * PAGECACHE_TAG_DIRTY remains set even though the page is clean. */ BUG_ON(PageWriteback(page)); set_page_writeback(page); unlock_page(page); attr_len = le32_to_cpu(ctx->attr->data.resident.value_length); i_size = i_size_read(vi); if (unlikely(attr_len > i_size)) { /* Race with shrinking truncate or a failed truncate. */ attr_len = i_size; /* * If the truncate failed, fix it up now. If a concurrent * truncate, we do its job, so it does not have to do anything. */ err = ntfs_resident_attr_value_resize(ctx->mrec, ctx->attr, attr_len); /* Shrinking cannot fail. */ BUG_ON(err); } addr = kmap_atomic(page); /* Copy the data from the page to the mft record. */ memcpy((u8*)ctx->attr + le16_to_cpu(ctx->attr->data.resident.value_offset), addr, attr_len); /* Zero out of bounds area in the page cache page. */ memset(addr + attr_len, 0, PAGE_CACHE_SIZE - attr_len); kunmap_atomic(addr); flush_dcache_page(page); flush_dcache_mft_record_page(ctx->ntfs_ino); /* We are done with the page. */ end_page_writeback(page); /* Finally, mark the mft record dirty, so it gets written back. */ mark_mft_record_dirty(ctx->ntfs_ino); ntfs_attr_put_search_ctx(ctx); unmap_mft_record(base_ni); return 0; err_out: if (err == -ENOMEM) { ntfs_warning(vi->i_sb, "Error allocating memory. Redirtying " "page so we try again later."); /* * Put the page back on mapping->dirty_pages, but leave its * buffers' dirty state as-is. */ redirty_page_for_writepage(wbc, page); err = 0; } else { ntfs_error(vi->i_sb, "Resident attribute write failed with " "error %i.", err); SetPageError(page); NVolSetErrors(ni->vol); } unlock_page(page); if (ctx) ntfs_attr_put_search_ctx(ctx); if (m) unmap_mft_record(base_ni); return err; } #endif /* NTFS_RW */ /** * ntfs_aops - general address space operations for inodes and attributes */ const struct address_space_operations ntfs_aops = { .readpage = ntfs_readpage, /* Fill page with data. 
*/ #ifdef NTFS_RW .writepage = ntfs_writepage, /* Write dirty page to disk. */ #endif /* NTFS_RW */ .migratepage = buffer_migrate_page, /* Move a page cache page from one physical page to an other. */ .error_remove_page = generic_error_remove_page, }; /** * ntfs_mst_aops - general address space operations for mst protecteed inodes * and attributes */ const struct address_space_operations ntfs_mst_aops = { .readpage = ntfs_readpage, /* Fill page with data. */ #ifdef NTFS_RW .writepage = ntfs_writepage, /* Write dirty page to disk. */ .set_page_dirty = __set_page_dirty_nobuffers, /* Set the page dirty without touching the buffers belonging to the page. */ #endif /* NTFS_RW */ .migratepage = buffer_migrate_page, /* Move a page cache page from one physical page to an other. */ .error_remove_page = generic_error_remove_page, }; #ifdef NTFS_RW /** * mark_ntfs_record_dirty - mark an ntfs record dirty * @page: page containing the ntfs record to mark dirty * @ofs: byte offset within @page at which the ntfs record begins * * Set the buffers and the page in which the ntfs record is located dirty. * * The latter also marks the vfs inode the ntfs record belongs to dirty * (I_DIRTY_PAGES only). * * If the page does not have buffers, we create them and set them uptodate. * The page may not be locked which is why we need to handle the buffers under * the mapping->private_lock. Once the buffers are marked dirty we no longer * need the lock since try_to_free_buffers() does not free dirty buffers. 
*/ void mark_ntfs_record_dirty(struct page *page, const unsigned int ofs) { struct address_space *mapping = page->mapping; ntfs_inode *ni = NTFS_I(mapping->host); struct buffer_head *bh, *head, *buffers_to_free = NULL; unsigned int end, bh_size, bh_ofs; BUG_ON(!PageUptodate(page)); end = ofs + ni->itype.index.block_size; bh_size = VFS_I(ni)->i_sb->s_blocksize; spin_lock(&mapping->private_lock); if (unlikely(!page_has_buffers(page))) { spin_unlock(&mapping->private_lock); bh = head = alloc_page_buffers(page, bh_size, 1); spin_lock(&mapping->private_lock); if (likely(!page_has_buffers(page))) { struct buffer_head *tail; do { set_buffer_uptodate(bh); tail = bh; bh = bh->b_this_page; } while (bh); tail->b_this_page = head; attach_page_buffers(page, head); } else buffers_to_free = bh; } bh = head = page_buffers(page); BUG_ON(!bh); do { bh_ofs = bh_offset(bh); if (bh_ofs + bh_size <= ofs) continue; if (unlikely(bh_ofs >= end)) break; set_buffer_dirty(bh); } while ((bh = bh->b_this_page) != head); spin_unlock(&mapping->private_lock); __set_page_dirty_nobuffers(page); if (unlikely(buffers_to_free)) { do { bh = buffers_to_free->b_this_page; free_buffer_head(buffers_to_free); buffers_to_free = bh; } while (buffers_to_free); } } #endif /* NTFS_RW */
gpl-2.0
N30nHaCkZ/android-platform_frameworks_base
drivers/net/ethernet/cisco/enic/vnic_cq.c
9592
2830
/* * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved. * Copyright 2007 Nuova Systems, Inc. All rights reserved. * * This program is free software; you may redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/types.h> #include <linux/pci.h> #include "vnic_dev.h" #include "vnic_cq.h" void vnic_cq_free(struct vnic_cq *cq) { vnic_dev_free_desc_ring(cq->vdev, &cq->ring); cq->ctrl = NULL; } int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index, unsigned int desc_count, unsigned int desc_size) { int err; cq->index = index; cq->vdev = vdev; cq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_CQ, index); if (!cq->ctrl) { pr_err("Failed to hook CQ[%d] resource\n", index); return -EINVAL; } err = vnic_dev_alloc_desc_ring(vdev, &cq->ring, desc_count, desc_size); if (err) return err; return 0; } void vnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable, unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail, unsigned int cq_tail_color, unsigned int interrupt_enable, unsigned int cq_entry_enable, unsigned int cq_message_enable, unsigned int interrupt_offset, u64 cq_message_addr) { u64 paddr; paddr = (u64)cq->ring.base_addr | VNIC_PADDR_TARGET; writeq(paddr, &cq->ctrl->ring_base); iowrite32(cq->ring.desc_count, &cq->ctrl->ring_size); iowrite32(flow_control_enable, 
&cq->ctrl->flow_control_enable); iowrite32(color_enable, &cq->ctrl->color_enable); iowrite32(cq_head, &cq->ctrl->cq_head); iowrite32(cq_tail, &cq->ctrl->cq_tail); iowrite32(cq_tail_color, &cq->ctrl->cq_tail_color); iowrite32(interrupt_enable, &cq->ctrl->interrupt_enable); iowrite32(cq_entry_enable, &cq->ctrl->cq_entry_enable); iowrite32(cq_message_enable, &cq->ctrl->cq_message_enable); iowrite32(interrupt_offset, &cq->ctrl->interrupt_offset); writeq(cq_message_addr, &cq->ctrl->cq_message_addr); cq->interrupt_offset = interrupt_offset; } void vnic_cq_clean(struct vnic_cq *cq) { cq->to_clean = 0; cq->last_color = 0; iowrite32(0, &cq->ctrl->cq_head); iowrite32(0, &cq->ctrl->cq_tail); iowrite32(1, &cq->ctrl->cq_tail_color); vnic_dev_clear_desc_ring(&cq->ring); }
gpl-2.0
weitengchu/u-boot-2009.11
examples/standalone/interrupt.c
121
2193
/* * (C) Copyright 2006 * Detlev Zundel, DENX Software Engineering, dzu@denx.de. * * See file CREDITS for list of people who contributed to this * project. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, * MA 02111-1307 USA * * This is a very simple standalone application demonstrating * catching IRQs on the MPC52xx architecture. * * The interrupt to be intercepted can be specified as an argument * to the application. Specifying nothing will intercept IRQ1 on the * MPC5200 platform. On the CR825 carrier board from MicroSys this * maps to the ABORT switch :) * * Note that the specified vector is only a logical number specified * by the respective header file. 
*/ #include <common.h> #include <exports.h> #include <config.h> #if defined(CONFIG_MPC5xxx) #define DFL_IRQ MPC5XXX_IRQ1 #else #define DFL_IRQ 0 #endif static void irq_handler (void *arg); int interrupt (int argc, char *argv[]) { int c, irq = -1; app_startup (argv); if (argc > 1) irq = simple_strtoul (argv[1], NULL, 0); if ((irq < 0) || (irq > NR_IRQS)) irq = DFL_IRQ; printf ("Installing handler for irq vector %d and doing busy wait\n", irq); printf ("Press 'q' to quit\n"); /* Install interrupt handler */ install_hdlr (irq, irq_handler, NULL); while ((c = getc ()) != 'q') { printf ("Ok, ok, I am still alive!\n"); } free_hdlr (irq); printf ("\nInterrupt handler has been uninstalled\n"); return (0); } /* * Handler for interrupt */ static void irq_handler (void *arg) { /* just for demonstration */ printf ("+"); }
gpl-2.0
Kingcom/pcsx2
3rdparty/wxwidgets3.0/src/common/ctrlsub.cpp
121
8322
/////////////////////////////////////////////////////////////////////////////// // Name: src/common/ctrlsub.cpp // Purpose: wxItemContainer implementation // Author: Vadim Zeitlin // Modified by: // Created: 22.10.99 // Copyright: (c) wxWidgets team // Licence: wxWindows licence /////////////////////////////////////////////////////////////////////////////// // ============================================================================ // declarations // ============================================================================ // ---------------------------------------------------------------------------- // headers // ---------------------------------------------------------------------------- // For compilers that support precompilation, includes "wx.h". #include "wx/wxprec.h" #ifdef __BORLANDC__ #pragma hdrstop #endif #if wxUSE_CONTROLS #ifndef WX_PRECOMP #include "wx/ctrlsub.h" #include "wx/arrstr.h" #endif IMPLEMENT_ABSTRACT_CLASS(wxControlWithItems, wxControl) // ============================================================================ // wxItemContainerImmutable implementation // ============================================================================ wxItemContainerImmutable::~wxItemContainerImmutable() { // this destructor is required for Darwin } // ---------------------------------------------------------------------------- // selection // ---------------------------------------------------------------------------- wxString wxItemContainerImmutable::GetStringSelection() const { wxString s; int sel = GetSelection(); if ( sel != wxNOT_FOUND ) s = GetString((unsigned int)sel); return s; } bool wxItemContainerImmutable::SetStringSelection(const wxString& s) { const int sel = FindString(s); if ( sel == wxNOT_FOUND ) return false; SetSelection(sel); return true; } wxArrayString wxItemContainerImmutable::GetStrings() const { wxArrayString result; const unsigned int count = GetCount(); result.Alloc(count); for ( unsigned int n = 0; n < count; n++ ) 
result.Add(GetString(n)); return result; } // ============================================================================ // wxItemContainer implementation // ============================================================================ wxItemContainer::~wxItemContainer() { // this destructor is required for Darwin } // ---------------------------------------------------------------------------- // deleting items // ---------------------------------------------------------------------------- void wxItemContainer::Clear() { if ( HasClientObjectData() ) { const unsigned count = GetCount(); for ( unsigned i = 0; i < count; ++i ) ResetItemClientObject(i); } SetClientDataType(wxClientData_None); DoClear(); } void wxItemContainer::Delete(unsigned int pos) { wxCHECK_RET( pos < GetCount(), wxT("invalid index") ); if ( HasClientObjectData() ) ResetItemClientObject(pos); DoDeleteOneItem(pos); if ( IsEmpty() ) { SetClientDataType(wxClientData_None); } } // ---------------------------------------------------------------------------- // // ---------------------------------------------------------------------------- int wxItemContainer::DoInsertItemsInLoop(const wxArrayStringsAdapter& items, unsigned int pos, void **clientData, wxClientDataType type) { int n = wxNOT_FOUND; const unsigned int count = items.GetCount(); for ( unsigned int i = 0; i < count; ++i ) { n = DoInsertOneItem(items[i], pos++); if ( n == wxNOT_FOUND ) break; AssignNewItemClientData(n, clientData, i, type); } return n; } int wxItemContainer::DoInsertOneItem(const wxString& WXUNUSED(item), unsigned int WXUNUSED(pos)) { wxFAIL_MSG( wxT("Must be overridden if DoInsertItemsInLoop() is used") ); return wxNOT_FOUND; } // ---------------------------------------------------------------------------- // client data // ---------------------------------------------------------------------------- void wxItemContainer::SetClientObject(unsigned int n, wxClientData *data) { wxASSERT_MSG( !HasClientUntypedData(), wxT("can't 
have both object and void client data") ); wxCHECK_RET( IsValid(n), "Invalid index passed to SetClientObject()" ); if ( HasClientObjectData() ) { wxClientData * clientDataOld = static_cast<wxClientData *>(DoGetItemClientData(n)); if ( clientDataOld ) delete clientDataOld; } else // didn't have any client data so far { // now we have object client data DoInitItemClientData(); SetClientDataType(wxClientData_Object); } DoSetItemClientData(n, data); } wxClientData *wxItemContainer::GetClientObject(unsigned int n) const { wxCHECK_MSG( HasClientObjectData(), NULL, wxT("this window doesn't have object client data") ); wxCHECK_MSG( IsValid(n), NULL, "Invalid index passed to GetClientObject()" ); return static_cast<wxClientData *>(DoGetItemClientData(n)); } wxClientData *wxItemContainer::DetachClientObject(unsigned int n) { wxClientData * const data = GetClientObject(n); if ( data ) { // reset the pointer as we don't own it any more DoSetItemClientData(n, NULL); } return data; } void wxItemContainer::SetClientData(unsigned int n, void *data) { if ( !HasClientData() ) { DoInitItemClientData(); SetClientDataType(wxClientData_Void); } wxASSERT_MSG( HasClientUntypedData(), wxT("can't have both object and void client data") ); wxCHECK_RET( IsValid(n), "Invalid index passed to SetClientData()" ); DoSetItemClientData(n, data); } void *wxItemContainer::GetClientData(unsigned int n) const { wxCHECK_MSG( HasClientUntypedData(), NULL, wxT("this window doesn't have void client data") ); wxCHECK_MSG( IsValid(n), NULL, "Invalid index passed to GetClientData()" ); return DoGetItemClientData(n); } void wxItemContainer::AssignNewItemClientData(unsigned int pos, void **clientData, unsigned int n, wxClientDataType type) { switch ( type ) { case wxClientData_Object: SetClientObject ( pos, (reinterpret_cast<wxClientData **>(clientData))[n] ); break; case wxClientData_Void: SetClientData(pos, clientData[n]); break; default: wxFAIL_MSG( wxT("unknown client data type") ); // fall through case 
wxClientData_None: // nothing to do break; } } void wxItemContainer::ResetItemClientObject(unsigned int n) { wxClientData * const data = GetClientObject(n); if ( data ) { delete data; DoSetItemClientData(n, NULL); } } // ============================================================================ // wxControlWithItems implementation // ============================================================================ void wxControlWithItemsBase::InitCommandEventWithItems(wxCommandEvent& event, int n) { InitCommandEvent(event); if ( n != wxNOT_FOUND ) { if ( HasClientObjectData() ) event.SetClientObject(GetClientObject(n)); else if ( HasClientUntypedData() ) event.SetClientData(GetClientData(n)); } } void wxControlWithItemsBase::SendSelectionChangedEvent(wxEventType eventType) { const int n = GetSelection(); if ( n == wxNOT_FOUND ) return; wxCommandEvent event(eventType, m_windowId); event.SetInt(n); event.SetEventObject(this); event.SetString(GetStringSelection()); InitCommandEventWithItems(event, n); HandleWindowEvent(event); } #endif // wxUSE_CONTROLS
gpl-2.0
michaelspacejames/android_kernel_cyanogen_msm8916
drivers/soc/qcom/hvc.c
889
2435
/* Copyright (c) 2014, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/io.h> #include <linux/export.h> #include <linux/err.h> #include <asm/compiler.h> #include <soc/qcom/hvc.h> #define HVC_RET_SUCCESS 0 #define HVC_RET_ERROR -1 #define HVC_RET_EFUNCNOSUPPORT -2 #define HVC_RET_EINVALARCH -3 #define HVC_RET_EMEMMAP -4 #define HVC_RET_EMEMUNMAP -5 #define HVC_RET_EMEMPERM -6 static int hvc_to_linux_errno(int errno) { switch (errno) { case HVC_RET_SUCCESS: return 0; case HVC_RET_ERROR: return -EIO; case HVC_RET_EFUNCNOSUPPORT: return -EOPNOTSUPP; case HVC_RET_EINVALARCH: case HVC_RET_EMEMMAP: case HVC_RET_EMEMUNMAP: return -EINVAL; case HVC_RET_EMEMPERM: return -EPERM; }; return -EINVAL; } #ifdef CONFIG_ARM64 static int __hvc(u64 x0, u64 x1, u64 x2, u64 x3, u64 x4, u64 x5, u64 x6, u64 x7, u64 *ret1, u64 *ret2, u64 *ret3) { register u64 r0 asm("x0") = x0; register u64 r1 asm("x1") = x1; register u64 r2 asm("x2") = x2; register u64 r3 asm("x3") = x3; register u64 r4 asm("x4") = x4; register u64 r5 asm("x5") = x5; register u64 r6 asm("x6") = x6; register u64 r7 asm("x7") = x7; asm volatile( __asmeq("%0", "x0") __asmeq("%1", "x1") __asmeq("%2", "x2") __asmeq("%3", "x3") __asmeq("%4", "x4") __asmeq("%5", "x5") __asmeq("%6", "x6") __asmeq("%7", "x7") "hvc #0\n" : "+r" (r0), "+r" (r1), "+r" (r2), "+r" (r3) : "r" (r4), "r" (r5), "r" (r6), "r" (r7)); *ret1 = r1; *ret2 = r2; *ret3 = r3; return r0; } #else static int __hvc(u64 x0, u64 x1, u64 x2, u64 x3, u64 x4, u64 x5, u64 x6, u64 x7, u64 *ret1, u64 *ret2, u64 *ret3) { 
return 0; } #endif int hvc(u64 func_id, struct hvc_desc *desc) { int ret; if (!desc) return -EINVAL; ret = __hvc(func_id, desc->arg[0], desc->arg[1], desc->arg[2], desc->arg[3], desc->arg[4], desc->arg[5], 0, &desc->ret[0], &desc->ret[1], &desc->ret[2]); return hvc_to_linux_errno(ret); } EXPORT_SYMBOL(hvc);
gpl-2.0
sktjdgns1189/android_kernel_samsung_SHW-M190S
fs/coda/pioctl.c
889
2315
/* * Pioctl operations for Coda. * Original version: (C) 1996 Peter Braam * Rewritten for Linux 2.1: (C) 1997 Carnegie Mellon University * * Carnegie Mellon encourages users of this code to contribute improvements * to the Coda project. Contact Peter Braam <coda@cs.cmu.edu>. */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/time.h> #include <linux/fs.h> #include <linux/stat.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/namei.h> #include <linux/module.h> #include <asm/uaccess.h> #include <linux/coda.h> #include <linux/coda_linux.h> #include <linux/coda_fs_i.h> #include <linux/coda_psdev.h> #include <linux/smp_lock.h> /* pioctl ops */ static int coda_ioctl_permission(struct inode *inode, int mask); static long coda_pioctl(struct file *filp, unsigned int cmd, unsigned long user_data); /* exported from this file */ const struct inode_operations coda_ioctl_inode_operations = { .permission = coda_ioctl_permission, .setattr = coda_setattr, }; const struct file_operations coda_ioctl_operations = { .owner = THIS_MODULE, .unlocked_ioctl = coda_pioctl, }; /* the coda pioctl inode ops */ static int coda_ioctl_permission(struct inode *inode, int mask) { return (mask & MAY_EXEC) ? -EACCES : 0; } static long coda_pioctl(struct file *filp, unsigned int cmd, unsigned long user_data) { struct path path; int error; struct PioctlData data; struct inode *inode = filp->f_dentry->d_inode; struct inode *target_inode = NULL; struct coda_inode_info *cnp; lock_kernel(); /* get the Pioctl data arguments from user space */ if (copy_from_user(&data, (void __user *)user_data, sizeof(data))) { error = -EINVAL; goto out; } /* * Look up the pathname. 
Note that the pathname is in * user memory, and namei takes care of this */ if (data.follow) error = user_path(data.path, &path); else error = user_lpath(data.path, &path); if (error) goto out; else target_inode = path.dentry->d_inode; /* return if it is not a Coda inode */ if (target_inode->i_sb != inode->i_sb) { path_put(&path); error = -EINVAL; goto out; } /* now proceed to make the upcall */ cnp = ITOC(target_inode); error = venus_pioctl(inode->i_sb, &(cnp->c_fid), cmd, &data); path_put(&path); out: unlock_kernel(); return error; }
gpl-2.0
bcnice20/Shooter-2.6.35_mr
drivers/s390/char/tape_3590.c
889
46458
/* * drivers/s390/char/tape_3590.c * tape device discipline for 3590 tapes. * * Copyright IBM Corp. 2001, 2009 * Author(s): Stefan Bader <shbader@de.ibm.com> * Michael Holzheu <holzheu@de.ibm.com> * Martin Schwidefsky <schwidefsky@de.ibm.com> */ #define KMSG_COMPONENT "tape_3590" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #include <linux/module.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/bio.h> #include <asm/ebcdic.h> #define TAPE_DBF_AREA tape_3590_dbf #define BUFSIZE 512 /* size of buffers for dynamic generated messages */ #include "tape.h" #include "tape_std.h" #include "tape_3590.h" /* * Pointer to debug area. */ debug_info_t *TAPE_DBF_AREA = NULL; EXPORT_SYMBOL(TAPE_DBF_AREA); /******************************************************************* * Error Recovery fuctions: * - Read Opposite: implemented * - Read Device (buffered) log: BRA * - Read Library log: BRA * - Swap Devices: BRA * - Long Busy: implemented * - Special Intercept: BRA * - Read Alternate: implemented *******************************************************************/ static const char *tape_3590_msg[TAPE_3590_MAX_MSG] = { [0x00] = "", [0x10] = "Lost Sense", [0x11] = "Assigned Elsewhere", [0x12] = "Allegiance Reset", [0x13] = "Shared Access Violation", [0x20] = "Command Reject", [0x21] = "Configuration Error", [0x22] = "Protection Exception", [0x23] = "Write Protect", [0x24] = "Write Length", [0x25] = "Read-Only Format", [0x31] = "Beginning of Partition", [0x33] = "End of Partition", [0x34] = "End of Data", [0x35] = "Block not found", [0x40] = "Device Intervention", [0x41] = "Loader Intervention", [0x42] = "Library Intervention", [0x50] = "Write Error", [0x51] = "Erase Error", [0x52] = "Formatting Error", [0x53] = "Read Error", [0x54] = "Unsupported Format", [0x55] = "No Formatting", [0x56] = "Positioning lost", [0x57] = "Read Length", [0x60] = "Unsupported Medium", [0x61] = "Medium Length Error", [0x62] = "Medium removed", [0x64] = "Load Check", [0x65] = "Unload 
Check", [0x70] = "Equipment Check", [0x71] = "Bus out Check", [0x72] = "Protocol Error", [0x73] = "Interface Error", [0x74] = "Overrun", [0x75] = "Halt Signal", [0x90] = "Device fenced", [0x91] = "Device Path fenced", [0xa0] = "Volume misplaced", [0xa1] = "Volume inaccessible", [0xa2] = "Volume in input", [0xa3] = "Volume ejected", [0xa4] = "All categories reserved", [0xa5] = "Duplicate Volume", [0xa6] = "Library Manager Offline", [0xa7] = "Library Output Station full", [0xa8] = "Vision System non-operational", [0xa9] = "Library Manager Equipment Check", [0xaa] = "Library Equipment Check", [0xab] = "All Library Cells full", [0xac] = "No Cleaner Volumes in Library", [0xad] = "I/O Station door open", [0xae] = "Subsystem environmental alert", }; static int crypt_supported(struct tape_device *device) { return TAPE390_CRYPT_SUPPORTED(TAPE_3590_CRYPT_INFO(device)); } static int crypt_enabled(struct tape_device *device) { return TAPE390_CRYPT_ON(TAPE_3590_CRYPT_INFO(device)); } static void ext_to_int_kekl(struct tape390_kekl *in, struct tape3592_kekl *out) { int i; memset(out, 0, sizeof(*out)); if (in->type == TAPE390_KEKL_TYPE_HASH) out->flags |= 0x40; if (in->type_on_tape == TAPE390_KEKL_TYPE_HASH) out->flags |= 0x80; strncpy(out->label, in->label, 64); for (i = strlen(in->label); i < sizeof(out->label); i++) out->label[i] = ' '; ASCEBC(out->label, sizeof(out->label)); } static void int_to_ext_kekl(struct tape3592_kekl *in, struct tape390_kekl *out) { memset(out, 0, sizeof(*out)); if(in->flags & 0x40) out->type = TAPE390_KEKL_TYPE_HASH; else out->type = TAPE390_KEKL_TYPE_LABEL; if(in->flags & 0x80) out->type_on_tape = TAPE390_KEKL_TYPE_HASH; else out->type_on_tape = TAPE390_KEKL_TYPE_LABEL; memcpy(out->label, in->label, sizeof(in->label)); EBCASC(out->label, sizeof(in->label)); strim(out->label); } static void int_to_ext_kekl_pair(struct tape3592_kekl_pair *in, struct tape390_kekl_pair *out) { if (in->count == 0) { out->kekl[0].type = TAPE390_KEKL_TYPE_NONE; 
out->kekl[0].type_on_tape = TAPE390_KEKL_TYPE_NONE; out->kekl[1].type = TAPE390_KEKL_TYPE_NONE; out->kekl[1].type_on_tape = TAPE390_KEKL_TYPE_NONE; } else if (in->count == 1) { int_to_ext_kekl(&in->kekl[0], &out->kekl[0]); out->kekl[1].type = TAPE390_KEKL_TYPE_NONE; out->kekl[1].type_on_tape = TAPE390_KEKL_TYPE_NONE; } else if (in->count == 2) { int_to_ext_kekl(&in->kekl[0], &out->kekl[0]); int_to_ext_kekl(&in->kekl[1], &out->kekl[1]); } else { printk("Invalid KEKL number: %d\n", in->count); BUG(); } } static int check_ext_kekl(struct tape390_kekl *kekl) { if (kekl->type == TAPE390_KEKL_TYPE_NONE) goto invalid; if (kekl->type > TAPE390_KEKL_TYPE_HASH) goto invalid; if (kekl->type_on_tape == TAPE390_KEKL_TYPE_NONE) goto invalid; if (kekl->type_on_tape > TAPE390_KEKL_TYPE_HASH) goto invalid; if ((kekl->type == TAPE390_KEKL_TYPE_HASH) && (kekl->type_on_tape == TAPE390_KEKL_TYPE_LABEL)) goto invalid; return 0; invalid: return -EINVAL; } static int check_ext_kekl_pair(struct tape390_kekl_pair *kekls) { if (check_ext_kekl(&kekls->kekl[0])) goto invalid; if (check_ext_kekl(&kekls->kekl[1])) goto invalid; return 0; invalid: return -EINVAL; } /* * Query KEKLs */ static int tape_3592_kekl_query(struct tape_device *device, struct tape390_kekl_pair *ext_kekls) { struct tape_request *request; struct tape3592_kekl_query_order *order; struct tape3592_kekl_query_data *int_kekls; int rc; DBF_EVENT(6, "tape3592_kekl_query\n"); int_kekls = kmalloc(sizeof(*int_kekls), GFP_KERNEL|GFP_DMA); if (!int_kekls) return -ENOMEM; request = tape_alloc_request(2, sizeof(*order)); if (IS_ERR(request)) { rc = PTR_ERR(request); goto fail_malloc; } order = request->cpdata; memset(order,0,sizeof(*order)); order->code = 0xe2; order->max_count = 2; request->op = TO_KEKL_QUERY; tape_ccw_cc(request->cpaddr, PERF_SUBSYS_FUNC, sizeof(*order), order); tape_ccw_end(request->cpaddr + 1, READ_SS_DATA, sizeof(*int_kekls), int_kekls); rc = tape_do_io(device, request); if (rc) goto fail_request; 
int_to_ext_kekl_pair(&int_kekls->kekls, ext_kekls); rc = 0; fail_request: tape_free_request(request); fail_malloc: kfree(int_kekls); return rc; } /* * IOCTL: Query KEKLs */ static int tape_3592_ioctl_kekl_query(struct tape_device *device, unsigned long arg) { int rc; struct tape390_kekl_pair *ext_kekls; DBF_EVENT(6, "tape_3592_ioctl_kekl_query\n"); if (!crypt_supported(device)) return -ENOSYS; if (!crypt_enabled(device)) return -EUNATCH; ext_kekls = kmalloc(sizeof(*ext_kekls), GFP_KERNEL); if (!ext_kekls) return -ENOMEM; rc = tape_3592_kekl_query(device, ext_kekls); if (rc != 0) goto fail; if (copy_to_user((char __user *) arg, ext_kekls, sizeof(*ext_kekls))) { rc = -EFAULT; goto fail; } rc = 0; fail: kfree(ext_kekls); return rc; } static int tape_3590_mttell(struct tape_device *device, int mt_count); /* * Set KEKLs */ static int tape_3592_kekl_set(struct tape_device *device, struct tape390_kekl_pair *ext_kekls) { struct tape_request *request; struct tape3592_kekl_set_order *order; DBF_EVENT(6, "tape3592_kekl_set\n"); if (check_ext_kekl_pair(ext_kekls)) { DBF_EVENT(6, "invalid kekls\n"); return -EINVAL; } if (tape_3590_mttell(device, 0) != 0) return -EBADSLT; request = tape_alloc_request(1, sizeof(*order)); if (IS_ERR(request)) return PTR_ERR(request); order = request->cpdata; memset(order, 0, sizeof(*order)); order->code = 0xe3; order->kekls.count = 2; ext_to_int_kekl(&ext_kekls->kekl[0], &order->kekls.kekl[0]); ext_to_int_kekl(&ext_kekls->kekl[1], &order->kekls.kekl[1]); request->op = TO_KEKL_SET; tape_ccw_end(request->cpaddr, PERF_SUBSYS_FUNC, sizeof(*order), order); return tape_do_io_free(device, request); } /* * IOCTL: Set KEKLs */ static int tape_3592_ioctl_kekl_set(struct tape_device *device, unsigned long arg) { int rc; struct tape390_kekl_pair *ext_kekls; DBF_EVENT(6, "tape_3592_ioctl_kekl_set\n"); if (!crypt_supported(device)) return -ENOSYS; if (!crypt_enabled(device)) return -EUNATCH; ext_kekls = kmalloc(sizeof(*ext_kekls), GFP_KERNEL); if (!ext_kekls) 
return -ENOMEM; if (copy_from_user(ext_kekls, (char __user *)arg, sizeof(*ext_kekls))) { rc = -EFAULT; goto out; } rc = tape_3592_kekl_set(device, ext_kekls); out: kfree(ext_kekls); return rc; } /* * Enable encryption */ static int tape_3592_enable_crypt(struct tape_device *device) { struct tape_request *request; char *data; DBF_EVENT(6, "tape_3592_enable_crypt\n"); if (!crypt_supported(device)) return -ENOSYS; request = tape_alloc_request(2, 72); if (IS_ERR(request)) return PTR_ERR(request); data = request->cpdata; memset(data,0,72); data[0] = 0x05; data[36 + 0] = 0x03; data[36 + 1] = 0x03; data[36 + 4] = 0x40; data[36 + 6] = 0x01; data[36 + 14] = 0x2f; data[36 + 18] = 0xc3; data[36 + 35] = 0x72; request->op = TO_CRYPT_ON; tape_ccw_cc(request->cpaddr, MODE_SET_CB, 36, data); tape_ccw_end(request->cpaddr + 1, MODE_SET_CB, 36, data + 36); return tape_do_io_free(device, request); } /* * Disable encryption */ static int tape_3592_disable_crypt(struct tape_device *device) { struct tape_request *request; char *data; DBF_EVENT(6, "tape_3592_disable_crypt\n"); if (!crypt_supported(device)) return -ENOSYS; request = tape_alloc_request(2, 72); if (IS_ERR(request)) return PTR_ERR(request); data = request->cpdata; memset(data,0,72); data[0] = 0x05; data[36 + 0] = 0x03; data[36 + 1] = 0x03; data[36 + 35] = 0x32; request->op = TO_CRYPT_OFF; tape_ccw_cc(request->cpaddr, MODE_SET_CB, 36, data); tape_ccw_end(request->cpaddr + 1, MODE_SET_CB, 36, data + 36); return tape_do_io_free(device, request); } /* * IOCTL: Set encryption status */ static int tape_3592_ioctl_crypt_set(struct tape_device *device, unsigned long arg) { struct tape390_crypt_info info; DBF_EVENT(6, "tape_3592_ioctl_crypt_set\n"); if (!crypt_supported(device)) return -ENOSYS; if (copy_from_user(&info, (char __user *)arg, sizeof(info))) return -EFAULT; if (info.status & ~TAPE390_CRYPT_ON_MASK) return -EINVAL; if (info.status & TAPE390_CRYPT_ON_MASK) return tape_3592_enable_crypt(device); else return 
tape_3592_disable_crypt(device); } static int tape_3590_sense_medium(struct tape_device *device); /* * IOCTL: Query enryption status */ static int tape_3592_ioctl_crypt_query(struct tape_device *device, unsigned long arg) { DBF_EVENT(6, "tape_3592_ioctl_crypt_query\n"); if (!crypt_supported(device)) return -ENOSYS; tape_3590_sense_medium(device); if (copy_to_user((char __user *) arg, &TAPE_3590_CRYPT_INFO(device), sizeof(TAPE_3590_CRYPT_INFO(device)))) return -EFAULT; else return 0; } /* * 3590 IOCTL Overload */ static int tape_3590_ioctl(struct tape_device *device, unsigned int cmd, unsigned long arg) { switch (cmd) { case TAPE390_DISPLAY: { struct display_struct disp; if (copy_from_user(&disp, (char __user *) arg, sizeof(disp))) return -EFAULT; return tape_std_display(device, &disp); } case TAPE390_KEKL_SET: return tape_3592_ioctl_kekl_set(device, arg); case TAPE390_KEKL_QUERY: return tape_3592_ioctl_kekl_query(device, arg); case TAPE390_CRYPT_SET: return tape_3592_ioctl_crypt_set(device, arg); case TAPE390_CRYPT_QUERY: return tape_3592_ioctl_crypt_query(device, arg); default: return -EINVAL; /* no additional ioctls */ } } /* * SENSE Medium: Get Sense data about medium state */ static int tape_3590_sense_medium(struct tape_device *device) { struct tape_request *request; request = tape_alloc_request(1, 128); if (IS_ERR(request)) return PTR_ERR(request); request->op = TO_MSEN; tape_ccw_end(request->cpaddr, MEDIUM_SENSE, 128, request->cpdata); return tape_do_io_free(device, request); } /* * MTTELL: Tell block. Return the number of block relative to current file. */ static int tape_3590_mttell(struct tape_device *device, int mt_count) { __u64 block_id; int rc; rc = tape_std_read_block_id(device, &block_id); if (rc) return rc; return block_id >> 32; } /* * MTSEEK: seek to the specified block. 
*/ static int tape_3590_mtseek(struct tape_device *device, int count) { struct tape_request *request; DBF_EVENT(6, "xsee id: %x\n", count); request = tape_alloc_request(3, 4); if (IS_ERR(request)) return PTR_ERR(request); request->op = TO_LBL; tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte); *(__u32 *) request->cpdata = count; tape_ccw_cc(request->cpaddr + 1, LOCATE, 4, request->cpdata); tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL); return tape_do_io_free(device, request); } /* * Read Opposite Error Recovery Function: * Used, when Read Forward does not work */ static void tape_3590_read_opposite(struct tape_device *device, struct tape_request *request) { struct tape_3590_disc_data *data; /* * We have allocated 4 ccws in tape_std_read, so we can now * transform the request to a read backward, followed by a * forward space block. */ request->op = TO_RBA; tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte); data = device->discdata; tape_ccw_cc_idal(request->cpaddr + 1, data->read_back_op, device->char_data.idal_buf); tape_ccw_cc(request->cpaddr + 2, FORSPACEBLOCK, 0, NULL); tape_ccw_end(request->cpaddr + 3, NOP, 0, NULL); DBF_EVENT(6, "xrop ccwg\n"); } /* * Read Attention Msg * This should be done after an interrupt with attention bit (0x80) * in device state. * * After a "read attention message" request there are two possible * results: * * 1. A unit check is presented, when attention sense is present (e.g. when * a medium has been unloaded). The attention sense comes then * together with the unit check. The recovery action is either "retry" * (in case there is an attention message pending) or "permanent error". * * 2. The attention msg is written to the "read subsystem data" buffer. * In this case we probably should print it to the console. 
*/ static int tape_3590_read_attmsg(struct tape_device *device) { struct tape_request *request; char *buf; request = tape_alloc_request(3, 4096); if (IS_ERR(request)) return PTR_ERR(request); request->op = TO_READ_ATTMSG; buf = request->cpdata; buf[0] = PREP_RD_SS_DATA; buf[6] = RD_ATTMSG; /* read att msg */ tape_ccw_cc(request->cpaddr, PERFORM_SS_FUNC, 12, buf); tape_ccw_cc(request->cpaddr + 1, READ_SS_DATA, 4096 - 12, buf + 12); tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL); return tape_do_io_free(device, request); } /* * These functions are used to schedule follow-up actions from within an * interrupt context (like unsolicited interrupts). */ struct work_handler_data { struct tape_device *device; enum tape_op op; struct work_struct work; }; static void tape_3590_work_handler(struct work_struct *work) { struct work_handler_data *p = container_of(work, struct work_handler_data, work); switch (p->op) { case TO_MSEN: tape_3590_sense_medium(p->device); break; case TO_READ_ATTMSG: tape_3590_read_attmsg(p->device); break; case TO_CRYPT_ON: tape_3592_enable_crypt(p->device); break; case TO_CRYPT_OFF: tape_3592_disable_crypt(p->device); break; default: DBF_EVENT(3, "T3590: work handler undefined for " "operation 0x%02x\n", p->op); } tape_put_device(p->device); kfree(p); } static int tape_3590_schedule_work(struct tape_device *device, enum tape_op op) { struct work_handler_data *p; if ((p = kzalloc(sizeof(*p), GFP_ATOMIC)) == NULL) return -ENOMEM; INIT_WORK(&p->work, tape_3590_work_handler); p->device = tape_get_device(device); p->op = op; schedule_work(&p->work); return 0; } #ifdef CONFIG_S390_TAPE_BLOCK /* * Tape Block READ */ static struct tape_request * tape_3590_bread(struct tape_device *device, struct request *req) { struct tape_request *request; struct ccw1 *ccw; int count = 0, start_block; unsigned off; char *dst; struct bio_vec *bv; struct req_iterator iter; DBF_EVENT(6, "xBREDid:"); start_block = blk_rq_pos(req) >> TAPEBLOCK_HSEC_S2B; DBF_EVENT(6, "start_block 
= %i\n", start_block); rq_for_each_segment(bv, req, iter) count += bv->bv_len >> (TAPEBLOCK_HSEC_S2B + 9); request = tape_alloc_request(2 + count + 1, 4); if (IS_ERR(request)) return request; request->op = TO_BLOCK; *(__u32 *) request->cpdata = start_block; ccw = request->cpaddr; ccw = tape_ccw_cc(ccw, MODE_SET_DB, 1, device->modeset_byte); /* * We always setup a nop after the mode set ccw. This slot is * used in tape_std_check_locate to insert a locate ccw if the * current tape position doesn't match the start block to be read. */ ccw = tape_ccw_cc(ccw, NOP, 0, NULL); rq_for_each_segment(bv, req, iter) { dst = page_address(bv->bv_page) + bv->bv_offset; for (off = 0; off < bv->bv_len; off += TAPEBLOCK_HSEC_SIZE) { ccw->flags = CCW_FLAG_CC; ccw->cmd_code = READ_FORWARD; ccw->count = TAPEBLOCK_HSEC_SIZE; set_normalized_cda(ccw, (void *) __pa(dst)); ccw++; dst += TAPEBLOCK_HSEC_SIZE; } BUG_ON(off > bv->bv_len); } ccw = tape_ccw_end(ccw, NOP, 0, NULL); DBF_EVENT(6, "xBREDccwg\n"); return request; } static void tape_3590_free_bread(struct tape_request *request) { struct ccw1 *ccw; /* Last ccw is a nop and doesn't need clear_normalized_cda */ for (ccw = request->cpaddr; ccw->flags & CCW_FLAG_CC; ccw++) if (ccw->cmd_code == READ_FORWARD) clear_normalized_cda(ccw); tape_free_request(request); } /* * check_locate is called just before the tape request is passed to * the common io layer for execution. It has to check the current * tape position and insert a locate ccw if it doesn't match the * start block for the request. */ static void tape_3590_check_locate(struct tape_device *device, struct tape_request *request) { __u32 *start_block; start_block = (__u32 *) request->cpdata; if (*start_block != device->blk_data.block_position) { /* Add the start offset of the file to get the real block. 
*/ *start_block += device->bof; tape_ccw_cc(request->cpaddr + 1, LOCATE, 4, request->cpdata); } } #endif static void tape_3590_med_state_set(struct tape_device *device, struct tape_3590_med_sense *sense) { struct tape390_crypt_info *c_info; c_info = &TAPE_3590_CRYPT_INFO(device); DBF_EVENT(6, "medium state: %x:%x\n", sense->macst, sense->masst); switch (sense->macst) { case 0x04: case 0x05: case 0x06: tape_med_state_set(device, MS_UNLOADED); TAPE_3590_CRYPT_INFO(device).medium_status = 0; return; case 0x08: case 0x09: tape_med_state_set(device, MS_LOADED); break; default: tape_med_state_set(device, MS_UNKNOWN); return; } c_info->medium_status |= TAPE390_MEDIUM_LOADED_MASK; if (sense->flags & MSENSE_CRYPT_MASK) { DBF_EVENT(6, "Medium is encrypted (%04x)\n", sense->flags); c_info->medium_status |= TAPE390_MEDIUM_ENCRYPTED_MASK; } else { DBF_EVENT(6, "Medium is not encrypted %04x\n", sense->flags); c_info->medium_status &= ~TAPE390_MEDIUM_ENCRYPTED_MASK; } } /* * The done handler is called at device/channel end and wakes up the sleeping * process */ static int tape_3590_done(struct tape_device *device, struct tape_request *request) { struct tape_3590_disc_data *disc_data; DBF_EVENT(6, "%s done\n", tape_op_verbose[request->op]); disc_data = device->discdata; switch (request->op) { case TO_BSB: case TO_BSF: case TO_DSE: case TO_FSB: case TO_FSF: case TO_LBL: case TO_RFO: case TO_RBA: case TO_REW: case TO_WRI: case TO_WTM: case TO_BLOCK: case TO_LOAD: tape_med_state_set(device, MS_LOADED); break; case TO_RUN: tape_med_state_set(device, MS_UNLOADED); tape_3590_schedule_work(device, TO_CRYPT_OFF); break; case TO_MSEN: tape_3590_med_state_set(device, request->cpdata); break; case TO_CRYPT_ON: TAPE_3590_CRYPT_INFO(device).status |= TAPE390_CRYPT_ON_MASK; *(device->modeset_byte) |= 0x03; break; case TO_CRYPT_OFF: TAPE_3590_CRYPT_INFO(device).status &= ~TAPE390_CRYPT_ON_MASK; *(device->modeset_byte) &= ~0x03; break; case TO_RBI: /* RBI seems to succeed even without medium 
loaded. */ case TO_NOP: /* Same to NOP. */ case TO_READ_CONFIG: case TO_READ_ATTMSG: case TO_DIS: case TO_ASSIGN: case TO_UNASSIGN: case TO_SIZE: case TO_KEKL_SET: case TO_KEKL_QUERY: case TO_RDC: break; } return TAPE_IO_SUCCESS; } /* * This fuction is called, when error recovery was successfull */ static inline int tape_3590_erp_succeded(struct tape_device *device, struct tape_request *request) { DBF_EVENT(3, "Error Recovery successful for %s\n", tape_op_verbose[request->op]); return tape_3590_done(device, request); } /* * This fuction is called, when error recovery was not successfull */ static inline int tape_3590_erp_failed(struct tape_device *device, struct tape_request *request, struct irb *irb, int rc) { DBF_EVENT(3, "Error Recovery failed for %s\n", tape_op_verbose[request->op]); tape_dump_sense_dbf(device, request, irb); return rc; } /* * Error Recovery do retry */ static inline int tape_3590_erp_retry(struct tape_device *device, struct tape_request *request, struct irb *irb) { DBF_EVENT(2, "Retry: %s\n", tape_op_verbose[request->op]); tape_dump_sense_dbf(device, request, irb); return TAPE_IO_RETRY; } /* * Handle unsolicited interrupts */ static int tape_3590_unsolicited_irq(struct tape_device *device, struct irb *irb) { if (irb->scsw.cmd.dstat == DEV_STAT_CHN_END) /* Probably result of halt ssch */ return TAPE_IO_PENDING; else if (irb->scsw.cmd.dstat == 0x85) /* Device Ready */ DBF_EVENT(3, "unsol.irq! tape ready: %08x\n", device->cdev_id); else if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) { tape_3590_schedule_work(device, TO_READ_ATTMSG); } else { DBF_EVENT(3, "unsol.irq! 
dev end: %08x\n", device->cdev_id); tape_dump_sense_dbf(device, NULL, irb); } /* check medium state */ tape_3590_schedule_work(device, TO_MSEN); return TAPE_IO_SUCCESS; } /* * Basic Recovery routine */ static int tape_3590_erp_basic(struct tape_device *device, struct tape_request *request, struct irb *irb, int rc) { struct tape_3590_sense *sense; sense = (struct tape_3590_sense *) irb->ecw; switch (sense->bra) { case SENSE_BRA_PER: return tape_3590_erp_failed(device, request, irb, rc); case SENSE_BRA_CONT: return tape_3590_erp_succeded(device, request); case SENSE_BRA_RE: return tape_3590_erp_retry(device, request, irb); case SENSE_BRA_DRE: return tape_3590_erp_failed(device, request, irb, rc); default: BUG(); return TAPE_IO_STOP; } } /* * RDL: Read Device (buffered) log */ static int tape_3590_erp_read_buf_log(struct tape_device *device, struct tape_request *request, struct irb *irb) { /* * We just do the basic error recovery at the moment (retry). * Perhaps in the future, we read the log and dump it somewhere... */ return tape_3590_erp_basic(device, request, irb, -EIO); } /* * SWAP: Swap Devices */ static int tape_3590_erp_swap(struct tape_device *device, struct tape_request *request, struct irb *irb) { /* * This error recovery should swap the tapes * if the original has a problem. The operation * should proceed with the new tape... this * should probably be done in user space! 
*/ dev_warn (&device->cdev->dev, "The tape medium must be loaded into a " "different tape unit\n"); return tape_3590_erp_basic(device, request, irb, -EIO); } /* * LBY: Long Busy */ static int tape_3590_erp_long_busy(struct tape_device *device, struct tape_request *request, struct irb *irb) { DBF_EVENT(6, "Device is busy\n"); return TAPE_IO_LONG_BUSY; } /* * SPI: Special Intercept */ static int tape_3590_erp_special_interrupt(struct tape_device *device, struct tape_request *request, struct irb *irb) { return tape_3590_erp_basic(device, request, irb, -EIO); } /* * RDA: Read Alternate */ static int tape_3590_erp_read_alternate(struct tape_device *device, struct tape_request *request, struct irb *irb) { struct tape_3590_disc_data *data; /* * The issued Read Backward or Read Previous command is not * supported by the device * The recovery action should be to issue another command: * Read Revious: if Read Backward is not supported * Read Backward: if Read Previous is not supported */ data = device->discdata; if (data->read_back_op == READ_PREVIOUS) { DBF_EVENT(2, "(%08x): No support for READ_PREVIOUS command\n", device->cdev_id); data->read_back_op = READ_BACKWARD; } else { DBF_EVENT(2, "(%08x): No support for READ_BACKWARD command\n", device->cdev_id); data->read_back_op = READ_PREVIOUS; } tape_3590_read_opposite(device, request); return tape_3590_erp_retry(device, request, irb); } /* * Error Recovery read opposite */ static int tape_3590_erp_read_opposite(struct tape_device *device, struct tape_request *request, struct irb *irb) { switch (request->op) { case TO_RFO: /* * We did read forward, but the data could not be read. * We will read backward and then skip forward again. 
*/ tape_3590_read_opposite(device, request); return tape_3590_erp_retry(device, request, irb); case TO_RBA: /* We tried to read forward and backward, but hat no success */ return tape_3590_erp_failed(device, request, irb, -EIO); break; default: return tape_3590_erp_failed(device, request, irb, -EIO); } } /* * Print an MIM (Media Information Message) (message code f0) */ static void tape_3590_print_mim_msg_f0(struct tape_device *device, struct irb *irb) { struct tape_3590_sense *sense; char *exception, *service; exception = kmalloc(BUFSIZE, GFP_ATOMIC); service = kmalloc(BUFSIZE, GFP_ATOMIC); if (!exception || !service) goto out_nomem; sense = (struct tape_3590_sense *) irb->ecw; /* Exception Message */ switch (sense->fmt.f70.emc) { case 0x02: snprintf(exception, BUFSIZE, "Data degraded"); break; case 0x03: snprintf(exception, BUFSIZE, "Data degraded in partion %i", sense->fmt.f70.mp); break; case 0x04: snprintf(exception, BUFSIZE, "Medium degraded"); break; case 0x05: snprintf(exception, BUFSIZE, "Medium degraded in partition %i", sense->fmt.f70.mp); break; case 0x06: snprintf(exception, BUFSIZE, "Block 0 Error"); break; case 0x07: snprintf(exception, BUFSIZE, "Medium Exception 0x%02x", sense->fmt.f70.md); break; default: snprintf(exception, BUFSIZE, "0x%02x", sense->fmt.f70.emc); break; } /* Service Message */ switch (sense->fmt.f70.smc) { case 0x02: snprintf(service, BUFSIZE, "Reference Media maintenance " "procedure %i", sense->fmt.f70.md); break; default: snprintf(service, BUFSIZE, "0x%02x", sense->fmt.f70.smc); break; } dev_warn (&device->cdev->dev, "Tape media information: exception %s, " "service %s\n", exception, service); out_nomem: kfree(exception); kfree(service); } /* * Print an I/O Subsystem Service Information Message (message code f1) */ static void tape_3590_print_io_sim_msg_f1(struct tape_device *device, struct irb *irb) { struct tape_3590_sense *sense; char *exception, *service; exception = kmalloc(BUFSIZE, GFP_ATOMIC); service = kmalloc(BUFSIZE, 
GFP_ATOMIC); if (!exception || !service) goto out_nomem; sense = (struct tape_3590_sense *) irb->ecw; /* Exception Message */ switch (sense->fmt.f71.emc) { case 0x01: snprintf(exception, BUFSIZE, "Effect of failure is unknown"); break; case 0x02: snprintf(exception, BUFSIZE, "CU Exception - no performance " "impact"); break; case 0x03: snprintf(exception, BUFSIZE, "CU Exception on channel " "interface 0x%02x", sense->fmt.f71.md[0]); break; case 0x04: snprintf(exception, BUFSIZE, "CU Exception on device path " "0x%02x", sense->fmt.f71.md[0]); break; case 0x05: snprintf(exception, BUFSIZE, "CU Exception on library path " "0x%02x", sense->fmt.f71.md[0]); break; case 0x06: snprintf(exception, BUFSIZE, "CU Exception on node 0x%02x", sense->fmt.f71.md[0]); break; case 0x07: snprintf(exception, BUFSIZE, "CU Exception on partition " "0x%02x", sense->fmt.f71.md[0]); break; default: snprintf(exception, BUFSIZE, "0x%02x", sense->fmt.f71.emc); } /* Service Message */ switch (sense->fmt.f71.smc) { case 0x01: snprintf(service, BUFSIZE, "Repair impact is unknown"); break; case 0x02: snprintf(service, BUFSIZE, "Repair will not impact cu " "performance"); break; case 0x03: if (sense->fmt.f71.mdf == 0) snprintf(service, BUFSIZE, "Repair will disable node " "0x%x on CU", sense->fmt.f71.md[1]); else snprintf(service, BUFSIZE, "Repair will disable " "nodes (0x%x-0x%x) on CU", sense->fmt.f71.md[1], sense->fmt.f71.md[2]); break; case 0x04: if (sense->fmt.f71.mdf == 0) snprintf(service, BUFSIZE, "Repair will disable " "channel path 0x%x on CU", sense->fmt.f71.md[1]); else snprintf(service, BUFSIZE, "Repair will disable cannel" " paths (0x%x-0x%x) on CU", sense->fmt.f71.md[1], sense->fmt.f71.md[2]); break; case 0x05: if (sense->fmt.f71.mdf == 0) snprintf(service, BUFSIZE, "Repair will disable device" " path 0x%x on CU", sense->fmt.f71.md[1]); else snprintf(service, BUFSIZE, "Repair will disable device" " paths (0x%x-0x%x) on CU", sense->fmt.f71.md[1], sense->fmt.f71.md[2]); break; case 
0x06: if (sense->fmt.f71.mdf == 0) snprintf(service, BUFSIZE, "Repair will disable " "library path 0x%x on CU", sense->fmt.f71.md[1]); else snprintf(service, BUFSIZE, "Repair will disable " "library paths (0x%x-0x%x) on CU", sense->fmt.f71.md[1], sense->fmt.f71.md[2]); break; case 0x07: snprintf(service, BUFSIZE, "Repair will disable access to CU"); break; default: snprintf(service, BUFSIZE, "0x%02x", sense->fmt.f71.smc); } dev_warn (&device->cdev->dev, "I/O subsystem information: exception" " %s, service %s\n", exception, service); out_nomem: kfree(exception); kfree(service); } /* * Print an Device Subsystem Service Information Message (message code f2) */ static void tape_3590_print_dev_sim_msg_f2(struct tape_device *device, struct irb *irb) { struct tape_3590_sense *sense; char *exception, *service; exception = kmalloc(BUFSIZE, GFP_ATOMIC); service = kmalloc(BUFSIZE, GFP_ATOMIC); if (!exception || !service) goto out_nomem; sense = (struct tape_3590_sense *) irb->ecw; /* Exception Message */ switch (sense->fmt.f71.emc) { case 0x01: snprintf(exception, BUFSIZE, "Effect of failure is unknown"); break; case 0x02: snprintf(exception, BUFSIZE, "DV Exception - no performance" " impact"); break; case 0x03: snprintf(exception, BUFSIZE, "DV Exception on channel " "interface 0x%02x", sense->fmt.f71.md[0]); break; case 0x04: snprintf(exception, BUFSIZE, "DV Exception on loader 0x%02x", sense->fmt.f71.md[0]); break; case 0x05: snprintf(exception, BUFSIZE, "DV Exception on message display" " 0x%02x", sense->fmt.f71.md[0]); break; case 0x06: snprintf(exception, BUFSIZE, "DV Exception in tape path"); break; case 0x07: snprintf(exception, BUFSIZE, "DV Exception in drive"); break; default: snprintf(exception, BUFSIZE, "0x%02x", sense->fmt.f71.emc); } /* Service Message */ switch (sense->fmt.f71.smc) { case 0x01: snprintf(service, BUFSIZE, "Repair impact is unknown"); break; case 0x02: snprintf(service, BUFSIZE, "Repair will not impact device " "performance"); break; case 0x03: if 
(sense->fmt.f71.mdf == 0) snprintf(service, BUFSIZE, "Repair will disable " "channel path 0x%x on DV", sense->fmt.f71.md[1]); else snprintf(service, BUFSIZE, "Repair will disable " "channel path (0x%x-0x%x) on DV", sense->fmt.f71.md[1], sense->fmt.f71.md[2]); break; case 0x04: if (sense->fmt.f71.mdf == 0) snprintf(service, BUFSIZE, "Repair will disable " "interface 0x%x on DV", sense->fmt.f71.md[1]); else snprintf(service, BUFSIZE, "Repair will disable " "interfaces (0x%x-0x%x) on DV", sense->fmt.f71.md[1], sense->fmt.f71.md[2]); break; case 0x05: if (sense->fmt.f71.mdf == 0) snprintf(service, BUFSIZE, "Repair will disable loader" " 0x%x on DV", sense->fmt.f71.md[1]); else snprintf(service, BUFSIZE, "Repair will disable loader" " (0x%x-0x%x) on DV", sense->fmt.f71.md[1], sense->fmt.f71.md[2]); break; case 0x07: snprintf(service, BUFSIZE, "Repair will disable access to DV"); break; case 0x08: if (sense->fmt.f71.mdf == 0) snprintf(service, BUFSIZE, "Repair will disable " "message display 0x%x on DV", sense->fmt.f71.md[1]); else snprintf(service, BUFSIZE, "Repair will disable " "message displays (0x%x-0x%x) on DV", sense->fmt.f71.md[1], sense->fmt.f71.md[2]); break; case 0x09: snprintf(service, BUFSIZE, "Clean DV"); break; default: snprintf(service, BUFSIZE, "0x%02x", sense->fmt.f71.smc); } dev_warn (&device->cdev->dev, "Device subsystem information: exception" " %s, service %s\n", exception, service); out_nomem: kfree(exception); kfree(service); } /* * Print standard ERA Message */ static void tape_3590_print_era_msg(struct tape_device *device, struct irb *irb) { struct tape_3590_sense *sense; sense = (struct tape_3590_sense *) irb->ecw; if (sense->mc == 0) return; if ((sense->mc > 0) && (sense->mc < TAPE_3590_MAX_MSG)) { if (tape_3590_msg[sense->mc] != NULL) dev_warn (&device->cdev->dev, "The tape unit has " "issued sense message %s\n", tape_3590_msg[sense->mc]); else dev_warn (&device->cdev->dev, "The tape unit has " "issued an unknown sense message code 0x%x\n", 
sense->mc); return; } if (sense->mc == 0xf0) { /* Standard Media Information Message */ dev_warn (&device->cdev->dev, "MIM SEV=%i, MC=%02x, ES=%x/%x, " "RC=%02x-%04x-%02x\n", sense->fmt.f70.sev, sense->mc, sense->fmt.f70.emc, sense->fmt.f70.smc, sense->fmt.f70.refcode, sense->fmt.f70.mid, sense->fmt.f70.fid); tape_3590_print_mim_msg_f0(device, irb); return; } if (sense->mc == 0xf1) { /* Standard I/O Subsystem Service Information Message */ dev_warn (&device->cdev->dev, "IOSIM SEV=%i, DEVTYPE=3590/%02x," " MC=%02x, ES=%x/%x, REF=0x%04x-0x%04x-0x%04x\n", sense->fmt.f71.sev, device->cdev->id.dev_model, sense->mc, sense->fmt.f71.emc, sense->fmt.f71.smc, sense->fmt.f71.refcode1, sense->fmt.f71.refcode2, sense->fmt.f71.refcode3); tape_3590_print_io_sim_msg_f1(device, irb); return; } if (sense->mc == 0xf2) { /* Standard Device Service Information Message */ dev_warn (&device->cdev->dev, "DEVSIM SEV=%i, DEVTYPE=3590/%02x" ", MC=%02x, ES=%x/%x, REF=0x%04x-0x%04x-0x%04x\n", sense->fmt.f71.sev, device->cdev->id.dev_model, sense->mc, sense->fmt.f71.emc, sense->fmt.f71.smc, sense->fmt.f71.refcode1, sense->fmt.f71.refcode2, sense->fmt.f71.refcode3); tape_3590_print_dev_sim_msg_f2(device, irb); return; } if (sense->mc == 0xf3) { /* Standard Library Service Information Message */ return; } dev_warn (&device->cdev->dev, "The tape unit has issued an unknown " "sense message code %x\n", sense->mc); } static int tape_3590_crypt_error(struct tape_device *device, struct tape_request *request, struct irb *irb) { u8 cu_rc, ekm_rc1; u16 ekm_rc2; u32 drv_rc; const char *bus_id; char *sense; sense = ((struct tape_3590_sense *) irb->ecw)->fmt.data; bus_id = dev_name(&device->cdev->dev); cu_rc = sense[0]; drv_rc = *((u32*) &sense[5]) & 0xffffff; ekm_rc1 = sense[9]; ekm_rc2 = *((u16*) &sense[10]); if ((cu_rc == 0) && (ekm_rc2 == 0xee31)) /* key not defined on EKM */ return tape_3590_erp_basic(device, request, irb, -EKEYREJECTED); if ((cu_rc == 1) || (cu_rc == 2)) /* No connection to EKM */ 
return tape_3590_erp_basic(device, request, irb, -ENOTCONN); dev_err (&device->cdev->dev, "The tape unit failed to obtain the " "encryption key from EKM\n"); return tape_3590_erp_basic(device, request, irb, -ENOKEY); } /* * 3590 error Recovery routine: * If possible, it tries to recover from the error. If this is not possible, * inform the user about the problem. */ static int tape_3590_unit_check(struct tape_device *device, struct tape_request *request, struct irb *irb) { struct tape_3590_sense *sense; int rc; #ifdef CONFIG_S390_TAPE_BLOCK if (request->op == TO_BLOCK) { /* * Recovery for block device requests. Set the block_position * to something invalid and retry. */ device->blk_data.block_position = -1; if (request->retries-- <= 0) return tape_3590_erp_failed(device, request, irb, -EIO); else return tape_3590_erp_retry(device, request, irb); } #endif sense = (struct tape_3590_sense *) irb->ecw; DBF_EVENT(6, "Unit Check: RQC = %x\n", sense->rc_rqc); /* * First check all RC-QRCs where we want to do something special * - "break": basic error recovery is done * - "goto out:": just print error message if available */ rc = -EIO; switch (sense->rc_rqc) { case 0x1110: tape_3590_print_era_msg(device, irb); return tape_3590_erp_read_buf_log(device, request, irb); case 0x2011: tape_3590_print_era_msg(device, irb); return tape_3590_erp_read_alternate(device, request, irb); case 0x2230: case 0x2231: tape_3590_print_era_msg(device, irb); return tape_3590_erp_special_interrupt(device, request, irb); case 0x2240: return tape_3590_crypt_error(device, request, irb); case 0x3010: DBF_EVENT(2, "(%08x): Backward at Beginning of Partition\n", device->cdev_id); return tape_3590_erp_basic(device, request, irb, -ENOSPC); case 0x3012: DBF_EVENT(2, "(%08x): Forward at End of Partition\n", device->cdev_id); return tape_3590_erp_basic(device, request, irb, -ENOSPC); case 0x3020: DBF_EVENT(2, "(%08x): End of Data Mark\n", device->cdev_id); return tape_3590_erp_basic(device, request, irb, 
-ENOSPC); case 0x3122: DBF_EVENT(2, "(%08x): Rewind Unload initiated\n", device->cdev_id); return tape_3590_erp_basic(device, request, irb, -EIO); case 0x3123: DBF_EVENT(2, "(%08x): Rewind Unload complete\n", device->cdev_id); tape_med_state_set(device, MS_UNLOADED); tape_3590_schedule_work(device, TO_CRYPT_OFF); return tape_3590_erp_basic(device, request, irb, 0); case 0x4010: /* * print additional msg since default msg * "device intervention" is not very meaningfull */ tape_med_state_set(device, MS_UNLOADED); tape_3590_schedule_work(device, TO_CRYPT_OFF); return tape_3590_erp_basic(device, request, irb, -ENOMEDIUM); case 0x4012: /* Device Long Busy */ /* XXX: Also use long busy handling here? */ DBF_EVENT(6, "(%08x): LONG BUSY\n", device->cdev_id); tape_3590_print_era_msg(device, irb); return tape_3590_erp_basic(device, request, irb, -EBUSY); case 0x4014: DBF_EVENT(6, "(%08x): Crypto LONG BUSY\n", device->cdev_id); return tape_3590_erp_long_busy(device, request, irb); case 0x5010: if (sense->rac == 0xd0) { /* Swap */ tape_3590_print_era_msg(device, irb); return tape_3590_erp_swap(device, request, irb); } if (sense->rac == 0x26) { /* Read Opposite */ tape_3590_print_era_msg(device, irb); return tape_3590_erp_read_opposite(device, request, irb); } return tape_3590_erp_basic(device, request, irb, -EIO); case 0x5020: case 0x5021: case 0x5022: case 0x5040: case 0x5041: case 0x5042: tape_3590_print_era_msg(device, irb); return tape_3590_erp_swap(device, request, irb); case 0x5110: case 0x5111: return tape_3590_erp_basic(device, request, irb, -EMEDIUMTYPE); case 0x5120: case 0x1120: tape_med_state_set(device, MS_UNLOADED); tape_3590_schedule_work(device, TO_CRYPT_OFF); return tape_3590_erp_basic(device, request, irb, -ENOMEDIUM); case 0x6020: return tape_3590_erp_basic(device, request, irb, -EMEDIUMTYPE); case 0x8011: return tape_3590_erp_basic(device, request, irb, -EPERM); case 0x8013: dev_warn (&device->cdev->dev, "A different host has privileged" " access to the 
tape unit\n"); return tape_3590_erp_basic(device, request, irb, -EPERM); default: return tape_3590_erp_basic(device, request, irb, -EIO); } } /* * 3590 interrupt handler: */ static int tape_3590_irq(struct tape_device *device, struct tape_request *request, struct irb *irb) { if (request == NULL) return tape_3590_unsolicited_irq(device, irb); if ((irb->scsw.cmd.dstat & DEV_STAT_UNIT_EXCEP) && (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) && (request->op == TO_WRI)) { /* Write at end of volume */ DBF_EVENT(2, "End of volume\n"); return tape_3590_erp_failed(device, request, irb, -ENOSPC); } if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) return tape_3590_unit_check(device, request, irb); if (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) { if (irb->scsw.cmd.dstat == DEV_STAT_UNIT_EXCEP) { if (request->op == TO_FSB || request->op == TO_BSB) request->rescnt++; else DBF_EVENT(5, "Unit Exception!\n"); } return tape_3590_done(device, request); } if (irb->scsw.cmd.dstat & DEV_STAT_CHN_END) { DBF_EVENT(2, "cannel end\n"); return TAPE_IO_PENDING; } if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) { DBF_EVENT(2, "Unit Attention when busy..\n"); return TAPE_IO_PENDING; } DBF_EVENT(6, "xunknownirq\n"); tape_dump_sense_dbf(device, request, irb); return TAPE_IO_STOP; } static int tape_3590_read_dev_chars(struct tape_device *device, struct tape_3590_rdc_data *rdc_data) { int rc; struct tape_request *request; request = tape_alloc_request(1, sizeof(*rdc_data)); if (IS_ERR(request)) return PTR_ERR(request); request->op = TO_RDC; tape_ccw_end(request->cpaddr, CCW_CMD_RDC, sizeof(*rdc_data), request->cpdata); rc = tape_do_io(device, request); if (rc == 0) memcpy(rdc_data, request->cpdata, sizeof(*rdc_data)); tape_free_request(request); return rc; } /* * Setup device function */ static int tape_3590_setup_device(struct tape_device *device) { int rc; struct tape_3590_disc_data *data; struct tape_3590_rdc_data *rdc_data; DBF_EVENT(6, "3590 device setup\n"); data = kzalloc(sizeof(struct 
tape_3590_disc_data), GFP_KERNEL | GFP_DMA); if (data == NULL) return -ENOMEM; data->read_back_op = READ_PREVIOUS; device->discdata = data; rdc_data = kmalloc(sizeof(*rdc_data), GFP_KERNEL | GFP_DMA); if (!rdc_data) { rc = -ENOMEM; goto fail_kmalloc; } rc = tape_3590_read_dev_chars(device, rdc_data); if (rc) { DBF_LH(3, "Read device characteristics failed!\n"); goto fail_rdc_data; } rc = tape_std_assign(device); if (rc) goto fail_rdc_data; if (rdc_data->data[31] == 0x13) { data->crypt_info.capability |= TAPE390_CRYPT_SUPPORTED_MASK; tape_3592_disable_crypt(device); } else { DBF_EVENT(6, "Device has NO crypto support\n"); } /* Try to find out if medium is loaded */ rc = tape_3590_sense_medium(device); if (rc) { DBF_LH(3, "3590 medium sense returned %d\n", rc); goto fail_rdc_data; } return 0; fail_rdc_data: kfree(rdc_data); fail_kmalloc: kfree(data); return rc; } /* * Cleanup device function */ static void tape_3590_cleanup_device(struct tape_device *device) { flush_scheduled_work(); tape_std_unassign(device); kfree(device->discdata); device->discdata = NULL; } /* * List of 3590 magnetic tape commands. 
*/ static tape_mtop_fn tape_3590_mtop[TAPE_NR_MTOPS] = { [MTRESET] = tape_std_mtreset, [MTFSF] = tape_std_mtfsf, [MTBSF] = tape_std_mtbsf, [MTFSR] = tape_std_mtfsr, [MTBSR] = tape_std_mtbsr, [MTWEOF] = tape_std_mtweof, [MTREW] = tape_std_mtrew, [MTOFFL] = tape_std_mtoffl, [MTNOP] = tape_std_mtnop, [MTRETEN] = tape_std_mtreten, [MTBSFM] = tape_std_mtbsfm, [MTFSFM] = tape_std_mtfsfm, [MTEOM] = tape_std_mteom, [MTERASE] = tape_std_mterase, [MTRAS1] = NULL, [MTRAS2] = NULL, [MTRAS3] = NULL, [MTSETBLK] = tape_std_mtsetblk, [MTSETDENSITY] = NULL, [MTSEEK] = tape_3590_mtseek, [MTTELL] = tape_3590_mttell, [MTSETDRVBUFFER] = NULL, [MTFSS] = NULL, [MTBSS] = NULL, [MTWSM] = NULL, [MTLOCK] = NULL, [MTUNLOCK] = NULL, [MTLOAD] = tape_std_mtload, [MTUNLOAD] = tape_std_mtunload, [MTCOMPRESSION] = tape_std_mtcompression, [MTSETPART] = NULL, [MTMKPART] = NULL }; /* * Tape discipline structure for 3590. */ static struct tape_discipline tape_discipline_3590 = { .owner = THIS_MODULE, .setup_device = tape_3590_setup_device, .cleanup_device = tape_3590_cleanup_device, .process_eov = tape_std_process_eov, .irq = tape_3590_irq, .read_block = tape_std_read_block, .write_block = tape_std_write_block, #ifdef CONFIG_S390_TAPE_BLOCK .bread = tape_3590_bread, .free_bread = tape_3590_free_bread, .check_locate = tape_3590_check_locate, #endif .ioctl_fn = tape_3590_ioctl, .mtop_array = tape_3590_mtop }; static struct ccw_device_id tape_3590_ids[] = { {CCW_DEVICE_DEVTYPE(0x3590, 0, 0x3590, 0), .driver_info = tape_3590}, {CCW_DEVICE_DEVTYPE(0x3592, 0, 0x3592, 0), .driver_info = tape_3592}, { /* end of list */ } }; static int tape_3590_online(struct ccw_device *cdev) { return tape_generic_online(dev_get_drvdata(&cdev->dev), &tape_discipline_3590); } static struct ccw_driver tape_3590_driver = { .name = "tape_3590", .owner = THIS_MODULE, .ids = tape_3590_ids, .probe = tape_generic_probe, .remove = tape_generic_remove, .set_offline = tape_generic_offline, .set_online = tape_3590_online, .freeze = 
tape_generic_pm_suspend, }; /* * Setup discipline structure. */ static int tape_3590_init(void) { int rc; TAPE_DBF_AREA = debug_register("tape_3590", 2, 2, 4 * sizeof(long)); debug_register_view(TAPE_DBF_AREA, &debug_sprintf_view); #ifdef DBF_LIKE_HELL debug_set_level(TAPE_DBF_AREA, 6); #endif DBF_EVENT(3, "3590 init\n"); /* Register driver for 3590 tapes. */ rc = ccw_driver_register(&tape_3590_driver); if (rc) DBF_EVENT(3, "3590 init failed\n"); else DBF_EVENT(3, "3590 registered\n"); return rc; } static void tape_3590_exit(void) { ccw_driver_unregister(&tape_3590_driver); debug_unregister(TAPE_DBF_AREA); } MODULE_DEVICE_TABLE(ccw, tape_3590_ids); MODULE_AUTHOR("(C) 2001,2006 IBM Corporation"); MODULE_DESCRIPTION("Linux on zSeries channel attached 3590 tape device driver"); MODULE_LICENSE("GPL"); module_init(tape_3590_init); module_exit(tape_3590_exit);
gpl-2.0
Clouded/linux-rt-rpi2
drivers/mfd/adp5520.c
1145
8554
/* * Base driver for Analog Devices ADP5520/ADP5501 MFD PMICs * LCD Backlight: drivers/video/backlight/adp5520_bl * LEDs : drivers/led/leds-adp5520 * GPIO : drivers/gpio/adp5520-gpio (ADP5520 only) * Keys : drivers/input/keyboard/adp5520-keys (ADP5520 only) * * Copyright 2009 Analog Devices Inc. * * Derived from da903x: * Copyright (C) 2008 Compulab, Ltd. * Mike Rapoport <mike@compulab.co.il> * * Copyright (C) 2006-2008 Marvell International Ltd. * Eric Miao <eric.miao@marvell.com> * * Licensed under the GPL-2 or later. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/err.h> #include <linux/i2c.h> #include <linux/mfd/adp5520.h> struct adp5520_chip { struct i2c_client *client; struct device *dev; struct mutex lock; struct blocking_notifier_head notifier_list; int irq; unsigned long id; uint8_t mode; }; static int __adp5520_read(struct i2c_client *client, int reg, uint8_t *val) { int ret; ret = i2c_smbus_read_byte_data(client, reg); if (ret < 0) { dev_err(&client->dev, "failed reading at 0x%02x\n", reg); return ret; } *val = (uint8_t)ret; return 0; } static int __adp5520_write(struct i2c_client *client, int reg, uint8_t val) { int ret; ret = i2c_smbus_write_byte_data(client, reg, val); if (ret < 0) { dev_err(&client->dev, "failed writing 0x%02x to 0x%02x\n", val, reg); return ret; } return 0; } static int __adp5520_ack_bits(struct i2c_client *client, int reg, uint8_t bit_mask) { struct adp5520_chip *chip = i2c_get_clientdata(client); uint8_t reg_val; int ret; mutex_lock(&chip->lock); ret = __adp5520_read(client, reg, &reg_val); if (!ret) { reg_val |= bit_mask; ret = __adp5520_write(client, reg, reg_val); } mutex_unlock(&chip->lock); return ret; } int adp5520_write(struct device *dev, int reg, uint8_t val) { return __adp5520_write(to_i2c_client(dev), reg, val); } EXPORT_SYMBOL_GPL(adp5520_write); int adp5520_read(struct device *dev, int 
reg, uint8_t *val) { return __adp5520_read(to_i2c_client(dev), reg, val); } EXPORT_SYMBOL_GPL(adp5520_read); int adp5520_set_bits(struct device *dev, int reg, uint8_t bit_mask) { struct adp5520_chip *chip = dev_get_drvdata(dev); uint8_t reg_val; int ret; mutex_lock(&chip->lock); ret = __adp5520_read(chip->client, reg, &reg_val); if (!ret && ((reg_val & bit_mask) != bit_mask)) { reg_val |= bit_mask; ret = __adp5520_write(chip->client, reg, reg_val); } mutex_unlock(&chip->lock); return ret; } EXPORT_SYMBOL_GPL(adp5520_set_bits); int adp5520_clr_bits(struct device *dev, int reg, uint8_t bit_mask) { struct adp5520_chip *chip = dev_get_drvdata(dev); uint8_t reg_val; int ret; mutex_lock(&chip->lock); ret = __adp5520_read(chip->client, reg, &reg_val); if (!ret && (reg_val & bit_mask)) { reg_val &= ~bit_mask; ret = __adp5520_write(chip->client, reg, reg_val); } mutex_unlock(&chip->lock); return ret; } EXPORT_SYMBOL_GPL(adp5520_clr_bits); int adp5520_register_notifier(struct device *dev, struct notifier_block *nb, unsigned int events) { struct adp5520_chip *chip = dev_get_drvdata(dev); if (chip->irq) { adp5520_set_bits(chip->dev, ADP5520_INTERRUPT_ENABLE, events & (ADP5520_KP_IEN | ADP5520_KR_IEN | ADP5520_OVP_IEN | ADP5520_CMPR_IEN)); return blocking_notifier_chain_register(&chip->notifier_list, nb); } return -ENODEV; } EXPORT_SYMBOL_GPL(adp5520_register_notifier); int adp5520_unregister_notifier(struct device *dev, struct notifier_block *nb, unsigned int events) { struct adp5520_chip *chip = dev_get_drvdata(dev); adp5520_clr_bits(chip->dev, ADP5520_INTERRUPT_ENABLE, events & (ADP5520_KP_IEN | ADP5520_KR_IEN | ADP5520_OVP_IEN | ADP5520_CMPR_IEN)); return blocking_notifier_chain_unregister(&chip->notifier_list, nb); } EXPORT_SYMBOL_GPL(adp5520_unregister_notifier); static irqreturn_t adp5520_irq_thread(int irq, void *data) { struct adp5520_chip *chip = data; unsigned int events; uint8_t reg_val; int ret; ret = __adp5520_read(chip->client, ADP5520_MODE_STATUS, &reg_val); if 
(ret) goto out; events = reg_val & (ADP5520_OVP_INT | ADP5520_CMPR_INT | ADP5520_GPI_INT | ADP5520_KR_INT | ADP5520_KP_INT); blocking_notifier_call_chain(&chip->notifier_list, events, NULL); /* ACK, Sticky bits are W1C */ __adp5520_ack_bits(chip->client, ADP5520_MODE_STATUS, events); out: return IRQ_HANDLED; } static int __remove_subdev(struct device *dev, void *unused) { platform_device_unregister(to_platform_device(dev)); return 0; } static int adp5520_remove_subdevs(struct adp5520_chip *chip) { return device_for_each_child(chip->dev, NULL, __remove_subdev); } static int adp5520_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct adp5520_platform_data *pdata = dev_get_platdata(&client->dev); struct platform_device *pdev; struct adp5520_chip *chip; int ret; if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) { dev_err(&client->dev, "SMBUS Word Data not Supported\n"); return -EIO; } if (pdata == NULL) { dev_err(&client->dev, "missing platform data\n"); return -ENODEV; } chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL); if (!chip) return -ENOMEM; i2c_set_clientdata(client, chip); chip->client = client; chip->dev = &client->dev; chip->irq = client->irq; chip->id = id->driver_data; mutex_init(&chip->lock); if (chip->irq) { BLOCKING_INIT_NOTIFIER_HEAD(&chip->notifier_list); ret = request_threaded_irq(chip->irq, NULL, adp5520_irq_thread, IRQF_TRIGGER_LOW | IRQF_ONESHOT, "adp5520", chip); if (ret) { dev_err(&client->dev, "failed to request irq %d\n", chip->irq); return ret; } } ret = adp5520_write(chip->dev, ADP5520_MODE_STATUS, ADP5520_nSTNBY); if (ret) { dev_err(&client->dev, "failed to write\n"); goto out_free_irq; } if (pdata->keys) { pdev = platform_device_register_data(chip->dev, "adp5520-keys", chip->id, pdata->keys, sizeof(*pdata->keys)); if (IS_ERR(pdev)) { ret = PTR_ERR(pdev); goto out_remove_subdevs; } } if (pdata->gpio) { pdev = platform_device_register_data(chip->dev, "adp5520-gpio", chip->id, 
pdata->gpio, sizeof(*pdata->gpio)); if (IS_ERR(pdev)) { ret = PTR_ERR(pdev); goto out_remove_subdevs; } } if (pdata->leds) { pdev = platform_device_register_data(chip->dev, "adp5520-led", chip->id, pdata->leds, sizeof(*pdata->leds)); if (IS_ERR(pdev)) { ret = PTR_ERR(pdev); goto out_remove_subdevs; } } if (pdata->backlight) { pdev = platform_device_register_data(chip->dev, "adp5520-backlight", chip->id, pdata->backlight, sizeof(*pdata->backlight)); if (IS_ERR(pdev)) { ret = PTR_ERR(pdev); goto out_remove_subdevs; } } return 0; out_remove_subdevs: adp5520_remove_subdevs(chip); out_free_irq: if (chip->irq) free_irq(chip->irq, chip); return ret; } static int adp5520_remove(struct i2c_client *client) { struct adp5520_chip *chip = dev_get_drvdata(&client->dev); if (chip->irq) free_irq(chip->irq, chip); adp5520_remove_subdevs(chip); adp5520_write(chip->dev, ADP5520_MODE_STATUS, 0); return 0; } #ifdef CONFIG_PM_SLEEP static int adp5520_suspend(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct adp5520_chip *chip = dev_get_drvdata(&client->dev); adp5520_read(chip->dev, ADP5520_MODE_STATUS, &chip->mode); /* All other bits are W1C */ chip->mode &= ADP5520_BL_EN | ADP5520_DIM_EN | ADP5520_nSTNBY; adp5520_write(chip->dev, ADP5520_MODE_STATUS, 0); return 0; } static int adp5520_resume(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct adp5520_chip *chip = dev_get_drvdata(&client->dev); adp5520_write(chip->dev, ADP5520_MODE_STATUS, chip->mode); return 0; } #endif static SIMPLE_DEV_PM_OPS(adp5520_pm, adp5520_suspend, adp5520_resume); static const struct i2c_device_id adp5520_id[] = { { "pmic-adp5520", ID_ADP5520 }, { "pmic-adp5501", ID_ADP5501 }, { } }; MODULE_DEVICE_TABLE(i2c, adp5520_id); static struct i2c_driver adp5520_driver = { .driver = { .name = "adp5520", .owner = THIS_MODULE, .pm = &adp5520_pm, }, .probe = adp5520_probe, .remove = adp5520_remove, .id_table = adp5520_id, }; module_i2c_driver(adp5520_driver); 
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>"); MODULE_DESCRIPTION("ADP5520(01) PMIC-MFD Driver"); MODULE_LICENSE("GPL");
gpl-2.0
quanghieu/linux-DFI
drivers/infiniband/hw/mlx4/cm.c
1145
13588
/* * Copyright (c) 2012 Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #include <rdma/ib_mad.h> #include <linux/mlx4/cmd.h> #include <linux/rbtree.h> #include <linux/idr.h> #include <rdma/ib_cm.h> #include "mlx4_ib.h" #define CM_CLEANUP_CACHE_TIMEOUT (5 * HZ) struct id_map_entry { struct rb_node node; u32 sl_cm_id; u32 pv_cm_id; int slave_id; int scheduled_delete; struct mlx4_ib_dev *dev; struct list_head list; struct delayed_work timeout; }; struct cm_generic_msg { struct ib_mad_hdr hdr; __be32 local_comm_id; __be32 remote_comm_id; }; struct cm_sidr_generic_msg { struct ib_mad_hdr hdr; __be32 request_id; }; struct cm_req_msg { unsigned char unused[0x60]; union ib_gid primary_path_sgid; }; static void set_local_comm_id(struct ib_mad *mad, u32 cm_id) { if (mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) { struct cm_sidr_generic_msg *msg = (struct cm_sidr_generic_msg *)mad; msg->request_id = cpu_to_be32(cm_id); } else if (mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) { pr_err("trying to set local_comm_id in SIDR_REP\n"); return; } else { struct cm_generic_msg *msg = (struct cm_generic_msg *)mad; msg->local_comm_id = cpu_to_be32(cm_id); } } static u32 get_local_comm_id(struct ib_mad *mad) { if (mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) { struct cm_sidr_generic_msg *msg = (struct cm_sidr_generic_msg *)mad; return be32_to_cpu(msg->request_id); } else if (mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) { pr_err("trying to set local_comm_id in SIDR_REP\n"); return -1; } else { struct cm_generic_msg *msg = (struct cm_generic_msg *)mad; return be32_to_cpu(msg->local_comm_id); } } static void set_remote_comm_id(struct ib_mad *mad, u32 cm_id) { if (mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) { struct cm_sidr_generic_msg *msg = (struct cm_sidr_generic_msg *)mad; msg->request_id = cpu_to_be32(cm_id); } else if (mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) { pr_err("trying to set remote_comm_id in SIDR_REQ\n"); return; } else { struct cm_generic_msg *msg = (struct cm_generic_msg *)mad; msg->remote_comm_id = cpu_to_be32(cm_id); } } static u32 
get_remote_comm_id(struct ib_mad *mad) { if (mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) { struct cm_sidr_generic_msg *msg = (struct cm_sidr_generic_msg *)mad; return be32_to_cpu(msg->request_id); } else if (mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) { pr_err("trying to set remote_comm_id in SIDR_REQ\n"); return -1; } else { struct cm_generic_msg *msg = (struct cm_generic_msg *)mad; return be32_to_cpu(msg->remote_comm_id); } } static union ib_gid gid_from_req_msg(struct ib_device *ibdev, struct ib_mad *mad) { struct cm_req_msg *msg = (struct cm_req_msg *)mad; return msg->primary_path_sgid; } /* Lock should be taken before called */ static struct id_map_entry * id_map_find_by_sl_id(struct ib_device *ibdev, u32 slave_id, u32 sl_cm_id) { struct rb_root *sl_id_map = &to_mdev(ibdev)->sriov.sl_id_map; struct rb_node *node = sl_id_map->rb_node; while (node) { struct id_map_entry *id_map_entry = rb_entry(node, struct id_map_entry, node); if (id_map_entry->sl_cm_id > sl_cm_id) node = node->rb_left; else if (id_map_entry->sl_cm_id < sl_cm_id) node = node->rb_right; else if (id_map_entry->slave_id > slave_id) node = node->rb_left; else if (id_map_entry->slave_id < slave_id) node = node->rb_right; else return id_map_entry; } return NULL; } static void id_map_ent_timeout(struct work_struct *work) { struct delayed_work *delay = to_delayed_work(work); struct id_map_entry *ent = container_of(delay, struct id_map_entry, timeout); struct id_map_entry *db_ent, *found_ent; struct mlx4_ib_dev *dev = ent->dev; struct mlx4_ib_sriov *sriov = &dev->sriov; struct rb_root *sl_id_map = &sriov->sl_id_map; int pv_id = (int) ent->pv_cm_id; spin_lock(&sriov->id_map_lock); db_ent = (struct id_map_entry *)idr_find(&sriov->pv_id_table, pv_id); if (!db_ent) goto out; found_ent = id_map_find_by_sl_id(&dev->ib_dev, ent->slave_id, ent->sl_cm_id); if (found_ent && found_ent == ent) rb_erase(&found_ent->node, sl_id_map); idr_remove(&sriov->pv_id_table, pv_id); out: list_del(&ent->list); 
spin_unlock(&sriov->id_map_lock); kfree(ent); } static void id_map_find_del(struct ib_device *ibdev, int pv_cm_id) { struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov; struct rb_root *sl_id_map = &sriov->sl_id_map; struct id_map_entry *ent, *found_ent; spin_lock(&sriov->id_map_lock); ent = (struct id_map_entry *)idr_find(&sriov->pv_id_table, pv_cm_id); if (!ent) goto out; found_ent = id_map_find_by_sl_id(ibdev, ent->slave_id, ent->sl_cm_id); if (found_ent && found_ent == ent) rb_erase(&found_ent->node, sl_id_map); idr_remove(&sriov->pv_id_table, pv_cm_id); out: spin_unlock(&sriov->id_map_lock); } static void sl_id_map_add(struct ib_device *ibdev, struct id_map_entry *new) { struct rb_root *sl_id_map = &to_mdev(ibdev)->sriov.sl_id_map; struct rb_node **link = &sl_id_map->rb_node, *parent = NULL; struct id_map_entry *ent; int slave_id = new->slave_id; int sl_cm_id = new->sl_cm_id; ent = id_map_find_by_sl_id(ibdev, slave_id, sl_cm_id); if (ent) { pr_debug("overriding existing sl_id_map entry (cm_id = %x)\n", sl_cm_id); rb_replace_node(&ent->node, &new->node, sl_id_map); return; } /* Go to the bottom of the tree */ while (*link) { parent = *link; ent = rb_entry(parent, struct id_map_entry, node); if (ent->sl_cm_id > sl_cm_id || (ent->sl_cm_id == sl_cm_id && ent->slave_id > slave_id)) link = &(*link)->rb_left; else link = &(*link)->rb_right; } rb_link_node(&new->node, parent, link); rb_insert_color(&new->node, sl_id_map); } static struct id_map_entry * id_map_alloc(struct ib_device *ibdev, int slave_id, u32 sl_cm_id) { int ret; struct id_map_entry *ent; struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov; ent = kmalloc(sizeof (struct id_map_entry), GFP_KERNEL); if (!ent) { mlx4_ib_warn(ibdev, "Couldn't allocate id cache entry - out of memory\n"); return ERR_PTR(-ENOMEM); } ent->sl_cm_id = sl_cm_id; ent->slave_id = slave_id; ent->scheduled_delete = 0; ent->dev = to_mdev(ibdev); INIT_DELAYED_WORK(&ent->timeout, id_map_ent_timeout); idr_preload(GFP_KERNEL); 
spin_lock(&to_mdev(ibdev)->sriov.id_map_lock); ret = idr_alloc_cyclic(&sriov->pv_id_table, ent, 0, 0, GFP_NOWAIT); if (ret >= 0) { ent->pv_cm_id = (u32)ret; sl_id_map_add(ibdev, ent); list_add_tail(&ent->list, &sriov->cm_list); } spin_unlock(&sriov->id_map_lock); idr_preload_end(); if (ret >= 0) return ent; /*error flow*/ kfree(ent); mlx4_ib_warn(ibdev, "No more space in the idr (err:0x%x)\n", ret); return ERR_PTR(-ENOMEM); } static struct id_map_entry * id_map_get(struct ib_device *ibdev, int *pv_cm_id, int sl_cm_id, int slave_id) { struct id_map_entry *ent; struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov; spin_lock(&sriov->id_map_lock); if (*pv_cm_id == -1) { ent = id_map_find_by_sl_id(ibdev, sl_cm_id, slave_id); if (ent) *pv_cm_id = (int) ent->pv_cm_id; } else ent = (struct id_map_entry *)idr_find(&sriov->pv_id_table, *pv_cm_id); spin_unlock(&sriov->id_map_lock); return ent; } static void schedule_delayed(struct ib_device *ibdev, struct id_map_entry *id) { struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov; unsigned long flags; spin_lock(&sriov->id_map_lock); spin_lock_irqsave(&sriov->going_down_lock, flags); /*make sure that there is no schedule inside the scheduled work.*/ if (!sriov->is_going_down) { id->scheduled_delete = 1; schedule_delayed_work(&id->timeout, CM_CLEANUP_CACHE_TIMEOUT); } spin_unlock_irqrestore(&sriov->going_down_lock, flags); spin_unlock(&sriov->id_map_lock); } int mlx4_ib_multiplex_cm_handler(struct ib_device *ibdev, int port, int slave_id, struct ib_mad *mad) { struct id_map_entry *id; u32 sl_cm_id; int pv_cm_id = -1; if (mad->mad_hdr.attr_id == CM_REQ_ATTR_ID || mad->mad_hdr.attr_id == CM_REP_ATTR_ID || mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) { sl_cm_id = get_local_comm_id(mad); id = id_map_alloc(ibdev, slave_id, sl_cm_id); if (IS_ERR(id)) { mlx4_ib_warn(ibdev, "%s: id{slave: %d, sl_cm_id: 0x%x} Failed to id_map_alloc\n", __func__, slave_id, sl_cm_id); return PTR_ERR(id); } } else if (mad->mad_hdr.attr_id == CM_REJ_ATTR_ID || 
mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) { return 0; } else { sl_cm_id = get_local_comm_id(mad); id = id_map_get(ibdev, &pv_cm_id, slave_id, sl_cm_id); } if (!id) { pr_debug("id{slave: %d, sl_cm_id: 0x%x} is NULL!\n", slave_id, sl_cm_id); return -EINVAL; } set_local_comm_id(mad, id->pv_cm_id); if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID) schedule_delayed(ibdev, id); else if (mad->mad_hdr.attr_id == CM_DREP_ATTR_ID) id_map_find_del(ibdev, pv_cm_id); return 0; } int mlx4_ib_demux_cm_handler(struct ib_device *ibdev, int port, int *slave, struct ib_mad *mad) { u32 pv_cm_id; struct id_map_entry *id; if (mad->mad_hdr.attr_id == CM_REQ_ATTR_ID || mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) { union ib_gid gid; if (!slave) return 0; gid = gid_from_req_msg(ibdev, mad); *slave = mlx4_ib_find_real_gid(ibdev, port, gid.global.interface_id); if (*slave < 0) { mlx4_ib_warn(ibdev, "failed matching slave_id by gid (0x%llx)\n", be64_to_cpu(gid.global.interface_id)); return -ENOENT; } return 0; } pv_cm_id = get_remote_comm_id(mad); id = id_map_get(ibdev, (int *)&pv_cm_id, -1, -1); if (!id) { pr_debug("Couldn't find an entry for pv_cm_id 0x%x\n", pv_cm_id); return -ENOENT; } if (slave) *slave = id->slave_id; set_remote_comm_id(mad, id->sl_cm_id); if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID) schedule_delayed(ibdev, id); else if (mad->mad_hdr.attr_id == CM_REJ_ATTR_ID || mad->mad_hdr.attr_id == CM_DREP_ATTR_ID) { id_map_find_del(ibdev, (int) pv_cm_id); } return 0; } void mlx4_ib_cm_paravirt_init(struct mlx4_ib_dev *dev) { spin_lock_init(&dev->sriov.id_map_lock); INIT_LIST_HEAD(&dev->sriov.cm_list); dev->sriov.sl_id_map = RB_ROOT; idr_init(&dev->sriov.pv_id_table); } /* slave = -1 ==> all slaves */ /* TBD -- call paravirt clean for single slave. 
Need for slave RESET event */ void mlx4_ib_cm_paravirt_clean(struct mlx4_ib_dev *dev, int slave) { struct mlx4_ib_sriov *sriov = &dev->sriov; struct rb_root *sl_id_map = &sriov->sl_id_map; struct list_head lh; struct rb_node *nd; int need_flush = 1; struct id_map_entry *map, *tmp_map; /* cancel all delayed work queue entries */ INIT_LIST_HEAD(&lh); spin_lock(&sriov->id_map_lock); list_for_each_entry_safe(map, tmp_map, &dev->sriov.cm_list, list) { if (slave < 0 || slave == map->slave_id) { if (map->scheduled_delete) need_flush &= !!cancel_delayed_work(&map->timeout); } } spin_unlock(&sriov->id_map_lock); if (!need_flush) flush_scheduled_work(); /* make sure all timers were flushed */ /* now, remove all leftover entries from databases*/ spin_lock(&sriov->id_map_lock); if (slave < 0) { while (rb_first(sl_id_map)) { struct id_map_entry *ent = rb_entry(rb_first(sl_id_map), struct id_map_entry, node); rb_erase(&ent->node, sl_id_map); idr_remove(&sriov->pv_id_table, (int) ent->pv_cm_id); } list_splice_init(&dev->sriov.cm_list, &lh); } else { /* first, move nodes belonging to slave to db remove list */ nd = rb_first(sl_id_map); while (nd) { struct id_map_entry *ent = rb_entry(nd, struct id_map_entry, node); nd = rb_next(nd); if (ent->slave_id == slave) list_move_tail(&ent->list, &lh); } /* remove those nodes from databases */ list_for_each_entry_safe(map, tmp_map, &lh, list) { rb_erase(&map->node, sl_id_map); idr_remove(&sriov->pv_id_table, (int) map->pv_cm_id); } /* add remaining nodes from cm_list */ list_for_each_entry_safe(map, tmp_map, &dev->sriov.cm_list, list) { if (slave == map->slave_id) list_move_tail(&map->list, &lh); } } spin_unlock(&sriov->id_map_lock); /* free any map entries left behind due to cancel_delayed_work above */ list_for_each_entry_safe(map, tmp_map, &lh, list) { list_del(&map->list); kfree(map); } }
gpl-2.0
sub77/android_kernel_samsung_matissewifi
drivers/staging/prima/CORE/VOSS/src/vos_types.c
1401
6496
/* * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. * * Previously licensed under the ISC license by Qualcomm Atheros, Inc. * * * Permission to use, copy, modify, and/or distribute this software for * any purpose with or without fee is hereby granted, provided that the * above copyright notice and this permission notice appear in all * copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR * PERFORMANCE OF THIS SOFTWARE. */ /* * Copyright (c) 2012, The Linux Foundation. All rights reserved. * * Previously licensed under the ISC license by Qualcomm Atheros, Inc. * * * Permission to use, copy, modify, and/or distribute this software for * any purpose with or without fee is hereby granted, provided that the * above copyright notice and this permission notice appear in all * copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR * PERFORMANCE OF THIS SOFTWARE. */ /**========================================================================= \file vos_Types.c \brief virtual Operating System Servies (vOS) Basic type definitions Copyright 2008 (c) Qualcomm, Incorporated. All Rights Reserved. 
Qualcomm Confidential and Proprietary. ========================================================================*/ /* $Header$ */ /*-------------------------------------------------------------------------- Include Files ------------------------------------------------------------------------*/ #include "vos_types.h" #include "vos_trace.h" //#include "wlan_libra_config.h" /*-------------------------------------------------------------------------- Preprocessor definitions and constants ------------------------------------------------------------------------*/ /*-------------------------------------------------------------------------- Type declarations ------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- \brief vos_atomic_set_U32() - set a U32 variable atomically \param pTarget - pointer to the v_U32_t to set. \param value - the value to set in the v_U32_t variable. \return This function returns the value previously in the v_U32_t before the new value is set. \sa vos_atomic_increment_U32(), vos_atomic_decrement_U32() --------------------------------------------------------------------------*/ v_U32_t vos_atomic_set_U32( v_U32_t *pTarget, v_U32_t value ) { v_U32_t oldval; unsigned long flags; if (pTarget == NULL) { VOS_TRACE(VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR, "NULL ptr passed into %s",__func__); return 0; } local_irq_save(flags); oldval = *pTarget; *pTarget = value; local_irq_restore(flags); // v_U32_t prev = atomic_read(pTarget); // atomic_set(pTarget, value); return oldval; } /*---------------------------------------------------------------------------- \brief vos_atomic_increment_U32() - Increment a U32 variable atomically \param pTarget - pointer to the v_U32_t to increment. \return This function returns the value of the variable after the increment occurs. 
\sa vos_atomic_decrement_U32(), vos_atomic_set_U32() --------------------------------------------------------------------------*/ v_U32_t vos_atomic_increment_U32( v_U32_t *pTarget ) { unsigned long flags; if (pTarget == NULL) { VOS_TRACE(VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR, "NULL ptr passed into %s",__func__); return 0; } local_irq_save(flags); ++*pTarget; local_irq_restore(flags); return *pTarget; // return atomic_inc_return(pTarget); } /*---------------------------------------------------------------------------- \brief vos_atomic_decrement_U32() - Decrement a U32 variable atomically \param pTarget - pointer to the v_U32_t to decrement. \return This function returns the value of the variable after the decrement occurs. \sa vos_atomic_increment_U32(), vos_atomic_set_U32() --------------------------------------------------------------------------*/ v_U32_t vos_atomic_decrement_U32( v_U32_t *pTarget ) { unsigned long flags; if (pTarget == NULL) { VOS_TRACE(VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR, "NULL ptr passed into %s",__func__); return 0; } // return atomic_dec_return(pTarget); local_irq_save(flags); --*pTarget; local_irq_restore(flags); return (*pTarget); } v_U32_t vos_atomic_increment_U32_by_value( v_U32_t *pTarget, v_U32_t value ) { unsigned long flags; if (pTarget == NULL) { VOS_TRACE(VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR, "NULL ptr passed into %s",__func__); return 0; } local_irq_save(flags); *pTarget += value ; local_irq_restore(flags); return (*pTarget); } v_U32_t vos_atomic_decrement_U32_by_value( v_U32_t *pTarget, v_U32_t value ) { unsigned long flags; if (pTarget == NULL) { VOS_TRACE(VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR, "NULL ptr passed into %s",__func__); return 0; } local_irq_save(flags); *pTarget -= value ; local_irq_restore(flags); return (*pTarget); } v_U32_t vos_get_skip_ssid_check(void) { /**This is needed by only AMSS for interoperatability **/ return 1; } v_U32_t vos_get_skip_11e_check(void) { /* this is needed only for AMSS 
for interopratability **/ return 1; }
gpl-2.0
TeamHydra/android_kernel_samsung_n7100
drivers/input/mouse/bcm5974.c
1657
27085
/* * Apple USB BCM5974 (Macbook Air and Penryn Macbook Pro) multitouch driver * * Copyright (C) 2008 Henrik Rydberg (rydberg@euromail.se) * * The USB initialization and package decoding was made by * Scott Shawcroft as part of the touchd user-space driver project: * Copyright (C) 2008 Scott Shawcroft (scott.shawcroft@gmail.com) * * The BCM5974 driver is based on the appletouch driver: * Copyright (C) 2001-2004 Greg Kroah-Hartman (greg@kroah.com) * Copyright (C) 2005 Johannes Berg (johannes@sipsolutions.net) * Copyright (C) 2005 Stelian Pop (stelian@popies.net) * Copyright (C) 2005 Frank Arnold (frank@scirocco-5v-turbo.de) * Copyright (C) 2005 Peter Osterlund (petero2@telia.com) * Copyright (C) 2005 Michael Hanselmann (linux-kernel@hansmi.ch) * Copyright (C) 2006 Nicolas Boichat (nicolas@boichat.ch) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
* */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/usb/input.h> #include <linux/hid.h> #include <linux/mutex.h> #define USB_VENDOR_ID_APPLE 0x05ac /* MacbookAir, aka wellspring */ #define USB_DEVICE_ID_APPLE_WELLSPRING_ANSI 0x0223 #define USB_DEVICE_ID_APPLE_WELLSPRING_ISO 0x0224 #define USB_DEVICE_ID_APPLE_WELLSPRING_JIS 0x0225 /* MacbookProPenryn, aka wellspring2 */ #define USB_DEVICE_ID_APPLE_WELLSPRING2_ANSI 0x0230 #define USB_DEVICE_ID_APPLE_WELLSPRING2_ISO 0x0231 #define USB_DEVICE_ID_APPLE_WELLSPRING2_JIS 0x0232 /* Macbook5,1 (unibody), aka wellspring3 */ #define USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI 0x0236 #define USB_DEVICE_ID_APPLE_WELLSPRING3_ISO 0x0237 #define USB_DEVICE_ID_APPLE_WELLSPRING3_JIS 0x0238 /* MacbookAir3,2 (unibody), aka wellspring5 */ #define USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI 0x023f #define USB_DEVICE_ID_APPLE_WELLSPRING4_ISO 0x0240 #define USB_DEVICE_ID_APPLE_WELLSPRING4_JIS 0x0241 /* MacbookAir3,1 (unibody), aka wellspring4 */ #define USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI 0x0242 #define USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO 0x0243 #define USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS 0x0244 /* Macbook8 (unibody, March 2011) */ #define USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI 0x0245 #define USB_DEVICE_ID_APPLE_WELLSPRING5_ISO 0x0246 #define USB_DEVICE_ID_APPLE_WELLSPRING5_JIS 0x0247 #define BCM5974_DEVICE(prod) { \ .match_flags = (USB_DEVICE_ID_MATCH_DEVICE | \ USB_DEVICE_ID_MATCH_INT_CLASS | \ USB_DEVICE_ID_MATCH_INT_PROTOCOL), \ .idVendor = USB_VENDOR_ID_APPLE, \ .idProduct = (prod), \ .bInterfaceClass = USB_INTERFACE_CLASS_HID, \ .bInterfaceProtocol = USB_INTERFACE_PROTOCOL_MOUSE \ } /* table of devices that work with this driver */ static const struct usb_device_id bcm5974_table[] = { /* MacbookAir1.1 */ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING_ANSI), BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING_ISO), 
BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING_JIS), /* MacbookProPenryn */ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING2_ANSI), BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING2_ISO), BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING2_JIS), /* Macbook5,1 */ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI), BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING3_ISO), BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING3_JIS), /* MacbookAir3,2 */ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI), BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING4_ISO), BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING4_JIS), /* MacbookAir3,1 */ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI), BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO), BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS), /* MacbookPro8 */ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI), BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5_ISO), BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5_JIS), /* Terminating entry */ {} }; MODULE_DEVICE_TABLE(usb, bcm5974_table); MODULE_AUTHOR("Henrik Rydberg"); MODULE_DESCRIPTION("Apple USB BCM5974 multitouch driver"); MODULE_LICENSE("GPL"); #define dprintk(level, format, a...)\ { if (debug >= level) printk(KERN_DEBUG format, ##a); } static int debug = 1; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "Activate debugging output"); /* button data structure */ struct bt_data { u8 unknown1; /* constant */ u8 button; /* left button */ u8 rel_x; /* relative x coordinate */ u8 rel_y; /* relative y coordinate */ }; /* trackpad header types */ enum tp_type { TYPE1, /* plain trackpad */ TYPE2 /* button integrated in trackpad */ }; /* trackpad finger data offsets, le16-aligned */ #define FINGER_TYPE1 (13 * sizeof(__le16)) #define FINGER_TYPE2 (15 * sizeof(__le16)) /* trackpad button data offsets */ #define BUTTON_TYPE2 15 /* list of device capability bits */ #define HAS_INTEGRATED_BUTTON 1 /* trackpad finger structure, le16-aligned */ struct tp_finger { __le16 origin; /* zero when 
switching track finger */ __le16 abs_x; /* absolute x coodinate */ __le16 abs_y; /* absolute y coodinate */ __le16 rel_x; /* relative x coodinate */ __le16 rel_y; /* relative y coodinate */ __le16 size_major; /* finger size, major axis? */ __le16 size_minor; /* finger size, minor axis? */ __le16 orientation; /* 16384 when point, else 15 bit angle */ __le16 force_major; /* trackpad force, major axis? */ __le16 force_minor; /* trackpad force, minor axis? */ __le16 unused[3]; /* zeros */ __le16 multi; /* one finger: varies, more fingers: constant */ } __attribute__((packed,aligned(2))); /* trackpad finger data size, empirically at least ten fingers */ #define SIZEOF_FINGER sizeof(struct tp_finger) #define SIZEOF_ALL_FINGERS (16 * SIZEOF_FINGER) #define MAX_FINGER_ORIENTATION 16384 /* device-specific parameters */ struct bcm5974_param { int dim; /* logical dimension */ int fuzz; /* logical noise value */ int devmin; /* device minimum reading */ int devmax; /* device maximum reading */ }; /* device-specific configuration */ struct bcm5974_config { int ansi, iso, jis; /* the product id of this device */ int caps; /* device capability bitmask */ int bt_ep; /* the endpoint of the button interface */ int bt_datalen; /* data length of the button interface */ int tp_ep; /* the endpoint of the trackpad interface */ enum tp_type tp_type; /* type of trackpad interface */ int tp_offset; /* offset to trackpad finger data */ int tp_datalen; /* data length of the trackpad interface */ struct bcm5974_param p; /* finger pressure limits */ struct bcm5974_param w; /* finger width limits */ struct bcm5974_param x; /* horizontal limits */ struct bcm5974_param y; /* vertical limits */ }; /* logical device structure */ struct bcm5974 { char phys[64]; struct usb_device *udev; /* usb device */ struct usb_interface *intf; /* our interface */ struct input_dev *input; /* input dev */ struct bcm5974_config cfg; /* device configuration */ struct mutex pm_mutex; /* serialize access to open/suspend 
*/ int opened; /* 1: opened, 0: closed */ struct urb *bt_urb; /* button usb request block */ struct bt_data *bt_data; /* button transferred data */ struct urb *tp_urb; /* trackpad usb request block */ u8 *tp_data; /* trackpad transferred data */ int fingers; /* number of fingers on trackpad */ }; /* logical dimensions */ #define DIM_PRESSURE 256 /* maximum finger pressure */ #define DIM_WIDTH 16 /* maximum finger width */ #define DIM_X 1280 /* maximum trackpad x value */ #define DIM_Y 800 /* maximum trackpad y value */ /* logical signal quality */ #define SN_PRESSURE 45 /* pressure signal-to-noise ratio */ #define SN_WIDTH 100 /* width signal-to-noise ratio */ #define SN_COORD 250 /* coordinate signal-to-noise ratio */ /* pressure thresholds */ #define PRESSURE_LOW (2 * DIM_PRESSURE / SN_PRESSURE) #define PRESSURE_HIGH (3 * PRESSURE_LOW) /* device constants */ static const struct bcm5974_config bcm5974_config_table[] = { { USB_DEVICE_ID_APPLE_WELLSPRING_ANSI, USB_DEVICE_ID_APPLE_WELLSPRING_ISO, USB_DEVICE_ID_APPLE_WELLSPRING_JIS, 0, 0x84, sizeof(struct bt_data), 0x81, TYPE1, FINGER_TYPE1, FINGER_TYPE1 + SIZEOF_ALL_FINGERS, { DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 256 }, { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 }, { DIM_X, DIM_X / SN_COORD, -4824, 5342 }, { DIM_Y, DIM_Y / SN_COORD, -172, 5820 } }, { USB_DEVICE_ID_APPLE_WELLSPRING2_ANSI, USB_DEVICE_ID_APPLE_WELLSPRING2_ISO, USB_DEVICE_ID_APPLE_WELLSPRING2_JIS, 0, 0x84, sizeof(struct bt_data), 0x81, TYPE1, FINGER_TYPE1, FINGER_TYPE1 + SIZEOF_ALL_FINGERS, { DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 256 }, { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 }, { DIM_X, DIM_X / SN_COORD, -4824, 4824 }, { DIM_Y, DIM_Y / SN_COORD, -172, 4290 } }, { USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI, USB_DEVICE_ID_APPLE_WELLSPRING3_ISO, USB_DEVICE_ID_APPLE_WELLSPRING3_JIS, HAS_INTEGRATED_BUTTON, 0x84, sizeof(struct bt_data), 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS, { DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 300 
}, { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 }, { DIM_X, DIM_X / SN_COORD, -4460, 5166 }, { DIM_Y, DIM_Y / SN_COORD, -75, 6700 } }, { USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI, USB_DEVICE_ID_APPLE_WELLSPRING4_ISO, USB_DEVICE_ID_APPLE_WELLSPRING4_JIS, HAS_INTEGRATED_BUTTON, 0x84, sizeof(struct bt_data), 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS, { DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 300 }, { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 }, { DIM_X, DIM_X / SN_COORD, -4620, 5140 }, { DIM_Y, DIM_Y / SN_COORD, -150, 6600 } }, { USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI, USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO, USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS, HAS_INTEGRATED_BUTTON, 0x84, sizeof(struct bt_data), 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS, { DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 300 }, { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 }, { DIM_X, DIM_X / SN_COORD, -4616, 5112 }, { DIM_Y, DIM_Y / SN_COORD, -142, 5234 } }, { USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI, USB_DEVICE_ID_APPLE_WELLSPRING5_ISO, USB_DEVICE_ID_APPLE_WELLSPRING5_JIS, HAS_INTEGRATED_BUTTON, 0x84, sizeof(struct bt_data), 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS, { DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 300 }, { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 }, { DIM_X, DIM_X / SN_COORD, -4415, 5050 }, { DIM_Y, DIM_Y / SN_COORD, -55, 6680 } }, {} }; /* return the device-specific configuration by device */ static const struct bcm5974_config *bcm5974_get_config(struct usb_device *udev) { u16 id = le16_to_cpu(udev->descriptor.idProduct); const struct bcm5974_config *cfg; for (cfg = bcm5974_config_table; cfg->ansi; ++cfg) if (cfg->ansi == id || cfg->iso == id || cfg->jis == id) return cfg; return bcm5974_config_table; } /* convert 16-bit little endian to signed integer */ static inline int raw2int(__le16 x) { return (signed short)le16_to_cpu(x); } /* scale device data to logical dimensions (asserts devmin < devmax) */ static inline int int2scale(const struct 
bcm5974_param *p, int x) { return x * p->dim / (p->devmax - p->devmin); } /* all logical value ranges are [0,dim). */ static inline int int2bound(const struct bcm5974_param *p, int x) { int s = int2scale(p, x); return clamp_val(s, 0, p->dim - 1); } /* setup which logical events to report */ static void setup_events_to_report(struct input_dev *input_dev, const struct bcm5974_config *cfg) { __set_bit(EV_ABS, input_dev->evbit); input_set_abs_params(input_dev, ABS_PRESSURE, 0, cfg->p.dim, cfg->p.fuzz, 0); input_set_abs_params(input_dev, ABS_TOOL_WIDTH, 0, cfg->w.dim, cfg->w.fuzz, 0); input_set_abs_params(input_dev, ABS_X, 0, cfg->x.dim, cfg->x.fuzz, 0); input_set_abs_params(input_dev, ABS_Y, 0, cfg->y.dim, cfg->y.fuzz, 0); /* finger touch area */ input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR, cfg->w.devmin, cfg->w.devmax, 0, 0); input_set_abs_params(input_dev, ABS_MT_TOUCH_MINOR, cfg->w.devmin, cfg->w.devmax, 0, 0); /* finger approach area */ input_set_abs_params(input_dev, ABS_MT_WIDTH_MAJOR, cfg->w.devmin, cfg->w.devmax, 0, 0); input_set_abs_params(input_dev, ABS_MT_WIDTH_MINOR, cfg->w.devmin, cfg->w.devmax, 0, 0); /* finger orientation */ input_set_abs_params(input_dev, ABS_MT_ORIENTATION, -MAX_FINGER_ORIENTATION, MAX_FINGER_ORIENTATION, 0, 0); /* finger position */ input_set_abs_params(input_dev, ABS_MT_POSITION_X, cfg->x.devmin, cfg->x.devmax, 0, 0); input_set_abs_params(input_dev, ABS_MT_POSITION_Y, cfg->y.devmin, cfg->y.devmax, 0, 0); __set_bit(EV_KEY, input_dev->evbit); __set_bit(BTN_TOUCH, input_dev->keybit); __set_bit(BTN_TOOL_FINGER, input_dev->keybit); __set_bit(BTN_TOOL_DOUBLETAP, input_dev->keybit); __set_bit(BTN_TOOL_TRIPLETAP, input_dev->keybit); __set_bit(BTN_TOOL_QUADTAP, input_dev->keybit); __set_bit(BTN_LEFT, input_dev->keybit); input_set_events_per_packet(input_dev, 60); } /* report button data as logical button state */ static int report_bt_state(struct bcm5974 *dev, int size) { if (size != sizeof(struct bt_data)) return -EIO; dprintk(7, 
"bcm5974: button data: %x %x %x %x\n", dev->bt_data->unknown1, dev->bt_data->button, dev->bt_data->rel_x, dev->bt_data->rel_y); input_report_key(dev->input, BTN_LEFT, dev->bt_data->button); input_sync(dev->input); return 0; } static void report_finger_data(struct input_dev *input, const struct bcm5974_config *cfg, const struct tp_finger *f) { input_report_abs(input, ABS_MT_TOUCH_MAJOR, raw2int(f->force_major) << 1); input_report_abs(input, ABS_MT_TOUCH_MINOR, raw2int(f->force_minor) << 1); input_report_abs(input, ABS_MT_WIDTH_MAJOR, raw2int(f->size_major) << 1); input_report_abs(input, ABS_MT_WIDTH_MINOR, raw2int(f->size_minor) << 1); input_report_abs(input, ABS_MT_ORIENTATION, MAX_FINGER_ORIENTATION - raw2int(f->orientation)); input_report_abs(input, ABS_MT_POSITION_X, raw2int(f->abs_x)); input_report_abs(input, ABS_MT_POSITION_Y, cfg->y.devmin + cfg->y.devmax - raw2int(f->abs_y)); input_mt_sync(input); } /* report trackpad data as logical trackpad state */ static int report_tp_state(struct bcm5974 *dev, int size) { const struct bcm5974_config *c = &dev->cfg; const struct tp_finger *f; struct input_dev *input = dev->input; int raw_p, raw_w, raw_x, raw_y, raw_n, i; int ptest, origin, ibt = 0, nmin = 0, nmax = 0; int abs_p = 0, abs_w = 0, abs_x = 0, abs_y = 0; if (size < c->tp_offset || (size - c->tp_offset) % SIZEOF_FINGER != 0) return -EIO; /* finger data, le16-aligned */ f = (const struct tp_finger *)(dev->tp_data + c->tp_offset); raw_n = (size - c->tp_offset) / SIZEOF_FINGER; /* always track the first finger; when detached, start over */ if (raw_n) { /* report raw trackpad data */ for (i = 0; i < raw_n; i++) report_finger_data(input, c, &f[i]); raw_p = raw2int(f->force_major); raw_w = raw2int(f->size_major); raw_x = raw2int(f->abs_x); raw_y = raw2int(f->abs_y); dprintk(9, "bcm5974: " "raw: p: %+05d w: %+05d x: %+05d y: %+05d n: %d\n", raw_p, raw_w, raw_x, raw_y, raw_n); ptest = int2bound(&c->p, raw_p); origin = raw2int(f->origin); /* while tracking finger still 
valid, count all fingers */ if (ptest > PRESSURE_LOW && origin) { abs_p = ptest; abs_w = int2bound(&c->w, raw_w); abs_x = int2bound(&c->x, raw_x - c->x.devmin); abs_y = int2bound(&c->y, c->y.devmax - raw_y); while (raw_n--) { ptest = int2bound(&c->p, raw2int(f->force_major)); if (ptest > PRESSURE_LOW) nmax++; if (ptest > PRESSURE_HIGH) nmin++; f++; } } } /* set the integrated button if applicable */ if (c->tp_type == TYPE2) ibt = raw2int(dev->tp_data[BUTTON_TYPE2]); if (dev->fingers < nmin) dev->fingers = nmin; if (dev->fingers > nmax) dev->fingers = nmax; input_report_key(input, BTN_TOUCH, dev->fingers > 0); input_report_key(input, BTN_TOOL_FINGER, dev->fingers == 1); input_report_key(input, BTN_TOOL_DOUBLETAP, dev->fingers == 2); input_report_key(input, BTN_TOOL_TRIPLETAP, dev->fingers == 3); input_report_key(input, BTN_TOOL_QUADTAP, dev->fingers > 3); input_report_abs(input, ABS_PRESSURE, abs_p); input_report_abs(input, ABS_TOOL_WIDTH, abs_w); if (abs_p) { input_report_abs(input, ABS_X, abs_x); input_report_abs(input, ABS_Y, abs_y); dprintk(8, "bcm5974: abs: p: %+05d w: %+05d x: %+05d y: %+05d " "nmin: %d nmax: %d n: %d ibt: %d\n", abs_p, abs_w, abs_x, abs_y, nmin, nmax, dev->fingers, ibt); } /* type 2 reports button events via ibt only */ if (c->tp_type == TYPE2) input_report_key(input, BTN_LEFT, ibt); input_sync(input); return 0; } /* Wellspring initialization constants */ #define BCM5974_WELLSPRING_MODE_READ_REQUEST_ID 1 #define BCM5974_WELLSPRING_MODE_WRITE_REQUEST_ID 9 #define BCM5974_WELLSPRING_MODE_REQUEST_VALUE 0x300 #define BCM5974_WELLSPRING_MODE_REQUEST_INDEX 0 #define BCM5974_WELLSPRING_MODE_VENDOR_VALUE 0x01 #define BCM5974_WELLSPRING_MODE_NORMAL_VALUE 0x08 static int bcm5974_wellspring_mode(struct bcm5974 *dev, bool on) { char *data = kmalloc(8, GFP_KERNEL); int retval = 0, size; if (!data) { err("bcm5974: out of memory"); retval = -ENOMEM; goto out; } /* read configuration */ size = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0), 
BCM5974_WELLSPRING_MODE_READ_REQUEST_ID, USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE, BCM5974_WELLSPRING_MODE_REQUEST_VALUE, BCM5974_WELLSPRING_MODE_REQUEST_INDEX, data, 8, 5000); if (size != 8) { err("bcm5974: could not read from device"); retval = -EIO; goto out; } /* apply the mode switch */ data[0] = on ? BCM5974_WELLSPRING_MODE_VENDOR_VALUE : BCM5974_WELLSPRING_MODE_NORMAL_VALUE; /* write configuration */ size = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), BCM5974_WELLSPRING_MODE_WRITE_REQUEST_ID, USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, BCM5974_WELLSPRING_MODE_REQUEST_VALUE, BCM5974_WELLSPRING_MODE_REQUEST_INDEX, data, 8, 5000); if (size != 8) { err("bcm5974: could not write to device"); retval = -EIO; goto out; } dprintk(2, "bcm5974: switched to %s mode.\n", on ? "wellspring" : "normal"); out: kfree(data); return retval; } static void bcm5974_irq_button(struct urb *urb) { struct bcm5974 *dev = urb->context; int error; switch (urb->status) { case 0: break; case -EOVERFLOW: case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: dbg("bcm5974: button urb shutting down: %d", urb->status); return; default: dbg("bcm5974: button urb status: %d", urb->status); goto exit; } if (report_bt_state(dev, dev->bt_urb->actual_length)) dprintk(1, "bcm5974: bad button package, length: %d\n", dev->bt_urb->actual_length); exit: error = usb_submit_urb(dev->bt_urb, GFP_ATOMIC); if (error) err("bcm5974: button urb failed: %d", error); } static void bcm5974_irq_trackpad(struct urb *urb) { struct bcm5974 *dev = urb->context; int error; switch (urb->status) { case 0: break; case -EOVERFLOW: case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: dbg("bcm5974: trackpad urb shutting down: %d", urb->status); return; default: dbg("bcm5974: trackpad urb status: %d", urb->status); goto exit; } /* control response ignored */ if (dev->tp_urb->actual_length == 2) goto exit; if (report_tp_state(dev, dev->tp_urb->actual_length)) dprintk(1, "bcm5974: bad trackpad package, length: 
%d\n", dev->tp_urb->actual_length); exit: error = usb_submit_urb(dev->tp_urb, GFP_ATOMIC); if (error) err("bcm5974: trackpad urb failed: %d", error); } /* * The Wellspring trackpad, like many recent Apple trackpads, share * the usb device with the keyboard. Since keyboards are usually * handled by the HID system, the device ends up being handled by two * modules. Setting up the device therefore becomes slightly * complicated. To enable multitouch features, a mode switch is * required, which is usually applied via the control interface of the * device. It can be argued where this switch should take place. In * some drivers, like appletouch, the switch is made during * probe. However, the hid module may also alter the state of the * device, resulting in trackpad malfunction under certain * circumstances. To get around this problem, there is at least one * example that utilizes the USB_QUIRK_RESET_RESUME quirk in order to * receive a reset_resume request rather than the normal resume. * Since the implementation of reset_resume is equal to mode switch * plus start_traffic, it seems easier to always do the switch when * starting traffic on the device. */ static int bcm5974_start_traffic(struct bcm5974 *dev) { int error; error = bcm5974_wellspring_mode(dev, true); if (error) { dprintk(1, "bcm5974: mode switch failed\n"); goto err_out; } error = usb_submit_urb(dev->bt_urb, GFP_KERNEL); if (error) goto err_reset_mode; error = usb_submit_urb(dev->tp_urb, GFP_KERNEL); if (error) goto err_kill_bt; return 0; err_kill_bt: usb_kill_urb(dev->bt_urb); err_reset_mode: bcm5974_wellspring_mode(dev, false); err_out: return error; } static void bcm5974_pause_traffic(struct bcm5974 *dev) { usb_kill_urb(dev->tp_urb); usb_kill_urb(dev->bt_urb); bcm5974_wellspring_mode(dev, false); } /* * The code below implements open/close and manual suspend/resume. * All functions may be called in random order. * * Opening a suspended device fails with EACCES - permission denied. 
* * Failing a resume leaves the device resumed but closed. */ static int bcm5974_open(struct input_dev *input) { struct bcm5974 *dev = input_get_drvdata(input); int error; error = usb_autopm_get_interface(dev->intf); if (error) return error; mutex_lock(&dev->pm_mutex); error = bcm5974_start_traffic(dev); if (!error) dev->opened = 1; mutex_unlock(&dev->pm_mutex); if (error) usb_autopm_put_interface(dev->intf); return error; } static void bcm5974_close(struct input_dev *input) { struct bcm5974 *dev = input_get_drvdata(input); mutex_lock(&dev->pm_mutex); bcm5974_pause_traffic(dev); dev->opened = 0; mutex_unlock(&dev->pm_mutex); usb_autopm_put_interface(dev->intf); } static int bcm5974_suspend(struct usb_interface *iface, pm_message_t message) { struct bcm5974 *dev = usb_get_intfdata(iface); mutex_lock(&dev->pm_mutex); if (dev->opened) bcm5974_pause_traffic(dev); mutex_unlock(&dev->pm_mutex); return 0; } static int bcm5974_resume(struct usb_interface *iface) { struct bcm5974 *dev = usb_get_intfdata(iface); int error = 0; mutex_lock(&dev->pm_mutex); if (dev->opened) error = bcm5974_start_traffic(dev); mutex_unlock(&dev->pm_mutex); return error; } static int bcm5974_probe(struct usb_interface *iface, const struct usb_device_id *id) { struct usb_device *udev = interface_to_usbdev(iface); const struct bcm5974_config *cfg; struct bcm5974 *dev; struct input_dev *input_dev; int error = -ENOMEM; /* find the product index */ cfg = bcm5974_get_config(udev); /* allocate memory for our device state and initialize it */ dev = kzalloc(sizeof(struct bcm5974), GFP_KERNEL); input_dev = input_allocate_device(); if (!dev || !input_dev) { err("bcm5974: out of memory"); goto err_free_devs; } dev->udev = udev; dev->intf = iface; dev->input = input_dev; dev->cfg = *cfg; mutex_init(&dev->pm_mutex); /* setup urbs */ dev->bt_urb = usb_alloc_urb(0, GFP_KERNEL); if (!dev->bt_urb) goto err_free_devs; dev->tp_urb = usb_alloc_urb(0, GFP_KERNEL); if (!dev->tp_urb) goto err_free_bt_urb; dev->bt_data = 
usb_alloc_coherent(dev->udev, dev->cfg.bt_datalen, GFP_KERNEL, &dev->bt_urb->transfer_dma); if (!dev->bt_data) goto err_free_urb; dev->tp_data = usb_alloc_coherent(dev->udev, dev->cfg.tp_datalen, GFP_KERNEL, &dev->tp_urb->transfer_dma); if (!dev->tp_data) goto err_free_bt_buffer; usb_fill_int_urb(dev->bt_urb, udev, usb_rcvintpipe(udev, cfg->bt_ep), dev->bt_data, dev->cfg.bt_datalen, bcm5974_irq_button, dev, 1); usb_fill_int_urb(dev->tp_urb, udev, usb_rcvintpipe(udev, cfg->tp_ep), dev->tp_data, dev->cfg.tp_datalen, bcm5974_irq_trackpad, dev, 1); /* create bcm5974 device */ usb_make_path(udev, dev->phys, sizeof(dev->phys)); strlcat(dev->phys, "/input0", sizeof(dev->phys)); input_dev->name = "bcm5974"; input_dev->phys = dev->phys; usb_to_input_id(dev->udev, &input_dev->id); /* report driver capabilities via the version field */ input_dev->id.version = cfg->caps; input_dev->dev.parent = &iface->dev; input_set_drvdata(input_dev, dev); input_dev->open = bcm5974_open; input_dev->close = bcm5974_close; setup_events_to_report(input_dev, cfg); error = input_register_device(dev->input); if (error) goto err_free_buffer; /* save our data pointer in this interface device */ usb_set_intfdata(iface, dev); return 0; err_free_buffer: usb_free_coherent(dev->udev, dev->cfg.tp_datalen, dev->tp_data, dev->tp_urb->transfer_dma); err_free_bt_buffer: usb_free_coherent(dev->udev, dev->cfg.bt_datalen, dev->bt_data, dev->bt_urb->transfer_dma); err_free_urb: usb_free_urb(dev->tp_urb); err_free_bt_urb: usb_free_urb(dev->bt_urb); err_free_devs: usb_set_intfdata(iface, NULL); input_free_device(input_dev); kfree(dev); return error; } static void bcm5974_disconnect(struct usb_interface *iface) { struct bcm5974 *dev = usb_get_intfdata(iface); usb_set_intfdata(iface, NULL); input_unregister_device(dev->input); usb_free_coherent(dev->udev, dev->cfg.tp_datalen, dev->tp_data, dev->tp_urb->transfer_dma); usb_free_coherent(dev->udev, dev->cfg.bt_datalen, dev->bt_data, dev->bt_urb->transfer_dma); 
usb_free_urb(dev->tp_urb); usb_free_urb(dev->bt_urb); kfree(dev); } static struct usb_driver bcm5974_driver = { .name = "bcm5974", .probe = bcm5974_probe, .disconnect = bcm5974_disconnect, .suspend = bcm5974_suspend, .resume = bcm5974_resume, .id_table = bcm5974_table, .supports_autosuspend = 1, }; static int __init bcm5974_init(void) { return usb_register(&bcm5974_driver); } static void __exit bcm5974_exit(void) { usb_deregister(&bcm5974_driver); } module_init(bcm5974_init); module_exit(bcm5974_exit);
gpl-2.0
pawitp/android_kernel_samsung_i9082
sound/soc/blackfin/bf5xx-ssm2602.c
2937
4337
/* * File: sound/soc/blackfin/bf5xx-ssm2602.c * Author: Cliff Cai <Cliff.Cai@analog.com> * * Created: Tue June 06 2008 * Description: board driver for SSM2602 sound chip * * Modified: * Copyright 2008 Analog Devices Inc. * * Bugs: Enter bugs at http://blackfin.uclinux.org/ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, see the file COPYING, or write * to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/device.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/soc.h> #include <sound/pcm_params.h> #include <asm/dma.h> #include <asm/portmux.h> #include <linux/gpio.h> #include "../codecs/ssm2602.h" #include "bf5xx-sport.h" #include "bf5xx-i2s-pcm.h" static struct snd_soc_card bf5xx_ssm2602; static int bf5xx_ssm2602_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_dai *codec_dai = rtd->codec_dai; struct snd_soc_dai *cpu_dai = rtd->cpu_dai; unsigned int clk = 0; int ret = 0; pr_debug("%s rate %d format %x\n", __func__, params_rate(params), params_format(params)); /* * If you are using a crystal source which frequency is not 12MHz * then modify the below case statement with frequency of the crystal. * * If you are using the SPORT to generate clocking then this is * where to do it. 
*/ switch (params_rate(params)) { case 8000: case 16000: case 48000: case 96000: case 11025: case 22050: case 44100: clk = 12000000; break; } /* * CODEC is master for BCLK and LRC in this configuration. */ /* set codec DAI configuration */ ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBM_CFM); if (ret < 0) return ret; /* set cpu DAI configuration */ ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBM_CFM); if (ret < 0) return ret; ret = snd_soc_dai_set_sysclk(codec_dai, SSM2602_SYSCLK, clk, SND_SOC_CLOCK_IN); if (ret < 0) return ret; return 0; } static struct snd_soc_ops bf5xx_ssm2602_ops = { .hw_params = bf5xx_ssm2602_hw_params, }; static struct snd_soc_dai_link bf5xx_ssm2602_dai[] = { { .name = "ssm2602", .stream_name = "SSM2602", .cpu_dai_name = "bfin-i2s.0", .codec_dai_name = "ssm2602-hifi", .platform_name = "bfin-i2s-pcm-audio", .codec_name = "ssm2602.0-001b", .ops = &bf5xx_ssm2602_ops, }, { .name = "ssm2602", .stream_name = "SSM2602", .cpu_dai_name = "bfin-i2s.1", .codec_dai_name = "ssm2602-hifi", .platform_name = "bfin-i2s-pcm-audio", .codec_name = "ssm2602.0-001b", .ops = &bf5xx_ssm2602_ops, }, }; static struct snd_soc_card bf5xx_ssm2602 = { .name = "bfin-ssm2602", .dai_link = &bf5xx_ssm2602_dai[CONFIG_SND_BF5XX_SPORT_NUM], .num_links = 1, }; static struct platform_device *bf5xx_ssm2602_snd_device; static int __init bf5xx_ssm2602_init(void) { int ret; pr_debug("%s enter\n", __func__); bf5xx_ssm2602_snd_device = platform_device_alloc("soc-audio", -1); if (!bf5xx_ssm2602_snd_device) return -ENOMEM; platform_set_drvdata(bf5xx_ssm2602_snd_device, &bf5xx_ssm2602); ret = platform_device_add(bf5xx_ssm2602_snd_device); if (ret) platform_device_put(bf5xx_ssm2602_snd_device); return ret; } static void __exit bf5xx_ssm2602_exit(void) { pr_debug("%s enter\n", __func__); platform_device_unregister(bf5xx_ssm2602_snd_device); } module_init(bf5xx_ssm2602_init); 
module_exit(bf5xx_ssm2602_exit); /* Module information */ MODULE_AUTHOR("Cliff Cai"); MODULE_DESCRIPTION("ALSA SoC SSM2602 BF527-EZKIT"); MODULE_LICENSE("GPL");
gpl-2.0
andi34/kernel_oneplus_msm8974
arch/arm/mach-msm/board-9615-regulator.c
3193
13168
/* * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/regulator/pm8xxx-regulator.h> #include <linux/regulator/msm-gpio-regulator.h> #include <mach/rpm-regulator.h> #include "board-9615.h" #define VREG_CONSUMERS(_id) \ static struct regulator_consumer_supply vreg_consumers_##_id[] /* * Consumer specific regulator names: * regulator name consumer dev_name */ VREG_CONSUMERS(L2) = { REGULATOR_SUPPLY("8018_l2", NULL), REGULATOR_SUPPLY("HSUSB_1p8", "msm_otg"), }; VREG_CONSUMERS(L3) = { REGULATOR_SUPPLY("8018_l3", NULL), }; VREG_CONSUMERS(L4) = { REGULATOR_SUPPLY("8018_l4", NULL), REGULATOR_SUPPLY("HSUSB_3p3", "msm_otg"), }; VREG_CONSUMERS(L5) = { REGULATOR_SUPPLY("8018_l5", NULL), }; VREG_CONSUMERS(L6) = { REGULATOR_SUPPLY("8018_l6", NULL), }; VREG_CONSUMERS(L7) = { REGULATOR_SUPPLY("8018_l7", NULL), }; VREG_CONSUMERS(L8) = { REGULATOR_SUPPLY("8018_l8", NULL), }; VREG_CONSUMERS(L9) = { REGULATOR_SUPPLY("8018_l9", NULL), }; VREG_CONSUMERS(L10) = { REGULATOR_SUPPLY("8018_l10", NULL), }; VREG_CONSUMERS(L11) = { REGULATOR_SUPPLY("8018_l11", NULL), }; VREG_CONSUMERS(L12) = { REGULATOR_SUPPLY("8018_l12", NULL), }; VREG_CONSUMERS(L13) = { REGULATOR_SUPPLY("8018_l13", NULL), REGULATOR_SUPPLY("sdc_vdd_io", "msm_sdcc.1"), }; VREG_CONSUMERS(L14) = { REGULATOR_SUPPLY("8018_l14", NULL), REGULATOR_SUPPLY("VDDI2", "ebi2_lcd.0"), }; VREG_CONSUMERS(S1) = { REGULATOR_SUPPLY("8018_s1", NULL), }; VREG_CONSUMERS(S2) = { REGULATOR_SUPPLY("8018_s2", NULL), REGULATOR_SUPPLY("CDC_VDDA_A_1P2V", "tabla-slim"), 
REGULATOR_SUPPLY("CDC_VDDA_A_1P2V", "tabla2x-slim"), REGULATOR_SUPPLY("VDDD_CDC_D", "tabla-slim"), REGULATOR_SUPPLY("VDDD_CDC_D", "tabla2x-slim"), REGULATOR_SUPPLY("VDDD_CDC_D", "0-000d"), REGULATOR_SUPPLY("CDC_VDDA_A_1P2V", "0-000d"), REGULATOR_SUPPLY("VDDD_CDC_D", "tabla top level"), REGULATOR_SUPPLY("CDC_VDDA_A_1P2V", "tabla top level"), }; VREG_CONSUMERS(S3) = { REGULATOR_SUPPLY("8018_s3", NULL), REGULATOR_SUPPLY("wlan_vreg", "wlan_ar6000_pm_dev"), REGULATOR_SUPPLY("CDC_VDD_CP", "tabla-slim"), REGULATOR_SUPPLY("CDC_VDD_CP", "tabla2x-slim"), REGULATOR_SUPPLY("CDC_VDDA_RX", "tabla-slim"), REGULATOR_SUPPLY("CDC_VDDA_RX", "tabla2x-slim"), REGULATOR_SUPPLY("CDC_VDDA_TX", "tabla-slim"), REGULATOR_SUPPLY("CDC_VDDA_TX", "tabla2x-slim"), REGULATOR_SUPPLY("VDDIO_CDC", "tabla-slim"), REGULATOR_SUPPLY("VDDIO_CDC", "tabla2x-slim"), REGULATOR_SUPPLY("VDDIO_CDC", "tabla top level"), REGULATOR_SUPPLY("CDC_VDD_CP", "tabla top level"), REGULATOR_SUPPLY("CDC_VDDA_TX", "tabla top level"), REGULATOR_SUPPLY("CDC_VDDA_RX", "tabla top level"), REGULATOR_SUPPLY("VDDIO_CDC", "0-000d"), REGULATOR_SUPPLY("CDC_VDD_CP", "0-000d"), REGULATOR_SUPPLY("CDC_VDDA_TX", "0-000d"), REGULATOR_SUPPLY("CDC_VDDA_RX", "0-000d"), }; VREG_CONSUMERS(S4) = { REGULATOR_SUPPLY("8018_s4", NULL), }; VREG_CONSUMERS(S5) = { REGULATOR_SUPPLY("8018_s5", NULL), }; VREG_CONSUMERS(LVS1) = { REGULATOR_SUPPLY("8018_lvs1", NULL), }; VREG_CONSUMERS(EXT_2P95V) = { REGULATOR_SUPPLY("ext_2p95v", NULL), REGULATOR_SUPPLY("sdc_vdd", "msm_sdcc.1"), }; VREG_CONSUMERS(VDD_DIG_CORNER) = { REGULATOR_SUPPLY("hsusb_vdd_dig", "msm_otg"), REGULATOR_SUPPLY("hsic_vdd_dig", "msm_hsic_peripheral"), REGULATOR_SUPPLY("hsic_vdd_dig", "msm_hsic_host"), }; #define PM8XXX_VREG_INIT(_id, _name, _min_uV, _max_uV, _modes, _ops, \ _apply_uV, _pull_down, _always_on, _supply_regulator, \ _system_uA, _enable_time, _reg_id) \ { \ .init_data = { \ .constraints = { \ .valid_modes_mask = _modes, \ .valid_ops_mask = _ops, \ .min_uV = _min_uV, \ .max_uV = 
_max_uV, \ .input_uV = _max_uV, \ .apply_uV = _apply_uV, \ .always_on = _always_on, \ .name = _name, \ }, \ .num_consumer_supplies = \ ARRAY_SIZE(vreg_consumers_##_id), \ .consumer_supplies = vreg_consumers_##_id, \ .supply_regulator = _supply_regulator, \ }, \ .id = _reg_id, \ .pull_down_enable = _pull_down, \ .system_uA = _system_uA, \ .enable_time = _enable_time, \ } #define PM8XXX_LDO(_id, _name, _always_on, _pull_down, _min_uV, _max_uV, \ _enable_time, _supply_regulator, _system_uA, _reg_id) \ PM8XXX_VREG_INIT(_id, _name, _min_uV, _max_uV, REGULATOR_MODE_NORMAL \ | REGULATOR_MODE_IDLE, REGULATOR_CHANGE_VOLTAGE | \ REGULATOR_CHANGE_STATUS | REGULATOR_CHANGE_MODE | \ REGULATOR_CHANGE_DRMS, 0, _pull_down, _always_on, \ _supply_regulator, _system_uA, _enable_time, _reg_id) #define PM8XXX_NLDO1200(_id, _name, _always_on, _pull_down, _min_uV, \ _max_uV, _enable_time, _supply_regulator, _system_uA, _reg_id) \ PM8XXX_VREG_INIT(_id, _name, _min_uV, _max_uV, REGULATOR_MODE_NORMAL \ | REGULATOR_MODE_IDLE, REGULATOR_CHANGE_VOLTAGE | \ REGULATOR_CHANGE_STATUS | REGULATOR_CHANGE_MODE | \ REGULATOR_CHANGE_DRMS, 0, _pull_down, _always_on, \ _supply_regulator, _system_uA, _enable_time, _reg_id) #define PM8XXX_SMPS(_id, _name, _always_on, _pull_down, _min_uV, _max_uV, \ _enable_time, _supply_regulator, _system_uA, _reg_id) \ PM8XXX_VREG_INIT(_id, _name, _min_uV, _max_uV, REGULATOR_MODE_NORMAL \ | REGULATOR_MODE_IDLE, REGULATOR_CHANGE_VOLTAGE | \ REGULATOR_CHANGE_STATUS | REGULATOR_CHANGE_MODE | \ REGULATOR_CHANGE_DRMS, 0, _pull_down, _always_on, \ _supply_regulator, _system_uA, _enable_time, _reg_id) #define PM8XXX_VS(_id, _name, _always_on, _pull_down, _enable_time, \ _supply_regulator, _reg_id) \ PM8XXX_VREG_INIT(_id, _name, 0, 0, 0, REGULATOR_CHANGE_STATUS, 0, \ _pull_down, _always_on, _supply_regulator, 0, _enable_time, \ _reg_id) /* Pin control initialization */ #define PM8XXX_PC(_id, _name, _always_on, _pin_fn, _pin_ctrl, \ _supply_regulator, _reg_id) \ { \ .init_data = { 
\ .constraints = { \ .valid_ops_mask = REGULATOR_CHANGE_STATUS, \ .always_on = _always_on, \ .name = _name, \ }, \ .num_consumer_supplies = \ ARRAY_SIZE(vreg_consumers_##_id##_PC), \ .consumer_supplies = vreg_consumers_##_id##_PC, \ .supply_regulator = _supply_regulator, \ }, \ .id = _reg_id, \ .pin_fn = PM8XXX_VREG_PIN_FN_##_pin_fn, \ .pin_ctrl = _pin_ctrl, \ } #define RPM_INIT(_id, _min_uV, _max_uV, _modes, _ops, _apply_uV, _default_uV, \ _peak_uA, _avg_uA, _pull_down, _pin_ctrl, _freq, _pin_fn, \ _force_mode, _sleep_set_force_mode, _power_mode, _state, \ _sleep_selectable, _always_on, _supply_regulator, _system_uA) \ { \ .init_data = { \ .constraints = { \ .valid_modes_mask = _modes, \ .valid_ops_mask = _ops, \ .min_uV = _min_uV, \ .max_uV = _max_uV, \ .input_uV = _min_uV, \ .apply_uV = _apply_uV, \ .always_on = _always_on, \ }, \ .num_consumer_supplies = \ ARRAY_SIZE(vreg_consumers_##_id), \ .consumer_supplies = vreg_consumers_##_id, \ .supply_regulator = _supply_regulator, \ }, \ .id = RPM_VREG_ID_PM8018_##_id, \ .default_uV = _default_uV, \ .peak_uA = _peak_uA, \ .avg_uA = _avg_uA, \ .pull_down_enable = _pull_down, \ .pin_ctrl = _pin_ctrl, \ .freq = RPM_VREG_FREQ_##_freq, \ .pin_fn = _pin_fn, \ .force_mode = _force_mode, \ .sleep_set_force_mode = _sleep_set_force_mode, \ .power_mode = _power_mode, \ .state = _state, \ .sleep_selectable = _sleep_selectable, \ .system_uA = _system_uA, \ } #define RPM_LDO(_id, _always_on, _pd, _sleep_selectable, _min_uV, _max_uV, \ _supply_regulator, _system_uA, _init_peak_uA) \ RPM_INIT(_id, _min_uV, _max_uV, REGULATOR_MODE_NORMAL \ | REGULATOR_MODE_IDLE, REGULATOR_CHANGE_VOLTAGE \ | REGULATOR_CHANGE_STATUS | REGULATOR_CHANGE_MODE \ | REGULATOR_CHANGE_DRMS, 0, _max_uV, _init_peak_uA, 0, _pd, \ RPM_VREG_PIN_CTRL_NONE, NONE, RPM_VREG_PIN_FN_9615_NONE, \ RPM_VREG_FORCE_MODE_9615_NONE, \ RPM_VREG_FORCE_MODE_9615_NONE, RPM_VREG_POWER_MODE_9615_PWM, \ RPM_VREG_STATE_OFF, _sleep_selectable, _always_on, \ _supply_regulator, _system_uA) 
#define RPM_SMPS(_id, _always_on, _pd, _sleep_selectable, _min_uV, _max_uV, \ _supply_regulator, _system_uA, _freq) \ RPM_INIT(_id, _min_uV, _max_uV, REGULATOR_MODE_NORMAL \ | REGULATOR_MODE_IDLE, REGULATOR_CHANGE_VOLTAGE \ | REGULATOR_CHANGE_STATUS | REGULATOR_CHANGE_MODE \ | REGULATOR_CHANGE_DRMS, 0, _max_uV, _system_uA, 0, _pd, \ RPM_VREG_PIN_CTRL_NONE, _freq, RPM_VREG_PIN_FN_9615_NONE, \ RPM_VREG_FORCE_MODE_9615_NONE, \ RPM_VREG_FORCE_MODE_9615_NONE, RPM_VREG_POWER_MODE_9615_PWM, \ RPM_VREG_STATE_OFF, _sleep_selectable, _always_on, \ _supply_regulator, _system_uA) #define RPM_VS(_id, _always_on, _pd, _sleep_selectable, _supply_regulator) \ RPM_INIT(_id, 0, 0, 0, REGULATOR_CHANGE_STATUS, 0, 0, 1000, 1000, _pd, \ RPM_VREG_PIN_CTRL_NONE, NONE, RPM_VREG_PIN_FN_9615_NONE, \ RPM_VREG_FORCE_MODE_9615_NONE, \ RPM_VREG_FORCE_MODE_9615_NONE, RPM_VREG_POWER_MODE_9615_PWM, \ RPM_VREG_STATE_OFF, _sleep_selectable, _always_on, \ _supply_regulator, 0) #define RPM_CORNER(_id, _always_on, _sleep_selectable, _min_uV, _max_uV, \ _supply_regulator) \ RPM_INIT(_id, _min_uV, _max_uV, 0, REGULATOR_CHANGE_VOLTAGE \ | REGULATOR_CHANGE_STATUS, 0, _max_uV, 0, 0, 0, \ RPM_VREG_PIN_CTRL_NONE, NONE, RPM_VREG_PIN_FN_9615_NONE, \ RPM_VREG_FORCE_MODE_9615_NONE, \ RPM_VREG_FORCE_MODE_9615_NONE, RPM_VREG_POWER_MODE_9615_PWM, \ RPM_VREG_STATE_OFF, _sleep_selectable, _always_on, \ _supply_regulator, 0) /* Pin control initialization */ #define RPM_PC_INIT(_id, _always_on, _pin_fn, _pin_ctrl, _supply_regulator) \ { \ .init_data = { \ .constraints = { \ .valid_ops_mask = REGULATOR_CHANGE_STATUS, \ .always_on = _always_on, \ }, \ .num_consumer_supplies = \ ARRAY_SIZE(vreg_consumers_##_id##_PC), \ .consumer_supplies = vreg_consumers_##_id##_PC, \ .supply_regulator = _supply_regulator, \ }, \ .id = RPM_VREG_ID_PM8018_##_id##_PC, \ .pin_fn = RPM_VREG_PIN_FN_9615_##_pin_fn, \ .pin_ctrl = _pin_ctrl, \ } #define GPIO_VREG_INIT(_id, _reg_name, _gpio_label, _gpio) \ [GPIO_VREG_ID_##_id] = { \ .init_data = { \ 
.constraints = { \ .valid_ops_mask = REGULATOR_CHANGE_STATUS, \ }, \ .num_consumer_supplies = \ ARRAY_SIZE(vreg_consumers_##_id), \ .consumer_supplies = vreg_consumers_##_id, \ }, \ .regulator_name = _reg_name, \ .gpio_label = _gpio_label, \ .gpio = _gpio, \ } /* GPIO regulator constraints */ struct gpio_regulator_platform_data msm_gpio_regulator_pdata[] = { GPIO_VREG_INIT(EXT_2P95V, "ext_2p95v", "ext_2p95_en", 18), }; /* PM8018 regulator constraints */ struct pm8xxx_regulator_platform_data msm_pm8018_regulator_pdata[] __devinitdata = { }; static struct rpm_regulator_init_data msm_rpm_regulator_init_data[] __devinitdata = { /* ID a_on pd ss min_uV max_uV supply sys_uA freq */ RPM_SMPS(S1, 0, 1, 1, 500000, 1150000, NULL, 100000, 1p60), RPM_SMPS(S2, 0, 1, 0, 1225000, 1300000, NULL, 0, 1p60), RPM_SMPS(S3, 1, 1, 0, 1800000, 1800000, NULL, 100000, 1p60), RPM_SMPS(S4, 0, 1, 0, 2100000, 2200000, NULL, 0, 1p60), RPM_SMPS(S5, 1, 1, 0, 1350000, 1350000, NULL, 100000, 1p60), /* ID a_on pd ss min_uV max_uV supply sys_uA init_ip */ RPM_LDO(L2, 1, 1, 0, 1800000, 1800000, NULL, 0, 10000), RPM_LDO(L3, 1, 1, 0, 1800000, 1800000, NULL, 0, 0), RPM_LDO(L4, 0, 1, 0, 3075000, 3075000, NULL, 0, 0), RPM_LDO(L5, 0, 1, 0, 2850000, 2850000, NULL, 0, 0), RPM_LDO(L6, 0, 1, 0, 1800000, 2850000, NULL, 0, 0), RPM_LDO(L7, 0, 1, 0, 1850000, 1900000, "8018_s4", 0, 0), RPM_LDO(L8, 0, 1, 0, 1200000, 1200000, "8018_s3", 0, 0), RPM_LDO(L9, 0, 1, 1, 750000, 1150000, "8018_s5", 10000, 10000), RPM_LDO(L10, 0, 1, 0, 1050000, 1050000, "8018_s5", 0, 0), RPM_LDO(L11, 0, 1, 0, 1050000, 1050000, "8018_s5", 0, 0), RPM_LDO(L12, 0, 1, 0, 1050000, 1050000, "8018_s5", 0, 0), RPM_LDO(L13, 0, 1, 0, 1850000, 2950000, NULL, 0, 0), RPM_LDO(L14, 0, 1, 0, 2850000, 2850000, NULL, 0, 0), /* ID a_on pd ss supply */ RPM_VS(LVS1, 0, 1, 0, "8018_s3"), /* ID a_on ss min_corner max_corner supply */ RPM_CORNER(VDD_DIG_CORNER, 0, 1, RPM_VREG_CORNER_NONE, RPM_VREG_CORNER_HIGH, NULL), }; int msm_pm8018_regulator_pdata_len __devinitdata 
= ARRAY_SIZE(msm_pm8018_regulator_pdata); struct rpm_regulator_platform_data msm_rpm_regulator_9615_pdata __devinitdata = { .init_data = msm_rpm_regulator_init_data, .num_regulators = ARRAY_SIZE(msm_rpm_regulator_init_data), .version = RPM_VREG_VERSION_9615, .vreg_id_vdd_mem = RPM_VREG_ID_PM8018_L9, .vreg_id_vdd_dig = RPM_VREG_ID_PM8018_VDD_DIG_CORNER, };
gpl-2.0
Renzo-Olivares/android_kernel_htc_m7-gpe
arch/alpha/mm/fault.c
3961
5797
/* * linux/arch/alpha/mm/fault.c * * Copyright (C) 1995 Linus Torvalds */ #include <linux/sched.h> #include <linux/kernel.h> #include <linux/mm.h> #include <asm/io.h> #define __EXTERN_INLINE inline #include <asm/mmu_context.h> #include <asm/tlbflush.h> #undef __EXTERN_INLINE #include <linux/signal.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/types.h> #include <linux/ptrace.h> #include <linux/mman.h> #include <linux/smp.h> #include <linux/interrupt.h> #include <linux/module.h> #include <asm/uaccess.h> extern void die_if_kernel(char *,struct pt_regs *,long, unsigned long *); /* * Force a new ASN for a task. */ #ifndef CONFIG_SMP unsigned long last_asn = ASN_FIRST_VERSION; #endif void __load_new_mm_context(struct mm_struct *next_mm) { unsigned long mmc; struct pcb_struct *pcb; mmc = __get_new_mm_context(next_mm, smp_processor_id()); next_mm->context[smp_processor_id()] = mmc; pcb = &current_thread_info()->pcb; pcb->asn = mmc & HARDWARE_ASN_MASK; pcb->ptbr = ((unsigned long) next_mm->pgd - IDENT_ADDR) >> PAGE_SHIFT; __reload_thread(pcb); } /* * This routine handles page faults. It determines the address, * and the problem, and then passes it off to handle_mm_fault(). * * mmcsr: * 0 = translation not valid * 1 = access violation * 2 = fault-on-read * 3 = fault-on-execute * 4 = fault-on-write * * cause: * -1 = instruction fetch * 0 = load * 1 = store * * Registers $9 through $15 are saved in a block just prior to `regs' and * are saved and restored around the call to allow exception code to * modify them. */ /* Macro for exception fixup code to access integer registers. */ #define dpf_reg(r) \ (((unsigned long *)regs)[(r) <= 8 ? (r) : (r) <= 15 ? (r)-16 : \ (r) <= 18 ? 
(r)+8 : (r)-10]) asmlinkage void do_page_fault(unsigned long address, unsigned long mmcsr, long cause, struct pt_regs *regs) { struct vm_area_struct * vma; struct mm_struct *mm = current->mm; const struct exception_table_entry *fixup; int fault, si_code = SEGV_MAPERR; siginfo_t info; /* As of EV6, a load into $31/$f31 is a prefetch, and never faults (or is suppressed by the PALcode). Support that for older CPUs by ignoring such an instruction. */ if (cause == 0) { unsigned int insn; __get_user(insn, (unsigned int __user *)regs->pc); if ((insn >> 21 & 0x1f) == 0x1f && /* ldq ldl ldt lds ldg ldf ldwu ldbu */ (1ul << (insn >> 26) & 0x30f00001400ul)) { regs->pc += 4; return; } } /* If we're in an interrupt context, or have no user context, we must not take the fault. */ if (!mm || in_atomic()) goto no_context; #ifdef CONFIG_ALPHA_LARGE_VMALLOC if (address >= TASK_SIZE) goto vmalloc_fault; #endif down_read(&mm->mmap_sem); vma = find_vma(mm, address); if (!vma) goto bad_area; if (vma->vm_start <= address) goto good_area; if (!(vma->vm_flags & VM_GROWSDOWN)) goto bad_area; if (expand_stack(vma, address)) goto bad_area; /* Ok, we have a good vm_area for this memory access, so we can handle it. */ good_area: si_code = SEGV_ACCERR; if (cause < 0) { if (!(vma->vm_flags & VM_EXEC)) goto bad_area; } else if (!cause) { /* Allow reads even for write-only mappings */ if (!(vma->vm_flags & (VM_READ | VM_WRITE))) goto bad_area; } else { if (!(vma->vm_flags & VM_WRITE)) goto bad_area; } /* If for any reason at all we couldn't handle the fault, make sure we exit gracefully rather than endlessly redo the fault. */ fault = handle_mm_fault(mm, vma, address, cause > 0 ? 
FAULT_FLAG_WRITE : 0); up_read(&mm->mmap_sem); if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) goto out_of_memory; else if (fault & VM_FAULT_SIGBUS) goto do_sigbus; BUG(); } if (fault & VM_FAULT_MAJOR) current->maj_flt++; else current->min_flt++; return; /* Something tried to access memory that isn't in our memory map. Fix it, but check if it's kernel or user first. */ bad_area: up_read(&mm->mmap_sem); if (user_mode(regs)) goto do_sigsegv; no_context: /* Are we prepared to handle this fault as an exception? */ if ((fixup = search_exception_tables(regs->pc)) != 0) { unsigned long newpc; newpc = fixup_exception(dpf_reg, fixup, regs->pc); regs->pc = newpc; return; } /* Oops. The kernel tried to access some bad page. We'll have to terminate things with extreme prejudice. */ printk(KERN_ALERT "Unable to handle kernel paging request at " "virtual address %016lx\n", address); die_if_kernel("Oops", regs, cause, (unsigned long*)regs - 16); do_exit(SIGKILL); /* We ran out of memory, or some other thing happened to us that made us unable to handle the page fault gracefully. */ out_of_memory: if (!user_mode(regs)) goto no_context; pagefault_out_of_memory(); return; do_sigbus: /* Send a sigbus, regardless of whether we were in kernel or user mode. */ info.si_signo = SIGBUS; info.si_errno = 0; info.si_code = BUS_ADRERR; info.si_addr = (void __user *) address; force_sig_info(SIGBUS, &info, current); if (!user_mode(regs)) goto no_context; return; do_sigsegv: info.si_signo = SIGSEGV; info.si_errno = 0; info.si_code = si_code; info.si_addr = (void __user *) address; force_sig_info(SIGSEGV, &info, current); return; #ifdef CONFIG_ALPHA_LARGE_VMALLOC vmalloc_fault: if (user_mode(regs)) goto do_sigsegv; else { /* Synchronize this task's top level page-table with the "reference" page table from init. 
*/ long index = pgd_index(address); pgd_t *pgd, *pgd_k; pgd = current->active_mm->pgd + index; pgd_k = swapper_pg_dir + index; if (!pgd_present(*pgd) && pgd_present(*pgd_k)) { pgd_val(*pgd) = pgd_val(*pgd_k); return; } goto no_context; } #endif }
gpl-2.0
Lenovo-Kraft-A6000/android_kernel_lenovo_msm8916
arch/um/drivers/slirp_kern.c
4729
2652
/* * Copyright (C) 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) * Licensed under the GPL. */ #include <linux/if_arp.h> #include <linux/init.h> #include <linux/netdevice.h> #include <linux/string.h> #include <net_kern.h> #include <net_user.h> #include "slirp.h" struct slirp_init { struct arg_list_dummy_wrapper argw; /* XXX should be simpler... */ }; void slirp_init(struct net_device *dev, void *data) { struct uml_net_private *private; struct slirp_data *spri; struct slirp_init *init = data; int i; private = netdev_priv(dev); spri = (struct slirp_data *) private->user; spri->argw = init->argw; spri->pid = -1; spri->slave = -1; spri->dev = dev; slip_proto_init(&spri->slip); dev->hard_header_len = 0; dev->header_ops = NULL; dev->addr_len = 0; dev->type = ARPHRD_SLIP; dev->tx_queue_len = 256; dev->flags = IFF_NOARP; printk("SLIRP backend - command line:"); for (i = 0; spri->argw.argv[i] != NULL; i++) printk(" '%s'",spri->argw.argv[i]); printk("\n"); } static unsigned short slirp_protocol(struct sk_buff *skbuff) { return htons(ETH_P_IP); } static int slirp_read(int fd, struct sk_buff *skb, struct uml_net_private *lp) { return slirp_user_read(fd, skb_mac_header(skb), skb->dev->mtu, (struct slirp_data *) &lp->user); } static int slirp_write(int fd, struct sk_buff *skb, struct uml_net_private *lp) { return slirp_user_write(fd, skb->data, skb->len, (struct slirp_data *) &lp->user); } const struct net_kern_info slirp_kern_info = { .init = slirp_init, .protocol = slirp_protocol, .read = slirp_read, .write = slirp_write, }; static int slirp_setup(char *str, char **mac_out, void *data) { struct slirp_init *init = data; int i=0; *init = ((struct slirp_init) { .argw = { { "slirp", NULL } } }); str = split_if_spec(str, mac_out, NULL); if (str == NULL) /* no command line given after MAC addr */ return 1; do { if (i >= SLIRP_MAX_ARGS - 1) { printk(KERN_WARNING "slirp_setup: truncating slirp " "arguments\n"); break; } init->argw.argv[i++] = str; while(*str && *str!=',') { if (*str 
== '_') *str=' '; str++; } if (*str != ',') break; *str++ = '\0'; } while (1); init->argw.argv[i] = NULL; return 1; } static struct transport slirp_transport = { .list = LIST_HEAD_INIT(slirp_transport.list), .name = "slirp", .setup = slirp_setup, .user = &slirp_user_info, .kern = &slirp_kern_info, .private_size = sizeof(struct slirp_data), .setup_size = sizeof(struct slirp_init), }; static int register_slirp(void) { register_transport(&slirp_transport); return 0; } late_initcall(register_slirp);
gpl-2.0
D2005-devs/cafkernel2-old
drivers/input/serio/q40kbd.c
4985
4917
/* * Copyright (c) 2000-2001 Vojtech Pavlik * * Based on the work of: * Richard Zidlicky <Richard.Zidlicky@stud.informatik.uni-erlangen.de> */ /* * Q40 PS/2 keyboard controller driver for Linux/m68k */ /* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Should you need to contact me, the author, you can do so either by * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail: * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic */ #include <linux/module.h> #include <linux/init.h> #include <linux/serio.h> #include <linux/interrupt.h> #include <linux/err.h> #include <linux/bitops.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <asm/io.h> #include <asm/uaccess.h> #include <asm/q40_master.h> #include <asm/irq.h> #include <asm/q40ints.h> #define DRV_NAME "q40kbd" MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>"); MODULE_DESCRIPTION("Q40 PS/2 keyboard controller driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:" DRV_NAME); struct q40kbd { struct serio *port; spinlock_t lock; }; static irqreturn_t q40kbd_interrupt(int irq, void *dev_id) { struct q40kbd *q40kbd = dev_id; unsigned long flags; spin_lock_irqsave(&q40kbd->lock, flags); if (Q40_IRQ_KEYB_MASK & master_inb(INTERRUPT_REG)) serio_interrupt(q40kbd->port, master_inb(KEYCODE_REG), 0); master_outb(-1, KEYBOARD_UNLOCK_REG); 
spin_unlock_irqrestore(&q40kbd->lock, flags); return IRQ_HANDLED; } /* * q40kbd_flush() flushes all data that may be in the keyboard buffers */ static void q40kbd_flush(struct q40kbd *q40kbd) { int maxread = 100; unsigned long flags; spin_lock_irqsave(&q40kbd->lock, flags); while (maxread-- && (Q40_IRQ_KEYB_MASK & master_inb(INTERRUPT_REG))) master_inb(KEYCODE_REG); spin_unlock_irqrestore(&q40kbd->lock, flags); } static void q40kbd_stop(void) { master_outb(0, KEY_IRQ_ENABLE_REG); master_outb(-1, KEYBOARD_UNLOCK_REG); } /* * q40kbd_open() is called when a port is open by the higher layer. * It allocates the interrupt and enables in in the chip. */ static int q40kbd_open(struct serio *port) { struct q40kbd *q40kbd = port->port_data; q40kbd_flush(q40kbd); /* off we go */ master_outb(-1, KEYBOARD_UNLOCK_REG); master_outb(1, KEY_IRQ_ENABLE_REG); return 0; } static void q40kbd_close(struct serio *port) { struct q40kbd *q40kbd = port->port_data; q40kbd_stop(); q40kbd_flush(q40kbd); } static int __devinit q40kbd_probe(struct platform_device *pdev) { struct q40kbd *q40kbd; struct serio *port; int error; q40kbd = kzalloc(sizeof(struct q40kbd), GFP_KERNEL); port = kzalloc(sizeof(struct serio), GFP_KERNEL); if (!q40kbd || !port) { error = -ENOMEM; goto err_free_mem; } q40kbd->port = port; spin_lock_init(&q40kbd->lock); port->id.type = SERIO_8042; port->open = q40kbd_open; port->close = q40kbd_close; port->port_data = q40kbd; port->dev.parent = &pdev->dev; strlcpy(port->name, "Q40 Kbd Port", sizeof(port->name)); strlcpy(port->phys, "Q40", sizeof(port->phys)); q40kbd_stop(); error = request_irq(Q40_IRQ_KEYBOARD, q40kbd_interrupt, 0, DRV_NAME, q40kbd); if (error) { dev_err(&pdev->dev, "Can't get irq %d.\n", Q40_IRQ_KEYBOARD); goto err_free_mem; } serio_register_port(q40kbd->port); platform_set_drvdata(pdev, q40kbd); printk(KERN_INFO "serio: Q40 kbd registered\n"); return 0; err_free_mem: kfree(port); kfree(q40kbd); return error; } static int __devexit q40kbd_remove(struct 
platform_device *pdev) { struct q40kbd *q40kbd = platform_get_drvdata(pdev); /* * q40kbd_close() will be called as part of unregistering * and will ensure that IRQ is turned off, so it is safe * to unregister port first and free IRQ later. */ serio_unregister_port(q40kbd->port); free_irq(Q40_IRQ_KEYBOARD, q40kbd); kfree(q40kbd); platform_set_drvdata(pdev, NULL); return 0; } static struct platform_driver q40kbd_driver = { .driver = { .name = "q40kbd", .owner = THIS_MODULE, }, .remove = __devexit_p(q40kbd_remove), }; static int __init q40kbd_init(void) { return platform_driver_probe(&q40kbd_driver, q40kbd_probe); } static void __exit q40kbd_exit(void) { platform_driver_unregister(&q40kbd_driver); } module_init(q40kbd_init); module_exit(q40kbd_exit);
gpl-2.0
sebirdman/m7_kernel
drivers/isdn/hisax/avma1_cs.c
4985
4146
/* * PCMCIA client driver for AVM A1 / Fritz!PCMCIA * * Author Carsten Paeth * Copyright 1998-2001 by Carsten Paeth <calle@calle.in-berlin.de> * * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference. * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/ptrace.h> #include <linux/slab.h> #include <linux/string.h> #include <asm/io.h> #include <pcmcia/cistpl.h> #include <pcmcia/ds.h> #include "hisax_cfg.h" MODULE_DESCRIPTION("ISDN4Linux: PCMCIA client driver for AVM A1/Fritz!PCMCIA cards"); MODULE_AUTHOR("Carsten Paeth"); MODULE_LICENSE("GPL"); /*====================================================================*/ /* Parameters that can be set with 'insmod' */ static int isdnprot = 2; module_param(isdnprot, int, 0); /*====================================================================*/ static int avma1cs_config(struct pcmcia_device *link) __devinit; static void avma1cs_release(struct pcmcia_device *link); static void avma1cs_detach(struct pcmcia_device *p_dev) __devexit; static int __devinit avma1cs_probe(struct pcmcia_device *p_dev) { dev_dbg(&p_dev->dev, "avma1cs_attach()\n"); /* General socket configuration */ p_dev->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO; p_dev->config_index = 1; p_dev->config_regs = PRESENT_OPTION; return avma1cs_config(p_dev); } /* avma1cs_attach */ static void __devexit avma1cs_detach(struct pcmcia_device *link) { dev_dbg(&link->dev, "avma1cs_detach(0x%p)\n", link); avma1cs_release(link); kfree(link->priv); } /* avma1cs_detach */ static int avma1cs_configcheck(struct pcmcia_device *p_dev, void *priv_data) { p_dev->resource[0]->end = 16; p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8; p_dev->io_lines = 5; return pcmcia_request_io(p_dev); } static int __devinit avma1cs_config(struct pcmcia_device *link) { int i = -1; char devname[128]; IsdnCard_t icard; 
int busy = 0; dev_dbg(&link->dev, "avma1cs_config(0x%p)\n", link); devname[0] = 0; if (link->prod_id[1]) strlcpy(devname, link->prod_id[1], sizeof(devname)); if (pcmcia_loop_config(link, avma1cs_configcheck, NULL)) return -ENODEV; do { /* * allocate an interrupt line */ if (!link->irq) { /* undo */ pcmcia_disable_device(link); break; } /* * configure the PCMCIA socket */ i = pcmcia_enable_device(link); if (i != 0) { pcmcia_disable_device(link); break; } } while (0); /* If any step failed, release any partially configured state */ if (i != 0) { avma1cs_release(link); return -ENODEV; } icard.para[0] = link->irq; icard.para[1] = link->resource[0]->start; icard.protocol = isdnprot; icard.typ = ISDN_CTYPE_A1_PCMCIA; i = hisax_init_pcmcia(link, &busy, &icard); if (i < 0) { printk(KERN_ERR "avma1_cs: failed to initialize AVM A1 " "PCMCIA %d at i/o %#x\n", i, (unsigned int) link->resource[0]->start); avma1cs_release(link); return -ENODEV; } link->priv = (void *) (unsigned long) i; return 0; } /* avma1cs_config */ static void avma1cs_release(struct pcmcia_device *link) { unsigned long minor = (unsigned long) link->priv; dev_dbg(&link->dev, "avma1cs_release(0x%p)\n", link); /* now unregister function with hisax */ HiSax_closecard(minor); pcmcia_disable_device(link); } /* avma1cs_release */ static const struct pcmcia_device_id avma1cs_ids[] = { PCMCIA_DEVICE_PROD_ID12("AVM", "ISDN A", 0x95d42008, 0xadc9d4bb), PCMCIA_DEVICE_PROD_ID12("ISDN", "CARD", 0x8d9761c8, 0x01c5aa7b), PCMCIA_DEVICE_NULL }; MODULE_DEVICE_TABLE(pcmcia, avma1cs_ids); static struct pcmcia_driver avma1cs_driver = { .owner = THIS_MODULE, .name = "avma1_cs", .probe = avma1cs_probe, .remove = __devexit_p(avma1cs_detach), .id_table = avma1cs_ids, }; static int __init init_avma1_cs(void) { return pcmcia_register_driver(&avma1cs_driver); } static void __exit exit_avma1_cs(void) { pcmcia_unregister_driver(&avma1cs_driver); } module_init(init_avma1_cs); module_exit(exit_avma1_cs);
gpl-2.0
friedrich420/S5-AEL-Kernel
drivers/input/serio/altera_ps2.c
4985
4589
/* * Altera University Program PS2 controller driver * * Copyright (C) 2008 Thomas Chou <thomas@wytron.com.tw> * * Based on sa1111ps2.c, which is: * Copyright (C) 2002 Russell King * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/init.h> #include <linux/input.h> #include <linux/serio.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/slab.h> #include <linux/of.h> #define DRV_NAME "altera_ps2" struct ps2if { struct serio *io; struct resource *iomem_res; void __iomem *base; unsigned irq; }; /* * Read all bytes waiting in the PS2 port. There should be * at the most one, but we loop for safety. */ static irqreturn_t altera_ps2_rxint(int irq, void *dev_id) { struct ps2if *ps2if = dev_id; unsigned int status; int handled = IRQ_NONE; while ((status = readl(ps2if->base)) & 0xffff0000) { serio_interrupt(ps2if->io, status & 0xff, 0); handled = IRQ_HANDLED; } return handled; } /* * Write a byte to the PS2 port. */ static int altera_ps2_write(struct serio *io, unsigned char val) { struct ps2if *ps2if = io->port_data; writel(val, ps2if->base); return 0; } static int altera_ps2_open(struct serio *io) { struct ps2if *ps2if = io->port_data; /* clear fifo */ while (readl(ps2if->base) & 0xffff0000) /* empty */; writel(1, ps2if->base + 4); /* enable rx irq */ return 0; } static void altera_ps2_close(struct serio *io) { struct ps2if *ps2if = io->port_data; writel(0, ps2if->base); /* disable rx irq */ } /* * Add one device to this driver. 
*/ static int __devinit altera_ps2_probe(struct platform_device *pdev) { struct ps2if *ps2if; struct serio *serio; int error, irq; ps2if = kzalloc(sizeof(struct ps2if), GFP_KERNEL); serio = kzalloc(sizeof(struct serio), GFP_KERNEL); if (!ps2if || !serio) { error = -ENOMEM; goto err_free_mem; } serio->id.type = SERIO_8042; serio->write = altera_ps2_write; serio->open = altera_ps2_open; serio->close = altera_ps2_close; strlcpy(serio->name, dev_name(&pdev->dev), sizeof(serio->name)); strlcpy(serio->phys, dev_name(&pdev->dev), sizeof(serio->phys)); serio->port_data = ps2if; serio->dev.parent = &pdev->dev; ps2if->io = serio; ps2if->iomem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (ps2if->iomem_res == NULL) { error = -ENOENT; goto err_free_mem; } irq = platform_get_irq(pdev, 0); if (irq < 0) { error = -ENXIO; goto err_free_mem; } ps2if->irq = irq; if (!request_mem_region(ps2if->iomem_res->start, resource_size(ps2if->iomem_res), pdev->name)) { error = -EBUSY; goto err_free_mem; } ps2if->base = ioremap(ps2if->iomem_res->start, resource_size(ps2if->iomem_res)); if (!ps2if->base) { error = -ENOMEM; goto err_free_res; } error = request_irq(ps2if->irq, altera_ps2_rxint, 0, pdev->name, ps2if); if (error) { dev_err(&pdev->dev, "could not allocate IRQ %d: %d\n", ps2if->irq, error); goto err_unmap; } dev_info(&pdev->dev, "base %p, irq %d\n", ps2if->base, ps2if->irq); serio_register_port(ps2if->io); platform_set_drvdata(pdev, ps2if); return 0; err_unmap: iounmap(ps2if->base); err_free_res: release_mem_region(ps2if->iomem_res->start, resource_size(ps2if->iomem_res)); err_free_mem: kfree(ps2if); kfree(serio); return error; } /* * Remove one device from this driver. 
*/ static int __devexit altera_ps2_remove(struct platform_device *pdev) { struct ps2if *ps2if = platform_get_drvdata(pdev); platform_set_drvdata(pdev, NULL); serio_unregister_port(ps2if->io); free_irq(ps2if->irq, ps2if); iounmap(ps2if->base); release_mem_region(ps2if->iomem_res->start, resource_size(ps2if->iomem_res)); kfree(ps2if); return 0; } #ifdef CONFIG_OF static const struct of_device_id altera_ps2_match[] = { { .compatible = "ALTR,ps2-1.0", }, {}, }; MODULE_DEVICE_TABLE(of, altera_ps2_match); #endif /* CONFIG_OF */ /* * Our device driver structure */ static struct platform_driver altera_ps2_driver = { .probe = altera_ps2_probe, .remove = __devexit_p(altera_ps2_remove), .driver = { .name = DRV_NAME, .owner = THIS_MODULE, .of_match_table = of_match_ptr(altera_ps2_match), }, }; module_platform_driver(altera_ps2_driver); MODULE_DESCRIPTION("Altera University Program PS2 controller driver"); MODULE_AUTHOR("Thomas Chou <thomas@wytron.com.tw>"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:" DRV_NAME);
gpl-2.0
isnehalkiran/kernel-msm
sound/soc/pxa/raumfeld.c
8313
8090
/* * raumfeld_audio.c -- SoC audio for Raumfeld audio devices * * Copyright (c) 2009 Daniel Mack <daniel@caiaq.de> * * based on code from: * * Wolfson Microelectronics PLC. * Openedhand Ltd. * Liam Girdwood <lrg@slimlogic.co.uk> * Richard Purdie <richard@openedhand.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/module.h> #include <linux/i2c.h> #include <linux/delay.h> #include <linux/gpio.h> #include <sound/pcm.h> #include <sound/soc.h> #include <asm/mach-types.h> #include "pxa-ssp.h" #define GPIO_SPDIF_RESET (38) #define GPIO_MCLK_RESET (111) #define GPIO_CODEC_RESET (120) static struct i2c_client *max9486_client; static struct i2c_board_info max9486_hwmon_info = { I2C_BOARD_INFO("max9485", 0x63), }; #define MAX9485_MCLK_FREQ_112896 0x22 #define MAX9485_MCLK_FREQ_122880 0x23 #define MAX9485_MCLK_FREQ_225792 0x32 #define MAX9485_MCLK_FREQ_245760 0x33 static void set_max9485_clk(char clk) { i2c_master_send(max9486_client, &clk, 1); } static void raumfeld_enable_audio(bool en) { if (en) { gpio_set_value(GPIO_MCLK_RESET, 1); /* wait some time to let the clocks become stable */ msleep(100); gpio_set_value(GPIO_SPDIF_RESET, 1); gpio_set_value(GPIO_CODEC_RESET, 1); } else { gpio_set_value(GPIO_MCLK_RESET, 0); gpio_set_value(GPIO_SPDIF_RESET, 0); gpio_set_value(GPIO_CODEC_RESET, 0); } } /* CS4270 */ static int raumfeld_cs4270_startup(struct snd_pcm_substream *substream) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_dai *codec_dai = rtd->codec_dai; /* set freq to 0 to enable all possible codec sample rates */ return snd_soc_dai_set_sysclk(codec_dai, 0, 0, 0); } static void raumfeld_cs4270_shutdown(struct snd_pcm_substream *substream) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_dai 
*codec_dai = rtd->codec_dai; /* set freq to 0 to enable all possible codec sample rates */ snd_soc_dai_set_sysclk(codec_dai, 0, 0, 0); } static int raumfeld_cs4270_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_dai *codec_dai = rtd->codec_dai; struct snd_soc_dai *cpu_dai = rtd->cpu_dai; unsigned int fmt, clk = 0; int ret = 0; switch (params_rate(params)) { case 44100: set_max9485_clk(MAX9485_MCLK_FREQ_112896); clk = 11289600; break; case 48000: set_max9485_clk(MAX9485_MCLK_FREQ_122880); clk = 12288000; break; case 88200: set_max9485_clk(MAX9485_MCLK_FREQ_225792); clk = 22579200; break; case 96000: set_max9485_clk(MAX9485_MCLK_FREQ_245760); clk = 24576000; break; default: return -EINVAL; } fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS; /* setup the CODEC DAI */ ret = snd_soc_dai_set_fmt(codec_dai, fmt); if (ret < 0) return ret; ret = snd_soc_dai_set_sysclk(codec_dai, 0, clk, 0); if (ret < 0) return ret; /* setup the CPU DAI */ ret = snd_soc_dai_set_pll(cpu_dai, 0, 0, 0, clk); if (ret < 0) return ret; ret = snd_soc_dai_set_fmt(cpu_dai, fmt); if (ret < 0) return ret; ret = snd_soc_dai_set_clkdiv(cpu_dai, PXA_SSP_DIV_SCR, 4); if (ret < 0) return ret; ret = snd_soc_dai_set_sysclk(cpu_dai, PXA_SSP_CLK_EXT, clk, 1); if (ret < 0) return ret; return 0; } static struct snd_soc_ops raumfeld_cs4270_ops = { .startup = raumfeld_cs4270_startup, .shutdown = raumfeld_cs4270_shutdown, .hw_params = raumfeld_cs4270_hw_params, }; static int raumfeld_analog_suspend(struct snd_soc_card *card) { raumfeld_enable_audio(false); return 0; } static int raumfeld_analog_resume(struct snd_soc_card *card) { raumfeld_enable_audio(true); return 0; } /* AK4104 */ static int raumfeld_ak4104_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_dai *codec_dai = 
rtd->codec_dai; struct snd_soc_dai *cpu_dai = rtd->cpu_dai; int fmt, ret = 0, clk = 0; switch (params_rate(params)) { case 44100: set_max9485_clk(MAX9485_MCLK_FREQ_112896); clk = 11289600; break; case 48000: set_max9485_clk(MAX9485_MCLK_FREQ_122880); clk = 12288000; break; case 88200: set_max9485_clk(MAX9485_MCLK_FREQ_225792); clk = 22579200; break; case 96000: set_max9485_clk(MAX9485_MCLK_FREQ_245760); clk = 24576000; break; default: return -EINVAL; } fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF; /* setup the CODEC DAI */ ret = snd_soc_dai_set_fmt(codec_dai, fmt | SND_SOC_DAIFMT_CBS_CFS); if (ret < 0) return ret; /* setup the CPU DAI */ ret = snd_soc_dai_set_pll(cpu_dai, 0, 0, 0, clk); if (ret < 0) return ret; ret = snd_soc_dai_set_fmt(cpu_dai, fmt | SND_SOC_DAIFMT_CBS_CFS); if (ret < 0) return ret; ret = snd_soc_dai_set_clkdiv(cpu_dai, PXA_SSP_DIV_SCR, 4); if (ret < 0) return ret; ret = snd_soc_dai_set_sysclk(cpu_dai, PXA_SSP_CLK_EXT, clk, 1); if (ret < 0) return ret; return 0; } static struct snd_soc_ops raumfeld_ak4104_ops = { .hw_params = raumfeld_ak4104_hw_params, }; #define DAI_LINK_CS4270 \ { \ .name = "CS4270", \ .stream_name = "CS4270", \ .cpu_dai_name = "pxa-ssp-dai.0", \ .platform_name = "pxa-pcm-audio", \ .codec_dai_name = "cs4270-hifi", \ .codec_name = "cs4270.0-0048", \ .ops = &raumfeld_cs4270_ops, \ } #define DAI_LINK_AK4104 \ { \ .name = "ak4104", \ .stream_name = "Playback", \ .cpu_dai_name = "pxa-ssp-dai.1", \ .codec_dai_name = "ak4104-hifi", \ .platform_name = "pxa-pcm-audio", \ .ops = &raumfeld_ak4104_ops, \ .codec_name = "spi0.0", \ } static struct snd_soc_dai_link snd_soc_raumfeld_connector_dai[] = { DAI_LINK_CS4270, DAI_LINK_AK4104, }; static struct snd_soc_dai_link snd_soc_raumfeld_speaker_dai[] = { DAI_LINK_CS4270, }; static struct snd_soc_card snd_soc_raumfeld_connector = { .name = "Raumfeld Connector", .owner = THIS_MODULE, .dai_link = snd_soc_raumfeld_connector_dai, .num_links = ARRAY_SIZE(snd_soc_raumfeld_connector_dai), 
.suspend_post = raumfeld_analog_suspend, .resume_pre = raumfeld_analog_resume, }; static struct snd_soc_card snd_soc_raumfeld_speaker = { .name = "Raumfeld Speaker", .owner = THIS_MODULE, .dai_link = snd_soc_raumfeld_speaker_dai, .num_links = ARRAY_SIZE(snd_soc_raumfeld_speaker_dai), .suspend_post = raumfeld_analog_suspend, .resume_pre = raumfeld_analog_resume, }; static struct platform_device *raumfeld_audio_device; static int __init raumfeld_audio_init(void) { int ret; if (!machine_is_raumfeld_speaker() && !machine_is_raumfeld_connector()) return 0; max9486_client = i2c_new_device(i2c_get_adapter(0), &max9486_hwmon_info); if (!max9486_client) return -ENOMEM; set_max9485_clk(MAX9485_MCLK_FREQ_122880); /* Register analog device */ raumfeld_audio_device = platform_device_alloc("soc-audio", 0); if (!raumfeld_audio_device) return -ENOMEM; if (machine_is_raumfeld_speaker()) platform_set_drvdata(raumfeld_audio_device, &snd_soc_raumfeld_speaker); if (machine_is_raumfeld_connector()) platform_set_drvdata(raumfeld_audio_device, &snd_soc_raumfeld_connector); ret = platform_device_add(raumfeld_audio_device); if (ret < 0) { platform_device_put(raumfeld_audio_device); return ret; } raumfeld_enable_audio(true); return 0; } static void __exit raumfeld_audio_exit(void) { raumfeld_enable_audio(false); platform_device_unregister(raumfeld_audio_device); i2c_unregister_device(max9486_client); gpio_free(GPIO_MCLK_RESET); gpio_free(GPIO_CODEC_RESET); gpio_free(GPIO_SPDIF_RESET); } module_init(raumfeld_audio_init); module_exit(raumfeld_audio_exit); /* Module information */ MODULE_AUTHOR("Daniel Mack <daniel@caiaq.de>"); MODULE_DESCRIPTION("Raumfeld audio SoC"); MODULE_LICENSE("GPL");
gpl-2.0
DennisBold/CodeAurora-MSM-Kernel
arch/arm/mach-s5pc100/setup-sdhci-gpio.c
8313
2169
/* linux/arch/arm/plat-s5pc100/setup-sdhci-gpio.c * * Copyright 2009 Samsung Eletronics * * S5PC100 - Helper functions for setting up SDHCI device(s) GPIO (HSMMC) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/gpio.h> #include <linux/mmc/host.h> #include <linux/mmc/card.h> #include <plat/gpio-cfg.h> #include <plat/regs-sdhci.h> #include <plat/sdhci.h> void s5pc100_setup_sdhci0_cfg_gpio(struct platform_device *dev, int width) { struct s3c_sdhci_platdata *pdata = dev->dev.platform_data; unsigned int num; num = width; /* In case of 8 width, we should decrease the 2 */ if (width == 8) num = width - 2; /* Set all the necessary GPG0/GPG1 pins to special-function 0 */ s3c_gpio_cfgrange_nopull(S5PC100_GPG0(0), 2 + num, S3C_GPIO_SFN(2)); if (width == 8) s3c_gpio_cfgrange_nopull(S5PC100_GPG1(0), 2, S3C_GPIO_SFN(2)); if (pdata->cd_type == S3C_SDHCI_CD_INTERNAL) { s3c_gpio_setpull(S5PC100_GPG1(2), S3C_GPIO_PULL_UP); s3c_gpio_cfgpin(S5PC100_GPG1(2), S3C_GPIO_SFN(2)); } } void s5pc100_setup_sdhci1_cfg_gpio(struct platform_device *dev, int width) { struct s3c_sdhci_platdata *pdata = dev->dev.platform_data; /* Set all the necessary GPG2 pins to special-function 2 */ s3c_gpio_cfgrange_nopull(S5PC100_GPG2(0), 2 + width, S3C_GPIO_SFN(2)); if (pdata->cd_type == S3C_SDHCI_CD_INTERNAL) { s3c_gpio_setpull(S5PC100_GPG2(6), S3C_GPIO_PULL_UP); s3c_gpio_cfgpin(S5PC100_GPG2(6), S3C_GPIO_SFN(2)); } } void s5pc100_setup_sdhci2_cfg_gpio(struct platform_device *dev, int width) { struct s3c_sdhci_platdata *pdata = dev->dev.platform_data; /* Set all the necessary GPG3 pins to special-function 2 */ s3c_gpio_cfgrange_nopull(S5PC100_GPG3(0), 2 + width, S3C_GPIO_SFN(2)); if (pdata->cd_type == 
S3C_SDHCI_CD_INTERNAL) { s3c_gpio_setpull(S5PC100_GPG3(6), S3C_GPIO_PULL_UP); s3c_gpio_cfgpin(S5PC100_GPG3(6), S3C_GPIO_SFN(2)); } }
gpl-2.0
TEAM-RAZOR-DEVICES/kernel_oneplus_msm8994
arch/blackfin/mach-bf538/ext-gpio.c
9337
4282
/* * GPIOLIB interface for BF538/9 PORT C, D, and E GPIOs * * Copyright 2009-2011 Analog Devices Inc. * * Licensed under the GPL-2 or later. */ #include <linux/module.h> #include <linux/err.h> #include <asm/blackfin.h> #include <asm/gpio.h> #include <asm/portmux.h> #define DEFINE_REG(reg, off) \ static inline u16 read_##reg(void __iomem *port) \ { return bfin_read16(port + off); } \ static inline void write_##reg(void __iomem *port, u16 v) \ { bfin_write16(port + off, v); } DEFINE_REG(PORTIO, 0x00) DEFINE_REG(PORTIO_CLEAR, 0x10) DEFINE_REG(PORTIO_SET, 0x20) DEFINE_REG(PORTIO_DIR, 0x40) DEFINE_REG(PORTIO_INEN, 0x50) static void __iomem *gpio_chip_to_mmr(struct gpio_chip *chip) { switch (chip->base) { default: /* not really needed, but keeps gcc happy */ case GPIO_PC0: return (void __iomem *)PORTCIO; case GPIO_PD0: return (void __iomem *)PORTDIO; case GPIO_PE0: return (void __iomem *)PORTEIO; } } static int bf538_gpio_get_value(struct gpio_chip *chip, unsigned gpio) { void __iomem *port = gpio_chip_to_mmr(chip); return !!(read_PORTIO(port) & (1u << gpio)); } static void bf538_gpio_set_value(struct gpio_chip *chip, unsigned gpio, int value) { void __iomem *port = gpio_chip_to_mmr(chip); if (value) write_PORTIO_SET(port, (1u << gpio)); else write_PORTIO_CLEAR(port, (1u << gpio)); } static int bf538_gpio_direction_input(struct gpio_chip *chip, unsigned gpio) { void __iomem *port = gpio_chip_to_mmr(chip); write_PORTIO_DIR(port, read_PORTIO_DIR(port) & ~(1u << gpio)); write_PORTIO_INEN(port, read_PORTIO_INEN(port) | (1u << gpio)); return 0; } static int bf538_gpio_direction_output(struct gpio_chip *chip, unsigned gpio, int value) { void __iomem *port = gpio_chip_to_mmr(chip); write_PORTIO_INEN(port, read_PORTIO_INEN(port) & ~(1u << gpio)); bf538_gpio_set_value(port, gpio, value); write_PORTIO_DIR(port, read_PORTIO_DIR(port) | (1u << gpio)); return 0; } static int bf538_gpio_request(struct gpio_chip *chip, unsigned gpio) { return bfin_special_gpio_request(chip->base + 
gpio, chip->label); } static void bf538_gpio_free(struct gpio_chip *chip, unsigned gpio) { return bfin_special_gpio_free(chip->base + gpio); } /* We don't set the irq fields as these banks cannot generate interrupts */ static struct gpio_chip bf538_portc_chip = { .label = "GPIO-PC", .direction_input = bf538_gpio_direction_input, .get = bf538_gpio_get_value, .direction_output = bf538_gpio_direction_output, .set = bf538_gpio_set_value, .request = bf538_gpio_request, .free = bf538_gpio_free, .base = GPIO_PC0, .ngpio = GPIO_PC9 - GPIO_PC0 + 1, }; static struct gpio_chip bf538_portd_chip = { .label = "GPIO-PD", .direction_input = bf538_gpio_direction_input, .get = bf538_gpio_get_value, .direction_output = bf538_gpio_direction_output, .set = bf538_gpio_set_value, .request = bf538_gpio_request, .free = bf538_gpio_free, .base = GPIO_PD0, .ngpio = GPIO_PD13 - GPIO_PD0 + 1, }; static struct gpio_chip bf538_porte_chip = { .label = "GPIO-PE", .direction_input = bf538_gpio_direction_input, .get = bf538_gpio_get_value, .direction_output = bf538_gpio_direction_output, .set = bf538_gpio_set_value, .request = bf538_gpio_request, .free = bf538_gpio_free, .base = GPIO_PE0, .ngpio = GPIO_PE15 - GPIO_PE0 + 1, }; static int __init bf538_extgpio_setup(void) { return gpiochip_add(&bf538_portc_chip) | gpiochip_add(&bf538_portd_chip) | gpiochip_add(&bf538_porte_chip); } arch_initcall(bf538_extgpio_setup); #ifdef CONFIG_PM static struct { u16 data, dir, inen; } gpio_bank_saved[3]; static void __iomem * const port_bases[3] = { (void *)PORTCIO, (void *)PORTDIO, (void *)PORTEIO, }; void bfin_special_gpio_pm_hibernate_suspend(void) { int i; for (i = 0; i < ARRAY_SIZE(port_bases); ++i) { gpio_bank_saved[i].data = read_PORTIO(port_bases[i]); gpio_bank_saved[i].inen = read_PORTIO_INEN(port_bases[i]); gpio_bank_saved[i].dir = read_PORTIO_DIR(port_bases[i]); } } void bfin_special_gpio_pm_hibernate_restore(void) { int i; for (i = 0; i < ARRAY_SIZE(port_bases); ++i) { write_PORTIO_INEN(port_bases[i], 
gpio_bank_saved[i].inen); write_PORTIO_SET(port_bases[i], gpio_bank_saved[i].data & gpio_bank_saved[i].dir); write_PORTIO_DIR(port_bases[i], gpio_bank_saved[i].dir); } } #endif
gpl-2.0
TeamPrimo/android_kernel_htc_primo
arch/powerpc/boot/epapr.c
9593
1896
/*
 * Bootwrapper for ePAPR compliant firmwares
 *
 * Copyright 2010 David Gibson <david@gibson.dropbear.id.au>, IBM Corporation.
 *
 * Based on earlier bootwrappers by:
 * (c) Benjamin Herrenschmidt <benh@kernel.crashing.org>, IBM Corp,\
 *   and
 * Scott Wood <scottwood@freescale.com>
 * Copyright (c) 2007 Freescale Semiconductor, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include "ops.h"
#include "stdio.h"
#include "io.h"
#include <libfdt.h>

BSS_STACK(4096);

#define EPAPR_SMAGIC	0x65504150
#define EPAPR_EMAGIC	0x45504150

/* Values captured from the entry registers for later sanity checks. */
static unsigned epapr_magic;
static unsigned long ima_size;
static unsigned long fdt_addr;

/*
 * Sanity-check what the firmware handed us: the ePAPR magic must be
 * present, and the image plus device tree should sit inside the
 * Initially Mapped Area.  Violations (other than a bad magic) only
 * warn, since the boot may still succeed.
 */
static void platform_fixups(void)
{
	if (epapr_magic != EPAPR_EMAGIC && epapr_magic != EPAPR_SMAGIC)
		fatal("r6 contained 0x%08x instead of ePAPR magic number\n",
		      epapr_magic);

	if (ima_size < (unsigned long)_end)
		printf("WARNING: Image loaded outside IMA!"
		       " (_end=%p, ima_size=0x%lx)\n", _end, ima_size);
	if (ima_size < fdt_addr)
		printf("WARNING: Device tree address is outside IMA!"
		       "(fdt_addr=0x%lx, ima_size=0x%lx)\n", fdt_addr,
		       ima_size);
	if (ima_size < fdt_addr + fdt_totalsize((void *)fdt_addr))
		printf("WARNING: Device tree extends outside IMA!"
		       " (fdt_addr=0x%lx, size=0x%x, ima_size=0x%lx\n",
		       fdt_addr, fdt_totalsize((void *)fdt_addr), ima_size);
}

/*
 * ePAPR entry: r3 = device tree, r6 = magic, r7 = IMA size.  Set up the
 * heap in the space between _end and the top of the IMA, then bring up
 * the flat device tree and console.
 */
void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
		   unsigned long r6, unsigned long r7)
{
	fdt_addr = r3;
	epapr_magic = r6;
	ima_size = r7;

	/* FIXME: we should process reserve entries */

	simple_alloc_init(_end, ima_size - (unsigned long)_end, 32, 64);

	fdt_init((void *)fdt_addr);

	serial_console_init();
	platform_ops.fixups = platform_fixups;
}
gpl-2.0
chrisch1974/htc8960-3.0
arch/powerpc/boot/epapr.c
9593
1896
/*
 * Bootwrapper for ePAPR compliant firmwares
 *
 * Copyright 2010 David Gibson <david@gibson.dropbear.id.au>, IBM Corporation.
 *
 * Based on earlier bootwrappers by:
 * (c) Benjamin Herrenschmidt <benh@kernel.crashing.org>, IBM Corp,\
 *	and
 * Scott Wood <scottwood@freescale.com>
 * Copyright (c) 2007 Freescale Semiconductor, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include "ops.h"
#include "stdio.h"
#include "io.h"
#include <libfdt.h>

BSS_STACK(4096);

/*
 * Magic values the loader must pass in r6 (checked in platform_fixups):
 * 0x65504150 is ASCII "ePAP", 0x45504150 is ASCII "EPAP".
 */
#define EPAPR_SMAGIC	0x65504150
#define EPAPR_EMAGIC	0x45504150

static unsigned epapr_magic;		/* boot magic, saved from r6 */
static unsigned long ima_size;		/* Initially Mapped Area size, from r7 */
static unsigned long fdt_addr;		/* flattened device tree address, from r3 */

/*
 * Sanity-check the boot environment (runs via platform_ops.fixups):
 * reject a wrong r6 magic outright, and warn when the image or the
 * device tree lies wholly or partly outside the IMA.
 */
static void platform_fixups(void)
{
	if ((epapr_magic != EPAPR_EMAGIC) && (epapr_magic != EPAPR_SMAGIC))
		fatal("r6 contained 0x%08x instead of ePAPR magic number\n",
		      epapr_magic);

	if (ima_size < (unsigned long)_end)
		printf("WARNING: Image loaded outside IMA!"
		       " (_end=%p, ima_size=0x%lx)\n", _end, ima_size);
	if (ima_size < fdt_addr)
		printf("WARNING: Device tree address is outside IMA!"
		       "(fdt_addr=0x%lx, ima_size=0x%lx)\n", fdt_addr,
		       ima_size);
	/* the tree may start inside the IMA yet extend past its end */
	if (ima_size < fdt_addr + fdt_totalsize((void *)fdt_addr))
		printf("WARNING: Device tree extends outside IMA!"
		       " (fdt_addr=0x%lx, size=0x%x, ima_size=0x%lx\n",
		       fdt_addr, fdt_totalsize((void *)fdt_addr), ima_size);
}

/*
 * Wrapper entry point.  Register usage as this code consumes it:
 * r3 = device tree address, r6 = ePAPR boot magic, r7 = IMA size
 * (r4 and r5 are accepted but unused here).  Sets up a simple heap in
 * the space between _end and the top of the IMA, parses the device
 * tree, brings up the serial console and installs the fixups hook.
 */
void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
		   unsigned long r6, unsigned long r7)
{
	epapr_magic = r6;
	ima_size = r7;
	fdt_addr = r3;

	/* FIXME: we should process reserve entries */

	simple_alloc_init(_end, ima_size - (unsigned long)_end, 32, 64);

	fdt_init((void *)fdt_addr);

	serial_console_init();
	platform_ops.fixups = platform_fixups;
}
gpl-2.0
draekko/huawei-kernel-3.4
arch/avr32/oprofile/backtrace.c
13689
2074
/*
 * AVR32 specific backtracing code for oprofile
 *
 * Copyright 2008 Weinmann GmbH
 *
 * Author: Nikolaus Voss <n.voss@weinmann.de>
 *
 * Based on i386 oprofile backtrace code by John Levon and David Smith
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#include <linux/oprofile.h>
#include <linux/sched.h>
#include <linux/uaccess.h>

/* The first two words of each frame on the stack look like this if we have
 * frame pointers */
struct frame_head {
	unsigned long lr;		/* saved return address (link register) */
	struct frame_head *fp;		/* caller's frame pointer */
};

/* copied from arch/avr32/kernel/process.c */
/* Nonzero iff p points strictly inside the task's kernel stack area
 * (the thread_info page), with at least a few readable bytes left
 * before the top (hence the "- 3"). */
static inline int valid_stack_ptr(struct thread_info *tinfo, unsigned long p)
{
	return (p > (unsigned long)tinfo) &&
		(p < (unsigned long)tinfo + THREAD_SIZE - 3);
}

/* copied from arch/x86/oprofile/backtrace.c */
/* Record one user-space frame's return address and hand back the next
 * frame pointer, or NULL when the frame is unreadable or does not make
 * progress up the stack (loop/corruption guard). */
static struct frame_head *dump_user_backtrace(struct frame_head *head)
{
	struct frame_head bufhead[2];

	/* Also check accessibility of one struct frame_head beyond */
	if (!access_ok(VERIFY_READ, head, sizeof(bufhead)))
		return NULL;
	/* non-faulting copy: we may be called from NMI-ish sample context */
	if (__copy_from_user_inatomic(bufhead, head, sizeof(bufhead)))
		return NULL;

	oprofile_add_trace(bufhead[0].lr);

	/* frame pointers should strictly progress back up the stack
	 * (towards higher addresses) */
	if (bufhead[0].fp <= head)
		return NULL;

	return bufhead[0].fp;
}

/*
 * Walk at most "depth" stack frames starting from the frame pointer in
 * regs->r7 and feed each saved return address to oprofile_add_trace().
 * Kernel-mode samples are only walked when CONFIG_FRAME_POINTER is set;
 * user-mode samples go through dump_user_backtrace().
 */
void avr32_backtrace(struct pt_regs * const regs, unsigned int depth)
{
	/* Get first frame pointer */
	struct frame_head *head = (struct frame_head *)(regs->r7);

	if (!user_mode(regs)) {
#ifdef CONFIG_FRAME_POINTER
		/*
		 * Traverse the kernel stack from frame to frame up to
		 * "depth" steps.
		 */
		while (depth-- && valid_stack_ptr(task_thread_info(current),
						  (unsigned long)head)) {
			oprofile_add_trace(head->lr);
			/* same monotonic-progress guard as user mode */
			if (head->fp <= head)
				break;
			head = head->fp;
		}
#endif
	} else {
		/* Assume we have frame pointers in user mode process */
		while (depth-- && head)
			head = dump_user_backtrace(head);
	}
}
gpl-2.0
Tepira/linux-sunxi
modules/wifi/bcm40181/5.90.125.69.2/open-src/src/dhd/sys/dhd_linux_mon.c
122
10672
/* * Broadcom Dongle Host Driver (DHD), Linux monitor network interface * * Copyright (C) 1999-2011, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you * under the terms of the GNU General Public License version 2 (the "GPL"), * available at http://www.broadcom.com/licenses/GPLv2.php, with the * following added to such license: * * As a special exception, the copyright holders of this software give you * permission to link this software with independent modules, and to copy and * distribute the resulting executable under terms of your choice, provided that * you also meet, for each linked independent module, the terms and conditions of * the license of that module. An independent module is a module which is not * derived from this software. The special exception does not apply to any * modifications of the software. * * Notwithstanding the above, under no circumstances may you combine this * software in any way with any other Broadcom software provided under a license * other than the GPL, without Broadcom's express prior written consent. * * $Id: dhd_linux_mon.c 278714 2011-08-19 19:25:22Z $ */ #include <linux/string.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/if_arp.h> #include <linux/ieee80211.h> #include <linux/rtnetlink.h> #include <net/ieee80211_radiotap.h> #include <wlioctl.h> #include <bcmutils.h> #include <linux_osl.h> #include <dhd_dbg.h> #include <dngl_stats.h> #include <dhd.h> typedef enum monitor_states { MONITOR_STATE_DEINIT = 0x0, MONITOR_STATE_INIT = 0x1, MONITOR_STATE_INTERFACE_ADDED = 0x2, MONITOR_STATE_INTERFACE_DELETED = 0x4 } monitor_states_t; extern int dhd_start_xmit(struct sk_buff *skb, struct net_device *net); /** * Local declarations and defintions (not exposed) */ #define MON_PRINT(format, ...) 
printf("DHD-MON: %s " format, __func__, ##__VA_ARGS__) #define MON_TRACE MON_PRINT typedef struct monitor_interface { int radiotap_enabled; struct net_device* real_ndev; /* The real interface that the monitor is on */ struct net_device* mon_ndev; } monitor_interface; typedef struct dhd_linux_monitor { void *dhd_pub; monitor_states_t monitor_state; monitor_interface mon_if[DHD_MAX_IFS]; struct mutex lock; /* lock to protect mon_if */ } dhd_linux_monitor_t; static dhd_linux_monitor_t g_monitor; static struct net_device* lookup_real_netdev(char *name); static monitor_interface* ndev_to_monif(struct net_device *ndev); static int dhd_mon_if_open(struct net_device *ndev); static int dhd_mon_if_stop(struct net_device *ndev); static int dhd_mon_if_subif_start_xmit(struct sk_buff *skb, struct net_device *ndev); static void dhd_mon_if_set_multicast_list(struct net_device *ndev); static int dhd_mon_if_change_mac(struct net_device *ndev, void *addr); static const struct net_device_ops dhd_mon_if_ops = { .ndo_open = dhd_mon_if_open, .ndo_stop = dhd_mon_if_stop, .ndo_start_xmit = dhd_mon_if_subif_start_xmit, .ndo_set_rx_mode = dhd_mon_if_set_multicast_list, .ndo_set_mac_address = dhd_mon_if_change_mac, }; /** * Local static function defintions */ /* Look up dhd's net device table to find a match (e.g. 
interface "eth0" is a match for "mon.eth0" * "p2p-eth0-0" is a match for "mon.p2p-eth0-0") */ static struct net_device* lookup_real_netdev(char *name) { int i; int last_name_len = 0; struct net_device *ndev; struct net_device *ndev_found = NULL; /* We want to find interface "p2p-eth0-0" for monitor interface "mon.p2p-eth0-0", so * we skip "eth0" even if "mon.p2p-eth0-0" contains "eth0" */ for (i = 0; i < DHD_MAX_IFS; i++) { ndev = dhd_idx2net(g_monitor.dhd_pub, i); if (ndev && strstr(name, ndev->name)) { if (strlen(ndev->name) > last_name_len) { ndev_found = ndev; last_name_len = strlen(ndev->name); } } } return ndev_found; } static monitor_interface* ndev_to_monif(struct net_device *ndev) { int i; for (i = 0; i < DHD_MAX_IFS; i++) { if (g_monitor.mon_if[i].mon_ndev == ndev) return &g_monitor.mon_if[i]; } return NULL; } static int dhd_mon_if_open(struct net_device *ndev) { int ret = 0; MON_PRINT("enter\n"); return ret; } static int dhd_mon_if_stop(struct net_device *ndev) { int ret = 0; MON_PRINT("enter\n"); return ret; } static int dhd_mon_if_subif_start_xmit(struct sk_buff *skb, struct net_device *ndev) { int ret = 0; int rtap_len; int qos_len = 0; int dot11_hdr_len = 24; int snap_len = 6; unsigned char *pdata; unsigned short frame_ctl; unsigned char src_mac_addr[6]; unsigned char dst_mac_addr[6]; struct ieee80211_hdr *dot11_hdr; struct ieee80211_radiotap_header *rtap_hdr; monitor_interface* mon_if; MON_PRINT("enter\n"); mon_if = ndev_to_monif(ndev); if (mon_if == NULL || mon_if->real_ndev == NULL) { MON_PRINT(" cannot find matched net dev, skip the packet\n"); goto fail; } if (unlikely(skb->len < sizeof(struct ieee80211_radiotap_header))) goto fail; rtap_hdr = (struct ieee80211_radiotap_header *)skb->data; if (unlikely(rtap_hdr->it_version)) goto fail; rtap_len = ieee80211_get_radiotap_len(skb->data); if (unlikely(skb->len < rtap_len)) goto fail; MON_PRINT("radiotap len (should be 14): %d\n", rtap_len); /* Skip the ratio tap header */ skb_pull(skb, rtap_len); 
dot11_hdr = (struct ieee80211_hdr *)skb->data; frame_ctl = le16_to_cpu(dot11_hdr->frame_control); /* Check if the QoS bit is set */ if ((frame_ctl & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) { /* Check if this ia a Wireless Distribution System (WDS) frame * which has 4 MAC addresses */ if (dot11_hdr->frame_control & 0x0080) qos_len = 2; if ((dot11_hdr->frame_control & 0x0300) == 0x0300) dot11_hdr_len += 6; memcpy(dst_mac_addr, dot11_hdr->addr1, sizeof(dst_mac_addr)); memcpy(src_mac_addr, dot11_hdr->addr2, sizeof(src_mac_addr)); /* Skip the 802.11 header, QoS (if any) and SNAP, but leave spaces for * for two MAC addresses */ skb_pull(skb, dot11_hdr_len + qos_len + snap_len - sizeof(src_mac_addr) * 2); pdata = (unsigned char*)skb->data; memcpy(pdata, dst_mac_addr, sizeof(dst_mac_addr)); memcpy(pdata + sizeof(dst_mac_addr), src_mac_addr, sizeof(src_mac_addr)); MON_PRINT("if name: %s, matched if name %s\n", ndev->name, mon_if->real_ndev->name); /* Use the real net device to transmit the packet */ ret = dhd_start_xmit(skb, mon_if->real_ndev); return ret; } fail: dev_kfree_skb(skb); return 0; } static void dhd_mon_if_set_multicast_list(struct net_device *ndev) { monitor_interface* mon_if; mon_if = ndev_to_monif(ndev); if (mon_if == NULL || mon_if->real_ndev == NULL) { MON_PRINT(" cannot find matched net dev, skip the packet\n"); } MON_PRINT("enter, if name: %s, matched if name %s\n", ndev->name, mon_if->real_ndev->name); } static int dhd_mon_if_change_mac(struct net_device *ndev, void *addr) { int ret = 0; monitor_interface* mon_if; mon_if = ndev_to_monif(ndev); if (mon_if == NULL || mon_if->real_ndev == NULL) { MON_PRINT(" cannot find matched net dev, skip the packet\n"); } MON_PRINT("enter, if name: %s, matched if name %s\n", ndev->name, mon_if->real_ndev->name); return ret; } /** * Global function definitions (declared in dhd_linux_mon.h) */ int dhd_add_monitor(char *name, struct net_device **new_ndev) { int i; int idx = -1; int ret = 0; struct net_device* ndev = 
NULL; dhd_linux_monitor_t **dhd_mon; mutex_lock(&g_monitor.lock); MON_TRACE("enter, if name: %s\n", name); if (!name || !new_ndev) { MON_PRINT("invalid parameters\n"); ret = -EINVAL; goto out; } /* * Find a vacancy */ for (i = 0; i < DHD_MAX_IFS; i++) if (g_monitor.mon_if[i].mon_ndev == NULL) { idx = i; break; } if (idx == -1) { MON_PRINT("exceeds maximum interfaces\n"); ret = -EFAULT; goto out; } ndev = alloc_etherdev(sizeof(dhd_linux_monitor_t*)); if (!ndev) { MON_PRINT("failed to allocate memory\n"); ret = -ENOMEM; goto out; } ndev->type = ARPHRD_IEEE80211_RADIOTAP; strncpy(ndev->name, name, IFNAMSIZ); ndev->name[IFNAMSIZ - 1] = 0; ndev->netdev_ops = &dhd_mon_if_ops; ret = register_netdevice(ndev); if (ret) { MON_PRINT(" register_netdevice failed (%d)\n", ret); goto out; } *new_ndev = ndev; g_monitor.mon_if[idx].radiotap_enabled = TRUE; g_monitor.mon_if[idx].mon_ndev = ndev; g_monitor.mon_if[idx].real_ndev = lookup_real_netdev(name); dhd_mon = (dhd_linux_monitor_t **)netdev_priv(ndev); *dhd_mon = &g_monitor; g_monitor.monitor_state = MONITOR_STATE_INTERFACE_ADDED; MON_PRINT("net device returned: 0x%p\n", ndev); MON_PRINT("found a matched net device, name %s\n", g_monitor.mon_if[idx].real_ndev->name); out: if (ret && ndev) free_netdev(ndev); mutex_unlock(&g_monitor.lock); return ret; } int dhd_del_monitor(struct net_device *ndev) { int i; bool rollback_lock = false; if (!ndev) return -EINVAL; mutex_lock(&g_monitor.lock); for (i = 0; i < DHD_MAX_IFS; i++) { if (g_monitor.mon_if[i].mon_ndev == ndev || g_monitor.mon_if[i].real_ndev == ndev) { g_monitor.mon_if[i].real_ndev = NULL; if (rtnl_is_locked()) { rtnl_unlock(); rollback_lock = true; } unregister_netdev(g_monitor.mon_if[i].mon_ndev); free_netdev(g_monitor.mon_if[i].mon_ndev); g_monitor.mon_if[i].mon_ndev = NULL; g_monitor.monitor_state = MONITOR_STATE_INTERFACE_DELETED; break; } } if (rollback_lock) { rtnl_lock(); rollback_lock = false; } if (g_monitor.monitor_state != MONITOR_STATE_INTERFACE_DELETED) 
MON_PRINT("interface not found in monitor IF array, is this a monitor IF? 0x%p\n", ndev); mutex_unlock(&g_monitor.lock); return 0; } int dhd_monitor_init(void *dhd_pub) { if (g_monitor.monitor_state == MONITOR_STATE_DEINIT) { g_monitor.dhd_pub = dhd_pub; mutex_init(&g_monitor.lock); g_monitor.monitor_state = MONITOR_STATE_INIT; } return 0; } int dhd_monitor_uninit(void) { int i; struct net_device *ndev; bool rollback_lock = false; mutex_lock(&g_monitor.lock); if (g_monitor.monitor_state != MONITOR_STATE_DEINIT) { for (i = 0; i < DHD_MAX_IFS; i++) { ndev = g_monitor.mon_if[i].mon_ndev; if (ndev) { if (rtnl_is_locked()) { rtnl_unlock(); rollback_lock = true; } unregister_netdev(ndev); free_netdev(ndev); g_monitor.mon_if[i].real_ndev = NULL; g_monitor.mon_if[i].mon_ndev = NULL; if (rollback_lock) { rtnl_lock(); rollback_lock = false; } } } g_monitor.monitor_state = MONITOR_STATE_DEINIT; } mutex_unlock(&g_monitor.lock); return 0; }
gpl-2.0
erikcas/android_kernel_samsung_msm8916-caf
sound/usb/pcm.c
1146
43677
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/init.h> #include <linux/slab.h> #include <linux/bitrev.h> #include <linux/ratelimit.h> #include <linux/usb.h> #include <linux/usb/audio.h> #include <linux/usb/audio-v2.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include "usbaudio.h" #include "card.h" #include "quirks.h" #include "debug.h" #include "endpoint.h" #include "helper.h" #include "pcm.h" #include "clock.h" #include "power.h" #define SUBSTREAM_FLAG_DATA_EP_STARTED 0 #define SUBSTREAM_FLAG_SYNC_EP_STARTED 1 /* return the estimated delay based on USB frame counters */ snd_pcm_uframes_t snd_usb_pcm_delay(struct snd_usb_substream *subs, unsigned int rate) { int current_frame_number; int frame_diff; int est_delay; if (!subs->last_delay) return 0; /* short path */ current_frame_number = usb_get_current_frame_number(subs->dev); /* * HCD implementations use different widths, use lower 8 bits. 
* The delay will be managed up to 256ms, which is more than * enough */ frame_diff = (current_frame_number - subs->last_frame_number) & 0xff; /* Approximation based on number of samples per USB frame (ms), some truncation for 44.1 but the estimate is good enough */ est_delay = frame_diff * rate / 1000; if (subs->direction == SNDRV_PCM_STREAM_PLAYBACK) est_delay = subs->last_delay - est_delay; else est_delay = subs->last_delay + est_delay; if (est_delay < 0) est_delay = 0; return est_delay; } /* * return the current pcm pointer. just based on the hwptr_done value. */ static snd_pcm_uframes_t snd_usb_pcm_pointer(struct snd_pcm_substream *substream) { struct snd_usb_substream *subs; unsigned int hwptr_done; subs = (struct snd_usb_substream *)substream->runtime->private_data; if (subs->stream->chip->shutdown) return SNDRV_PCM_POS_XRUN; spin_lock(&subs->lock); hwptr_done = subs->hwptr_done; substream->runtime->delay = snd_usb_pcm_delay(subs, substream->runtime->rate); spin_unlock(&subs->lock); return hwptr_done / (substream->runtime->frame_bits >> 3); } /* * find a matching audio format */ static struct audioformat *find_format(struct snd_usb_substream *subs) { struct audioformat *fp; struct audioformat *found = NULL; int cur_attr = 0, attr; list_for_each_entry(fp, &subs->fmt_list, list) { if (!(fp->formats & pcm_format_to_bits(subs->pcm_format))) continue; if (fp->channels != subs->channels) continue; if (subs->cur_rate < fp->rate_min || subs->cur_rate > fp->rate_max) continue; if (! (fp->rates & SNDRV_PCM_RATE_CONTINUOUS)) { unsigned int i; for (i = 0; i < fp->nr_rates; i++) if (fp->rate_table[i] == subs->cur_rate) break; if (i >= fp->nr_rates) continue; } attr = fp->ep_attr & USB_ENDPOINT_SYNCTYPE; if (! found) { found = fp; cur_attr = attr; continue; } /* avoid async out and adaptive in if the other method * supports the same format. * this is a workaround for the case like * M-audio audiophile USB. 
*/ if (attr != cur_attr) { if ((attr == USB_ENDPOINT_SYNC_ASYNC && subs->direction == SNDRV_PCM_STREAM_PLAYBACK) || (attr == USB_ENDPOINT_SYNC_ADAPTIVE && subs->direction == SNDRV_PCM_STREAM_CAPTURE)) continue; if ((cur_attr == USB_ENDPOINT_SYNC_ASYNC && subs->direction == SNDRV_PCM_STREAM_PLAYBACK) || (cur_attr == USB_ENDPOINT_SYNC_ADAPTIVE && subs->direction == SNDRV_PCM_STREAM_CAPTURE)) { found = fp; cur_attr = attr; continue; } } /* find the format with the largest max. packet size */ if (fp->maxpacksize > found->maxpacksize) { found = fp; cur_attr = attr; } } return found; } static int init_pitch_v1(struct snd_usb_audio *chip, int iface, struct usb_host_interface *alts, struct audioformat *fmt) { struct usb_device *dev = chip->dev; unsigned int ep; unsigned char data[1]; int err; ep = get_endpoint(alts, 0)->bEndpointAddress; data[0] = 1; if ((err = snd_usb_ctl_msg(dev, usb_sndctrlpipe(dev, 0), UAC_SET_CUR, USB_TYPE_CLASS|USB_RECIP_ENDPOINT|USB_DIR_OUT, UAC_EP_CS_ATTR_PITCH_CONTROL << 8, ep, data, sizeof(data))) < 0) { snd_printk(KERN_ERR "%d:%d:%d: cannot set enable PITCH\n", dev->devnum, iface, ep); return err; } return 0; } static int init_pitch_v2(struct snd_usb_audio *chip, int iface, struct usb_host_interface *alts, struct audioformat *fmt) { struct usb_device *dev = chip->dev; unsigned char data[1]; int err; data[0] = 1; if ((err = snd_usb_ctl_msg(dev, usb_sndctrlpipe(dev, 0), UAC2_CS_CUR, USB_TYPE_CLASS | USB_RECIP_ENDPOINT | USB_DIR_OUT, UAC2_EP_CS_PITCH << 8, 0, data, sizeof(data))) < 0) { snd_printk(KERN_ERR "%d:%d:%d: cannot set enable PITCH (v2)\n", dev->devnum, iface, fmt->altsetting); return err; } return 0; } /* * initialize the pitch control and sample rate */ int snd_usb_init_pitch(struct snd_usb_audio *chip, int iface, struct usb_host_interface *alts, struct audioformat *fmt) { struct usb_interface_descriptor *altsd = get_iface_desc(alts); /* if endpoint doesn't have pitch control, bail out */ if (!(fmt->attributes & 
UAC_EP_CS_ATTR_PITCH_CONTROL)) return 0; switch (altsd->bInterfaceProtocol) { case UAC_VERSION_1: default: return init_pitch_v1(chip, iface, alts, fmt); case UAC_VERSION_2: return init_pitch_v2(chip, iface, alts, fmt); } } static int start_endpoints(struct snd_usb_substream *subs, bool can_sleep) { int err; if (!subs->data_endpoint) return -EINVAL; if (!test_and_set_bit(SUBSTREAM_FLAG_DATA_EP_STARTED, &subs->flags)) { struct snd_usb_endpoint *ep = subs->data_endpoint; snd_printdd(KERN_DEBUG "Starting data EP @%p\n", ep); ep->data_subs = subs; err = snd_usb_endpoint_start(ep, can_sleep); if (err < 0) { clear_bit(SUBSTREAM_FLAG_DATA_EP_STARTED, &subs->flags); return err; } } if (subs->sync_endpoint && !test_and_set_bit(SUBSTREAM_FLAG_SYNC_EP_STARTED, &subs->flags)) { struct snd_usb_endpoint *ep = subs->sync_endpoint; if (subs->data_endpoint->iface != subs->sync_endpoint->iface || subs->data_endpoint->alt_idx != subs->sync_endpoint->alt_idx) { err = usb_set_interface(subs->dev, subs->sync_endpoint->iface, subs->sync_endpoint->alt_idx); if (err < 0) { snd_printk(KERN_ERR "%d:%d:%d: cannot set interface (%d)\n", subs->dev->devnum, subs->sync_endpoint->iface, subs->sync_endpoint->alt_idx, err); return -EIO; } } snd_printdd(KERN_DEBUG "Starting sync EP @%p\n", ep); ep->sync_slave = subs->data_endpoint; err = snd_usb_endpoint_start(ep, can_sleep); if (err < 0) { clear_bit(SUBSTREAM_FLAG_SYNC_EP_STARTED, &subs->flags); return err; } } return 0; } static void stop_endpoints(struct snd_usb_substream *subs, bool wait) { if (test_and_clear_bit(SUBSTREAM_FLAG_SYNC_EP_STARTED, &subs->flags)) snd_usb_endpoint_stop(subs->sync_endpoint); if (test_and_clear_bit(SUBSTREAM_FLAG_DATA_EP_STARTED, &subs->flags)) snd_usb_endpoint_stop(subs->data_endpoint); if (wait) { snd_usb_endpoint_sync_pending_stop(subs->sync_endpoint); snd_usb_endpoint_sync_pending_stop(subs->data_endpoint); } } static int deactivate_endpoints(struct snd_usb_substream *subs) { int reta, retb; reta = 
snd_usb_endpoint_deactivate(subs->sync_endpoint); retb = snd_usb_endpoint_deactivate(subs->data_endpoint); if (reta < 0) return reta; if (retb < 0) return retb; return 0; } /* * find a matching format and set up the interface */ static int set_format(struct snd_usb_substream *subs, struct audioformat *fmt) { struct usb_device *dev = subs->dev; struct usb_host_interface *alts; struct usb_interface_descriptor *altsd; struct usb_interface *iface; unsigned int ep, attr; int is_playback = subs->direction == SNDRV_PCM_STREAM_PLAYBACK; int err, implicit_fb = 0; iface = usb_ifnum_to_if(dev, fmt->iface); if (WARN_ON(!iface)) return -EINVAL; alts = &iface->altsetting[fmt->altset_idx]; altsd = get_iface_desc(alts); if (WARN_ON(altsd->bAlternateSetting != fmt->altsetting)) return -EINVAL; if (fmt == subs->cur_audiofmt) return 0; /* close the old interface */ if (subs->interface >= 0 && subs->interface != fmt->iface) { err = usb_set_interface(subs->dev, subs->interface, 0); if (err < 0) { snd_printk(KERN_ERR "%d:%d:%d: return to setting 0 failed (%d)\n", dev->devnum, fmt->iface, fmt->altsetting, err); return -EIO; } subs->interface = -1; subs->altset_idx = 0; } /* set interface */ if (subs->interface != fmt->iface || subs->altset_idx != fmt->altset_idx) { err = usb_set_interface(dev, fmt->iface, fmt->altsetting); if (err < 0) { snd_printk(KERN_ERR "%d:%d:%d: usb_set_interface failed (%d)\n", dev->devnum, fmt->iface, fmt->altsetting, err); return -EIO; } snd_printdd(KERN_INFO "setting usb interface %d:%d\n", fmt->iface, fmt->altsetting); subs->interface = fmt->iface; subs->altset_idx = fmt->altset_idx; snd_usb_set_interface_quirk(dev); } subs->data_endpoint = snd_usb_add_endpoint(subs->stream->chip, alts, fmt->endpoint, subs->direction, SND_USB_ENDPOINT_TYPE_DATA); if (!subs->data_endpoint) return -EINVAL; /* we need a sync pipe in async OUT or adaptive IN mode */ /* check the number of EP, since some devices have broken * descriptors which fool us. 
if it has only one EP, * assume it as adaptive-out or sync-in. */ attr = fmt->ep_attr & USB_ENDPOINT_SYNCTYPE; switch (subs->stream->chip->usb_id) { case USB_ID(0x0763, 0x2030): /* M-Audio Fast Track C400 */ case USB_ID(0x0763, 0x2031): /* M-Audio Fast Track C600 */ if (is_playback) { implicit_fb = 1; ep = 0x81; iface = usb_ifnum_to_if(dev, 3); if (!iface || iface->num_altsetting == 0) return -EINVAL; alts = &iface->altsetting[1]; goto add_sync_ep; } break; case USB_ID(0x0763, 0x2080): /* M-Audio FastTrack Ultra */ case USB_ID(0x0763, 0x2081): if (is_playback) { implicit_fb = 1; ep = 0x81; iface = usb_ifnum_to_if(dev, 2); if (!iface || iface->num_altsetting == 0) return -EINVAL; alts = &iface->altsetting[1]; goto add_sync_ep; } } if (((is_playback && attr == USB_ENDPOINT_SYNC_ASYNC) || (!is_playback && attr == USB_ENDPOINT_SYNC_ADAPTIVE)) && altsd->bNumEndpoints >= 2) { /* check sync-pipe endpoint */ /* ... and check descriptor size before accessing bSynchAddress because there is a version of the SB Audigy 2 NX firmware lacking the audio fields in the endpoint descriptors */ if ((get_endpoint(alts, 1)->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) != USB_ENDPOINT_XFER_ISOC || (get_endpoint(alts, 1)->bLength >= USB_DT_ENDPOINT_AUDIO_SIZE && get_endpoint(alts, 1)->bSynchAddress != 0 && !implicit_fb)) { snd_printk(KERN_ERR "%d:%d:%d : invalid sync pipe. bmAttributes %02x, bLength %d, bSynchAddress %02x\n", dev->devnum, fmt->iface, fmt->altsetting, get_endpoint(alts, 1)->bmAttributes, get_endpoint(alts, 1)->bLength, get_endpoint(alts, 1)->bSynchAddress); return -EINVAL; } ep = get_endpoint(alts, 1)->bEndpointAddress; if (!implicit_fb && get_endpoint(alts, 0)->bLength >= USB_DT_ENDPOINT_AUDIO_SIZE && (( is_playback && ep != (unsigned int)(get_endpoint(alts, 0)->bSynchAddress | USB_DIR_IN)) || (!is_playback && ep != (unsigned int)(get_endpoint(alts, 0)->bSynchAddress & ~USB_DIR_IN)))) { snd_printk(KERN_ERR "%d:%d:%d : invalid sync pipe. 
is_playback %d, ep %02x, bSynchAddress %02x\n", dev->devnum, fmt->iface, fmt->altsetting, is_playback, ep, get_endpoint(alts, 0)->bSynchAddress); return -EINVAL; } implicit_fb = (get_endpoint(alts, 1)->bmAttributes & USB_ENDPOINT_USAGE_MASK) == USB_ENDPOINT_USAGE_IMPLICIT_FB; add_sync_ep: subs->sync_endpoint = snd_usb_add_endpoint(subs->stream->chip, alts, ep, !subs->direction, implicit_fb ? SND_USB_ENDPOINT_TYPE_DATA : SND_USB_ENDPOINT_TYPE_SYNC); if (!subs->sync_endpoint) return -EINVAL; subs->data_endpoint->sync_master = subs->sync_endpoint; } if ((err = snd_usb_init_pitch(subs->stream->chip, fmt->iface, alts, fmt)) < 0) return err; subs->cur_audiofmt = fmt; snd_usb_set_format_quirk(subs, fmt); #if 0 printk(KERN_DEBUG "setting done: format = %d, rate = %d..%d, channels = %d\n", fmt->format, fmt->rate_min, fmt->rate_max, fmt->channels); printk(KERN_DEBUG " datapipe = 0x%0x, syncpipe = 0x%0x\n", subs->datapipe, subs->syncpipe); #endif return 0; } /* * Return the score of matching two audioformats. * Veto the audioformat if: * - It has no channels for some reason. * - Requested PCM format is not supported. * - Requested sample rate is not supported. */ static int match_endpoint_audioformats(struct audioformat *fp, struct audioformat *match, int rate, snd_pcm_format_t pcm_format) { int i; int score = 0; if (fp->channels < 1) { snd_printdd("%s: (fmt @%p) no channels\n", __func__, fp); return 0; } if (!(fp->formats & pcm_format_to_bits(pcm_format))) { snd_printdd("%s: (fmt @%p) no match for format %d\n", __func__, fp, pcm_format); return 0; } for (i = 0; i < fp->nr_rates; i++) { if (fp->rate_table[i] == rate) { score++; break; } } if (!score) { snd_printdd("%s: (fmt @%p) no match for rate %d\n", __func__, fp, rate); return 0; } if (fp->channels == match->channels) score++; snd_printdd("%s: (fmt @%p) score %d\n", __func__, fp, score); return score; } /* * Configure the sync ep using the rate and pcm format of the data ep. 
*/ static int configure_sync_endpoint(struct snd_usb_substream *subs) { int ret; struct audioformat *fp; struct audioformat *sync_fp = NULL; int cur_score = 0; int sync_period_bytes = subs->period_bytes; struct snd_usb_substream *sync_subs = &subs->stream->substream[subs->direction ^ 1]; if (subs->sync_endpoint->type != SND_USB_ENDPOINT_TYPE_DATA || !subs->stream) return snd_usb_endpoint_set_params(subs->sync_endpoint, subs->pcm_format, subs->channels, subs->period_bytes, subs->cur_rate, subs->cur_audiofmt, NULL); /* Try to find the best matching audioformat. */ list_for_each_entry(fp, &sync_subs->fmt_list, list) { int score = match_endpoint_audioformats(fp, subs->cur_audiofmt, subs->cur_rate, subs->pcm_format); if (score > cur_score) { sync_fp = fp; cur_score = score; } } if (unlikely(sync_fp == NULL)) { snd_printk(KERN_ERR "%s: no valid audioformat for sync ep %x found\n", __func__, sync_subs->ep_num); return -EINVAL; } /* * Recalculate the period bytes if channel number differ between * data and sync ep audioformat. */ if (sync_fp->channels != subs->channels) { sync_period_bytes = (subs->period_bytes / subs->channels) * sync_fp->channels; snd_printdd("%s: adjusted sync ep period bytes (%d -> %d)\n", __func__, subs->period_bytes, sync_period_bytes); } ret = snd_usb_endpoint_set_params(subs->sync_endpoint, subs->pcm_format, sync_fp->channels, sync_period_bytes, subs->cur_rate, sync_fp, NULL); return ret; } /* * configure endpoint params * * called during initial setup and upon resume */ static int configure_endpoint(struct snd_usb_substream *subs) { int ret; /* format changed */ stop_endpoints(subs, true); ret = snd_usb_endpoint_set_params(subs->data_endpoint, subs->pcm_format, subs->channels, subs->period_bytes, subs->cur_rate, subs->cur_audiofmt, subs->sync_endpoint); if (ret < 0) return ret; if (subs->sync_endpoint) ret = configure_sync_endpoint(subs); return ret; } /* * hw_params callback * * allocate a buffer and set the given audio format. 
* * so far we use a physically linear buffer although packetize transfer * doesn't need a continuous area. * if sg buffer is supported on the later version of alsa, we'll follow * that. */ static int snd_usb_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *hw_params) { struct snd_usb_substream *subs = substream->runtime->private_data; struct audioformat *fmt; int ret; ret = snd_pcm_lib_alloc_vmalloc_buffer(substream, params_buffer_bytes(hw_params)); if (ret < 0) return ret; subs->pcm_format = params_format(hw_params); subs->period_bytes = params_period_bytes(hw_params); subs->channels = params_channels(hw_params); subs->cur_rate = params_rate(hw_params); fmt = find_format(subs); if (!fmt) { snd_printd(KERN_DEBUG "cannot set format: format = %#x, rate = %d, channels = %d\n", subs->pcm_format, subs->cur_rate, subs->channels); return -EINVAL; } down_read(&subs->stream->chip->shutdown_rwsem); if (subs->stream->chip->shutdown) ret = -ENODEV; else ret = set_format(subs, fmt); up_read(&subs->stream->chip->shutdown_rwsem); if (ret < 0) return ret; subs->interface = fmt->iface; subs->altset_idx = fmt->altset_idx; subs->need_setup_ep = true; return 0; } /* * hw_free callback * * reset the audio format and release the buffer */ static int snd_usb_hw_free(struct snd_pcm_substream *substream) { struct snd_usb_substream *subs = substream->runtime->private_data; subs->cur_audiofmt = NULL; subs->cur_rate = 0; subs->period_bytes = 0; down_read(&subs->stream->chip->shutdown_rwsem); if (!subs->stream->chip->shutdown) { stop_endpoints(subs, true); deactivate_endpoints(subs); } up_read(&subs->stream->chip->shutdown_rwsem); return snd_pcm_lib_free_vmalloc_buffer(substream); } /* * prepare callback * * only a few subtle things... 
*/ static int snd_usb_pcm_prepare(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct snd_usb_substream *subs = runtime->private_data; struct usb_host_interface *alts; struct usb_interface *iface; int ret; if (! subs->cur_audiofmt) { snd_printk(KERN_ERR "usbaudio: no format is specified!\n"); return -ENXIO; } down_read(&subs->stream->chip->shutdown_rwsem); if (subs->stream->chip->shutdown) { ret = -ENODEV; goto unlock; } if (snd_BUG_ON(!subs->data_endpoint)) { ret = -EIO; goto unlock; } snd_usb_endpoint_sync_pending_stop(subs->sync_endpoint); snd_usb_endpoint_sync_pending_stop(subs->data_endpoint); ret = set_format(subs, subs->cur_audiofmt); if (ret < 0) goto unlock; iface = usb_ifnum_to_if(subs->dev, subs->cur_audiofmt->iface); alts = &iface->altsetting[subs->cur_audiofmt->altset_idx]; ret = snd_usb_init_sample_rate(subs->stream->chip, subs->cur_audiofmt->iface, alts, subs->cur_audiofmt, subs->cur_rate); if (ret < 0) goto unlock; if (subs->need_setup_ep) { ret = configure_endpoint(subs); if (ret < 0) goto unlock; subs->need_setup_ep = false; } /* some unit conversions in runtime */ subs->data_endpoint->maxframesize = bytes_to_frames(runtime, subs->data_endpoint->maxpacksize); subs->data_endpoint->curframesize = bytes_to_frames(runtime, subs->data_endpoint->curpacksize); /* reset the pointer */ subs->hwptr_done = 0; subs->transfer_done = 0; subs->last_delay = 0; subs->last_frame_number = 0; runtime->delay = 0; /* for playback, submit the URBs now; otherwise, the first hwptr_done * updates for all URBs would happen at the same time when starting */ if (subs->direction == SNDRV_PCM_STREAM_PLAYBACK) ret = start_endpoints(subs, true); unlock: up_read(&subs->stream->chip->shutdown_rwsem); return ret; } static struct snd_pcm_hardware snd_usb_hardware = { .info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_BATCH | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_PAUSE, 
.buffer_bytes_max = 1024 * 1024, .period_bytes_min = 64, .period_bytes_max = 512 * 1024, .periods_min = 2, .periods_max = 1024, }; static int hw_check_valid_format(struct snd_usb_substream *subs, struct snd_pcm_hw_params *params, struct audioformat *fp) { struct snd_interval *it = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE); struct snd_interval *ct = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS); struct snd_mask *fmts = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT); struct snd_interval *pt = hw_param_interval(params, SNDRV_PCM_HW_PARAM_PERIOD_TIME); struct snd_mask check_fmts; unsigned int ptime; /* check the format */ snd_mask_none(&check_fmts); check_fmts.bits[0] = (u32)fp->formats; check_fmts.bits[1] = (u32)(fp->formats >> 32); snd_mask_intersect(&check_fmts, fmts); if (snd_mask_empty(&check_fmts)) { hwc_debug(" > check: no supported format %d\n", fp->format); return 0; } /* check the channels */ if (fp->channels < ct->min || fp->channels > ct->max) { hwc_debug(" > check: no valid channels %d (%d/%d)\n", fp->channels, ct->min, ct->max); return 0; } /* check the rate is within the range */ if (fp->rate_min > it->max || (fp->rate_min == it->max && it->openmax)) { hwc_debug(" > check: rate_min %d > max %d\n", fp->rate_min, it->max); return 0; } if (fp->rate_max < it->min || (fp->rate_max == it->min && it->openmin)) { hwc_debug(" > check: rate_max %d < min %d\n", fp->rate_max, it->min); return 0; } /* check whether the period time is >= the data packet interval */ if (subs->speed != USB_SPEED_FULL) { ptime = 125 * (1 << fp->datainterval); if (ptime > pt->max || (ptime == pt->max && pt->openmax)) { hwc_debug(" > check: ptime %u > max %u\n", ptime, pt->max); return 0; } } return 1; } static int hw_rule_rate(struct snd_pcm_hw_params *params, struct snd_pcm_hw_rule *rule) { struct snd_usb_substream *subs = rule->private; struct audioformat *fp; struct snd_interval *it = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE); unsigned int rmin, rmax; int 
changed; hwc_debug("hw_rule_rate: (%d,%d)\n", it->min, it->max); changed = 0; rmin = rmax = 0; list_for_each_entry(fp, &subs->fmt_list, list) { if (!hw_check_valid_format(subs, params, fp)) continue; if (changed++) { if (rmin > fp->rate_min) rmin = fp->rate_min; if (rmax < fp->rate_max) rmax = fp->rate_max; } else { rmin = fp->rate_min; rmax = fp->rate_max; } } if (!changed) { hwc_debug(" --> get empty\n"); it->empty = 1; return -EINVAL; } changed = 0; if (it->min < rmin) { it->min = rmin; it->openmin = 0; changed = 1; } if (it->max > rmax) { it->max = rmax; it->openmax = 0; changed = 1; } if (snd_interval_checkempty(it)) { it->empty = 1; return -EINVAL; } hwc_debug(" --> (%d, %d) (changed = %d)\n", it->min, it->max, changed); return changed; } static int hw_rule_channels(struct snd_pcm_hw_params *params, struct snd_pcm_hw_rule *rule) { struct snd_usb_substream *subs = rule->private; struct audioformat *fp; struct snd_interval *it = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS); unsigned int rmin, rmax; int changed; hwc_debug("hw_rule_channels: (%d,%d)\n", it->min, it->max); changed = 0; rmin = rmax = 0; list_for_each_entry(fp, &subs->fmt_list, list) { if (!hw_check_valid_format(subs, params, fp)) continue; if (changed++) { if (rmin > fp->channels) rmin = fp->channels; if (rmax < fp->channels) rmax = fp->channels; } else { rmin = fp->channels; rmax = fp->channels; } } if (!changed) { hwc_debug(" --> get empty\n"); it->empty = 1; return -EINVAL; } changed = 0; if (it->min < rmin) { it->min = rmin; it->openmin = 0; changed = 1; } if (it->max > rmax) { it->max = rmax; it->openmax = 0; changed = 1; } if (snd_interval_checkempty(it)) { it->empty = 1; return -EINVAL; } hwc_debug(" --> (%d, %d) (changed = %d)\n", it->min, it->max, changed); return changed; } static int hw_rule_format(struct snd_pcm_hw_params *params, struct snd_pcm_hw_rule *rule) { struct snd_usb_substream *subs = rule->private; struct audioformat *fp; struct snd_mask *fmt = hw_param_mask(params, 
SNDRV_PCM_HW_PARAM_FORMAT); u64 fbits; u32 oldbits[2]; int changed; hwc_debug("hw_rule_format: %x:%x\n", fmt->bits[0], fmt->bits[1]); fbits = 0; list_for_each_entry(fp, &subs->fmt_list, list) { if (!hw_check_valid_format(subs, params, fp)) continue; fbits |= fp->formats; } oldbits[0] = fmt->bits[0]; oldbits[1] = fmt->bits[1]; fmt->bits[0] &= (u32)fbits; fmt->bits[1] &= (u32)(fbits >> 32); if (!fmt->bits[0] && !fmt->bits[1]) { hwc_debug(" --> get empty\n"); return -EINVAL; } changed = (oldbits[0] != fmt->bits[0] || oldbits[1] != fmt->bits[1]); hwc_debug(" --> %x:%x (changed = %d)\n", fmt->bits[0], fmt->bits[1], changed); return changed; } static int hw_rule_period_time(struct snd_pcm_hw_params *params, struct snd_pcm_hw_rule *rule) { struct snd_usb_substream *subs = rule->private; struct audioformat *fp; struct snd_interval *it; unsigned char min_datainterval; unsigned int pmin; int changed; it = hw_param_interval(params, SNDRV_PCM_HW_PARAM_PERIOD_TIME); hwc_debug("hw_rule_period_time: (%u,%u)\n", it->min, it->max); min_datainterval = 0xff; list_for_each_entry(fp, &subs->fmt_list, list) { if (!hw_check_valid_format(subs, params, fp)) continue; min_datainterval = min(min_datainterval, fp->datainterval); } if (min_datainterval == 0xff) { hwc_debug(" --> get empty\n"); it->empty = 1; return -EINVAL; } pmin = 125 * (1 << min_datainterval); changed = 0; if (it->min < pmin) { it->min = pmin; it->openmin = 0; changed = 1; } if (snd_interval_checkempty(it)) { it->empty = 1; return -EINVAL; } hwc_debug(" --> (%u,%u) (changed = %d)\n", it->min, it->max, changed); return changed; } /* * If the device supports unusual bit rates, does the request meet these? 
*/ static int snd_usb_pcm_check_knot(struct snd_pcm_runtime *runtime, struct snd_usb_substream *subs) { struct audioformat *fp; int *rate_list; int count = 0, needs_knot = 0; int err; kfree(subs->rate_list.list); subs->rate_list.list = NULL; list_for_each_entry(fp, &subs->fmt_list, list) { if (fp->rates & SNDRV_PCM_RATE_CONTINUOUS) return 0; count += fp->nr_rates; if (fp->rates & SNDRV_PCM_RATE_KNOT) needs_knot = 1; } if (!needs_knot) return 0; subs->rate_list.list = rate_list = kmalloc(sizeof(int) * count, GFP_KERNEL); if (!subs->rate_list.list) return -ENOMEM; subs->rate_list.count = count; subs->rate_list.mask = 0; count = 0; list_for_each_entry(fp, &subs->fmt_list, list) { int i; for (i = 0; i < fp->nr_rates; i++) rate_list[count++] = fp->rate_table[i]; } err = snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE, &subs->rate_list); if (err < 0) return err; return 0; } /* * set up the runtime hardware information. */ static int setup_hw_info(struct snd_pcm_runtime *runtime, struct snd_usb_substream *subs) { struct audioformat *fp; unsigned int pt, ptmin; int param_period_time_if_needed; int err; runtime->hw.formats = subs->formats; runtime->hw.rate_min = 0x7fffffff; runtime->hw.rate_max = 0; runtime->hw.channels_min = 256; runtime->hw.channels_max = 0; runtime->hw.rates = 0; ptmin = UINT_MAX; /* check min/max rates and channels */ list_for_each_entry(fp, &subs->fmt_list, list) { runtime->hw.rates |= fp->rates; if (runtime->hw.rate_min > fp->rate_min) runtime->hw.rate_min = fp->rate_min; if (runtime->hw.rate_max < fp->rate_max) runtime->hw.rate_max = fp->rate_max; if (runtime->hw.channels_min > fp->channels) runtime->hw.channels_min = fp->channels; if (runtime->hw.channels_max < fp->channels) runtime->hw.channels_max = fp->channels; if (fp->fmt_type == UAC_FORMAT_TYPE_II && fp->frame_size > 0) { /* FIXME: there might be more than one audio formats... 
*/ runtime->hw.period_bytes_min = runtime->hw.period_bytes_max = fp->frame_size; } pt = 125 * (1 << fp->datainterval); ptmin = min(ptmin, pt); } err = snd_usb_autoresume(subs->stream->chip); if (err < 0) return err; param_period_time_if_needed = SNDRV_PCM_HW_PARAM_PERIOD_TIME; if (subs->speed == USB_SPEED_FULL) /* full speed devices have fixed data packet interval */ ptmin = 1000; if (ptmin == 1000) /* if period time doesn't go below 1 ms, no rules needed */ param_period_time_if_needed = -1; snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_PERIOD_TIME, ptmin, UINT_MAX); if ((err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE, hw_rule_rate, subs, SNDRV_PCM_HW_PARAM_FORMAT, SNDRV_PCM_HW_PARAM_CHANNELS, param_period_time_if_needed, -1)) < 0) goto rep_err; if ((err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS, hw_rule_channels, subs, SNDRV_PCM_HW_PARAM_FORMAT, SNDRV_PCM_HW_PARAM_RATE, param_period_time_if_needed, -1)) < 0) goto rep_err; if ((err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FORMAT, hw_rule_format, subs, SNDRV_PCM_HW_PARAM_RATE, SNDRV_PCM_HW_PARAM_CHANNELS, param_period_time_if_needed, -1)) < 0) goto rep_err; if (param_period_time_if_needed >= 0) { err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_TIME, hw_rule_period_time, subs, SNDRV_PCM_HW_PARAM_FORMAT, SNDRV_PCM_HW_PARAM_CHANNELS, SNDRV_PCM_HW_PARAM_RATE, -1); if (err < 0) goto rep_err; } if ((err = snd_usb_pcm_check_knot(runtime, subs)) < 0) goto rep_err; return 0; rep_err: snd_usb_autosuspend(subs->stream->chip); return err; } static int snd_usb_pcm_open(struct snd_pcm_substream *substream, int direction) { struct snd_usb_stream *as = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; struct snd_usb_substream *subs = &as->substream[direction]; subs->interface = -1; subs->altset_idx = 0; runtime->hw = snd_usb_hardware; runtime->private_data = subs; subs->pcm_substream = substream; /* runtime PM is also 
done there */ /* initialize DSD/DOP context */ subs->dsd_dop.byte_idx = 0; subs->dsd_dop.channel = 0; subs->dsd_dop.marker = 1; return setup_hw_info(runtime, subs); } static int snd_usb_pcm_close(struct snd_pcm_substream *substream, int direction) { struct snd_usb_stream *as = snd_pcm_substream_chip(substream); struct snd_usb_substream *subs = &as->substream[direction]; stop_endpoints(subs, true); if (!as->chip->shutdown && subs->interface >= 0) { usb_set_interface(subs->dev, subs->interface, 0); subs->interface = -1; } subs->pcm_substream = NULL; snd_usb_autosuspend(subs->stream->chip); return 0; } /* Since a URB can handle only a single linear buffer, we must use double * buffering when the data to be transferred overflows the buffer boundary. * To avoid inconsistencies when updating hwptr_done, we use double buffering * for all URBs. */ static void retire_capture_urb(struct snd_usb_substream *subs, struct urb *urb) { struct snd_pcm_runtime *runtime = subs->pcm_substream->runtime; unsigned int stride, frames, bytes, oldptr; int i, period_elapsed = 0; unsigned long flags; unsigned char *cp; int current_frame_number; /* read frame number here, update pointer in critical section */ current_frame_number = usb_get_current_frame_number(subs->dev); stride = runtime->frame_bits >> 3; for (i = 0; i < urb->number_of_packets; i++) { cp = (unsigned char *)urb->transfer_buffer + urb->iso_frame_desc[i].offset + subs->pkt_offset_adj; if (urb->iso_frame_desc[i].status && printk_ratelimit()) { snd_printdd(KERN_ERR "frame %d active: %d\n", i, urb->iso_frame_desc[i].status); // continue; } bytes = urb->iso_frame_desc[i].actual_length; frames = bytes / stride; if (!subs->txfr_quirk) bytes = frames * stride; if (bytes % (runtime->sample_bits >> 3) != 0) { int oldbytes = bytes; bytes = frames * stride; snd_printdd(KERN_ERR "Corrected urb data len. 
%d->%d\n", oldbytes, bytes); } /* update the current pointer */ spin_lock_irqsave(&subs->lock, flags); oldptr = subs->hwptr_done; subs->hwptr_done += bytes; if (subs->hwptr_done >= runtime->buffer_size * stride) subs->hwptr_done -= runtime->buffer_size * stride; frames = (bytes + (oldptr % stride)) / stride; subs->transfer_done += frames; if (subs->transfer_done >= runtime->period_size) { subs->transfer_done -= runtime->period_size; period_elapsed = 1; } /* capture delay is by construction limited to one URB, * reset delays here */ runtime->delay = subs->last_delay = 0; /* realign last_frame_number */ subs->last_frame_number = current_frame_number; subs->last_frame_number &= 0xFF; /* keep 8 LSBs */ spin_unlock_irqrestore(&subs->lock, flags); /* copy a data chunk */ if (oldptr + bytes > runtime->buffer_size * stride) { unsigned int bytes1 = runtime->buffer_size * stride - oldptr; memcpy(runtime->dma_area + oldptr, cp, bytes1); memcpy(runtime->dma_area, cp + bytes1, bytes - bytes1); } else { memcpy(runtime->dma_area + oldptr, cp, bytes); } } if (period_elapsed) snd_pcm_period_elapsed(subs->pcm_substream); } static inline void fill_playback_urb_dsd_dop(struct snd_usb_substream *subs, struct urb *urb, unsigned int bytes) { struct snd_pcm_runtime *runtime = subs->pcm_substream->runtime; unsigned int stride = runtime->frame_bits >> 3; unsigned int dst_idx = 0; unsigned int src_idx = subs->hwptr_done; unsigned int wrap = runtime->buffer_size * stride; u8 *dst = urb->transfer_buffer; u8 *src = runtime->dma_area; u8 marker[] = { 0x05, 0xfa }; /* * The DSP DOP format defines a way to transport DSD samples over * normal PCM data endpoints. It requires stuffing of marker bytes * (0x05 and 0xfa, alternating per sample frame), and then expects * 2 additional bytes of actual payload. The whole frame is stored * LSB. * * Hence, for a stereo transport, the buffer layout looks like this, * where L refers to left channel samples and R to right. 
* * L1 L2 0x05 R1 R2 0x05 L3 L4 0xfa R3 R4 0xfa * L5 L6 0x05 R5 R6 0x05 L7 L8 0xfa R7 R8 0xfa * ..... * */ while (bytes--) { if (++subs->dsd_dop.byte_idx == 3) { /* frame boundary? */ dst[dst_idx++] = marker[subs->dsd_dop.marker]; src_idx += 2; subs->dsd_dop.byte_idx = 0; if (++subs->dsd_dop.channel % runtime->channels == 0) { /* alternate the marker */ subs->dsd_dop.marker++; subs->dsd_dop.marker %= ARRAY_SIZE(marker); subs->dsd_dop.channel = 0; } } else { /* stuff the DSD payload */ int idx = (src_idx + subs->dsd_dop.byte_idx - 1) % wrap; if (subs->cur_audiofmt->dsd_bitrev) dst[dst_idx++] = bitrev8(src[idx]); else dst[dst_idx++] = src[idx]; subs->hwptr_done++; } } } static void prepare_playback_urb(struct snd_usb_substream *subs, struct urb *urb) { struct snd_pcm_runtime *runtime = subs->pcm_substream->runtime; struct snd_usb_endpoint *ep = subs->data_endpoint; struct snd_urb_ctx *ctx = urb->context; unsigned int counts, frames, bytes; int i, stride, period_elapsed = 0; unsigned long flags; stride = runtime->frame_bits >> 3; frames = 0; urb->number_of_packets = 0; spin_lock_irqsave(&subs->lock, flags); for (i = 0; i < ctx->packets; i++) { if (ctx->packet_size[i]) counts = ctx->packet_size[i]; else counts = snd_usb_endpoint_next_packet_size(ep); /* set up descriptor */ urb->iso_frame_desc[i].offset = frames * ep->stride; urb->iso_frame_desc[i].length = counts * ep->stride; frames += counts; urb->number_of_packets++; subs->transfer_done += counts; if (subs->transfer_done >= runtime->period_size) { subs->transfer_done -= runtime->period_size; period_elapsed = 1; if (subs->fmt_type == UAC_FORMAT_TYPE_II) { if (subs->transfer_done > 0) { /* FIXME: fill-max mode is not * supported yet */ frames -= subs->transfer_done; counts -= subs->transfer_done; urb->iso_frame_desc[i].length = counts * ep->stride; subs->transfer_done = 0; } i++; if (i < ctx->packets) { /* add a transfer delimiter */ urb->iso_frame_desc[i].offset = frames * ep->stride; urb->iso_frame_desc[i].length = 
0; urb->number_of_packets++; } break; } } if (period_elapsed && !snd_usb_endpoint_implicit_feedback_sink(subs->data_endpoint)) /* finish at the period boundary */ break; } bytes = frames * ep->stride; if (unlikely(subs->pcm_format == SNDRV_PCM_FORMAT_DSD_U16_LE && subs->cur_audiofmt->dsd_dop)) { fill_playback_urb_dsd_dop(subs, urb, bytes); } else if (unlikely(subs->pcm_format == SNDRV_PCM_FORMAT_DSD_U8 && subs->cur_audiofmt->dsd_bitrev)) { /* bit-reverse the bytes */ u8 *buf = urb->transfer_buffer; for (i = 0; i < bytes; i++) { int idx = (subs->hwptr_done + i) % (runtime->buffer_size * stride); buf[i] = bitrev8(runtime->dma_area[idx]); } subs->hwptr_done += bytes; } else { /* usual PCM */ if (subs->hwptr_done + bytes > runtime->buffer_size * stride) { /* err, the transferred area goes over buffer boundary. */ unsigned int bytes1 = runtime->buffer_size * stride - subs->hwptr_done; memcpy(urb->transfer_buffer, runtime->dma_area + subs->hwptr_done, bytes1); memcpy(urb->transfer_buffer + bytes1, runtime->dma_area, bytes - bytes1); } else { memcpy(urb->transfer_buffer, runtime->dma_area + subs->hwptr_done, bytes); } subs->hwptr_done += bytes; } if (subs->hwptr_done >= runtime->buffer_size * stride) subs->hwptr_done -= runtime->buffer_size * stride; /* update delay with exact number of samples queued */ runtime->delay = subs->last_delay; runtime->delay += frames; subs->last_delay = runtime->delay; /* realign last_frame_number */ subs->last_frame_number = usb_get_current_frame_number(subs->dev); subs->last_frame_number &= 0xFF; /* keep 8 LSBs */ spin_unlock_irqrestore(&subs->lock, flags); urb->transfer_buffer_length = bytes; if (period_elapsed) snd_pcm_period_elapsed(subs->pcm_substream); } /* * process after playback data complete * - decrease the delay count again */ static void retire_playback_urb(struct snd_usb_substream *subs, struct urb *urb) { unsigned long flags; struct snd_pcm_runtime *runtime = subs->pcm_substream->runtime; struct snd_usb_endpoint *ep = 
subs->data_endpoint; int processed = urb->transfer_buffer_length / ep->stride; int est_delay; /* ignore the delay accounting when procssed=0 is given, i.e. * silent payloads are procssed before handling the actual data */ if (!processed) return; spin_lock_irqsave(&subs->lock, flags); if (!subs->last_delay) goto out; /* short path */ est_delay = snd_usb_pcm_delay(subs, runtime->rate); /* update delay with exact number of samples played */ if (processed > subs->last_delay) subs->last_delay = 0; else subs->last_delay -= processed; runtime->delay = subs->last_delay; /* * Report when delay estimate is off by more than 2ms. * The error should be lower than 2ms since the estimate relies * on two reads of a counter updated every ms. */ if (abs(est_delay - subs->last_delay) * 1000 > runtime->rate * 2) dev_dbg_ratelimited(&subs->dev->dev, "delay: estimated %d, actual %d\n", est_delay, subs->last_delay); if (!subs->running) { /* update last_frame_number for delay counting here since * prepare_playback_urb won't be called during pause */ subs->last_frame_number = usb_get_current_frame_number(subs->dev) & 0xff; } out: spin_unlock_irqrestore(&subs->lock, flags); } static int snd_usb_playback_open(struct snd_pcm_substream *substream) { return snd_usb_pcm_open(substream, SNDRV_PCM_STREAM_PLAYBACK); } static int snd_usb_playback_close(struct snd_pcm_substream *substream) { return snd_usb_pcm_close(substream, SNDRV_PCM_STREAM_PLAYBACK); } static int snd_usb_capture_open(struct snd_pcm_substream *substream) { return snd_usb_pcm_open(substream, SNDRV_PCM_STREAM_CAPTURE); } static int snd_usb_capture_close(struct snd_pcm_substream *substream) { return snd_usb_pcm_close(substream, SNDRV_PCM_STREAM_CAPTURE); } static int snd_usb_substream_playback_trigger(struct snd_pcm_substream *substream, int cmd) { struct snd_usb_substream *subs = substream->runtime->private_data; switch (cmd) { case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: subs->data_endpoint->prepare_data_urb 
= prepare_playback_urb; subs->data_endpoint->retire_data_urb = retire_playback_urb; subs->running = 1; return 0; case SNDRV_PCM_TRIGGER_STOP: stop_endpoints(subs, false); subs->running = 0; return 0; case SNDRV_PCM_TRIGGER_PAUSE_PUSH: subs->data_endpoint->prepare_data_urb = NULL; /* keep retire_data_urb for delay calculation */ subs->data_endpoint->retire_data_urb = retire_playback_urb; subs->running = 0; return 0; } return -EINVAL; } static int snd_usb_substream_capture_trigger(struct snd_pcm_substream *substream, int cmd) { int err; struct snd_usb_substream *subs = substream->runtime->private_data; switch (cmd) { case SNDRV_PCM_TRIGGER_START: err = start_endpoints(subs, false); if (err < 0) return err; subs->data_endpoint->retire_data_urb = retire_capture_urb; subs->running = 1; return 0; case SNDRV_PCM_TRIGGER_STOP: stop_endpoints(subs, false); subs->running = 0; return 0; case SNDRV_PCM_TRIGGER_PAUSE_PUSH: subs->data_endpoint->retire_data_urb = NULL; subs->running = 0; return 0; case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: subs->data_endpoint->retire_data_urb = retire_capture_urb; subs->running = 1; return 0; } return -EINVAL; } static struct snd_pcm_ops snd_usb_playback_ops = { .open = snd_usb_playback_open, .close = snd_usb_playback_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_usb_hw_params, .hw_free = snd_usb_hw_free, .prepare = snd_usb_pcm_prepare, .trigger = snd_usb_substream_playback_trigger, .pointer = snd_usb_pcm_pointer, .page = snd_pcm_lib_get_vmalloc_page, .mmap = snd_pcm_lib_mmap_vmalloc, }; static struct snd_pcm_ops snd_usb_capture_ops = { .open = snd_usb_capture_open, .close = snd_usb_capture_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_usb_hw_params, .hw_free = snd_usb_hw_free, .prepare = snd_usb_pcm_prepare, .trigger = snd_usb_substream_capture_trigger, .pointer = snd_usb_pcm_pointer, .page = snd_pcm_lib_get_vmalloc_page, .mmap = snd_pcm_lib_mmap_vmalloc, }; void snd_usb_set_pcm_ops(struct snd_pcm *pcm, int stream) { snd_pcm_set_ops(pcm, 
stream, stream == SNDRV_PCM_STREAM_PLAYBACK ? &snd_usb_playback_ops : &snd_usb_capture_ops); }
gpl-2.0
Global-KANGs/droid2we_kernel
sound/core/seq/seq_dummy.c
1914
6755
/* * ALSA sequencer MIDI-through client * Copyright (c) 1999-2000 by Takashi Iwai <tiwai@suse.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/init.h> #include <linux/slab.h> #include <linux/moduleparam.h> #include <sound/core.h> #include "seq_clientmgr.h" #include <sound/initval.h> #include <sound/asoundef.h> /* Sequencer MIDI-through client This gives a simple midi-through client. All the normal input events are redirected to output port immediately. The routing can be done via aconnect program in alsa-utils. Each client has a static client number 62 (= SNDRV_SEQ_CLIENT_DUMMY). If you want to auto-load this module, you may add the following alias in your /etc/conf.modules file. alias snd-seq-client-62 snd-seq-dummy The module is loaded on demand for client 62, or /proc/asound/seq/ is accessed. If you don't need this module to be loaded, alias snd-seq-client-62 as "off". This will help modprobe. The number of ports to be created can be specified via the module parameter "ports". For example, to create four ports, add the following option in /etc/modprobe.conf: option snd-seq-dummy ports=4 The modle option "duplex=1" enables duplex operation to the port. In duplex mode, a pair of ports are created instead of single port, and events are tunneled between pair-ports. 
For example, input to port A is sent to output port of another port B and vice versa. In duplex mode, each port has DUPLEX capability. */ MODULE_AUTHOR("Takashi Iwai <tiwai@suse.de>"); MODULE_DESCRIPTION("ALSA sequencer MIDI-through client"); MODULE_LICENSE("GPL"); MODULE_ALIAS("snd-seq-client-" __stringify(SNDRV_SEQ_CLIENT_DUMMY)); static int ports = 1; static int duplex; module_param(ports, int, 0444); MODULE_PARM_DESC(ports, "number of ports to be created"); module_param(duplex, bool, 0444); MODULE_PARM_DESC(duplex, "create DUPLEX ports"); struct snd_seq_dummy_port { int client; int port; int duplex; int connect; }; static int my_client = -1; /* * unuse callback - send ALL_SOUNDS_OFF and RESET_CONTROLLERS events * to subscribers. * Note: this callback is called only after all subscribers are removed. */ static int dummy_unuse(void *private_data, struct snd_seq_port_subscribe *info) { struct snd_seq_dummy_port *p; int i; struct snd_seq_event ev; p = private_data; memset(&ev, 0, sizeof(ev)); if (p->duplex) ev.source.port = p->connect; else ev.source.port = p->port; ev.dest.client = SNDRV_SEQ_ADDRESS_SUBSCRIBERS; ev.type = SNDRV_SEQ_EVENT_CONTROLLER; for (i = 0; i < 16; i++) { ev.data.control.channel = i; ev.data.control.param = MIDI_CTL_ALL_SOUNDS_OFF; snd_seq_kernel_client_dispatch(p->client, &ev, 0, 0); ev.data.control.param = MIDI_CTL_RESET_CONTROLLERS; snd_seq_kernel_client_dispatch(p->client, &ev, 0, 0); } return 0; } /* * event input callback - just redirect events to subscribers */ static int dummy_input(struct snd_seq_event *ev, int direct, void *private_data, int atomic, int hop) { struct snd_seq_dummy_port *p; struct snd_seq_event tmpev; p = private_data; if (ev->source.client == SNDRV_SEQ_CLIENT_SYSTEM || ev->type == SNDRV_SEQ_EVENT_KERNEL_ERROR) return 0; /* ignore system messages */ tmpev = *ev; if (p->duplex) tmpev.source.port = p->connect; else tmpev.source.port = p->port; tmpev.dest.client = SNDRV_SEQ_ADDRESS_SUBSCRIBERS; return 
snd_seq_kernel_client_dispatch(p->client, &tmpev, atomic, hop); } /* * free_private callback */ static void dummy_free(void *private_data) { kfree(private_data); } /* * create a port */ static struct snd_seq_dummy_port __init * create_port(int idx, int type) { struct snd_seq_port_info pinfo; struct snd_seq_port_callback pcb; struct snd_seq_dummy_port *rec; if ((rec = kzalloc(sizeof(*rec), GFP_KERNEL)) == NULL) return NULL; rec->client = my_client; rec->duplex = duplex; rec->connect = 0; memset(&pinfo, 0, sizeof(pinfo)); pinfo.addr.client = my_client; if (duplex) sprintf(pinfo.name, "Midi Through Port-%d:%c", idx, (type ? 'B' : 'A')); else sprintf(pinfo.name, "Midi Through Port-%d", idx); pinfo.capability = SNDRV_SEQ_PORT_CAP_READ | SNDRV_SEQ_PORT_CAP_SUBS_READ; pinfo.capability |= SNDRV_SEQ_PORT_CAP_WRITE | SNDRV_SEQ_PORT_CAP_SUBS_WRITE; if (duplex) pinfo.capability |= SNDRV_SEQ_PORT_CAP_DUPLEX; pinfo.type = SNDRV_SEQ_PORT_TYPE_MIDI_GENERIC | SNDRV_SEQ_PORT_TYPE_SOFTWARE | SNDRV_SEQ_PORT_TYPE_PORT; memset(&pcb, 0, sizeof(pcb)); pcb.owner = THIS_MODULE; pcb.unuse = dummy_unuse; pcb.event_input = dummy_input; pcb.private_free = dummy_free; pcb.private_data = rec; pinfo.kernel = &pcb; if (snd_seq_kernel_client_ctl(my_client, SNDRV_SEQ_IOCTL_CREATE_PORT, &pinfo) < 0) { kfree(rec); return NULL; } rec->port = pinfo.addr.port; return rec; } /* * register client and create ports */ static int __init register_client(void) { struct snd_seq_dummy_port *rec1, *rec2; int i; if (ports < 1) { snd_printk(KERN_ERR "invalid number of ports %d\n", ports); return -EINVAL; } /* create client */ my_client = snd_seq_create_kernel_client(NULL, SNDRV_SEQ_CLIENT_DUMMY, "Midi Through"); if (my_client < 0) return my_client; /* create ports */ for (i = 0; i < ports; i++) { rec1 = create_port(i, 0); if (rec1 == NULL) { snd_seq_delete_kernel_client(my_client); return -ENOMEM; } if (duplex) { rec2 = create_port(i, 1); if (rec2 == NULL) { snd_seq_delete_kernel_client(my_client); return -ENOMEM; } 
rec1->connect = rec2->port; rec2->connect = rec1->port; } } return 0; } /* * delete client if exists */ static void __exit delete_client(void) { if (my_client >= 0) snd_seq_delete_kernel_client(my_client); } /* * Init part */ static int __init alsa_seq_dummy_init(void) { int err; snd_seq_autoload_lock(); err = register_client(); snd_seq_autoload_unlock(); return err; } static void __exit alsa_seq_dummy_exit(void) { delete_client(); } module_init(alsa_seq_dummy_init) module_exit(alsa_seq_dummy_exit)
gpl-2.0
kashifmin/KKernel_yu_msm8916
drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
2170
8607
/* * Copyright 2012 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: Ben Skeggs */ #include <core/client.h> #include <core/os.h> #include <core/class.h> #include <core/engctx.h> #include <core/handle.h> #include <subdev/fb.h> #include <subdev/timer.h> #include <subdev/instmem.h> #include <engine/fifo.h> #include <engine/mpeg.h> #include <engine/graph/nv40.h> struct nv31_mpeg_priv { struct nouveau_mpeg base; atomic_t refcount; }; struct nv31_mpeg_chan { struct nouveau_object base; }; /******************************************************************************* * MPEG object classes ******************************************************************************/ static int nv31_mpeg_object_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nouveau_oclass *oclass, void *data, u32 size, struct nouveau_object **pobject) { struct nouveau_gpuobj *obj; int ret; ret = nouveau_gpuobj_create(parent, engine, oclass, 0, parent, 20, 16, 0, &obj); *pobject = nv_object(obj); if (ret) return ret; nv_wo32(obj, 0x00, nv_mclass(obj)); nv_wo32(obj, 0x04, 0x00000000); nv_wo32(obj, 0x08, 0x00000000); nv_wo32(obj, 0x0c, 0x00000000); return 0; } static int nv31_mpeg_mthd_dma(struct nouveau_object *object, u32 mthd, void *arg, u32 len) { struct nouveau_instmem *imem = nouveau_instmem(object); struct nv31_mpeg_priv *priv = (void *)object->engine; u32 inst = *(u32 *)arg << 4; u32 dma0 = nv_ro32(imem, inst + 0); u32 dma1 = nv_ro32(imem, inst + 4); u32 dma2 = nv_ro32(imem, inst + 8); u32 base = (dma2 & 0xfffff000) | (dma0 >> 20); u32 size = dma1 + 1; /* only allow linear DMA objects */ if (!(dma0 & 0x00002000)) return -EINVAL; if (mthd == 0x0190) { /* DMA_CMD */ nv_mask(priv, 0x00b300, 0x00030000, (dma0 & 0x00030000)); nv_wr32(priv, 0x00b334, base); nv_wr32(priv, 0x00b324, size); } else if (mthd == 0x01a0) { /* DMA_DATA */ nv_mask(priv, 0x00b300, 0x000c0000, (dma0 & 0x00030000) << 2); nv_wr32(priv, 0x00b360, base); nv_wr32(priv, 0x00b364, size); } else { /* DMA_IMAGE, VRAM only */ if (dma0 & 0x000c0000) return -EINVAL; 
nv_wr32(priv, 0x00b370, base); nv_wr32(priv, 0x00b374, size); } return 0; } static struct nouveau_ofuncs nv31_mpeg_ofuncs = { .ctor = nv31_mpeg_object_ctor, .dtor = _nouveau_gpuobj_dtor, .init = _nouveau_gpuobj_init, .fini = _nouveau_gpuobj_fini, .rd32 = _nouveau_gpuobj_rd32, .wr32 = _nouveau_gpuobj_wr32, }; static struct nouveau_omthds nv31_mpeg_omthds[] = { { 0x0190, 0x0190, nv31_mpeg_mthd_dma }, { 0x01a0, 0x01a0, nv31_mpeg_mthd_dma }, { 0x01b0, 0x01b0, nv31_mpeg_mthd_dma }, {} }; struct nouveau_oclass nv31_mpeg_sclass[] = { { 0x3174, &nv31_mpeg_ofuncs, nv31_mpeg_omthds }, {} }; /******************************************************************************* * PMPEG context ******************************************************************************/ static int nv31_mpeg_context_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nouveau_oclass *oclass, void *data, u32 size, struct nouveau_object **pobject) { struct nv31_mpeg_priv *priv = (void *)engine; struct nv31_mpeg_chan *chan; int ret; if (!atomic_add_unless(&priv->refcount, 1, 1)) return -EBUSY; ret = nouveau_object_create(parent, engine, oclass, 0, &chan); *pobject = nv_object(chan); if (ret) return ret; return 0; } static void nv31_mpeg_context_dtor(struct nouveau_object *object) { struct nv31_mpeg_priv *priv = (void *)object->engine; struct nv31_mpeg_chan *chan = (void *)object; atomic_dec(&priv->refcount); nouveau_object_destroy(&chan->base); } static struct nouveau_oclass nv31_mpeg_cclass = { .handle = NV_ENGCTX(MPEG, 0x31), .ofuncs = &(struct nouveau_ofuncs) { .ctor = nv31_mpeg_context_ctor, .dtor = nv31_mpeg_context_dtor, .init = nouveau_object_init, .fini = nouveau_object_fini, }, }; /******************************************************************************* * PMPEG engine/subdev functions ******************************************************************************/ void nv31_mpeg_tile_prog(struct nouveau_engine *engine, int i) { struct nouveau_fb_tile *tile = 
&nouveau_fb(engine)->tile.region[i]; struct nv31_mpeg_priv *priv = (void *)engine; nv_wr32(priv, 0x00b008 + (i * 0x10), tile->pitch); nv_wr32(priv, 0x00b004 + (i * 0x10), tile->limit); nv_wr32(priv, 0x00b000 + (i * 0x10), tile->addr); } void nv31_mpeg_intr(struct nouveau_subdev *subdev) { struct nouveau_fifo *pfifo = nouveau_fifo(subdev); struct nouveau_engine *engine = nv_engine(subdev); struct nouveau_object *engctx; struct nouveau_handle *handle; struct nv31_mpeg_priv *priv = (void *)subdev; u32 inst = nv_rd32(priv, 0x00b318) & 0x000fffff; u32 stat = nv_rd32(priv, 0x00b100); u32 type = nv_rd32(priv, 0x00b230); u32 mthd = nv_rd32(priv, 0x00b234); u32 data = nv_rd32(priv, 0x00b238); u32 show = stat; int chid; engctx = nouveau_engctx_get(engine, inst); chid = pfifo->chid(pfifo, engctx); if (stat & 0x01000000) { /* happens on initial binding of the object */ if (type == 0x00000020 && mthd == 0x0000) { nv_mask(priv, 0x00b308, 0x00000000, 0x00000000); show &= ~0x01000000; } if (type == 0x00000010) { handle = nouveau_handle_get_class(engctx, 0x3174); if (handle && !nv_call(handle->object, mthd, data)) show &= ~0x01000000; nouveau_handle_put(handle); } } nv_wr32(priv, 0x00b100, stat); nv_wr32(priv, 0x00b230, 0x00000001); if (show) { nv_error(priv, "ch %d [0x%08x %s] 0x%08x 0x%08x 0x%08x 0x%08x\n", chid, inst << 4, nouveau_client_name(engctx), stat, type, mthd, data); } nouveau_engctx_put(engctx); } static int nv31_mpeg_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nouveau_oclass *oclass, void *data, u32 size, struct nouveau_object **pobject) { struct nv31_mpeg_priv *priv; int ret; ret = nouveau_mpeg_create(parent, engine, oclass, &priv); *pobject = nv_object(priv); if (ret) return ret; nv_subdev(priv)->unit = 0x00000002; nv_subdev(priv)->intr = nv31_mpeg_intr; nv_engine(priv)->cclass = &nv31_mpeg_cclass; nv_engine(priv)->sclass = nv31_mpeg_sclass; nv_engine(priv)->tile_prog = nv31_mpeg_tile_prog; return 0; } int nv31_mpeg_init(struct 
nouveau_object *object) { struct nouveau_engine *engine = nv_engine(object->engine); struct nv31_mpeg_priv *priv = (void *)engine; struct nouveau_fb *pfb = nouveau_fb(object); int ret, i; ret = nouveau_mpeg_init(&priv->base); if (ret) return ret; /* VPE init */ nv_wr32(priv, 0x00b0e0, 0x00000020); /* nvidia: rd 0x01, wr 0x20 */ nv_wr32(priv, 0x00b0e8, 0x00000020); /* nvidia: rd 0x01, wr 0x20 */ for (i = 0; i < pfb->tile.regions; i++) engine->tile_prog(engine, i); /* PMPEG init */ nv_wr32(priv, 0x00b32c, 0x00000000); nv_wr32(priv, 0x00b314, 0x00000100); nv_wr32(priv, 0x00b220, nv44_graph_class(priv) ? 0x00000044 : 0x00000031); nv_wr32(priv, 0x00b300, 0x02001ec1); nv_mask(priv, 0x00b32c, 0x00000001, 0x00000001); nv_wr32(priv, 0x00b100, 0xffffffff); nv_wr32(priv, 0x00b140, 0xffffffff); if (!nv_wait(priv, 0x00b200, 0x00000001, 0x00000000)) { nv_error(priv, "timeout 0x%08x\n", nv_rd32(priv, 0x00b200)); return -EBUSY; } return 0; } struct nouveau_oclass nv31_mpeg_oclass = { .handle = NV_ENGINE(MPEG, 0x31), .ofuncs = &(struct nouveau_ofuncs) { .ctor = nv31_mpeg_ctor, .dtor = _nouveau_mpeg_dtor, .init = nv31_mpeg_init, .fini = _nouveau_mpeg_fini, }, };
gpl-2.0
DooMLoRD/android_kernel_htc_tegra3
arch/powerpc/platforms/85xx/mpc85xx_mds.c
2426
12995
/* * Copyright (C) Freescale Semicondutor, Inc. 2006-2010. All rights reserved. * * Author: Andy Fleming <afleming@freescale.com> * * Based on 83xx/mpc8360e_pb.c by: * Li Yang <LeoLi@freescale.com> * Yin Olivia <Hong-hua.Yin@freescale.com> * * Description: * MPC85xx MDS board specific routines. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/stddef.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/reboot.h> #include <linux/pci.h> #include <linux/kdev_t.h> #include <linux/major.h> #include <linux/console.h> #include <linux/delay.h> #include <linux/seq_file.h> #include <linux/initrd.h> #include <linux/module.h> #include <linux/fsl_devices.h> #include <linux/of_platform.h> #include <linux/of_device.h> #include <linux/phy.h> #include <linux/memblock.h> #include <asm/system.h> #include <asm/atomic.h> #include <asm/time.h> #include <asm/io.h> #include <asm/machdep.h> #include <asm/pci-bridge.h> #include <asm/irq.h> #include <mm/mmu_decl.h> #include <asm/prom.h> #include <asm/udbg.h> #include <sysdev/fsl_soc.h> #include <sysdev/fsl_pci.h> #include <sysdev/simple_gpio.h> #include <asm/qe.h> #include <asm/qe_ic.h> #include <asm/mpic.h> #include <asm/swiotlb.h> #undef DEBUG #ifdef DEBUG #define DBG(fmt...) udbg_printf(fmt) #else #define DBG(fmt...) 
#endif #define MV88E1111_SCR 0x10 #define MV88E1111_SCR_125CLK 0x0010 static int mpc8568_fixup_125_clock(struct phy_device *phydev) { int scr; int err; /* Workaround for the 125 CLK Toggle */ scr = phy_read(phydev, MV88E1111_SCR); if (scr < 0) return scr; err = phy_write(phydev, MV88E1111_SCR, scr & ~(MV88E1111_SCR_125CLK)); if (err) return err; err = phy_write(phydev, MII_BMCR, BMCR_RESET); if (err) return err; scr = phy_read(phydev, MV88E1111_SCR); if (scr < 0) return scr; err = phy_write(phydev, MV88E1111_SCR, scr | 0x0008); return err; } static int mpc8568_mds_phy_fixups(struct phy_device *phydev) { int temp; int err; /* Errata */ err = phy_write(phydev,29, 0x0006); if (err) return err; temp = phy_read(phydev, 30); if (temp < 0) return temp; temp = (temp & (~0x8000)) | 0x4000; err = phy_write(phydev,30, temp); if (err) return err; err = phy_write(phydev,29, 0x000a); if (err) return err; temp = phy_read(phydev, 30); if (temp < 0) return temp; temp = phy_read(phydev, 30); if (temp < 0) return temp; temp &= ~0x0020; err = phy_write(phydev,30,temp); if (err) return err; /* Disable automatic MDI/MDIX selection */ temp = phy_read(phydev, 16); if (temp < 0) return temp; temp &= ~0x0060; err = phy_write(phydev,16,temp); return err; } /* ************************************************************************ * * Setup the architecture * */ #ifdef CONFIG_SMP extern void __init mpc85xx_smp_init(void); #endif #ifdef CONFIG_QUICC_ENGINE static struct of_device_id mpc85xx_qe_ids[] __initdata = { { .type = "qe", }, { .compatible = "fsl,qe", }, { }, }; static void __init mpc85xx_publish_qe_devices(void) { struct device_node *np; np = of_find_compatible_node(NULL, NULL, "fsl,qe"); if (!of_device_is_available(np)) { of_node_put(np); return; } of_platform_bus_probe(NULL, mpc85xx_qe_ids, NULL); } static void __init mpc85xx_mds_reset_ucc_phys(void) { struct device_node *np; static u8 __iomem *bcsr_regs; /* Map BCSR area */ np = of_find_node_by_name(NULL, "bcsr"); if (!np) return; 
bcsr_regs = of_iomap(np, 0); of_node_put(np); if (!bcsr_regs) return; if (machine_is(mpc8568_mds)) { #define BCSR_UCC1_GETH_EN (0x1 << 7) #define BCSR_UCC2_GETH_EN (0x1 << 7) #define BCSR_UCC1_MODE_MSK (0x3 << 4) #define BCSR_UCC2_MODE_MSK (0x3 << 0) /* Turn off UCC1 & UCC2 */ clrbits8(&bcsr_regs[8], BCSR_UCC1_GETH_EN); clrbits8(&bcsr_regs[9], BCSR_UCC2_GETH_EN); /* Mode is RGMII, all bits clear */ clrbits8(&bcsr_regs[11], BCSR_UCC1_MODE_MSK | BCSR_UCC2_MODE_MSK); /* Turn UCC1 & UCC2 on */ setbits8(&bcsr_regs[8], BCSR_UCC1_GETH_EN); setbits8(&bcsr_regs[9], BCSR_UCC2_GETH_EN); } else if (machine_is(mpc8569_mds)) { #define BCSR7_UCC12_GETHnRST (0x1 << 2) #define BCSR8_UEM_MARVELL_RST (0x1 << 1) #define BCSR_UCC_RGMII (0x1 << 6) #define BCSR_UCC_RTBI (0x1 << 5) /* * U-Boot mangles interrupt polarity for Marvell PHYs, * so reset built-in and UEM Marvell PHYs, this puts * the PHYs into their normal state. */ clrbits8(&bcsr_regs[7], BCSR7_UCC12_GETHnRST); setbits8(&bcsr_regs[8], BCSR8_UEM_MARVELL_RST); setbits8(&bcsr_regs[7], BCSR7_UCC12_GETHnRST); clrbits8(&bcsr_regs[8], BCSR8_UEM_MARVELL_RST); for (np = NULL; (np = of_find_compatible_node(np, "network", "ucc_geth")) != NULL;) { const unsigned int *prop; int ucc_num; prop = of_get_property(np, "cell-index", NULL); if (prop == NULL) continue; ucc_num = *prop - 1; prop = of_get_property(np, "phy-connection-type", NULL); if (prop == NULL) continue; if (strcmp("rtbi", (const char *)prop) == 0) clrsetbits_8(&bcsr_regs[7 + ucc_num], BCSR_UCC_RGMII, BCSR_UCC_RTBI); } } else if (machine_is(p1021_mds)) { #define BCSR11_ENET_MICRST (0x1 << 5) /* Reset Micrel PHY */ clrbits8(&bcsr_regs[11], BCSR11_ENET_MICRST); setbits8(&bcsr_regs[11], BCSR11_ENET_MICRST); } iounmap(bcsr_regs); } static void __init mpc85xx_mds_qe_init(void) { struct device_node *np; np = of_find_compatible_node(NULL, NULL, "fsl,qe"); if (!np) { np = of_find_node_by_name(NULL, "qe"); if (!np) return; } if (!of_device_is_available(np)) { of_node_put(np); return; } 
qe_reset(); of_node_put(np); np = of_find_node_by_name(NULL, "par_io"); if (np) { struct device_node *ucc; par_io_init(np); of_node_put(np); for_each_node_by_name(ucc, "ucc") par_io_of_config(ucc); } mpc85xx_mds_reset_ucc_phys(); if (machine_is(p1021_mds)) { #define MPC85xx_PMUXCR_OFFSET 0x60 #define MPC85xx_PMUXCR_QE0 0x00008000 #define MPC85xx_PMUXCR_QE3 0x00001000 #define MPC85xx_PMUXCR_QE9 0x00000040 #define MPC85xx_PMUXCR_QE12 0x00000008 static __be32 __iomem *pmuxcr; np = of_find_node_by_name(NULL, "global-utilities"); if (np) { pmuxcr = of_iomap(np, 0) + MPC85xx_PMUXCR_OFFSET; if (!pmuxcr) printk(KERN_EMERG "Error: Alternate function" " signal multiplex control register not" " mapped!\n"); else /* P1021 has pins muxed for QE and other functions. To * enable QE UEC mode, we need to set bit QE0 for UCC1 * in Eth mode, QE0 and QE3 for UCC5 in Eth mode, QE9 * and QE12 for QE MII management signals in PMUXCR * register. */ setbits32(pmuxcr, MPC85xx_PMUXCR_QE0 | MPC85xx_PMUXCR_QE3 | MPC85xx_PMUXCR_QE9 | MPC85xx_PMUXCR_QE12); of_node_put(np); } } } static void __init mpc85xx_mds_qeic_init(void) { struct device_node *np; np = of_find_compatible_node(NULL, NULL, "fsl,qe"); if (!of_device_is_available(np)) { of_node_put(np); return; } np = of_find_compatible_node(NULL, NULL, "fsl,qe-ic"); if (!np) { np = of_find_node_by_type(NULL, "qeic"); if (!np) return; } if (machine_is(p1021_mds)) qe_ic_init(np, 0, qe_ic_cascade_low_mpic, qe_ic_cascade_high_mpic); else qe_ic_init(np, 0, qe_ic_cascade_muxed_mpic, NULL); of_node_put(np); } #else static void __init mpc85xx_publish_qe_devices(void) { } static void __init mpc85xx_mds_qe_init(void) { } static void __init mpc85xx_mds_qeic_init(void) { } #endif /* CONFIG_QUICC_ENGINE */ static void __init mpc85xx_mds_setup_arch(void) { #ifdef CONFIG_PCI struct pci_controller *hose; struct device_node *np; #endif dma_addr_t max = 0xffffffff; if (ppc_md.progress) ppc_md.progress("mpc85xx_mds_setup_arch()", 0); #ifdef CONFIG_PCI 
for_each_node_by_type(np, "pci") { if (of_device_is_compatible(np, "fsl,mpc8540-pci") || of_device_is_compatible(np, "fsl,mpc8548-pcie")) { struct resource rsrc; of_address_to_resource(np, 0, &rsrc); if ((rsrc.start & 0xfffff) == 0x8000) fsl_add_bridge(np, 1); else fsl_add_bridge(np, 0); hose = pci_find_hose_for_OF_device(np); max = min(max, hose->dma_window_base_cur + hose->dma_window_size); } } #endif #ifdef CONFIG_SMP mpc85xx_smp_init(); #endif mpc85xx_mds_qe_init(); #ifdef CONFIG_SWIOTLB if (memblock_end_of_DRAM() > max) { ppc_swiotlb_enable = 1; set_pci_dma_ops(&swiotlb_dma_ops); ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb; } #endif } static int __init board_fixups(void) { char phy_id[20]; char *compstrs[2] = {"fsl,gianfar-mdio", "fsl,ucc-mdio"}; struct device_node *mdio; struct resource res; int i; for (i = 0; i < ARRAY_SIZE(compstrs); i++) { mdio = of_find_compatible_node(NULL, NULL, compstrs[i]); of_address_to_resource(mdio, 0, &res); snprintf(phy_id, sizeof(phy_id), "%llx:%02x", (unsigned long long)res.start, 1); phy_register_fixup_for_id(phy_id, mpc8568_fixup_125_clock); phy_register_fixup_for_id(phy_id, mpc8568_mds_phy_fixups); /* Register a workaround for errata */ snprintf(phy_id, sizeof(phy_id), "%llx:%02x", (unsigned long long)res.start, 7); phy_register_fixup_for_id(phy_id, mpc8568_mds_phy_fixups); of_node_put(mdio); } return 0; } machine_arch_initcall(mpc8568_mds, board_fixups); machine_arch_initcall(mpc8569_mds, board_fixups); static struct of_device_id mpc85xx_ids[] = { { .type = "soc", }, { .compatible = "soc", }, { .compatible = "simple-bus", }, { .compatible = "gianfar", }, { .compatible = "fsl,rapidio-delta", }, { .compatible = "fsl,mpc8548-guts", }, { .compatible = "gpio-leds", }, {}, }; static struct of_device_id p1021_ids[] = { { .type = "soc", }, { .compatible = "soc", }, { .compatible = "simple-bus", }, { .compatible = "gianfar", }, {}, }; static int __init mpc85xx_publish_devices(void) { if (machine_is(mpc8568_mds)) 
simple_gpiochip_init("fsl,mpc8568mds-bcsr-gpio"); if (machine_is(mpc8569_mds)) simple_gpiochip_init("fsl,mpc8569mds-bcsr-gpio"); of_platform_bus_probe(NULL, mpc85xx_ids, NULL); mpc85xx_publish_qe_devices(); return 0; } static int __init p1021_publish_devices(void) { of_platform_bus_probe(NULL, p1021_ids, NULL); mpc85xx_publish_qe_devices(); return 0; } machine_device_initcall(mpc8568_mds, mpc85xx_publish_devices); machine_device_initcall(mpc8569_mds, mpc85xx_publish_devices); machine_device_initcall(p1021_mds, p1021_publish_devices); machine_arch_initcall(mpc8568_mds, swiotlb_setup_bus_notifier); machine_arch_initcall(mpc8569_mds, swiotlb_setup_bus_notifier); machine_arch_initcall(p1021_mds, swiotlb_setup_bus_notifier); static void __init mpc85xx_mds_pic_init(void) { struct mpic *mpic; struct resource r; struct device_node *np = NULL; np = of_find_node_by_type(NULL, "open-pic"); if (!np) return; if (of_address_to_resource(np, 0, &r)) { printk(KERN_ERR "Failed to map mpic register space\n"); of_node_put(np); return; } mpic = mpic_alloc(np, r.start, MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN | MPIC_BROKEN_FRR_NIRQS | MPIC_SINGLE_DEST_CPU, 0, 256, " OpenPIC "); BUG_ON(mpic == NULL); of_node_put(np); mpic_init(mpic); mpc85xx_mds_qeic_init(); } static int __init mpc85xx_mds_probe(void) { unsigned long root = of_get_flat_dt_root(); return of_flat_dt_is_compatible(root, "MPC85xxMDS"); } define_machine(mpc8568_mds) { .name = "MPC8568 MDS", .probe = mpc85xx_mds_probe, .setup_arch = mpc85xx_mds_setup_arch, .init_IRQ = mpc85xx_mds_pic_init, .get_irq = mpic_get_irq, .restart = fsl_rstcr_restart, .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, #ifdef CONFIG_PCI .pcibios_fixup_bus = fsl_pcibios_fixup_bus, #endif }; static int __init mpc8569_mds_probe(void) { unsigned long root = of_get_flat_dt_root(); return of_flat_dt_is_compatible(root, "fsl,MPC8569EMDS"); } define_machine(mpc8569_mds) { .name = "MPC8569 MDS", .probe = mpc8569_mds_probe, .setup_arch 
= mpc85xx_mds_setup_arch, .init_IRQ = mpc85xx_mds_pic_init, .get_irq = mpic_get_irq, .restart = fsl_rstcr_restart, .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, #ifdef CONFIG_PCI .pcibios_fixup_bus = fsl_pcibios_fixup_bus, #endif }; static int __init p1021_mds_probe(void) { unsigned long root = of_get_flat_dt_root(); return of_flat_dt_is_compatible(root, "fsl,P1021MDS"); } define_machine(p1021_mds) { .name = "P1021 MDS", .probe = p1021_mds_probe, .setup_arch = mpc85xx_mds_setup_arch, .init_IRQ = mpc85xx_mds_pic_init, .get_irq = mpic_get_irq, .restart = fsl_rstcr_restart, .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, #ifdef CONFIG_PCI .pcibios_fixup_bus = fsl_pcibios_fixup_bus, #endif };
gpl-2.0
wangxingchao/s3c6410
arch/powerpc/platforms/85xx/mpc85xx_mds.c
2426
12995
/* * Copyright (C) Freescale Semicondutor, Inc. 2006-2010. All rights reserved. * * Author: Andy Fleming <afleming@freescale.com> * * Based on 83xx/mpc8360e_pb.c by: * Li Yang <LeoLi@freescale.com> * Yin Olivia <Hong-hua.Yin@freescale.com> * * Description: * MPC85xx MDS board specific routines. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/stddef.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/reboot.h> #include <linux/pci.h> #include <linux/kdev_t.h> #include <linux/major.h> #include <linux/console.h> #include <linux/delay.h> #include <linux/seq_file.h> #include <linux/initrd.h> #include <linux/module.h> #include <linux/fsl_devices.h> #include <linux/of_platform.h> #include <linux/of_device.h> #include <linux/phy.h> #include <linux/memblock.h> #include <asm/system.h> #include <asm/atomic.h> #include <asm/time.h> #include <asm/io.h> #include <asm/machdep.h> #include <asm/pci-bridge.h> #include <asm/irq.h> #include <mm/mmu_decl.h> #include <asm/prom.h> #include <asm/udbg.h> #include <sysdev/fsl_soc.h> #include <sysdev/fsl_pci.h> #include <sysdev/simple_gpio.h> #include <asm/qe.h> #include <asm/qe_ic.h> #include <asm/mpic.h> #include <asm/swiotlb.h> #undef DEBUG #ifdef DEBUG #define DBG(fmt...) udbg_printf(fmt) #else #define DBG(fmt...) 
#endif #define MV88E1111_SCR 0x10 #define MV88E1111_SCR_125CLK 0x0010 static int mpc8568_fixup_125_clock(struct phy_device *phydev) { int scr; int err; /* Workaround for the 125 CLK Toggle */ scr = phy_read(phydev, MV88E1111_SCR); if (scr < 0) return scr; err = phy_write(phydev, MV88E1111_SCR, scr & ~(MV88E1111_SCR_125CLK)); if (err) return err; err = phy_write(phydev, MII_BMCR, BMCR_RESET); if (err) return err; scr = phy_read(phydev, MV88E1111_SCR); if (scr < 0) return scr; err = phy_write(phydev, MV88E1111_SCR, scr | 0x0008); return err; } static int mpc8568_mds_phy_fixups(struct phy_device *phydev) { int temp; int err; /* Errata */ err = phy_write(phydev,29, 0x0006); if (err) return err; temp = phy_read(phydev, 30); if (temp < 0) return temp; temp = (temp & (~0x8000)) | 0x4000; err = phy_write(phydev,30, temp); if (err) return err; err = phy_write(phydev,29, 0x000a); if (err) return err; temp = phy_read(phydev, 30); if (temp < 0) return temp; temp = phy_read(phydev, 30); if (temp < 0) return temp; temp &= ~0x0020; err = phy_write(phydev,30,temp); if (err) return err; /* Disable automatic MDI/MDIX selection */ temp = phy_read(phydev, 16); if (temp < 0) return temp; temp &= ~0x0060; err = phy_write(phydev,16,temp); return err; } /* ************************************************************************ * * Setup the architecture * */ #ifdef CONFIG_SMP extern void __init mpc85xx_smp_init(void); #endif #ifdef CONFIG_QUICC_ENGINE static struct of_device_id mpc85xx_qe_ids[] __initdata = { { .type = "qe", }, { .compatible = "fsl,qe", }, { }, }; static void __init mpc85xx_publish_qe_devices(void) { struct device_node *np; np = of_find_compatible_node(NULL, NULL, "fsl,qe"); if (!of_device_is_available(np)) { of_node_put(np); return; } of_platform_bus_probe(NULL, mpc85xx_qe_ids, NULL); } static void __init mpc85xx_mds_reset_ucc_phys(void) { struct device_node *np; static u8 __iomem *bcsr_regs; /* Map BCSR area */ np = of_find_node_by_name(NULL, "bcsr"); if (!np) return; 
bcsr_regs = of_iomap(np, 0); of_node_put(np); if (!bcsr_regs) return; if (machine_is(mpc8568_mds)) { #define BCSR_UCC1_GETH_EN (0x1 << 7) #define BCSR_UCC2_GETH_EN (0x1 << 7) #define BCSR_UCC1_MODE_MSK (0x3 << 4) #define BCSR_UCC2_MODE_MSK (0x3 << 0) /* Turn off UCC1 & UCC2 */ clrbits8(&bcsr_regs[8], BCSR_UCC1_GETH_EN); clrbits8(&bcsr_regs[9], BCSR_UCC2_GETH_EN); /* Mode is RGMII, all bits clear */ clrbits8(&bcsr_regs[11], BCSR_UCC1_MODE_MSK | BCSR_UCC2_MODE_MSK); /* Turn UCC1 & UCC2 on */ setbits8(&bcsr_regs[8], BCSR_UCC1_GETH_EN); setbits8(&bcsr_regs[9], BCSR_UCC2_GETH_EN); } else if (machine_is(mpc8569_mds)) { #define BCSR7_UCC12_GETHnRST (0x1 << 2) #define BCSR8_UEM_MARVELL_RST (0x1 << 1) #define BCSR_UCC_RGMII (0x1 << 6) #define BCSR_UCC_RTBI (0x1 << 5) /* * U-Boot mangles interrupt polarity for Marvell PHYs, * so reset built-in and UEM Marvell PHYs, this puts * the PHYs into their normal state. */ clrbits8(&bcsr_regs[7], BCSR7_UCC12_GETHnRST); setbits8(&bcsr_regs[8], BCSR8_UEM_MARVELL_RST); setbits8(&bcsr_regs[7], BCSR7_UCC12_GETHnRST); clrbits8(&bcsr_regs[8], BCSR8_UEM_MARVELL_RST); for (np = NULL; (np = of_find_compatible_node(np, "network", "ucc_geth")) != NULL;) { const unsigned int *prop; int ucc_num; prop = of_get_property(np, "cell-index", NULL); if (prop == NULL) continue; ucc_num = *prop - 1; prop = of_get_property(np, "phy-connection-type", NULL); if (prop == NULL) continue; if (strcmp("rtbi", (const char *)prop) == 0) clrsetbits_8(&bcsr_regs[7 + ucc_num], BCSR_UCC_RGMII, BCSR_UCC_RTBI); } } else if (machine_is(p1021_mds)) { #define BCSR11_ENET_MICRST (0x1 << 5) /* Reset Micrel PHY */ clrbits8(&bcsr_regs[11], BCSR11_ENET_MICRST); setbits8(&bcsr_regs[11], BCSR11_ENET_MICRST); } iounmap(bcsr_regs); } static void __init mpc85xx_mds_qe_init(void) { struct device_node *np; np = of_find_compatible_node(NULL, NULL, "fsl,qe"); if (!np) { np = of_find_node_by_name(NULL, "qe"); if (!np) return; } if (!of_device_is_available(np)) { of_node_put(np); return; } 
qe_reset(); of_node_put(np); np = of_find_node_by_name(NULL, "par_io"); if (np) { struct device_node *ucc; par_io_init(np); of_node_put(np); for_each_node_by_name(ucc, "ucc") par_io_of_config(ucc); } mpc85xx_mds_reset_ucc_phys(); if (machine_is(p1021_mds)) { #define MPC85xx_PMUXCR_OFFSET 0x60 #define MPC85xx_PMUXCR_QE0 0x00008000 #define MPC85xx_PMUXCR_QE3 0x00001000 #define MPC85xx_PMUXCR_QE9 0x00000040 #define MPC85xx_PMUXCR_QE12 0x00000008 static __be32 __iomem *pmuxcr; np = of_find_node_by_name(NULL, "global-utilities"); if (np) { pmuxcr = of_iomap(np, 0) + MPC85xx_PMUXCR_OFFSET; if (!pmuxcr) printk(KERN_EMERG "Error: Alternate function" " signal multiplex control register not" " mapped!\n"); else /* P1021 has pins muxed for QE and other functions. To * enable QE UEC mode, we need to set bit QE0 for UCC1 * in Eth mode, QE0 and QE3 for UCC5 in Eth mode, QE9 * and QE12 for QE MII management signals in PMUXCR * register. */ setbits32(pmuxcr, MPC85xx_PMUXCR_QE0 | MPC85xx_PMUXCR_QE3 | MPC85xx_PMUXCR_QE9 | MPC85xx_PMUXCR_QE12); of_node_put(np); } } } static void __init mpc85xx_mds_qeic_init(void) { struct device_node *np; np = of_find_compatible_node(NULL, NULL, "fsl,qe"); if (!of_device_is_available(np)) { of_node_put(np); return; } np = of_find_compatible_node(NULL, NULL, "fsl,qe-ic"); if (!np) { np = of_find_node_by_type(NULL, "qeic"); if (!np) return; } if (machine_is(p1021_mds)) qe_ic_init(np, 0, qe_ic_cascade_low_mpic, qe_ic_cascade_high_mpic); else qe_ic_init(np, 0, qe_ic_cascade_muxed_mpic, NULL); of_node_put(np); } #else static void __init mpc85xx_publish_qe_devices(void) { } static void __init mpc85xx_mds_qe_init(void) { } static void __init mpc85xx_mds_qeic_init(void) { } #endif /* CONFIG_QUICC_ENGINE */ static void __init mpc85xx_mds_setup_arch(void) { #ifdef CONFIG_PCI struct pci_controller *hose; struct device_node *np; #endif dma_addr_t max = 0xffffffff; if (ppc_md.progress) ppc_md.progress("mpc85xx_mds_setup_arch()", 0); #ifdef CONFIG_PCI 
for_each_node_by_type(np, "pci") { if (of_device_is_compatible(np, "fsl,mpc8540-pci") || of_device_is_compatible(np, "fsl,mpc8548-pcie")) { struct resource rsrc; of_address_to_resource(np, 0, &rsrc); if ((rsrc.start & 0xfffff) == 0x8000) fsl_add_bridge(np, 1); else fsl_add_bridge(np, 0); hose = pci_find_hose_for_OF_device(np); max = min(max, hose->dma_window_base_cur + hose->dma_window_size); } } #endif #ifdef CONFIG_SMP mpc85xx_smp_init(); #endif mpc85xx_mds_qe_init(); #ifdef CONFIG_SWIOTLB if (memblock_end_of_DRAM() > max) { ppc_swiotlb_enable = 1; set_pci_dma_ops(&swiotlb_dma_ops); ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb; } #endif } static int __init board_fixups(void) { char phy_id[20]; char *compstrs[2] = {"fsl,gianfar-mdio", "fsl,ucc-mdio"}; struct device_node *mdio; struct resource res; int i; for (i = 0; i < ARRAY_SIZE(compstrs); i++) { mdio = of_find_compatible_node(NULL, NULL, compstrs[i]); of_address_to_resource(mdio, 0, &res); snprintf(phy_id, sizeof(phy_id), "%llx:%02x", (unsigned long long)res.start, 1); phy_register_fixup_for_id(phy_id, mpc8568_fixup_125_clock); phy_register_fixup_for_id(phy_id, mpc8568_mds_phy_fixups); /* Register a workaround for errata */ snprintf(phy_id, sizeof(phy_id), "%llx:%02x", (unsigned long long)res.start, 7); phy_register_fixup_for_id(phy_id, mpc8568_mds_phy_fixups); of_node_put(mdio); } return 0; } machine_arch_initcall(mpc8568_mds, board_fixups); machine_arch_initcall(mpc8569_mds, board_fixups); static struct of_device_id mpc85xx_ids[] = { { .type = "soc", }, { .compatible = "soc", }, { .compatible = "simple-bus", }, { .compatible = "gianfar", }, { .compatible = "fsl,rapidio-delta", }, { .compatible = "fsl,mpc8548-guts", }, { .compatible = "gpio-leds", }, {}, }; static struct of_device_id p1021_ids[] = { { .type = "soc", }, { .compatible = "soc", }, { .compatible = "simple-bus", }, { .compatible = "gianfar", }, {}, }; static int __init mpc85xx_publish_devices(void) { if (machine_is(mpc8568_mds)) 
simple_gpiochip_init("fsl,mpc8568mds-bcsr-gpio"); if (machine_is(mpc8569_mds)) simple_gpiochip_init("fsl,mpc8569mds-bcsr-gpio"); of_platform_bus_probe(NULL, mpc85xx_ids, NULL); mpc85xx_publish_qe_devices(); return 0; } static int __init p1021_publish_devices(void) { of_platform_bus_probe(NULL, p1021_ids, NULL); mpc85xx_publish_qe_devices(); return 0; } machine_device_initcall(mpc8568_mds, mpc85xx_publish_devices); machine_device_initcall(mpc8569_mds, mpc85xx_publish_devices); machine_device_initcall(p1021_mds, p1021_publish_devices); machine_arch_initcall(mpc8568_mds, swiotlb_setup_bus_notifier); machine_arch_initcall(mpc8569_mds, swiotlb_setup_bus_notifier); machine_arch_initcall(p1021_mds, swiotlb_setup_bus_notifier); static void __init mpc85xx_mds_pic_init(void) { struct mpic *mpic; struct resource r; struct device_node *np = NULL; np = of_find_node_by_type(NULL, "open-pic"); if (!np) return; if (of_address_to_resource(np, 0, &r)) { printk(KERN_ERR "Failed to map mpic register space\n"); of_node_put(np); return; } mpic = mpic_alloc(np, r.start, MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN | MPIC_BROKEN_FRR_NIRQS | MPIC_SINGLE_DEST_CPU, 0, 256, " OpenPIC "); BUG_ON(mpic == NULL); of_node_put(np); mpic_init(mpic); mpc85xx_mds_qeic_init(); } static int __init mpc85xx_mds_probe(void) { unsigned long root = of_get_flat_dt_root(); return of_flat_dt_is_compatible(root, "MPC85xxMDS"); } define_machine(mpc8568_mds) { .name = "MPC8568 MDS", .probe = mpc85xx_mds_probe, .setup_arch = mpc85xx_mds_setup_arch, .init_IRQ = mpc85xx_mds_pic_init, .get_irq = mpic_get_irq, .restart = fsl_rstcr_restart, .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, #ifdef CONFIG_PCI .pcibios_fixup_bus = fsl_pcibios_fixup_bus, #endif }; static int __init mpc8569_mds_probe(void) { unsigned long root = of_get_flat_dt_root(); return of_flat_dt_is_compatible(root, "fsl,MPC8569EMDS"); } define_machine(mpc8569_mds) { .name = "MPC8569 MDS", .probe = mpc8569_mds_probe, .setup_arch 
= mpc85xx_mds_setup_arch, .init_IRQ = mpc85xx_mds_pic_init, .get_irq = mpic_get_irq, .restart = fsl_rstcr_restart, .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, #ifdef CONFIG_PCI .pcibios_fixup_bus = fsl_pcibios_fixup_bus, #endif }; static int __init p1021_mds_probe(void) { unsigned long root = of_get_flat_dt_root(); return of_flat_dt_is_compatible(root, "fsl,P1021MDS"); } define_machine(p1021_mds) { .name = "P1021 MDS", .probe = p1021_mds_probe, .setup_arch = mpc85xx_mds_setup_arch, .init_IRQ = mpc85xx_mds_pic_init, .get_irq = mpic_get_irq, .restart = fsl_rstcr_restart, .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, #ifdef CONFIG_PCI .pcibios_fixup_bus = fsl_pcibios_fixup_bus, #endif };
gpl-2.0
jpjust/android_kernel_lge_v4xx
arch/arm/mach-msm/proc_comm.c
3194
3925
/* arch/arm/mach-msm/proc_comm.c * * Copyright (C) 2007-2008 Google, Inc. * Copyright (c) 2009-2012, The Linux Foundation. All rights reserved. * Author: Brian Swetland <swetland@google.com> * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/delay.h> #include <linux/errno.h> #include <linux/io.h> #include <linux/spinlock.h> #include <linux/module.h> #include <mach/msm_iomap.h> #include <mach/system.h> #include <mach/proc_comm.h> #include "smd_private.h" static inline void notify_other_proc_comm(void) { /* Make sure the write completes before interrupt */ wmb(); #if defined(CONFIG_ARCH_MSM7X30) __raw_writel(1 << 6, MSM_APCS_GCC_BASE + 0x8); #elif defined(CONFIG_ARCH_MSM8X60) __raw_writel(1 << 5, MSM_GCC_BASE + 0x8); #else __raw_writel(1, MSM_CSR_BASE + 0x400 + (6) * 4); #endif } #define APP_COMMAND 0x00 #define APP_STATUS 0x04 #define APP_DATA1 0x08 #define APP_DATA2 0x0C #define MDM_COMMAND 0x10 #define MDM_STATUS 0x14 #define MDM_DATA1 0x18 #define MDM_DATA2 0x1C static DEFINE_SPINLOCK(proc_comm_lock); static int msm_proc_comm_disable; /* Poll for a state change, checking for possible * modem crashes along the way (so we don't wait * forever while the ARM9 is blowing up. * * Return an error in the event of a modem crash and * restart so the msm_proc_comm() routine can restart * the operation from the beginning. 
*/ static int proc_comm_wait_for(unsigned addr, unsigned value) { while (1) { /* Barrier here prevents excessive spinning */ mb(); if (readl_relaxed(addr) == value) return 0; if (smsm_check_for_modem_crash()) return -EAGAIN; udelay(5); } } void msm_proc_comm_reset_modem_now(void) { unsigned base = (unsigned)MSM_SHARED_RAM_BASE; unsigned long flags; spin_lock_irqsave(&proc_comm_lock, flags); again: if (proc_comm_wait_for(base + MDM_STATUS, PCOM_READY)) goto again; writel_relaxed(PCOM_RESET_MODEM, base + APP_COMMAND); writel_relaxed(0, base + APP_DATA1); writel_relaxed(0, base + APP_DATA2); spin_unlock_irqrestore(&proc_comm_lock, flags); /* Make sure the writes complete before notifying the other side */ wmb(); notify_other_proc_comm(); return; } EXPORT_SYMBOL(msm_proc_comm_reset_modem_now); int msm_proc_comm(unsigned cmd, unsigned *data1, unsigned *data2) { unsigned base = (unsigned)MSM_SHARED_RAM_BASE; unsigned long flags; int ret; spin_lock_irqsave(&proc_comm_lock, flags); if (msm_proc_comm_disable) { ret = -EIO; goto end; } again: if (proc_comm_wait_for(base + MDM_STATUS, PCOM_READY)) goto again; writel_relaxed(cmd, base + APP_COMMAND); writel_relaxed(data1 ? *data1 : 0, base + APP_DATA1); writel_relaxed(data2 ? 
*data2 : 0, base + APP_DATA2); /* Make sure the writes complete before notifying the other side */ wmb(); notify_other_proc_comm(); if (proc_comm_wait_for(base + APP_COMMAND, PCOM_CMD_DONE)) goto again; if (readl_relaxed(base + APP_STATUS) == PCOM_CMD_SUCCESS) { if (data1) *data1 = readl_relaxed(base + APP_DATA1); if (data2) *data2 = readl_relaxed(base + APP_DATA2); ret = 0; } else { ret = -EIO; } writel_relaxed(PCOM_CMD_IDLE, base + APP_COMMAND); switch (cmd) { case PCOM_RESET_CHIP: case PCOM_RESET_CHIP_IMM: case PCOM_RESET_APPS: msm_proc_comm_disable = 1; printk(KERN_ERR "msm: proc_comm: proc comm disabled\n"); break; } end: /* Make sure the writes complete before returning */ wmb(); spin_unlock_irqrestore(&proc_comm_lock, flags); return ret; } EXPORT_SYMBOL(msm_proc_comm);
gpl-2.0
croniccorey/old-cronmod-kernel
arch/arm/mach-iop13xx/setup.c
3962
15096
/*
 * iop13xx platform Initialization
 * Copyright (c) 2005-2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 */

#include <linux/serial_8250.h>
#include <linux/io.h>
#ifdef CONFIG_MTD_PHYSMAP
#include <linux/mtd/physmap.h>
#endif
#include <asm/mach/map.h>
#include <mach/hardware.h>
#include <asm/irq.h>
#include <asm/hardware/iop_adma.h>

#define IOP13XX_UART_XTAL 33334000
#define IOP13XX_SETUP_DEBUG 0
/* Compiles away to a no-op when IOP13XX_SETUP_DEBUG is 0 */
#define PRINTK(x...) ((void)(IOP13XX_SETUP_DEBUG && printk(x)))

/* Standard IO mapping for all IOP13XX based systems */
static struct map_desc iop13xx_std_desc[] __initdata = {
	{    /* mem mapped registers */
		.virtual = IOP13XX_PMMR_VIRT_MEM_BASE,
		.pfn = __phys_to_pfn(IOP13XX_PMMR_PHYS_MEM_BASE),
		.length = IOP13XX_PMMR_SIZE,
		.type = MT_DEVICE,
	}, { /* PCIE IO space */
		.virtual = IOP13XX_PCIE_LOWER_IO_VA,
		.pfn = __phys_to_pfn(IOP13XX_PCIE_LOWER_IO_PA),
		.length = IOP13XX_PCIX_IO_WINDOW_SIZE,
		.type = MT_DEVICE,
	}, { /* PCIX IO space */
		.virtual = IOP13XX_PCIX_LOWER_IO_VA,
		.pfn = __phys_to_pfn(IOP13XX_PCIX_LOWER_IO_PA),
		.length = IOP13XX_PCIX_IO_WINDOW_SIZE,
		.type = MT_DEVICE,
	},
};

static struct resource iop13xx_uart0_resources[] = {
	[0] = {
		.start = IOP13XX_UART0_PHYS,
		.end = IOP13XX_UART0_PHYS + 0x3f,
		.flags = IORESOURCE_MEM,
	},
	[1] = {
		.start = IRQ_IOP13XX_UART0,
		.end = IRQ_IOP13XX_UART0,
		.flags = IORESOURCE_IRQ
	}
};

static struct resource iop13xx_uart1_resources[] = {
	[0] = {
		.start = IOP13XX_UART1_PHYS,
		.end = IOP13XX_UART1_PHYS + 0x3f,
		.flags = IORESOURCE_MEM,
	},
	[1] = {
		.start = IRQ_IOP13XX_UART1,
		.end = IRQ_IOP13XX_UART1,
		.flags = IORESOURCE_IRQ
	}
};

static struct plat_serial8250_port iop13xx_uart0_data[] = {
	{
	.membase = (char*)(IOP13XX_UART0_VIRT),
	.mapbase = (IOP13XX_UART0_PHYS),
	.irq = IRQ_IOP13XX_UART0,
	.uartclk = IOP13XX_UART_XTAL,
	.regshift = 2,
	.iotype = UPIO_MEM,
	.flags = UPF_SKIP_TEST,
	},
	{  },
};

static struct plat_serial8250_port iop13xx_uart1_data[] = {
	{
	.membase = (char*)(IOP13XX_UART1_VIRT),
	.mapbase = (IOP13XX_UART1_PHYS),
	.irq = IRQ_IOP13XX_UART1,
	.uartclk = IOP13XX_UART_XTAL,
	.regshift = 2,
	.iotype = UPIO_MEM,
	.flags = UPF_SKIP_TEST,
	},
	{  },
};

/* The ids are fixed up later in iop13xx_platform_init */
static struct platform_device iop13xx_uart0 = {
	.name = "serial8250",
	.id = 0,
	.dev.platform_data = iop13xx_uart0_data,
	.num_resources = 2,
	.resource = iop13xx_uart0_resources,
};

static struct platform_device iop13xx_uart1 = {
	.name = "serial8250",
	.id = 0,
	.dev.platform_data = iop13xx_uart1_data,
	.num_resources = 2,
	.resource = iop13xx_uart1_resources
};

static struct resource iop13xx_i2c_0_resources[] = {
	[0] = {
		.start = IOP13XX_I2C0_PHYS,
		.end = IOP13XX_I2C0_PHYS + 0x18,
		.flags = IORESOURCE_MEM,
	},
	[1] = {
		.start = IRQ_IOP13XX_I2C_0,
		.end = IRQ_IOP13XX_I2C_0,
		.flags = IORESOURCE_IRQ
	}
};

static struct resource iop13xx_i2c_1_resources[] = {
	[0] = {
		.start = IOP13XX_I2C1_PHYS,
		.end = IOP13XX_I2C1_PHYS + 0x18,
		.flags = IORESOURCE_MEM,
	},
	[1] = {
		.start = IRQ_IOP13XX_I2C_1,
		.end = IRQ_IOP13XX_I2C_1,
		.flags = IORESOURCE_IRQ
	}
};

static struct resource iop13xx_i2c_2_resources[] = {
	[0] = {
		.start = IOP13XX_I2C2_PHYS,
		.end = IOP13XX_I2C2_PHYS + 0x18,
		.flags = IORESOURCE_MEM,
	},
	[1] = {
		.start = IRQ_IOP13XX_I2C_2,
		.end = IRQ_IOP13XX_I2C_2,
		.flags = IORESOURCE_IRQ
	}
};

/* I2C controllers. The IOP13XX uses the same block as the IOP3xx, so
 * we just use the same device name.
 */

/* The ids are fixed up later in iop13xx_platform_init */
static struct platform_device iop13xx_i2c_0_controller = {
	.name = "IOP3xx-I2C",
	.id = 0,
	.num_resources = 2,
	.resource = iop13xx_i2c_0_resources
};

static struct platform_device iop13xx_i2c_1_controller = {
	.name = "IOP3xx-I2C",
	.id = 0,
	.num_resources = 2,
	.resource = iop13xx_i2c_1_resources
};

static struct platform_device iop13xx_i2c_2_controller = {
	.name = "IOP3xx-I2C",
	.id = 0,
	.num_resources = 2,
	.resource = iop13xx_i2c_2_resources
};

#ifdef CONFIG_MTD_PHYSMAP
/* PBI Flash Device */
static struct physmap_flash_data iq8134x_flash_data = {
	.width = 2,
};

/* .end is filled in from iq8134x_probe_flash_size() at init time */
static struct resource iq8134x_flash_resource = {
	.start = IQ81340_FLASHBASE,
	.end   = 0,
	.flags = IORESOURCE_MEM,
};

static struct platform_device iq8134x_flash = {
	.name           = "physmap-flash",
	.id             = 0,
	.dev		= { .platform_data  = &iq8134x_flash_data, },
	.num_resources  = 1,
	.resource       = &iq8134x_flash_resource,
};

/*
 * Query the PBI flash via CFI for its size in bytes.
 * Returns 0 if the mapping fails or the part is not CFI compliant.
 */
static unsigned long iq8134x_probe_flash_size(void)
{
	uint8_t __iomem *flash_addr = ioremap(IQ81340_FLASHBASE, PAGE_SIZE);
	int i;
	char query[3];
	unsigned long size = 0;
	int width = iq8134x_flash_data.width;

	if (flash_addr) {
		/* send CFI 'query' command */
		writew(0x98, flash_addr);

		/* check for CFI compliance */
		for (i = 0; i < 3 * width; i += width)
			query[i / width] = readb(flash_addr + (0x10 * width) + i);

		/* read the size */
		if (memcmp(query, "QRY", 3) == 0)
			size = 1 << readb(flash_addr + (0x27 * width));

		/* send CFI 'read array' command */
		writew(0xff, flash_addr);

		iounmap(flash_addr);
	}

	return size;
}
#endif

/* ADMA Channels */
static struct resource iop13xx_adma_0_resources[] = {
	[0] = {
		.start = IOP13XX_ADMA_PHYS_BASE(0),
		.end = IOP13XX_ADMA_UPPER_PA(0),
		.flags = IORESOURCE_MEM,
	},
	[1] = {
		.start = IRQ_IOP13XX_ADMA0_EOT,
		.end = IRQ_IOP13XX_ADMA0_EOT,
		.flags = IORESOURCE_IRQ
	},
	[2] = {
		.start = IRQ_IOP13XX_ADMA0_EOC,
		.end = IRQ_IOP13XX_ADMA0_EOC,
		.flags = IORESOURCE_IRQ
	},
	[3] = {
		.start = IRQ_IOP13XX_ADMA0_ERR,
		.end = IRQ_IOP13XX_ADMA0_ERR,
		.flags = IORESOURCE_IRQ
	}
};

static struct resource iop13xx_adma_1_resources[] = {
	[0] = {
		.start = IOP13XX_ADMA_PHYS_BASE(1),
		.end = IOP13XX_ADMA_UPPER_PA(1),
		.flags = IORESOURCE_MEM,
	},
	[1] = {
		.start = IRQ_IOP13XX_ADMA1_EOT,
		.end = IRQ_IOP13XX_ADMA1_EOT,
		.flags = IORESOURCE_IRQ
	},
	[2] = {
		.start = IRQ_IOP13XX_ADMA1_EOC,
		.end = IRQ_IOP13XX_ADMA1_EOC,
		.flags = IORESOURCE_IRQ
	},
	[3] = {
		.start = IRQ_IOP13XX_ADMA1_ERR,
		.end = IRQ_IOP13XX_ADMA1_ERR,
		.flags = IORESOURCE_IRQ
	}
};

static struct resource iop13xx_adma_2_resources[] = {
	[0] = {
		.start = IOP13XX_ADMA_PHYS_BASE(2),
		.end = IOP13XX_ADMA_UPPER_PA(2),
		.flags = IORESOURCE_MEM,
	},
	[1] = {
		.start = IRQ_IOP13XX_ADMA2_EOT,
		.end = IRQ_IOP13XX_ADMA2_EOT,
		.flags = IORESOURCE_IRQ
	},
	[2] = {
		.start = IRQ_IOP13XX_ADMA2_EOC,
		.end = IRQ_IOP13XX_ADMA2_EOC,
		.flags = IORESOURCE_IRQ
	},
	[3] = {
		.start = IRQ_IOP13XX_ADMA2_ERR,
		.end = IRQ_IOP13XX_ADMA2_ERR,
		.flags = IORESOURCE_IRQ
	}
};

static u64 iop13xx_adma_dmamask = DMA_BIT_MASK(64);
static struct iop_adma_platform_data iop13xx_adma_0_data = {
	.hw_id = 0,
	.pool_size = PAGE_SIZE,
};

static struct iop_adma_platform_data iop13xx_adma_1_data = {
	.hw_id = 1,
	.pool_size = PAGE_SIZE,
};

static struct iop_adma_platform_data iop13xx_adma_2_data = {
	.hw_id = 2,
	.pool_size = PAGE_SIZE,
};

/* The ids are fixed up later in iop13xx_platform_init */
static struct platform_device iop13xx_adma_0_channel = {
	.name = "iop-adma",
	.id = 0,
	.num_resources = 4,
	.resource = iop13xx_adma_0_resources,
	.dev = {
		.dma_mask = &iop13xx_adma_dmamask,
		.coherent_dma_mask = DMA_BIT_MASK(64),
		.platform_data = (void *) &iop13xx_adma_0_data,
	},
};

static struct platform_device iop13xx_adma_1_channel = {
	.name = "iop-adma",
	.id = 0,
	.num_resources = 4,
	.resource = iop13xx_adma_1_resources,
	.dev = {
		.dma_mask = &iop13xx_adma_dmamask,
		.coherent_dma_mask = DMA_BIT_MASK(64),
		.platform_data = (void *) &iop13xx_adma_1_data,
	},
};

static struct platform_device iop13xx_adma_2_channel = {
	.name = "iop-adma",
	.id = 0,
	.num_resources = 4,
	.resource = iop13xx_adma_2_resources,
	.dev = {
		.dma_mask = &iop13xx_adma_dmamask,
		.coherent_dma_mask = DMA_BIT_MASK(64),
		.platform_data = (void *) &iop13xx_adma_2_data,
	},
};

/* Install the static page-table mappings for this SoC family */
void __init iop13xx_map_io(void)
{
	/* Initialize the Static Page Table maps */
	iotable_init(iop13xx_std_desc, ARRAY_SIZE(iop13xx_std_desc));
}

/* Device-enable bitmasks, optionally overridden by the __setup()
 * command-line parsers at the bottom of this file.  A value of
 * *_DEFAULT (0) means "pick per-device defaults from the device id".
 */
static int init_uart;
static int init_i2c;
static int init_adma;

/*
 * Register the enabled uart/i2c/adma/flash platform devices, assigning
 * sequential platform ids per device class as it goes.
 */
void __init iop13xx_platform_init(void)
{
	int i;
	u32 uart_idx, i2c_idx, adma_idx, plat_idx;
	struct platform_device *iop13xx_devices[IQ81340_MAX_PLAT_DEVICES];

	/* set the bases so we can read the device id */
	iop13xx_set_atu_mmr_bases();

	memset(iop13xx_devices, 0, sizeof(iop13xx_devices));

	if (init_uart == IOP13XX_INIT_UART_DEFAULT) {
		switch (iop13xx_dev_id()) {
		/* enable both uarts on iop341 */
		case 0x3380:
		case 0x3384:
		case 0x3388:
		case 0x338c:
			init_uart |= IOP13XX_INIT_UART_0;
			init_uart |= IOP13XX_INIT_UART_1;
			break;
		/* only enable uart 1 */
		default:
			init_uart |= IOP13XX_INIT_UART_1;
		}
	}

	if (init_i2c == IOP13XX_INIT_I2C_DEFAULT) {
		switch (iop13xx_dev_id()) {
		/* enable all i2c units on iop341 and iop342 */
		case 0x3380:
		case 0x3384:
		case 0x3388:
		case 0x338c:
		case 0x3382:
		case 0x3386:
		case 0x338a:
		case 0x338e:
			init_i2c |= IOP13XX_INIT_I2C_0;
			init_i2c |= IOP13XX_INIT_I2C_1;
			init_i2c |= IOP13XX_INIT_I2C_2;
			break;
		/* only enable i2c 1 and 2 */
		default:
			init_i2c |= IOP13XX_INIT_I2C_1;
			init_i2c |= IOP13XX_INIT_I2C_2;
		}
	}

	if (init_adma == IOP13XX_INIT_ADMA_DEFAULT) {
		init_adma |= IOP13XX_INIT_ADMA_0;
		init_adma |= IOP13XX_INIT_ADMA_1;
		init_adma |= IOP13XX_INIT_ADMA_2;
	}

	plat_idx = 0;
	uart_idx = 0;
	i2c_idx = 0;

	/* uart 1 (if enabled) is ttyS0 */
	if (init_uart & IOP13XX_INIT_UART_1) {
		PRINTK("Adding uart1 to platform device list\n");
		iop13xx_uart1.id = uart_idx++;
		iop13xx_devices[plat_idx++] = &iop13xx_uart1;
	}
	if (init_uart & IOP13XX_INIT_UART_0) {
		PRINTK("Adding uart0 to platform device list\n");
		iop13xx_uart0.id = uart_idx++;
		iop13xx_devices[plat_idx++] = &iop13xx_uart0;
	}

	for(i = 0; i < IQ81340_NUM_I2C; i++) {
		if ((init_i2c & (1 << i)) && IOP13XX_SETUP_DEBUG)
			printk("Adding i2c%d to platform device list\n", i);
		switch(init_i2c & (1 << i)) {
		case IOP13XX_INIT_I2C_0:
			iop13xx_i2c_0_controller.id = i2c_idx++;
			iop13xx_devices[plat_idx++] =
				&iop13xx_i2c_0_controller;
			break;
		case IOP13XX_INIT_I2C_1:
			iop13xx_i2c_1_controller.id = i2c_idx++;
			iop13xx_devices[plat_idx++] =
				&iop13xx_i2c_1_controller;
			break;
		case IOP13XX_INIT_I2C_2:
			iop13xx_i2c_2_controller.id = i2c_idx++;
			iop13xx_devices[plat_idx++] =
				&iop13xx_i2c_2_controller;
			break;
		}
	}

	/* initialize adma channel ids and capabilities */
	adma_idx = 0;
	for (i = 0; i < IQ81340_NUM_ADMA; i++) {
		struct iop_adma_platform_data *plat_data;
		if ((init_adma & (1 << i)) && IOP13XX_SETUP_DEBUG)
			printk(KERN_INFO
				"Adding adma%d to platform device list\n", i);
		switch (init_adma & (1 << i)) {
		case IOP13XX_INIT_ADMA_0:
			iop13xx_adma_0_channel.id = adma_idx++;
			iop13xx_devices[plat_idx++] = &iop13xx_adma_0_channel;
			plat_data = &iop13xx_adma_0_data;
			dma_cap_set(DMA_MEMCPY, plat_data->cap_mask);
			dma_cap_set(DMA_XOR, plat_data->cap_mask);
			dma_cap_set(DMA_XOR_VAL, plat_data->cap_mask);
			dma_cap_set(DMA_MEMSET, plat_data->cap_mask);
			dma_cap_set(DMA_INTERRUPT, plat_data->cap_mask);
			break;
		case IOP13XX_INIT_ADMA_1:
			iop13xx_adma_1_channel.id = adma_idx++;
			iop13xx_devices[plat_idx++] = &iop13xx_adma_1_channel;
			plat_data = &iop13xx_adma_1_data;
			dma_cap_set(DMA_MEMCPY, plat_data->cap_mask);
			dma_cap_set(DMA_XOR, plat_data->cap_mask);
			dma_cap_set(DMA_XOR_VAL, plat_data->cap_mask);
			dma_cap_set(DMA_MEMSET, plat_data->cap_mask);
			dma_cap_set(DMA_INTERRUPT, plat_data->cap_mask);
			break;
		case IOP13XX_INIT_ADMA_2:
			/* channel 2 additionally supports P+Q (RAID6) */
			iop13xx_adma_2_channel.id = adma_idx++;
			iop13xx_devices[plat_idx++] = &iop13xx_adma_2_channel;
			plat_data = &iop13xx_adma_2_data;
			dma_cap_set(DMA_MEMCPY, plat_data->cap_mask);
			dma_cap_set(DMA_XOR, plat_data->cap_mask);
			dma_cap_set(DMA_XOR_VAL, plat_data->cap_mask);
			dma_cap_set(DMA_MEMSET, plat_data->cap_mask);
			dma_cap_set(DMA_INTERRUPT, plat_data->cap_mask);
			dma_cap_set(DMA_PQ, plat_data->cap_mask);
			dma_cap_set(DMA_PQ_VAL, plat_data->cap_mask);
			break;
		}
	}

#ifdef CONFIG_MTD_PHYSMAP
	iq8134x_flash_resource.end = iq8134x_flash_resource.start +
				iq8134x_probe_flash_size() - 1;
	if (iq8134x_flash_resource.end > iq8134x_flash_resource.start)
		iop13xx_devices[plat_idx++] = &iq8134x_flash;
	else
		printk(KERN_ERR "%s: Failed to probe flash size\n", __func__);
#endif

	platform_add_devices(iop13xx_devices, plat_idx);
}

/*
 * Parse "iop13xx_init_uart=..." from the kernel command line; each
 * '0'/'1' character enables the corresponding uart.  A malformed
 * character truncates the string and falls back to the defaults.
 */
static int __init iop13xx_init_uart_setup(char *str)
{
	if (str) {
		while (*str != '\0') {
			switch(*str) {
			case '0':
				init_uart |= IOP13XX_INIT_UART_0;
				break;
			case '1':
				init_uart |= IOP13XX_INIT_UART_1;
				break;
			case ',':
			case '=':
				break;
			default:
				PRINTK("\"iop13xx_init_uart\" malformed"
					    " at character: \'%c\'", *str);
				*(str + 1) = '\0';
				init_uart = IOP13XX_INIT_UART_DEFAULT;
			}
			str++;
		}
	}
	return 1;
}

/* Parse "iop13xx_init_i2c=..."; same scheme as the uart parser above */
static int __init iop13xx_init_i2c_setup(char *str)
{
	if (str) {
		while (*str != '\0') {
			switch(*str) {
			case '0':
				init_i2c |= IOP13XX_INIT_I2C_0;
				break;
			case '1':
				init_i2c |= IOP13XX_INIT_I2C_1;
				break;
			case '2':
				init_i2c |= IOP13XX_INIT_I2C_2;
				break;
			case ',':
			case '=':
				break;
			default:
				PRINTK("\"iop13xx_init_i2c\" malformed"
					    " at character: \'%c\'", *str);
				*(str + 1) = '\0';
				init_i2c = IOP13XX_INIT_I2C_DEFAULT;
			}
			str++;
		}
	}
	return 1;
}

/* Parse "iop13xx_init_adma=..."; same scheme as the uart parser above */
static int __init iop13xx_init_adma_setup(char *str)
{
	if (str) {
		while (*str != '\0') {
			switch (*str) {
			case '0':
				init_adma |= IOP13XX_INIT_ADMA_0;
				break;
			case '1':
				init_adma |= IOP13XX_INIT_ADMA_1;
				break;
			case '2':
				init_adma |= IOP13XX_INIT_ADMA_2;
				break;
			case ',':
			case '=':
				break;
			default:
				PRINTK("\"iop13xx_init_adma\" malformed"
					    " at character: \'%c\'", *str);
				*(str + 1) = '\0';
				init_adma = IOP13XX_INIT_ADMA_DEFAULT;
			}
			str++;
		}
	}
	return 1;
}

__setup("iop13xx_init_adma", iop13xx_init_adma_setup);
__setup("iop13xx_init_uart", iop13xx_init_uart_setup);
__setup("iop13xx_init_i2c", iop13xx_init_i2c_setup);
gpl-2.0
Shelnutt2/android_kernel_lge_gee_3.4
arch/openrisc/kernel/irq.c
4474
4511
/* * OpenRISC irq.c * * Linux architectural port borrowing liberally from similar works of * others. All original copyrights apply as per the original source * declaration. * * Modifications for the OpenRISC architecture: * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/ptrace.h> #include <linux/errno.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/of.h> #include <linux/ftrace.h> #include <linux/irq.h> #include <linux/seq_file.h> #include <linux/kernel_stat.h> #include <linux/export.h> #include <linux/irqflags.h> /* read interrupt enabled status */ unsigned long arch_local_save_flags(void) { return mfspr(SPR_SR) & (SPR_SR_IEE|SPR_SR_TEE); } EXPORT_SYMBOL(arch_local_save_flags); /* set interrupt enabled status */ void arch_local_irq_restore(unsigned long flags) { mtspr(SPR_SR, ((mfspr(SPR_SR) & ~(SPR_SR_IEE|SPR_SR_TEE)) | flags)); } EXPORT_SYMBOL(arch_local_irq_restore); /* OR1K PIC implementation */ /* We're a couple of cycles faster than the generic implementations with * these 'fast' versions. */ static void or1k_pic_mask(struct irq_data *data) { mtspr(SPR_PICMR, mfspr(SPR_PICMR) & ~(1UL << data->irq)); } static void or1k_pic_unmask(struct irq_data *data) { mtspr(SPR_PICMR, mfspr(SPR_PICMR) | (1UL << data->irq)); } static void or1k_pic_ack(struct irq_data *data) { /* EDGE-triggered interrupts need to be ack'ed in order to clear * the latch. * LEVER-triggered interrupts do not need to be ack'ed; however, * ack'ing the interrupt has no ill-effect and is quicker than * trying to figure out what type it is... */ /* The OpenRISC 1000 spec says to write a 1 to the bit to ack the * interrupt, but the OR1200 does this backwards and requires a 0 * to be written... 
*/ #ifdef CONFIG_OR1K_1200 /* There are two oddities with the OR1200 PIC implementation: * i) LEVEL-triggered interrupts are latched and need to be cleared * ii) the interrupt latch is cleared by writing a 0 to the bit, * as opposed to a 1 as mandated by the spec */ mtspr(SPR_PICSR, mfspr(SPR_PICSR) & ~(1UL << data->irq)); #else WARN(1, "Interrupt handling possibily broken\n"); mtspr(SPR_PICSR, (1UL << irq)); #endif } static void or1k_pic_mask_ack(struct irq_data *data) { /* Comments for pic_ack apply here, too */ #ifdef CONFIG_OR1K_1200 mtspr(SPR_PICSR, mfspr(SPR_PICSR) & ~(1UL << data->irq)); #else WARN(1, "Interrupt handling possibily broken\n"); mtspr(SPR_PICSR, (1UL << irq)); #endif } static int or1k_pic_set_type(struct irq_data *data, unsigned int flow_type) { /* There's nothing to do in the PIC configuration when changing * flow type. Level and edge-triggered interrupts are both * supported, but it's PIC-implementation specific which type * is handled. */ return irq_setup_alt_chip(data, flow_type); } static inline int pic_get_irq(int first) { int irq; irq = ffs(mfspr(SPR_PICSR) >> first); return irq ? irq + first - 1 : NO_IRQ; } static void __init or1k_irq_init(void) { struct irq_chip_generic *gc; struct irq_chip_type *ct; /* Disable all interrupts until explicitly requested */ mtspr(SPR_PICMR, (0UL)); gc = irq_alloc_generic_chip("or1k-PIC", 1, 0, 0, handle_level_irq); ct = gc->chip_types; ct->chip.irq_unmask = or1k_pic_unmask; ct->chip.irq_mask = or1k_pic_mask; ct->chip.irq_ack = or1k_pic_ack; ct->chip.irq_mask_ack = or1k_pic_mask_ack; ct->chip.irq_set_type = or1k_pic_set_type; /* The OR1K PIC can handle both level and edge trigged * interrupts in roughly the same manner */ #if 0 /* FIXME: chip.type??? 
*/ ct->chip.type = IRQ_TYPE_EDGE_BOTH | IRQ_TYPE_LEVEL_MASK; #endif irq_setup_generic_chip(gc, IRQ_MSK(NR_IRQS), 0, IRQ_NOREQUEST, IRQ_LEVEL | IRQ_NOPROBE); } void __init init_IRQ(void) { or1k_irq_init(); } void __irq_entry do_IRQ(struct pt_regs *regs) { int irq = -1; struct pt_regs *old_regs = set_irq_regs(regs); irq_enter(); while ((irq = pic_get_irq(irq + 1)) != NO_IRQ) generic_handle_irq(irq); irq_exit(); set_irq_regs(old_regs); } unsigned int irq_create_of_mapping(struct device_node *controller, const u32 *intspec, unsigned int intsize) { return intspec[0]; } EXPORT_SYMBOL_GPL(irq_create_of_mapping);
gpl-2.0
TEAM-RAZOR-DEVICES/kernel_lge_mako
arch/arm/mach-at91/board-sam9260ek.c
4730
7954
/*
 * linux/arch/arm/mach-at91/board-sam9260ek.c
 *
 *  Copyright (C) 2005 SAN People
 *  Copyright (C) 2006 Atmel
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <linux/types.h>
#include <linux/gpio.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/spi/at73c213.h>
#include <linux/clk.h>
#include <linux/i2c/at24.h>
#include <linux/gpio_keys.h>
#include <linux/input.h>

#include <asm/setup.h>
#include <asm/mach-types.h>
#include <asm/irq.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/irq.h>

#include <mach/hardware.h>
#include <mach/board.h>
#include <mach/at91sam9_smc.h>
#include <mach/at91_shdwc.h>
#include <mach/system_rev.h>

#include "sam9_smc.h"
#include "generic.h"

/* Early board setup: clock rate and the console/debug uarts */
static void __init ek_init_early(void)
{
	/* Initialize processor: 18.432 MHz crystal */
	at91_initialize(18432000);

	/* DBGU on ttyS0. (Rx & Tx only) */
	at91_register_uart(0, 0, 0);

	/* USART0 on ttyS1. (Rx, Tx, CTS, RTS, DTR, DSR, DCD, RI) */
	at91_register_uart(AT91SAM9260_ID_US0, 1, ATMEL_UART_CTS | ATMEL_UART_RTS
			   | ATMEL_UART_DTR | ATMEL_UART_DSR | ATMEL_UART_DCD
			   | ATMEL_UART_RI);

	/* USART1 on ttyS2. (Rx, Tx, RTS, CTS) */
	at91_register_uart(AT91SAM9260_ID_US1, 2, ATMEL_UART_CTS | ATMEL_UART_RTS);

	/* set serial console to ttyS0 (ie, DBGU) */
	at91_set_serial_console(0);
}

/*
 * USB Host port
 */
static struct at91_usbh_data __initdata ek_usbh_data = {
	.ports		= 2,
	.vbus_pin	= {-EINVAL, -EINVAL},
	.overcurrent_pin= {-EINVAL, -EINVAL},
};

/*
 * USB Device port
 */
static struct at91_udc_data __initdata ek_udc_data = {
	.vbus_pin	= AT91_PIN_PC5,
	.pullup_pin	= -EINVAL,		/* pull-up driven by UDC */
};


/*
 * Audio
 */
static struct at73c213_board_info at73c213_data = {
	.ssc_id		= 0,
	.shortname	= "AT91SAM9260-EK external DAC",
};

#if defined(CONFIG_SND_AT73C213) || defined(CONFIG_SND_AT73C213_MODULE)
/* Route PCK0 (fed from PLLA) to the external DAC as its master clock */
static void __init at73c213_set_clk(struct at73c213_board_info *info)
{
	struct clk *pck0;
	struct clk *plla;

	pck0 = clk_get(NULL, "pck0");
	plla = clk_get(NULL, "plla");

	/* AT73C213 MCK Clock */
	at91_set_B_periph(AT91_PIN_PC1, 0);	/* PCK0 */

	clk_set_parent(pck0, plla);
	clk_put(plla);

	info->dac_clk = pck0;
}
#else
static void __init at73c213_set_clk(struct at73c213_board_info *info) {}
#endif

/*
 * SPI devices.
 */
static struct spi_board_info ek_spi_devices[] = {
#if !defined(CONFIG_MMC_AT91)
	{	/* DataFlash chip */
		.modalias	= "mtd_dataflash",
		.chip_select	= 1,
		.max_speed_hz	= 15 * 1000 * 1000,
		.bus_num	= 0,
	},
#if defined(CONFIG_MTD_AT91_DATAFLASH_CARD)
	{	/* DataFlash card */
		.modalias	= "mtd_dataflash",
		.chip_select	= 0,
		.max_speed_hz	= 15 * 1000 * 1000,
		.bus_num	= 0,
	},
#endif
#endif
#if defined(CONFIG_SND_AT73C213) || defined(CONFIG_SND_AT73C213_MODULE)
	{	/* AT73C213 DAC */
		.modalias	= "at73c213",
		.chip_select	= 0,
		.max_speed_hz	= 10 * 1000 * 1000,
		.bus_num	= 1,
		.mode		= SPI_MODE_1,
		.platform_data	= &at73c213_data,
	},
#endif
};


/*
 * MACB Ethernet device
 */
static struct macb_platform_data __initdata ek_macb_data = {
	.phy_irq_pin	= AT91_PIN_PA7,
	.is_rmii	= 1,
};


/*
 * NAND flash
 */
static struct mtd_partition __initdata ek_nand_partition[] = {
	{
		.name	= "Partition 1",
		.offset	= 0,
		.size	= SZ_256K,
	},
	{
		.name	= "Partition 2",
		.offset	= MTDPART_OFS_NXTBLK,
		.size	= MTDPART_SIZ_FULL,
	},
};

static struct atmel_nand_data __initdata ek_nand_data = {
	.ale		= 21,
	.cle		= 22,
	.det_pin	= -EINVAL,
	.rdy_pin	= AT91_PIN_PC13,
	.enable_pin	= AT91_PIN_PC14,
	.ecc_mode	= NAND_ECC_SOFT,
	.on_flash_bbt	= 1,
	.parts		= ek_nand_partition,
	.num_parts	= ARRAY_SIZE(ek_nand_partition),
};

/* Static memory controller timings for the NAND on chip-select 3 */
static struct sam9_smc_config __initdata ek_nand_smc_config = {
	.ncs_read_setup		= 0,
	.nrd_setup		= 1,
	.ncs_write_setup	= 0,
	.nwe_setup		= 1,

	.ncs_read_pulse		= 3,
	.nrd_pulse		= 3,
	.ncs_write_pulse	= 3,
	.nwe_pulse		= 3,

	.read_cycle		= 5,
	.write_cycle		= 5,

	.mode			= AT91_SMC_READMODE | AT91_SMC_WRITEMODE | AT91_SMC_EXNWMODE_DISABLE,
	.tdf_cycles		= 2,
};

/* Pick the bus width from the board revision, program the SMC, register NAND */
static void __init ek_add_device_nand(void)
{
	ek_nand_data.bus_width_16 = board_have_nand_16bit();
	/* setup bus-width (8 or 16) */
	if (ek_nand_data.bus_width_16)
		ek_nand_smc_config.mode |= AT91_SMC_DBW_16;
	else
		ek_nand_smc_config.mode |= AT91_SMC_DBW_8;

	/* configure chip-select 3 (NAND) */
	sam9_smc_configure(0, 3, &ek_nand_smc_config);

	at91_add_device_nand(&ek_nand_data);
}


/*
 * MCI (SD/MMC)
 */
static struct at91_mmc_data __initdata ek_mmc_data = {
	.slot_b		= 1,
	.wire4		= 1,
	.det_pin	= -EINVAL,
	.wp_pin		= -EINVAL,
	.vcc_pin	= -EINVAL,
};

/*
 * LEDs
 */
static struct gpio_led ek_leds[] = {
	{	/* "bottom" led, green, userled1 to be defined */
		.name			= "ds5",
		.gpio			= AT91_PIN_PA6,
		.active_low		= 1,
		.default_trigger	= "none",
	},
	{	/* "power" led, yellow */
		.name			= "ds1",
		.gpio			= AT91_PIN_PA9,
		.default_trigger	= "heartbeat",
	}
};

/*
 * I2C devices
 */
static struct at24_platform_data at24c512 = {
	.byte_len	= SZ_512K / 8,
	.page_size	= 128,
	.flags		= AT24_FLAG_ADDR16,
};

static struct i2c_board_info __initdata ek_i2c_devices[] = {
	{
		I2C_BOARD_INFO("24c512", 0x50),
		.platform_data = &at24c512,
	},
	/* more devices can be added using expansion connectors */
};


/*
 * GPIO Buttons
 */
#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
static struct gpio_keys_button ek_buttons[] = {
	{
		.gpio		= AT91_PIN_PA30,
		.code		= BTN_3,
		.desc		= "Button 3",
		.active_low	= 1,
		.wakeup		= 1,
	},
	{
		.gpio		= AT91_PIN_PA31,
		.code		= BTN_4,
		.desc		= "Button 4",
		.active_low	= 1,
		.wakeup		= 1,
	}
};

static struct gpio_keys_platform_data ek_button_data = {
	.buttons	= ek_buttons,
	.nbuttons	= ARRAY_SIZE(ek_buttons),
};

static struct platform_device ek_button_device = {
	.name		= "gpio-keys",
	.id		= -1,
	.num_resources	= 0,
	.dev		= {
		.platform_data	= &ek_button_data,
	}
};

/* Configure the two button GPIOs (input + deglitch) and register the device */
static void __init ek_add_device_buttons(void)
{
	at91_set_gpio_input(AT91_PIN_PA30, 1);	/* btn3 */
	at91_set_deglitch(AT91_PIN_PA30, 1);
	at91_set_gpio_input(AT91_PIN_PA31, 1);	/* btn4 */
	at91_set_deglitch(AT91_PIN_PA31, 1);

	platform_device_register(&ek_button_device);
}
#else
static void __init ek_add_device_buttons(void) {}
#endif

/* Register every on-board peripheral declared above */
static void __init ek_board_init(void)
{
	/* Serial */
	at91_add_device_serial();
	/* USB Host */
	at91_add_device_usbh(&ek_usbh_data);
	/* USB Device */
	at91_add_device_udc(&ek_udc_data);
	/* SPI */
	at91_add_device_spi(ek_spi_devices, ARRAY_SIZE(ek_spi_devices));
	/* NAND */
	ek_add_device_nand();
	/* Ethernet */
	at91_add_device_eth(&ek_macb_data);
	/* MMC */
	at91_add_device_mmc(0, &ek_mmc_data);
	/* I2C */
	at91_add_device_i2c(ek_i2c_devices, ARRAY_SIZE(ek_i2c_devices));
	/* SSC (to AT73C213) */
	at73c213_set_clk(&at73c213_data);
	at91_add_device_ssc(AT91SAM9260_ID_SSC, ATMEL_SSC_TX);
	/* LEDs */
	at91_gpio_leds(ek_leds, ARRAY_SIZE(ek_leds));
	/* Push Buttons */
	ek_add_device_buttons();
}

MACHINE_START(AT91SAM9260EK, "Atmel AT91SAM9260-EK")
	/* Maintainer: Atmel */
	.timer		= &at91sam926x_timer,
	.map_io		= at91_map_io,
	.init_early	= ek_init_early,
	.init_irq	= at91_init_irq_default,
	.init_machine	= ek_board_init,
MACHINE_END
gpl-2.0
Stuxnet-Kernel/kernel_g3
arch/arm/mach-at91/board-rm9200dk.c
4730
5799
/*
 * linux/arch/arm/mach-at91/board-rm9200dk.c
 *
 *  Copyright (C) 2005 SAN People
 *
 *  Epson S1D framebuffer glue code is:
 *     Copyright (C) 2005 Thibaut VARENE <varenet@parisc-linux.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <linux/types.h>
#include <linux/gpio.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/mtd/physmap.h>

#include <asm/setup.h>
#include <asm/mach-types.h>
#include <asm/irq.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/irq.h>

#include <mach/hardware.h>
#include <mach/board.h>
#include <mach/at91rm9200_mc.h>
#include <mach/at91_ramc.h>

#include "generic.h"

/* Early board setup: clock rate, board LEDs, and the console/debug uarts */
static void __init dk_init_early(void)
{
	/* Initialize processor: 18.432 MHz crystal */
	at91_initialize(18432000);

	/* Setup the LEDs */
	at91_init_leds(AT91_PIN_PB2, AT91_PIN_PB2);

	/* DBGU on ttyS0. (Rx & Tx only) */
	at91_register_uart(0, 0, 0);

	/* USART1 on ttyS1. (Rx, Tx, CTS, RTS, DTR, DSR, DCD, RI) */
	at91_register_uart(AT91RM9200_ID_US1, 1, ATMEL_UART_CTS | ATMEL_UART_RTS
			   | ATMEL_UART_DTR | ATMEL_UART_DSR | ATMEL_UART_DCD
			   | ATMEL_UART_RI);

	/* set serial console to ttyS0 (ie, DBGU) */
	at91_set_serial_console(0);
}

/* Ethernet (RMII, PHY interrupt on PC4) */
static struct macb_platform_data __initdata dk_eth_data = {
	.phy_irq_pin	= AT91_PIN_PC4,
	.is_rmii	= 1,
};

static struct at91_usbh_data __initdata dk_usbh_data = {
	.ports		= 2,
	.vbus_pin	= {-EINVAL, -EINVAL},
	.overcurrent_pin= {-EINVAL, -EINVAL},
};

static struct at91_udc_data __initdata dk_udc_data = {
	.vbus_pin	= AT91_PIN_PD4,
	.pullup_pin	= AT91_PIN_PD5,
};

static struct at91_cf_data __initdata dk_cf_data = {
	.irq_pin	= -EINVAL,
	.det_pin	= AT91_PIN_PB0,
	.vcc_pin	= -EINVAL,
	.rst_pin	= AT91_PIN_PC5,
};

#ifndef CONFIG_MTD_AT91_DATAFLASH_CARD
static struct at91_mmc_data __initdata dk_mmc_data = {
	.slot_b		= 0,
	.wire4		= 1,
	.det_pin	= -EINVAL,
	.wp_pin		= -EINVAL,
	.vcc_pin	= -EINVAL,
};
#endif

static struct spi_board_info dk_spi_devices[] = {
	{	/* DataFlash chip */
		.modalias	= "mtd_dataflash",
		.chip_select	= 0,
		.max_speed_hz	= 15 * 1000 * 1000,
	},
	{	/* UR6HCPS2-SP40 PS2-to-SPI adapter */
		.modalias	= "ur6hcps2",
		.chip_select	= 1,
		.max_speed_hz	= 250 *  1000,
	},
	{	/* TLV1504 ADC, 4 channels, 10 bits; one is a temp sensor */
		.modalias	= "tlv1504",
		.chip_select	= 2,
		.max_speed_hz	= 20 * 1000 * 1000,
	},
#ifdef CONFIG_MTD_AT91_DATAFLASH_CARD
	{	/* DataFlash card */
		.modalias	= "mtd_dataflash",
		.chip_select	= 3,
		.max_speed_hz	= 15 * 1000 * 1000,
	}
#endif
};

static struct i2c_board_info __initdata dk_i2c_devices[] = {
	{
		I2C_BOARD_INFO("ics1523", 0x26),
	},
	{
		I2C_BOARD_INFO("x9429", 0x28),
	},
	{
		I2C_BOARD_INFO("24c1024", 0x50),
	}
};

static struct mtd_partition __initdata dk_nand_partition[] = {
	{
		.name	= "NAND Partition 1",
		.offset	= 0,
		.size	= MTDPART_SIZ_FULL,
	},
};

static struct atmel_nand_data __initdata dk_nand_data = {
	.ale		= 22,
	.cle		= 21,
	.det_pin	= AT91_PIN_PB1,
	.rdy_pin	= AT91_PIN_PC2,
	.enable_pin	= -EINVAL,
	.ecc_mode	= NAND_ECC_SOFT,
	.on_flash_bbt	= 1,
	.parts		= dk_nand_partition,
	.num_parts	= ARRAY_SIZE(dk_nand_partition),
};

#define DK_FLASH_BASE	AT91_CHIPSELECT_0
#define DK_FLASH_SIZE	SZ_2M

static struct physmap_flash_data dk_flash_data = {
	.width		= 2,
};

static struct resource dk_flash_resource = {
	.start		= DK_FLASH_BASE,
	.end		= DK_FLASH_BASE + DK_FLASH_SIZE - 1,
	.flags		= IORESOURCE_MEM,
};

static struct platform_device dk_flash = {
	.name		= "physmap-flash",
	.id		= 0,
	.dev		= {
				.platform_data	= &dk_flash_data,
			},
	.resource	= &dk_flash_resource,
	.num_resources	= 1,
};

static struct gpio_led dk_leds[] = {
	{
		.name			= "led0",
		.gpio			= AT91_PIN_PB2,
		.active_low		= 1,
		.default_trigger	= "heartbeat",
	}
};

/* Register every on-board peripheral declared above */
static void __init dk_board_init(void)
{
	/* Serial */
	at91_add_device_serial();
	/* Ethernet */
	at91_add_device_eth(&dk_eth_data);
	/* USB Host */
	at91_add_device_usbh(&dk_usbh_data);
	/* USB Device */
	at91_add_device_udc(&dk_udc_data);
	at91_set_multi_drive(dk_udc_data.pullup_pin, 1);	/* pullup_pin is connected to reset */
	/* Compact Flash */
	at91_add_device_cf(&dk_cf_data);
	/* I2C */
	at91_add_device_i2c(dk_i2c_devices, ARRAY_SIZE(dk_i2c_devices));
	/* SPI */
	at91_add_device_spi(dk_spi_devices, ARRAY_SIZE(dk_spi_devices));
#ifdef CONFIG_MTD_AT91_DATAFLASH_CARD
	/* DataFlash card */
	at91_set_gpio_output(AT91_PIN_PB7, 0);
#else
	/* MMC */
	at91_set_gpio_output(AT91_PIN_PB7, 1);	/* this MMC card slot can optionally use SPI signaling (CS3). */
	at91_add_device_mmc(0, &dk_mmc_data);
#endif
	/* NAND */
	at91_add_device_nand(&dk_nand_data);
	/* NOR Flash */
	platform_device_register(&dk_flash);
	/* LEDs */
	at91_gpio_leds(dk_leds, ARRAY_SIZE(dk_leds));
	/* VGA */
//	dk_add_device_video();
}

MACHINE_START(AT91RM9200DK, "Atmel AT91RM9200-DK")
	/* Maintainer: SAN People/Atmel */
	.timer		= &at91rm9200_timer,
	.map_io		= at91_map_io,
	.init_early	= dk_init_early,
	.init_irq	= at91_init_irq_default,
	.init_machine	= dk_board_init,
MACHINE_END
gpl-2.0
Team-Blackout/Blackout_Ville_plus
arch/arm/mach-iop13xx/iq81340sc.c
4730
2656
/*
 * iq81340sc board support
 * Copyright (c) 2005-2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 */
#include <linux/pci.h>

#include <mach/hardware.h>
#include <asm/irq.h>
#include <asm/mach/pci.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <mach/pci.h>
#include <asm/mach/time.h>
#include <mach/time.h>

extern int init_atu;

/*
 * Board-specific PCI interrupt routing for the ATU-X bus: map a device
 * (by idsel) and its INTx pin to one of the ATUX_INT* lines.  Only
 * idsel 1 and 2 are populated on this board; anything else returns -1.
 */
static int __init
iq81340sc_atux_map_irq(struct pci_dev *dev, u8 idsel, u8 pin)
{
	WARN_ON(idsel < 1 || idsel > 2);

	switch (idsel) {
	case 1:
		/* slot 1: INTx pins rotated across the four ATUX lines */
		switch (pin) {
		case 1: return ATUX_INTB;
		case 2: return ATUX_INTC;
		case 3: return ATUX_INTD;
		case 4: return ATUX_INTA;
		default: return -1;
		}
	case 2:
		/* slot 2: all four pins share ATUX_INTC */
		switch (pin) {
		case 1: return ATUX_INTC;
		case 2: return ATUX_INTC;
		case 3: return ATUX_INTC;
		case 4: return ATUX_INTC;
		default: return -1;
		}
	default: return -1;
	}
}

/* nr_controllers is left 0 here; iop13xx_atu_select() decides what to scan */
static struct hw_pci iq81340sc_pci __initdata = {
	.swizzle	= pci_std_swizzle,
	.nr_controllers = 0,
	.setup		= iop13xx_pci_setup,
	.scan		= iop13xx_scan_bus,
	.map_irq	= iq81340sc_atux_map_irq,
	.preinit	= iop13xx_pci_init
};

/* Select the active ATU(s), bring up PCI, and map PCI memory space */
static int __init iq81340sc_pci_init(void)
{
	iop13xx_atu_select(&iq81340sc_pci);
	pci_common_init(&iq81340sc_pci);
	iop13xx_map_pci_memory();

	return 0;
}

/* Machine init: platform devices, PCI, then the TPMI units */
static void __init iq81340sc_init(void)
{
	iop13xx_platform_init();
	iq81340sc_pci_init();
	iop13xx_add_tpmi_devices();
}

/* Start the system timer at the internal bus frequency */
static void __init iq81340sc_timer_init(void)
{
	unsigned long bus_freq = iop13xx_core_freq() / iop13xx_xsi_bus_ratio();
	printk(KERN_DEBUG "%s: bus frequency: %lu\n", __func__, bus_freq);
	iop_init_time(bus_freq);
}

static struct sys_timer iq81340sc_timer = {
       .init       = iq81340sc_timer_init,
};

MACHINE_START(IQ81340SC, "Intel IQ81340SC")
	/* Maintainer: Dan Williams <dan.j.williams@intel.com> */
	.atag_offset    = 0x100,
	.init_early     = iop13xx_init_early,
	.map_io         = iop13xx_map_io,
	.init_irq       = iop13xx_init_irq,
	.timer          = &iq81340sc_timer,
	.init_machine   = iq81340sc_init,
	.restart	= iop13xx_restart,
MACHINE_END
gpl-2.0
TeamTwisted/hells-Core-N5
arch/arm/mach-ixp4xx/goramo_mlr.c
4730
12484
/* * Goramo MultiLink router platform code * Copyright (C) 2006-2009 Krzysztof Halasa <khc@pm.waw.pl> */ #include <linux/delay.h> #include <linux/hdlc.h> #include <linux/i2c-gpio.h> #include <linux/io.h> #include <linux/irq.h> #include <linux/kernel.h> #include <linux/pci.h> #include <linux/serial_8250.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/mach/flash.h> #include <asm/mach/pci.h> #define SLOT_ETHA 0x0B /* IDSEL = AD21 */ #define SLOT_ETHB 0x0C /* IDSEL = AD20 */ #define SLOT_MPCI 0x0D /* IDSEL = AD19 */ #define SLOT_NEC 0x0E /* IDSEL = AD18 */ /* GPIO lines */ #define GPIO_SCL 0 #define GPIO_SDA 1 #define GPIO_STR 2 #define GPIO_IRQ_NEC 3 #define GPIO_IRQ_ETHA 4 #define GPIO_IRQ_ETHB 5 #define GPIO_HSS0_DCD_N 6 #define GPIO_HSS1_DCD_N 7 #define GPIO_UART0_DCD 8 #define GPIO_UART1_DCD 9 #define GPIO_HSS0_CTS_N 10 #define GPIO_HSS1_CTS_N 11 #define GPIO_IRQ_MPCI 12 #define GPIO_HSS1_RTS_N 13 #define GPIO_HSS0_RTS_N 14 /* GPIO15 is not connected */ /* Control outputs from 74HC4094 */ #define CONTROL_HSS0_CLK_INT 0 #define CONTROL_HSS1_CLK_INT 1 #define CONTROL_HSS0_DTR_N 2 #define CONTROL_HSS1_DTR_N 3 #define CONTROL_EXT 4 #define CONTROL_AUTO_RESET 5 #define CONTROL_PCI_RESET_N 6 #define CONTROL_EEPROM_WC_N 7 /* offsets from start of flash ROM = 0x50000000 */ #define CFG_ETH0_ADDRESS 0x40 /* 6 bytes */ #define CFG_ETH1_ADDRESS 0x46 /* 6 bytes */ #define CFG_REV 0x4C /* u32 */ #define CFG_SDRAM_SIZE 0x50 /* u32 */ #define CFG_SDRAM_CONF 0x54 /* u32 */ #define CFG_SDRAM_MODE 0x58 /* u32 */ #define CFG_SDRAM_REFRESH 0x5C /* u32 */ #define CFG_HW_BITS 0x60 /* u32 */ #define CFG_HW_USB_PORTS 0x00000007 /* 0 = no NEC chip, 1-5 = ports # */ #define CFG_HW_HAS_PCI_SLOT 0x00000008 #define CFG_HW_HAS_ETH0 0x00000010 #define CFG_HW_HAS_ETH1 0x00000020 #define CFG_HW_HAS_HSS0 0x00000040 #define CFG_HW_HAS_HSS1 0x00000080 #define CFG_HW_HAS_UART0 0x00000100 #define CFG_HW_HAS_UART1 0x00000200 #define CFG_HW_HAS_EEPROM 0x00000400 #define 
FLASH_CMD_READ_ARRAY 0xFF #define FLASH_CMD_READ_ID 0x90 #define FLASH_SER_OFF 0x102 /* 0x81 in 16-bit mode */ static u32 hw_bits = 0xFFFFFFFD; /* assume all hardware present */; static u8 control_value; static void set_scl(u8 value) { gpio_line_set(GPIO_SCL, !!value); udelay(3); } static void set_sda(u8 value) { gpio_line_set(GPIO_SDA, !!value); udelay(3); } static void set_str(u8 value) { gpio_line_set(GPIO_STR, !!value); udelay(3); } static inline void set_control(int line, int value) { if (value) control_value |= (1 << line); else control_value &= ~(1 << line); } static void output_control(void) { int i; gpio_line_config(GPIO_SCL, IXP4XX_GPIO_OUT); gpio_line_config(GPIO_SDA, IXP4XX_GPIO_OUT); for (i = 0; i < 8; i++) { set_scl(0); set_sda(control_value & (0x80 >> i)); /* MSB first */ set_scl(1); /* active edge */ } set_str(1); set_str(0); set_scl(0); set_sda(1); /* Be ready for START */ set_scl(1); } static void (*set_carrier_cb_tab[2])(void *pdev, int carrier); static int hss_set_clock(int port, unsigned int clock_type) { int ctrl_int = port ? CONTROL_HSS1_CLK_INT : CONTROL_HSS0_CLK_INT; switch (clock_type) { case CLOCK_DEFAULT: case CLOCK_EXT: set_control(ctrl_int, 0); output_control(); return CLOCK_EXT; case CLOCK_INT: set_control(ctrl_int, 1); output_control(); return CLOCK_INT; default: return -EINVAL; } } static irqreturn_t hss_dcd_irq(int irq, void *pdev) { int i, port = (irq == IXP4XX_GPIO_IRQ(GPIO_HSS1_DCD_N)); gpio_line_get(port ? GPIO_HSS1_DCD_N : GPIO_HSS0_DCD_N, &i); set_carrier_cb_tab[port](pdev, !i); return IRQ_HANDLED; } static int hss_open(int port, void *pdev, void (*set_carrier_cb)(void *pdev, int carrier)) { int i, irq; if (!port) irq = IXP4XX_GPIO_IRQ(GPIO_HSS0_DCD_N); else irq = IXP4XX_GPIO_IRQ(GPIO_HSS1_DCD_N); gpio_line_get(port ? 
GPIO_HSS1_DCD_N : GPIO_HSS0_DCD_N, &i); set_carrier_cb(pdev, !i); set_carrier_cb_tab[!!port] = set_carrier_cb; if ((i = request_irq(irq, hss_dcd_irq, 0, "IXP4xx HSS", pdev)) != 0) { printk(KERN_ERR "ixp4xx_hss: failed to request IRQ%i (%i)\n", irq, i); return i; } set_control(port ? CONTROL_HSS1_DTR_N : CONTROL_HSS0_DTR_N, 0); output_control(); gpio_line_set(port ? GPIO_HSS1_RTS_N : GPIO_HSS0_RTS_N, 0); return 0; } static void hss_close(int port, void *pdev) { free_irq(port ? IXP4XX_GPIO_IRQ(GPIO_HSS1_DCD_N) : IXP4XX_GPIO_IRQ(GPIO_HSS0_DCD_N), pdev); set_carrier_cb_tab[!!port] = NULL; /* catch bugs */ set_control(port ? CONTROL_HSS1_DTR_N : CONTROL_HSS0_DTR_N, 1); output_control(); gpio_line_set(port ? GPIO_HSS1_RTS_N : GPIO_HSS0_RTS_N, 1); } /* Flash memory */ static struct flash_platform_data flash_data = { .map_name = "cfi_probe", .width = 2, }; static struct resource flash_resource = { .flags = IORESOURCE_MEM, }; static struct platform_device device_flash = { .name = "IXP4XX-Flash", .id = 0, .dev = { .platform_data = &flash_data }, .num_resources = 1, .resource = &flash_resource, }; /* I^2C interface */ static struct i2c_gpio_platform_data i2c_data = { .sda_pin = GPIO_SDA, .scl_pin = GPIO_SCL, }; static struct platform_device device_i2c = { .name = "i2c-gpio", .id = 0, .dev = { .platform_data = &i2c_data }, }; /* IXP425 2 UART ports */ static struct resource uart_resources[] = { { .start = IXP4XX_UART1_BASE_PHYS, .end = IXP4XX_UART1_BASE_PHYS + 0x0fff, .flags = IORESOURCE_MEM, }, { .start = IXP4XX_UART2_BASE_PHYS, .end = IXP4XX_UART2_BASE_PHYS + 0x0fff, .flags = IORESOURCE_MEM, } }; static struct plat_serial8250_port uart_data[] = { { .mapbase = IXP4XX_UART1_BASE_PHYS, .membase = (char __iomem *)IXP4XX_UART1_BASE_VIRT + REG_OFFSET, .irq = IRQ_IXP4XX_UART1, .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST, .iotype = UPIO_MEM, .regshift = 2, .uartclk = IXP4XX_UART_XTAL, }, { .mapbase = IXP4XX_UART2_BASE_PHYS, .membase = (char __iomem *)IXP4XX_UART2_BASE_VIRT + 
REG_OFFSET, .irq = IRQ_IXP4XX_UART2, .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST, .iotype = UPIO_MEM, .regshift = 2, .uartclk = IXP4XX_UART_XTAL, }, { }, }; static struct platform_device device_uarts = { .name = "serial8250", .id = PLAT8250_DEV_PLATFORM, .dev.platform_data = uart_data, .num_resources = 2, .resource = uart_resources, }; /* Built-in 10/100 Ethernet MAC interfaces */ static struct eth_plat_info eth_plat[] = { { .phy = 0, .rxq = 3, .txreadyq = 32, }, { .phy = 1, .rxq = 4, .txreadyq = 33, } }; static struct platform_device device_eth_tab[] = { { .name = "ixp4xx_eth", .id = IXP4XX_ETH_NPEB, .dev.platform_data = eth_plat, }, { .name = "ixp4xx_eth", .id = IXP4XX_ETH_NPEC, .dev.platform_data = eth_plat + 1, } }; /* IXP425 2 synchronous serial ports */ static struct hss_plat_info hss_plat[] = { { .set_clock = hss_set_clock, .open = hss_open, .close = hss_close, .txreadyq = 34, }, { .set_clock = hss_set_clock, .open = hss_open, .close = hss_close, .txreadyq = 35, } }; static struct platform_device device_hss_tab[] = { { .name = "ixp4xx_hss", .id = 0, .dev.platform_data = hss_plat, }, { .name = "ixp4xx_hss", .id = 1, .dev.platform_data = hss_plat + 1, } }; static struct platform_device *device_tab[6] __initdata = { &device_flash, /* index 0 */ }; static inline u8 __init flash_readb(u8 __iomem *flash, u32 addr) { #ifdef __ARMEB__ return __raw_readb(flash + addr); #else return __raw_readb(flash + (addr ^ 3)); #endif } static inline u16 __init flash_readw(u8 __iomem *flash, u32 addr) { #ifdef __ARMEB__ return __raw_readw(flash + addr); #else return __raw_readw(flash + (addr ^ 2)); #endif } static void __init gmlr_init(void) { u8 __iomem *flash; int i, devices = 1; /* flash */ ixp4xx_sys_init(); if ((flash = ioremap(IXP4XX_EXP_BUS_BASE_PHYS, 0x80)) == NULL) printk(KERN_ERR "goramo-mlr: unable to access system" " configuration data\n"); else { system_rev = __raw_readl(flash + CFG_REV); hw_bits = __raw_readl(flash + CFG_HW_BITS); for (i = 0; i < ETH_ALEN; i++) { 
eth_plat[0].hwaddr[i] = flash_readb(flash, CFG_ETH0_ADDRESS + i); eth_plat[1].hwaddr[i] = flash_readb(flash, CFG_ETH1_ADDRESS + i); } __raw_writew(FLASH_CMD_READ_ID, flash); system_serial_high = flash_readw(flash, FLASH_SER_OFF); system_serial_high <<= 16; system_serial_high |= flash_readw(flash, FLASH_SER_OFF + 2); system_serial_low = flash_readw(flash, FLASH_SER_OFF + 4); system_serial_low <<= 16; system_serial_low |= flash_readw(flash, FLASH_SER_OFF + 6); __raw_writew(FLASH_CMD_READ_ARRAY, flash); iounmap(flash); } switch (hw_bits & (CFG_HW_HAS_UART0 | CFG_HW_HAS_UART1)) { case CFG_HW_HAS_UART0: memset(&uart_data[1], 0, sizeof(uart_data[1])); device_uarts.num_resources = 1; break; case CFG_HW_HAS_UART1: device_uarts.dev.platform_data = &uart_data[1]; device_uarts.resource = &uart_resources[1]; device_uarts.num_resources = 1; break; } if (hw_bits & (CFG_HW_HAS_UART0 | CFG_HW_HAS_UART1)) device_tab[devices++] = &device_uarts; /* max index 1 */ if (hw_bits & CFG_HW_HAS_ETH0) device_tab[devices++] = &device_eth_tab[0]; /* max index 2 */ if (hw_bits & CFG_HW_HAS_ETH1) device_tab[devices++] = &device_eth_tab[1]; /* max index 3 */ if (hw_bits & CFG_HW_HAS_HSS0) device_tab[devices++] = &device_hss_tab[0]; /* max index 4 */ if (hw_bits & CFG_HW_HAS_HSS1) device_tab[devices++] = &device_hss_tab[1]; /* max index 5 */ if (hw_bits & CFG_HW_HAS_EEPROM) device_tab[devices++] = &device_i2c; /* max index 6 */ gpio_line_config(GPIO_SCL, IXP4XX_GPIO_OUT); gpio_line_config(GPIO_SDA, IXP4XX_GPIO_OUT); gpio_line_config(GPIO_STR, IXP4XX_GPIO_OUT); gpio_line_config(GPIO_HSS0_RTS_N, IXP4XX_GPIO_OUT); gpio_line_config(GPIO_HSS1_RTS_N, IXP4XX_GPIO_OUT); gpio_line_config(GPIO_HSS0_DCD_N, IXP4XX_GPIO_IN); gpio_line_config(GPIO_HSS1_DCD_N, IXP4XX_GPIO_IN); irq_set_irq_type(IXP4XX_GPIO_IRQ(GPIO_HSS0_DCD_N), IRQ_TYPE_EDGE_BOTH); irq_set_irq_type(IXP4XX_GPIO_IRQ(GPIO_HSS1_DCD_N), IRQ_TYPE_EDGE_BOTH); set_control(CONTROL_HSS0_DTR_N, 1); set_control(CONTROL_HSS1_DTR_N, 1); 
set_control(CONTROL_EEPROM_WC_N, 1); set_control(CONTROL_PCI_RESET_N, 1); output_control(); msleep(1); /* Wait for PCI devices to initialize */ flash_resource.start = IXP4XX_EXP_BUS_BASE(0); flash_resource.end = IXP4XX_EXP_BUS_BASE(0) + ixp4xx_exp_bus_size - 1; platform_add_devices(device_tab, devices); } #ifdef CONFIG_PCI static void __init gmlr_pci_preinit(void) { irq_set_irq_type(IXP4XX_GPIO_IRQ(GPIO_IRQ_ETHA), IRQ_TYPE_LEVEL_LOW); irq_set_irq_type(IXP4XX_GPIO_IRQ(GPIO_IRQ_ETHB), IRQ_TYPE_LEVEL_LOW); irq_set_irq_type(IXP4XX_GPIO_IRQ(GPIO_IRQ_NEC), IRQ_TYPE_LEVEL_LOW); irq_set_irq_type(IXP4XX_GPIO_IRQ(GPIO_IRQ_MPCI), IRQ_TYPE_LEVEL_LOW); ixp4xx_pci_preinit(); } static void __init gmlr_pci_postinit(void) { if ((hw_bits & CFG_HW_USB_PORTS) >= 2 && (hw_bits & CFG_HW_USB_PORTS) < 5) { /* need to adjust number of USB ports on NEC chip */ u32 value, addr = BIT(32 - SLOT_NEC) | 0xE0; if (!ixp4xx_pci_read(addr, NP_CMD_CONFIGREAD, &value)) { value &= ~7; value |= (hw_bits & CFG_HW_USB_PORTS); ixp4xx_pci_write(addr, NP_CMD_CONFIGWRITE, value); } } } static int __init gmlr_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { switch(slot) { case SLOT_ETHA: return IXP4XX_GPIO_IRQ(GPIO_IRQ_ETHA); case SLOT_ETHB: return IXP4XX_GPIO_IRQ(GPIO_IRQ_ETHB); case SLOT_NEC: return IXP4XX_GPIO_IRQ(GPIO_IRQ_NEC); default: return IXP4XX_GPIO_IRQ(GPIO_IRQ_MPCI); } } static struct hw_pci gmlr_hw_pci __initdata = { .nr_controllers = 1, .preinit = gmlr_pci_preinit, .postinit = gmlr_pci_postinit, .swizzle = pci_std_swizzle, .setup = ixp4xx_setup, .scan = ixp4xx_scan_bus, .map_irq = gmlr_map_irq, }; static int __init gmlr_pci_init(void) { if (machine_is_goramo_mlr() && (hw_bits & (CFG_HW_USB_PORTS | CFG_HW_HAS_PCI_SLOT))) pci_common_init(&gmlr_hw_pci); return 0; } subsys_initcall(gmlr_pci_init); #endif /* CONFIG_PCI */ MACHINE_START(GORAMO_MLR, "MultiLink") /* Maintainer: Krzysztof Halasa */ .map_io = ixp4xx_map_io, .init_early = ixp4xx_init_early, .init_irq = ixp4xx_init_irq, .timer = 
&ixp4xx_timer, .atag_offset = 0x100, .init_machine = gmlr_init, #if defined(CONFIG_PCI) .dma_zone_size = SZ_64M, #endif .restart = ixp4xx_restart, MACHINE_END
gpl-2.0
civato/9005-LL-DEV
arch/arm/mach-at91/at91sam9260.c
4730
10951
/* * arch/arm/mach-at91/at91sam9260.c * * Copyright (C) 2006 SAN People * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * */ #include <linux/module.h> #include <asm/proc-fns.h> #include <asm/irq.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <asm/system_misc.h> #include <mach/cpu.h> #include <mach/at91_dbgu.h> #include <mach/at91sam9260.h> #include <mach/at91_pmc.h> #include <mach/at91_rstc.h> #include "soc.h" #include "generic.h" #include "clock.h" #include "sam9_smc.h" /* -------------------------------------------------------------------- * Clocks * -------------------------------------------------------------------- */ /* * The peripheral clocks. */ static struct clk pioA_clk = { .name = "pioA_clk", .pmc_mask = 1 << AT91SAM9260_ID_PIOA, .type = CLK_TYPE_PERIPHERAL, }; static struct clk pioB_clk = { .name = "pioB_clk", .pmc_mask = 1 << AT91SAM9260_ID_PIOB, .type = CLK_TYPE_PERIPHERAL, }; static struct clk pioC_clk = { .name = "pioC_clk", .pmc_mask = 1 << AT91SAM9260_ID_PIOC, .type = CLK_TYPE_PERIPHERAL, }; static struct clk adc_clk = { .name = "adc_clk", .pmc_mask = 1 << AT91SAM9260_ID_ADC, .type = CLK_TYPE_PERIPHERAL, }; static struct clk usart0_clk = { .name = "usart0_clk", .pmc_mask = 1 << AT91SAM9260_ID_US0, .type = CLK_TYPE_PERIPHERAL, }; static struct clk usart1_clk = { .name = "usart1_clk", .pmc_mask = 1 << AT91SAM9260_ID_US1, .type = CLK_TYPE_PERIPHERAL, }; static struct clk usart2_clk = { .name = "usart2_clk", .pmc_mask = 1 << AT91SAM9260_ID_US2, .type = CLK_TYPE_PERIPHERAL, }; static struct clk mmc_clk = { .name = "mci_clk", .pmc_mask = 1 << AT91SAM9260_ID_MCI, .type = CLK_TYPE_PERIPHERAL, }; static struct clk udc_clk = { .name = "udc_clk", .pmc_mask = 1 << AT91SAM9260_ID_UDP, .type = CLK_TYPE_PERIPHERAL, }; static struct clk twi_clk = 
{ .name = "twi_clk", .pmc_mask = 1 << AT91SAM9260_ID_TWI, .type = CLK_TYPE_PERIPHERAL, }; static struct clk spi0_clk = { .name = "spi0_clk", .pmc_mask = 1 << AT91SAM9260_ID_SPI0, .type = CLK_TYPE_PERIPHERAL, }; static struct clk spi1_clk = { .name = "spi1_clk", .pmc_mask = 1 << AT91SAM9260_ID_SPI1, .type = CLK_TYPE_PERIPHERAL, }; static struct clk ssc_clk = { .name = "ssc_clk", .pmc_mask = 1 << AT91SAM9260_ID_SSC, .type = CLK_TYPE_PERIPHERAL, }; static struct clk tc0_clk = { .name = "tc0_clk", .pmc_mask = 1 << AT91SAM9260_ID_TC0, .type = CLK_TYPE_PERIPHERAL, }; static struct clk tc1_clk = { .name = "tc1_clk", .pmc_mask = 1 << AT91SAM9260_ID_TC1, .type = CLK_TYPE_PERIPHERAL, }; static struct clk tc2_clk = { .name = "tc2_clk", .pmc_mask = 1 << AT91SAM9260_ID_TC2, .type = CLK_TYPE_PERIPHERAL, }; static struct clk ohci_clk = { .name = "ohci_clk", .pmc_mask = 1 << AT91SAM9260_ID_UHP, .type = CLK_TYPE_PERIPHERAL, }; static struct clk macb_clk = { .name = "pclk", .pmc_mask = 1 << AT91SAM9260_ID_EMAC, .type = CLK_TYPE_PERIPHERAL, }; static struct clk isi_clk = { .name = "isi_clk", .pmc_mask = 1 << AT91SAM9260_ID_ISI, .type = CLK_TYPE_PERIPHERAL, }; static struct clk usart3_clk = { .name = "usart3_clk", .pmc_mask = 1 << AT91SAM9260_ID_US3, .type = CLK_TYPE_PERIPHERAL, }; static struct clk usart4_clk = { .name = "usart4_clk", .pmc_mask = 1 << AT91SAM9260_ID_US4, .type = CLK_TYPE_PERIPHERAL, }; static struct clk usart5_clk = { .name = "usart5_clk", .pmc_mask = 1 << AT91SAM9260_ID_US5, .type = CLK_TYPE_PERIPHERAL, }; static struct clk tc3_clk = { .name = "tc3_clk", .pmc_mask = 1 << AT91SAM9260_ID_TC3, .type = CLK_TYPE_PERIPHERAL, }; static struct clk tc4_clk = { .name = "tc4_clk", .pmc_mask = 1 << AT91SAM9260_ID_TC4, .type = CLK_TYPE_PERIPHERAL, }; static struct clk tc5_clk = { .name = "tc5_clk", .pmc_mask = 1 << AT91SAM9260_ID_TC5, .type = CLK_TYPE_PERIPHERAL, }; static struct clk *periph_clocks[] __initdata = { &pioA_clk, &pioB_clk, &pioC_clk, &adc_clk, &usart0_clk, 
&usart1_clk, &usart2_clk, &mmc_clk, &udc_clk, &twi_clk, &spi0_clk, &spi1_clk, &ssc_clk, &tc0_clk, &tc1_clk, &tc2_clk, &ohci_clk, &macb_clk, &isi_clk, &usart3_clk, &usart4_clk, &usart5_clk, &tc3_clk, &tc4_clk, &tc5_clk, // irq0 .. irq2 }; static struct clk_lookup periph_clocks_lookups[] = { /* One additional fake clock for macb_hclk */ CLKDEV_CON_ID("hclk", &macb_clk), CLKDEV_CON_DEV_ID("spi_clk", "atmel_spi.0", &spi0_clk), CLKDEV_CON_DEV_ID("spi_clk", "atmel_spi.1", &spi1_clk), CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.0", &tc0_clk), CLKDEV_CON_DEV_ID("t1_clk", "atmel_tcb.0", &tc1_clk), CLKDEV_CON_DEV_ID("t2_clk", "atmel_tcb.0", &tc2_clk), CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.1", &tc3_clk), CLKDEV_CON_DEV_ID("t1_clk", "atmel_tcb.1", &tc4_clk), CLKDEV_CON_DEV_ID("t2_clk", "atmel_tcb.1", &tc5_clk), CLKDEV_CON_DEV_ID("pclk", "ssc.0", &ssc_clk), /* more usart lookup table for DT entries */ CLKDEV_CON_DEV_ID("usart", "fffff200.serial", &mck), CLKDEV_CON_DEV_ID("usart", "fffb0000.serial", &usart0_clk), CLKDEV_CON_DEV_ID("usart", "fffb4000.serial", &usart1_clk), CLKDEV_CON_DEV_ID("usart", "fffb8000.serial", &usart2_clk), CLKDEV_CON_DEV_ID("usart", "fffd0000.serial", &usart3_clk), CLKDEV_CON_DEV_ID("usart", "fffd4000.serial", &usart4_clk), CLKDEV_CON_DEV_ID("usart", "fffd8000.serial", &usart5_clk), /* more tc lookup table for DT entries */ CLKDEV_CON_DEV_ID("t0_clk", "fffa0000.timer", &tc0_clk), CLKDEV_CON_DEV_ID("t1_clk", "fffa0000.timer", &tc1_clk), CLKDEV_CON_DEV_ID("t2_clk", "fffa0000.timer", &tc2_clk), CLKDEV_CON_DEV_ID("t0_clk", "fffdc000.timer", &tc3_clk), CLKDEV_CON_DEV_ID("t1_clk", "fffdc000.timer", &tc4_clk), CLKDEV_CON_DEV_ID("t2_clk", "fffdc000.timer", &tc5_clk), CLKDEV_CON_DEV_ID("hclk", "500000.ohci", &ohci_clk), /* fake hclk clock */ CLKDEV_CON_DEV_ID("hclk", "at91_ohci", &ohci_clk), CLKDEV_CON_ID("pioA", &pioA_clk), CLKDEV_CON_ID("pioB", &pioB_clk), CLKDEV_CON_ID("pioC", &pioC_clk), }; static struct clk_lookup usart_clocks_lookups[] = { 
CLKDEV_CON_DEV_ID("usart", "atmel_usart.0", &mck), CLKDEV_CON_DEV_ID("usart", "atmel_usart.1", &usart0_clk), CLKDEV_CON_DEV_ID("usart", "atmel_usart.2", &usart1_clk), CLKDEV_CON_DEV_ID("usart", "atmel_usart.3", &usart2_clk), CLKDEV_CON_DEV_ID("usart", "atmel_usart.4", &usart3_clk), CLKDEV_CON_DEV_ID("usart", "atmel_usart.5", &usart4_clk), CLKDEV_CON_DEV_ID("usart", "atmel_usart.6", &usart5_clk), }; /* * The two programmable clocks. * You must configure pin multiplexing to bring these signals out. */ static struct clk pck0 = { .name = "pck0", .pmc_mask = AT91_PMC_PCK0, .type = CLK_TYPE_PROGRAMMABLE, .id = 0, }; static struct clk pck1 = { .name = "pck1", .pmc_mask = AT91_PMC_PCK1, .type = CLK_TYPE_PROGRAMMABLE, .id = 1, }; static void __init at91sam9260_register_clocks(void) { int i; for (i = 0; i < ARRAY_SIZE(periph_clocks); i++) clk_register(periph_clocks[i]); clkdev_add_table(periph_clocks_lookups, ARRAY_SIZE(periph_clocks_lookups)); clkdev_add_table(usart_clocks_lookups, ARRAY_SIZE(usart_clocks_lookups)); clk_register(&pck0); clk_register(&pck1); } static struct clk_lookup console_clock_lookup; void __init at91sam9260_set_console_clock(int id) { if (id >= ARRAY_SIZE(usart_clocks_lookups)) return; console_clock_lookup.con_id = "usart"; console_clock_lookup.clk = usart_clocks_lookups[id].clk; clkdev_add(&console_clock_lookup); } /* -------------------------------------------------------------------- * GPIO * -------------------------------------------------------------------- */ static struct at91_gpio_bank at91sam9260_gpio[] __initdata = { { .id = AT91SAM9260_ID_PIOA, .regbase = AT91SAM9260_BASE_PIOA, }, { .id = AT91SAM9260_ID_PIOB, .regbase = AT91SAM9260_BASE_PIOB, }, { .id = AT91SAM9260_ID_PIOC, .regbase = AT91SAM9260_BASE_PIOC, } }; /* -------------------------------------------------------------------- * AT91SAM9260 processor initialization * -------------------------------------------------------------------- */ static void __init at91sam9xe_map_io(void) { 
unsigned long sram_size; switch (at91_soc_initdata.cidr & AT91_CIDR_SRAMSIZ) { case AT91_CIDR_SRAMSIZ_32K: sram_size = 2 * SZ_16K; break; case AT91_CIDR_SRAMSIZ_16K: default: sram_size = SZ_16K; } at91_init_sram(0, AT91SAM9XE_SRAM_BASE, sram_size); } static void __init at91sam9260_map_io(void) { if (cpu_is_at91sam9xe()) at91sam9xe_map_io(); else if (cpu_is_at91sam9g20()) at91_init_sram(0, AT91SAM9G20_SRAM_BASE, AT91SAM9G20_SRAM_SIZE); else at91_init_sram(0, AT91SAM9260_SRAM_BASE, AT91SAM9260_SRAM_SIZE); } static void __init at91sam9260_ioremap_registers(void) { at91_ioremap_shdwc(AT91SAM9260_BASE_SHDWC); at91_ioremap_rstc(AT91SAM9260_BASE_RSTC); at91_ioremap_ramc(0, AT91SAM9260_BASE_SDRAMC, 512); at91sam926x_ioremap_pit(AT91SAM9260_BASE_PIT); at91sam9_ioremap_smc(0, AT91SAM9260_BASE_SMC); at91_ioremap_matrix(AT91SAM9260_BASE_MATRIX); } static void __init at91sam9260_initialize(void) { arm_pm_idle = at91sam9_idle; arm_pm_restart = at91sam9_alt_restart; at91_extern_irq = (1 << AT91SAM9260_ID_IRQ0) | (1 << AT91SAM9260_ID_IRQ1) | (1 << AT91SAM9260_ID_IRQ2); /* Register GPIO subsystem */ at91_gpio_init(at91sam9260_gpio, 3); } /* -------------------------------------------------------------------- * Interrupt initialization * -------------------------------------------------------------------- */ /* * The default interrupt priority levels (0 = lowest, 7 = highest). 
*/ static unsigned int at91sam9260_default_irq_priority[NR_AIC_IRQS] __initdata = { 7, /* Advanced Interrupt Controller */ 7, /* System Peripherals */ 1, /* Parallel IO Controller A */ 1, /* Parallel IO Controller B */ 1, /* Parallel IO Controller C */ 0, /* Analog-to-Digital Converter */ 5, /* USART 0 */ 5, /* USART 1 */ 5, /* USART 2 */ 0, /* Multimedia Card Interface */ 2, /* USB Device Port */ 6, /* Two-Wire Interface */ 5, /* Serial Peripheral Interface 0 */ 5, /* Serial Peripheral Interface 1 */ 5, /* Serial Synchronous Controller */ 0, 0, 0, /* Timer Counter 0 */ 0, /* Timer Counter 1 */ 0, /* Timer Counter 2 */ 2, /* USB Host port */ 3, /* Ethernet */ 0, /* Image Sensor Interface */ 5, /* USART 3 */ 5, /* USART 4 */ 5, /* USART 5 */ 0, /* Timer Counter 3 */ 0, /* Timer Counter 4 */ 0, /* Timer Counter 5 */ 0, /* Advanced Interrupt Controller */ 0, /* Advanced Interrupt Controller */ 0, /* Advanced Interrupt Controller */ }; struct at91_init_soc __initdata at91sam9260_soc = { .map_io = at91sam9260_map_io, .default_irq_priority = at91sam9260_default_irq_priority, .ioremap_registers = at91sam9260_ioremap_registers, .register_clocks = at91sam9260_register_clocks, .init = at91sam9260_initialize, };
gpl-2.0
CAF-victara/kernel_msm
arch/arm/mach-at91/board-neocore926.c
4730
9025
/* * linux/arch/arm/mach-at91/board-neocore926.c * * Copyright (C) 2005 SAN People * Copyright (C) 2007 Atmel Corporation * Copyright (C) 2008 ADENEO. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/types.h> #include <linux/gpio.h> #include <linux/init.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/spi/spi.h> #include <linux/spi/ads7846.h> #include <linux/fb.h> #include <linux/gpio_keys.h> #include <linux/input.h> #include <video/atmel_lcdc.h> #include <asm/setup.h> #include <asm/mach-types.h> #include <asm/irq.h> #include <asm/sizes.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <asm/mach/irq.h> #include <mach/hardware.h> #include <mach/board.h> #include <mach/at91sam9_smc.h> #include "sam9_smc.h" #include "generic.h" static void __init neocore926_init_early(void) { /* Initialize processor: 20 MHz crystal */ at91_initialize(20000000); /* DBGU on ttyS0. (Rx & Tx only) */ at91_register_uart(0, 0, 0); /* USART0 on ttyS1. 
(Rx, Tx, RTS, CTS) */ at91_register_uart(AT91SAM9263_ID_US0, 1, ATMEL_UART_CTS | ATMEL_UART_RTS); /* set serial console to ttyS0 (ie, DBGU) */ at91_set_serial_console(0); } /* * USB Host port */ static struct at91_usbh_data __initdata neocore926_usbh_data = { .ports = 2, .vbus_pin = { AT91_PIN_PA24, AT91_PIN_PA21 }, .overcurrent_pin= {-EINVAL, -EINVAL}, }; /* * USB Device port */ static struct at91_udc_data __initdata neocore926_udc_data = { .vbus_pin = AT91_PIN_PA25, .pullup_pin = -EINVAL, /* pull-up driven by UDC */ }; /* * ADS7846 Touchscreen */ #if defined(CONFIG_TOUCHSCREEN_ADS7846) || defined(CONFIG_TOUCHSCREEN_ADS7846_MODULE) static int ads7843_pendown_state(void) { return !at91_get_gpio_value(AT91_PIN_PA15); /* Touchscreen PENIRQ */ } static struct ads7846_platform_data ads_info = { .model = 7843, .x_min = 150, .x_max = 3830, .y_min = 190, .y_max = 3830, .vref_delay_usecs = 100, .x_plate_ohms = 450, .y_plate_ohms = 250, .pressure_max = 15000, .debounce_max = 1, .debounce_rep = 0, .debounce_tol = (~0), .get_pendown_state = ads7843_pendown_state, }; static void __init neocore926_add_device_ts(void) { at91_set_B_periph(AT91_PIN_PA15, 1); /* External IRQ1, with pullup */ at91_set_gpio_input(AT91_PIN_PC13, 1); /* Touchscreen BUSY signal */ } #else static void __init neocore926_add_device_ts(void) {} #endif /* * SPI devices. 
*/ static struct spi_board_info neocore926_spi_devices[] = { #if defined(CONFIG_MTD_AT91_DATAFLASH_CARD) { /* DataFlash card */ .modalias = "mtd_dataflash", .chip_select = 0, .max_speed_hz = 15 * 1000 * 1000, .bus_num = 0, }, #endif #if defined(CONFIG_TOUCHSCREEN_ADS7846) || defined(CONFIG_TOUCHSCREEN_ADS7846_MODULE) { .modalias = "ads7846", .chip_select = 1, .max_speed_hz = 125000 * 16, .bus_num = 0, .platform_data = &ads_info, .irq = AT91SAM9263_ID_IRQ1, }, #endif }; /* * MCI (SD/MMC) */ static struct at91_mmc_data __initdata neocore926_mmc_data = { .wire4 = 1, .det_pin = AT91_PIN_PE18, .wp_pin = AT91_PIN_PE19, .vcc_pin = -EINVAL, }; /* * MACB Ethernet device */ static struct macb_platform_data __initdata neocore926_macb_data = { .phy_irq_pin = AT91_PIN_PE31, .is_rmii = 1, }; /* * NAND flash */ static struct mtd_partition __initdata neocore926_nand_partition[] = { { .name = "Linux Kernel", /* "Partition 1", */ .offset = 0, .size = SZ_8M, }, { .name = "Filesystem", /* "Partition 2", */ .offset = MTDPART_OFS_NXTBLK, .size = SZ_32M, }, { .name = "Free", /* "Partition 3", */ .offset = MTDPART_OFS_NXTBLK, .size = MTDPART_SIZ_FULL, }, }; static struct atmel_nand_data __initdata neocore926_nand_data = { .ale = 21, .cle = 22, .rdy_pin = AT91_PIN_PB19, .rdy_pin_active_low = 1, .enable_pin = AT91_PIN_PD15, .ecc_mode = NAND_ECC_SOFT, .parts = neocore926_nand_partition, .num_parts = ARRAY_SIZE(neocore926_nand_partition), .det_pin = -EINVAL, }; static struct sam9_smc_config __initdata neocore926_nand_smc_config = { .ncs_read_setup = 0, .nrd_setup = 1, .ncs_write_setup = 0, .nwe_setup = 1, .ncs_read_pulse = 4, .nrd_pulse = 4, .ncs_write_pulse = 4, .nwe_pulse = 4, .read_cycle = 6, .write_cycle = 6, .mode = AT91_SMC_READMODE | AT91_SMC_WRITEMODE | AT91_SMC_EXNWMODE_DISABLE | AT91_SMC_DBW_8, .tdf_cycles = 2, }; static void __init neocore926_add_device_nand(void) { /* configure chip-select 3 (NAND) */ sam9_smc_configure(0, 3, &neocore926_nand_smc_config); 
at91_add_device_nand(&neocore926_nand_data); } /* * LCD Controller */ #if defined(CONFIG_FB_ATMEL) || defined(CONFIG_FB_ATMEL_MODULE) static struct fb_videomode at91_tft_vga_modes[] = { { .name = "TX09D50VM1CCA @ 60", .refresh = 60, .xres = 240, .yres = 320, .pixclock = KHZ2PICOS(5000), .left_margin = 1, .right_margin = 33, .upper_margin = 1, .lower_margin = 0, .hsync_len = 5, .vsync_len = 1, .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, .vmode = FB_VMODE_NONINTERLACED, }, }; static struct fb_monspecs at91fb_default_monspecs = { .manufacturer = "HIT", .monitor = "TX09D70VM1CCA", .modedb = at91_tft_vga_modes, .modedb_len = ARRAY_SIZE(at91_tft_vga_modes), .hfmin = 15000, .hfmax = 64000, .vfmin = 50, .vfmax = 150, }; #define AT91SAM9263_DEFAULT_LCDCON2 (ATMEL_LCDC_MEMOR_LITTLE \ | ATMEL_LCDC_DISTYPE_TFT \ | ATMEL_LCDC_CLKMOD_ALWAYSACTIVE) static void at91_lcdc_power_control(int on) { at91_set_gpio_value(AT91_PIN_PA30, on); } /* Driver datas */ static struct atmel_lcdfb_info __initdata neocore926_lcdc_data = { .lcdcon_is_backlight = true, .default_bpp = 16, .default_dmacon = ATMEL_LCDC_DMAEN, .default_lcdcon2 = AT91SAM9263_DEFAULT_LCDCON2, .default_monspecs = &at91fb_default_monspecs, .atmel_lcdfb_power_control = at91_lcdc_power_control, .guard_time = 1, .lcd_wiring_mode = ATMEL_LCDC_WIRING_RGB555, }; #else static struct atmel_lcdfb_info __initdata neocore926_lcdc_data; #endif /* * GPIO Buttons */ #if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) static struct gpio_keys_button neocore926_buttons[] = { { /* BP1, "leftclic" */ .code = BTN_LEFT, .gpio = AT91_PIN_PC5, .active_low = 1, .desc = "left_click", .wakeup = 1, }, { /* BP2, "rightclic" */ .code = BTN_RIGHT, .gpio = AT91_PIN_PC4, .active_low = 1, .desc = "right_click", .wakeup = 1, }, }; static struct gpio_keys_platform_data neocore926_button_data = { .buttons = neocore926_buttons, .nbuttons = ARRAY_SIZE(neocore926_buttons), }; static struct platform_device neocore926_button_device = 
{ .name = "gpio-keys", .id = -1, .num_resources = 0, .dev = { .platform_data = &neocore926_button_data, } }; static void __init neocore926_add_device_buttons(void) { at91_set_GPIO_periph(AT91_PIN_PC5, 0); /* left button */ at91_set_deglitch(AT91_PIN_PC5, 1); at91_set_GPIO_periph(AT91_PIN_PC4, 0); /* right button */ at91_set_deglitch(AT91_PIN_PC4, 1); platform_device_register(&neocore926_button_device); } #else static void __init neocore926_add_device_buttons(void) {} #endif /* * AC97 */ static struct ac97c_platform_data neocore926_ac97_data = { .reset_pin = AT91_PIN_PA13, }; static void __init neocore926_board_init(void) { /* Serial */ at91_add_device_serial(); /* USB Host */ at91_add_device_usbh(&neocore926_usbh_data); /* USB Device */ at91_add_device_udc(&neocore926_udc_data); /* SPI */ at91_set_gpio_output(AT91_PIN_PE20, 1); /* select spi0 clock */ at91_add_device_spi(neocore926_spi_devices, ARRAY_SIZE(neocore926_spi_devices)); /* Touchscreen */ neocore926_add_device_ts(); /* MMC */ at91_add_device_mmc(1, &neocore926_mmc_data); /* Ethernet */ at91_add_device_eth(&neocore926_macb_data); /* NAND */ neocore926_add_device_nand(); /* I2C */ at91_add_device_i2c(NULL, 0); /* LCD Controller */ at91_add_device_lcdc(&neocore926_lcdc_data); /* Push Buttons */ neocore926_add_device_buttons(); /* AC97 */ at91_add_device_ac97(&neocore926_ac97_data); } MACHINE_START(NEOCORE926, "ADENEO NEOCORE 926") /* Maintainer: ADENEO */ .timer = &at91sam926x_timer, .map_io = at91_map_io, .init_early = neocore926_init_early, .init_irq = at91_init_irq_default, .init_machine = neocore926_board_init, MACHINE_END
gpl-2.0
RepoBackups/android_kernel_caf_msm8960
arch/arm/mach-shmobile/board-bonito.c
4730
11439
/* * bonito board support * * Copyright (C) 2011 Renesas Solutions Corp. * Copyright (C) 2011 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * */ #include <linux/kernel.h> #include <linux/i2c.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/platform_device.h> #include <linux/gpio.h> #include <linux/smsc911x.h> #include <linux/videodev2.h> #include <mach/common.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <asm/mach/time.h> #include <asm/hardware/cache-l2x0.h> #include <mach/r8a7740.h> #include <mach/irqs.h> #include <video/sh_mobile_lcdc.h> /* * CS Address device note *---------------------------------------------------------------- * 0 0x0000_0000 NOR Flash (64MB) SW12 : bit3 = OFF * 2 0x0800_0000 ExtNOR (64MB) SW12 : bit3 = OFF * 4 - * 5A - * 5B 0x1600_0000 SRAM (8MB) * 6 0x1800_0000 FPGA (64K) * 0x1801_0000 Ether (4KB) * 0x1801_1000 USB (4KB) */ /* * SW12 * * bit1 bit2 bit3 *---------------------------------------------------------------------------- * ON NOR WriteProtect NAND WriteProtect CS0 ExtNOR / CS2 NOR * OFF NOR Not WriteProtect NAND Not WriteProtect CS0 NOR / CS2 ExtNOR */ /* * SCIFA5 (CN42) * * S38.3 = ON * S39.6 = ON * S43.1 = ON */ /* * LCDC0 (CN3/CN4/CN7) * * S38.1 = OFF * S38.2 = OFF */ /* * FPGA */ #define 
IRQSR0 0x0020 #define IRQSR1 0x0022 #define IRQMR0 0x0030 #define IRQMR1 0x0032 #define BUSSWMR1 0x0070 #define BUSSWMR2 0x0072 #define BUSSWMR3 0x0074 #define BUSSWMR4 0x0076 #define LCDCR 0x10B4 #define DEVRSTCR1 0x10D0 #define DEVRSTCR2 0x10D2 #define A1MDSR 0x10E0 #define BVERR 0x1100 /* FPGA IRQ */ #define FPGA_IRQ_BASE (512) #define FPGA_IRQ0 (FPGA_IRQ_BASE) #define FPGA_IRQ1 (FPGA_IRQ_BASE + 16) #define FPGA_ETH_IRQ (FPGA_IRQ0 + 15) static u16 bonito_fpga_read(u32 offset) { return __raw_readw(0xf0003000 + offset); } static void bonito_fpga_write(u32 offset, u16 val) { __raw_writew(val, 0xf0003000 + offset); } static void bonito_fpga_irq_disable(struct irq_data *data) { unsigned int irq = data->irq; u32 addr = (irq < 1016) ? IRQMR0 : IRQMR1; int shift = irq % 16; bonito_fpga_write(addr, bonito_fpga_read(addr) | (1 << shift)); } static void bonito_fpga_irq_enable(struct irq_data *data) { unsigned int irq = data->irq; u32 addr = (irq < 1016) ? IRQMR0 : IRQMR1; int shift = irq % 16; bonito_fpga_write(addr, bonito_fpga_read(addr) & ~(1 << shift)); } static struct irq_chip bonito_fpga_irq_chip __read_mostly = { .name = "bonito FPGA", .irq_mask = bonito_fpga_irq_disable, .irq_unmask = bonito_fpga_irq_enable, }; static void bonito_fpga_irq_demux(unsigned int irq, struct irq_desc *desc) { u32 val = bonito_fpga_read(IRQSR1) << 16 | bonito_fpga_read(IRQSR0); u32 mask = bonito_fpga_read(IRQMR1) << 16 | bonito_fpga_read(IRQMR0); int i; val &= ~mask; for (i = 0; i < 32; i++) { if (!(val & (1 << i))) continue; generic_handle_irq(FPGA_IRQ_BASE + i); } } static void bonito_fpga_init(void) { int i; bonito_fpga_write(IRQMR0, 0xffff); /* mask all */ bonito_fpga_write(IRQMR1, 0xffff); /* mask all */ /* Device reset */ bonito_fpga_write(DEVRSTCR1, (1 << 2)); /* Eth */ /* FPGA irq require special handling */ for (i = FPGA_IRQ_BASE; i < FPGA_IRQ_BASE + 32; i++) { irq_set_chip_and_handler_name(i, &bonito_fpga_irq_chip, handle_level_irq, "level"); set_irq_flags(i, IRQF_VALID); /* 
yuck */ } irq_set_chained_handler(evt2irq(0x0340), bonito_fpga_irq_demux); irq_set_irq_type(evt2irq(0x0340), IRQ_TYPE_LEVEL_LOW); } /* * PMIC settings * * FIXME * * bonito board needs some settings by pmic which use i2c access. * pmic settings use device_initcall() here for use it. */ static __u8 *pmic_settings = NULL; static __u8 pmic_do_2A[] = { 0x1C, 0x09, 0x1A, 0x80, 0xff, 0xff, }; static int __init pmic_init(void) { struct i2c_adapter *a = i2c_get_adapter(0); struct i2c_msg msg; __u8 buf[2]; int i, ret; if (!pmic_settings) return 0; if (!a) return 0; msg.addr = 0x46; msg.buf = buf; msg.len = 2; msg.flags = 0; for (i = 0; ; i += 2) { buf[0] = pmic_settings[i + 0]; buf[1] = pmic_settings[i + 1]; if ((0xff == buf[0]) && (0xff == buf[1])) break; ret = i2c_transfer(a, &msg, 1); if (ret < 0) { pr_err("i2c transfer fail\n"); break; } } return 0; } device_initcall(pmic_init); /* * LCDC0 */ static const struct fb_videomode lcdc0_mode = { .name = "WVGA Panel", .xres = 800, .yres = 480, .left_margin = 88, .right_margin = 40, .hsync_len = 128, .upper_margin = 20, .lower_margin = 5, .vsync_len = 5, .sync = 0, }; static struct sh_mobile_lcdc_info lcdc0_info = { .clock_source = LCDC_CLK_BUS, .ch[0] = { .chan = LCDC_CHAN_MAINLCD, .fourcc = V4L2_PIX_FMT_RGB565, .interface_type = RGB24, .clock_divider = 5, .flags = 0, .lcd_modes = &lcdc0_mode, .num_modes = 1, .panel_cfg = { .width = 152, .height = 91, }, }, }; static struct resource lcdc0_resources[] = { [0] = { .name = "LCDC0", .start = 0xfe940000, .end = 0xfe943fff, .flags = IORESOURCE_MEM, }, [1] = { .start = intcs_evt2irq(0x0580), .flags = IORESOURCE_IRQ, }, }; static struct platform_device lcdc0_device = { .name = "sh_mobile_lcdc_fb", .id = 0, .resource = lcdc0_resources, .num_resources = ARRAY_SIZE(lcdc0_resources), .dev = { .platform_data = &lcdc0_info, .coherent_dma_mask = ~0, }, }; /* * SMSC 9221 */ static struct resource smsc_resources[] = { [0] = { .start = 0x18010000, .end = 0x18011000 - 1, .flags = IORESOURCE_MEM, 
}, [1] = { .start = FPGA_ETH_IRQ, .flags = IORESOURCE_IRQ, }, }; static struct smsc911x_platform_config smsc_platdata = { .flags = SMSC911X_USE_16BIT, .phy_interface = PHY_INTERFACE_MODE_MII, .irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW, .irq_type = SMSC911X_IRQ_TYPE_PUSH_PULL, }; static struct platform_device smsc_device = { .name = "smsc911x", .dev = { .platform_data = &smsc_platdata, }, .resource = smsc_resources, .num_resources = ARRAY_SIZE(smsc_resources), }; /* * core board devices */ static struct platform_device *bonito_core_devices[] __initdata = { }; /* * base board devices */ static struct platform_device *bonito_base_devices[] __initdata = { &lcdc0_device, &smsc_device, }; /* * map I/O */ static struct map_desc bonito_io_desc[] __initdata = { /* * for FPGA (0x1800000-0x19ffffff) * 0x18000000-0x18002000 -> 0xf0003000-0xf0005000 */ { .virtual = 0xf0003000, .pfn = __phys_to_pfn(0x18000000), .length = PAGE_SIZE * 2, .type = MT_DEVICE_NONSHARED } }; static void __init bonito_map_io(void) { r8a7740_map_io(); iotable_init(bonito_io_desc, ARRAY_SIZE(bonito_io_desc)); } /* * board init */ #define BIT_ON(sw, bit) (sw & (1 << bit)) #define BIT_OFF(sw, bit) (!(sw & (1 << bit))) #define VCCQ1CR 0xE6058140 #define VCCQ1LCDCR 0xE6058186 static void __init bonito_init(void) { u16 val; r8a7740_pinmux_init(); bonito_fpga_init(); pmic_settings = pmic_do_2A; /* * core board settings */ #ifdef CONFIG_CACHE_L2X0 /* Early BRESP enable, Shared attribute override enable, 32K*8way */ l2x0_init(IOMEM(0xf0002000), 0x40440000, 0x82000fff); #endif r8a7740_add_standard_devices(); platform_add_devices(bonito_core_devices, ARRAY_SIZE(bonito_core_devices)); /* * base board settings */ gpio_request(GPIO_PORT176, NULL); gpio_direction_input(GPIO_PORT176); if (!gpio_get_value(GPIO_PORT176)) { u16 bsw2; u16 bsw3; u16 bsw4; /* * FPGA */ gpio_request(GPIO_FN_CS5B, NULL); gpio_request(GPIO_FN_CS6A, NULL); gpio_request(GPIO_FN_CS5A_PORT105, NULL); gpio_request(GPIO_FN_IRQ10, NULL); val = 
bonito_fpga_read(BVERR); pr_info("bonito version: cpu %02x, base %02x\n", ((val >> 8) & 0xFF), ((val >> 0) & 0xFF)); bsw2 = bonito_fpga_read(BUSSWMR2); bsw3 = bonito_fpga_read(BUSSWMR3); bsw4 = bonito_fpga_read(BUSSWMR4); /* * SCIFA5 (CN42) */ if (BIT_OFF(bsw2, 1) && /* S38.3 = ON */ BIT_OFF(bsw3, 9) && /* S39.6 = ON */ BIT_OFF(bsw4, 4)) { /* S43.1 = ON */ gpio_request(GPIO_FN_SCIFA5_TXD_PORT91, NULL); gpio_request(GPIO_FN_SCIFA5_RXD_PORT92, NULL); } /* * LCDC0 (CN3) */ if (BIT_ON(bsw2, 3) && /* S38.1 = OFF */ BIT_ON(bsw2, 2)) { /* S38.2 = OFF */ gpio_request(GPIO_FN_LCDC0_SELECT, NULL); gpio_request(GPIO_FN_LCD0_D0, NULL); gpio_request(GPIO_FN_LCD0_D1, NULL); gpio_request(GPIO_FN_LCD0_D2, NULL); gpio_request(GPIO_FN_LCD0_D3, NULL); gpio_request(GPIO_FN_LCD0_D4, NULL); gpio_request(GPIO_FN_LCD0_D5, NULL); gpio_request(GPIO_FN_LCD0_D6, NULL); gpio_request(GPIO_FN_LCD0_D7, NULL); gpio_request(GPIO_FN_LCD0_D8, NULL); gpio_request(GPIO_FN_LCD0_D9, NULL); gpio_request(GPIO_FN_LCD0_D10, NULL); gpio_request(GPIO_FN_LCD0_D11, NULL); gpio_request(GPIO_FN_LCD0_D12, NULL); gpio_request(GPIO_FN_LCD0_D13, NULL); gpio_request(GPIO_FN_LCD0_D14, NULL); gpio_request(GPIO_FN_LCD0_D15, NULL); gpio_request(GPIO_FN_LCD0_D16, NULL); gpio_request(GPIO_FN_LCD0_D17, NULL); gpio_request(GPIO_FN_LCD0_D18_PORT163, NULL); gpio_request(GPIO_FN_LCD0_D19_PORT162, NULL); gpio_request(GPIO_FN_LCD0_D20_PORT161, NULL); gpio_request(GPIO_FN_LCD0_D21_PORT158, NULL); gpio_request(GPIO_FN_LCD0_D22_PORT160, NULL); gpio_request(GPIO_FN_LCD0_D23_PORT159, NULL); gpio_request(GPIO_FN_LCD0_DCK, NULL); gpio_request(GPIO_FN_LCD0_VSYN, NULL); gpio_request(GPIO_FN_LCD0_HSYN, NULL); gpio_request(GPIO_FN_LCD0_DISP, NULL); gpio_request(GPIO_FN_LCD0_LCLK_PORT165, NULL); gpio_request(GPIO_PORT61, NULL); /* LCDDON */ gpio_direction_output(GPIO_PORT61, 1); /* backlight on */ bonito_fpga_write(LCDCR, 1); /* drivability Max */ __raw_writew(0x00FF , VCCQ1LCDCR); __raw_writew(0xFFFF , VCCQ1CR); } 
platform_add_devices(bonito_base_devices, ARRAY_SIZE(bonito_base_devices)); } } static void __init bonito_earlytimer_init(void) { u16 val; u8 md_ck = 0; /* read MD_CK value */ val = bonito_fpga_read(A1MDSR); if (val & (1 << 10)) md_ck |= MD_CK2; if (val & (1 << 9)) md_ck |= MD_CK1; if (val & (1 << 8)) md_ck |= MD_CK0; r8a7740_clock_init(md_ck); shmobile_earlytimer_init(); } void __init bonito_add_early_devices(void) { r8a7740_add_early_devices(); /* override timer setup with board-specific code */ shmobile_timer.init = bonito_earlytimer_init; } MACHINE_START(BONITO, "bonito") .map_io = bonito_map_io, .init_early = bonito_add_early_devices, .init_irq = r8a7740_init_irq, .handle_irq = shmobile_handle_irq_intc, .init_machine = bonito_init, .timer = &shmobile_timer, MACHINE_END
gpl-2.0
IdeosDev/semc_urushi_kernel_3.0
drivers/net/ixp2000/ixp2400-msf.c
9850
5416
/* * Generic library functions for the MSF (Media and Switch Fabric) unit * found on the Intel IXP2400 network processor. * * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org> * Dedicated to Marija Kulikova. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of the * License, or (at your option) any later version. */ #include <linux/kernel.h> #include <linux/init.h> #include <mach/hardware.h> #include <mach/ixp2000-regs.h> #include <asm/delay.h> #include <asm/io.h> #include "ixp2400-msf.h" /* * This is the Intel recommended PLL init procedure as described on * page 340 of the IXP2400/IXP2800 Programmer's Reference Manual. */ static void ixp2400_pll_init(struct ixp2400_msf_parameters *mp) { int rx_dual_clock; int tx_dual_clock; u32 value; /* * If the RX mode is not 1x32, we have to enable both RX PLLs * (#0 and #1.) The same thing for the TX direction. */ rx_dual_clock = !!(mp->rx_mode & IXP2400_RX_MODE_WIDTH_MASK); tx_dual_clock = !!(mp->tx_mode & IXP2400_TX_MODE_WIDTH_MASK); /* * Read initial value. */ value = ixp2000_reg_read(IXP2000_MSF_CLK_CNTRL); /* * Put PLLs in powerdown and bypass mode. */ value |= 0x0000f0f0; ixp2000_reg_write(IXP2000_MSF_CLK_CNTRL, value); /* * Set single or dual clock mode bits. */ value &= ~0x03000000; value |= (rx_dual_clock << 24) | (tx_dual_clock << 25); /* * Set multipliers. */ value &= ~0x00ff0000; value |= mp->rxclk01_multiplier << 16; value |= mp->rxclk23_multiplier << 18; value |= mp->txclk01_multiplier << 20; value |= mp->txclk23_multiplier << 22; /* * And write value. */ ixp2000_reg_write(IXP2000_MSF_CLK_CNTRL, value); /* * Disable PLL bypass mode. */ value &= ~(0x00005000 | rx_dual_clock << 13 | tx_dual_clock << 15); ixp2000_reg_write(IXP2000_MSF_CLK_CNTRL, value); /* * Turn on PLLs. 
*/ value &= ~(0x00000050 | rx_dual_clock << 5 | tx_dual_clock << 7); ixp2000_reg_write(IXP2000_MSF_CLK_CNTRL, value); /* * Wait for PLLs to lock. There are lock status bits, but IXP2400 * erratum #65 says that these lock bits should not be relied upon * as they might not accurately reflect the true state of the PLLs. */ udelay(100); } /* * Needed according to p480 of Programmer's Reference Manual. */ static void ixp2400_msf_free_rbuf_entries(struct ixp2400_msf_parameters *mp) { int size_bits; int i; /* * Work around IXP2400 erratum #69 (silent RBUF-to-DRAM transfer * corruption) in the Intel-recommended way: do not add the RBUF * elements susceptible to corruption to the freelist. */ size_bits = mp->rx_mode & IXP2400_RX_MODE_RBUF_SIZE_MASK; if (size_bits == IXP2400_RX_MODE_RBUF_SIZE_64) { for (i = 1; i < 128; i++) { if (i == 9 || i == 18 || i == 27) continue; ixp2000_reg_write(IXP2000_MSF_RBUF_ELEMENT_DONE, i); } } else if (size_bits == IXP2400_RX_MODE_RBUF_SIZE_128) { for (i = 1; i < 64; i++) { if (i == 4 || i == 9 || i == 13) continue; ixp2000_reg_write(IXP2000_MSF_RBUF_ELEMENT_DONE, i); } } else if (size_bits == IXP2400_RX_MODE_RBUF_SIZE_256) { for (i = 1; i < 32; i++) { if (i == 2 || i == 4 || i == 6) continue; ixp2000_reg_write(IXP2000_MSF_RBUF_ELEMENT_DONE, i); } } } static u32 ixp2400_msf_valid_channels(u32 reg) { u32 channels; channels = 0; switch (reg & IXP2400_RX_MODE_WIDTH_MASK) { case IXP2400_RX_MODE_1x32: channels = 0x1; if (reg & IXP2400_RX_MODE_MPHY && !(reg & IXP2400_RX_MODE_MPHY_32)) channels = 0xf; break; case IXP2400_RX_MODE_2x16: channels = 0x5; break; case IXP2400_RX_MODE_4x8: channels = 0xf; break; case IXP2400_RX_MODE_1x16_2x8: channels = 0xd; break; } return channels; } static void ixp2400_msf_enable_rx(struct ixp2400_msf_parameters *mp) { u32 value; value = ixp2000_reg_read(IXP2000_MSF_RX_CONTROL) & 0x0fffffff; value |= ixp2400_msf_valid_channels(mp->rx_mode) << 28; ixp2000_reg_write(IXP2000_MSF_RX_CONTROL, value); } static void 
ixp2400_msf_enable_tx(struct ixp2400_msf_parameters *mp) { u32 value; value = ixp2000_reg_read(IXP2000_MSF_TX_CONTROL) & 0x0fffffff; value |= ixp2400_msf_valid_channels(mp->tx_mode) << 28; ixp2000_reg_write(IXP2000_MSF_TX_CONTROL, value); } void ixp2400_msf_init(struct ixp2400_msf_parameters *mp) { u32 value; int i; /* * Init the RX/TX PLLs based on the passed parameter block. */ ixp2400_pll_init(mp); /* * Reset MSF. Bit 7 in IXP_RESET_0 resets the MSF. */ value = ixp2000_reg_read(IXP2000_RESET0); ixp2000_reg_write(IXP2000_RESET0, value | 0x80); ixp2000_reg_write(IXP2000_RESET0, value & ~0x80); /* * Initialise the RX section. */ ixp2000_reg_write(IXP2000_MSF_RX_MPHY_POLL_LIMIT, mp->rx_poll_ports - 1); ixp2000_reg_write(IXP2000_MSF_RX_CONTROL, mp->rx_mode); for (i = 0; i < 4; i++) { ixp2000_reg_write(IXP2000_MSF_RX_UP_CONTROL_0 + i, mp->rx_channel_mode[i]); } ixp2400_msf_free_rbuf_entries(mp); ixp2400_msf_enable_rx(mp); /* * Initialise the TX section. */ ixp2000_reg_write(IXP2000_MSF_TX_MPHY_POLL_LIMIT, mp->tx_poll_ports - 1); ixp2000_reg_write(IXP2000_MSF_TX_CONTROL, mp->tx_mode); for (i = 0; i < 4; i++) { ixp2000_reg_write(IXP2000_MSF_TX_UP_CONTROL_0 + i, mp->tx_channel_mode[i]); } ixp2400_msf_enable_tx(mp); }
gpl-2.0
Declipe/ElunaTrinityWotlk
dep/efsw/src/efsw/String.cpp
123
15106
#include <iterator> #include <efsw/String.hpp> #include <efsw/Utf.hpp> namespace efsw { const std::size_t String::InvalidPos = StringType::npos; std::vector < std::string > String::split ( const std::string& str, const char& splitchar, const bool& pushEmptyString ) { std::vector < std::string > tmp; std::string tmpstr; for ( size_t i = 0; i < str.size(); i++ ) { if ( str[i] == splitchar ) { if ( pushEmptyString || tmpstr.size() ) { tmp.push_back(tmpstr); tmpstr = ""; } } else { tmpstr += str[i]; } } if ( tmpstr.size() ) { tmp.push_back( tmpstr ); } return tmp; } std::vector < String > String::split ( const String& str, const Uint32& splitchar, const bool& pushEmptyString ) { std::vector < String > tmp; String tmpstr; for ( size_t i = 0; i < str.size(); i++ ) { if ( str[i] == splitchar ) { if ( pushEmptyString || tmpstr.size() ) { tmp.push_back(tmpstr); tmpstr = ""; } } else { tmpstr += str[i]; } } if ( tmpstr.size() ) { tmp.push_back( tmpstr ); } return tmp; } int String::strStartsWith( const std::string& start, const std::string& str ) { int pos = -1; size_t size = start.size(); if ( str.size() >= size ) { for ( std::size_t i = 0; i < size; i++ ) { if ( start[i] == str[i] ) { pos = (int)i; } else { pos = -1; break; } } } return pos; } int String::strStartsWith( const String& start, const String& str ) { int pos = -1; size_t size = start.size(); if ( str.size() >= size ) { for ( std::size_t i = 0; i < size; i++ ) { if ( start[i] == str[i] ) { pos = (int)i; } else { pos = -1; break; } } } return pos; } String::String() { } String::String(char ansiChar, const std::locale& locale) { mString += Utf32::DecodeAnsi(ansiChar, locale); } #ifndef EFSW_NO_WIDECHAR String::String(wchar_t wideChar) { mString += Utf32::DecodeWide(wideChar); } #endif String::String(StringBaseType utf32Char) { mString += utf32Char; } String::String( const char* uf8String ) { if (uf8String) { std::size_t length = strlen(uf8String); if (length > 0) { mString.reserve(length + 1); 
Utf8::ToUtf32(uf8String, uf8String + length, std::back_inserter(mString)); } } } String::String( const std::string& utf8String ) { mString.reserve( utf8String.length() + 1 ); Utf8::ToUtf32( utf8String.begin(), utf8String.end(), std::back_inserter( mString ) ); } String::String(const char* ansiString, const std::locale& locale) { if (ansiString) { std::size_t length = strlen(ansiString); if (length > 0) { mString.reserve(length + 1); Utf32::FromAnsi(ansiString, ansiString + length, std::back_inserter(mString), locale); } } } String::String(const std::string& ansiString, const std::locale& locale) { mString.reserve(ansiString.length() + 1); Utf32::FromAnsi(ansiString.begin(), ansiString.end(), std::back_inserter(mString), locale); } #ifndef EFSW_NO_WIDECHAR String::String(const wchar_t* wideString) { if (wideString) { std::size_t length = std::wcslen(wideString); if (length > 0) { mString.reserve(length + 1); Utf32::FromWide(wideString, wideString + length, std::back_inserter(mString)); } } } String::String(const std::wstring& wideString) { mString.reserve(wideString.length() + 1); Utf32::FromWide(wideString.begin(), wideString.end(), std::back_inserter(mString)); } #endif String::String(const StringBaseType* utf32String) { if (utf32String) mString = utf32String; } String::String(const StringType& utf32String) : mString(utf32String) { } String::String(const String& str) : mString(str.mString) { } String String::fromUtf8( const std::string& utf8String ) { String::StringType utf32; utf32.reserve( utf8String.length() + 1 ); Utf8::ToUtf32( utf8String.begin(), utf8String.end(), std::back_inserter( utf32 ) ); return String( utf32 ); } String::operator std::string() const { return toAnsiString(); } std::string String::toAnsiString(const std::locale& locale) const { // Prepare the output string std::string output; output.reserve(mString.length() + 1); // Convert Utf32::ToAnsi(mString.begin(), mString.end(), std::back_inserter(output), 0, locale); return output; } #ifndef 
EFSW_NO_WIDECHAR std::wstring String::toWideString() const { // Prepare the output string std::wstring output; output.reserve(mString.length() + 1); // Convert Utf32::ToWide(mString.begin(), mString.end(), std::back_inserter(output), 0); return output; } #endif std::string String::toUtf8() const { // Prepare the output string std::string output; output.reserve(mString.length() + 1); // Convert Utf32::toUtf8(mString.begin(), mString.end(), std::back_inserter(output) ); return output; } String& String::operator =(const String& right) { mString = right.mString; return *this; } String& String::operator =( const StringBaseType& right ) { mString = right; return *this; } String& String::operator +=(const String& right) { mString += right.mString; return *this; } String& String::operator +=( const StringBaseType& right ) { mString += right; return *this; } String::StringBaseType String::operator [](std::size_t index) const { return mString[index]; } String::StringBaseType& String::operator [](std::size_t index) { return mString[index]; } String::StringBaseType String::at( std::size_t index ) const { return mString.at( index ); } void String::push_back( StringBaseType c ) { mString.push_back( c ); } void String::swap ( String& str ) { mString.swap( str.mString ); } void String::clear() { mString.clear(); } std::size_t String::size() const { return mString.size(); } std::size_t String::length() const { return mString.length(); } bool String::empty() const { return mString.empty(); } void String::erase(std::size_t position, std::size_t count) { mString.erase(position, count); } String& String::insert(std::size_t position, const String& str) { mString.insert(position, str.mString); return *this; } String& String::insert( std::size_t pos1, const String& str, std::size_t pos2, std::size_t n ) { mString.insert( pos1, str.mString, pos2, n ); return *this; } String& String::insert ( size_t pos1, const char* s, size_t n ) { String tmp( s ); mString.insert( pos1, tmp.data(), n ); 
return *this; } String& String::insert ( size_t pos1, size_t n, char c ) { mString.insert( pos1, n, c ); return *this; } String& String::insert ( size_t pos1, const char* s ) { String tmp( s ); mString.insert( pos1, tmp.data() ); return *this; } String::Iterator String::insert ( Iterator p, char c ) { return mString.insert( p, c ); } void String::insert ( Iterator p, size_t n, char c ) { mString.insert( p, n, c ); } const String::StringBaseType* String::c_str() const { return mString.c_str(); } const String::StringBaseType* String::data() const { return mString.data(); } String::Iterator String::begin() { return mString.begin(); } String::ConstIterator String::begin() const { return mString.begin(); } String::Iterator String::end() { return mString.end(); } String::ConstIterator String::end() const { return mString.end(); } String::ReverseIterator String::rbegin() { return mString.rbegin(); } String::ConstReverseIterator String::rbegin() const { return mString.rbegin(); } String::ReverseIterator String::rend() { return mString.rend(); } String::ConstReverseIterator String::rend() const { return mString.rend(); } void String::resize( std::size_t n, StringBaseType c ) { mString.resize( n, c ); } void String::resize( std::size_t n ) { mString.resize( n ); } std::size_t String::max_size() const { return mString.max_size(); } void String::reserve( size_t res_arg ) { mString.reserve( res_arg ); } std::size_t String::capacity() const { return mString.capacity(); } String& String::assign ( const String& str ) { mString.assign( str.mString ); return *this; } String& String::assign ( const String& str, size_t pos, size_t n ) { mString.assign( str.mString, pos, n ); return *this; } String& String::assign ( const char* s, size_t n ) { String tmp( s ); mString.assign( tmp.mString ); return *this; } String& String::assign ( const char* s ) { String tmp( s ); mString.assign( tmp.mString ); return *this; } String& String::assign ( size_t n, char c ) { mString.assign( n, c ); 
return *this; } String& String::append ( const String& str ) { mString.append( str.mString ); return *this; } String& String::append ( const String& str, size_t pos, size_t n ) { mString.append( str.mString, pos, n ); return *this; } String& String::append ( const char* s, size_t n ) { String tmp( s ); mString.append( tmp.mString ); return *this; } String& String::append ( const char* s ) { String tmp( s ); mString.append( tmp.mString ); return *this; } String& String::append ( size_t n, char c ) { mString.append( n, c ); return *this; } String& String::append ( std::size_t n, StringBaseType c ) { mString.append( n, c ); return *this; } String& String::replace ( size_t pos1, size_t n1, const String& str ) { mString.replace( pos1, n1, str.mString ); return *this; } String& String::replace ( Iterator i1, Iterator i2, const String& str ) { mString.replace( i1, i2, str.mString ); return *this; } String& String::replace ( size_t pos1, size_t n1, const String& str, size_t pos2, size_t n2 ) { mString.replace( pos1, n1, str.mString, pos2, n2 ); return *this; } String& String::replace ( size_t pos1, size_t n1, const char* s, size_t n2 ) { String tmp( s ); mString.replace( pos1, n1, tmp.data(), n2 ); return *this; } String& String::replace ( Iterator i1, Iterator i2, const char* s, size_t n2 ) { String tmp( s ); mString.replace( i1, i2, tmp.data(), n2 ); return *this; } String& String::replace ( size_t pos1, size_t n1, const char* s ) { String tmp( s ); mString.replace( pos1, n1, tmp.mString ); return *this; } String& String::replace ( Iterator i1, Iterator i2, const char* s ) { String tmp( s ); mString.replace( i1, i2, tmp.mString ); return *this; } String& String::replace ( size_t pos1, size_t n1, size_t n2, char c ) { mString.replace( pos1, n1, n2, (StringBaseType)c ); return *this; } String& String::replace ( Iterator i1, Iterator i2, size_t n2, char c ) { mString.replace( i1, i2, n2, (StringBaseType)c ); return *this; } std::size_t String::find( const String& str, 
std::size_t start ) const { return mString.find( str.mString, start ); } std::size_t String::find ( const char* s, std::size_t pos, std::size_t n ) const { return find( String( s ), pos ); } std::size_t String::find ( const char* s, std::size_t pos ) const { return find( String( s ), pos ); } size_t String::find ( char c, std::size_t pos ) const { return mString.find( (StringBaseType)c, pos ); } std::size_t String::rfind ( const String& str, std::size_t pos ) const { return mString.rfind( str.mString, pos ); } std::size_t String::rfind ( const char* s, std::size_t pos, std::size_t n ) const { return rfind( String( s ), pos ); } std::size_t String::rfind ( const char* s, std::size_t pos ) const { return rfind( String( s ), pos ); } std::size_t String::rfind ( char c, std::size_t pos ) const { return mString.rfind( c, pos ); } std::size_t String::copy ( StringBaseType* s, std::size_t n, std::size_t pos ) const { return mString.copy( s, n, pos ); } String String::substr ( std::size_t pos, std::size_t n ) const { return String( mString.substr( pos, n ) ); } int String::compare ( const String& str ) const { return mString.compare( str.mString ); } int String::compare ( const char* s ) const { return compare( String( s ) ); } int String::compare ( std::size_t pos1, std::size_t n1, const String& str ) const { return mString.compare( pos1, n1, str.mString ); } int String::compare ( std::size_t pos1, std::size_t n1, const char* s) const { return compare( pos1, n1, String( s ) ); } int String::compare ( std::size_t pos1, std::size_t n1, const String& str, std::size_t pos2, std::size_t n2 ) const { return mString.compare( pos1, n1, str.mString, pos2, n2 ); } int String::compare ( std::size_t pos1, std::size_t n1, const char* s, std::size_t n2) const { return compare( pos1, n1, String( s ), 0, n2 ); } std::size_t String::find_first_of ( const String& str, std::size_t pos ) const { return mString.find_first_of( str.mString, pos ); } std::size_t String::find_first_of ( const 
char* s, std::size_t pos, std::size_t n ) const { return find_first_of( String( s ), pos ); } std::size_t String::find_first_of ( const char* s, std::size_t pos ) const { return find_first_of( String( s ), pos ); } std::size_t String::find_first_of ( StringBaseType c, std::size_t pos ) const { return mString.find_first_of( c, pos ); } std::size_t String::find_last_of ( const String& str, std::size_t pos ) const { return mString.find_last_of( str.mString, pos ); } std::size_t String::find_last_of ( const char* s, std::size_t pos, std::size_t n ) const { return find_last_of( String( s ), pos ); } std::size_t String::find_last_of ( const char* s, std::size_t pos ) const { return find_last_of( String( s ), pos ); } std::size_t String::find_last_of ( StringBaseType c, std::size_t pos) const { return mString.find_last_of( c, pos ); } std::size_t String::find_first_not_of ( const String& str, std::size_t pos ) const { return mString.find_first_not_of( str.mString, pos ); } std::size_t String::find_first_not_of ( const char* s, std::size_t pos, std::size_t n ) const { return find_first_not_of( String( s ), pos ); } std::size_t String::find_first_not_of ( const char* s, std::size_t pos ) const { return find_first_not_of( String( s ), pos ); } std::size_t String::find_first_not_of ( StringBaseType c, std::size_t pos ) const { return mString.find_first_not_of( c, pos ); } std::size_t String::find_last_not_of ( const String& str, std::size_t pos ) const { return mString.find_last_not_of( str.mString, pos ); } std::size_t String::find_last_not_of ( const char* s, std::size_t pos, std::size_t n ) const { return find_last_not_of( String( s ), pos ); } std::size_t String::find_last_not_of ( const char* s, std::size_t pos ) const { return find_last_not_of( String( s ), pos ); } std::size_t String::find_last_not_of ( StringBaseType c, std::size_t pos ) const { return mString.find_last_not_of( c, pos ); } bool operator ==(const String& left, const String& right) { return left.mString 
== right.mString; } bool operator !=(const String& left, const String& right) { return !(left == right); } bool operator <(const String& left, const String& right) { return left.mString < right.mString; } bool operator >(const String& left, const String& right) { return right < left; } bool operator <=(const String& left, const String& right) { return !(right < left); } bool operator >=(const String& left, const String& right) { return !(left < right); } String operator +(const String& left, const String& right) { String string = left; string += right; return string; } }
gpl-2.0