repo_name
string
path
string
copies
string
size
string
content
string
license
string
jawad6233/bindu-kernel-base
sound/isa/galaxy/galaxy.c
5077
14941
/*
 * Aztech AZT1605/AZT2316 Driver
 * Copyright (C) 2007,2010 Rene Herman
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 */

/*
 * NOTE(review): CRD_NAME, DRV_NAME, DEV_NAME and the GALAXY_CONFIG_* /
 * GALAXY_DSP_* constants are not defined in this file — presumably they are
 * supplied by a chip-specific wrapper that #includes this file (the AZT1605 /
 * AZT2316 #ifdef sections below support that reading). Confirm against the
 * surrounding tree.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/isa.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <asm/processor.h>
#include <sound/core.h>
#include <sound/initval.h>
#include <sound/wss.h>
#include <sound/mpu401.h>
#include <sound/opl3.h>

MODULE_DESCRIPTION(CRD_NAME);
MODULE_AUTHOR("Rene Herman");
MODULE_LICENSE("GPL");

/* Standard ALSA per-card module parameters (one slot per possible card). */
static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;
static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;
static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE;

module_param_array(index, int, NULL, 0444);
MODULE_PARM_DESC(index, "Index value for " CRD_NAME " soundcard.");
module_param_array(id, charp, NULL, 0444);
MODULE_PARM_DESC(id, "ID string for " CRD_NAME " soundcard.");
module_param_array(enable, bool, NULL, 0444);
MODULE_PARM_DESC(enable, "Enable " CRD_NAME " soundcard.");

/* Per-card resource parameters; validated/encoded in snd_galaxy_match(). */
static long port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT;
static long wss_port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT;
static long mpu_port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT;
static long fm_port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT;
static int irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ;
static int mpu_irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ;
static int dma1[SNDRV_CARDS] = SNDRV_DEFAULT_DMA;
static int dma2[SNDRV_CARDS] = SNDRV_DEFAULT_DMA;

module_param_array(port, long, NULL, 0444);
MODULE_PARM_DESC(port, "Port # for " CRD_NAME " driver.");
module_param_array(wss_port, long, NULL, 0444);
MODULE_PARM_DESC(wss_port, "WSS port # for " CRD_NAME " driver.");
module_param_array(mpu_port, long, NULL, 0444);
MODULE_PARM_DESC(mpu_port, "MPU-401 port # for " CRD_NAME " driver.");
module_param_array(fm_port, long, NULL, 0444);
MODULE_PARM_DESC(fm_port, "FM port # for " CRD_NAME " driver.");
module_param_array(irq, int, NULL, 0444);
MODULE_PARM_DESC(irq, "IRQ # for " CRD_NAME " driver.");
module_param_array(mpu_irq, int, NULL, 0444);
MODULE_PARM_DESC(mpu_irq, "MPU-401 IRQ # for " CRD_NAME " driver.");
module_param_array(dma1, int, NULL, 0444);
MODULE_PARM_DESC(dma1, "Playback DMA # for " CRD_NAME " driver.");
module_param_array(dma2, int, NULL, 0444);
MODULE_PARM_DESC(dma2, "Capture DMA # for " CRD_NAME " driver.");

/*
 * Generic SB DSP support routines
 */

/* SB DSP register offsets relative to the mapped base port. */
#define DSP_PORT_RESET		0x6
#define DSP_PORT_READ		0xa
#define DSP_PORT_COMMAND	0xc
#define DSP_PORT_STATUS		0xc
#define DSP_PORT_DATA_AVAIL	0xe

#define DSP_SIGNATURE		0xaa

#define DSP_COMMAND_GET_VERSION	0xe1

/*
 * Read one byte from the DSP data port.
 *
 * Busy-waits (bounded to 1000 iterations) for the data-available bit (0x80)
 * before reading. Returns 0 on success, -EIO on timeout.
 */
static int __devinit dsp_get_byte(void __iomem *port, u8 *val)
{
	int loops = 1000;

	while (!(ioread8(port + DSP_PORT_DATA_AVAIL) & 0x80)) {
		if (!loops--)
			return -EIO;
		cpu_relax();
	}
	*val = ioread8(port + DSP_PORT_READ);
	return 0;
}

/*
 * Reset the DSP and verify it answers with the expected signature byte.
 * Returns 0 on success, -ENODEV if no (working) DSP is present.
 */
static int __devinit dsp_reset(void __iomem *port)
{
	u8 val;

	iowrite8(1, port + DSP_PORT_RESET);
	udelay(10);
	iowrite8(0, port + DSP_PORT_RESET);
	if (dsp_get_byte(port, &val) < 0 || val != DSP_SIGNATURE)
		return -ENODEV;

	return 0;
}

/*
 * Write one command byte to the DSP, waiting (bounded) for the busy bit
 * (0x80 in the status register) to clear first. Returns 0 or -EIO.
 */
static int __devinit dsp_command(void __iomem *port, u8 cmd)
{
	int loops = 1000;

	while (ioread8(port + DSP_PORT_STATUS) & 0x80) {
		if (!loops--)
			return -EIO;
		cpu_relax();
	}
	iowrite8(cmd, port + DSP_PORT_COMMAND);
	return 0;
}

/*
 * Query the DSP version; major/minor are returned as two consecutive bytes
 * after the GET_VERSION command. Returns 0 or a negative error code.
 */
static int __devinit dsp_get_version(void __iomem *port, u8 *major, u8 *minor)
{
	int err;

	err = dsp_command(port, DSP_COMMAND_GET_VERSION);
	if (err < 0)
		return err;

	err = dsp_get_byte(port, major);
	if (err < 0)
		return err;

	err = dsp_get_byte(port, minor);
	if (err < 0)
		return err;

	return 0;
}

/*
 * Generic WSS support routines
 */

/* Bit encodings for the WSS config register (DMA in bits 0-1, IRQ in 3-5). */
#define WSS_CONFIG_DMA_0	(1 << 0)
#define WSS_CONFIG_DMA_1	(2 << 0)
#define WSS_CONFIG_DMA_3	(3 << 0)
#define WSS_CONFIG_DUPLEX	(1 << 2)
#define WSS_CONFIG_IRQ_7	(1 << 3)
#define WSS_CONFIG_IRQ_9	(2 << 3)
#define WSS_CONFIG_IRQ_10	(3 << 3)
#define WSS_CONFIG_IRQ_11	(4 << 3)

#define WSS_PORT_CONFIG		0
#define WSS_PORT_SIGNATURE	3

#define WSS_SIGNATURE		4

/*
 * Probe for a WSS codec: the low 6 bits of the signature port must read
 * back as WSS_SIGNATURE. Returns 0 if found, -ENODEV otherwise.
 */
static int __devinit wss_detect(void __iomem *wss_port)
{
	if ((ioread8(wss_port + WSS_PORT_SIGNATURE) & 0x3f) != WSS_SIGNATURE)
		return -ENODEV;

	return 0;
}

/* Program the WSS config register (IRQ/DMA/duplex selection). */
static void wss_set_config(void __iomem *wss_port, u8 wss_config)
{
	iowrite8(wss_config, wss_port + WSS_PORT_CONFIG);
}

/*
 * Aztech Sound Galaxy specifics
 */

/* Config register block lives GALAXY_PORT_CONFIG bytes above the SB base. */
#define GALAXY_PORT_CONFIG	1024
#define CONFIG_PORT_SET		4

#define DSP_COMMAND_GALAXY_8	8
#define GALAXY_COMMAND_GET_TYPE	5

#define DSP_COMMAND_GALAXY_9	9
#define GALAXY_COMMAND_WSSMODE	0
#define GALAXY_COMMAND_SB8MODE	1

#define GALAXY_MODE_WSS		GALAXY_COMMAND_WSSMODE
#define GALAXY_MODE_SB8		GALAXY_COMMAND_SB8MODE

/* Per-card driver state; lives in the snd_card's private_data. */
struct snd_galaxy {
	void __iomem *port;		/* mapped SB DSP base */
	void __iomem *config_port;	/* mapped config register block */
	void __iomem *wss_port;		/* mapped WSS register block */
	u32 config;			/* original config, restored on free */
	struct resource *res_port;
	struct resource *res_config_port;
	struct resource *res_wss_port;
};

/* Per-card values built up during snd_galaxy_match(), used in probe. */
static u32 config[SNDRV_CARDS];
static u8 wss_config[SNDRV_CARDS];

/*
 * isa_driver ->match callback: validate the module parameters for card n and
 * translate them into the config[n]/wss_config[n] register encodings.
 * Returns 1 when the card should be probed, 0 to skip it.
 */
static int __devinit snd_galaxy_match(struct device *dev, unsigned int n)
{
	if (!enable[n])
		return 0;

	switch (port[n]) {
	case SNDRV_AUTO_PORT:
		dev_err(dev, "please specify port\n");
		return 0;
	case 0x220:
		config[n] |= GALAXY_CONFIG_SBA_220;
		break;
	case 0x240:
		config[n] |= GALAXY_CONFIG_SBA_240;
		break;
	case 0x260:
		config[n] |= GALAXY_CONFIG_SBA_260;
		break;
	case 0x280:
		config[n] |= GALAXY_CONFIG_SBA_280;
		break;
	default:
		dev_err(dev, "invalid port %#lx\n", port[n]);
		return 0;
	}

	switch (wss_port[n]) {
	case SNDRV_AUTO_PORT:
		dev_err(dev, "please specify wss_port\n");
		return 0;
	case 0x530:
		config[n] |= GALAXY_CONFIG_WSS_ENABLE | GALAXY_CONFIG_WSSA_530;
		break;
	case 0x604:
		config[n] |= GALAXY_CONFIG_WSS_ENABLE | GALAXY_CONFIG_WSSA_604;
		break;
	case 0xe80:
		config[n] |= GALAXY_CONFIG_WSS_ENABLE | GALAXY_CONFIG_WSSA_E80;
		break;
	case 0xf40:
		config[n] |= GALAXY_CONFIG_WSS_ENABLE | GALAXY_CONFIG_WSSA_F40;
		break;
	default:
		dev_err(dev, "invalid WSS port %#lx\n", wss_port[n]);
		return 0;
	}

	switch (irq[n]) {
	case SNDRV_AUTO_IRQ:
		dev_err(dev, "please specify irq\n");
		return 0;
	case 7:
		wss_config[n] |= WSS_CONFIG_IRQ_7;
		break;
	case 2:
		irq[n] = 9;
		/* deliberate fall through: IRQ 2 is cascaded to 9 */
	case 9:
		wss_config[n] |= WSS_CONFIG_IRQ_9;
		break;
	case 10:
		wss_config[n] |= WSS_CONFIG_IRQ_10;
		break;
	case 11:
		wss_config[n] |= WSS_CONFIG_IRQ_11;
		break;
	default:
		dev_err(dev, "invalid IRQ %d\n", irq[n]);
		return 0;
	}

	switch (dma1[n]) {
	case SNDRV_AUTO_DMA:
		dev_err(dev, "please specify dma1\n");
		return 0;
	case 0:
		wss_config[n] |= WSS_CONFIG_DMA_0;
		break;
	case 1:
		wss_config[n] |= WSS_CONFIG_DMA_1;
		break;
	case 3:
		wss_config[n] |= WSS_CONFIG_DMA_3;
		break;
	default:
		dev_err(dev, "invalid playback DMA %d\n", dma1[n]);
		return 0;
	}

	/* No (distinct) capture DMA: run half-duplex on dma1 alone. */
	if (dma2[n] == SNDRV_AUTO_DMA || dma2[n] == dma1[n]) {
		dma2[n] = -1;
		goto mpu;
	}

	wss_config[n] |= WSS_CONFIG_DUPLEX;
	switch (dma2[n]) {
	case 0:
		break;
	case 1:
		if (dma1[n] == 0)
			break;
		/* deliberate fall through: dma2==1 only valid with dma1==0 */
	default:
		dev_err(dev, "invalid capture DMA %d\n", dma2[n]);
		return 0;
	}

mpu:
	switch (mpu_port[n]) {
	case SNDRV_AUTO_PORT:
		dev_warn(dev, "mpu_port not specified; not using MPU-401\n");
		mpu_port[n] = -1;
		goto fm;
	case 0x300:
		config[n] |= GALAXY_CONFIG_MPU_ENABLE | GALAXY_CONFIG_MPUA_300;
		break;
	case 0x330:
		config[n] |= GALAXY_CONFIG_MPU_ENABLE | GALAXY_CONFIG_MPUA_330;
		break;
	default:
		dev_err(dev, "invalid MPU port %#lx\n", mpu_port[n]);
		return 0;
	}

	switch (mpu_irq[n]) {
	case SNDRV_AUTO_IRQ:
		dev_warn(dev, "mpu_irq not specified: using polling mode\n");
		mpu_irq[n] = -1;
		break;
	case 2:
		mpu_irq[n] = 9;
		/* deliberate fall through: IRQ 2 is cascaded to 9 */
	case 9:
		config[n] |= GALAXY_CONFIG_MPUIRQ_2;
		break;
#ifdef AZT1605
	case 3:
		config[n] |= GALAXY_CONFIG_MPUIRQ_3;
		break;
#endif
	case 5:
		config[n] |= GALAXY_CONFIG_MPUIRQ_5;
		break;
	case 7:
		config[n] |= GALAXY_CONFIG_MPUIRQ_7;
		break;
#ifdef AZT2316
	case 10:
		config[n] |= GALAXY_CONFIG_MPUIRQ_10;
		break;
#endif
	default:
		dev_err(dev, "invalid MPU IRQ %d\n", mpu_irq[n]);
		return 0;
	}

	if (mpu_irq[n] == irq[n]) {
		dev_err(dev, "cannot share IRQ between WSS and MPU-401\n");
		return 0;
	}

fm:
	switch (fm_port[n]) {
	case SNDRV_AUTO_PORT:
		dev_warn(dev, "fm_port not specified: not using OPL3\n");
		fm_port[n] = -1;
		break;
	case 0x388:
		break;
	default:
		dev_err(dev, "illegal FM port %#lx\n", fm_port[n]);
		return 0;
	}

	config[n] |= GALAXY_CONFIG_GAME_ENABLE;
	return 1;
}

/*
 * Reset the on-card DSP, check the expected Galaxy DSP version, and read
 * back the card type byte. Returns 0 on success, negative error otherwise.
 */
static int __devinit galaxy_init(struct snd_galaxy *galaxy, u8 *type)
{
	u8 major;
	u8 minor;
	int err;

	err = dsp_reset(galaxy->port);
	if (err < 0)
		return err;

	err = dsp_get_version(galaxy->port, &major, &minor);
	if (err < 0)
		return err;

	if (major != GALAXY_DSP_MAJOR || minor != GALAXY_DSP_MINOR)
		return -ENODEV;

	err = dsp_command(galaxy->port, DSP_COMMAND_GALAXY_8);
	if (err < 0)
		return err;

	err = dsp_command(galaxy->port, GALAXY_COMMAND_GET_TYPE);
	if (err < 0)
		return err;

	err = dsp_get_byte(galaxy->port, type);
	if (err < 0)
		return err;

	return 0;
}

/*
 * Switch the card between WSS and SB8 operating modes via DSP command 9.
 */
static int __devinit galaxy_set_mode(struct snd_galaxy *galaxy, u8 mode)
{
	int err;

	err = dsp_command(galaxy->port, DSP_COMMAND_GALAXY_9);
	if (err < 0)
		return err;

	err = dsp_command(galaxy->port, mode);
	if (err < 0)
		return err;

#ifdef AZT1605
	/*
	 * Needed for MPU IRQ on AZT1605, but AZT2316 loses WSS again
	 */
	err = dsp_reset(galaxy->port);
	if (err < 0)
		return err;
#endif

	return 0;
}

/*
 * Write the 32-bit config word to the card, one byte (LSB first) per config
 * register, bracketed by setting/clearing bit 0x80 of the SET register.
 */
static void galaxy_set_config(struct snd_galaxy *galaxy, u32 config)
{
	u8 tmp = ioread8(galaxy->config_port + CONFIG_PORT_SET);
	int i;

	iowrite8(tmp | 0x80, galaxy->config_port + CONFIG_PORT_SET);
	for (i = 0; i < GALAXY_CONFIG_SIZE; i++) {
		iowrite8(config, galaxy->config_port + i);
		config >>= 8;
	}
	iowrite8(tmp & 0x7f, galaxy->config_port + CONFIG_PORT_SET);
	msleep(10);
}

/*
 * Save the card's current config word (read back byte-wise, high byte
 * first) into galaxy->config for later restore, then write the new config
 * merged with the preserved GALAXY_CONFIG_MASK bits of the old one.
 */
static void __devinit galaxy_config(struct snd_galaxy *galaxy, u32 config)
{
	int i;

	for (i = GALAXY_CONFIG_SIZE; i; i--) {
		u8 tmp = ioread8(galaxy->config_port + i - 1);
		galaxy->config = (galaxy->config << 8) | tmp;
	}
	config |= galaxy->config & GALAXY_CONFIG_MASK;
	galaxy_set_config(galaxy, config);
}

/*
 * Detect the WSS codec, program its config register and put the card into
 * WSS mode. Returns 0 on success, negative error otherwise.
 */
static int __devinit galaxy_wss_config(struct snd_galaxy *galaxy,
				       u8 wss_config)
{
	int err;

	err = wss_detect(galaxy->wss_port);
	if (err < 0)
		return err;

	wss_set_config(galaxy->wss_port, wss_config);

	err = galaxy_set_mode(galaxy, GALAXY_MODE_WSS);
	if (err < 0)
		return err;

	return 0;
}

/*
 * snd_card private_free callback: undo WSS config, restore the saved card
 * config, and unmap/release all claimed port ranges. Each region is only
 * torn down if its mapping was actually established during probe.
 */
static void snd_galaxy_free(struct snd_card *card)
{
	struct snd_galaxy *galaxy = card->private_data;

	if (galaxy->wss_port) {
		wss_set_config(galaxy->wss_port, 0);
		ioport_unmap(galaxy->wss_port);
		release_and_free_resource(galaxy->res_wss_port);
	}
	if (galaxy->config_port) {
		galaxy_set_config(galaxy, galaxy->config);
		ioport_unmap(galaxy->config_port);
		release_and_free_resource(galaxy->res_config_port);
	}
	if (galaxy->port) {
		ioport_unmap(galaxy->port);
		release_and_free_resource(galaxy->res_port);
	}
}

/*
 * isa_driver ->probe callback: claim and map the SB, config and WSS port
 * ranges, configure the hardware, then create the WSS PCM/mixer/timer and
 * the optional MPU-401 and OPL3 devices before registering the card.
 * All error paths funnel through snd_card_free(), whose private_free hook
 * (snd_galaxy_free) releases whatever resources were acquired so far.
 */
static int __devinit snd_galaxy_probe(struct device *dev, unsigned int n)
{
	struct snd_galaxy *galaxy;
	struct snd_wss *chip;
	struct snd_card *card;
	u8 type;
	int err;

	err = snd_card_create(index[n], id[n], THIS_MODULE, sizeof *galaxy,
			      &card);
	if (err < 0)
		return err;

	snd_card_set_dev(card, dev);

	card->private_free = snd_galaxy_free;
	galaxy = card->private_data;

	galaxy->res_port = request_region(port[n], 16, DRV_NAME);
	if (!galaxy->res_port) {
		dev_err(dev, "could not grab ports %#lx-%#lx\n", port[n],
			port[n] + 15);
		err = -EBUSY;
		goto error;
	}
	galaxy->port = ioport_map(port[n], 16);

	err = galaxy_init(galaxy, &type);
	if (err < 0) {
		dev_err(dev, "did not find a Sound Galaxy at %#lx\n", port[n]);
		goto error;
	}
	dev_info(dev, "Sound Galaxy (type %d) found at %#lx\n", type, port[n]);

	galaxy->res_config_port = request_region(port[n] + GALAXY_PORT_CONFIG,
						 16, DRV_NAME);
	if (!galaxy->res_config_port) {
		dev_err(dev, "could not grab ports %#lx-%#lx\n",
			port[n] + GALAXY_PORT_CONFIG,
			port[n] + GALAXY_PORT_CONFIG + 15);
		err = -EBUSY;
		goto error;
	}
	galaxy->config_port = ioport_map(port[n] + GALAXY_PORT_CONFIG, 16);

	galaxy_config(galaxy, config[n]);

	galaxy->res_wss_port = request_region(wss_port[n], 4, DRV_NAME);
	if (!galaxy->res_wss_port) {
		dev_err(dev, "could not grab ports %#lx-%#lx\n", wss_port[n],
			wss_port[n] + 3);
		err = -EBUSY;
		goto error;
	}
	galaxy->wss_port = ioport_map(wss_port[n], 4);

	err = galaxy_wss_config(galaxy, wss_config[n]);
	if (err < 0) {
		dev_err(dev, "could not configure WSS\n");
		goto error;
	}

	strcpy(card->driver, DRV_NAME);
	strcpy(card->shortname, DRV_NAME);
	sprintf(card->longname, "%s at %#lx/%#lx, irq %d, dma %d/%d",
		card->shortname, port[n], wss_port[n], irq[n], dma1[n],
		dma2[n]);

	/* WSS codec registers start 4 bytes into the claimed WSS range. */
	err = snd_wss_create(card, wss_port[n] + 4, -1, irq[n], dma1[n],
			     dma2[n], WSS_HW_DETECT, 0, &chip);
	if (err < 0)
		goto error;

	err = snd_wss_pcm(chip, 0, NULL);
	if (err < 0)
		goto error;

	err = snd_wss_mixer(chip);
	if (err < 0)
		goto error;

	err = snd_wss_timer(chip, 0, NULL);
	if (err < 0)
		goto error;

	if (mpu_port[n] >= 0) {
		err = snd_mpu401_uart_new(card, 0, MPU401_HW_MPU401,
					  mpu_port[n], 0, mpu_irq[n], NULL);
		if (err < 0)
			goto error;
	}

	if (fm_port[n] >= 0) {
		struct snd_opl3 *opl3;

		err = snd_opl3_create(card, fm_port[n], fm_port[n] + 2,
				      OPL3_HW_AUTO, 0, &opl3);
		if (err < 0) {
			dev_err(dev, "no OPL device at %#lx\n", fm_port[n]);
			goto error;
		}
		err = snd_opl3_timer_new(opl3, 1, 2);
		if (err < 0)
			goto error;

		err = snd_opl3_hwdep_new(opl3, 0, 1, NULL);
		if (err < 0)
			goto error;
	}

	err = snd_card_register(card);
	if (err < 0)
		goto error;

	dev_set_drvdata(dev, card);
	return 0;

error:
	snd_card_free(card);
	return err;
}

/* isa_driver ->remove callback: free the card and clear the drvdata link. */
static int __devexit snd_galaxy_remove(struct device *dev, unsigned int n)
{
	snd_card_free(dev_get_drvdata(dev));
	dev_set_drvdata(dev, NULL);
	return 0;
}

static struct isa_driver snd_galaxy_driver = {
	.match		= snd_galaxy_match,
	.probe		= snd_galaxy_probe,
	.remove		= __devexit_p(snd_galaxy_remove),

	.driver		= {
		.name	= DEV_NAME
	}
};

/* Module entry/exit: register/unregister the ISA driver. */
static int __init alsa_card_galaxy_init(void)
{
	return isa_register_driver(&snd_galaxy_driver, SNDRV_CARDS);
}

static void __exit alsa_card_galaxy_exit(void)
{
	isa_unregister_driver(&snd_galaxy_driver);
}

module_init(alsa_card_galaxy_init);
module_exit(alsa_card_galaxy_exit);
gpl-2.0
venkatarajasekhar/kernel_raybst
fs/coda/psdev.c
5077
10465
/*
 * An implementation of a loadable kernel mode driver providing
 * multiple kernel/user space bidirectional communications links.
 *
 * Author: Alan Cox <alan@lxorguk.ukuu.org.uk>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Adapted to become the Linux 2.0 Coda pseudo device
 * Peter Braam <braam@maths.ox.ac.uk>
 * Michael Callahan <mjc@emmy.smith.edu>
 *
 * Changes for Linux 2.1
 * Copyright (c) 1997 Carnegie-Mellon University
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/time.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/ioport.h>
#include <linux/fcntl.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/device.h>
#include <asm/io.h>
#include <asm/poll.h>
#include <asm/uaccess.h>

#include <linux/coda.h>
#include <linux/coda_psdev.h>

#include "coda_linux.h"
#include "coda_int.h"

/* statistics */
int coda_hard;			/* allows signals during upcalls */
unsigned long coda_timeout = 30;	/* .. secs, then signals will dequeue */

/* One communication channel (pending/processing queues) per minor device. */
struct venus_comm coda_comms[MAX_CODADEVS];
static struct class *coda_psdev_class;

/*
 * Device operations
 */

/*
 * poll: writing is always possible; reading becomes possible when the
 * pending-upcall queue is non-empty (checked under vc_mutex).
 */
static unsigned int coda_psdev_poll(struct file *file, poll_table * wait)
{
	struct venus_comm *vcp = (struct venus_comm *) file->private_data;
	unsigned int mask = POLLOUT | POLLWRNORM;

	poll_wait(file, &vcp->vc_waitq, wait);

	mutex_lock(&vcp->vc_mutex);
	if (!list_empty(&vcp->vc_pending))
		mask |= POLLIN | POLLRDNORM;
	mutex_unlock(&vcp->vc_mutex);

	return mask;
}

/* ioctl: only CIOC_KERNEL_VERSION is supported; reports the protocol rev. */
static long coda_psdev_ioctl(struct file * filp, unsigned int cmd, unsigned long arg)
{
	unsigned int data;

	switch(cmd) {
	case CIOC_KERNEL_VERSION:
		data = CODA_KERNEL_VERSION;
		return put_user(data, (int __user *) arg);
	default:
		return -ENOTTY;
	}

	return 0;
}

/*
 * Receive a message written by Venus to the psdev
 */

static ssize_t coda_psdev_write(struct file *file, const char __user *buf,
				size_t nbytes, loff_t *off)
{
	struct venus_comm *vcp = (struct venus_comm *) file->private_data;
	struct upc_req *req = NULL;
	struct upc_req *tmp;
	struct list_head *lh;
	struct coda_in_hdr hdr;
	ssize_t retval = 0, count = 0;
	int error;

	/* Peek at the opcode, uniquefier */
	if (copy_from_user(&hdr, buf, 2 * sizeof(u_long)))
		return -EFAULT;

	if (DOWNCALL(hdr.opcode)) {
		/* Unsolicited message from Venus: handle it as a downcall. */
		union outputArgs *dcbuf;
		int size = sizeof(*dcbuf);

		if  ( nbytes < sizeof(struct coda_out_hdr) ) {
			printk("coda_downcall opc %d uniq %d, not enough!\n",
			       hdr.opcode, hdr.unique);
			count = nbytes;
			goto out;
		}
		if ( nbytes > size ) {
			printk("Coda: downcall opc %d, uniq %d, too much!",
			       hdr.opcode, hdr.unique);
			nbytes = size;
		}
		CODA_ALLOC(dcbuf, union outputArgs *, nbytes);
		if (copy_from_user(dcbuf, buf, nbytes)) {
			CODA_FREE(dcbuf, nbytes);
			retval = -EFAULT;
			goto out;
		}

		/* what downcall errors does Venus handle ? */
		error = coda_downcall(vcp, hdr.opcode, dcbuf);

		CODA_FREE(dcbuf, nbytes);
		if (error) {
			printk("psdev_write: coda_downcall error: %d\n",
			       error);
			retval = error;
			goto out;
		}
		count = nbytes;
		goto out;
	}

	/* Look for the message on the processing queue. */
	mutex_lock(&vcp->vc_mutex);
	list_for_each(lh, &vcp->vc_processing) {
		tmp = list_entry(lh, struct upc_req , uc_chain);
		if (tmp->uc_unique == hdr.unique) {
			req = tmp;
			list_del(&req->uc_chain);
			break;
		}
	}
	mutex_unlock(&vcp->vc_mutex);

	if (!req) {
		printk("psdev_write: msg (%d, %d) not found\n",
		       hdr.opcode, hdr.unique);
		retval = -ESRCH;
		goto out;
	}

	/* move data into response buffer. */
	if (req->uc_outSize < nbytes) {
		printk("psdev_write: too much cnt: %d, cnt: %ld, opc: %d, uniq: %d.\n",
		       req->uc_outSize, (long)nbytes, hdr.opcode, hdr.unique);
		nbytes = req->uc_outSize; /* don't have more space! */
	}
	if (copy_from_user(req->uc_data, buf, nbytes)) {
		/* Abort the waiting request rather than leaving it hung. */
		req->uc_flags |= CODA_REQ_ABORT;
		wake_up(&req->uc_sleep);
		retval = -EFAULT;
		goto out;
	}

	/* adjust outsize. is this useful ?? */
	req->uc_outSize = nbytes;
	req->uc_flags |= CODA_REQ_WRITE;
	count = nbytes;

	/* Convert filedescriptor into a file handle */
	if (req->uc_opcode == CODA_OPEN_BY_FD) {
		struct coda_open_by_fd_out *outp =
			(struct coda_open_by_fd_out *)req->uc_data;
		if (!outp->oh.result)
			outp->fh = fget(outp->fd);
	}

	wake_up(&req->uc_sleep);
out:
	return(count ? count : retval);
}

/*
 *	Read a message from the kernel to Venus
 */

static ssize_t coda_psdev_read(struct file * file, char __user * buf,
			       size_t nbytes, loff_t *off)
{
	DECLARE_WAITQUEUE(wait, current);
	struct venus_comm *vcp = (struct venus_comm *) file->private_data;
	struct upc_req *req;
	ssize_t retval = 0, count = 0;

	if (nbytes == 0)
		return 0;

	mutex_lock(&vcp->vc_mutex);

	/* Block until a pending upcall arrives; drop the mutex while asleep.
	 * Honours O_NONBLOCK (-EAGAIN) and pending signals (-ERESTARTSYS). */
	add_wait_queue(&vcp->vc_waitq, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	while (list_empty(&vcp->vc_pending)) {
		if (file->f_flags & O_NONBLOCK) {
			retval = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			break;
		}
		mutex_unlock(&vcp->vc_mutex);
		schedule();
		mutex_lock(&vcp->vc_mutex);
	}

	set_current_state(TASK_RUNNING);
	remove_wait_queue(&vcp->vc_waitq, &wait);

	if (retval)
		goto out;

	req = list_entry(vcp->vc_pending.next, struct upc_req,uc_chain);
	list_del(&req->uc_chain);

	/* Move the input args into userspace */
	count = req->uc_inSize;
	if (nbytes < req->uc_inSize) {
		printk ("psdev_read: Venus read %ld bytes of %d in message\n",
			(long)nbytes, req->uc_inSize);
		count = nbytes;
	}

	if (copy_to_user(buf, req->uc_data, count))
		retval = -EFAULT;

	/* If request was not a signal, enqueue and don't free */
	if (!(req->uc_flags & CODA_REQ_ASYNC)) {
		req->uc_flags |= CODA_REQ_READ;
		list_add_tail(&(req->uc_chain), &vcp->vc_processing);
		goto out;
	}

	CODA_FREE(req->uc_data, sizeof(struct coda_in_hdr));
	kfree(req);
out:
	mutex_unlock(&vcp->vc_mutex);
	return (count ? count : retval);
}

/*
 * open: bind the file to the channel for this minor; only one opener is
 * allowed at a time (-EBUSY otherwise). Initializes the queues/waitqueue.
 */
static int coda_psdev_open(struct inode * inode, struct file * file)
{
	struct venus_comm *vcp;
	int idx, err;

	idx = iminor(inode);
	if (idx < 0 || idx >= MAX_CODADEVS)
		return -ENODEV;

	err = -EBUSY;
	vcp = &coda_comms[idx];
	mutex_lock(&vcp->vc_mutex);

	if (!vcp->vc_inuse) {
		vcp->vc_inuse++;

		INIT_LIST_HEAD(&vcp->vc_pending);
		INIT_LIST_HEAD(&vcp->vc_processing);
		init_waitqueue_head(&vcp->vc_waitq);
		vcp->vc_sb = NULL;
		vcp->vc_seq = 0;

		file->private_data = vcp;
		err = 0;
	}

	mutex_unlock(&vcp->vc_mutex);
	return err;
}

/*
 * release: abort or free every queued request so blocked clients can
 * return, then mark the channel unused again.
 */
static int coda_psdev_release(struct inode * inode, struct file * file)
{
	struct venus_comm *vcp = (struct venus_comm *) file->private_data;
	struct upc_req *req, *tmp;

	if (!vcp || !vcp->vc_inuse ) {
		printk("psdev_release: Not open.\n");
		return -1;
	}

	mutex_lock(&vcp->vc_mutex);

	/* Wakeup clients so they can return. */
	list_for_each_entry_safe(req, tmp, &vcp->vc_pending, uc_chain) {
		list_del(&req->uc_chain);

		/* Async requests need to be freed here */
		if (req->uc_flags & CODA_REQ_ASYNC) {
			CODA_FREE(req->uc_data, sizeof(struct coda_in_hdr));
			kfree(req);
			continue;
		}
		req->uc_flags |= CODA_REQ_ABORT;
		wake_up(&req->uc_sleep);
	}

	list_for_each_entry_safe(req, tmp, &vcp->vc_processing, uc_chain) {
		list_del(&req->uc_chain);

		req->uc_flags |= CODA_REQ_ABORT;
		wake_up(&req->uc_sleep);
	}

	file->private_data = NULL;
	vcp->vc_inuse--;
	mutex_unlock(&vcp->vc_mutex);
	return 0;
}

static const struct file_operations coda_psdev_fops = {
	.owner		= THIS_MODULE,
	.read		= coda_psdev_read,
	.write		= coda_psdev_write,
	.poll		= coda_psdev_poll,
	.unlocked_ioctl	= coda_psdev_ioctl,
	.open		= coda_psdev_open,
	.release	= coda_psdev_release,
	.llseek		= noop_llseek,
};

/*
 * Register the character device, create the device class and the per-minor
 * /dev nodes ("cfs%d"), and initialize sysctl support.
 */
static int init_coda_psdev(void)
{
	int i, err = 0;
	if (register_chrdev(CODA_PSDEV_MAJOR, "coda", &coda_psdev_fops)) {
		printk(KERN_ERR "coda_psdev: unable to get major %d\n",
		       CODA_PSDEV_MAJOR);
		return -EIO;
	}
	coda_psdev_class = class_create(THIS_MODULE, "coda");
	if (IS_ERR(coda_psdev_class)) {
		err = PTR_ERR(coda_psdev_class);
		goto out_chrdev;
	}
	for (i = 0; i < MAX_CODADEVS; i++) {
		mutex_init(&(&coda_comms[i])->vc_mutex);
		device_create(coda_psdev_class, NULL,
			      MKDEV(CODA_PSDEV_MAJOR, i), NULL, "cfs%d", i);
	}
	coda_sysctl_init();
	goto out;

out_chrdev:
	unregister_chrdev(CODA_PSDEV_MAJOR, "coda");
out:
	return err;
}

MODULE_AUTHOR("Jan Harkes, Peter J. Braam");
MODULE_DESCRIPTION("Coda Distributed File System VFS interface");
MODULE_ALIAS_CHARDEV_MAJOR(CODA_PSDEV_MAJOR);
MODULE_LICENSE("GPL");
MODULE_VERSION("6.6");

/*
 * Module init: inode cache, then the psdev, then the filesystem type.
 * Unwinds in reverse order on failure.
 */
static int __init init_coda(void)
{
	int status;
	int i;

	status = coda_init_inodecache();
	if (status)
		goto out2;
	status = init_coda_psdev();
	if ( status ) {
		printk("Problem (%d) in init_coda_psdev\n", status);
		goto out1;
	}

	status = register_filesystem(&coda_fs_type);
	if (status) {
		printk("coda: failed to register filesystem!\n");
		goto out;
	}
	return 0;
out:
	for (i = 0; i < MAX_CODADEVS; i++)
		device_destroy(coda_psdev_class, MKDEV(CODA_PSDEV_MAJOR, i));
	class_destroy(coda_psdev_class);
	unregister_chrdev(CODA_PSDEV_MAJOR, "coda");
	coda_sysctl_clean();
out1:
	coda_destroy_inodecache();
out2:
	return status;
}

/* Module exit: tear everything down in the reverse of init_coda(). */
static void __exit exit_coda(void)
{
	int err, i;

	err = unregister_filesystem(&coda_fs_type);
	if ( err != 0 ) {
		printk("coda: failed to unregister filesystem\n");
	}
	for (i = 0; i < MAX_CODADEVS; i++)
		device_destroy(coda_psdev_class, MKDEV(CODA_PSDEV_MAJOR, i));
	class_destroy(coda_psdev_class);
	unregister_chrdev(CODA_PSDEV_MAJOR, "coda");
	coda_sysctl_clean();
	coda_destroy_inodecache();
}

module_init(init_coda);
module_exit(exit_coda);
gpl-2.0
talnoah/Leaping_Lemur
sound/isa/es1688/es1688.c
5077
10458
/* * Driver for generic ESS AudioDrive ESx688 soundcards * Copyright (c) by Jaroslav Kysela <perex@perex.cz> * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/init.h> #include <linux/err.h> #include <linux/isa.h> #include <linux/isapnp.h> #include <linux/time.h> #include <linux/wait.h> #include <linux/module.h> #include <asm/dma.h> #include <sound/core.h> #include <sound/es1688.h> #include <sound/mpu401.h> #include <sound/opl3.h> #define SNDRV_LEGACY_FIND_FREE_IRQ #define SNDRV_LEGACY_FIND_FREE_DMA #include <sound/initval.h> #define CRD_NAME "Generic ESS ES1688/ES688 AudioDrive" #define DEV_NAME "es1688" MODULE_DESCRIPTION(CRD_NAME); MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>"); MODULE_LICENSE("GPL"); MODULE_SUPPORTED_DEVICE("{{ESS,ES688 PnP AudioDrive,pnp:ESS0100}," "{ESS,ES1688 PnP AudioDrive,pnp:ESS0102}," "{ESS,ES688 AudioDrive,pnp:ESS6881}," "{ESS,ES1688 AudioDrive,pnp:ESS1681}}"); MODULE_ALIAS("snd_es968"); static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */ static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */ #ifdef CONFIG_PNP static bool isapnp[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_ISAPNP; #endif static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE; /* Enable this card */ static long port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; /* 0x220,0x240,0x260 */ static 
long fm_port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; /* Usually 0x388 */ static long mpu_port[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = -1}; static int irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ; /* 5,7,9,10 */ static int mpu_irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ; /* 5,7,9,10 */ static int dma8[SNDRV_CARDS] = SNDRV_DEFAULT_DMA; /* 0,1,3 */ module_param_array(index, int, NULL, 0444); MODULE_PARM_DESC(index, "Index value for " CRD_NAME " soundcard."); module_param_array(id, charp, NULL, 0444); MODULE_PARM_DESC(id, "ID string for " CRD_NAME " soundcard."); module_param_array(enable, bool, NULL, 0444); #ifdef CONFIG_PNP module_param_array(isapnp, bool, NULL, 0444); MODULE_PARM_DESC(isapnp, "PnP detection for specified soundcard."); #endif MODULE_PARM_DESC(enable, "Enable " CRD_NAME " soundcard."); module_param_array(port, long, NULL, 0444); MODULE_PARM_DESC(port, "Port # for " CRD_NAME " driver."); module_param_array(mpu_port, long, NULL, 0444); MODULE_PARM_DESC(mpu_port, "MPU-401 port # for " CRD_NAME " driver."); module_param_array(irq, int, NULL, 0444); module_param_array(fm_port, long, NULL, 0444); MODULE_PARM_DESC(fm_port, "FM port # for ES1688 driver."); MODULE_PARM_DESC(irq, "IRQ # for " CRD_NAME " driver."); module_param_array(mpu_irq, int, NULL, 0444); MODULE_PARM_DESC(mpu_irq, "MPU-401 IRQ # for " CRD_NAME " driver."); module_param_array(dma8, int, NULL, 0444); MODULE_PARM_DESC(dma8, "8-bit DMA # for " CRD_NAME " driver."); #ifdef CONFIG_PNP #define is_isapnp_selected(dev) isapnp[dev] #else #define is_isapnp_selected(dev) 0 #endif static int __devinit snd_es1688_match(struct device *dev, unsigned int n) { return enable[n] && !is_isapnp_selected(n); } static int __devinit snd_es1688_legacy_create(struct snd_card *card, struct device *dev, unsigned int n) { struct snd_es1688 *chip = card->private_data; static long possible_ports[] = {0x220, 0x240, 0x260}; static int possible_irqs[] = {5, 9, 10, 7, -1}; static int possible_dmas[] = {1, 3, 0, -1}; int i, error; if (irq[n] 
== SNDRV_AUTO_IRQ) { irq[n] = snd_legacy_find_free_irq(possible_irqs); if (irq[n] < 0) { dev_err(dev, "unable to find a free IRQ\n"); return -EBUSY; } } if (dma8[n] == SNDRV_AUTO_DMA) { dma8[n] = snd_legacy_find_free_dma(possible_dmas); if (dma8[n] < 0) { dev_err(dev, "unable to find a free DMA\n"); return -EBUSY; } } if (port[n] != SNDRV_AUTO_PORT) return snd_es1688_create(card, chip, port[n], mpu_port[n], irq[n], mpu_irq[n], dma8[n], ES1688_HW_AUTO); i = 0; do { port[n] = possible_ports[i]; error = snd_es1688_create(card, chip, port[n], mpu_port[n], irq[n], mpu_irq[n], dma8[n], ES1688_HW_AUTO); } while (error < 0 && ++i < ARRAY_SIZE(possible_ports)); return error; } static int __devinit snd_es1688_probe(struct snd_card *card, unsigned int n) { struct snd_es1688 *chip = card->private_data; struct snd_opl3 *opl3; struct snd_pcm *pcm; int error; error = snd_es1688_pcm(card, chip, 0, &pcm); if (error < 0) return error; error = snd_es1688_mixer(card, chip); if (error < 0) return error; strlcpy(card->driver, "ES1688", sizeof(card->driver)); strlcpy(card->shortname, pcm->name, sizeof(card->shortname)); snprintf(card->longname, sizeof(card->longname), "%s at 0x%lx, irq %i, dma %i", pcm->name, chip->port, chip->irq, chip->dma8); if (fm_port[n] == SNDRV_AUTO_PORT) fm_port[n] = port[n]; /* share the same port */ if (fm_port[n] > 0) { if (snd_opl3_create(card, fm_port[n], fm_port[n] + 2, OPL3_HW_OPL3, 0, &opl3) < 0) dev_warn(card->dev, "opl3 not detected at 0x%lx\n", fm_port[n]); else { error = snd_opl3_hwdep_new(opl3, 0, 1, NULL); if (error < 0) return error; } } if (mpu_irq[n] >= 0 && mpu_irq[n] != SNDRV_AUTO_IRQ && chip->mpu_port > 0) { error = snd_mpu401_uart_new(card, 0, MPU401_HW_ES1688, chip->mpu_port, 0, mpu_irq[n], NULL); if (error < 0) return error; } return snd_card_register(card); } static int __devinit snd_es1688_isa_probe(struct device *dev, unsigned int n) { struct snd_card *card; int error; error = snd_card_create(index[n], id[n], THIS_MODULE, sizeof(struct 
snd_es1688), &card); if (error < 0) return error; error = snd_es1688_legacy_create(card, dev, n); if (error < 0) goto out; snd_card_set_dev(card, dev); error = snd_es1688_probe(card, n); if (error < 0) goto out; dev_set_drvdata(dev, card); return 0; out: snd_card_free(card); return error; } static int __devexit snd_es1688_isa_remove(struct device *dev, unsigned int n) { snd_card_free(dev_get_drvdata(dev)); dev_set_drvdata(dev, NULL); return 0; } static struct isa_driver snd_es1688_driver = { .match = snd_es1688_match, .probe = snd_es1688_isa_probe, .remove = __devexit_p(snd_es1688_isa_remove), #if 0 /* FIXME */ .suspend = snd_es1688_suspend, .resume = snd_es1688_resume, #endif .driver = { .name = DEV_NAME } }; static int snd_es968_pnp_is_probed; #ifdef CONFIG_PNP static int __devinit snd_card_es968_pnp(struct snd_card *card, unsigned int n, struct pnp_card_link *pcard, const struct pnp_card_device_id *pid) { struct snd_es1688 *chip = card->private_data; struct pnp_dev *pdev; int error; pdev = pnp_request_card_device(pcard, pid->devs[0].id, NULL); if (pdev == NULL) return -ENODEV; error = pnp_activate_dev(pdev); if (error < 0) { snd_printk(KERN_ERR "ES968 pnp configure failure\n"); return error; } port[n] = pnp_port_start(pdev, 0); dma8[n] = pnp_dma(pdev, 0); irq[n] = pnp_irq(pdev, 0); return snd_es1688_create(card, chip, port[n], mpu_port[n], irq[n], mpu_irq[n], dma8[n], ES1688_HW_AUTO); } static int __devinit snd_es968_pnp_detect(struct pnp_card_link *pcard, const struct pnp_card_device_id *pid) { struct snd_card *card; static unsigned int dev; int error; struct snd_es1688 *chip; if (snd_es968_pnp_is_probed) return -EBUSY; for ( ; dev < SNDRV_CARDS; dev++) { if (enable[dev] && isapnp[dev]) break; } if (dev == SNDRV_CARDS) return -ENODEV; error = snd_card_create(index[dev], id[dev], THIS_MODULE, sizeof(struct snd_es1688), &card); if (error < 0) return error; chip = card->private_data; error = snd_card_es968_pnp(card, dev, pcard, pid); if (error < 0) { 
snd_card_free(card); return error; } snd_card_set_dev(card, &pcard->card->dev); error = snd_es1688_probe(card, dev); if (error < 0) return error; pnp_set_card_drvdata(pcard, card); snd_es968_pnp_is_probed = 1; return 0; } static void __devexit snd_es968_pnp_remove(struct pnp_card_link * pcard) { snd_card_free(pnp_get_card_drvdata(pcard)); pnp_set_card_drvdata(pcard, NULL); snd_es968_pnp_is_probed = 0; } #ifdef CONFIG_PM static int snd_es968_pnp_suspend(struct pnp_card_link *pcard, pm_message_t state) { struct snd_card *card = pnp_get_card_drvdata(pcard); struct snd_es1688 *chip = card->private_data; snd_power_change_state(card, SNDRV_CTL_POWER_D3hot); snd_pcm_suspend_all(chip->pcm); return 0; } static int snd_es968_pnp_resume(struct pnp_card_link *pcard) { struct snd_card *card = pnp_get_card_drvdata(pcard); struct snd_es1688 *chip = card->private_data; snd_es1688_reset(chip); snd_power_change_state(card, SNDRV_CTL_POWER_D0); return 0; } #endif static struct pnp_card_device_id snd_es968_pnpids[] = { { .id = "ESS0968", .devs = { { "@@@0968" }, } }, { .id = "ESS0968", .devs = { { "ESS0968" }, } }, { .id = "", } /* end */ }; MODULE_DEVICE_TABLE(pnp_card, snd_es968_pnpids); static struct pnp_card_driver es968_pnpc_driver = { .flags = PNP_DRIVER_RES_DISABLE, .name = DEV_NAME " PnP", .id_table = snd_es968_pnpids, .probe = snd_es968_pnp_detect, .remove = __devexit_p(snd_es968_pnp_remove), #ifdef CONFIG_PM .suspend = snd_es968_pnp_suspend, .resume = snd_es968_pnp_resume, #endif }; #endif static int __init alsa_card_es1688_init(void) { #ifdef CONFIG_PNP pnp_register_card_driver(&es968_pnpc_driver); if (snd_es968_pnp_is_probed) return 0; pnp_unregister_card_driver(&es968_pnpc_driver); #endif return isa_register_driver(&snd_es1688_driver, SNDRV_CARDS); } static void __exit alsa_card_es1688_exit(void) { if (!snd_es968_pnp_is_probed) { isa_unregister_driver(&snd_es1688_driver); return; } #ifdef CONFIG_PNP pnp_unregister_card_driver(&es968_pnpc_driver); #endif } 
/* Module entry/exit hooks: init tries ES968 PnP first, then falls back to
 * legacy ISA probing; exit unregisters whichever path actually attached. */
module_init(alsa_card_es1688_init);
module_exit(alsa_card_es1688_exit);
gpl-2.0
glewarne/LG_G2-D802_StockMOD_Kernel
drivers/media/rc/keymaps/rc-msi-tvanywhere.c
7637
1810
/* msi-tvanywhere.h - Keytable for msi_tvanywhere Remote Controller
 *
 * keymap imported from ir-keymaps.c
 *
 * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <media/rc-map.h>
#include <linux/module.h>

/* Scancode -> keycode table for the MSI TV@nywhere MASTER remote. */
static struct rc_map_table msi_tvanywhere[] = {
	/* Digit keys 0 through 9 */
	{ 0x00, KEY_0 },
	{ 0x01, KEY_1 },
	{ 0x02, KEY_2 },
	{ 0x03, KEY_3 },
	{ 0x04, KEY_4 },
	{ 0x05, KEY_5 },
	{ 0x06, KEY_6 },
	{ 0x07, KEY_7 },
	{ 0x08, KEY_8 },
	{ 0x09, KEY_9 },

	{ 0x0c, KEY_MUTE },
	{ 0x0f, KEY_SCREEN },		/* Full Screen */
	{ 0x10, KEY_FN },		/* Function */
	{ 0x11, KEY_TIME },		/* Time shift */
	{ 0x12, KEY_POWER },
	{ 0x13, KEY_MEDIA },		/* MTS */
	{ 0x14, KEY_SLOW },
	{ 0x16, KEY_REWIND },		/* backward << */
	{ 0x17, KEY_ENTER },		/* Return */
	{ 0x18, KEY_FASTFORWARD },	/* forward >> */
	{ 0x1a, KEY_CHANNELUP },
	{ 0x1b, KEY_VOLUMEUP },
	{ 0x1e, KEY_CHANNELDOWN },
	{ 0x1f, KEY_VOLUMEDOWN },
};

static struct rc_map_list msi_tvanywhere_map = {
	.map = {
		.scan    = msi_tvanywhere,
		.size    = ARRAY_SIZE(msi_tvanywhere),
		.rc_type = RC_TYPE_UNKNOWN,	/* Legacy IR type */
		.name    = RC_MAP_MSI_TVANYWHERE,
	}
};

/* Register the keymap with rc-core on module load. */
static int __init init_rc_map_msi_tvanywhere(void)
{
	return rc_map_register(&msi_tvanywhere_map);
}

/* Unregister the keymap on module unload. */
static void __exit exit_rc_map_msi_tvanywhere(void)
{
	rc_map_unregister(&msi_tvanywhere_map);
}

module_init(init_rc_map_msi_tvanywhere)
module_exit(exit_rc_map_msi_tvanywhere)

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
gpl-2.0
hiikezoe/android_kernel_nec_n06e
drivers/media/rc/keymaps/rc-avertv-303.c
7637
1860
/* avertv-303.h - Keytable for avertv_303 Remote Controller
 *
 * keymap imported from ir-keymaps.c
 *
 * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <media/rc-map.h>
#include <linux/module.h>

/* Scancode table for the AVERTV STUDIO 303 remote. */
static struct rc_map_table avertv_303[] = {
	{ 0x2a, KEY_1 },
	{ 0x32, KEY_2 },
	{ 0x3a, KEY_3 },
	{ 0x4a, KEY_4 },
	{ 0x52, KEY_5 },
	{ 0x5a, KEY_6 },
	{ 0x6a, KEY_7 },
	{ 0x72, KEY_8 },
	{ 0x7a, KEY_9 },
	{ 0x0e, KEY_0 },

	{ 0x02, KEY_POWER },
	{ 0x22, KEY_VIDEO },
	{ 0x42, KEY_AUDIO },
	{ 0x62, KEY_ZOOM },
	{ 0x0a, KEY_TV },
	{ 0x12, KEY_CD },
	{ 0x1a, KEY_TEXT },

	{ 0x16, KEY_SUBTITLE },
	{ 0x1e, KEY_REWIND },
	{ 0x06, KEY_PRINT },

	{ 0x2e, KEY_SEARCH },
	{ 0x36, KEY_SLEEP },
	{ 0x3e, KEY_SHUFFLE },
	{ 0x26, KEY_MUTE },

	{ 0x4e, KEY_RECORD },
	{ 0x56, KEY_PAUSE },
	{ 0x5e, KEY_STOP },
	{ 0x46, KEY_PLAY },

	{ 0x6e, KEY_RED },
	{ 0x0b, KEY_GREEN },
	{ 0x66, KEY_YELLOW },
	{ 0x03, KEY_BLUE },

	{ 0x76, KEY_LEFT },
	{ 0x7e, KEY_RIGHT },
	{ 0x13, KEY_DOWN },
	{ 0x1b, KEY_UP },
};

static struct rc_map_list avertv_303_map = {
	.map = {
		.scan    = avertv_303,
		.size    = ARRAY_SIZE(avertv_303),
		.rc_type = RC_TYPE_UNKNOWN,	/* Legacy IR type */
		.name    = RC_MAP_AVERTV_303,
	}
};

/* Register the keymap with rc-core on module load. */
static int __init init_rc_map_avertv_303(void)
{
	return rc_map_register(&avertv_303_map);
}

/* Unregister the keymap on module unload. */
static void __exit exit_rc_map_avertv_303(void)
{
	rc_map_unregister(&avertv_303_map);
}

module_init(init_rc_map_avertv_303)
module_exit(exit_rc_map_avertv_303)

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
gpl-2.0
yajnab/android_kernel_samsung_delos3geur
drivers/media/rc/keymaps/rc-kworld-plus-tv-analog.c
7637
2631
/* kworld-plus-tv-analog.h - Keytable for kworld_plus_tv_analog Remote Controller
 *
 * keymap imported from ir-keymaps.c
 *
 * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <media/rc-map.h>
#include <linux/module.h>

/* Kworld Plus TV Analog Lite PCI IR
 * Mauro Carvalho Chehab <mchehab@infradead.org>
 */
static struct rc_map_table kworld_plus_tv_analog[] = {
	{ 0x0c, KEY_MEDIA },		/* Kworld key */
	{ 0x16, KEY_CLOSECD },		/* -> ) */
	{ 0x1d, KEY_POWER2 },

	{ 0x00, KEY_1 },
	{ 0x01, KEY_2 },

	/* two buttons share scancode 0x02: "3" and "left" */
	{ 0x02, KEY_3 },
	/* two buttons share scancode 0x03: "4" and "right" */
	{ 0x03, KEY_4 },

	{ 0x04, KEY_5 },
	{ 0x05, KEY_6 },
	{ 0x06, KEY_7 },
	{ 0x07, KEY_8 },
	{ 0x08, KEY_9 },
	{ 0x0a, KEY_0 },

	{ 0x09, KEY_AGAIN },
	{ 0x14, KEY_MUTE },

	{ 0x20, KEY_UP },
	{ 0x21, KEY_DOWN },
	{ 0x0b, KEY_ENTER },

	{ 0x10, KEY_CHANNELUP },
	{ 0x11, KEY_CHANNELDOWN },

	/* KEY_LEFT/KEY_RIGHT cannot be mapped: their scancodes collide
	 * with '3' and '4' above; unclear what the original driver did. */

	{ 0x13, KEY_VOLUMEUP },
	{ 0x12, KEY_VOLUMEDOWN },

	/* Lower part of the remote. Several of its keycodes duplicate the
	 * digit scancodes, so only the non-conflicting ones are mapped.
	 * The original driver could somehow tell them apart, but it is not
	 * via a GPIO nor via keyup/keydown timing. */
	{ 0x19, KEY_TIME},		/* Timeshift */
	{ 0x1a, KEY_STOP},
	{ 0x1b, KEY_RECORD},

	{ 0x22, KEY_TEXT},

	{ 0x15, KEY_AUDIO},		/* ((*)) */
	{ 0x0f, KEY_ZOOM},
	{ 0x1c, KEY_CAMERA},		/* snapshot */

	{ 0x18, KEY_RED},		/* B */
	{ 0x23, KEY_GREEN},		/* C */
};

static struct rc_map_list kworld_plus_tv_analog_map = {
	.map = {
		.scan    = kworld_plus_tv_analog,
		.size    = ARRAY_SIZE(kworld_plus_tv_analog),
		.rc_type = RC_TYPE_UNKNOWN,	/* Legacy IR type */
		.name    = RC_MAP_KWORLD_PLUS_TV_ANALOG,
	}
};

/* Register the keymap with rc-core on module load. */
static int __init init_rc_map_kworld_plus_tv_analog(void)
{
	return rc_map_register(&kworld_plus_tv_analog_map);
}

/* Unregister the keymap on module unload. */
static void __exit exit_rc_map_kworld_plus_tv_analog(void)
{
	rc_map_unregister(&kworld_plus_tv_analog_map);
}

module_init(init_rc_map_kworld_plus_tv_analog)
module_exit(exit_rc_map_kworld_plus_tv_analog)

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
gpl-2.0
mzueger/linux-colibri-imx6
drivers/media/rc/keymaps/rc-norwood.c
7637
2638
/* norwood.h - Keytable for norwood Remote Controller
 *
 * keymap imported from ir-keymaps.c
 *
 * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <media/rc-map.h>
#include <linux/module.h>

/* Norwood Micro (non-Pro) TV Tuner
 * By Peter Naulls <peter@chocky.org>
 * Key comments are the functions given in the manual.
 */
static struct rc_map_table norwood[] = {
	/* Digit keys 0 through 9 */
	{ 0x20, KEY_0 },
	{ 0x21, KEY_1 },
	{ 0x22, KEY_2 },
	{ 0x23, KEY_3 },
	{ 0x24, KEY_4 },
	{ 0x25, KEY_5 },
	{ 0x26, KEY_6 },
	{ 0x27, KEY_7 },
	{ 0x28, KEY_8 },
	{ 0x29, KEY_9 },

	{ 0x78, KEY_VIDEO },		/* Video Source */
	{ 0x2c, KEY_EXIT },		/* Open/Close software */
	{ 0x2a, KEY_SELECT },		/* 2 Digit Select */
	{ 0x69, KEY_AGAIN },		/* Recall */

	{ 0x32, KEY_BRIGHTNESSUP },	/* Brightness increase */
	{ 0x33, KEY_BRIGHTNESSDOWN },	/* Brightness decrease */
	{ 0x6b, KEY_KPPLUS },		/* (not named >>>>>) */
	{ 0x6c, KEY_KPMINUS },		/* (not named <<<<<) */

	{ 0x2d, KEY_MUTE },		/* Mute */
	{ 0x30, KEY_VOLUMEUP },		/* Volume up */
	{ 0x31, KEY_VOLUMEDOWN },	/* Volume down */
	{ 0x60, KEY_CHANNELUP },	/* Channel up */
	{ 0x61, KEY_CHANNELDOWN },	/* Channel down */

	{ 0x3f, KEY_RECORD },		/* Record */
	{ 0x37, KEY_PLAY },		/* Play */
	{ 0x36, KEY_PAUSE },		/* Pause */
	{ 0x2b, KEY_STOP },		/* Stop */
	{ 0x67, KEY_FASTFORWARD },	/* Forward */
	{ 0x66, KEY_REWIND },		/* Rewind */

	{ 0x3e, KEY_SEARCH },		/* Auto Scan */
	{ 0x2e, KEY_CAMERA },		/* Capture Video */
	{ 0x6d, KEY_MENU },		/* Show/Hide Control */
	{ 0x2f, KEY_ZOOM },		/* Full Screen */
	{ 0x34, KEY_RADIO },		/* FM */
	{ 0x65, KEY_POWER },		/* Computer power */
};

static struct rc_map_list norwood_map = {
	.map = {
		.scan    = norwood,
		.size    = ARRAY_SIZE(norwood),
		.rc_type = RC_TYPE_UNKNOWN,	/* Legacy IR type */
		.name    = RC_MAP_NORWOOD,
	}
};

/* Register the keymap with rc-core on module load. */
static int __init init_rc_map_norwood(void)
{
	return rc_map_register(&norwood_map);
}

/* Unregister the keymap on module unload. */
static void __exit exit_rc_map_norwood(void)
{
	rc_map_unregister(&norwood_map);
}

module_init(init_rc_map_norwood)
module_exit(exit_rc_map_norwood)

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
gpl-2.0
qianxiaoxi/NX507J_Lollipop_kernel
drivers/media/rc/keymaps/rc-pixelview-new.c
7637
2022
/* pixelview-new.h - Keytable for pixelview_new Remote Controller
 *
 * keymap imported from ir-keymaps.c
 *
 * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <media/rc-map.h>
#include <linux/module.h>

/* Mauro Carvalho Chehab <mchehab@infradead.org>
 * present on PV MPEG 8000GT
 */
static struct rc_map_table pixelview_new[] = {
	{ 0x3c, KEY_TIME },		/* Timeshift */
	{ 0x12, KEY_POWER },

	{ 0x3d, KEY_1 },
	{ 0x38, KEY_2 },
	{ 0x18, KEY_3 },
	{ 0x35, KEY_4 },
	{ 0x39, KEY_5 },
	{ 0x15, KEY_6 },
	{ 0x36, KEY_7 },
	{ 0x3a, KEY_8 },
	{ 0x1e, KEY_9 },
	{ 0x3e, KEY_0 },

	{ 0x1c, KEY_AGAIN },		/* LOOP */
	{ 0x3f, KEY_VIDEO },		/* Source */
	{ 0x1f, KEY_LAST },		/* +100 */
	{ 0x1b, KEY_MUTE },

	{ 0x17, KEY_CHANNELDOWN },
	{ 0x16, KEY_CHANNELUP },
	{ 0x10, KEY_VOLUMEUP },
	{ 0x14, KEY_VOLUMEDOWN },
	{ 0x13, KEY_ZOOM },

	{ 0x19, KEY_CAMERA },		/* SNAPSHOT */
	{ 0x1a, KEY_SEARCH },		/* scan */

	{ 0x37, KEY_REWIND },		/* << */
	{ 0x32, KEY_RECORD },		/* o (red) */
	{ 0x33, KEY_FORWARD },		/* >> */
	{ 0x11, KEY_STOP },		/* square */
	{ 0x3b, KEY_PLAY },		/* > */
	{ 0x30, KEY_PLAYPAUSE },	/* || */

	{ 0x31, KEY_TV },
	{ 0x34, KEY_RADIO },
};

static struct rc_map_list pixelview_new_map = {
	.map = {
		.scan    = pixelview_new,
		.size    = ARRAY_SIZE(pixelview_new),
		.rc_type = RC_TYPE_UNKNOWN,	/* Legacy IR type */
		.name    = RC_MAP_PIXELVIEW_NEW,
	}
};

/* Register the keymap with rc-core on module load. */
static int __init init_rc_map_pixelview_new(void)
{
	return rc_map_register(&pixelview_new_map);
}

/* Unregister the keymap on module unload. */
static void __exit exit_rc_map_pixelview_new(void)
{
	rc_map_unregister(&pixelview_new_map);
}

module_init(init_rc_map_pixelview_new)
module_exit(exit_rc_map_pixelview_new)

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
gpl-2.0
ShinySide/HispAsian_Lollipop_G6
drivers/media/rc/keymaps/rc-real-audio-220-32-keys.c
7637
1953
/* real-audio-220-32-keys.h - Keytable for real_audio_220_32_keys Remote Controller
 *
 * keymap imported from ir-keymaps.c
 *
 * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <media/rc-map.h>
#include <linux/module.h>

/* Zogis Real Audio 220 - 32 keys IR */
static struct rc_map_table real_audio_220_32_keys[] = {
	{ 0x1c, KEY_RADIO},
	{ 0x12, KEY_POWER2},

	{ 0x01, KEY_1},
	{ 0x02, KEY_2},
	{ 0x03, KEY_3},
	{ 0x04, KEY_4},
	{ 0x05, KEY_5},
	{ 0x06, KEY_6},
	{ 0x07, KEY_7},
	{ 0x08, KEY_8},
	{ 0x09, KEY_9},
	{ 0x00, KEY_0},

	{ 0x0c, KEY_VOLUMEUP},
	{ 0x18, KEY_VOLUMEDOWN},
	{ 0x0b, KEY_CHANNELUP},
	{ 0x15, KEY_CHANNELDOWN},
	{ 0x16, KEY_ENTER},

	{ 0x11, KEY_VIDEO},		/* Source */
	{ 0x0d, KEY_AUDIO},		/* stereo */

	{ 0x0f, KEY_PREVIOUS},		/* Prev */
	{ 0x1b, KEY_TIME},		/* Timeshift */
	{ 0x1a, KEY_NEXT},		/* Next */

	{ 0x0e, KEY_STOP},
	{ 0x1f, KEY_PLAY},
	{ 0x1e, KEY_PLAYPAUSE},		/* Pause */

	{ 0x1d, KEY_RECORD},
	{ 0x13, KEY_MUTE},
	{ 0x19, KEY_CAMERA},		/* Snapshot */
};

static struct rc_map_list real_audio_220_32_keys_map = {
	.map = {
		.scan    = real_audio_220_32_keys,
		.size    = ARRAY_SIZE(real_audio_220_32_keys),
		.rc_type = RC_TYPE_UNKNOWN,	/* Legacy IR type */
		.name    = RC_MAP_REAL_AUDIO_220_32_KEYS,
	}
};

/* Register the keymap with rc-core on module load. */
static int __init init_rc_map_real_audio_220_32_keys(void)
{
	return rc_map_register(&real_audio_220_32_keys_map);
}

/* Unregister the keymap on module unload. */
static void __exit exit_rc_map_real_audio_220_32_keys(void)
{
	rc_map_unregister(&real_audio_220_32_keys_map);
}

module_init(init_rc_map_real_audio_220_32_keys)
module_exit(exit_rc_map_real_audio_220_32_keys)

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
gpl-2.0
invisiblek/android_kernel_motorola_msm8992
drivers/media/rc/keymaps/rc-pinnacle-grey.c
7637
1995
/* pinnacle-grey.h - Keytable for pinnacle_grey Remote Controller
 *
 * keymap imported from ir-keymaps.c
 *
 * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <media/rc-map.h>
#include <linux/module.h>

/* Scancode table for the grey Pinnacle remote. */
static struct rc_map_table pinnacle_grey[] = {
	{ 0x3a, KEY_0 },
	{ 0x31, KEY_1 },
	{ 0x32, KEY_2 },
	{ 0x33, KEY_3 },
	{ 0x34, KEY_4 },
	{ 0x35, KEY_5 },
	{ 0x36, KEY_6 },
	{ 0x37, KEY_7 },
	{ 0x38, KEY_8 },
	{ 0x39, KEY_9 },

	{ 0x2f, KEY_POWER },

	{ 0x2e, KEY_P },
	{ 0x1f, KEY_L },
	{ 0x2b, KEY_I },

	{ 0x2d, KEY_SCREEN },
	{ 0x1e, KEY_ZOOM },
	{ 0x1b, KEY_VOLUMEUP },
	{ 0x0f, KEY_VOLUMEDOWN },
	{ 0x17, KEY_CHANNELUP },
	{ 0x1c, KEY_CHANNELDOWN },
	{ 0x25, KEY_INFO },
	{ 0x3c, KEY_MUTE },
	{ 0x3d, KEY_LEFT },
	{ 0x3b, KEY_RIGHT },

	{ 0x3f, KEY_UP },
	{ 0x3e, KEY_DOWN },
	{ 0x1a, KEY_ENTER },

	{ 0x1d, KEY_MENU },
	{ 0x19, KEY_AGAIN },
	{ 0x16, KEY_PREVIOUSSONG },
	{ 0x13, KEY_NEXTSONG },
	{ 0x15, KEY_PAUSE },
	{ 0x0e, KEY_REWIND },
	{ 0x0d, KEY_PLAY },
	{ 0x0b, KEY_STOP },
	{ 0x07, KEY_FORWARD },
	{ 0x27, KEY_RECORD },
	{ 0x26, KEY_TUNER },
	{ 0x29, KEY_TEXT },
	{ 0x2a, KEY_MEDIA },
	{ 0x18, KEY_EPG },
};

static struct rc_map_list pinnacle_grey_map = {
	.map = {
		.scan    = pinnacle_grey,
		.size    = ARRAY_SIZE(pinnacle_grey),
		.rc_type = RC_TYPE_UNKNOWN,	/* Legacy IR type */
		.name    = RC_MAP_PINNACLE_GREY,
	}
};

/* Register the keymap with rc-core on module load. */
static int __init init_rc_map_pinnacle_grey(void)
{
	return rc_map_register(&pinnacle_grey_map);
}

/* Unregister the keymap on module unload. */
static void __exit exit_rc_map_pinnacle_grey(void)
{
	rc_map_unregister(&pinnacle_grey_map);
}

module_init(init_rc_map_pinnacle_grey)
module_exit(exit_rc_map_pinnacle_grey)

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
gpl-2.0
batlin1977/LG_L90_kernel
drivers/media/rc/keymaps/rc-adstech-dvb-t-pci.c
7637
2258
/* adstech-dvb-t-pci.h - Keytable for adstech_dvb_t_pci Remote Controller
 *
 * keymap imported from ir-keymaps.c
 *
 * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <media/rc-map.h>
#include <linux/module.h>

/* ADS Tech Instant TV DVB-T PCI Remote */
static struct rc_map_table adstech_dvb_t_pci[] = {
	/* Digit keys 0 through 9 */
	{ 0x4d, KEY_0 },
	{ 0x57, KEY_1 },
	{ 0x4f, KEY_2 },
	{ 0x53, KEY_3 },
	{ 0x56, KEY_4 },
	{ 0x4e, KEY_5 },
	{ 0x5e, KEY_6 },
	{ 0x54, KEY_7 },
	{ 0x4c, KEY_8 },
	{ 0x5c, KEY_9 },

	{ 0x5b, KEY_POWER },
	{ 0x5f, KEY_MUTE },
	{ 0x55, KEY_GOTO },
	{ 0x5d, KEY_SEARCH },
	{ 0x17, KEY_EPG },		/* Guide */
	{ 0x1f, KEY_MENU },
	{ 0x0f, KEY_UP },
	{ 0x46, KEY_DOWN },
	{ 0x16, KEY_LEFT },
	{ 0x1e, KEY_RIGHT },
	{ 0x0e, KEY_SELECT },		/* Enter */
	{ 0x5a, KEY_INFO },
	{ 0x52, KEY_EXIT },
	{ 0x59, KEY_PREVIOUS },
	{ 0x51, KEY_NEXT },
	{ 0x58, KEY_REWIND },
	{ 0x50, KEY_FORWARD },
	{ 0x44, KEY_PLAYPAUSE },
	{ 0x07, KEY_STOP },
	{ 0x1b, KEY_RECORD },
	{ 0x13, KEY_TUNER },		/* Live */
	{ 0x0a, KEY_A },
	{ 0x12, KEY_B },
	{ 0x03, KEY_RED },		/* 1 */
	{ 0x01, KEY_GREEN },		/* 2 */
	{ 0x00, KEY_YELLOW },		/* 3 */
	{ 0x06, KEY_DVD },
	{ 0x48, KEY_AUX },		/* Photo */
	{ 0x40, KEY_VIDEO },
	{ 0x19, KEY_AUDIO },		/* Music */
	{ 0x0b, KEY_CHANNELUP },
	{ 0x08, KEY_CHANNELDOWN },
	{ 0x15, KEY_VOLUMEUP },
	{ 0x1c, KEY_VOLUMEDOWN },
};

static struct rc_map_list adstech_dvb_t_pci_map = {
	.map = {
		.scan    = adstech_dvb_t_pci,
		.size    = ARRAY_SIZE(adstech_dvb_t_pci),
		.rc_type = RC_TYPE_UNKNOWN,	/* Legacy IR type */
		.name    = RC_MAP_ADSTECH_DVB_T_PCI,
	}
};

/* Register the keymap with rc-core on module load. */
static int __init init_rc_map_adstech_dvb_t_pci(void)
{
	return rc_map_register(&adstech_dvb_t_pci_map);
}

/* Unregister the keymap on module unload. */
static void __exit exit_rc_map_adstech_dvb_t_pci(void)
{
	rc_map_unregister(&adstech_dvb_t_pci_map);
}

module_init(init_rc_map_adstech_dvb_t_pci)
module_exit(exit_rc_map_adstech_dvb_t_pci)

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
gpl-2.0
lordeko/du_kernel_samsung_hlte
drivers/media/rc/keymaps/rc-genius-tvgo-a11mce.c
7637
2432
/* genius-tvgo-a11mce.h - Keytable for genius_tvgo_a11mce Remote Controller
 *
 * keymap imported from ir-keymaps.c
 *
 * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <media/rc-map.h>
#include <linux/module.h>

/*
 * Remote control for the Genius TVGO A11MCE
 * Adrian Pardini <pardo.bsso@gmail.com>
 */
static struct rc_map_table genius_tvgo_a11mce[] = {
	/* Digit keys 0 through 9 */
	{ 0x48, KEY_0 },
	{ 0x09, KEY_1 },
	{ 0x1d, KEY_2 },
	{ 0x1f, KEY_3 },
	{ 0x19, KEY_4 },
	{ 0x1b, KEY_5 },
	{ 0x11, KEY_6 },
	{ 0x17, KEY_7 },
	{ 0x12, KEY_8 },
	{ 0x16, KEY_9 },

	{ 0x54, KEY_RECORD },		/* recording */
	{ 0x06, KEY_MUTE },		/* mute */
	{ 0x10, KEY_POWER },
	{ 0x40, KEY_LAST },		/* recall */
	{ 0x4c, KEY_CHANNELUP },	/* channel / program + */
	{ 0x00, KEY_CHANNELDOWN },	/* channel / program - */
	{ 0x0d, KEY_VOLUMEUP },
	{ 0x15, KEY_VOLUMEDOWN },
	{ 0x4d, KEY_OK },		/* also labeled as Pause */
	{ 0x1c, KEY_ZOOM },		/* full screen and Stop */
	{ 0x02, KEY_MODE },		/* AV Source or Rewind */
	{ 0x04, KEY_LIST },		/* -/-- */

	/* small arrows above the digit keys */
	{ 0x1a, KEY_NEXT },		/* also Fast Forward */
	{ 0x0e, KEY_PREVIOUS },		/* also Rewind */

	/* non-standard layout; alternate labels printed on the keys */
	{ 0x1e, KEY_UP },		/* Video Setting */
	{ 0x0a, KEY_DOWN },		/* Video Default */
	{ 0x05, KEY_CAMERA },		/* Snapshot */
	{ 0x0c, KEY_RIGHT },		/* Hide Panel */

	/* four unlabeled buttons */
	{ 0x49, KEY_RED },
	{ 0x0b, KEY_GREEN },
	{ 0x13, KEY_YELLOW },
	{ 0x50, KEY_BLUE },
};

static struct rc_map_list genius_tvgo_a11mce_map = {
	.map = {
		.scan    = genius_tvgo_a11mce,
		.size    = ARRAY_SIZE(genius_tvgo_a11mce),
		.rc_type = RC_TYPE_UNKNOWN,	/* Legacy IR type */
		.name    = RC_MAP_GENIUS_TVGO_A11MCE,
	}
};

/* Register the keymap with rc-core on module load. */
static int __init init_rc_map_genius_tvgo_a11mce(void)
{
	return rc_map_register(&genius_tvgo_a11mce_map);
}

/* Unregister the keymap on module unload. */
static void __exit exit_rc_map_genius_tvgo_a11mce(void)
{
	rc_map_unregister(&genius_tvgo_a11mce_map);
}

module_init(init_rc_map_genius_tvgo_a11mce)
module_exit(exit_rc_map_genius_tvgo_a11mce)

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
gpl-2.0
amalappunni/msm8916_jalebi
drivers/media/rc/keymaps/rc-manli.c
7637
3428
/* manli.h - Keytable for manli Remote Controller
 *
 * keymap imported from ir-keymaps.c
 *
 * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <media/rc-map.h>
#include <linux/module.h>

/* Michael Tokarev <mjt@tls.msk.ru>
 * Used by MANLI MTV00[0x0c] and BeholdTV 40[13] at least, and probably
 * other cards too. Comments next to the entries give the button labels
 * (several variants where the label is ambiguous).
 */
static struct rc_map_table manli[] = {
	/* Top row: FUNCTION/FM and POWER */
	{ 0x1c, KEY_RADIO },	/*XXX*/
	{ 0x12, KEY_POWER },

	/* 3x3 digit block, 1..9 */
	{ 0x01, KEY_1 },
	{ 0x02, KEY_2 },
	{ 0x03, KEY_3 },
	{ 0x04, KEY_4 },
	{ 0x05, KEY_5 },
	{ 0x06, KEY_6 },
	{ 0x07, KEY_7 },
	{ 0x08, KEY_8 },
	{ 0x09, KEY_9 },

	/* Bottom digit row: RECALL/PLUS, 0, +100 */
	{ 0x0a, KEY_AGAIN },	/*XXX KEY_REWIND? */
	{ 0x00, KEY_0 },
	{ 0x17, KEY_DIGITS },	/*XXX*/

	/* MENU/OSD and INFO */
	{ 0x14, KEY_MENU },
	{ 0x10, KEY_INFO },

	/* Navigation cluster: Up / Left / Ok / Right / Down */
	{ 0x0b, KEY_UP },
	{ 0x18, KEY_LEFT },
	{ 0x16, KEY_OK },	/*XXX KEY_SELECT? KEY_ENTER? */
	{ 0x0c, KEY_RIGHT },
	{ 0x15, KEY_DOWN },

	/* TV/AV SOURCE and MODE/STEREO */
	{ 0x11, KEY_TV },	/*XXX*/
	{ 0x0d, KEY_MODE },	/*XXX there's no KEY_STEREO */

	/* AUDIO / Vol+ / Chan+ and SLEEP / Vol- / Chan- */
	{ 0x0f, KEY_AUDIO },
	{ 0x1b, KEY_VOLUMEUP },
	{ 0x1a, KEY_CHANNELUP },
	{ 0x0e, KEY_TIME },
	{ 0x1f, KEY_VOLUMEDOWN },
	{ 0x1e, KEY_CHANNELDOWN },

	/* MUTE and SNAPSHOT */
	{ 0x13, KEY_MUTE },
	{ 0x19, KEY_CAMERA },

	/* 0x1d unused ? */
};

static struct rc_map_list manli_map = {
	.map = {
		.scan    = manli,
		.size    = ARRAY_SIZE(manli),
		.rc_type = RC_TYPE_UNKNOWN,	/* Legacy IR type */
		.name    = RC_MAP_MANLI,
	}
};

/* Register the keymap with rc-core on module load. */
static int __init init_rc_map_manli(void)
{
	return rc_map_register(&manli_map);
}

/* Unregister the keymap on module unload. */
static void __exit exit_rc_map_manli(void)
{
	rc_map_unregister(&manli_map);
}

module_init(init_rc_map_manli)
module_exit(exit_rc_map_manli)

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
gpl-2.0
OwnROM-Devices/OwnKernel-shamu
sound/oss/trix.c
8405
11131
/*
 * sound/oss/trix.c
 *
 * Low level driver for the MediaTrix AudioTrix Pro
 * (MT-0002-PC Control Chip)
 *
 *
 * Copyright (C) by Hannu Savolainen 1993-1997
 *
 * OSS/Free for Linux is distributed under the GNU GENERAL PUBLIC LICENSE (GPL)
 * Version 2 (June 1991). See the "COPYING" file distributed with this software
 * for more info.
 *
 * Changes
 *	Alan Cox		Modularisation, cleanup.
 *	Christoph Hellwig	Adapted to module_init/module_exit
 *	Arnaldo C. de Melo	Got rid of attach_uart401
 */

#include <linux/init.h>
#include <linux/module.h>

#include "sound_config.h"
#include "sb.h"
#include "sound_firmware.h"

#include "ad1848.h"
#include "mpu401.h"

#include "trix_boot.h"

static int mpu;

static bool joystick;

/* Read one byte from the MT-0002-PC ASIC register @addr. */
static unsigned char trix_read(int addr)
{
	outb(((unsigned char) addr), 0x390);	/* MT-0002-PC ASIC address */
	return inb(0x391);	/* MT-0002-PC ASIC data */
}

/* Write @data to the MT-0002-PC ASIC register @addr. */
static void trix_write(int addr, int data)
{
	outb(((unsigned char) addr), 0x390);	/* MT-0002-PC ASIC address */
	outb(((unsigned char) data), 0x391);	/* MT-0002-PC ASIC data */
}

/* Upload the SB-emulation boot code (trix_boot[]) into the card's RAM
 * through the ASIC download port.  No-op when no firmware is loaded. */
static void download_boot(int base)
{
	int i = 0, n = trix_boot_len;

	if (trix_boot_len == 0)
		return;

	trix_write(0xf8, 0x00);	/* ??????? */
	outb((0x01), base + 6);	/* Clear the internal data pointer */
	outb((0x00), base + 6);	/* Restart */

	/*
	 * Write the boot code to the RAM upload/download register.
	 * Each write increments the internal data pointer.
	 */

	outb((0x01), base + 6);	/* Clear the internal data pointer */
	outb((0x1A), 0x390);	/* Select RAM download/upload port */

	for (i = 0; i < n; i++)
		outb((trix_boot[i]), 0x391);
	for (i = n; i < 10016; i++)	/* Clear up to first 16 bytes of data RAM */
		outb((0x00), 0x391);
	outb((0x00), base + 6);	/* Reset */
	outb((0x50), 0x390);	/* ?????? */
}

/* Program the ASIC so the WSS codec appears at hw_config->io_base.
 * Returns 1 on success, 0 if the ASIC is absent or the port is invalid. */
static int trix_set_wss_port(struct address_info *hw_config)
{
	unsigned char addr_bits;

	if (trix_read(0x15) != 0x71)	/* No ASIC signature */
	{
		MDB(printk(KERN_ERR "No AudioTrix ASIC signature found\n"));
		return 0;
	}

	/*
	 * Reset some registers.
	 */

	trix_write(0x13, 0);
	trix_write(0x14, 0);

	/*
	 * Configure the ASIC to place the codec to the proper I/O location
	 */

	switch (hw_config->io_base) {
	case 0x530:
		addr_bits = 0;
		break;
	case 0x604:
		addr_bits = 1;
		break;
	case 0xE80:
		addr_bits = 2;
		break;
	case 0xF40:
		addr_bits = 3;
		break;
	default:
		return 0;
	}

	trix_write(0x19, (trix_read(0x19) & 0x03) | addr_bits);
	return 1;
}

/*
 * Probe and attach routines for the Windows Sound System mode of
 * AudioTrix Pro
 */
static int __init init_trix_wss(struct address_info *hw_config)
{
	static unsigned char dma_bits[4] = {
		1, 2, 0, 3
	};
	struct resource *ports;
	int config_port = hw_config->io_base + 0;
	int dma1 = hw_config->dma, dma2 = hw_config->dma2;
	int old_num_mixers = num_mixers;
	u8 config, bits;
	int ret;

	switch (hw_config->irq) {
	case 7:
		bits = 8;
		break;
	case 9:
		bits = 0x10;
		break;
	case 10:
		bits = 0x18;
		break;
	case 11:
		bits = 0x20;
		break;
	default:
		printk(KERN_ERR "AudioTrix: Bad WSS IRQ %d\n", hw_config->irq);
		return 0;
	}

	switch (dma1) {
	case 0:
	case 1:
	case 3:
		break;
	default:
		printk(KERN_ERR "AudioTrix: Bad WSS DMA %d\n", dma1);
		return 0;
	}

	switch (dma2) {
	case -1:
	case 0:
	case 1:
	case 3:
		break;
	default:
		printk(KERN_ERR "AudioTrix: Bad capture DMA %d\n", dma2);
		return 0;
	}

	/*
	 * Check if the IO port returns valid signature. The original MS Sound
	 * system returns 0x04 while some cards (AudioTrix Pro for example)
	 * return 0x00.
	 */
	ports = request_region(hw_config->io_base + 4, 4, "ad1848");
	if (!ports) {
		printk(KERN_ERR "AudioTrix: MSS I/O port conflict (%x)\n", hw_config->io_base);
		return 0;
	}

	if (!request_region(hw_config->io_base, 4, "MSS config")) {
		printk(KERN_ERR "AudioTrix: MSS I/O port conflict (%x)\n", hw_config->io_base);
		release_region(hw_config->io_base + 4, 4);
		return 0;
	}

	if (!trix_set_wss_port(hw_config))
		goto fail;

	config = inb(hw_config->io_base + 3);

	if ((config & 0x3f) != 0x00) {
		MDB(printk(KERN_ERR "No MSS signature detected on port 0x%x\n", hw_config->io_base));
		goto fail;
	}

	/*
	 * Check that DMA0 is not in use with a 8 bit board.
	 */
	if (dma1 == 0 && config & 0x80) {
		printk(KERN_ERR "AudioTrix: Can't use DMA0 with a 8 bit card slot\n");
		goto fail;
	}

	/*
	 * Check that the IRQ is compatible with the card slot width.
	 */
	if (hw_config->irq > 9 && config & 0x80) {
		printk(KERN_ERR "AudioTrix: Can't use IRQ%d with a 8 bit card slot\n", hw_config->irq);
		goto fail;
	}

	ret = ad1848_detect(ports, NULL, hw_config->osp);
	if (!ret)
		goto fail;

	if (joystick == 1)
		trix_write(0x15, 0x80);

	/*
	 * Set the IRQ and DMA addresses.
	 */

	outb((bits | 0x40), config_port);

	if (dma2 == -1 || dma2 == dma1) {
		/* Single DMA mode: capture shares the playback channel. */
		bits |= dma_bits[dma1];
		dma2 = dma1;
	} else {
		unsigned char tmp;

		tmp = trix_read(0x13) & ~30;
		trix_write(0x13, tmp | 0x80 | (dma1 << 4));

		tmp = trix_read(0x14) & ~30;
		trix_write(0x14, tmp | 0x80 | (dma2 << 4));
	}

	outb((bits), config_port);	/* Write IRQ+DMA setup */

	hw_config->slots[0] = ad1848_init("AudioTrix Pro", ports,
					  hw_config->irq,
					  dma1,
					  dma2,
					  0,
					  hw_config->osp,
					  THIS_MODULE);

	if (num_mixers > old_num_mixers)	/* Mixer got installed */
	{
		AD1848_REROUTE(SOUND_MIXER_LINE1, SOUND_MIXER_LINE);	/* Line in */
		AD1848_REROUTE(SOUND_MIXER_LINE2, SOUND_MIXER_CD);
		AD1848_REROUTE(SOUND_MIXER_LINE3, SOUND_MIXER_SYNTH);	/* OPL4 */
		AD1848_REROUTE(SOUND_MIXER_SPEAKER, SOUND_MIXER_ALTPCM);	/* SB */
	}
	return 1;

fail:
	release_region(hw_config->io_base, 4);
	release_region(hw_config->io_base + 4, 4);
	return 0;
}

/* Probe the Sound Blaster compatibility mode; needs the boot firmware.
 * Returns 1 when the SB DSP attached, 0 otherwise. */
static int __init probe_trix_sb(struct address_info *hw_config)
{
	int tmp;
	unsigned char conf;
	extern int sb_be_quiet;
	int old_quiet;
	static signed char irq_translate[] = {
		-1, -1, -1, 0, 1, 2, -1, 3
	};

	if (trix_boot_len == 0)
		return 0;	/* No boot code -> no fun */

	if ((hw_config->io_base & 0xffffff8f) != 0x200)
		return 0;

	tmp = hw_config->irq;
	if (tmp > 7)
		return 0;
	if (irq_translate[tmp] == -1)
		return 0;

	tmp = hw_config->dma;
	if (tmp != 1 && tmp != 3)
		return 0;

	if (!request_region(hw_config->io_base, 16, "soundblaster")) {
		printk(KERN_ERR "AudioTrix: SB I/O port conflict (%x)\n", hw_config->io_base);
		return 0;
	}

	conf = 0x84;		/* DMA and IRQ enable */
	conf |= hw_config->io_base & 0x70;	/* I/O address bits */
	conf |= irq_translate[hw_config->irq];
	if (hw_config->dma == 3)
		conf |= 0x08;
	trix_write(0x1b, conf);

	download_boot(hw_config->io_base);

	hw_config->name = "AudioTrix SB";
	if (!sb_dsp_detect(hw_config, 0, 0, NULL)) {
		release_region(hw_config->io_base, 16);
		return 0;
	}

	hw_config->driver_use_1 = SB_NO_MIDI | SB_NO_MIXER | SB_NO_RECORDING;

	/* Prevent false alarms */
	old_quiet = sb_be_quiet;
	sb_be_quiet = 1;

	sb_dsp_init(hw_config, THIS_MODULE);

	sb_be_quiet = old_quiet;
	return 1;
}

/* Probe the MPU-401 MIDI interface of the AudioTrix Pro.
 * Returns the result of probe_uart401() (non-zero on success). */
static int __init probe_trix_mpu(struct address_info *hw_config)
{
	unsigned char conf;
	static int irq_bits[] = {
		-1, -1, -1, 1, 2, 3, -1, 4, -1, 5
	};

	if (hw_config->irq > 9) {
		printk(KERN_ERR "AudioTrix: Bad MPU IRQ %d\n", hw_config->irq);
		return 0;
	}
	if (irq_bits[hw_config->irq] == -1) {
		printk(KERN_ERR "AudioTrix: Bad MPU IRQ %d\n", hw_config->irq);
		return 0;
	}

	switch (hw_config->io_base) {
	case 0x330:
		conf = 0x00;
		break;
	case 0x370:
		conf = 0x04;
		break;
	case 0x3b0:
		conf = 0x08;
		break;
	case 0x3f0:
		conf = 0x0c;
		break;
	default:
		return 0;	/* Invalid port */
	}

	conf |= irq_bits[hw_config->irq] << 4;
	trix_write(0x19, (trix_read(0x19) & 0x83) | conf);

	hw_config->name = "AudioTrix Pro";
	return probe_uart401(hw_config, THIS_MODULE);
}

/* Tear down the WSS part: release regions and unload the codec. */
static void __exit unload_trix_wss(struct address_info *hw_config)
{
	int dma2 = hw_config->dma2;

	if (dma2 == -1)
		dma2 = hw_config->dma;

	release_region(0x390, 2);
	release_region(hw_config->io_base, 4);

	ad1848_unload(hw_config->io_base + 4,
		      hw_config->irq,
		      hw_config->dma,
		      dma2,
		      0);
	sound_unload_audiodev(hw_config->slots[0]);
}

static inline void __exit unload_trix_mpu(struct address_info *hw_config)
{
	unload_uart401(hw_config);
}

static inline void __exit unload_trix_sb(struct address_info *hw_config)
{
	sb_dsp_unload(hw_config, mpu);
}

static struct address_info cfg;		/* WSS */
static struct address_info cfg2;	/* SB emulation */
static struct address_info cfg_mpu;	/* MPU-401 */

static int sb;		/* non-zero when the SB part attached */
static int fw_load;	/* non-zero when we vmalloc'ed the firmware */

static int __initdata io	= -1;
static int __initdata irq	= -1;
static int __initdata dma	= -1;
static int __initdata dma2	= -1;	/* Set this for modules that need it */
static int __initdata sb_io	= -1;
static int __initdata sb_dma	= -1;
static int __initdata sb_irq	= -1;
static int __initdata mpu_io	= -1;
static int __initdata mpu_irq	= -1;

module_param(io, int, 0);
module_param(irq, int, 0);
module_param(dma, int, 0);
module_param(dma2, int, 0);
module_param(sb_io, int, 0);
module_param(sb_dma, int, 0);
module_param(sb_irq, int, 0);
module_param(mpu_io, int, 0);
module_param(mpu_irq, int, 0);
module_param(joystick, bool, 0);

static int __init init_trix(void)
{
	printk(KERN_INFO "MediaTrix audio driver Copyright (C) by Hannu Savolainen 1993-1996\n");

	cfg.io_base = io;
	cfg.irq = irq;
	cfg.dma = dma;
	cfg.dma2 = dma2;

	cfg2.io_base = sb_io;
	cfg2.irq = sb_irq;
	cfg2.dma = sb_dma;

	cfg_mpu.io_base = mpu_io;
	cfg_mpu.irq = mpu_irq;

	if (cfg.io_base == -1 || cfg.dma == -1 || cfg.irq == -1) {
		printk(KERN_INFO "I/O, IRQ, DMA and type are mandatory\n");
		return -EINVAL;
	}
	if (cfg2.io_base != -1 && (cfg2.irq == -1 || cfg2.dma == -1)) {
		printk(KERN_INFO "CONFIG_SB_IRQ and CONFIG_SB_DMA must be specified if SB_IO is set.\n");
		return -EINVAL;
	}
	if (cfg_mpu.io_base != -1 && cfg_mpu.irq == -1) {
		printk(KERN_INFO "CONFIG_MPU_IRQ must be specified if MPU_IO is set.\n");
		return -EINVAL;
	}

	if (!trix_boot) {
		fw_load = 1;
		trix_boot_len = mod_firmware_load("/etc/sound/trxpro.bin",
						  (char **) &trix_boot);
	}

	if (!request_region(0x390, 2, "AudioTrix")) {
		printk(KERN_ERR "AudioTrix: Config port I/O conflict\n");
		return -ENODEV;
	}

	if (!init_trix_wss(&cfg)) {
		release_region(0x390, 2);
		return -ENODEV;
	}

	/*
	 * We must attach in the right order to get the firmware
	 * loaded up in time.
	 */

	if (cfg2.io_base != -1) {
		sb = probe_trix_sb(&cfg2);
	}

	if (cfg_mpu.io_base != -1)
		mpu = probe_trix_mpu(&cfg_mpu);

	return 0;
}

static void __exit cleanup_trix(void)
{
	if (fw_load && trix_boot)
		vfree(trix_boot);
	if (sb)
		unload_trix_sb(&cfg2);
	if (mpu)
		unload_trix_mpu(&cfg_mpu);
	unload_trix_wss(&cfg);
}

module_init(init_trix);
module_exit(cleanup_trix);

#ifndef MODULE
/*
 * Parse the "trix=" kernel command line:
 * io, irq, dma, dma2, sb_io, sb_irq, sb_dma, mpu_io, mpu_irq
 */
static int __init setup_trix(char *str)
{
	/*
	 * Nine option values plus the leading count slot that
	 * get_options() fills in, so the array must hold 10 ints.
	 * The old code used ints[9] and assigned sb_dma = ints[6],
	 * which silently duplicated sb_irq and shifted the MPU
	 * parameters into the wrong slots.
	 */
	int ints[10];

	str = get_options(str, ARRAY_SIZE(ints), ints);

	io	= ints[1];
	irq	= ints[2];
	dma	= ints[3];
	dma2	= ints[4];
	sb_io	= ints[5];
	sb_irq	= ints[6];
	sb_dma	= ints[7];
	mpu_io	= ints[8];
	mpu_irq	= ints[9];

	return 1;
}

__setup("trix=", setup_trix);
#endif
MODULE_LICENSE("GPL");
gpl-2.0
bilalliberty/android_kernel_HTC_ville_evita
arch/sh/mm/gup.c
10453
6772
/*
 * Lockless get_user_pages_fast for SuperH
 *
 * Copyright (C) 2009 - 2010 Paul Mundt
 *
 * Cloned from the x86 and PowerPC versions, by:
 *
 *	Copyright (C) 2008 Nick Piggin
 *	Copyright (C) 2008 Novell Inc.
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/vmstat.h>
#include <linux/highmem.h>
#include <asm/pgtable.h>

/*
 * Load a pte for the lockless walk.  Without CONFIG_X2TLB a pte is a
 * single word and one ACCESS_ONCE() load suffices; with CONFIG_X2TLB
 * the 64-bit pte is assembled from two 32-bit halves with a recheck,
 * as explained in the comment in that branch.
 */
static inline pte_t gup_get_pte(pte_t *ptep)
{
#ifndef CONFIG_X2TLB
	return ACCESS_ONCE(*ptep);
#else
	/*
	 * With get_user_pages_fast, we walk down the pagetables without
	 * taking any locks.  For this we would like to load the pointers
	 * atomically, but that is not possible with 64-bit PTEs.  What
	 * we do have is the guarantee that a pte will only either go
	 * from not present to present, or present to not present or both
	 * -- it will not switch to a completely different present page
	 * without a TLB flush in between; something that we are blocking
	 * by holding interrupts off.
	 *
	 * Setting ptes from not present to present goes:
	 * ptep->pte_high = h;
	 * smp_wmb();
	 * ptep->pte_low = l;
	 *
	 * And present to not present goes:
	 * ptep->pte_low = 0;
	 * smp_wmb();
	 * ptep->pte_high = 0;
	 *
	 * We must ensure here that the load of pte_low sees l iff pte_high
	 * sees h.  We load pte_high *after* loading pte_low, which ensures we
	 * don't see an older value of pte_high.  *Then* we recheck pte_low,
	 * which ensures that we haven't picked up a changed pte high.  We might
	 * have got rubbish values from pte_low and pte_high, but we are
	 * guaranteed that pte_low will not have the present bit set *unless*
	 * it is 'l'.  And get_user_pages_fast only operates on present ptes, so
	 * we're safe.
	 *
	 * gup_get_pte should not be used or copied outside gup.c without being
	 * very careful -- it does not atomically load the pte or anything that
	 * is likely to be useful for you.
	 */
	pte_t pte;

retry:
	pte.pte_low = ptep->pte_low;
	smp_rmb();
	pte.pte_high = ptep->pte_high;
	smp_rmb();

	if (unlikely(pte.pte_low != ptep->pte_low))
		goto retry;

	return pte;
#endif
}

/*
 * The performance critical leaf functions are made noinline otherwise gcc
 * inlines everything into a single function which results in too much
 * register pressure.
 *
 * Walk the ptes covering [addr, end) under @pmd and take a reference on
 * each backing page, storing them in @pages at *nr onwards.  Returns 1
 * if every pte was present with the required permissions, 0 to make the
 * caller fall back to the slow path.  Called with interrupts disabled
 * (see the callers).
 */
static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	u64 mask, result;
	pte_t *ptep;

	/* Required pte bits: present + user-readable (+ writable for write). */
#ifdef CONFIG_X2TLB
	result = _PAGE_PRESENT | _PAGE_EXT(_PAGE_EXT_KERN_READ |
					   _PAGE_EXT_USER_READ);
	if (write)
		result |= _PAGE_EXT(_PAGE_EXT_KERN_WRITE |
				    _PAGE_EXT_USER_WRITE);
#elif defined(CONFIG_SUPERH64)
	result = _PAGE_PRESENT | _PAGE_USER | _PAGE_READ;
	if (write)
		result |= _PAGE_WRITE;
#else
	result = _PAGE_PRESENT | _PAGE_USER;
	if (write)
		result |= _PAGE_RW;
#endif
	/* Also refuse _PAGE_SPECIAL mappings (no struct page to pin). */
	mask = result | _PAGE_SPECIAL;

	ptep = pte_offset_map(&pmd, addr);
	do {
		pte_t pte = gup_get_pte(ptep);
		struct page *page;

		if ((pte_val(pte) & mask) != result) {
			pte_unmap(ptep);
			return 0;
		}
		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
		page = pte_page(pte);
		get_page(page);
		pages[*nr] = page;
		(*nr)++;

	} while (ptep++, addr += PAGE_SIZE, addr != end);
	pte_unmap(ptep - 1);

	return 1;
}

/*
 * Walk the pmds covering [addr, end) under @pud.  Returns 0 on the
 * first hole (caller falls back to the slow path), 1 on full success.
 */
static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
			 int write, struct page **pages, int *nr)
{
	unsigned long next;
	pmd_t *pmdp;

	pmdp = pmd_offset(&pud, addr);
	do {
		pmd_t pmd = *pmdp;

		next = pmd_addr_end(addr, end);
		if (pmd_none(pmd))
			return 0;
		if (!gup_pte_range(pmd, addr, next, write, pages, nr))
			return 0;
	} while (pmdp++, addr = next, addr != end);

	return 1;
}

/*
 * Walk the puds covering [addr, end) under @pgd; same return
 * convention as gup_pmd_range().
 */
static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
			 int write, struct page **pages, int *nr)
{
	unsigned long next;
	pud_t *pudp;

	pudp = pud_offset(&pgd, addr);
	do {
		pud_t pud = *pudp;

		next = pud_addr_end(addr, end);
		if (pud_none(pud))
			return 0;
		if (!gup_pmd_range(pud, addr, next, write, pages, nr))
			return 0;
	} while (pudp++, addr = next, addr != end);

	return 1;
}

/*
 * Like get_user_pages_fast() except its IRQ-safe in that it won't fall
 * back to the regular GUP.  Returns the number of pages pinned, which
 * may be fewer than requested; it never returns an error.
 */
int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
			  struct page **pages)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr, len, end;
	unsigned long next;
	unsigned long flags;
	pgd_t *pgdp;
	int nr = 0;

	start &= PAGE_MASK;
	addr = start;
	len = (unsigned long) nr_pages << PAGE_SHIFT;
	end = start + len;

	if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
					(void __user *)start, len)))
		return 0;

	/*
	 * This doesn't prevent pagetable teardown, but does prevent
	 * the pagetables and pages from being freed.
	 */
	local_irq_save(flags);
	pgdp = pgd_offset(mm, addr);
	do {
		pgd_t pgd = *pgdp;

		next = pgd_addr_end(addr, end);
		if (pgd_none(pgd))
			break;		/* stop early; return what we have */
		if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
			break;
	} while (pgdp++, addr = next, addr != end);
	local_irq_restore(flags);

	return nr;
}

/**
 * get_user_pages_fast() - pin user pages in memory
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * Attempt to pin user pages in memory without taking mm->mmap_sem.
 * If not successful, it will fall back to taking the lock and
 * calling get_user_pages().
 *
 * Returns number of pages pinned.  This may be fewer than the number
 * requested.  If nr_pages is 0 or negative, returns 0.  If no pages
 * were pinned, returns -errno.
 */
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
			struct page **pages)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr, len, end;
	unsigned long next;
	pgd_t *pgdp;
	int nr = 0;

	start &= PAGE_MASK;
	addr = start;
	len = (unsigned long) nr_pages << PAGE_SHIFT;

	end = start + len;
	if (end < start)
		goto slow_irqon;	/* address range wrapped */

	local_irq_disable();
	pgdp = pgd_offset(mm, addr);
	do {
		pgd_t pgd = *pgdp;

		next = pgd_addr_end(addr, end);
		if (pgd_none(pgd))
			goto slow;
		if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
			goto slow;
	} while (pgdp++, addr = next, addr != end);
	local_irq_enable();

	VM_BUG_ON(nr != (end - start) >> PAGE_SHIFT);
	return nr;

	/*
	 * Deliberately unusual layout: the slow-path labels live inside
	 * a brace block after the fast-path return so that `ret` has its
	 * own scope.
	 */
	{
		int ret;

slow:
		local_irq_enable();
slow_irqon:
		/* Try to get the remaining pages with get_user_pages */
		start += nr << PAGE_SHIFT;
		pages += nr;

		down_read(&mm->mmap_sem);
		ret = get_user_pages(current, mm, start,
			(end - start) >> PAGE_SHIFT, write, 0, pages, NULL);
		up_read(&mm->mmap_sem);

		/* Have to be a bit careful with return values */
		if (nr > 0) {
			if (ret < 0)
				ret = nr;	/* report the pages already pinned */
			else
				ret += nr;
		}

		return ret;
	}
}
gpl-2.0
spegelius/android_kernel_samsung_jf
arch/sh/mm/gup.c
10453
6772
/*
 * Lockless get_user_pages_fast for SuperH
 *
 * Copyright (C) 2009 - 2010 Paul Mundt
 *
 * Cloned from the x86 and PowerPC versions, by:
 *
 *	Copyright (C) 2008 Nick Piggin
 *	Copyright (C) 2008 Novell Inc.
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/vmstat.h>
#include <linux/highmem.h>
#include <asm/pgtable.h>

/*
 * Load a pte for the lockless walk.  Without CONFIG_X2TLB a pte is a
 * single word and one ACCESS_ONCE() load suffices; with CONFIG_X2TLB
 * the 64-bit pte is assembled from two 32-bit halves with a recheck,
 * as explained in the comment in that branch.
 */
static inline pte_t gup_get_pte(pte_t *ptep)
{
#ifndef CONFIG_X2TLB
	return ACCESS_ONCE(*ptep);
#else
	/*
	 * With get_user_pages_fast, we walk down the pagetables without
	 * taking any locks.  For this we would like to load the pointers
	 * atomically, but that is not possible with 64-bit PTEs.  What
	 * we do have is the guarantee that a pte will only either go
	 * from not present to present, or present to not present or both
	 * -- it will not switch to a completely different present page
	 * without a TLB flush in between; something that we are blocking
	 * by holding interrupts off.
	 *
	 * Setting ptes from not present to present goes:
	 * ptep->pte_high = h;
	 * smp_wmb();
	 * ptep->pte_low = l;
	 *
	 * And present to not present goes:
	 * ptep->pte_low = 0;
	 * smp_wmb();
	 * ptep->pte_high = 0;
	 *
	 * We must ensure here that the load of pte_low sees l iff pte_high
	 * sees h.  We load pte_high *after* loading pte_low, which ensures we
	 * don't see an older value of pte_high.  *Then* we recheck pte_low,
	 * which ensures that we haven't picked up a changed pte high.  We might
	 * have got rubbish values from pte_low and pte_high, but we are
	 * guaranteed that pte_low will not have the present bit set *unless*
	 * it is 'l'.  And get_user_pages_fast only operates on present ptes, so
	 * we're safe.
	 *
	 * gup_get_pte should not be used or copied outside gup.c without being
	 * very careful -- it does not atomically load the pte or anything that
	 * is likely to be useful for you.
	 */
	pte_t pte;

retry:
	pte.pte_low = ptep->pte_low;
	smp_rmb();
	pte.pte_high = ptep->pte_high;
	smp_rmb();

	if (unlikely(pte.pte_low != ptep->pte_low))
		goto retry;

	return pte;
#endif
}

/*
 * The performance critical leaf functions are made noinline otherwise gcc
 * inlines everything into a single function which results in too much
 * register pressure.
 *
 * Walk the ptes covering [addr, end) under @pmd and take a reference on
 * each backing page, storing them in @pages at *nr onwards.  Returns 1
 * if every pte was present with the required permissions, 0 to make the
 * caller fall back to the slow path.  Called with interrupts disabled
 * (see the callers).
 */
static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	u64 mask, result;
	pte_t *ptep;

	/* Required pte bits: present + user-readable (+ writable for write). */
#ifdef CONFIG_X2TLB
	result = _PAGE_PRESENT | _PAGE_EXT(_PAGE_EXT_KERN_READ |
					   _PAGE_EXT_USER_READ);
	if (write)
		result |= _PAGE_EXT(_PAGE_EXT_KERN_WRITE |
				    _PAGE_EXT_USER_WRITE);
#elif defined(CONFIG_SUPERH64)
	result = _PAGE_PRESENT | _PAGE_USER | _PAGE_READ;
	if (write)
		result |= _PAGE_WRITE;
#else
	result = _PAGE_PRESENT | _PAGE_USER;
	if (write)
		result |= _PAGE_RW;
#endif
	/* Also refuse _PAGE_SPECIAL mappings (no struct page to pin). */
	mask = result | _PAGE_SPECIAL;

	ptep = pte_offset_map(&pmd, addr);
	do {
		pte_t pte = gup_get_pte(ptep);
		struct page *page;

		if ((pte_val(pte) & mask) != result) {
			pte_unmap(ptep);
			return 0;
		}
		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
		page = pte_page(pte);
		get_page(page);
		pages[*nr] = page;
		(*nr)++;

	} while (ptep++, addr += PAGE_SIZE, addr != end);
	pte_unmap(ptep - 1);

	return 1;
}

/*
 * Walk the pmds covering [addr, end) under @pud.  Returns 0 on the
 * first hole (caller falls back to the slow path), 1 on full success.
 */
static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
			 int write, struct page **pages, int *nr)
{
	unsigned long next;
	pmd_t *pmdp;

	pmdp = pmd_offset(&pud, addr);
	do {
		pmd_t pmd = *pmdp;

		next = pmd_addr_end(addr, end);
		if (pmd_none(pmd))
			return 0;
		if (!gup_pte_range(pmd, addr, next, write, pages, nr))
			return 0;
	} while (pmdp++, addr = next, addr != end);

	return 1;
}

/*
 * Walk the puds covering [addr, end) under @pgd; same return
 * convention as gup_pmd_range().
 */
static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
			 int write, struct page **pages, int *nr)
{
	unsigned long next;
	pud_t *pudp;

	pudp = pud_offset(&pgd, addr);
	do {
		pud_t pud = *pudp;

		next = pud_addr_end(addr, end);
		if (pud_none(pud))
			return 0;
		if (!gup_pmd_range(pud, addr, next, write, pages, nr))
			return 0;
	} while (pudp++, addr = next, addr != end);

	return 1;
}

/*
 * Like get_user_pages_fast() except its IRQ-safe in that it won't fall
 * back to the regular GUP.  Returns the number of pages pinned, which
 * may be fewer than requested; it never returns an error.
 */
int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
			  struct page **pages)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr, len, end;
	unsigned long next;
	unsigned long flags;
	pgd_t *pgdp;
	int nr = 0;

	start &= PAGE_MASK;
	addr = start;
	len = (unsigned long) nr_pages << PAGE_SHIFT;
	end = start + len;

	if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
					(void __user *)start, len)))
		return 0;

	/*
	 * This doesn't prevent pagetable teardown, but does prevent
	 * the pagetables and pages from being freed.
	 */
	local_irq_save(flags);
	pgdp = pgd_offset(mm, addr);
	do {
		pgd_t pgd = *pgdp;

		next = pgd_addr_end(addr, end);
		if (pgd_none(pgd))
			break;		/* stop early; return what we have */
		if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
			break;
	} while (pgdp++, addr = next, addr != end);
	local_irq_restore(flags);

	return nr;
}

/**
 * get_user_pages_fast() - pin user pages in memory
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * Attempt to pin user pages in memory without taking mm->mmap_sem.
 * If not successful, it will fall back to taking the lock and
 * calling get_user_pages().
 *
 * Returns number of pages pinned.  This may be fewer than the number
 * requested.  If nr_pages is 0 or negative, returns 0.  If no pages
 * were pinned, returns -errno.
 */
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
			struct page **pages)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr, len, end;
	unsigned long next;
	pgd_t *pgdp;
	int nr = 0;

	start &= PAGE_MASK;
	addr = start;
	len = (unsigned long) nr_pages << PAGE_SHIFT;

	end = start + len;
	if (end < start)
		goto slow_irqon;	/* address range wrapped */

	local_irq_disable();
	pgdp = pgd_offset(mm, addr);
	do {
		pgd_t pgd = *pgdp;

		next = pgd_addr_end(addr, end);
		if (pgd_none(pgd))
			goto slow;
		if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
			goto slow;
	} while (pgdp++, addr = next, addr != end);
	local_irq_enable();

	VM_BUG_ON(nr != (end - start) >> PAGE_SHIFT);
	return nr;

	/*
	 * Deliberately unusual layout: the slow-path labels live inside
	 * a brace block after the fast-path return so that `ret` has its
	 * own scope.
	 */
	{
		int ret;

slow:
		local_irq_enable();
slow_irqon:
		/* Try to get the remaining pages with get_user_pages */
		start += nr << PAGE_SHIFT;
		pages += nr;

		down_read(&mm->mmap_sem);
		ret = get_user_pages(current, mm, start,
			(end - start) >> PAGE_SHIFT, write, 0, pages, NULL);
		up_read(&mm->mmap_sem);

		/* Have to be a bit careful with return values */
		if (nr > 0) {
			if (ret < 0)
				ret = nr;	/* report the pages already pinned */
			else
				ret += nr;
		}

		return ret;
	}
}
gpl-2.0
SatrioDwiPrabowo/Intuisy-3.4xx-Kernel-Nanhu
drivers/gpu/msm/kgsl_pwrscale.c
214
9335
/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

/*
 * KGSL GPU power-scaling framework: lets a pluggable "policy" observe
 * GPU busy/idle/sleep/wake transitions and adjust power levels, and
 * exposes policy selection through sysfs under the device's
 * "pwrscale" kobject.
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <asm/page.h>

#include "kgsl.h"
#include "kgsl_pwrscale.h"
#include "kgsl_device.h"

/* sysfs attribute wrapper carrying device-scoped show/store callbacks */
struct kgsl_pwrscale_attribute {
	struct attribute attr;
	ssize_t (*show)(struct kgsl_device *device, char *buf);
	ssize_t (*store)(struct kgsl_device *device, const char *buf,
			 size_t count);
};

#define to_pwrscale(k) container_of(k, struct kgsl_pwrscale, kobj)
#define pwrscale_to_device(p) container_of(p, struct kgsl_device, pwrscale)
#define to_device(k) container_of(k, struct kgsl_device, pwrscale_kobj)
#define to_pwrscale_attr(a) \
container_of(a, struct kgsl_pwrscale_attribute, attr)
#define to_policy_attr(a) \
container_of(a, struct kgsl_pwrscale_policy_attribute, attr)

#define PWRSCALE_ATTR(_name, _mode, _show, _store) \
struct kgsl_pwrscale_attribute pwrscale_attr_##_name = \
__ATTR(_name, _mode, _show, _store)

/* Master list of available policies */
static struct kgsl_pwrscale_policy *kgsl_pwrscale_policies[] = {
#ifdef CONFIG_MSM_SCM
	&kgsl_pwrscale_policy_tz,
#endif
#ifdef CONFIG_MSM_SLEEP_STATS_DEVICE
	&kgsl_pwrscale_policy_idlestats,
#endif
#ifdef CONFIG_MSM_DCVS
	&kgsl_pwrscale_policy_msm,
#endif
	NULL
};

/* sysfs store for "policy": select a policy by name, or "none" to detach. */
static ssize_t pwrscale_policy_store(struct kgsl_device *device,
				     const char *buf, size_t count)
{
	int i;
	struct kgsl_pwrscale_policy *policy = NULL;

	/* The special keyword none allows the user to detach all
	   policies */
	if (!strncmp("none", buf, 4)) {
		kgsl_pwrscale_detach_policy(device);
		return count;
	}

	for (i = 0; kgsl_pwrscale_policies[i]; i++) {
		if (!strncmp(kgsl_pwrscale_policies[i]->name, buf,
			     strnlen(kgsl_pwrscale_policies[i]->name,
				     PAGE_SIZE))) {
			policy = kgsl_pwrscale_policies[i];
			break;
		}
	}

	if (policy)
		if (kgsl_pwrscale_attach_policy(device, policy))
			return -EIO;

	return count;
}

/* sysfs show for "policy": current policy name (with disabled marker). */
static ssize_t pwrscale_policy_show(struct kgsl_device *device, char *buf)
{
	int ret;

	if (device->pwrscale.policy) {
		ret = snprintf(buf, PAGE_SIZE, "%s",
			       device->pwrscale.policy->name);
		if (device->pwrscale.enabled == 0)
			ret += snprintf(buf + ret, PAGE_SIZE - ret,
					" (disabled)");
		ret += snprintf(buf + ret, PAGE_SIZE - ret, "\n");
	} else
		ret = snprintf(buf, PAGE_SIZE, "none\n");

	return ret;
}

PWRSCALE_ATTR(policy, 0664, pwrscale_policy_show, pwrscale_policy_store);

/* sysfs show for "avail_policies": space-separated policy names + "none". */
static ssize_t pwrscale_avail_policies_show(struct kgsl_device *device,
					    char *buf)
{
	int i, ret = 0;

	for (i = 0; kgsl_pwrscale_policies[i]; i++) {
		ret += snprintf(buf + ret, PAGE_SIZE - ret, "%s ",
				kgsl_pwrscale_policies[i]->name);
	}
	ret += snprintf(buf + ret, PAGE_SIZE - ret, "none\n");
	return ret;
}

PWRSCALE_ATTR(avail_policies, 0444, pwrscale_avail_policies_show, NULL);

static struct attribute *pwrscale_attrs[] = {
	&pwrscale_attr_policy.attr,
	&pwrscale_attr_avail_policies.attr,
	NULL
};

/* Dispatch a policy-kobject sysfs read to the policy attribute's show(). */
static ssize_t policy_sysfs_show(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
	struct kgsl_pwrscale *pwrscale = to_pwrscale(kobj);
	struct kgsl_device *device = pwrscale_to_device(pwrscale);
	struct kgsl_pwrscale_policy_attribute *pattr = to_policy_attr(attr);
	ssize_t ret;

	if (pattr->show)
		ret = pattr->show(device, pwrscale, buf);
	else
		ret = -EIO;

	return ret;
}

/* Dispatch a policy-kobject sysfs write to the policy attribute's store(). */
static ssize_t policy_sysfs_store(struct kobject *kobj,
				  struct attribute *attr,
				  const char *buf, size_t count)
{
	struct kgsl_pwrscale *pwrscale = to_pwrscale(kobj);
	struct kgsl_device *device = pwrscale_to_device(pwrscale);
	struct kgsl_pwrscale_policy_attribute *pattr = to_policy_attr(attr);
	ssize_t ret;

	if (pattr->store)
		ret = pattr->store(device, pwrscale, buf, count);
	else
		ret = -EIO;

	return ret;
}

/* Nothing to free: the pwrscale kobject is embedded in kgsl_device. */
static void policy_sysfs_release(struct kobject *kobj)
{
}

/* Dispatch a device-kobject sysfs read to the pwrscale attribute's show(). */
static ssize_t pwrscale_sysfs_show(struct kobject *kobj,
				   struct attribute *attr, char *buf)
{
	struct kgsl_device *device = to_device(kobj);
	struct kgsl_pwrscale_attribute *pattr = to_pwrscale_attr(attr);
	ssize_t ret;

	if (pattr->show)
		ret = pattr->show(device, buf);
	else
		ret = -EIO;

	return ret;
}

/* Dispatch a device-kobject sysfs write to the pwrscale attribute's store(). */
static ssize_t pwrscale_sysfs_store(struct kobject *kobj,
				    struct attribute *attr,
				    const char *buf, size_t count)
{
	struct kgsl_device *device = to_device(kobj);
	struct kgsl_pwrscale_attribute *pattr = to_pwrscale_attr(attr);
	ssize_t ret;

	if (pattr->store)
		ret = pattr->store(device, buf, count);
	else
		ret = -EIO;

	return ret;
}

/* Nothing to free: pwrscale_kobj is embedded in kgsl_device. */
static void pwrscale_sysfs_release(struct kobject *kobj)
{
}

static const struct sysfs_ops policy_sysfs_ops = {
	.show = policy_sysfs_show,
	.store = policy_sysfs_store
};

static const struct sysfs_ops pwrscale_sysfs_ops = {
	.show = pwrscale_sysfs_show,
	.store = pwrscale_sysfs_store
};

static struct kobj_type ktype_pwrscale_policy = {
	.sysfs_ops = &policy_sysfs_ops,
	.default_attrs = NULL,
	.release = policy_sysfs_release
};

static struct kobj_type ktype_pwrscale = {
	.sysfs_ops = &pwrscale_sysfs_ops,
	.default_attrs = pwrscale_attrs,
	.release = pwrscale_sysfs_release
};

/* True when a policy is attached and scaling has not been disabled. */
#define PWRSCALE_ACTIVE(_d) \
	((_d)->pwrscale.policy && (_d)->pwrscale.enabled)

/* Forward the sleep transition to the active policy, if it cares. */
void kgsl_pwrscale_sleep(struct kgsl_device *device)
{
	if (PWRSCALE_ACTIVE(device) && device->pwrscale.policy->sleep)
		device->pwrscale.policy->sleep(device, &device->pwrscale);
}
EXPORT_SYMBOL(kgsl_pwrscale_sleep);

/* Forward the wake transition to the active policy, if it cares. */
void kgsl_pwrscale_wake(struct kgsl_device *device)
{
	if (PWRSCALE_ACTIVE(device) && device->pwrscale.policy->wake)
		device->pwrscale.policy->wake(device, &device->pwrscale);
}
EXPORT_SYMBOL(kgsl_pwrscale_wake);

/*
 * Note the GPU has become busy.  The policy callback only fires on the
 * idle->busy edge (and not when slumber was already requested); the
 * gpu_busy flag is set unconditionally.
 */
void kgsl_pwrscale_busy(struct kgsl_device *device)
{
	if (PWRSCALE_ACTIVE(device) && device->pwrscale.policy->busy)
		if ((!device->pwrscale.gpu_busy) &&
		    (device->requested_state != KGSL_STATE_SLUMBER))
			device->pwrscale.policy->busy(device,
						      &device->pwrscale);
	device->pwrscale.gpu_busy = 1;
}

/*
 * Note the GPU has gone idle.  The policy callback is suppressed while
 * a sleep/slumber transition is pending; gpu_busy is always cleared.
 */
void kgsl_pwrscale_idle(struct kgsl_device *device)
{
	if (PWRSCALE_ACTIVE(device) && device->pwrscale.policy->idle)
		if (device->requested_state != KGSL_STATE_SLUMBER &&
		    device->requested_state != KGSL_STATE_SLEEP)
			device->pwrscale.policy->idle(device,
						      &device->pwrscale);
	device->pwrscale.gpu_busy = 0;
}
EXPORT_SYMBOL(kgsl_pwrscale_idle);

/* Suppress policy callbacks without detaching the policy. */
void kgsl_pwrscale_disable(struct kgsl_device *device)
{
	device->pwrscale.enabled = 0;
}
EXPORT_SYMBOL(kgsl_pwrscale_disable);

/* Re-enable policy callbacks. */
void kgsl_pwrscale_enable(struct kgsl_device *device)
{
	device->pwrscale.enabled = 1;
}
EXPORT_SYMBOL(kgsl_pwrscale_enable);

/*
 * Register the policy's sysfs directory (named after the policy) under
 * the device's pwrscale kobject and populate it with @attr_group.
 * On group-creation failure the kobject is removed and released again.
 */
int kgsl_pwrscale_policy_add_files(struct kgsl_device *device,
				   struct kgsl_pwrscale *pwrscale,
				   struct attribute_group *attr_group)
{
	int ret;

	ret = kobject_add(&pwrscale->kobj, &device->pwrscale_kobj,
			  "%s", pwrscale->policy->name);
	if (ret)
		return ret;

	ret = sysfs_create_group(&pwrscale->kobj, attr_group);

	if (ret) {
		kobject_del(&pwrscale->kobj);
		kobject_put(&pwrscale->kobj);
	}

	return ret;
}

/* Undo kgsl_pwrscale_policy_add_files(). */
void kgsl_pwrscale_policy_remove_files(struct kgsl_device *device,
				       struct kgsl_pwrscale *pwrscale,
				       struct attribute_group *attr_group)
{
	sysfs_remove_group(&pwrscale->kobj, attr_group);
	kobject_del(&pwrscale->kobj);
	kobject_put(&pwrscale->kobj);
}

/*
 * Close the current policy (if any) and drop back to the thermal power
 * level.  Caller must hold device->mutex.
 */
static void _kgsl_pwrscale_detach_policy(struct kgsl_device *device)
{
	if (device->pwrscale.policy != NULL) {
		device->pwrscale.policy->close(device, &device->pwrscale);
		kgsl_pwrctrl_pwrlevel_change(device,
					     device->pwrctrl.thermal_pwrlevel);
	}
	device->pwrscale.policy = NULL;
}

/* Locked wrapper around _kgsl_pwrscale_detach_policy(). */
void kgsl_pwrscale_detach_policy(struct kgsl_device *device)
{
	mutex_lock(&device->mutex);
	_kgsl_pwrscale_detach_policy(device);
	mutex_unlock(&device->mutex);
}
EXPORT_SYMBOL(kgsl_pwrscale_detach_policy);

/*
 * Attach @policy to @device, detaching any previous policy first.
 * Requires at least 3 power levels; re-attaching the current policy is
 * a no-op.  Returns 0 on success or a negative errno (the policy is
 * left detached if its init() fails).
 */
int kgsl_pwrscale_attach_policy(struct kgsl_device *device,
				struct kgsl_pwrscale_policy *policy)
{
	int ret = 0;

	mutex_lock(&device->mutex);

	if (device->pwrscale.policy == policy)
		goto done;

	if (device->pwrctrl.num_pwrlevels < 3) {
		ret = -EINVAL;
		goto done;
	}

	if (device->pwrscale.policy != NULL)
		_kgsl_pwrscale_detach_policy(device);

	device->pwrscale.policy = policy;

	/* Pwrscale is enabled by default at attach time */
	kgsl_pwrscale_enable(device);

	if (policy) {
		ret = device->pwrscale.policy->init(device,
						    &device->pwrscale);
		if (ret)
			device->pwrscale.policy = NULL;
	}

done:
	mutex_unlock(&device->mutex);

	return ret;
}
EXPORT_SYMBOL(kgsl_pwrscale_attach_policy);

/*
 * Create the device's "pwrscale" sysfs directory and initialise (but
 * do not yet add) the policy kobject that lives beneath it.
 */
int kgsl_pwrscale_init(struct kgsl_device *device)
{
	int ret;

	ret = kobject_init_and_add(&device->pwrscale_kobj, &ktype_pwrscale,
				   &device->dev->kobj, "pwrscale");

	if (ret)
		return ret;

	kobject_init(&device->pwrscale.kobj, &ktype_pwrscale_policy);
	return ret;
}
EXPORT_SYMBOL(kgsl_pwrscale_init);

/* Drop the sysfs directory created by kgsl_pwrscale_init(). */
void kgsl_pwrscale_close(struct kgsl_device *device)
{
	kobject_put(&device->pwrscale_kobj);
}
EXPORT_SYMBOL(kgsl_pwrscale_close);
gpl-2.0
Baastyr/semc-es209ra-kernel
drivers/net/chelsio/subr.c
214
32401
/***************************************************************************** * * * File: subr.c * * $Revision: 1.27 $ * * $Date: 2005/06/22 01:08:36 $ * * Description: * * Various subroutines (intr,pio,etc.) used by Chelsio 10G Ethernet driver. * * part of the Chelsio 10Gb Ethernet Driver. * * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License, version 2, as * * published by the Free Software Foundation. * * * * You should have received a copy of the GNU General Public License along * * with this program; if not, write to the Free Software Foundation, Inc., * * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * * * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. * * * * http://www.chelsio.com * * * * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. * * All rights reserved. 
* * * * Maintainers: maintainers@chelsio.com * * * * Authors: Dimitrios Michailidis <dm@chelsio.com> * * Tina Yang <tainay@chelsio.com> * * Felix Marti <felix@chelsio.com> * * Scott Bardone <sbardone@chelsio.com> * * Kurt Ottaway <kottaway@chelsio.com> * * Frank DiMambro <frank@chelsio.com> * * * * History: * * * ****************************************************************************/ #include "common.h" #include "elmer0.h" #include "regs.h" #include "gmac.h" #include "cphy.h" #include "sge.h" #include "tp.h" #include "espi.h" /** * t1_wait_op_done - wait until an operation is completed * @adapter: the adapter performing the operation * @reg: the register to check for completion * @mask: a single-bit field within @reg that indicates completion * @polarity: the value of the field when the operation is completed * @attempts: number of check iterations * @delay: delay in usecs between iterations * * Wait until an operation is completed by checking a bit in a register * up to @attempts times. Returns %0 if the operation completes and %1 * otherwise. */ static int t1_wait_op_done(adapter_t *adapter, int reg, u32 mask, int polarity, int attempts, int delay) { while (1) { u32 val = readl(adapter->regs + reg) & mask; if (!!val == polarity) return 0; if (--attempts == 0) return 1; if (delay) udelay(delay); } } #define TPI_ATTEMPTS 50 /* * Write a register over the TPI interface (unlocked and locked versions). 
*/ int __t1_tpi_write(adapter_t *adapter, u32 addr, u32 value) { int tpi_busy; writel(addr, adapter->regs + A_TPI_ADDR); writel(value, adapter->regs + A_TPI_WR_DATA); writel(F_TPIWR, adapter->regs + A_TPI_CSR); tpi_busy = t1_wait_op_done(adapter, A_TPI_CSR, F_TPIRDY, 1, TPI_ATTEMPTS, 3); if (tpi_busy) CH_ALERT("%s: TPI write to 0x%x failed\n", adapter->name, addr); return tpi_busy; } int t1_tpi_write(adapter_t *adapter, u32 addr, u32 value) { int ret; spin_lock(&adapter->tpi_lock); ret = __t1_tpi_write(adapter, addr, value); spin_unlock(&adapter->tpi_lock); return ret; } /* * Read a register over the TPI interface (unlocked and locked versions). */ int __t1_tpi_read(adapter_t *adapter, u32 addr, u32 *valp) { int tpi_busy; writel(addr, adapter->regs + A_TPI_ADDR); writel(0, adapter->regs + A_TPI_CSR); tpi_busy = t1_wait_op_done(adapter, A_TPI_CSR, F_TPIRDY, 1, TPI_ATTEMPTS, 3); if (tpi_busy) CH_ALERT("%s: TPI read from 0x%x failed\n", adapter->name, addr); else *valp = readl(adapter->regs + A_TPI_RD_DATA); return tpi_busy; } int t1_tpi_read(adapter_t *adapter, u32 addr, u32 *valp) { int ret; spin_lock(&adapter->tpi_lock); ret = __t1_tpi_read(adapter, addr, valp); spin_unlock(&adapter->tpi_lock); return ret; } /* * Set a TPI parameter. */ static void t1_tpi_par(adapter_t *adapter, u32 value) { writel(V_TPIPAR(value), adapter->regs + A_TPI_PAR); } /* * Called when a port's link settings change to propagate the new values to the * associated PHY and MAC. After performing the common tasks it invokes an * OS-specific handler. */ void t1_link_changed(adapter_t *adapter, int port_id) { int link_ok, speed, duplex, fc; struct cphy *phy = adapter->port[port_id].phy; struct link_config *lc = &adapter->port[port_id].link_config; phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc); lc->speed = speed < 0 ? SPEED_INVALID : speed; lc->duplex = duplex < 0 ? 
DUPLEX_INVALID : duplex; if (!(lc->requested_fc & PAUSE_AUTONEG)) fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX); if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) { /* Set MAC speed, duplex, and flow control to match PHY. */ struct cmac *mac = adapter->port[port_id].mac; mac->ops->set_speed_duplex_fc(mac, speed, duplex, fc); lc->fc = (unsigned char)fc; } t1_link_negotiated(adapter, port_id, link_ok, speed, duplex, fc); } static int t1_pci_intr_handler(adapter_t *adapter) { u32 pcix_cause; pci_read_config_dword(adapter->pdev, A_PCICFG_INTR_CAUSE, &pcix_cause); if (pcix_cause) { pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_CAUSE, pcix_cause); t1_fatal_err(adapter); /* PCI errors are fatal */ } return 0; } #ifdef CONFIG_CHELSIO_T1_COUGAR #include "cspi.h" #endif #ifdef CONFIG_CHELSIO_T1_1G #include "fpga_defs.h" /* * PHY interrupt handler for FPGA boards. */ static int fpga_phy_intr_handler(adapter_t *adapter) { int p; u32 cause = readl(adapter->regs + FPGA_GMAC_ADDR_INTERRUPT_CAUSE); for_each_port(adapter, p) if (cause & (1 << p)) { struct cphy *phy = adapter->port[p].phy; int phy_cause = phy->ops->interrupt_handler(phy); if (phy_cause & cphy_cause_link_change) t1_link_changed(adapter, p); } writel(cause, adapter->regs + FPGA_GMAC_ADDR_INTERRUPT_CAUSE); return 0; } /* * Slow path interrupt handler for FPGAs. */ static int fpga_slow_intr(adapter_t *adapter) { u32 cause = readl(adapter->regs + A_PL_CAUSE); cause &= ~F_PL_INTR_SGE_DATA; if (cause & F_PL_INTR_SGE_ERR) t1_sge_intr_error_handler(adapter->sge); if (cause & FPGA_PCIX_INTERRUPT_GMAC) fpga_phy_intr_handler(adapter); if (cause & FPGA_PCIX_INTERRUPT_TP) { /* * FPGA doesn't support MC4 interrupts and it requires * this odd layer of indirection for MC5. 
*/ u32 tp_cause = readl(adapter->regs + FPGA_TP_ADDR_INTERRUPT_CAUSE); /* Clear TP interrupt */ writel(tp_cause, adapter->regs + FPGA_TP_ADDR_INTERRUPT_CAUSE); } if (cause & FPGA_PCIX_INTERRUPT_PCIX) t1_pci_intr_handler(adapter); /* Clear the interrupts just processed. */ if (cause) writel(cause, adapter->regs + A_PL_CAUSE); return cause != 0; } #endif /* * Wait until Elmer's MI1 interface is ready for new operations. */ static int mi1_wait_until_ready(adapter_t *adapter, int mi1_reg) { int attempts = 100, busy; do { u32 val; __t1_tpi_read(adapter, mi1_reg, &val); busy = val & F_MI1_OP_BUSY; if (busy) udelay(10); } while (busy && --attempts); if (busy) CH_ALERT("%s: MDIO operation timed out\n", adapter->name); return busy; } /* * MI1 MDIO initialization. */ static void mi1_mdio_init(adapter_t *adapter, const struct board_info *bi) { u32 clkdiv = bi->clock_elmer0 / (2 * bi->mdio_mdc) - 1; u32 val = F_MI1_PREAMBLE_ENABLE | V_MI1_MDI_INVERT(bi->mdio_mdiinv) | V_MI1_MDI_ENABLE(bi->mdio_mdien) | V_MI1_CLK_DIV(clkdiv); if (!(bi->caps & SUPPORTED_10000baseT_Full)) val |= V_MI1_SOF(1); t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_CFG, val); } #if defined(CONFIG_CHELSIO_T1_1G) || defined(CONFIG_CHELSIO_T1_COUGAR) /* * Elmer MI1 MDIO read/write operations. 
*/ static int mi1_mdio_read(adapter_t *adapter, int phy_addr, int mmd_addr, int reg_addr, unsigned int *valp) { u32 addr = V_MI1_REG_ADDR(reg_addr) | V_MI1_PHY_ADDR(phy_addr); if (mmd_addr) return -EINVAL; spin_lock(&adapter->tpi_lock); __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_ADDR, addr); __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_OP, MI1_OP_DIRECT_READ); mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP); __t1_tpi_read(adapter, A_ELMER0_PORT0_MI1_DATA, valp); spin_unlock(&adapter->tpi_lock); return 0; } static int mi1_mdio_write(adapter_t *adapter, int phy_addr, int mmd_addr, int reg_addr, unsigned int val) { u32 addr = V_MI1_REG_ADDR(reg_addr) | V_MI1_PHY_ADDR(phy_addr); if (mmd_addr) return -EINVAL; spin_lock(&adapter->tpi_lock); __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_ADDR, addr); __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_DATA, val); __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_OP, MI1_OP_DIRECT_WRITE); mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP); spin_unlock(&adapter->tpi_lock); return 0; } #if defined(CONFIG_CHELSIO_T1_1G) || defined(CONFIG_CHELSIO_T1_COUGAR) static const struct mdio_ops mi1_mdio_ops = { .init = mi1_mdio_init, .read = mi1_mdio_read, .write = mi1_mdio_write }; #endif #endif static int mi1_mdio_ext_read(adapter_t *adapter, int phy_addr, int mmd_addr, int reg_addr, unsigned int *valp) { u32 addr = V_MI1_REG_ADDR(mmd_addr) | V_MI1_PHY_ADDR(phy_addr); spin_lock(&adapter->tpi_lock); /* Write the address we want. */ __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_ADDR, addr); __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_DATA, reg_addr); __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_OP, MI1_OP_INDIRECT_ADDRESS); mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP); /* Write the operation we want. */ __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_OP, MI1_OP_INDIRECT_READ); mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP); /* Read the data. 
*/ __t1_tpi_read(adapter, A_ELMER0_PORT0_MI1_DATA, valp); spin_unlock(&adapter->tpi_lock); return 0; } static int mi1_mdio_ext_write(adapter_t *adapter, int phy_addr, int mmd_addr, int reg_addr, unsigned int val) { u32 addr = V_MI1_REG_ADDR(mmd_addr) | V_MI1_PHY_ADDR(phy_addr); spin_lock(&adapter->tpi_lock); /* Write the address we want. */ __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_ADDR, addr); __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_DATA, reg_addr); __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_OP, MI1_OP_INDIRECT_ADDRESS); mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP); /* Write the data. */ __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_DATA, val); __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_OP, MI1_OP_INDIRECT_WRITE); mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP); spin_unlock(&adapter->tpi_lock); return 0; } static const struct mdio_ops mi1_mdio_ext_ops = { .init = mi1_mdio_init, .read = mi1_mdio_ext_read, .write = mi1_mdio_ext_write }; enum { CH_BRD_T110_1CU, CH_BRD_N110_1F, CH_BRD_N210_1F, CH_BRD_T210_1F, CH_BRD_T210_1CU, CH_BRD_N204_4CU, }; static const struct board_info t1_board[] = { { .board = CHBT_BOARD_CHT110, .port_number = 1, .caps = SUPPORTED_10000baseT_Full, .chip_term = CHBT_TERM_T1, .chip_mac = CHBT_MAC_PM3393, .chip_phy = CHBT_PHY_MY3126, .clock_core = 125000000, .clock_mc3 = 150000000, .clock_mc4 = 125000000, .espi_nports = 1, .clock_elmer0 = 44, .mdio_mdien = 1, .mdio_mdiinv = 1, .mdio_mdc = 1, .mdio_phybaseaddr = 1, .gmac = &t1_pm3393_ops, .gphy = &t1_my3126_ops, .mdio_ops = &mi1_mdio_ext_ops, .desc = "Chelsio T110 1x10GBase-CX4 TOE", }, { .board = CHBT_BOARD_N110, .port_number = 1, .caps = SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE, .chip_term = CHBT_TERM_T1, .chip_mac = CHBT_MAC_PM3393, .chip_phy = CHBT_PHY_88X2010, .clock_core = 125000000, .espi_nports = 1, .clock_elmer0 = 44, .mdio_mdien = 0, .mdio_mdiinv = 0, .mdio_mdc = 1, .mdio_phybaseaddr = 0, .gmac = &t1_pm3393_ops, .gphy = &t1_mv88x201x_ops, .mdio_ops = &mi1_mdio_ext_ops, 
.desc = "Chelsio N110 1x10GBaseX NIC", }, { .board = CHBT_BOARD_N210, .port_number = 1, .caps = SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE, .chip_term = CHBT_TERM_T2, .chip_mac = CHBT_MAC_PM3393, .chip_phy = CHBT_PHY_88X2010, .clock_core = 125000000, .espi_nports = 1, .clock_elmer0 = 44, .mdio_mdien = 0, .mdio_mdiinv = 0, .mdio_mdc = 1, .mdio_phybaseaddr = 0, .gmac = &t1_pm3393_ops, .gphy = &t1_mv88x201x_ops, .mdio_ops = &mi1_mdio_ext_ops, .desc = "Chelsio N210 1x10GBaseX NIC", }, { .board = CHBT_BOARD_CHT210, .port_number = 1, .caps = SUPPORTED_10000baseT_Full, .chip_term = CHBT_TERM_T2, .chip_mac = CHBT_MAC_PM3393, .chip_phy = CHBT_PHY_88X2010, .clock_core = 125000000, .clock_mc3 = 133000000, .clock_mc4 = 125000000, .espi_nports = 1, .clock_elmer0 = 44, .mdio_mdien = 0, .mdio_mdiinv = 0, .mdio_mdc = 1, .mdio_phybaseaddr = 0, .gmac = &t1_pm3393_ops, .gphy = &t1_mv88x201x_ops, .mdio_ops = &mi1_mdio_ext_ops, .desc = "Chelsio T210 1x10GBaseX TOE", }, { .board = CHBT_BOARD_CHT210, .port_number = 1, .caps = SUPPORTED_10000baseT_Full, .chip_term = CHBT_TERM_T2, .chip_mac = CHBT_MAC_PM3393, .chip_phy = CHBT_PHY_MY3126, .clock_core = 125000000, .clock_mc3 = 133000000, .clock_mc4 = 125000000, .espi_nports = 1, .clock_elmer0 = 44, .mdio_mdien = 1, .mdio_mdiinv = 1, .mdio_mdc = 1, .mdio_phybaseaddr = 1, .gmac = &t1_pm3393_ops, .gphy = &t1_my3126_ops, .mdio_ops = &mi1_mdio_ext_ops, .desc = "Chelsio T210 1x10GBase-CX4 TOE", }, #ifdef CONFIG_CHELSIO_T1_1G { .board = CHBT_BOARD_CHN204, .port_number = 4, .caps = SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_PAUSE | SUPPORTED_TP, .chip_term = CHBT_TERM_T2, .chip_mac = CHBT_MAC_VSC7321, .chip_phy = CHBT_PHY_88E1111, .clock_core = 100000000, .espi_nports = 4, .clock_elmer0 = 44, .mdio_mdien = 0, .mdio_mdiinv = 0, .mdio_mdc = 0, .mdio_phybaseaddr = 4, .gmac = &t1_vsc7326_ops, .gphy = &t1_mv88e1xxx_ops, .mdio_ops = 
&mi1_mdio_ops, .desc = "Chelsio N204 4x100/1000BaseT NIC", }, #endif }; struct pci_device_id t1_pci_tbl[] = { CH_DEVICE(8, 0, CH_BRD_T110_1CU), CH_DEVICE(8, 1, CH_BRD_T110_1CU), CH_DEVICE(7, 0, CH_BRD_N110_1F), CH_DEVICE(10, 1, CH_BRD_N210_1F), CH_DEVICE(11, 1, CH_BRD_T210_1F), CH_DEVICE(14, 1, CH_BRD_T210_1CU), CH_DEVICE(16, 1, CH_BRD_N204_4CU), { 0 } }; MODULE_DEVICE_TABLE(pci, t1_pci_tbl); /* * Return the board_info structure with a given index. Out-of-range indices * return NULL. */ const struct board_info *t1_get_board_info(unsigned int board_id) { return board_id < ARRAY_SIZE(t1_board) ? &t1_board[board_id] : NULL; } struct chelsio_vpd_t { u32 format_version; u8 serial_number[16]; u8 mac_base_address[6]; u8 pad[2]; /* make multiple-of-4 size requirement explicit */ }; #define EEPROMSIZE (8 * 1024) #define EEPROM_MAX_POLL 4 /* * Read SEEPROM. A zero is written to the flag register when the addres is * written to the Control register. The hardware device will set the flag to a * one when 4B have been transferred to the Data register. */ int t1_seeprom_read(adapter_t *adapter, u32 addr, __le32 *data) { int i = EEPROM_MAX_POLL; u16 val; u32 v; if (addr >= EEPROMSIZE || (addr & 3)) return -EINVAL; pci_write_config_word(adapter->pdev, A_PCICFG_VPD_ADDR, (u16)addr); do { udelay(50); pci_read_config_word(adapter->pdev, A_PCICFG_VPD_ADDR, &val); } while (!(val & F_VPD_OP_FLAG) && --i); if (!(val & F_VPD_OP_FLAG)) { CH_ERR("%s: reading EEPROM address 0x%x failed\n", adapter->name, addr); return -EIO; } pci_read_config_dword(adapter->pdev, A_PCICFG_VPD_DATA, &v); *data = cpu_to_le32(v); return 0; } static int t1_eeprom_vpd_get(adapter_t *adapter, struct chelsio_vpd_t *vpd) { int addr, ret = 0; for (addr = 0; !ret && addr < sizeof(*vpd); addr += sizeof(u32)) ret = t1_seeprom_read(adapter, addr, (__le32 *)((u8 *)vpd + addr)); return ret; } /* * Read a port's MAC address from the VPD ROM. 
 */
/*
 * Read a port's MAC address from the VPD ROM.
 *
 * The VPD stores a single base MAC address; port @index is assigned
 * base + index in the last octet.  Returns 0 on success, 1 if the
 * EEPROM/VPD read failed.
 */
static int vpd_macaddress_get(adapter_t *adapter, int index, u8 mac_addr[])
{
	struct chelsio_vpd_t vpd;

	if (t1_eeprom_vpd_get(adapter, &vpd))
		return 1;
	/* First five octets are common to all ports on the adapter... */
	memcpy(mac_addr, vpd.mac_base_address, 5);
	/*
	 * ...the last octet is base + port index.
	 * NOTE(review): assumes the per-port addresses never carry into
	 * octet 4 (i.e. base[5] + index does not wrap) -- confirm against
	 * the VPD provisioning scheme.
	 */
	mac_addr[5] = vpd.mac_base_address[5] + index;
	return 0;
}

/*
 * Set up the MAC/PHY according to the requested link settings.
 *
 * If the PHY can auto-negotiate first decide what to advertise, then
 * enable/disable auto-negotiation as desired and reset.
 *
 * If the PHY does not auto-negotiate we just reset it.
 *
 * If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
 * otherwise do it later based on the outcome of auto-negotiation.
 */
int t1_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
{
	/* Only the RX/TX pause bits of the requested flow control matter here */
	unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);

	if (lc->supported & SUPPORTED_Autoneg) {
		lc->advertising &= ~(ADVERTISED_ASYM_PAUSE | ADVERTISED_PAUSE);
		if (fc) {
			/*
			 * NOTE(review): the intent appears to be "advertise
			 * symmetric PAUSE only on single-port adapters", but
			 * (PAUSE_RX | PAUSE_TX) & (nports < 2) evaluates to
			 * 0 or 1, so fc (a 2-bit mask) is compared against
			 * 0/1 -- confirm the operator precedence is as
			 * intended before relying on this branch.
			 */
			if (fc == ((PAUSE_RX | PAUSE_TX) &
				   (mac->adapter->params.nports < 2)))
				lc->advertising |= ADVERTISED_PAUSE;
			else {
				lc->advertising |= ADVERTISED_ASYM_PAUSE;
				if (fc == PAUSE_RX)
					lc->advertising |= ADVERTISED_PAUSE;
			}
		}
		phy->ops->advertise(phy, lc->advertising);

		if (lc->autoneg == AUTONEG_DISABLE) {
			/* Forced mode: program MAC and PHY now. */
			lc->speed = lc->requested_speed;
			lc->duplex = lc->requested_duplex;
			lc->fc = (unsigned char)fc;
			mac->ops->set_speed_duplex_fc(mac, lc->speed,
						      lc->duplex, fc);
			/* Also disables autoneg */
			phy->state = PHY_AUTONEG_RDY;
			phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
			phy->ops->reset(phy, 0);
		} else {
			/* MAC speed/duplex/FC are set later from the
			 * autoneg outcome. */
			phy->state = PHY_AUTONEG_EN;
			phy->ops->autoneg_enable(phy); /* also resets PHY */
		}
	} else {
		/* PHY cannot autonegotiate: just force the MAC and reset. */
		phy->state = PHY_AUTONEG_RDY;
		mac->ops->set_speed_duplex_fc(mac, -1, -1, fc);
		lc->fc = (unsigned char)fc;
		phy->ops->reset(phy, 0);
	}
	return 0;
}

/*
 * External interrupt handler for boards using elmer0.
*/ int t1_elmer0_ext_intr_handler(adapter_t *adapter) { struct cphy *phy; int phy_cause; u32 cause; t1_tpi_read(adapter, A_ELMER0_INT_CAUSE, &cause); switch (board_info(adapter)->board) { #ifdef CONFIG_CHELSIO_T1_1G case CHBT_BOARD_CHT204: case CHBT_BOARD_CHT204E: case CHBT_BOARD_CHN204: case CHBT_BOARD_CHT204V: { int i, port_bit; for_each_port(adapter, i) { port_bit = i + 1; if (!(cause & (1 << port_bit))) continue; phy = adapter->port[i].phy; phy_cause = phy->ops->interrupt_handler(phy); if (phy_cause & cphy_cause_link_change) t1_link_changed(adapter, i); } break; } case CHBT_BOARD_CHT101: if (cause & ELMER0_GP_BIT1) { /* Marvell 88E1111 interrupt */ phy = adapter->port[0].phy; phy_cause = phy->ops->interrupt_handler(phy); if (phy_cause & cphy_cause_link_change) t1_link_changed(adapter, 0); } break; case CHBT_BOARD_7500: { int p; /* * Elmer0's interrupt cause isn't useful here because there is * only one bit that can be set for all 4 ports. This means * we are forced to check every PHY's interrupt status * register to see who initiated the interrupt. */ for_each_port(adapter, p) { phy = adapter->port[p].phy; phy_cause = phy->ops->interrupt_handler(phy); if (phy_cause & cphy_cause_link_change) t1_link_changed(adapter, p); } break; } #endif case CHBT_BOARD_CHT210: case CHBT_BOARD_N210: case CHBT_BOARD_N110: if (cause & ELMER0_GP_BIT6) { /* Marvell 88x2010 interrupt */ phy = adapter->port[0].phy; phy_cause = phy->ops->interrupt_handler(phy); if (phy_cause & cphy_cause_link_change) t1_link_changed(adapter, 0); } break; case CHBT_BOARD_8000: case CHBT_BOARD_CHT110: CH_DBG(adapter, INTR, "External interrupt cause 0x%x\n", cause); if (cause & ELMER0_GP_BIT1) { /* PMC3393 INTB */ struct cmac *mac = adapter->port[0].mac; mac->ops->interrupt_handler(mac); } if (cause & ELMER0_GP_BIT5) { /* XPAK MOD_DETECT */ u32 mod_detect; t1_tpi_read(adapter, A_ELMER0_GPI_STAT, &mod_detect); CH_MSG(adapter, INFO, LINK, "XPAK %s\n", mod_detect ? 
"removed" : "inserted"); } break; #ifdef CONFIG_CHELSIO_T1_COUGAR case CHBT_BOARD_COUGAR: if (adapter->params.nports == 1) { if (cause & ELMER0_GP_BIT1) { /* Vitesse MAC */ struct cmac *mac = adapter->port[0].mac; mac->ops->interrupt_handler(mac); } if (cause & ELMER0_GP_BIT5) { /* XPAK MOD_DETECT */ } } else { int i, port_bit; for_each_port(adapter, i) { port_bit = i ? i + 1 : 0; if (!(cause & (1 << port_bit))) continue; phy = adapter->port[i].phy; phy_cause = phy->ops->interrupt_handler(phy); if (phy_cause & cphy_cause_link_change) t1_link_changed(adapter, i); } } break; #endif } t1_tpi_write(adapter, A_ELMER0_INT_CAUSE, cause); return 0; } /* Enables all interrupts. */ void t1_interrupts_enable(adapter_t *adapter) { unsigned int i; adapter->slow_intr_mask = F_PL_INTR_SGE_ERR | F_PL_INTR_TP; t1_sge_intr_enable(adapter->sge); t1_tp_intr_enable(adapter->tp); if (adapter->espi) { adapter->slow_intr_mask |= F_PL_INTR_ESPI; t1_espi_intr_enable(adapter->espi); } /* Enable MAC/PHY interrupts for each port. */ for_each_port(adapter, i) { adapter->port[i].mac->ops->interrupt_enable(adapter->port[i].mac); adapter->port[i].phy->ops->interrupt_enable(adapter->port[i].phy); } /* Enable PCIX & external chip interrupts on ASIC boards. */ if (t1_is_asic(adapter)) { u32 pl_intr = readl(adapter->regs + A_PL_ENABLE); /* PCI-X interrupts */ pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_ENABLE, 0xffffffff); adapter->slow_intr_mask |= F_PL_INTR_EXT | F_PL_INTR_PCIX; pl_intr |= F_PL_INTR_EXT | F_PL_INTR_PCIX; writel(pl_intr, adapter->regs + A_PL_ENABLE); } } /* Disables all interrupts. */ void t1_interrupts_disable(adapter_t* adapter) { unsigned int i; t1_sge_intr_disable(adapter->sge); t1_tp_intr_disable(adapter->tp); if (adapter->espi) t1_espi_intr_disable(adapter->espi); /* Disable MAC/PHY interrupts for each port. 
*/ for_each_port(adapter, i) { adapter->port[i].mac->ops->interrupt_disable(adapter->port[i].mac); adapter->port[i].phy->ops->interrupt_disable(adapter->port[i].phy); } /* Disable PCIX & external chip interrupts. */ if (t1_is_asic(adapter)) writel(0, adapter->regs + A_PL_ENABLE); /* PCI-X interrupts */ pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_ENABLE, 0); adapter->slow_intr_mask = 0; } /* Clears all interrupts */ void t1_interrupts_clear(adapter_t* adapter) { unsigned int i; t1_sge_intr_clear(adapter->sge); t1_tp_intr_clear(adapter->tp); if (adapter->espi) t1_espi_intr_clear(adapter->espi); /* Clear MAC/PHY interrupts for each port. */ for_each_port(adapter, i) { adapter->port[i].mac->ops->interrupt_clear(adapter->port[i].mac); adapter->port[i].phy->ops->interrupt_clear(adapter->port[i].phy); } /* Enable interrupts for external devices. */ if (t1_is_asic(adapter)) { u32 pl_intr = readl(adapter->regs + A_PL_CAUSE); writel(pl_intr | F_PL_INTR_EXT | F_PL_INTR_PCIX, adapter->regs + A_PL_CAUSE); } /* PCI-X interrupts */ pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_CAUSE, 0xffffffff); } /* * Slow path interrupt handler for ASICs. */ static int asic_slow_intr(adapter_t *adapter) { u32 cause = readl(adapter->regs + A_PL_CAUSE); cause &= adapter->slow_intr_mask; if (!cause) return 0; if (cause & F_PL_INTR_SGE_ERR) t1_sge_intr_error_handler(adapter->sge); if (cause & F_PL_INTR_TP) t1_tp_intr_handler(adapter->tp); if (cause & F_PL_INTR_ESPI) t1_espi_intr_handler(adapter->espi); if (cause & F_PL_INTR_PCIX) t1_pci_intr_handler(adapter); if (cause & F_PL_INTR_EXT) t1_elmer0_ext_intr(adapter); /* Clear the interrupts just processed. 
*/ writel(cause, adapter->regs + A_PL_CAUSE); readl(adapter->regs + A_PL_CAUSE); /* flush writes */ return 1; } int t1_slow_intr_handler(adapter_t *adapter) { #ifdef CONFIG_CHELSIO_T1_1G if (!t1_is_asic(adapter)) return fpga_slow_intr(adapter); #endif return asic_slow_intr(adapter); } /* Power sequencing is a work-around for Intel's XPAKs. */ static void power_sequence_xpak(adapter_t* adapter) { u32 mod_detect; u32 gpo; /* Check for XPAK */ t1_tpi_read(adapter, A_ELMER0_GPI_STAT, &mod_detect); if (!(ELMER0_GP_BIT5 & mod_detect)) { /* XPAK is present */ t1_tpi_read(adapter, A_ELMER0_GPO, &gpo); gpo |= ELMER0_GP_BIT18; t1_tpi_write(adapter, A_ELMER0_GPO, gpo); } } int __devinit t1_get_board_rev(adapter_t *adapter, const struct board_info *bi, struct adapter_params *p) { p->chip_version = bi->chip_term; p->is_asic = (p->chip_version != CHBT_TERM_FPGA); if (p->chip_version == CHBT_TERM_T1 || p->chip_version == CHBT_TERM_T2 || p->chip_version == CHBT_TERM_FPGA) { u32 val = readl(adapter->regs + A_TP_PC_CONFIG); val = G_TP_PC_REV(val); if (val == 2) p->chip_revision = TERM_T1B; else if (val == 3) p->chip_revision = TERM_T2; else return -1; } else return -1; return 0; } /* * Enable board components other than the Chelsio chip, such as external MAC * and PHY. */ static int board_init(adapter_t *adapter, const struct board_info *bi) { switch (bi->board) { case CHBT_BOARD_8000: case CHBT_BOARD_N110: case CHBT_BOARD_N210: case CHBT_BOARD_CHT210: case CHBT_BOARD_COUGAR: t1_tpi_par(adapter, 0xf); t1_tpi_write(adapter, A_ELMER0_GPO, 0x800); break; case CHBT_BOARD_CHT110: t1_tpi_par(adapter, 0xf); t1_tpi_write(adapter, A_ELMER0_GPO, 0x1800); /* TBD XXX Might not need. This fixes a problem * described in the Intel SR XPAK errata. 
*/ power_sequence_xpak(adapter); break; #ifdef CONFIG_CHELSIO_T1_1G case CHBT_BOARD_CHT204E: /* add config space write here */ case CHBT_BOARD_CHT204: case CHBT_BOARD_CHT204V: case CHBT_BOARD_CHN204: t1_tpi_par(adapter, 0xf); t1_tpi_write(adapter, A_ELMER0_GPO, 0x804); break; case CHBT_BOARD_CHT101: case CHBT_BOARD_7500: t1_tpi_par(adapter, 0xf); t1_tpi_write(adapter, A_ELMER0_GPO, 0x1804); break; #endif } return 0; } /* * Initialize and configure the Terminator HW modules. Note that external * MAC and PHYs are initialized separately. */ int t1_init_hw_modules(adapter_t *adapter) { int err = -EIO; const struct board_info *bi = board_info(adapter); if (!bi->clock_mc4) { u32 val = readl(adapter->regs + A_MC4_CFG); writel(val | F_READY | F_MC4_SLOW, adapter->regs + A_MC4_CFG); writel(F_M_BUS_ENABLE | F_TCAM_RESET, adapter->regs + A_MC5_CONFIG); } #ifdef CONFIG_CHELSIO_T1_COUGAR if (adapter->cspi && t1_cspi_init(adapter->cspi)) goto out_err; #endif if (adapter->espi && t1_espi_init(adapter->espi, bi->chip_mac, bi->espi_nports)) goto out_err; if (t1_tp_reset(adapter->tp, &adapter->params.tp, bi->clock_core)) goto out_err; err = t1_sge_configure(adapter->sge, &adapter->params.sge); if (err) goto out_err; err = 0; out_err: return err; } /* * Determine a card's PCI mode. */ static void __devinit get_pci_mode(adapter_t *adapter, struct chelsio_pci_params *p) { static const unsigned short speed_map[] = { 33, 66, 100, 133 }; u32 pci_mode; pci_read_config_dword(adapter->pdev, A_PCICFG_MODE, &pci_mode); p->speed = speed_map[G_PCI_MODE_CLK(pci_mode)]; p->width = (pci_mode & F_PCI_MODE_64BIT) ? 64 : 32; p->is_pcix = (pci_mode & F_PCI_MODE_PCIX) != 0; } /* * Release the structures holding the SW per-Terminator-HW-module state. 
*/ void t1_free_sw_modules(adapter_t *adapter) { unsigned int i; for_each_port(adapter, i) { struct cmac *mac = adapter->port[i].mac; struct cphy *phy = adapter->port[i].phy; if (mac) mac->ops->destroy(mac); if (phy) phy->ops->destroy(phy); } if (adapter->sge) t1_sge_destroy(adapter->sge); if (adapter->tp) t1_tp_destroy(adapter->tp); if (adapter->espi) t1_espi_destroy(adapter->espi); #ifdef CONFIG_CHELSIO_T1_COUGAR if (adapter->cspi) t1_cspi_destroy(adapter->cspi); #endif } static void __devinit init_link_config(struct link_config *lc, const struct board_info *bi) { lc->supported = bi->caps; lc->requested_speed = lc->speed = SPEED_INVALID; lc->requested_duplex = lc->duplex = DUPLEX_INVALID; lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX; if (lc->supported & SUPPORTED_Autoneg) { lc->advertising = lc->supported; lc->autoneg = AUTONEG_ENABLE; lc->requested_fc |= PAUSE_AUTONEG; } else { lc->advertising = 0; lc->autoneg = AUTONEG_DISABLE; } } #ifdef CONFIG_CHELSIO_T1_COUGAR if (bi->clock_cspi && !(adapter->cspi = t1_cspi_create(adapter))) { CH_ERR("%s: CSPI initialization failed\n", adapter->name); goto error; } #endif /* * Allocate and initialize the data structures that hold the SW state of * the Terminator HW modules. 
*/ int __devinit t1_init_sw_modules(adapter_t *adapter, const struct board_info *bi) { unsigned int i; adapter->params.brd_info = bi; adapter->params.nports = bi->port_number; adapter->params.stats_update_period = bi->gmac->stats_update_period; adapter->sge = t1_sge_create(adapter, &adapter->params.sge); if (!adapter->sge) { CH_ERR("%s: SGE initialization failed\n", adapter->name); goto error; } if (bi->espi_nports && !(adapter->espi = t1_espi_create(adapter))) { CH_ERR("%s: ESPI initialization failed\n", adapter->name); goto error; } adapter->tp = t1_tp_create(adapter, &adapter->params.tp); if (!adapter->tp) { CH_ERR("%s: TP initialization failed\n", adapter->name); goto error; } board_init(adapter, bi); bi->mdio_ops->init(adapter, bi); if (bi->gphy->reset) bi->gphy->reset(adapter); if (bi->gmac->reset) bi->gmac->reset(adapter); for_each_port(adapter, i) { u8 hw_addr[6]; struct cmac *mac; int phy_addr = bi->mdio_phybaseaddr + i; adapter->port[i].phy = bi->gphy->create(adapter, phy_addr, bi->mdio_ops); if (!adapter->port[i].phy) { CH_ERR("%s: PHY %d initialization failed\n", adapter->name, i); goto error; } adapter->port[i].mac = mac = bi->gmac->create(adapter, i); if (!mac) { CH_ERR("%s: MAC %d initialization failed\n", adapter->name, i); goto error; } /* * Get the port's MAC addresses either from the EEPROM if one * exists or the one hardcoded in the MAC. */ if (!t1_is_asic(adapter) || bi->chip_mac == CHBT_MAC_DUMMY) mac->ops->macaddress_get(mac, hw_addr); else if (vpd_macaddress_get(adapter, i, hw_addr)) { CH_ERR("%s: could not read MAC address from VPD ROM\n", adapter->port[i].dev->name); goto error; } memcpy(adapter->port[i].dev->dev_addr, hw_addr, ETH_ALEN); init_link_config(&adapter->port[i].link_config, bi); } get_pci_mode(adapter, &adapter->params.pci); t1_interrupts_clear(adapter); return 0; error: t1_free_sw_modules(adapter); return -1; }
gpl-2.0
NXT-F1V3/kernel_dev
drivers/gpu/drm/omapdrm/omap_encoder.c
214
5160
/*
 * drivers/gpu/drm/omapdrm/omap_encoder.c
 *
 * Copyright (C) 2011 Texas Instruments
 * Author: Rob Clark <rob@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/list.h>

#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>

#include "omap_drv.h"

/*
 * encoder funcs
 */

#define to_omap_encoder(x) container_of(x, struct omap_encoder, base)

/* The encoder and connector both map to same dssdev.. the encoder
 * handles the 'active' parts, ie. anything the modifies the state
 * of the hw, and the connector handles the 'read-only' parts, like
 * detecting connection and reading edid.
 */
struct omap_encoder {
	struct drm_encoder base;	/* must be first for to_omap_encoder() */
	struct omap_dss_device *dssdev;	/* the DSS output this encoder drives */
};

/* Return the omapdss device backing a DRM encoder. */
struct omap_dss_device *omap_encoder_get_dssdev(struct drm_encoder *encoder)
{
	struct omap_encoder *omap_encoder = to_omap_encoder(encoder);

	return omap_encoder->dssdev;
}

/* drm_encoder_funcs.destroy: unregister from DRM and free our wrapper. */
static void omap_encoder_destroy(struct drm_encoder *encoder)
{
	struct omap_encoder *omap_encoder = to_omap_encoder(encoder);

	drm_encoder_cleanup(encoder);
	kfree(omap_encoder);
}

static const struct drm_encoder_funcs omap_encoder_funcs = {
	.destroy = omap_encoder_destroy,
};

/*
 * Propagate the HDMI-vs-DVI mode of the connector attached to this
 * encoder down to the DSS driver, and (for HDMI) build and hand over
 * the AVI infoframe for the adjusted mode.
 */
static void omap_encoder_mode_set(struct drm_encoder *encoder,
				  struct drm_display_mode *mode,
				  struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = encoder->dev;
	struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
	struct omap_dss_device *dssdev = omap_encoder->dssdev;
	struct drm_connector *connector;
	bool hdmi_mode;
	int r;

	/* Find the connector currently routed to this encoder to learn
	 * whether the sink negotiated HDMI mode. */
	hdmi_mode = false;
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		if (connector->encoder == encoder) {
			hdmi_mode = omap_connector_get_hdmi_mode(connector);
			break;
		}
	}

	if (dssdev->driver->set_hdmi_mode)
		dssdev->driver->set_hdmi_mode(dssdev, hdmi_mode);

	if (hdmi_mode && dssdev->driver->set_hdmi_infoframe) {
		struct hdmi_avi_infoframe avi;

		r = drm_hdmi_avi_infoframe_from_display_mode(&avi,
							     adjusted_mode);
		/* Only forward the infoframe if it was built successfully. */
		if (r == 0)
			dssdev->driver->set_hdmi_infoframe(dssdev, &avi);
	}
}

/* drm_encoder_helper_funcs.disable: turn the DSS output off. */
static void omap_encoder_disable(struct drm_encoder *encoder)
{
	struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
	struct omap_dss_device *dssdev = omap_encoder->dssdev;
	struct omap_dss_driver *dssdrv = dssdev->driver;

	dssdrv->disable(dssdev);
}

/*
 * Validate and program video timings on the DSS device.  If the driver
 * has no check_timings hook, fall back to comparing against the timings
 * it currently reports; any mismatch is rejected with -EINVAL.
 * Returns 0 on success or a negative errno.
 */
static int omap_encoder_update(struct drm_encoder *encoder,
			       enum omap_channel channel,
			       struct omap_video_timings *timings)
{
	struct drm_device *dev = encoder->dev;
	struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
	struct omap_dss_device *dssdev = omap_encoder->dssdev;
	struct omap_dss_driver *dssdrv = dssdev->driver;
	int ret;

	if (dssdrv->check_timings) {
		ret = dssdrv->check_timings(dssdev, timings);
	} else {
		struct omap_video_timings t = {0};

		dssdrv->get_timings(dssdev, &t);

		if (memcmp(timings, &t, sizeof(struct omap_video_timings)))
			ret = -EINVAL;
		else
			ret = 0;
	}

	if (ret) {
		dev_err(dev->dev, "could not set timings: %d\n", ret);
		return ret;
	}

	if (dssdrv->set_timings)
		dssdrv->set_timings(dssdev, timings);

	return 0;
}

/*
 * drm_encoder_helper_funcs.enable: push the CRTC's timings to the DSS
 * device, then enable it.  Note the omap_encoder_update() result is not
 * checked here; a failure is only reported by dssdrv->enable().
 */
static void omap_encoder_enable(struct drm_encoder *encoder)
{
	struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
	struct omap_dss_device *dssdev = omap_encoder->dssdev;
	struct omap_dss_driver *dssdrv = dssdev->driver;
	int r;

	omap_encoder_update(encoder, omap_crtc_channel(encoder->crtc),
			    omap_crtc_timings(encoder->crtc));

	r = dssdrv->enable(dssdev);
	if (r)
		dev_err(encoder->dev->dev,
			"Failed to enable display '%s': %d\n",
			dssdev->name, r);
}

/* Atomic check: nothing to validate at the encoder level. */
static int omap_encoder_atomic_check(struct drm_encoder *encoder,
				     struct drm_crtc_state *crtc_state,
				     struct drm_connector_state *conn_state)
{
	return 0;
}

static const struct drm_encoder_helper_funcs omap_encoder_helper_funcs = {
	.mode_set = omap_encoder_mode_set,
	.disable = omap_encoder_disable,
	.enable = omap_encoder_enable,
	.atomic_check = omap_encoder_atomic_check,
};

/* initialize encoder */
struct drm_encoder *omap_encoder_init(struct drm_device *dev,
				      struct omap_dss_device *dssdev)
{
	struct drm_encoder *encoder = NULL;
	struct omap_encoder *omap_encoder;

	omap_encoder = kzalloc(sizeof(*omap_encoder), GFP_KERNEL);
	if (!omap_encoder)
		goto fail;

	omap_encoder->dssdev = dssdev;

	encoder = &omap_encoder->base;

	drm_encoder_init(dev, encoder, &omap_encoder_funcs,
			 DRM_MODE_ENCODER_TMDS, NULL);
	drm_encoder_helper_add(encoder, &omap_encoder_helper_funcs);

	return encoder;

fail:
	/* NOTE(review): the only jump here happens before 'encoder' is
	 * assigned, so it is always NULL and this destroy call is dead
	 * code -- harmless, kept for symmetry with future error paths. */
	if (encoder)
		omap_encoder_destroy(encoder);

	return NULL;
}
gpl-2.0
linux-pmfs/pmfs
drivers/irqchip/irq-sun4i.c
470
4198
/*
 * Allwinner A1X SoCs IRQ chip driver.
 *
 * Copyright (C) 2012 Maxime Ripard
 *
 * Maxime Ripard <maxime.ripard@free-electrons.com>
 *
 * Based on code from
 * Allwinner Technology Co., Ltd. <www.allwinnertech.com>
 * Benn Huang <benn@allwinnertech.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2.  This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/io.h>
#include <linux/irq.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>

#include <asm/exception.h>
#include <asm/mach/irq.h>

#include "irqchip.h"

/* Register offsets; (x) selects one of the three 32-interrupt banks. */
#define SUN4I_IRQ_VECTOR_REG		0x00
#define SUN4I_IRQ_PROTECTION_REG	0x08
#define SUN4I_IRQ_NMI_CTRL_REG		0x0c
#define SUN4I_IRQ_PENDING_REG(x)	(0x10 + 0x4 * x)
#define SUN4I_IRQ_FIQ_PENDING_REG(x)	(0x20 + 0x4 * x)
#define SUN4I_IRQ_ENABLE_REG(x)		(0x40 + 0x4 * x)
#define SUN4I_IRQ_MASK_REG(x)		(0x50 + 0x4 * x)

static void __iomem *sun4i_irq_base;	/* mapped controller registers */
static struct irq_domain *sun4i_irq_domain;

static asmlinkage void __exception_irq_entry sun4i_handle_irq(struct pt_regs *regs);

/*
 * Acknowledge an interrupt by writing its bit back to the pending
 * register of its bank.
 * NOTE(review): this is a read-modify-write -- if the pending register
 * is write-1-to-clear (typical for this class of hardware), writing
 * "val | bit" would also clear any *other* interrupts pending in the
 * same bank.  Confirm against the A1X manual.
 */
static void sun4i_irq_ack(struct irq_data *irqd)
{
	unsigned int irq = irqd_to_hwirq(irqd);
	unsigned int irq_off = irq % 32;	/* bit within the bank */
	int reg = irq / 32;			/* bank index 0..2 */
	u32 val;

	val = readl(sun4i_irq_base + SUN4I_IRQ_PENDING_REG(reg));
	writel(val | (1 << irq_off),
	       sun4i_irq_base + SUN4I_IRQ_PENDING_REG(reg));
}

/* Mask an interrupt: clear its bit in the bank's enable register. */
static void sun4i_irq_mask(struct irq_data *irqd)
{
	unsigned int irq = irqd_to_hwirq(irqd);
	unsigned int irq_off = irq % 32;
	int reg = irq / 32;
	u32 val;

	val = readl(sun4i_irq_base + SUN4I_IRQ_ENABLE_REG(reg));
	writel(val & ~(1 << irq_off),
	       sun4i_irq_base + SUN4I_IRQ_ENABLE_REG(reg));
}

/* Unmask an interrupt: set its bit in the bank's enable register. */
static void sun4i_irq_unmask(struct irq_data *irqd)
{
	unsigned int irq = irqd_to_hwirq(irqd);
	unsigned int irq_off = irq % 32;
	int reg = irq / 32;
	u32 val;

	val = readl(sun4i_irq_base + SUN4I_IRQ_ENABLE_REG(reg));
	writel(val | (1 << irq_off),
	       sun4i_irq_base + SUN4I_IRQ_ENABLE_REG(reg));
}

static struct irq_chip sun4i_irq_chip = {
	.name		= "sun4i_irq",
	.irq_ack	= sun4i_irq_ack,
	.irq_mask	= sun4i_irq_mask,
	.irq_unmask	= sun4i_irq_unmask,
};

/* Domain .map: attach our chip and the level-IRQ flow to each virq. */
static int sun4i_irq_map(struct irq_domain *d, unsigned int virq,
			 irq_hw_number_t hw)
{
	irq_set_chip_and_handler(virq, &sun4i_irq_chip, handle_level_irq);
	set_irq_flags(virq, IRQF_VALID | IRQF_PROBE);

	return 0;
}

static struct irq_domain_ops sun4i_irq_ops = {
	.map = sun4i_irq_map,
	.xlate = irq_domain_xlate_onecell,
};

/*
 * Probe from DT: map registers, quiesce the controller (all sources
 * disabled, nothing pending), register the 3x32 linear domain and
 * install the top-level handler.
 */
static int __init sun4i_of_init(struct device_node *node,
				struct device_node *parent)
{
	sun4i_irq_base = of_iomap(node, 0);
	if (!sun4i_irq_base)
		panic("%s: unable to map IC registers\n",
		      node->full_name);

	/* Disable all interrupts */
	writel(0, sun4i_irq_base + SUN4I_IRQ_ENABLE_REG(0));
	writel(0, sun4i_irq_base + SUN4I_IRQ_ENABLE_REG(1));
	writel(0, sun4i_irq_base + SUN4I_IRQ_ENABLE_REG(2));

	/* Mask all the interrupts.
	 * NOTE(review): whether 0 means "masked" here depends on the
	 * hardware's mask-register polarity -- confirm with the manual. */
	writel(0, sun4i_irq_base + SUN4I_IRQ_MASK_REG(0));
	writel(0, sun4i_irq_base + SUN4I_IRQ_MASK_REG(1));
	writel(0, sun4i_irq_base + SUN4I_IRQ_MASK_REG(2));

	/* Clear all the pending interrupts */
	writel(0xffffffff, sun4i_irq_base + SUN4I_IRQ_PENDING_REG(0));
	writel(0xffffffff, sun4i_irq_base + SUN4I_IRQ_PENDING_REG(1));
	writel(0xffffffff, sun4i_irq_base + SUN4I_IRQ_PENDING_REG(2));

	/* Enable protection mode */
	writel(0x01, sun4i_irq_base + SUN4I_IRQ_PROTECTION_REG);

	/* Configure the external interrupt source type */
	writel(0x00, sun4i_irq_base + SUN4I_IRQ_NMI_CTRL_REG);

	sun4i_irq_domain = irq_domain_add_linear(node, 3 * 32,
						 &sun4i_irq_ops, NULL);
	if (!sun4i_irq_domain)
		panic("%s: unable to create IRQ domain\n", node->full_name);

	set_handle_irq(sun4i_handle_irq);

	return 0;
}
IRQCHIP_DECLARE(allwinner_sun4i_ic, "allwinner,sun4i-ic", sun4i_of_init);

/*
 * Top-level handler: the vector register reports the highest-priority
 * pending hwirq as a byte offset (hence the >>2).  Loop until it reads
 * back 0, dispatching each hwirq through the domain.
 */
static asmlinkage void __exception_irq_entry sun4i_handle_irq(struct pt_regs *regs)
{
	u32 irq, hwirq;

	hwirq = readl(sun4i_irq_base + SUN4I_IRQ_VECTOR_REG) >> 2;
	while (hwirq != 0) {
		irq = irq_find_mapping(sun4i_irq_domain, hwirq);
		handle_IRQ(irq, regs);
		hwirq = readl(sun4i_irq_base + SUN4I_IRQ_VECTOR_REG) >> 2;
	}
}
gpl-2.0
Alberto96/android_kernel_ulefone_k11ta_a
arch/arm/mach-at91/board-kafa.c
470
2862
/* * linux/arch/arm/mach-at91/board-kafa.c * * Copyright (C) 2006 Sperry-Sun * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/types.h> #include <linux/gpio.h> #include <linux/init.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/platform_device.h> #include <mach/hardware.h> #include <asm/setup.h> #include <asm/mach-types.h> #include <asm/irq.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <asm/mach/irq.h> #include <mach/cpu.h> #include "at91_aic.h" #include "board.h" #include "generic.h" #include "gpio.h" static void __init kafa_init_early(void) { /* Set cpu type: PQFP */ at91rm9200_set_type(ARCH_REVISON_9200_PQFP); /* Initialize processor: 18.432 MHz crystal */ at91_initialize(18432000); } static struct macb_platform_data __initdata kafa_eth_data = { .phy_irq_pin = AT91_PIN_PC4, .is_rmii = 0, }; static struct at91_usbh_data __initdata kafa_usbh_data = { .ports = 1, .vbus_pin = {-EINVAL, -EINVAL}, .overcurrent_pin= {-EINVAL, -EINVAL}, }; static struct at91_udc_data __initdata kafa_udc_data = { .vbus_pin = AT91_PIN_PB6, .pullup_pin = AT91_PIN_PB7, }; /* * LEDs */ static struct gpio_led kafa_leds[] = { { /* D1 */ .name = "led1", .gpio = AT91_PIN_PB4, .active_low = 1, .default_trigger = "heartbeat", }, }; static void __init kafa_board_init(void) { /* Serial */ /* DBGU on ttyS0. 
(Rx & Tx only) */ at91_register_uart(0, 0, 0); /* USART0 on ttyS1 (Rx, Tx, CTS, RTS) */ at91_register_uart(AT91RM9200_ID_US0, 1, ATMEL_UART_CTS | ATMEL_UART_RTS); at91_add_device_serial(); /* Ethernet */ at91_add_device_eth(&kafa_eth_data); /* USB Host */ at91_add_device_usbh(&kafa_usbh_data); /* USB Device */ at91_add_device_udc(&kafa_udc_data); /* I2C */ at91_add_device_i2c(NULL, 0); /* SPI */ at91_add_device_spi(NULL, 0); /* LEDs */ at91_gpio_leds(kafa_leds, ARRAY_SIZE(kafa_leds)); } MACHINE_START(KAFA, "Sperry-Sun KAFA") /* Maintainer: Sergei Sharonov */ .init_time = at91rm9200_timer_init, .map_io = at91_map_io, .handle_irq = at91_aic_handle_irq, .init_early = kafa_init_early, .init_irq = at91_init_irq_default, .init_machine = kafa_board_init, MACHINE_END
gpl-2.0
Phoenix-Silver/Zte-Blade-New-35-kernel
arch/arm/mach-omap1/board-perseus2.c
726
7691
/* * linux/arch/arm/mach-omap1/board-perseus2.c * * Modified from board-generic.c * * Original OMAP730 support by Jean Pihet <j-pihet@ti.com> * Updated for 2.6 by Kevin Hilman <kjh@hilman.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/delay.h> #include <linux/mtd/mtd.h> #include <linux/mtd/nand.h> #include <linux/mtd/partitions.h> #include <linux/mtd/physmap.h> #include <linux/input.h> #include <linux/smc91x.h> #include <mach/hardware.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <plat/tc.h> #include <mach/gpio.h> #include <plat/mux.h> #include <plat/fpga.h> #include <plat/flash.h> #include <plat/keypad.h> #include <plat/common.h> #include <plat/board.h> static int p2_keymap[] = { KEY(0,0,KEY_UP), KEY(0,1,KEY_RIGHT), KEY(0,2,KEY_LEFT), KEY(0,3,KEY_DOWN), KEY(0,4,KEY_ENTER), KEY(1,0,KEY_F10), KEY(1,1,KEY_SEND), KEY(1,2,KEY_END), KEY(1,3,KEY_VOLUMEDOWN), KEY(1,4,KEY_VOLUMEUP), KEY(1,5,KEY_RECORD), KEY(2,0,KEY_F9), KEY(2,1,KEY_3), KEY(2,2,KEY_6), KEY(2,3,KEY_9), KEY(2,4,KEY_KPDOT), KEY(3,0,KEY_BACK), KEY(3,1,KEY_2), KEY(3,2,KEY_5), KEY(3,3,KEY_8), KEY(3,4,KEY_0), KEY(3,5,KEY_KPSLASH), KEY(4,0,KEY_HOME), KEY(4,1,KEY_1), KEY(4,2,KEY_4), KEY(4,3,KEY_7), KEY(4,4,KEY_KPASTERISK), KEY(4,5,KEY_POWER), 0 }; static struct smc91x_platdata smc91x_info = { .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT, .leda = RPC_LED_100_10, .ledb = RPC_LED_TX_RX, }; static struct resource smc91x_resources[] = { [0] = { .start = H2P2_DBG_FPGA_ETHR_START, /* Physical */ .end = H2P2_DBG_FPGA_ETHR_START + 0xf, .flags = IORESOURCE_MEM, }, [1] = { .start = INT_7XX_MPU_EXT_NIRQ, .end = 0, .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE, }, }; static struct mtd_partition nor_partitions[] = { /* bootloader (U-Boot, 
etc) in first sector */ { .name = "bootloader", .offset = 0, .size = SZ_128K, .mask_flags = MTD_WRITEABLE, /* force read-only */ }, /* bootloader params in the next sector */ { .name = "params", .offset = MTDPART_OFS_APPEND, .size = SZ_128K, .mask_flags = 0, }, /* kernel */ { .name = "kernel", .offset = MTDPART_OFS_APPEND, .size = SZ_2M, .mask_flags = 0 }, /* rest of flash is a file system */ { .name = "rootfs", .offset = MTDPART_OFS_APPEND, .size = MTDPART_SIZ_FULL, .mask_flags = 0 }, }; static struct physmap_flash_data nor_data = { .width = 2, .set_vpp = omap1_set_vpp, .parts = nor_partitions, .nr_parts = ARRAY_SIZE(nor_partitions), }; static struct resource nor_resource = { .start = OMAP_CS0_PHYS, .end = OMAP_CS0_PHYS + SZ_32M - 1, .flags = IORESOURCE_MEM, }; static struct platform_device nor_device = { .name = "physmap-flash", .id = 0, .dev = { .platform_data = &nor_data, }, .num_resources = 1, .resource = &nor_resource, }; static void nand_cmd_ctl(struct mtd_info *mtd, int cmd, unsigned int ctrl) { struct nand_chip *this = mtd->priv; unsigned long mask; if (cmd == NAND_CMD_NONE) return; mask = (ctrl & NAND_CLE) ? 
0x02 : 0; if (ctrl & NAND_ALE) mask |= 0x04; writeb(cmd, (unsigned long)this->IO_ADDR_W | mask); } #define P2_NAND_RB_GPIO_PIN 62 static int nand_dev_ready(struct mtd_info *mtd) { return gpio_get_value(P2_NAND_RB_GPIO_PIN); } static const char *part_probes[] = { "cmdlinepart", NULL }; static struct platform_nand_data nand_data = { .chip = { .nr_chips = 1, .chip_offset = 0, .options = NAND_SAMSUNG_LP_OPTIONS, .part_probe_types = part_probes, }, .ctrl = { .cmd_ctrl = nand_cmd_ctl, .dev_ready = nand_dev_ready, }, }; static struct resource nand_resource = { .start = OMAP_CS3_PHYS, .end = OMAP_CS3_PHYS + SZ_4K - 1, .flags = IORESOURCE_MEM, }; static struct platform_device nand_device = { .name = "gen_nand", .id = 0, .dev = { .platform_data = &nand_data, }, .num_resources = 1, .resource = &nand_resource, }; static struct platform_device smc91x_device = { .name = "smc91x", .id = 0, .dev = { .platform_data = &smc91x_info, }, .num_resources = ARRAY_SIZE(smc91x_resources), .resource = smc91x_resources, }; static struct resource kp_resources[] = { [0] = { .start = INT_7XX_MPUIO_KEYPAD, .end = INT_7XX_MPUIO_KEYPAD, .flags = IORESOURCE_IRQ, }, }; static struct omap_kp_platform_data kp_data = { .rows = 8, .cols = 8, .keymap = p2_keymap, .keymapsize = ARRAY_SIZE(p2_keymap), .delay = 4, .dbounce = 1, }; static struct platform_device kp_device = { .name = "omap-keypad", .id = -1, .dev = { .platform_data = &kp_data, }, .num_resources = ARRAY_SIZE(kp_resources), .resource = kp_resources, }; static struct platform_device lcd_device = { .name = "lcd_p2", .id = -1, }; static struct platform_device *devices[] __initdata = { &nor_device, &nand_device, &smc91x_device, &kp_device, &lcd_device, }; static struct omap_lcd_config perseus2_lcd_config __initdata = { .ctrl_name = "internal", }; static struct omap_board_config_kernel perseus2_config[] __initdata = { { OMAP_TAG_LCD, &perseus2_lcd_config }, }; static void __init omap_perseus2_init(void) { if (gpio_request(P2_NAND_RB_GPIO_PIN, "NAND 
ready") < 0) BUG(); gpio_direction_input(P2_NAND_RB_GPIO_PIN); omap_cfg_reg(L3_1610_FLASH_CS2B_OE); omap_cfg_reg(M8_1610_FLASH_CS2B_WE); platform_add_devices(devices, ARRAY_SIZE(devices)); omap_board_config = perseus2_config; omap_board_config_size = ARRAY_SIZE(perseus2_config); omap_serial_init(); omap_register_i2c_bus(1, 100, NULL, 0); } static void __init perseus2_init_smc91x(void) { fpga_write(1, H2P2_DBG_FPGA_LAN_RESET); mdelay(50); fpga_write(fpga_read(H2P2_DBG_FPGA_LAN_RESET) & ~1, H2P2_DBG_FPGA_LAN_RESET); mdelay(50); } static void __init omap_perseus2_init_irq(void) { omap1_init_common_hw(); omap_init_irq(); omap_gpio_init(); perseus2_init_smc91x(); } /* Only FPGA needs to be mapped here. All others are done with ioremap */ static struct map_desc omap_perseus2_io_desc[] __initdata = { { .virtual = H2P2_DBG_FPGA_BASE, .pfn = __phys_to_pfn(H2P2_DBG_FPGA_START), .length = H2P2_DBG_FPGA_SIZE, .type = MT_DEVICE } }; static void __init omap_perseus2_map_io(void) { omap1_map_common_io(); iotable_init(omap_perseus2_io_desc, ARRAY_SIZE(omap_perseus2_io_desc)); /* Early, board-dependent init */ /* * Hold GSM Reset until needed */ omap_writew(omap_readw(OMAP7XX_DSP_M_CTL) & ~1, OMAP7XX_DSP_M_CTL); /* * UARTs -> done automagically by 8250 driver */ /* * CSx timings, GPIO Mux ... 
setup */ /* Flash: CS0 timings setup */ omap_writel(0x0000fff3, OMAP7XX_FLASH_CFG_0); omap_writel(0x00000088, OMAP7XX_FLASH_ACFG_0); /* * Ethernet support through the debug board * CS1 timings setup */ omap_writel(0x0000fff3, OMAP7XX_FLASH_CFG_1); omap_writel(0x00000000, OMAP7XX_FLASH_ACFG_1); /* * Configure MPU_EXT_NIRQ IO in IO_CONF9 register, * It is used as the Ethernet controller interrupt */ omap_writel(omap_readl(OMAP7XX_IO_CONF_9) & 0x1FFFFFFF, OMAP7XX_IO_CONF_9); } MACHINE_START(OMAP_PERSEUS2, "OMAP730 Perseus2") /* Maintainer: Kevin Hilman <kjh@hilman.org> */ .phys_io = 0xfff00000, .io_pg_offst = ((0xfef00000) >> 18) & 0xfffc, .boot_params = 0x10000100, .map_io = omap_perseus2_map_io, .init_irq = omap_perseus2_init_irq, .init_machine = omap_perseus2_init, .timer = &omap_timer, MACHINE_END
gpl-2.0
Fevax/exynos8890_stock
drivers/gpio/gpio-max730x.c
1238
6007
/** * Copyright (C) 2006 Juergen Beisert, Pengutronix * Copyright (C) 2008 Guennadi Liakhovetski, Pengutronix * Copyright (C) 2009 Wolfram Sang, Pengutronix * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * The Maxim MAX7300/1 device is an I2C/SPI driven GPIO expander. There are * 28 GPIOs. 8 of them can trigger an interrupt. See datasheet for more * details * Note: * - DIN must be stable at the rising edge of clock. * - when writing: * - always clock in 16 clocks at once * - at DIN: D15 first, D0 last * - D0..D7 = databyte, D8..D14 = commandbyte * - D15 = low -> write command * - when reading * - always clock in 16 clocks at once * - at DIN: D15 first, D0 last * - D0..D7 = dummy, D8..D14 = register address * - D15 = high -> read command * - raise CS and assert it again * - always clock in 16 clocks at once * - at DOUT: D15 first, D0 last * - D0..D7 contains the data from the first cycle * * The driver exports a standard gpiochip interface */ #include <linux/module.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/mutex.h> #include <linux/spi/max7301.h> #include <linux/gpio.h> #include <linux/slab.h> /* * Pin configurations, see MAX7301 datasheet page 6 */ #define PIN_CONFIG_MASK 0x03 #define PIN_CONFIG_IN_PULLUP 0x03 #define PIN_CONFIG_IN_WO_PULLUP 0x02 #define PIN_CONFIG_OUT 0x01 #define PIN_NUMBER 28 static int max7301_direction_input(struct gpio_chip *chip, unsigned offset) { struct max7301 *ts = container_of(chip, struct max7301, chip); u8 *config; u8 offset_bits, pin_config; int ret; /* First 4 pins are unused in the controller */ offset += 4; offset_bits = (offset & 3) << 1; config = &ts->port_config[offset >> 2]; if (ts->input_pullup_active & BIT(offset)) pin_config = PIN_CONFIG_IN_PULLUP; else pin_config = PIN_CONFIG_IN_WO_PULLUP; mutex_lock(&ts->lock); *config = (*config & ~(PIN_CONFIG_MASK 
<< offset_bits)) | (pin_config << offset_bits); ret = ts->write(ts->dev, 0x08 + (offset >> 2), *config); mutex_unlock(&ts->lock); return ret; } static int __max7301_set(struct max7301 *ts, unsigned offset, int value) { if (value) { ts->out_level |= 1 << offset; return ts->write(ts->dev, 0x20 + offset, 0x01); } else { ts->out_level &= ~(1 << offset); return ts->write(ts->dev, 0x20 + offset, 0x00); } } static int max7301_direction_output(struct gpio_chip *chip, unsigned offset, int value) { struct max7301 *ts = container_of(chip, struct max7301, chip); u8 *config; u8 offset_bits; int ret; /* First 4 pins are unused in the controller */ offset += 4; offset_bits = (offset & 3) << 1; config = &ts->port_config[offset >> 2]; mutex_lock(&ts->lock); *config = (*config & ~(PIN_CONFIG_MASK << offset_bits)) | (PIN_CONFIG_OUT << offset_bits); ret = __max7301_set(ts, offset, value); if (!ret) ret = ts->write(ts->dev, 0x08 + (offset >> 2), *config); mutex_unlock(&ts->lock); return ret; } static int max7301_get(struct gpio_chip *chip, unsigned offset) { struct max7301 *ts = container_of(chip, struct max7301, chip); int config, level = -EINVAL; /* First 4 pins are unused in the controller */ offset += 4; mutex_lock(&ts->lock); config = (ts->port_config[offset >> 2] >> ((offset & 3) << 1)) & PIN_CONFIG_MASK; switch (config) { case PIN_CONFIG_OUT: /* Output: return cached level */ level = !!(ts->out_level & (1 << offset)); break; case PIN_CONFIG_IN_WO_PULLUP: case PIN_CONFIG_IN_PULLUP: /* Input: read out */ level = ts->read(ts->dev, 0x20 + offset) & 0x01; } mutex_unlock(&ts->lock); return level; } static void max7301_set(struct gpio_chip *chip, unsigned offset, int value) { struct max7301 *ts = container_of(chip, struct max7301, chip); /* First 4 pins are unused in the controller */ offset += 4; mutex_lock(&ts->lock); __max7301_set(ts, offset, value); mutex_unlock(&ts->lock); } int __max730x_probe(struct max7301 *ts) { struct device *dev = ts->dev; struct max7301_platform_data 
*pdata; int i, ret; pdata = dev_get_platdata(dev); mutex_init(&ts->lock); dev_set_drvdata(dev, ts); /* Power up the chip and disable IRQ output */ ts->write(dev, 0x04, 0x01); if (pdata) { ts->input_pullup_active = pdata->input_pullup_active; ts->chip.base = pdata->base; } else { ts->chip.base = -1; } ts->chip.label = dev->driver->name; ts->chip.direction_input = max7301_direction_input; ts->chip.get = max7301_get; ts->chip.direction_output = max7301_direction_output; ts->chip.set = max7301_set; ts->chip.ngpio = PIN_NUMBER; ts->chip.can_sleep = true; ts->chip.dev = dev; ts->chip.owner = THIS_MODULE; /* * initialize pullups according to platform data and cache the * register values for later use. */ for (i = 1; i < 8; i++) { int j; /* * initialize port_config with "0xAA", which means * input with internal pullup disabled. This is needed * to avoid writing zeros (in the inner for loop), * which is not allowed according to the datasheet. */ ts->port_config[i] = 0xAA; for (j = 0; j < 4; j++) { int offset = (i - 1) * 4 + j; ret = max7301_direction_input(&ts->chip, offset); if (ret) goto exit_destroy; } } ret = gpiochip_add(&ts->chip); if (ret) goto exit_destroy; return ret; exit_destroy: mutex_destroy(&ts->lock); return ret; } EXPORT_SYMBOL_GPL(__max730x_probe); int __max730x_remove(struct device *dev) { struct max7301 *ts = dev_get_drvdata(dev); if (ts == NULL) return -ENODEV; /* Power down the chip and disable IRQ output */ ts->write(dev, 0x04, 0x00); gpiochip_remove(&ts->chip); mutex_destroy(&ts->lock); kfree(ts); return 0; } EXPORT_SYMBOL_GPL(__max730x_remove); MODULE_AUTHOR("Juergen Beisert, Wolfram Sang"); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("MAX730x GPIO-Expanders, generic parts");
gpl-2.0
jderrick/linux-torvalds
net/irda/irda_device.c
1238
8042
/********************************************************************* * * Filename: irda_device.c * Version: 0.9 * Description: Utility functions used by the device drivers * Status: Experimental. * Author: Dag Brattli <dagb@cs.uit.no> * Created at: Sat Oct 9 09:22:27 1999 * Modified at: Sun Jan 23 17:41:24 2000 * Modified by: Dag Brattli <dagb@cs.uit.no> * * Copyright (c) 1999-2000 Dag Brattli, All Rights Reserved. * Copyright (c) 2000-2001 Jean Tourrilhes <jt@hpl.hp.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, see <http://www.gnu.org/licenses/>. 
* ********************************************************************/ #include <linux/string.h> #include <linux/proc_fs.h> #include <linux/skbuff.h> #include <linux/capability.h> #include <linux/if.h> #include <linux/if_ether.h> #include <linux/if_arp.h> #include <linux/netdevice.h> #include <linux/init.h> #include <linux/tty.h> #include <linux/kmod.h> #include <linux/spinlock.h> #include <linux/slab.h> #include <linux/export.h> #include <asm/ioctls.h> #include <asm/uaccess.h> #include <asm/dma.h> #include <asm/io.h> #include <net/irda/irda_device.h> #include <net/irda/irlap.h> #include <net/irda/timer.h> #include <net/irda/wrapper.h> static void __irda_task_delete(struct irda_task *task); static hashbin_t *dongles = NULL; static hashbin_t *tasks = NULL; static void irda_task_timer_expired(void *data); int __init irda_device_init( void) { dongles = hashbin_new(HB_NOLOCK); if (dongles == NULL) { net_warn_ratelimited("IrDA: Can't allocate dongles hashbin!\n"); return -ENOMEM; } spin_lock_init(&dongles->hb_spinlock); tasks = hashbin_new(HB_LOCK); if (tasks == NULL) { net_warn_ratelimited("IrDA: Can't allocate tasks hashbin!\n"); hashbin_delete(dongles, NULL); return -ENOMEM; } /* We no longer initialise the driver ourselves here, we let * the system do it for us... - Jean II */ return 0; } static void leftover_dongle(void *arg) { struct dongle_reg *reg = arg; net_warn_ratelimited("IrDA: Dongle type %x not unregistered\n", reg->type); } void irda_device_cleanup(void) { hashbin_delete(tasks, (FREE_FUNC) __irda_task_delete); hashbin_delete(dongles, leftover_dongle); } /* * Function irda_device_set_media_busy (self, status) * * Called when we have detected that another station is transmitting * in contention mode. */ void irda_device_set_media_busy(struct net_device *dev, int status) { struct irlap_cb *self; pr_debug("%s(%s)\n", __func__, status ? 
"TRUE" : "FALSE"); self = (struct irlap_cb *) dev->atalk_ptr; /* Some drivers may enable the receive interrupt before calling * irlap_open(), or they may disable the receive interrupt * after calling irlap_close(). * The IrDA stack is protected from this in irlap_driver_rcv(). * However, the driver calls directly the wrapper, that calls * us directly. Make sure we protect ourselves. * Jean II */ if (!self || self->magic != LAP_MAGIC) return; if (status) { self->media_busy = TRUE; if (status == SMALL) irlap_start_mbusy_timer(self, SMALLBUSY_TIMEOUT); else irlap_start_mbusy_timer(self, MEDIABUSY_TIMEOUT); pr_debug("Media busy!\n"); } else { self->media_busy = FALSE; irlap_stop_mbusy_timer(self); } } EXPORT_SYMBOL(irda_device_set_media_busy); /* * Function irda_device_is_receiving (dev) * * Check if the device driver is currently receiving data * */ int irda_device_is_receiving(struct net_device *dev) { struct if_irda_req req; int ret; if (!dev->netdev_ops->ndo_do_ioctl) { net_err_ratelimited("%s: do_ioctl not impl. by device driver\n", __func__); return -1; } ret = (dev->netdev_ops->ndo_do_ioctl)(dev, (struct ifreq *) &req, SIOCGRECEIVING); if (ret < 0) return ret; return req.ifr_receiving; } static void __irda_task_delete(struct irda_task *task) { del_timer(&task->timer); kfree(task); } static void irda_task_delete(struct irda_task *task) { /* Unregister task */ hashbin_remove(tasks, (long) task, NULL); __irda_task_delete(task); } /* * Function irda_task_kick (task) * * Tries to execute a task possible multiple times until the task is either * finished, or askes for a timeout. When a task is finished, we do post * processing, and notify the parent task, that is waiting for this task * to complete. 
*/ static int irda_task_kick(struct irda_task *task) { int finished = TRUE; int count = 0; int timeout; IRDA_ASSERT(task != NULL, return -1;); IRDA_ASSERT(task->magic == IRDA_TASK_MAGIC, return -1;); /* Execute task until it's finished, or askes for a timeout */ do { timeout = task->function(task); if (count++ > 100) { net_err_ratelimited("%s: error in task handler!\n", __func__); irda_task_delete(task); return TRUE; } } while ((timeout == 0) && (task->state != IRDA_TASK_DONE)); if (timeout < 0) { net_err_ratelimited("%s: Error executing task!\n", __func__); irda_task_delete(task); return TRUE; } /* Check if we are finished */ if (task->state == IRDA_TASK_DONE) { del_timer(&task->timer); /* Do post processing */ if (task->finished) task->finished(task); /* Notify parent */ if (task->parent) { /* Check if parent is waiting for us to complete */ if (task->parent->state == IRDA_TASK_CHILD_WAIT) { task->parent->state = IRDA_TASK_CHILD_DONE; /* Stop timer now that we are here */ del_timer(&task->parent->timer); /* Kick parent task */ irda_task_kick(task->parent); } } irda_task_delete(task); } else if (timeout > 0) { irda_start_timer(&task->timer, timeout, (void *) task, irda_task_timer_expired); finished = FALSE; } else { pr_debug("%s(), not finished, and no timeout!\n", __func__); finished = FALSE; } return finished; } /* * Function irda_task_timer_expired (data) * * Task time has expired. 
We now try to execute task (again), and restart * the timer if the task has not finished yet */ static void irda_task_timer_expired(void *data) { struct irda_task *task; task = data; irda_task_kick(task); } /* * Function irda_device_setup (dev) * * This function should be used by low level device drivers in a similar way * as ether_setup() is used by normal network device drivers */ static void irda_device_setup(struct net_device *dev) { dev->hard_header_len = 0; dev->addr_len = LAP_ALEN; dev->type = ARPHRD_IRDA; dev->tx_queue_len = 8; /* Window size + 1 s-frame */ memset(dev->broadcast, 0xff, LAP_ALEN); dev->mtu = 2048; dev->flags = IFF_NOARP; } /* * Funciton alloc_irdadev * Allocates and sets up an IRDA device in a manner similar to * alloc_etherdev. */ struct net_device *alloc_irdadev(int sizeof_priv) { return alloc_netdev(sizeof_priv, "irda%d", NET_NAME_UNKNOWN, irda_device_setup); } EXPORT_SYMBOL(alloc_irdadev); #ifdef CONFIG_ISA_DMA_API /* * Function setup_dma (idev, buffer, count, mode) * * Setup the DMA channel. Commonly used by LPC FIR drivers * */ void irda_setup_dma(int channel, dma_addr_t buffer, int count, int mode) { unsigned long flags; flags = claim_dma_lock(); disable_dma(channel); clear_dma_ff(channel); set_dma_mode(channel, mode); set_dma_addr(channel, buffer); set_dma_count(channel, count); enable_dma(channel); release_dma_lock(flags); } EXPORT_SYMBOL(irda_setup_dma); #endif
gpl-2.0
engicam-stable/iCoreM6_linux
fs/cifs/cifssmb.c
1750
183826
/* * fs/cifs/cifssmb.c * * Copyright (C) International Business Machines Corp., 2002,2010 * Author(s): Steve French (sfrench@us.ibm.com) * * Contains the routines for constructing the SMB PDUs themselves * * This library is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published * by the Free Software Foundation; either version 2.1 of the License, or * (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* SMB/CIFS PDU handling routines here - except for leftovers in connect.c */ /* These are mostly routines that operate on a pathname, or on a tree id */ /* (mounted volume), but there are eight handle based routines which must be */ /* treated slightly differently for reconnection purposes since we never */ /* want to reuse a stale file handle and only the caller knows the file info */ #include <linux/fs.h> #include <linux/kernel.h> #include <linux/vfs.h> #include <linux/slab.h> #include <linux/posix_acl_xattr.h> #include <linux/pagemap.h> #include <asm/uaccess.h> #include "cifspdu.h" #include "cifsglob.h" #include "cifsacl.h" #include "cifsproto.h" #include "cifs_unicode.h" #include "cifs_debug.h" #ifdef CONFIG_CIFS_POSIX static struct { int index; char *name; } protocols[] = { #ifdef CONFIG_CIFS_WEAK_PW_HASH {LANMAN_PROT, "\2LM1.2X002"}, {LANMAN2_PROT, "\2LANMAN2.1"}, #endif /* weak password hashing for legacy clients */ {CIFS_PROT, "\2NT LM 0.12"}, {POSIX_PROT, "\2POSIX 2"}, {BAD_PROT, "\2"} }; #else static struct { int index; char *name; } 
protocols[] = { #ifdef CONFIG_CIFS_WEAK_PW_HASH {LANMAN_PROT, "\2LM1.2X002"}, {LANMAN2_PROT, "\2LANMAN2.1"}, #endif /* weak password hashing for legacy clients */ {CIFS_PROT, "\2NT LM 0.12"}, {BAD_PROT, "\2"} }; #endif /* define the number of elements in the cifs dialect array */ #ifdef CONFIG_CIFS_POSIX #ifdef CONFIG_CIFS_WEAK_PW_HASH #define CIFS_NUM_PROT 4 #else #define CIFS_NUM_PROT 2 #endif /* CIFS_WEAK_PW_HASH */ #else /* not posix */ #ifdef CONFIG_CIFS_WEAK_PW_HASH #define CIFS_NUM_PROT 3 #else #define CIFS_NUM_PROT 1 #endif /* CONFIG_CIFS_WEAK_PW_HASH */ #endif /* CIFS_POSIX */ /* Mark as invalid, all open files on tree connections since they were closed when session to server was lost */ static void mark_open_files_invalid(struct cifs_tcon *pTcon) { struct cifsFileInfo *open_file = NULL; struct list_head *tmp; struct list_head *tmp1; /* list all files open on tree connection and mark them invalid */ spin_lock(&cifs_file_list_lock); list_for_each_safe(tmp, tmp1, &pTcon->openFileList) { open_file = list_entry(tmp, struct cifsFileInfo, tlist); open_file->invalidHandle = true; open_file->oplock_break_cancelled = true; } spin_unlock(&cifs_file_list_lock); /* BB Add call to invalidate_inodes(sb) for all superblocks mounted to this tcon */ } /* reconnect the socket, tcon, and smb session if needed */ static int cifs_reconnect_tcon(struct cifs_tcon *tcon, int smb_command) { int rc = 0; struct cifs_ses *ses; struct TCP_Server_Info *server; struct nls_table *nls_codepage; /* * SMBs NegProt, SessSetup, uLogoff do not have tcon yet so check for * tcp and smb session status done differently for those three - in the * calling routine */ if (!tcon) return 0; ses = tcon->ses; server = ses->server; /* * only tree disconnect, open, and write, (and ulogoff which does not * have tcon) are allowed as we start force umount */ if (tcon->tidStatus == CifsExiting) { if (smb_command != SMB_COM_WRITE_ANDX && smb_command != SMB_COM_OPEN_ANDX && smb_command != SMB_COM_TREE_DISCONNECT) 
{ cFYI(1, "can not send cmd %d while umounting", smb_command); return -ENODEV; } } /* * Give demultiplex thread up to 10 seconds to reconnect, should be * greater than cifs socket timeout which is 7 seconds */ while (server->tcpStatus == CifsNeedReconnect) { wait_event_interruptible_timeout(server->response_q, (server->tcpStatus != CifsNeedReconnect), 10 * HZ); /* are we still trying to reconnect? */ if (server->tcpStatus != CifsNeedReconnect) break; /* * on "soft" mounts we wait once. Hard mounts keep * retrying until process is killed or server comes * back on-line */ if (!tcon->retry) { cFYI(1, "gave up waiting on reconnect in smb_init"); return -EHOSTDOWN; } } if (!ses->need_reconnect && !tcon->need_reconnect) return 0; nls_codepage = load_nls_default(); /* * need to prevent multiple threads trying to simultaneously * reconnect the same SMB session */ mutex_lock(&ses->session_mutex); rc = cifs_negotiate_protocol(0, ses); if (rc == 0 && ses->need_reconnect) rc = cifs_setup_session(0, ses, nls_codepage); /* do we need to reconnect tcon? */ if (rc || !tcon->need_reconnect) { mutex_unlock(&ses->session_mutex); goto out; } mark_open_files_invalid(tcon); rc = CIFSTCon(0, ses, tcon->treeName, tcon, nls_codepage); mutex_unlock(&ses->session_mutex); cFYI(1, "reconnect tcon rc = %d", rc); if (rc) goto out; /* * FIXME: check if wsize needs updated due to negotiated smb buffer * size shrinking */ atomic_inc(&tconInfoReconnectCount); /* tell server Unix caps we support */ if (ses->capabilities & CAP_UNIX) reset_cifs_unix_caps(0, tcon, NULL, NULL); /* * Removed call to reopen open files here. It is safer (and faster) to * reopen files one at a time as needed in read and write. * * FIXME: what about file locks? don't we need to reclaim them ASAP? 
*/ out: /* * Check if handle based operation so we know whether we can continue * or not without returning to caller to reset file handle */ switch (smb_command) { case SMB_COM_READ_ANDX: case SMB_COM_WRITE_ANDX: case SMB_COM_CLOSE: case SMB_COM_FIND_CLOSE2: case SMB_COM_LOCKING_ANDX: rc = -EAGAIN; } unload_nls(nls_codepage); return rc; } /* Allocate and return pointer to an SMB request buffer, and set basic SMB information in the SMB header. If the return code is zero, this function must have filled in request_buf pointer */ static int small_smb_init(int smb_command, int wct, struct cifs_tcon *tcon, void **request_buf) { int rc; rc = cifs_reconnect_tcon(tcon, smb_command); if (rc) return rc; *request_buf = cifs_small_buf_get(); if (*request_buf == NULL) { /* BB should we add a retry in here if not a writepage? */ return -ENOMEM; } header_assemble((struct smb_hdr *) *request_buf, smb_command, tcon, wct); if (tcon != NULL) cifs_stats_inc(&tcon->num_smbs_sent); return 0; } int small_smb_init_no_tc(const int smb_command, const int wct, struct cifs_ses *ses, void **request_buf) { int rc; struct smb_hdr *buffer; rc = small_smb_init(smb_command, wct, NULL, request_buf); if (rc) return rc; buffer = (struct smb_hdr *)*request_buf; buffer->Mid = GetNextMid(ses->server); if (ses->capabilities & CAP_UNICODE) buffer->Flags2 |= SMBFLG2_UNICODE; if (ses->capabilities & CAP_STATUS32) buffer->Flags2 |= SMBFLG2_ERR_STATUS; /* uid, tid can stay at zero as set in header assemble */ /* BB add support for turning on the signing when this function is used after 1st of session setup requests */ return rc; } /* If the return code is zero, this function must fill in request_buf pointer */ static int __smb_init(int smb_command, int wct, struct cifs_tcon *tcon, void **request_buf, void **response_buf) { *request_buf = cifs_buf_get(); if (*request_buf == NULL) { /* BB should we add a retry in here if not a writepage? 
*/ return -ENOMEM; } /* Although the original thought was we needed the response buf for */ /* potential retries of smb operations it turns out we can determine */ /* from the mid flags when the request buffer can be resent without */ /* having to use a second distinct buffer for the response */ if (response_buf) *response_buf = *request_buf; header_assemble((struct smb_hdr *) *request_buf, smb_command, tcon, wct); if (tcon != NULL) cifs_stats_inc(&tcon->num_smbs_sent); return 0; } /* If the return code is zero, this function must fill in request_buf pointer */ static int smb_init(int smb_command, int wct, struct cifs_tcon *tcon, void **request_buf, void **response_buf) { int rc; rc = cifs_reconnect_tcon(tcon, smb_command); if (rc) return rc; return __smb_init(smb_command, wct, tcon, request_buf, response_buf); } static int smb_init_no_reconnect(int smb_command, int wct, struct cifs_tcon *tcon, void **request_buf, void **response_buf) { if (tcon->ses->need_reconnect || tcon->need_reconnect) return -EHOSTDOWN; return __smb_init(smb_command, wct, tcon, request_buf, response_buf); } static int validate_t2(struct smb_t2_rsp *pSMB) { unsigned int total_size; /* check for plausible wct */ if (pSMB->hdr.WordCount < 10) goto vt2_err; /* check for parm and data offset going beyond end of smb */ if (get_unaligned_le16(&pSMB->t2_rsp.ParameterOffset) > 1024 || get_unaligned_le16(&pSMB->t2_rsp.DataOffset) > 1024) goto vt2_err; total_size = get_unaligned_le16(&pSMB->t2_rsp.ParameterCount); if (total_size >= 512) goto vt2_err; /* check that bcc is at least as big as parms + data, and that it is * less than negotiated smb buffer */ total_size += get_unaligned_le16(&pSMB->t2_rsp.DataCount); if (total_size > get_bcc(&pSMB->hdr) || total_size >= CIFSMaxBufSize + MAX_CIFS_HDR_SIZE) goto vt2_err; return 0; vt2_err: cifs_dump_mem("Invalid transact2 SMB: ", (char *)pSMB, sizeof(struct smb_t2_rsp) + 16); return -EINVAL; } static inline void inc_rfc1001_len(void *pSMB, int count) { struct 
smb_hdr *hdr = (struct smb_hdr *)pSMB;
	/* Bump the RFC1001 framing length (big-endian on the wire) by count. */
	be32_add_cpu(&hdr->smb_buf_length, count);
}

/*
 * CIFSSMBNegotiate - send SMB_COM_NEGOTIATE and parse the server's reply.
 *
 * Offers the client's dialect list, then records the negotiated dialect,
 * security mode, buffer sizes, capabilities, time-zone adjustment and
 * (when present) the challenge/crypt key or extended-security blob into
 * ses->server.  Also validates the session's signing requirements against
 * what the server supports.
 *
 * @xid: request identifier for tracing/bookkeeping
 * @ses: session being established; ses->server must be set
 *
 * Returns 0 on success or a negative errno (-EIO, -EOPNOTSUPP, ...).
 */
int
CIFSSMBNegotiate(unsigned int xid, struct cifs_ses *ses)
{
	NEGOTIATE_REQ *pSMB;
	NEGOTIATE_RSP *pSMBr;
	int rc = 0;
	int bytes_returned;
	int i;
	struct TCP_Server_Info *server;
	u16 count;
	unsigned int secFlags;

	if (ses->server)
		server = ses->server;
	else {
		rc = -EIO;
		return rc;
	}
	rc = smb_init(SMB_COM_NEGOTIATE, 0, NULL /* no tcon yet */ ,
		      (void **) &pSMB, (void **) &pSMBr);
	if (rc)
		return rc;

	/* if any of auth flags (ie not sign or seal) are overriden use them */
	if (ses->overrideSecFlg & (~(CIFSSEC_MUST_SIGN | CIFSSEC_MUST_SEAL)))
		secFlags = ses->overrideSecFlg;  /* BB FIXME fix sign flags? */
	else /* if override flags set only sign/seal OR them with global auth */
		secFlags = global_secflags | ses->overrideSecFlg;

	cFYI(1, "secFlags 0x%x", secFlags);

	pSMB->hdr.Mid = GetNextMid(server);
	pSMB->hdr.Flags2 |= (SMBFLG2_UNICODE | SMBFLG2_ERR_STATUS);

	/*
	 * Request extended security (SPNEGO) when Kerberos or NTLMSSP is
	 * required, or is the only mechanism allowed by the auth mask.
	 */
	if ((secFlags & CIFSSEC_MUST_KRB5) == CIFSSEC_MUST_KRB5)
		pSMB->hdr.Flags2 |= SMBFLG2_EXT_SEC;
	else if ((secFlags & CIFSSEC_AUTH_MASK) == CIFSSEC_MAY_KRB5) {
		cFYI(1, "Kerberos only mechanism, enable extended security");
		pSMB->hdr.Flags2 |= SMBFLG2_EXT_SEC;
	} else if ((secFlags & CIFSSEC_MUST_NTLMSSP) == CIFSSEC_MUST_NTLMSSP)
		pSMB->hdr.Flags2 |= SMBFLG2_EXT_SEC;
	else if ((secFlags & CIFSSEC_AUTH_MASK) == CIFSSEC_MAY_NTLMSSP) {
		cFYI(1, "NTLMSSP only mechanism, enable extended security");
		pSMB->hdr.Flags2 |= SMBFLG2_EXT_SEC;
	}

	/* Append the NUL-separated dialect strings to the request body. */
	count = 0;
	for (i = 0; i < CIFS_NUM_PROT; i++) {
		strncpy(pSMB->DialectsArray+count, protocols[i].name, 16);
		count += strlen(protocols[i].name) + 1;
		/* null at end of source and target buffers anyway */
	}
	inc_rfc1001_len(pSMB, count);
	pSMB->ByteCount = cpu_to_le16(count);

	rc = SendReceive(xid, ses, (struct smb_hdr *) pSMB,
			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
	if (rc != 0)
		goto neg_err_exit;

	server->dialect = le16_to_cpu(pSMBr->DialectIndex);
	cFYI(1, "Dialect: %d", server->dialect);
	/* Check wct = 1 error case */
	if ((pSMBr->hdr.WordCount < 13) || (server->dialect == BAD_PROT)) {
		/*
		 * core returns wct = 1, but we do not ask for core -
		 * otherwise small wct just comes when dialect index is -1
		 * indicating we could not negotiate a common dialect
		 */
		rc = -EOPNOTSUPP;
		goto neg_err_exit;
#ifdef CONFIG_CIFS_WEAK_PW_HASH
	} else if ((pSMBr->hdr.WordCount == 13)
			&& ((server->dialect == LANMAN_PROT)
				|| (server->dialect == LANMAN2_PROT))) {
		/* Legacy LANMAN dialect: wct==13 response layout. */
		__s16 tmp;
		struct lanman_neg_rsp *rsp = (struct lanman_neg_rsp *)pSMBr;

		if ((secFlags & CIFSSEC_MAY_LANMAN) ||
			(secFlags & CIFSSEC_MAY_PLNTXT))
			server->secType = LANMAN;
		else {
			cERROR(1, "mount failed weak security disabled"
				   " in /proc/fs/cifs/SecurityFlags");
			rc = -EOPNOTSUPP;
			goto neg_err_exit;
		}
		server->sec_mode = (__u8)le16_to_cpu(rsp->SecurityMode);
		server->maxReq = le16_to_cpu(rsp->MaxMpxCount);
		/* Clamp the server's buffer size to what we can allocate. */
		server->maxBuf = min((__u32)le16_to_cpu(rsp->MaxBufSize),
				(__u32)CIFSMaxBufSize + MAX_CIFS_HDR_SIZE);
		server->max_vcs = le16_to_cpu(rsp->MaxNumberVcs);
		/*
		 * even though we do not use raw we might as well set this
		 * accurately, in case we ever find a need for it
		 */
		if ((le16_to_cpu(rsp->RawMode) & RAW_ENABLE) == RAW_ENABLE) {
			server->max_rw = 0xFF00;
			server->capabilities = CAP_MPX_MODE | CAP_RAW_MODE;
		} else {
			server->max_rw = 0;/* do not need to use raw anyway */
			server->capabilities = CAP_MPX_MODE;
		}
		tmp = (__s16)le16_to_cpu(rsp->ServerTimeZone);
		if (tmp == -1) {
			/* OS/2 often does not set timezone therefore
			 * we must use server time to calc time zone.
			 * Could deviate slightly from the right zone.
			 * Smallest defined timezone difference is 15 minutes
			 * (i.e. Nepal).  Rounding up/down is done to match
			 * this requirement.
			 */
			int val, seconds, remain, result;
			struct timespec ts, utc;
			utc = CURRENT_TIME;
			ts = cnvrtDosUnixTm(rsp->SrvTime.Date,
					    rsp->SrvTime.Time, 0);
			cFYI(1, "SrvTime %d sec since 1970 (utc: %d) diff: %d",
				(int)ts.tv_sec, (int)utc.tv_sec,
				(int)(utc.tv_sec - ts.tv_sec));
			val = (int)(utc.tv_sec - ts.tv_sec);
			seconds = abs(val);
			/* Round to the nearest MIN_TZ_ADJ multiple. */
			result = (seconds / MIN_TZ_ADJ) * MIN_TZ_ADJ;
			remain = seconds % MIN_TZ_ADJ;
			if (remain >= (MIN_TZ_ADJ / 2))
				result += MIN_TZ_ADJ;
			if (val < 0)
				result = -result;
			server->timeAdj = result;
		} else {
			server->timeAdj = (int)tmp;
			server->timeAdj *= 60; /* also in seconds */
		}
		cFYI(1, "server->timeAdj: %d seconds", server->timeAdj);

		/*
		 * BB get server time for time conversions and add code to
		 * use it and timezone since this is not UTC
		 */
		if (rsp->EncryptionKeyLength ==
				cpu_to_le16(CIFS_CRYPTO_KEY_SIZE)) {
			memcpy(ses->server->cryptkey, rsp->EncryptionKey,
			       CIFS_CRYPTO_KEY_SIZE);
		} else if (server->sec_mode & SECMODE_PW_ENCRYPT) {
			rc = -EIO; /* need cryptkey unless plain text */
			goto neg_err_exit;
		}

		cFYI(1, "LANMAN negotiated");
		/*
		 * we will not end up setting signing flags - as no signing
		 * was in LANMAN and server did not return the flags on
		 */
		goto signing_check;
#else /* weak security disabled */
	} else if (pSMBr->hdr.WordCount == 13) {
		cERROR(1, "mount failed, cifs module not built "
			  "with CIFS_WEAK_PW_HASH support");
		rc = -EOPNOTSUPP;
#endif /* WEAK_PW_HASH */
		goto neg_err_exit;
	} else if (pSMBr->hdr.WordCount != 17) {
		/* unknown wct */
		rc = -EOPNOTSUPP;
		goto neg_err_exit;
	}
	/* else wct == 17 NTLM */
	server->sec_mode = pSMBr->SecurityMode;
	if ((server->sec_mode & SECMODE_USER) == 0)
		cFYI(1, "share mode security");

	if ((server->sec_mode & SECMODE_PW_ENCRYPT) == 0)
#ifdef CONFIG_CIFS_WEAK_PW_HASH
		if ((secFlags & CIFSSEC_MAY_PLNTXT) == 0)
#endif /* CIFS_WEAK_PW_HASH */
			cERROR(1, "Server requests plain text password"
				  " but client support disabled");

	/* Pick the strongest allowed auth type, in priority order. */
	if ((secFlags & CIFSSEC_MUST_NTLMV2) == CIFSSEC_MUST_NTLMV2)
		server->secType = NTLMv2;
	else if (secFlags & CIFSSEC_MAY_NTLM)
		server->secType = NTLM;
	else if (secFlags & CIFSSEC_MAY_NTLMV2)
		server->secType = NTLMv2;
	else if (secFlags & CIFSSEC_MAY_KRB5)
		server->secType = Kerberos;
	else if (secFlags & CIFSSEC_MAY_NTLMSSP)
		server->secType = RawNTLMSSP;
	else if (secFlags & CIFSSEC_MAY_LANMAN)
		server->secType = LANMAN;
	else {
		rc = -EOPNOTSUPP;
		cERROR(1, "Invalid security type");
		goto neg_err_exit;
	}
	/* else ... any others ...? */

	/*
	 * one byte, so no need to convert this or EncryptionKeyLen from
	 * little endian
	 */
	server->maxReq = le16_to_cpu(pSMBr->MaxMpxCount);
	/* probably no need to store and check maxvcs */
	server->maxBuf = min(le32_to_cpu(pSMBr->MaxBufferSize),
			(__u32) CIFSMaxBufSize + MAX_CIFS_HDR_SIZE);
	server->max_rw = le32_to_cpu(pSMBr->MaxRawSize);
	cFYI(DBG2, "Max buf = %d", ses->server->maxBuf);
	server->capabilities = le32_to_cpu(pSMBr->Capabilities);
	server->timeAdj = (int)(__s16)le16_to_cpu(pSMBr->ServerTimeZone);
	server->timeAdj *= 60;

	if (pSMBr->EncryptionKeyLength == CIFS_CRYPTO_KEY_SIZE) {
		/* Classic challenge/response: server sent the crypt key. */
		memcpy(ses->server->cryptkey, pSMBr->u.EncryptionKey,
		       CIFS_CRYPTO_KEY_SIZE);
	} else if ((pSMBr->hdr.Flags2 & SMBFLG2_EXT_SEC ||
			server->capabilities & CAP_EXTENDED_SECURITY) &&
				(pSMBr->EncryptionKeyLength == 0)) {
		/* decode security blob */
		count = get_bcc(&pSMBr->hdr);
		if (count < 16) {
			/* Must hold at least the 16-byte server GUID. */
			rc = -EIO;
			goto neg_err_exit;
		}
		spin_lock(&cifs_tcp_ses_lock);
		if (server->srv_count > 1) {
			spin_unlock(&cifs_tcp_ses_lock);
			if (memcmp(server->server_GUID,
				   pSMBr->u.extended_response.GUID,
				   16) != 0) {
				cFYI(1, "server UID changed");
				memcpy(server->server_GUID,
				       pSMBr->u.extended_response.GUID, 16);
			}
		} else {
			spin_unlock(&cifs_tcp_ses_lock);
			memcpy(server->server_GUID,
			       pSMBr->u.extended_response.GUID, 16);
		}

		if (count == 16) {
			/* GUID only, no SPNEGO token: raw NTLMSSP. */
			server->secType = RawNTLMSSP;
		} else {
			rc = decode_negTokenInit(pSMBr->u.extended_response.
						 SecurityBlob, count - 16,
						 server);
			if (rc == 1)
				rc = 0;
			else
				rc = -EINVAL;
			/* Confirm the server offers the mechanism we chose. */
			if (server->secType == Kerberos) {
				if (!server->sec_kerberos &&
						!server->sec_mskerberos)
					rc = -EOPNOTSUPP;
			} else if (server->secType == RawNTLMSSP) {
				if (!server->sec_ntlmssp)
					rc = -EOPNOTSUPP;
			} else
				rc = -EOPNOTSUPP;
		}
	} else if (server->sec_mode & SECMODE_PW_ENCRYPT) {
		rc = -EIO; /* no crypt key only if plain text pwd */
		goto neg_err_exit;
	} else
		server->capabilities &= ~CAP_EXTENDED_SECURITY;

#ifdef CONFIG_CIFS_WEAK_PW_HASH
signing_check:
#endif
	if ((secFlags & CIFSSEC_MAY_SIGN) == 0) {
		/*
		 * MUST_SIGN already includes the MAY_SIGN FLAG so if this is
		 * zero it means that signing is disabled
		 */
		cFYI(1, "Signing disabled");
		if (server->sec_mode & SECMODE_SIGN_REQUIRED) {
			cERROR(1, "Server requires "
				  "packet signing to be enabled in "
				  "/proc/fs/cifs/SecurityFlags.");
			rc = -EOPNOTSUPP;
		}
		server->sec_mode &=
			~(SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED);
	} else if ((secFlags & CIFSSEC_MUST_SIGN) == CIFSSEC_MUST_SIGN) {
		/* signing required */
		cFYI(1, "Must sign - secFlags 0x%x", secFlags);
		if ((server->sec_mode &
			(SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED)) == 0) {
			cERROR(1, "signing required but server lacks support");
			rc = -EOPNOTSUPP;
		} else
			server->sec_mode |= SECMODE_SIGN_REQUIRED;
	} else {
		/* signing optional ie CIFSSEC_MAY_SIGN */
		if ((server->sec_mode & SECMODE_SIGN_REQUIRED) == 0)
			server->sec_mode &=
				~(SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED);
	}

neg_err_exit:
	cifs_buf_release(pSMB);

	cFYI(1, "negprot rc %d", rc);
	return rc;
}

/*
 * CIFSSMBTDis - send SMB_COM_TREE_DISCONNECT for @tcon.
 * Returns 0 on success (including when the tid is already gone); -EAGAIN
 * from a crashed session is mapped to 0 below.
 */
int
CIFSSMBTDis(const int xid, struct cifs_tcon *tcon)
{
	struct smb_hdr *smb_buffer;
	int rc = 0;

	cFYI(1, "In tree disconnect");

	/* BB: do we need to check this? These should never be NULL. */
	if ((tcon->ses == NULL) || (tcon->ses->server == NULL))
		return -EIO;

	/*
	 * No need to return error on this operation if tid invalidated and
	 * closed on server already e.g. due to tcp session crashing.
 Also,
	 * the tcon is no longer on the list, so no need to take lock before
	 * checking this.
	 */
	if ((tcon->need_reconnect) || (tcon->ses->need_reconnect))
		return 0;

	rc = small_smb_init(SMB_COM_TREE_DISCONNECT, 0, tcon,
			    (void **)&smb_buffer);
	if (rc)
		return rc;

	rc = SendReceiveNoRsp(xid, tcon->ses, smb_buffer, 0);
	if (rc)
		cFYI(1, "Tree disconnect failed %d", rc);

	/*
	 * No need to return error on this operation if tid invalidated and
	 * closed on server already e.g. due to tcp session crashing
	 */
	if (rc == -EAGAIN)
		rc = 0;

	return rc;
}

/*
 * This is a no-op for now. We're not really interested in the reply, but
 * rather in the fact that the server sent one and that server->lstrp
 * gets updated.
 *
 * FIXME: maybe we should consider checking that the reply matches request?
 */
static void
cifs_echo_callback(struct mid_q_entry *mid)
{
	struct TCP_Server_Info *server = mid->callback_data;

	/* Free the mid entry, then release our in-flight request slot. */
	DeleteMidQEntry(mid);
	atomic_dec(&server->inFlight);
	wake_up(&server->request_q);
}

/*
 * CIFSSMBEcho - fire an asynchronous SMB_COM_ECHO (keep-alive probe).
 * The one-byte payload 'a' is echoed by the server; the reply itself is
 * discarded in cifs_echo_callback.  Returns 0 if the request was queued.
 */
int
CIFSSMBEcho(struct TCP_Server_Info *server)
{
	ECHO_REQ *smb;
	int rc = 0;
	struct kvec iov;

	cFYI(1, "In echo request");

	rc = small_smb_init(SMB_COM_ECHO, 0, NULL, (void **)&smb);
	if (rc)
		return rc;

	/* set up echo request */
	smb->hdr.Tid = 0xffff;	/* echo is not tied to any tree connect */
	smb->hdr.WordCount = 1;
	put_unaligned_le16(1, &smb->EchoCount);
	put_bcc(1, &smb->hdr);
	smb->Data[0] = 'a';
	inc_rfc1001_len(smb, 3);
	iov.iov_base = smb;
	iov.iov_len = be32_to_cpu(smb->hdr.smb_buf_length) + 4;

	rc = cifs_call_async(server, &iov, 1, cifs_echo_callback, server,
			     true);
	if (rc)
		cFYI(1, "Echo request failed: %d", rc);

	cifs_small_buf_release(smb);

	return rc;
}

/*
 * CIFSSMBLogoff - send SMB_COM_LOGOFF_ANDX to tear down the SMB session.
 * Serialized on ses->session_mutex; skipped entirely when the session is
 * already marked for reconnect.  -EAGAIN (dead session) is mapped to 0.
 */
int
CIFSSMBLogoff(const int xid, struct cifs_ses *ses)
{
	LOGOFF_ANDX_REQ *pSMB;
	int rc = 0;

	cFYI(1, "In SMBLogoff for session disconnect");

	/*
	 * BB: do we need to check validity of ses and server? They should
	 * always be valid since we have an active reference. If not, that
	 * should probably be a BUG()
	 */
	if (!ses || !ses->server)
		return -EIO;

	mutex_lock(&ses->session_mutex);
	if (ses->need_reconnect)
		goto session_already_dead; /* no need to send SMBlogoff if uid
					      already closed due to reconnect */
	rc = small_smb_init(SMB_COM_LOGOFF_ANDX, 2, NULL, (void **)&pSMB);
	if (rc) {
		mutex_unlock(&ses->session_mutex);
		return rc;
	}

	pSMB->hdr.Mid = GetNextMid(ses->server);

	if (ses->server->sec_mode &
		   (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
		pSMB->hdr.Flags2 |= SMBFLG2_SECURITY_SIGNATURE;

	pSMB->hdr.Uid = ses->Suid;

	pSMB->AndXCommand = 0xFF;	/* no chained command follows */
	rc = SendReceiveNoRsp(xid, ses, (struct smb_hdr *) pSMB, 0);
session_already_dead:
	mutex_unlock(&ses->session_mutex);

	/*
	 * if session dead then we do not need to do ulogoff, since server
	 * closed smb session, no sense reporting error
	 */
	if (rc == -EAGAIN)
		rc = 0;
	return rc;
}

/*
 * CIFSPOSIXDelFile - POSIX unlink via TRANS2 SET_PATH_INFO(SMB_POSIX_UNLINK).
 * @type selects file vs directory unlink semantics (sent in the request
 * data as struct unlink_psx_rq).  Retries the whole exchange on -EAGAIN.
 */
int
CIFSPOSIXDelFile(const int xid, struct cifs_tcon *tcon, const char *fileName,
		 __u16 type, const struct nls_table *nls_codepage, int remap)
{
	TRANSACTION2_SPI_REQ *pSMB = NULL;
	TRANSACTION2_SPI_RSP *pSMBr = NULL;
	struct unlink_psx_rq *pRqD;
	int name_len;
	int rc = 0;
	int bytes_returned = 0;
	__u16 params, param_offset, offset, byte_count;

	cFYI(1, "In POSIX delete");
PsxDelete:
	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
		      (void **) &pSMBr);
	if (rc)
		return rc;

	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
		name_len =
		    cifsConvertToUCS((__le16 *) pSMB->FileName, fileName,
				     PATH_MAX, nls_codepage, remap);
		name_len++;	/* trailing null */
		name_len *= 2;
	} else { /* BB add path length overrun check */
		name_len = strnlen(fileName, PATH_MAX);
		name_len++;	/* trailing null */
		strncpy(pSMB->FileName, fileName, name_len);
	}

	params = 6 + name_len;
	pSMB->MaxParameterCount = cpu_to_le16(2);
	pSMB->MaxDataCount = 0; /* BB double check this with jra */
	pSMB->MaxSetupCount = 0;
	pSMB->Reserved = 0;
	pSMB->Flags = 0;
	pSMB->Timeout = 0;
	pSMB->Reserved2 = 0;
	param_offset = offsetof(struct smb_com_transaction2_spi_req,
				InformationLevel) -
4;
	offset = param_offset + params;

	/* Setup pointer to Request Data (inode type) */
	pRqD = (struct unlink_psx_rq *)(((char *)&pSMB->hdr.Protocol)
				+ offset);
	pRqD->type = cpu_to_le16(type);
	pSMB->ParameterOffset = cpu_to_le16(param_offset);
	pSMB->DataOffset = cpu_to_le16(offset);
	pSMB->SetupCount = 1;
	pSMB->Reserved3 = 0;
	pSMB->SubCommand = cpu_to_le16(TRANS2_SET_PATH_INFORMATION);
	byte_count = 3 /* pad */  + params + sizeof(struct unlink_psx_rq);

	pSMB->DataCount = cpu_to_le16(sizeof(struct unlink_psx_rq));
	pSMB->TotalDataCount = cpu_to_le16(sizeof(struct unlink_psx_rq));
	pSMB->ParameterCount = cpu_to_le16(params);
	pSMB->TotalParameterCount = pSMB->ParameterCount;
	pSMB->InformationLevel = cpu_to_le16(SMB_POSIX_UNLINK);
	pSMB->Reserved4 = 0;
	inc_rfc1001_len(pSMB, byte_count);
	pSMB->ByteCount = cpu_to_le16(byte_count);
	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
	if (rc)
		cFYI(1, "Posix delete returned %d", rc);
	cifs_buf_release(pSMB);

	cifs_stats_inc(&tcon->num_deletes);

	if (rc == -EAGAIN)
		goto PsxDelete;

	return rc;
}

/*
 * CIFSSMBDelFile - delete a file via classic SMB_COM_DELETE.
 * Matches read-only/hidden/system files too (SearchAttributes).
 * Retries the whole exchange on -EAGAIN.
 */
int
CIFSSMBDelFile(const int xid, struct cifs_tcon *tcon, const char *fileName,
	       const struct nls_table *nls_codepage, int remap)
{
	DELETE_FILE_REQ *pSMB = NULL;
	DELETE_FILE_RSP *pSMBr = NULL;
	int rc = 0;
	int bytes_returned;
	int name_len;

DelFileRetry:
	rc = smb_init(SMB_COM_DELETE, 1, tcon, (void **) &pSMB,
		      (void **) &pSMBr);
	if (rc)
		return rc;

	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
		name_len =
		    cifsConvertToUCS((__le16 *) pSMB->fileName, fileName,
				     PATH_MAX, nls_codepage, remap);
		name_len++;	/* trailing null */
		name_len *= 2;
	} else {		/* BB improve check for buffer overruns BB */
		name_len = strnlen(fileName, PATH_MAX);
		name_len++;	/* trailing null */
		strncpy(pSMB->fileName, fileName, name_len);
	}
	pSMB->SearchAttributes =
	    cpu_to_le16(ATTR_READONLY | ATTR_HIDDEN | ATTR_SYSTEM);
	pSMB->BufferFormat = 0x04;	/* SMB ASCII buffer-format marker */
	inc_rfc1001_len(pSMB, name_len + 1);
	pSMB->ByteCount = cpu_to_le16(name_len + 1);
	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
	cifs_stats_inc(&tcon->num_deletes);
	if (rc)
		cFYI(1, "Error in RMFile = %d", rc);

	cifs_buf_release(pSMB);
	if (rc == -EAGAIN)
		goto DelFileRetry;

	return rc;
}

/*
 * CIFSSMBRmDir - remove a directory via SMB_COM_DELETE_DIRECTORY.
 * Retries the whole exchange on -EAGAIN.
 */
int
CIFSSMBRmDir(const int xid, struct cifs_tcon *tcon, const char *dirName,
	     const struct nls_table *nls_codepage, int remap)
{
	DELETE_DIRECTORY_REQ *pSMB = NULL;
	DELETE_DIRECTORY_RSP *pSMBr = NULL;
	int rc = 0;
	int bytes_returned;
	int name_len;

	cFYI(1, "In CIFSSMBRmDir");
RmDirRetry:
	rc = smb_init(SMB_COM_DELETE_DIRECTORY, 0, tcon, (void **) &pSMB,
		      (void **) &pSMBr);
	if (rc)
		return rc;

	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
		name_len = cifsConvertToUCS((__le16 *) pSMB->DirName, dirName,
					    PATH_MAX, nls_codepage, remap);
		name_len++;	/* trailing null */
		name_len *= 2;
	} else {		/* BB improve check for buffer overruns BB */
		name_len = strnlen(dirName, PATH_MAX);
		name_len++;	/* trailing null */
		strncpy(pSMB->DirName, dirName, name_len);
	}

	pSMB->BufferFormat = 0x04;	/* SMB ASCII buffer-format marker */
	inc_rfc1001_len(pSMB, name_len + 1);
	pSMB->ByteCount = cpu_to_le16(name_len + 1);
	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
	cifs_stats_inc(&tcon->num_rmdirs);
	if (rc)
		cFYI(1, "Error in RMDir = %d", rc);

	cifs_buf_release(pSMB);
	if (rc == -EAGAIN)
		goto RmDirRetry;
	return rc;
}

/*
 * CIFSSMBMkDir - create a directory via SMB_COM_CREATE_DIRECTORY.
 * Retries the whole exchange on -EAGAIN.
 */
int
CIFSSMBMkDir(const int xid, struct cifs_tcon *tcon,
	     const char *name, const struct nls_table *nls_codepage, int remap)
{
	int rc = 0;
	CREATE_DIRECTORY_REQ *pSMB = NULL;
	CREATE_DIRECTORY_RSP *pSMBr = NULL;
	int bytes_returned;
	int name_len;

	cFYI(1, "In CIFSSMBMkDir");
MkDirRetry:
	rc = smb_init(SMB_COM_CREATE_DIRECTORY, 0, tcon, (void **) &pSMB,
		      (void **) &pSMBr);
	if (rc)
		return rc;

	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
		name_len = cifsConvertToUCS((__le16 *) pSMB->DirName, name,
					    PATH_MAX, nls_codepage, remap);
		name_len++;	/* trailing null */
		name_len *= 2;
	} else {		/* BB improve check for buffer overruns BB */
		name_len =
strnlen(name, PATH_MAX);
		name_len++;	/* trailing null */
		strncpy(pSMB->DirName, name, name_len);
	}

	pSMB->BufferFormat = 0x04;	/* SMB ASCII buffer-format marker */
	inc_rfc1001_len(pSMB, name_len + 1);
	pSMB->ByteCount = cpu_to_le16(name_len + 1);
	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
	cifs_stats_inc(&tcon->num_mkdirs);
	if (rc)
		cFYI(1, "Error in Mkdir = %d", rc);

	cifs_buf_release(pSMB);
	if (rc == -EAGAIN)
		goto MkDirRetry;
	return rc;
}

/*
 * CIFSPOSIXCreate - open/create a file or directory with POSIX semantics
 * via TRANS2 SET_PATH_INFO(SMB_POSIX_OPEN).
 *
 * @posix_flags: SMB_O_* open flags (SMB_O_DIRECTORY selects mkdir stats)
 * @mode:        POSIX permission bits for a created object
 * @netfid:      out, file handle (kept little-endian); may be NULL
 * @pRetData:    out, FILE_UNIX_BASIC_INFO for the opened inode
 * @pOplock:     in/out, oplock request flags; CIFS_CREATE_ACTION is OR'd
 *               in when the server reports the file was newly created
 *
 * Retries the whole exchange on -EAGAIN.
 */
int
CIFSPOSIXCreate(const int xid, struct cifs_tcon *tcon, __u32 posix_flags,
		__u64 mode, __u16 *netfid, FILE_UNIX_BASIC_INFO *pRetData,
		__u32 *pOplock, const char *name,
		const struct nls_table *nls_codepage, int remap)
{
	TRANSACTION2_SPI_REQ *pSMB = NULL;
	TRANSACTION2_SPI_RSP *pSMBr = NULL;
	int name_len;
	int rc = 0;
	int bytes_returned = 0;
	__u16 params, param_offset, offset, byte_count, count;
	OPEN_PSX_REQ *pdata;
	OPEN_PSX_RSP *psx_rsp;

	cFYI(1, "In POSIX Create");
PsxCreat:
	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
		      (void **) &pSMBr);
	if (rc)
		return rc;

	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
		name_len =
		    cifsConvertToUCS((__le16 *) pSMB->FileName, name,
				     PATH_MAX, nls_codepage, remap);
		name_len++;	/* trailing null */
		name_len *= 2;
	} else {	/* BB improve the check for buffer overruns BB */
		name_len = strnlen(name, PATH_MAX);
		name_len++;	/* trailing null */
		strncpy(pSMB->FileName, name, name_len);
	}

	params = 6 + name_len;
	count = sizeof(OPEN_PSX_REQ);
	pSMB->MaxParameterCount = cpu_to_le16(2);
	pSMB->MaxDataCount = cpu_to_le16(1000);	/* large enough */
	pSMB->MaxSetupCount = 0;
	pSMB->Reserved = 0;
	pSMB->Flags = 0;
	pSMB->Timeout = 0;
	pSMB->Reserved2 = 0;
	param_offset = offsetof(struct smb_com_transaction2_spi_req,
				InformationLevel) - 4;
	offset = param_offset + params;
	/* Request data (OPEN_PSX_REQ) follows the parameter block. */
	pdata = (OPEN_PSX_REQ *)(((char *)&pSMB->hdr.Protocol) + offset);
	pdata->Level = cpu_to_le16(SMB_QUERY_FILE_UNIX_BASIC);
	pdata->Permissions = cpu_to_le64(mode);
	pdata->PosixOpenFlags = cpu_to_le32(posix_flags);
	pdata->OpenFlags = cpu_to_le32(*pOplock);
	pSMB->ParameterOffset = cpu_to_le16(param_offset);
	pSMB->DataOffset = cpu_to_le16(offset);
	pSMB->SetupCount = 1;
	pSMB->Reserved3 = 0;
	pSMB->SubCommand = cpu_to_le16(TRANS2_SET_PATH_INFORMATION);
	byte_count = 3 /* pad */  + params + count;

	pSMB->DataCount = cpu_to_le16(count);
	pSMB->ParameterCount = cpu_to_le16(params);
	pSMB->TotalDataCount = pSMB->DataCount;
	pSMB->TotalParameterCount = pSMB->ParameterCount;
	pSMB->InformationLevel = cpu_to_le16(SMB_POSIX_OPEN);
	pSMB->Reserved4 = 0;
	inc_rfc1001_len(pSMB, byte_count);
	pSMB->ByteCount = cpu_to_le16(byte_count);
	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
	if (rc) {
		cFYI(1, "Posix create returned %d", rc);
		goto psx_create_err;
	}

	cFYI(1, "copying inode info");
	rc = validate_t2((struct smb_t2_rsp *)pSMBr);
	if (rc || get_bcc(&pSMBr->hdr) < sizeof(OPEN_PSX_RSP)) {
		rc = -EIO;	/* bad smb */
		goto psx_create_err;
	}

	/* copy return information to pRetData */
	psx_rsp = (OPEN_PSX_RSP *)((char *) &pSMBr->hdr.Protocol +
			le16_to_cpu(pSMBr->t2.DataOffset));

	*pOplock = le16_to_cpu(psx_rsp->OplockFlags);
	if (netfid)
		*netfid = psx_rsp->Fid;	/* cifs fid stays in le */
	/* Let caller know file was created so we can set the mode. */
	/* Do we care about the CreateAction in any other cases? */
	if (cpu_to_le32(FILE_CREATE) == psx_rsp->CreateAction)
		*pOplock |= CIFS_CREATE_ACTION;
	/* check to make sure response data is there */
	if (psx_rsp->ReturnedLevel != cpu_to_le16(SMB_QUERY_FILE_UNIX_BASIC)) {
		pRetData->Type = cpu_to_le32(-1); /* unknown */
		cFYI(DBG2, "unknown type");
	} else {
		if (get_bcc(&pSMBr->hdr) < sizeof(OPEN_PSX_RSP)
					+ sizeof(FILE_UNIX_BASIC_INFO)) {
			cERROR(1, "Open response data too small");
			pRetData->Type = cpu_to_le32(-1);
			goto psx_create_err;
		}
		memcpy((char *) pRetData,
			(char *)psx_rsp + sizeof(OPEN_PSX_RSP),
			sizeof(FILE_UNIX_BASIC_INFO));
	}

psx_create_err:
	cifs_buf_release(pSMB);

	if (posix_flags & SMB_O_DIRECTORY)
		cifs_stats_inc(&tcon->num_posixmkdirs);
	else
		cifs_stats_inc(&tcon->num_posixopens);

	if (rc == -EAGAIN)
		goto PsxCreat;

	return rc;
}

/*
 * convert_disposition - map NT create disposition (FILE_*) to the legacy
 * SMBOPEN_* OpenFunction bits used by the OpenX (pre-NT) open call.
 * Unknown values fall back to "open existing" (SMBOPEN_OAPPEND).
 */
static __u16 convert_disposition(int disposition)
{
	__u16 ofun = 0;

	switch (disposition) {
	case FILE_SUPERSEDE:
		ofun = SMBOPEN_OCREATE | SMBOPEN_OTRUNC;
		break;
	case FILE_OPEN:
		ofun = SMBOPEN_OAPPEND;
		break;
	case FILE_CREATE:
		ofun = SMBOPEN_OCREATE;
		break;
	case FILE_OPEN_IF:
		ofun = SMBOPEN_OCREATE | SMBOPEN_OAPPEND;
		break;
	case FILE_OVERWRITE:
		ofun = SMBOPEN_OTRUNC;
		break;
	case FILE_OVERWRITE_IF:
		ofun = SMBOPEN_OCREATE | SMBOPEN_OTRUNC;
		break;
	default:
		cFYI(1, "unknown disposition %d", disposition);
		ofun =  SMBOPEN_OAPPEND; /* regular open */
	}
	return ofun;
}

/*
 * access_flags_to_smbopen_mode - map NT GENERIC_READ/GENERIC_WRITE access
 * bits to the legacy SMBOPEN access mode; anything else (both bits, or
 * neither) requests read/write.
 */
static int
access_flags_to_smbopen_mode(const int access_flags)
{
	int masked_flags = access_flags & (GENERIC_READ | GENERIC_WRITE);

	if (masked_flags == GENERIC_READ)
		return SMBOPEN_READ;
	else if (masked_flags == GENERIC_WRITE)
		return SMBOPEN_WRITE;

	/* just go for read/write */
	return SMBOPEN_READWRITE;
}

/*
 * SMBLegacyOpen - open/create a file on pre-NT servers via
 * SMB_COM_OPEN_ANDX (OpenX).  Same contract as CIFSSMBOpen but for old
 * dialects; several response fields (times, CreateAction) are not yet
 * filled in/decoded (see BB FIXME markers).  Retries on -EAGAIN.
 */
int
SMBLegacyOpen(const int xid, struct cifs_tcon *tcon,
	    const char *fileName, const int openDisposition,
	    const int access_flags, const int create_options, __u16 *netfid,
	    int *pOplock, FILE_ALL_INFO *pfile_info,
	    const struct nls_table *nls_codepage, int remap)
{
	int rc = -EACCES;
	OPENX_REQ *pSMB = NULL;
	OPENX_RSP *pSMBr = NULL;
	int
bytes_returned;
	int name_len;
	__u16 count;

OldOpenRetry:
	rc = smb_init(SMB_COM_OPEN_ANDX, 15, tcon, (void **) &pSMB,
		      (void **) &pSMBr);
	if (rc)
		return rc;

	pSMB->AndXCommand = 0xFF;	/* none */

	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
		count = 1;	/* account for one byte pad to word boundary */
		name_len =
		   cifsConvertToUCS((__le16 *) (pSMB->fileName + 1),
				    fileName, PATH_MAX, nls_codepage, remap);
		name_len++;	/* trailing null */
		name_len *= 2;
	} else {		/* BB improve check for buffer overruns BB */
		count = 0;	/* no pad */
		name_len = strnlen(fileName, PATH_MAX);
		name_len++;	/* trailing null */
		strncpy(pSMB->fileName, fileName, name_len);
	}
	if (*pOplock & REQ_OPLOCK)
		pSMB->OpenFlags = cpu_to_le16(REQ_OPLOCK);
	else if (*pOplock & REQ_BATCHOPLOCK)
		pSMB->OpenFlags = cpu_to_le16(REQ_BATCHOPLOCK);

	pSMB->OpenFlags |= cpu_to_le16(REQ_MORE_INFO);
	pSMB->Mode = cpu_to_le16(access_flags_to_smbopen_mode(access_flags));
	pSMB->Mode |= cpu_to_le16(0x40);	/* deny none */
	/*
	 * set file as system file if special file such as fifo and server
	 * expecting SFU style and no Unix extensions
	 */
	if (create_options & CREATE_OPTION_SPECIAL)
		pSMB->FileAttributes = cpu_to_le16(ATTR_SYSTEM);
	else /* BB FIXME BB */
		pSMB->FileAttributes = cpu_to_le16(0/*ATTR_NORMAL*/);

	if (create_options & CREATE_OPTION_READONLY)
		pSMB->FileAttributes |= cpu_to_le16(ATTR_READONLY);

	/* BB FIXME BB */
/*	pSMB->CreateOptions = cpu_to_le32(create_options &
						 CREATE_OPTIONS_MASK); */
	/* BB FIXME END BB */

	pSMB->Sattr = cpu_to_le16(ATTR_HIDDEN | ATTR_SYSTEM | ATTR_DIRECTORY);
	pSMB->OpenFunction = cpu_to_le16(convert_disposition(openDisposition));
	count += name_len;
	inc_rfc1001_len(pSMB, count);

	pSMB->ByteCount = cpu_to_le16(count);
	/* long_op set to 1 to allow for oplock break timeouts */
	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
			 (struct smb_hdr *)pSMBr, &bytes_returned, 0);
	cifs_stats_inc(&tcon->num_opens);
	if (rc) {
		cFYI(1, "Error in Open = %d", rc);
	} else {
	/* BB verify if wct == 15 */

/*		*pOplock = pSMBr->OplockLevel; */  /* BB take from action field*/

		*netfid = pSMBr->Fid;	/* cifs fid stays in le */
		/* Let caller know file was created so we can set the mode. */
		/* Do we care about the CreateAction in any other cases? */
	/* BB FIXME BB */
/*		if (cpu_to_le32(FILE_CREATE) == pSMBr->CreateAction)
			*pOplock |= CIFS_CREATE_ACTION; */
	/* BB FIXME END */

		if (pfile_info) {
			/* Legacy response lacks NT timestamps; zero them. */
			pfile_info->CreationTime = 0; /* BB convert CreateTime*/
			pfile_info->LastAccessTime = 0; /* BB fixme */
			pfile_info->LastWriteTime = 0; /* BB fixme */
			pfile_info->ChangeTime = 0;  /* BB fixme */
			pfile_info->Attributes =
				cpu_to_le32(le16_to_cpu(pSMBr->FileAttributes));
			/* the file_info buf is endian converted by caller */
			pfile_info->AllocationSize =
				cpu_to_le64(le32_to_cpu(pSMBr->EndOfFile));
			pfile_info->EndOfFile = pfile_info->AllocationSize;
			pfile_info->NumberOfLinks = cpu_to_le32(1);
			pfile_info->DeletePending = 0;
		}
	}

	cifs_buf_release(pSMB);
	if (rc == -EAGAIN)
		goto OldOpenRetry;
	return rc;
}

/*
 * CIFSSMBOpen - open/create a file via SMB_COM_NT_CREATE_ANDX.
 *
 * @openDisposition: FILE_OPEN/FILE_CREATE/... create disposition
 * @access_flags:    NT desired-access mask
 * @create_options:  NT create options (masked with CREATE_OPTIONS_MASK)
 * @netfid:          out, file handle (kept little-endian)
 * @pOplock:         in/out, oplock request bits in, granted level (plus
 *                   CIFS_CREATE_ACTION when newly created) out
 * @pfile_info:      out, optional basic attributes from the response
 *
 * Retries the whole exchange on -EAGAIN.
 */
int
CIFSSMBOpen(const int xid, struct cifs_tcon *tcon,
	    const char *fileName, const int openDisposition,
	    const int access_flags, const int create_options, __u16 *netfid,
	    int *pOplock, FILE_ALL_INFO *pfile_info,
	    const struct nls_table *nls_codepage, int remap)
{
	int rc = -EACCES;
	OPEN_REQ *pSMB = NULL;
	OPEN_RSP *pSMBr = NULL;
	int bytes_returned;
	int name_len;
	__u16 count;

openRetry:
	rc = smb_init(SMB_COM_NT_CREATE_ANDX, 24, tcon, (void **) &pSMB,
		      (void **) &pSMBr);
	if (rc)
		return rc;

	pSMB->AndXCommand = 0xFF;	/* none */

	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
		count = 1;	/* account for one byte pad to word boundary */
		name_len =
		    cifsConvertToUCS((__le16 *) (pSMB->fileName + 1),
				     fileName, PATH_MAX, nls_codepage, remap);
		name_len++;	/* trailing null */
		name_len *= 2;
		pSMB->NameLength = cpu_to_le16(name_len);
	} else {		/* BB improve check for buffer overruns BB */
		count = 0;	/* no pad */
		name_len = strnlen(fileName, PATH_MAX);
		name_len++;	/* trailing null */
		pSMB->NameLength = cpu_to_le16(name_len);
strncpy(pSMB->fileName, fileName, name_len);
	}
	if (*pOplock & REQ_OPLOCK)
		pSMB->OpenFlags = cpu_to_le32(REQ_OPLOCK);
	else if (*pOplock & REQ_BATCHOPLOCK)
		pSMB->OpenFlags = cpu_to_le32(REQ_BATCHOPLOCK);
	pSMB->DesiredAccess = cpu_to_le32(access_flags);
	pSMB->AllocationSize = 0;
	/*
	 * set file as system file if special file such as fifo and server
	 * expecting SFU style and no Unix extensions
	 */
	if (create_options & CREATE_OPTION_SPECIAL)
		pSMB->FileAttributes = cpu_to_le32(ATTR_SYSTEM);
	else
		pSMB->FileAttributes = cpu_to_le32(ATTR_NORMAL);

	/* XP does not handle ATTR_POSIX_SEMANTICS */
	/*
	 * but it helps speed up case sensitive checks for other servers such
	 * as Samba
	 */
	if (tcon->ses->capabilities & CAP_UNIX)
		pSMB->FileAttributes |= cpu_to_le32(ATTR_POSIX_SEMANTICS);

	if (create_options & CREATE_OPTION_READONLY)
		pSMB->FileAttributes |= cpu_to_le32(ATTR_READONLY);

	pSMB->ShareAccess = cpu_to_le32(FILE_SHARE_ALL);
	pSMB->CreateDisposition = cpu_to_le32(openDisposition);
	pSMB->CreateOptions = cpu_to_le32(create_options & CREATE_OPTIONS_MASK);
	/* BB Expirement with various impersonation levels and verify */
	pSMB->ImpersonationLevel = cpu_to_le32(SECURITY_IMPERSONATION);
	pSMB->SecurityFlags =
	    SECURITY_CONTEXT_TRACKING | SECURITY_EFFECTIVE_ONLY;

	count += name_len;
	inc_rfc1001_len(pSMB, count);

	pSMB->ByteCount = cpu_to_le16(count);
	/* long_op set to 1 to allow for oplock break timeouts */
	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
			 (struct smb_hdr *)pSMBr, &bytes_returned, 0);
	cifs_stats_inc(&tcon->num_opens);
	if (rc) {
		cFYI(1, "Error in Open = %d", rc);
	} else {
		*pOplock = pSMBr->OplockLevel; /* 1 byte no need to le_to_cpu */
		*netfid = pSMBr->Fid;	/* cifs fid stays in le */
		/* Let caller know file was created so we can set the mode. */
		/* Do we care about the CreateAction in any other cases? */
		if (cpu_to_le32(FILE_CREATE) == pSMBr->CreateAction)
			*pOplock |= CIFS_CREATE_ACTION;
		if (pfile_info) {
			/* Bulk-copy the 36 contiguous bytes of timestamps
			   plus attributes straight from the response. */
			memcpy((char *)pfile_info, (char *)&pSMBr->CreationTime,
				36 /* CreationTime to Attributes */);
			/* the file_info buf is endian converted by caller */
			pfile_info->AllocationSize = pSMBr->AllocationSize;
			pfile_info->EndOfFile = pSMBr->EndOfFile;
			pfile_info->NumberOfLinks = cpu_to_le32(1);
			pfile_info->DeletePending = 0;
		}
	}

	cifs_buf_release(pSMB);
	if (rc == -EAGAIN)
		goto openRetry;
	return rc;
}

/*
 * CIFSSMBRead - read from an open file via SMB_COM_READ_ANDX.
 *
 * @io_parms:  fid/offset/length/pid/tcon for the read
 * @nbytes:    out, number of bytes actually read (0 on error)
 * @buf:       in/out - if *buf is set, data is copied into it; otherwise
 *             the response buffer itself is handed back via *buf and
 *             *pbuf_type (caller must release it)
 * @pbuf_type: out, CIFS_SMALL_BUFFER/CIFS_LARGE_BUFFER when *buf is
 *             returned to the caller
 *
 * Uses the wct==10 "old style" request for servers without
 * CAP_LARGE_FILES (which cannot take a 64-bit offset).  On -EAGAIN only
 * the caller can retry, since the handle may no longer be valid.
 */
int
CIFSSMBRead(const int xid, struct cifs_io_parms *io_parms, unsigned int *nbytes,
	    char **buf, int *pbuf_type)
{
	int rc = -EACCES;
	READ_REQ *pSMB = NULL;
	READ_RSP *pSMBr = NULL;
	char *pReadData = NULL;
	int wct;
	int resp_buf_type = 0;
	struct kvec iov[1];
	__u32 pid = io_parms->pid;
	__u16 netfid = io_parms->netfid;
	__u64 offset = io_parms->offset;
	struct cifs_tcon *tcon = io_parms->tcon;
	unsigned int count = io_parms->length;

	cFYI(1, "Reading %d bytes on fid %d", count, netfid);
	if (tcon->ses->capabilities & CAP_LARGE_FILES)
		wct = 12;
	else {
		wct = 10; /* old style read */
		if ((offset >> 32) > 0)  {
			/* can not handle this big offset for old */
			return -EIO;
		}
	}

	*nbytes = 0;
	rc = small_smb_init(SMB_COM_READ_ANDX, wct, tcon, (void **) &pSMB);
	if (rc)
		return rc;

	pSMB->hdr.Pid = cpu_to_le16((__u16)pid);
	pSMB->hdr.PidHigh = cpu_to_le16((__u16)(pid >> 16));

	/* tcon and ses pointer are checked in smb_init */
	/* NOTE(review): returning here leaks the buffer allocated by
	   small_smb_init above — verify against upstream fix. */
	if (tcon->ses->server == NULL)
		return -ECONNABORTED;

	pSMB->AndXCommand = 0xFF;	/* none */
	pSMB->Fid = netfid;
	pSMB->OffsetLow = cpu_to_le32(offset & 0xFFFFFFFF);
	if (wct == 12)
		pSMB->OffsetHigh = cpu_to_le32(offset >> 32);

	pSMB->Remaining = 0;
	pSMB->MaxCount = cpu_to_le16(count & 0xFFFF);
	pSMB->MaxCountHigh = cpu_to_le32(count >> 16);
	if (wct == 12)
		pSMB->ByteCount = 0;  /* no need to do le conversion since 0 */
	else {
		/* old style read */
		struct smb_com_readx_req *pSMBW =
			(struct smb_com_readx_req *)pSMB;
		pSMBW->ByteCount = 0;
	}

	iov[0].iov_base = (char *)pSMB;
	iov[0].iov_len =
be32_to_cpu(pSMB->hdr.smb_buf_length) + 4;
	rc = SendReceive2(xid, tcon->ses, iov, 1 /* num iovecs */,
			 &resp_buf_type, CIFS_LOG_ERROR);
	cifs_stats_inc(&tcon->num_reads);
	pSMBr = (READ_RSP *)iov[0].iov_base;
	if (rc) {
		cERROR(1, "Send error in read = %d", rc);
	} else {
		/* Reassemble the 32-bit length from the two 16-bit halves. */
		int data_length = le16_to_cpu(pSMBr->DataLengthHigh);
		data_length = data_length << 16;
		data_length += le16_to_cpu(pSMBr->DataLength);
		*nbytes = data_length;

		/*check that DataLength would not go beyond end of SMB */
		if ((data_length > CIFSMaxBufSize)
				|| (data_length > count)) {
			cFYI(1, "bad length %d for count %d",
				 data_length, count);
			rc = -EIO;
			*nbytes = 0;
		} else {
			pReadData = (char *) (&pSMBr->hdr.Protocol) +
					le16_to_cpu(pSMBr->DataOffset);
/*			if (rc = copy_to_user(buf, pReadData, data_length)) {
				cERROR(1, "Faulting on read rc = %d",rc);
				rc = -EFAULT;
			}*/ /* can not use copy_to_user when using page cache*/
			if (*buf)
				memcpy(*buf, pReadData, data_length);
		}
	}

/*	cifs_small_buf_release(pSMB); */ /* Freed earlier now in SendReceive2 */
	if (*buf) {
		/* Caller supplied a buffer: release the response. */
		if (resp_buf_type == CIFS_SMALL_BUFFER)
			cifs_small_buf_release(iov[0].iov_base);
		else if (resp_buf_type == CIFS_LARGE_BUFFER)
			cifs_buf_release(iov[0].iov_base);
	} else if (resp_buf_type != CIFS_NO_BUFFER) {
		/* return buffer to caller to free */
		*buf = iov[0].iov_base;
		if (resp_buf_type == CIFS_SMALL_BUFFER)
			*pbuf_type = CIFS_SMALL_BUFFER;
		else if (resp_buf_type == CIFS_LARGE_BUFFER)
			*pbuf_type = CIFS_LARGE_BUFFER;
	} /* else no valid buffer on return - leave as null */

	/*
	 * Note: On -EAGAIN error only caller can retry on handle based calls
	 * since file handle passed in no longer valid
	 */
	return rc;
}

/*
 * CIFSSMBWrite - write to an open file via SMB_COM_WRITE_ANDX.
 *
 * @io_parms: fid/offset/length/pid/tcon for the write
 * @nbytes:   out, number of bytes the server reports written
 * @buf:      kernel source buffer (used when non-NULL)
 * @ubuf:     userspace source buffer (used when @buf is NULL)
 * @long_op:  timeout class passed through to SendReceive
 *
 * The amount actually sent may be less than requested (bounded by the
 * negotiated buffer size unless CAP_LARGE_WRITE_X).  On -EAGAIN only the
 * caller can retry, since the handle may no longer be valid.
 */
int
CIFSSMBWrite(const int xid, struct cifs_io_parms *io_parms,
	     unsigned int *nbytes, const char *buf,
	     const char __user *ubuf, const int long_op)
{
	int rc = -EACCES;
	WRITE_REQ *pSMB = NULL;
	WRITE_RSP *pSMBr = NULL;
	int bytes_returned, wct;
	__u32 bytes_sent;
	__u16 byte_count;
	__u32 pid = io_parms->pid;
	__u16 netfid = io_parms->netfid;
	__u64 offset = io_parms->offset;
	struct cifs_tcon *tcon = io_parms->tcon;
	unsigned int count = io_parms->length;

	*nbytes = 0;

	/* cFYI(1, "write at %lld %d bytes", offset, count);*/
	if (tcon->ses == NULL)
		return -ECONNABORTED;

	if (tcon->ses->capabilities & CAP_LARGE_FILES)
		wct = 14;
	else {
		wct = 12;	/* old style write, 32-bit offsets only */
		if ((offset >> 32) > 0) {
			/* can not handle big offset for old srv */
			return -EIO;
		}
	}

	rc = smb_init(SMB_COM_WRITE_ANDX, wct, tcon, (void **) &pSMB,
		      (void **) &pSMBr);
	if (rc)
		return rc;

	pSMB->hdr.Pid = cpu_to_le16((__u16)pid);
	pSMB->hdr.PidHigh = cpu_to_le16((__u16)(pid >> 16));

	/* tcon and ses pointer are checked in smb_init */
	/* NOTE(review): returning here leaks the buffer allocated by
	   smb_init above — verify against upstream fix. */
	if (tcon->ses->server == NULL)
		return -ECONNABORTED;

	pSMB->AndXCommand = 0xFF;	/* none */
	pSMB->Fid = netfid;
	pSMB->OffsetLow = cpu_to_le32(offset & 0xFFFFFFFF);
	if (wct == 14)
		pSMB->OffsetHigh = cpu_to_le32(offset >> 32);

	pSMB->Reserved = 0xFFFFFFFF;
	pSMB->WriteMode = 0;
	pSMB->Remaining = 0;

	/*
	 * Can increase buffer size if buffer is big enough in some cases ie
	 * we can send more if LARGE_WRITE_X capability returned by the server
	 * and if our buffer is big enough or if we convert to iovecs on
	 * socket writes and eliminate the copy to the CIFS buffer
	 */
	if (tcon->ses->capabilities & CAP_LARGE_WRITE_X) {
		bytes_sent = min_t(const unsigned int, CIFSMaxBufSize, count);
	} else {
		bytes_sent = (tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE)
			 & ~0xFF;
	}

	if (bytes_sent > count)
		bytes_sent = count;
	pSMB->DataOffset =
		cpu_to_le16(offsetof(struct smb_com_write_req, Data) - 4);
	if (buf)
		memcpy(pSMB->Data, buf, bytes_sent);
	else if (ubuf) {
		if (copy_from_user(pSMB->Data, ubuf, bytes_sent)) {
			cifs_buf_release(pSMB);
			return -EFAULT;
		}
	} else if (count != 0) {
		/* No buffer */
		cifs_buf_release(pSMB);
		return -EINVAL;
	} /* else setting file size with write of zero bytes */
	if (wct == 14)
		byte_count = bytes_sent + 1;	/* pad */
	else /* wct == 12 */
		byte_count = bytes_sent + 5; /* bigger pad, smaller smb hdr */

	pSMB->DataLengthLow = cpu_to_le16(bytes_sent & 0xFFFF);
	pSMB->DataLengthHigh = cpu_to_le16(bytes_sent >> 16);
	inc_rfc1001_len(pSMB, byte_count);

	if (wct == 14)
		pSMB->ByteCount = cpu_to_le16(byte_count);
	else {
		/*
		 * old style write has byte count 4 bytes earlier
		 * so 4 bytes pad
		 */
		struct smb_com_writex_req *pSMBW =
			(struct smb_com_writex_req *)pSMB;
		pSMBW->ByteCount = cpu_to_le16(byte_count);
	}

	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
			 (struct smb_hdr *) pSMBr, &bytes_returned, long_op);
	cifs_stats_inc(&tcon->num_writes);
	if (rc) {
		cFYI(1, "Send error in write = %d", rc);
	} else {
		/* Reassemble the 32-bit count from the 16-bit halves. */
		*nbytes = le16_to_cpu(pSMBr->CountHigh);
		*nbytes = (*nbytes) << 16;
		*nbytes += le16_to_cpu(pSMBr->Count);

		/*
		 * Mask off high 16 bits when bytes written as returned by the
		 * server is greater than bytes requested by the client. Some
		 * OS/2 servers are known to set incorrect CountHigh values.
		 */
		if (*nbytes > count)
			*nbytes &= 0xFFFF;
	}

	cifs_buf_release(pSMB);

	/*
	 * Note: On -EAGAIN error only caller can retry on handle based calls
	 * since file handle passed in no longer valid
	 */

	return rc;
}

/*
 * cifs_writedata_release - kref release function for struct cifs_writedata.
 * Drops the open-file reference (if any) and frees the writedata itself.
 */
void
cifs_writedata_release(struct kref *refcount)
{
	struct cifs_writedata *wdata = container_of(refcount,
					struct cifs_writedata, refcount);

	if (wdata->cfile)
		cifsFileInfo_put(wdata->cfile);

	kfree(wdata);
}

/*
 * Write failed with a retryable error. Resend the write request. It's also
 * possible that the page was redirtied so re-clean the page.
 */
static void
cifs_writev_requeue(struct cifs_writedata *wdata)
{
	int i, rc;
	struct inode *inode = wdata->cfile->dentry->d_inode;

	/* relock and re-clean all pages before resending the write */
	for (i = 0; i < wdata->nr_pages; i++) {
		lock_page(wdata->pages[i]);
		clear_page_dirty_for_io(wdata->pages[i]);
	}

	do {
		rc = cifs_async_writev(wdata);
	} while (rc == -EAGAIN);

	for (i = 0; i < wdata->nr_pages; i++) {
		if (rc != 0)
			SetPageError(wdata->pages[i]);
		unlock_page(wdata->pages[i]);
	}

	mapping_set_error(inode->i_mapping, rc);
	/* drop the reference taken before cifs_call_async */
	kref_put(&wdata->refcount, cifs_writedata_release);
}

/* Workqueue completion handler: finalize pages after an async write. */
static void
cifs_writev_complete(struct work_struct *work)
{
	struct cifs_writedata *wdata = container_of(work,
						struct cifs_writedata, work);
	struct inode *inode = wdata->cfile->dentry->d_inode;
	int i = 0;

	if (wdata->result == 0) {
		cifs_update_eof(CIFS_I(inode), wdata->offset, wdata->bytes);
		cifs_stats_bytes_written(tlink_tcon(wdata->cfile->tlink),
					 wdata->bytes);
	} else if (wdata->sync_mode == WB_SYNC_ALL && wdata->result == -EAGAIN)
		/* requeue consumes our reference; nothing more to do here */
		return cifs_writev_requeue(wdata);

	for (i = 0; i < wdata->nr_pages; i++) {
		struct page *page = wdata->pages[i];
		if (wdata->result == -EAGAIN)
			__set_page_dirty_nobuffers(page);
		else if (wdata->result < 0)
			SetPageError(page);
		end_page_writeback(page);
		page_cache_release(page);
	}
	if (wdata->result != -EAGAIN)
		mapping_set_error(inode->i_mapping, wdata->result);
	kref_put(&wdata->refcount, cifs_writedata_release);
}

/* Allocate a cifs_writedata with room for nr_pages page pointers;
   returns NULL on failure or when nr_pages == 0. */
struct cifs_writedata *
cifs_writedata_alloc(unsigned int nr_pages)
{
	struct cifs_writedata *wdata;

	/* this would overflow */
	if (nr_pages == 0) {
		cERROR(1, "%s: called with nr_pages == 0!", __func__);
		return NULL;
	}

	/* writedata + number of page pointers */
	wdata = kzalloc(sizeof(*wdata) +
			sizeof(struct page *) * (nr_pages - 1), GFP_NOFS);
	if (wdata != NULL) {
		INIT_WORK(&wdata->work, cifs_writev_complete);
		kref_init(&wdata->refcount);
	}
	return wdata;
}

/*
 * Check the midState and signature on received buffer (if any), and queue the
 * workqueue completion task.
 */
static void
cifs_writev_callback(struct mid_q_entry *mid)
{
	struct cifs_writedata *wdata = mid->callback_data;
	struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
	unsigned int written;
	WRITE_RSP *smb = (WRITE_RSP *)mid->resp_buf;

	switch (mid->midState) {
	case MID_RESPONSE_RECEIVED:
		wdata->result = cifs_check_receive(mid, tcon->ses->server, 0);
		if (wdata->result != 0)
			break;

		written = le16_to_cpu(smb->CountHigh);
		written <<= 16;
		written += le16_to_cpu(smb->Count);
		/*
		 * Mask off high 16 bits when bytes written as returned
		 * by the server is greater than bytes requested by the
		 * client. OS/2 servers are known to set incorrect
		 * CountHigh values.
		 */
		if (written > wdata->bytes)
			written &= 0xFFFF;

		if (written < wdata->bytes)
			wdata->result = -ENOSPC;
		else
			wdata->bytes = written;
		break;
	case MID_REQUEST_SUBMITTED:
	case MID_RETRY_NEEDED:
		wdata->result = -EAGAIN;
		break;
	default:
		wdata->result = -EIO;
		break;
	}

	queue_work(system_nrt_wq, &wdata->work);
	DeleteMidQEntry(mid);
	/* release the in-flight slot and wake any waiting sender */
	atomic_dec(&tcon->ses->server->inFlight);
	wake_up(&tcon->ses->server->request_q);
}

/* cifs_async_writev - send an async write, and set up mid to handle result */
int
cifs_async_writev(struct cifs_writedata *wdata)
{
	int i, rc = -EACCES;
	WRITE_REQ *smb = NULL;
	int wct;
	struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
	struct inode *inode = wdata->cfile->dentry->d_inode;
	struct kvec *iov = NULL;

	if (tcon->ses->capabilities & CAP_LARGE_FILES) {
		wct = 14;
	} else {
		wct = 12;
		if (wdata->offset >> 32 > 0) {
			/* can not handle big offset for old srv */
			return -EIO;
		}
	}

	rc = small_smb_init(SMB_COM_WRITE_ANDX, wct, tcon, (void **)&smb);
	if (rc)
		goto async_writev_out;

	/* 1 iov per page + 1 for header */
	iov = kzalloc((wdata->nr_pages + 1) * sizeof(*iov), GFP_NOFS);
	if (iov == NULL) {
		rc = -ENOMEM;
		goto async_writev_out;
	}

	smb->hdr.Pid = cpu_to_le16((__u16)wdata->cfile->pid);
	smb->hdr.PidHigh = cpu_to_le16((__u16)(wdata->cfile->pid >> 16));

	smb->AndXCommand = 0xFF;	/* none */
	smb->Fid =
wdata->cfile->netfid; smb->OffsetLow = cpu_to_le32(wdata->offset & 0xFFFFFFFF); if (wct == 14) smb->OffsetHigh = cpu_to_le32(wdata->offset >> 32); smb->Reserved = 0xFFFFFFFF; smb->WriteMode = 0; smb->Remaining = 0; smb->DataOffset = cpu_to_le16(offsetof(struct smb_com_write_req, Data) - 4); /* 4 for RFC1001 length + 1 for BCC */ iov[0].iov_len = be32_to_cpu(smb->hdr.smb_buf_length) + 4 + 1; iov[0].iov_base = smb; /* marshal up the pages into iov array */ wdata->bytes = 0; for (i = 0; i < wdata->nr_pages; i++) { iov[i + 1].iov_len = min(inode->i_size - page_offset(wdata->pages[i]), (loff_t)PAGE_CACHE_SIZE); iov[i + 1].iov_base = kmap(wdata->pages[i]); wdata->bytes += iov[i + 1].iov_len; } cFYI(1, "async write at %llu %u bytes", wdata->offset, wdata->bytes); smb->DataLengthLow = cpu_to_le16(wdata->bytes & 0xFFFF); smb->DataLengthHigh = cpu_to_le16(wdata->bytes >> 16); if (wct == 14) { inc_rfc1001_len(&smb->hdr, wdata->bytes + 1); put_bcc(wdata->bytes + 1, &smb->hdr); } else { /* wct == 12 */ struct smb_com_writex_req *smbw = (struct smb_com_writex_req *)smb; inc_rfc1001_len(&smbw->hdr, wdata->bytes + 5); put_bcc(wdata->bytes + 5, &smbw->hdr); iov[0].iov_len += 4; /* pad bigger by four bytes */ } kref_get(&wdata->refcount); rc = cifs_call_async(tcon->ses->server, iov, wdata->nr_pages + 1, cifs_writev_callback, wdata, false); if (rc == 0) cifs_stats_inc(&tcon->num_writes); else kref_put(&wdata->refcount, cifs_writedata_release); /* send is done, unmap pages */ for (i = 0; i < wdata->nr_pages; i++) kunmap(wdata->pages[i]); async_writev_out: cifs_small_buf_release(smb); kfree(iov); return rc; } int CIFSSMBWrite2(const int xid, struct cifs_io_parms *io_parms, unsigned int *nbytes, struct kvec *iov, int n_vec, const int long_op) { int rc = -EACCES; WRITE_REQ *pSMB = NULL; int wct; int smb_hdr_len; int resp_buf_type = 0; __u32 pid = io_parms->pid; __u16 netfid = io_parms->netfid; __u64 offset = io_parms->offset; struct cifs_tcon *tcon = io_parms->tcon; unsigned int count = 
io_parms->length; *nbytes = 0; cFYI(1, "write2 at %lld %d bytes", (long long)offset, count); if (tcon->ses->capabilities & CAP_LARGE_FILES) { wct = 14; } else { wct = 12; if ((offset >> 32) > 0) { /* can not handle big offset for old srv */ return -EIO; } } rc = small_smb_init(SMB_COM_WRITE_ANDX, wct, tcon, (void **) &pSMB); if (rc) return rc; pSMB->hdr.Pid = cpu_to_le16((__u16)pid); pSMB->hdr.PidHigh = cpu_to_le16((__u16)(pid >> 16)); /* tcon and ses pointer are checked in smb_init */ if (tcon->ses->server == NULL) return -ECONNABORTED; pSMB->AndXCommand = 0xFF; /* none */ pSMB->Fid = netfid; pSMB->OffsetLow = cpu_to_le32(offset & 0xFFFFFFFF); if (wct == 14) pSMB->OffsetHigh = cpu_to_le32(offset >> 32); pSMB->Reserved = 0xFFFFFFFF; pSMB->WriteMode = 0; pSMB->Remaining = 0; pSMB->DataOffset = cpu_to_le16(offsetof(struct smb_com_write_req, Data) - 4); pSMB->DataLengthLow = cpu_to_le16(count & 0xFFFF); pSMB->DataLengthHigh = cpu_to_le16(count >> 16); /* header + 1 byte pad */ smb_hdr_len = be32_to_cpu(pSMB->hdr.smb_buf_length) + 1; if (wct == 14) inc_rfc1001_len(pSMB, count + 1); else /* wct == 12 */ inc_rfc1001_len(pSMB, count + 5); /* smb data starts later */ if (wct == 14) pSMB->ByteCount = cpu_to_le16(count + 1); else /* wct == 12 */ /* bigger pad, smaller smb hdr, keep offset ok */ { struct smb_com_writex_req *pSMBW = (struct smb_com_writex_req *)pSMB; pSMBW->ByteCount = cpu_to_le16(count + 5); } iov[0].iov_base = pSMB; if (wct == 14) iov[0].iov_len = smb_hdr_len + 4; else /* wct == 12 pad bigger by four bytes */ iov[0].iov_len = smb_hdr_len + 8; rc = SendReceive2(xid, tcon->ses, iov, n_vec + 1, &resp_buf_type, long_op); cifs_stats_inc(&tcon->num_writes); if (rc) { cFYI(1, "Send error Write2 = %d", rc); } else if (resp_buf_type == 0) { /* presumably this can not happen, but best to be safe */ rc = -EIO; } else { WRITE_RSP *pSMBr = (WRITE_RSP *)iov[0].iov_base; *nbytes = le16_to_cpu(pSMBr->CountHigh); *nbytes = (*nbytes) << 16; *nbytes += 
		le16_to_cpu(pSMBr->Count);

		/*
		 * Mask off high 16 bits when bytes written as returned by the
		 * server is greater than bytes requested by the client. OS/2
		 * servers are known to set incorrect CountHigh values.
		 */
		if (*nbytes > count)
			*nbytes &= 0xFFFF;
	}

/*	cifs_small_buf_release(pSMB); */ /* Freed earlier now in SendReceive2 */
	if (resp_buf_type == CIFS_SMALL_BUFFER)
		cifs_small_buf_release(iov[0].iov_base);
	else if (resp_buf_type == CIFS_LARGE_BUFFER)
		cifs_buf_release(iov[0].iov_base);

	/* Note: On -EAGAIN error only caller can retry on handle based calls
		since file handle passed in no longer valid */

	return rc;
}

/* Send SMB_COM_LOCKING_ANDX: byte-range lock/unlock or oplock release on
   an open fid. waitFlag selects a blocking (no timeout) operation. */
int
CIFSSMBLock(const int xid, struct cifs_tcon *tcon,
	    const __u16 smb_file_id, const __u64 len,
	    const __u64 offset, const __u32 numUnlock,
	    const __u32 numLock, const __u8 lockType,
	    const bool waitFlag, const __u8 oplock_level)
{
	int rc = 0;
	LOCK_REQ *pSMB = NULL;
/*	LOCK_RSP *pSMBr = NULL; */ /* No response data other than rc to parse */
	int bytes_returned;
	int timeout = 0;
	__u16 count;

	cFYI(1, "CIFSSMBLock timeout %d numLock %d", (int)waitFlag, numLock);
	rc = small_smb_init(SMB_COM_LOCKING_ANDX, 8, tcon, (void **) &pSMB);

	if (rc)
		return rc;

	if (lockType == LOCKING_ANDX_OPLOCK_RELEASE) {
		timeout = CIFS_ASYNC_OP; /* no response expected */
		pSMB->Timeout = 0;
	} else if (waitFlag) {
		timeout = CIFS_BLOCKING_OP; /* blocking operation, no timeout */
		pSMB->Timeout = cpu_to_le32(-1);/* blocking - do not time out */
	} else {
		pSMB->Timeout = 0;
	}

	pSMB->NumberOfLocks = cpu_to_le16(numLock);
	pSMB->NumberOfUnlocks = cpu_to_le16(numUnlock);
	pSMB->LockType = lockType;
	pSMB->OplockLevel = oplock_level;
	pSMB->AndXCommand = 0xFF;	/* none */
	pSMB->Fid = smb_file_id; /* netfid stays le */

	if ((numLock != 0) || (numUnlock != 0)) {
		pSMB->Locks[0].Pid = cpu_to_le16(current->tgid);
		/* BB where to store pid high? */
		pSMB->Locks[0].LengthLow = cpu_to_le32((u32)len);
		pSMB->Locks[0].LengthHigh = cpu_to_le32((u32)(len>>32));
		pSMB->Locks[0].OffsetLow = cpu_to_le32((u32)offset);
		pSMB->Locks[0].OffsetHigh = cpu_to_le32((u32)(offset>>32));
		count = sizeof(LOCKING_ANDX_RANGE);
	} else {
		/* oplock break */
		count = 0;
	}
	inc_rfc1001_len(pSMB, count);
	pSMB->ByteCount = cpu_to_le16(count);

	if (waitFlag) {
		rc = SendReceiveBlockingLock(xid, tcon, (struct smb_hdr *) pSMB,
			(struct smb_hdr *) pSMB, &bytes_returned);
		cifs_small_buf_release(pSMB);
	} else {
		rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *)pSMB,
				      timeout);
		/* SMB buffer freed by function above */
	}
	cifs_stats_inc(&tcon->num_locks);
	if (rc)
		cFYI(1, "Send error in Lock = %d", rc);

	/* Note: On -EAGAIN error only caller can retry on handle based calls
	since file handle passed in no longer valid */
	return rc;
}

/* Set or query a POSIX byte-range lock via TRANS2 on an open fid.
   When get_flag is set the current lock state is decoded into pLockData. */
int
CIFSSMBPosixLock(const int xid, struct cifs_tcon *tcon,
		const __u16 smb_file_id, const int get_flag, const __u64 len,
		struct file_lock *pLockData, const __u16 lock_type,
		const bool waitFlag)
{
	struct smb_com_transaction2_sfi_req *pSMB  = NULL;
	struct smb_com_transaction2_sfi_rsp *pSMBr = NULL;
	struct cifs_posix_lock *parm_data;
	int rc = 0;
	int timeout = 0;
	int bytes_returned = 0;
	int resp_buf_type = 0;
	__u16 params, param_offset, offset, byte_count, count;
	struct kvec iov[1];

	cFYI(1, "Posix Lock");

	if (pLockData == NULL)
		return -EINVAL;

	rc = small_smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB);

	if (rc)
		return rc;

	pSMBr = (struct smb_com_transaction2_sfi_rsp *)pSMB;

	params = 6;
	pSMB->MaxSetupCount = 0;
	pSMB->Reserved = 0;
	pSMB->Flags = 0;
	pSMB->Reserved2 = 0;
	param_offset = offsetof(struct smb_com_transaction2_sfi_req, Fid) - 4;
	offset = param_offset + params;

	count = sizeof(struct cifs_posix_lock);
	pSMB->MaxParameterCount = cpu_to_le16(2);
	pSMB->MaxDataCount = cpu_to_le16(1000);	/* BB find max SMB from sess */
	pSMB->SetupCount = 1;
	pSMB->Reserved3 = 0;
	if (get_flag)
		pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_FILE_INFORMATION);
	else
		pSMB->SubCommand = cpu_to_le16(TRANS2_SET_FILE_INFORMATION);
	byte_count = 3 /* pad */  + params + count;
	pSMB->DataCount = cpu_to_le16(count);
	pSMB->ParameterCount = cpu_to_le16(params);
	pSMB->TotalDataCount = pSMB->DataCount;
	pSMB->TotalParameterCount = pSMB->ParameterCount;
	pSMB->ParameterOffset = cpu_to_le16(param_offset);
	/* lock structure lives in the data area of the request */
	parm_data = (struct cifs_posix_lock *)
			(((char *) &pSMB->hdr.Protocol) + offset);

	parm_data->lock_type = cpu_to_le16(lock_type);
	if (waitFlag) {
		timeout = CIFS_BLOCKING_OP; /* blocking operation, no timeout */
		parm_data->lock_flags = cpu_to_le16(1);
		pSMB->Timeout = cpu_to_le32(-1);
	} else
		pSMB->Timeout = 0;

	parm_data->pid = cpu_to_le32(current->tgid);
	parm_data->start = cpu_to_le64(pLockData->fl_start);
	parm_data->length = cpu_to_le64(len);  /* normalize negative numbers */

	pSMB->DataOffset = cpu_to_le16(offset);
	pSMB->Fid = smb_file_id;
	pSMB->InformationLevel = cpu_to_le16(SMB_SET_POSIX_LOCK);
	pSMB->Reserved4 = 0;
	inc_rfc1001_len(pSMB, byte_count);
	pSMB->ByteCount = cpu_to_le16(byte_count);
	if (waitFlag) {
		rc = SendReceiveBlockingLock(xid, tcon, (struct smb_hdr *) pSMB,
			(struct smb_hdr *) pSMBr, &bytes_returned);
	} else {
		iov[0].iov_base = (char *)pSMB;
		iov[0].iov_len = be32_to_cpu(pSMB->hdr.smb_buf_length) + 4;
		rc = SendReceive2(xid, tcon->ses, iov, 1 /* num iovecs */,
				&resp_buf_type, timeout);
		pSMB = NULL; /* request buf already freed by SendReceive2. Do
				not try to free it twice below on exit */
		pSMBr = (struct smb_com_transaction2_sfi_rsp *)iov[0].iov_base;
	}

	if (rc) {
		cFYI(1, "Send error in Posix Lock = %d", rc);
	} else if (get_flag) {
		/* lock structure can be returned on get */
		__u16 data_offset;
		__u16 data_count;
		rc = validate_t2((struct smb_t2_rsp *)pSMBr);

		if (rc || get_bcc(&pSMBr->hdr) < sizeof(*parm_data)) {
			rc = -EIO;      /* bad smb */
			goto plk_err_exit;
		}
		data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
		data_count  = le16_to_cpu(pSMBr->t2.DataCount);
		if (data_count < sizeof(struct cifs_posix_lock)) {
			rc = -EIO;
			goto plk_err_exit;
		}
		parm_data = (struct cifs_posix_lock *)
			((char *)&pSMBr->hdr.Protocol + data_offset);
		if (parm_data->lock_type == __constant_cpu_to_le16(CIFS_UNLCK))
			pLockData->fl_type = F_UNLCK;
		else {
			if (parm_data->lock_type ==
					__constant_cpu_to_le16(CIFS_RDLCK))
				pLockData->fl_type = F_RDLCK;
			else if (parm_data->lock_type ==
					__constant_cpu_to_le16(CIFS_WRLCK))
				pLockData->fl_type = F_WRLCK;

			pLockData->fl_start = le64_to_cpu(parm_data->start);
			pLockData->fl_end = pLockData->fl_start +
					le64_to_cpu(parm_data->length) - 1;
			pLockData->fl_pid = le32_to_cpu(parm_data->pid);
		}
	}

plk_err_exit:
	if (pSMB)
		cifs_small_buf_release(pSMB);

	if (resp_buf_type == CIFS_SMALL_BUFFER)
		cifs_small_buf_release(iov[0].iov_base);
	else if (resp_buf_type == CIFS_LARGE_BUFFER)
		cifs_buf_release(iov[0].iov_base);

	/* Note: On -EAGAIN error only caller can retry on handle based calls
	   since file handle passed in no longer valid */

	return rc;
}

/* Close an open fid on the server. On a dead session (-EAGAIN) the server
   has already closed the file, so that is treated as success. */
int
CIFSSMBClose(const int xid, struct cifs_tcon *tcon, int smb_file_id)
{
	int rc = 0;
	CLOSE_REQ *pSMB = NULL;
	cFYI(1, "In CIFSSMBClose");

/* do not retry on dead session on close */
	rc = small_smb_init(SMB_COM_CLOSE, 3, tcon, (void **) &pSMB);
	if (rc == -EAGAIN)
		return 0;
	if (rc)
		return rc;

	pSMB->FileID = (__u16) smb_file_id;
	pSMB->LastWriteTime = 0xFFFFFFFF;
	pSMB->ByteCount = 0;
	rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0);
	cifs_stats_inc(&tcon->num_closes);
	if (rc)
	{
		if (rc != -EINTR) {
			/* EINTR is expected when user ctl-c to kill app */
			cERROR(1, "Send error in Close = %d", rc);
		}
	}

	/* Since session is dead, file will be closed on server already */
	if (rc == -EAGAIN)
		rc = 0;

	return rc;
}

/* Flush any buffered server-side data for an open fid (SMB_COM_FLUSH). */
int
CIFSSMBFlush(const int xid, struct cifs_tcon *tcon, int smb_file_id)
{
	int rc = 0;
	FLUSH_REQ *pSMB = NULL;
	cFYI(1, "In CIFSSMBFlush");

	rc = small_smb_init(SMB_COM_FLUSH, 1, tcon, (void **) &pSMB);
	if (rc)
		return rc;

	pSMB->FileID = (__u16) smb_file_id;
	pSMB->ByteCount = 0;
	rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0);
	cifs_stats_inc(&tcon->num_flushes);
	if (rc)
		cERROR(1, "Send error in Flush = %d", rc);

	return rc;
}

/* Rename a file by path (SMB_COM_RENAME); names are marshalled in either
   UCS-2 or the local codepage depending on the session flags. Retries the
   whole request on -EAGAIN. */
int
CIFSSMBRename(const int xid, struct cifs_tcon *tcon,
	      const char *fromName, const char *toName,
	      const struct nls_table *nls_codepage, int remap)
{
	int rc = 0;
	RENAME_REQ *pSMB = NULL;
	RENAME_RSP *pSMBr = NULL;
	int bytes_returned;
	int name_len, name_len2;
	__u16 count;

	cFYI(1, "In CIFSSMBRename");
renameRetry:
	rc = smb_init(SMB_COM_RENAME, 1, tcon, (void **) &pSMB,
		      (void **) &pSMBr);
	if (rc)
		return rc;

	pSMB->BufferFormat = 0x04;
	pSMB->SearchAttributes =
	    cpu_to_le16(ATTR_READONLY | ATTR_HIDDEN | ATTR_SYSTEM |
			ATTR_DIRECTORY);

	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
		name_len =
		    cifsConvertToUCS((__le16 *) pSMB->OldFileName, fromName,
				     PATH_MAX, nls_codepage, remap);
		name_len++;	/* trailing null */
		name_len *= 2;
		pSMB->OldFileName[name_len] = 0x04;	/* pad */
	/* protocol requires ASCII signature byte on Unicode string */
		pSMB->OldFileName[name_len + 1] = 0x00;
		name_len2 =
		    cifsConvertToUCS((__le16 *)&pSMB->OldFileName[name_len + 2],
				     toName, PATH_MAX, nls_codepage, remap);
		name_len2 += 1 /* trailing null */  + 1 /* Signature word */ ;
		name_len2 *= 2;	/* convert to bytes */
	} else {	/* BB improve the check for buffer overruns BB */
		name_len = strnlen(fromName, PATH_MAX);
		name_len++;	/* trailing null */
		strncpy(pSMB->OldFileName, fromName, name_len);
		name_len2 = strnlen(toName, PATH_MAX);
		name_len2++;	/* trailing null */
		pSMB->OldFileName[name_len] = 0x04;  /* 2nd buffer format */
		strncpy(&pSMB->OldFileName[name_len + 1], toName, name_len2);
		name_len2++;	/* trailing null */
		name_len2++;	/* signature byte */
	}

	count = 1 /* 1st signature byte */  + name_len + name_len2;
	inc_rfc1001_len(pSMB, count);
	pSMB->ByteCount = cpu_to_le16(count);

	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
	cifs_stats_inc(&tcon->num_renames);
	if (rc)
		cFYI(1, "Send error in rename = %d", rc);

	cifs_buf_release(pSMB);

	if (rc == -EAGAIN)
		goto renameRetry;

	return rc;
}

/* Rename an already-open file by handle via TRANS2 SET_FILE_INFORMATION.
   A NULL target_name renames to a generated "cifs<mid>" temporary name. */
int CIFSSMBRenameOpenFile(const int xid, struct cifs_tcon *pTcon,
		int netfid, const char *target_name,
		const struct nls_table *nls_codepage, int remap)
{
	struct smb_com_transaction2_sfi_req *pSMB  = NULL;
	struct smb_com_transaction2_sfi_rsp *pSMBr = NULL;
	struct set_file_rename *rename_info;
	char *data_offset;
	char dummy_string[30];
	int rc = 0;
	int bytes_returned = 0;
	int len_of_str;
	__u16 params, param_offset, offset, count, byte_count;

	cFYI(1, "Rename to File by handle");
	rc = smb_init(SMB_COM_TRANSACTION2, 15, pTcon, (void **) &pSMB,
			(void **) &pSMBr);
	if (rc)
		return rc;

	params = 6;
	pSMB->MaxSetupCount = 0;
	pSMB->Reserved = 0;
	pSMB->Flags = 0;
	pSMB->Timeout = 0;
	pSMB->Reserved2 = 0;
	param_offset = offsetof(struct smb_com_transaction2_sfi_req, Fid) - 4;
	offset = param_offset + params;

	data_offset = (char *) (&pSMB->hdr.Protocol) + offset;
	rename_info = (struct set_file_rename *) data_offset;
	pSMB->MaxParameterCount = cpu_to_le16(2);
	pSMB->MaxDataCount = cpu_to_le16(1000);	/* BB find max SMB from sess */
	pSMB->SetupCount = 1;
	pSMB->Reserved3 = 0;
	pSMB->SubCommand = cpu_to_le16(TRANS2_SET_FILE_INFORMATION);
	byte_count = 3 /* pad */  + params;
	pSMB->ParameterCount = cpu_to_le16(params);
	pSMB->TotalParameterCount = pSMB->ParameterCount;
	pSMB->ParameterOffset = cpu_to_le16(param_offset);
	pSMB->DataOffset = cpu_to_le16(offset);
	/* construct random name ".cifs_tmp<inodenum><mid>" */
	rename_info->overwrite = cpu_to_le32(1);
	rename_info->root_fid  = 0;
	/* unicode only call */
	if (target_name == NULL) {
		sprintf(dummy_string, "cifs%x", pSMB->hdr.Mid);
		len_of_str =
			cifsConvertToUCS((__le16 *)rename_info->target_name,
					dummy_string, 24, nls_codepage, remap);
	} else {
		len_of_str =
			cifsConvertToUCS((__le16 *)rename_info->target_name,
					target_name, PATH_MAX, nls_codepage,
					remap);
	}
	rename_info->target_name_len = cpu_to_le32(2 * len_of_str);
	count = 12 /* sizeof(struct set_file_rename) */ + (2 * len_of_str);
	byte_count += count;
	pSMB->DataCount = cpu_to_le16(count);
	pSMB->TotalDataCount = pSMB->DataCount;
	pSMB->Fid = netfid;
	pSMB->InformationLevel =
		cpu_to_le16(SMB_SET_FILE_RENAME_INFORMATION);
	pSMB->Reserved4 = 0;
	inc_rfc1001_len(pSMB, byte_count);
	pSMB->ByteCount = cpu_to_le16(byte_count);
	rc = SendReceive(xid, pTcon->ses, (struct smb_hdr *) pSMB,
			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
	cifs_stats_inc(&pTcon->num_t2renames);
	if (rc)
		cFYI(1, "Send error in Rename (by file handle) = %d", rc);

	cifs_buf_release(pSMB);

	/* Note: On -EAGAIN error only caller can retry on handle based calls
		since file handle passed in no longer valid */

	return rc;
}

/* Server-side copy of a file (SMB_COM_COPY), possibly to another tree
   (target_tid). Retries the whole request on -EAGAIN. */
int
CIFSSMBCopy(const int xid, struct cifs_tcon *tcon, const char *fromName,
	    const __u16 target_tid, const char *toName, const int flags,
	    const struct nls_table *nls_codepage, int remap)
{
	int rc = 0;
	COPY_REQ *pSMB = NULL;
	COPY_RSP *pSMBr = NULL;
	int bytes_returned;
	int name_len, name_len2;
	__u16 count;

	cFYI(1, "In CIFSSMBCopy");
copyRetry:
	rc = smb_init(SMB_COM_COPY, 1, tcon, (void **) &pSMB,
			(void **) &pSMBr);
	if (rc)
		return rc;

	pSMB->BufferFormat = 0x04;
	pSMB->Tid2 = target_tid;

	pSMB->Flags = cpu_to_le16(flags & COPY_TREE);

	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
		name_len = cifsConvertToUCS((__le16 *) pSMB->OldFileName,
					    fromName, PATH_MAX, nls_codepage,
					    remap);
		name_len++;	/* trailing null */
		name_len *= 2;
		pSMB->OldFileName[name_len] = 0x04;	/* pad */
		/* protocol requires ASCII signature byte on Unicode string */
		pSMB->OldFileName[name_len + 1] = 0x00;
		name_len2 =
		    cifsConvertToUCS((__le16 *)&pSMB->OldFileName[name_len + 2],
				toName, PATH_MAX, nls_codepage, remap);
		name_len2 += 1 /* trailing null */  + 1 /* Signature word */ ;
		name_len2 *= 2; /* convert to bytes */
	} else {	/* BB improve the check for buffer overruns BB */
		name_len = strnlen(fromName, PATH_MAX);
		name_len++;	/* trailing null */
		strncpy(pSMB->OldFileName, fromName, name_len);
		name_len2 = strnlen(toName, PATH_MAX);
		name_len2++;	/* trailing null */
		pSMB->OldFileName[name_len] = 0x04;  /* 2nd buffer format */
		strncpy(&pSMB->OldFileName[name_len + 1], toName, name_len2);
		name_len2++;	/* trailing null */
		name_len2++;	/* signature byte */
	}

	count = 1 /* 1st signature byte */  + name_len + name_len2;
	inc_rfc1001_len(pSMB, count);
	pSMB->ByteCount = cpu_to_le16(count);

	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
		(struct smb_hdr *) pSMBr, &bytes_returned, 0);
	if (rc) {
		cFYI(1, "Send error in copy = %d with %d files copied",
			rc, le16_to_cpu(pSMBr->CopyCount));
	}
	cifs_buf_release(pSMB);

	if (rc == -EAGAIN)
		goto copyRetry;

	return rc;
}

/* Create a symlink via the CIFS Unix Extensions (TRANS2 SET_PATH_INFORMATION
   with SMB_SET_FILE_UNIX_LINK). Retries the whole request on -EAGAIN. */
int
CIFSUnixCreateSymLink(const int xid, struct cifs_tcon *tcon,
		      const char *fromName, const char *toName,
		      const struct nls_table *nls_codepage)
{
	TRANSACTION2_SPI_REQ *pSMB = NULL;
	TRANSACTION2_SPI_RSP *pSMBr = NULL;
	char *data_offset;
	int name_len;
	int name_len_target;
	int rc = 0;
	int bytes_returned = 0;
	__u16 params, param_offset, offset, byte_count;

	cFYI(1, "In Symlink Unix style");
createSymLinkRetry:
	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
		      (void **) &pSMBr);
	if (rc)
		return rc;

	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
		name_len =
		    cifs_strtoUCS((__le16 *) pSMB->FileName, fromName,
				  PATH_MAX /* find define for this maxpathcomponent */
				  , nls_codepage);
		name_len++;	/* trailing null */
		name_len *= 2;

	} else {	/* BB improve the check for buffer overruns BB */
		name_len = strnlen(fromName, PATH_MAX);
		name_len++;	/* trailing null */
		strncpy(pSMB->FileName, fromName, name_len);
	}
	params = 6 + name_len;
	pSMB->MaxSetupCount = 0;
	pSMB->Reserved = 0;
	pSMB->Flags = 0;
	pSMB->Timeout = 0;
	pSMB->Reserved2 = 0;
	param_offset = offsetof(struct smb_com_transaction2_spi_req,
				InformationLevel) - 4;
	offset = param_offset + params;

	/* symlink target goes in the data area, after the parameters */
	data_offset = (char *) (&pSMB->hdr.Protocol) + offset;
	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
		name_len_target =
		    cifs_strtoUCS((__le16 *) data_offset, toName, PATH_MAX
				  /* find define for this maxpathcomponent */
				  , nls_codepage);
		name_len_target++;	/* trailing null */
		name_len_target *= 2;
	} else {	/* BB improve the check for buffer overruns BB */
		name_len_target = strnlen(toName, PATH_MAX);
		name_len_target++;	/* trailing null */
		strncpy(data_offset, toName, name_len_target);
	}

	pSMB->MaxParameterCount = cpu_to_le16(2);
	/* BB find exact max on data count below from sess */
	pSMB->MaxDataCount = cpu_to_le16(1000);
	pSMB->SetupCount = 1;
	pSMB->Reserved3 = 0;
	pSMB->SubCommand = cpu_to_le16(TRANS2_SET_PATH_INFORMATION);
	byte_count = 3 /* pad */  + params + name_len_target;
	pSMB->DataCount = cpu_to_le16(name_len_target);
	pSMB->ParameterCount = cpu_to_le16(params);
	pSMB->TotalDataCount = pSMB->DataCount;
	pSMB->TotalParameterCount = pSMB->ParameterCount;
	pSMB->ParameterOffset = cpu_to_le16(param_offset);
	pSMB->DataOffset = cpu_to_le16(offset);
	pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_UNIX_LINK);
	pSMB->Reserved4 = 0;
	inc_rfc1001_len(pSMB, byte_count);
	pSMB->ByteCount = cpu_to_le16(byte_count);
	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
	cifs_stats_inc(&tcon->num_symlinks);
	if (rc)
		cFYI(1, "Send error in SetPathInfo create symlink = %d", rc);

	cifs_buf_release(pSMB);

	if (rc == -EAGAIN)
		goto createSymLinkRetry;

	return rc;
}

/* Create a hard link via the CIFS Unix Extensions (TRANS2
   SET_PATH_INFORMATION, SMB_SET_FILE_UNIX_HLINK). Note the path sent is
   toName with fromName as the link target in the data area. */
int
CIFSUnixCreateHardLink(const int xid, struct cifs_tcon *tcon,
		       const char *fromName, const char *toName,
		       const struct nls_table *nls_codepage, int remap)
{
	TRANSACTION2_SPI_REQ *pSMB = NULL;
	TRANSACTION2_SPI_RSP *pSMBr = NULL;
	char *data_offset;
	int name_len;
	int name_len_target;
	int rc = 0;
	int bytes_returned = 0;
	__u16 params, param_offset, offset, byte_count;

	cFYI(1, "In Create Hard link Unix style");
createHardLinkRetry:
	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
		      (void **) &pSMBr);
	if (rc)
		return rc;

	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
		name_len = cifsConvertToUCS((__le16 *) pSMB->FileName, toName,
					    PATH_MAX, nls_codepage, remap);
		name_len++;	/* trailing null */
		name_len *= 2;

	} else {	/* BB improve the check for buffer overruns BB */
		name_len = strnlen(toName, PATH_MAX);
		name_len++;	/* trailing null */
		strncpy(pSMB->FileName, toName, name_len);
	}
	params = 6 + name_len;
	pSMB->MaxSetupCount = 0;
	pSMB->Reserved = 0;
	pSMB->Flags = 0;
	pSMB->Timeout = 0;
	pSMB->Reserved2 = 0;
	param_offset = offsetof(struct smb_com_transaction2_spi_req,
				InformationLevel) - 4;
	offset = param_offset + params;

	data_offset = (char *) (&pSMB->hdr.Protocol) + offset;
	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
		name_len_target =
		    cifsConvertToUCS((__le16 *) data_offset, fromName, PATH_MAX,
				     nls_codepage, remap);
		name_len_target++;	/* trailing null */
		name_len_target *= 2;
	} else {	/* BB improve the check for buffer overruns BB */
		name_len_target = strnlen(fromName, PATH_MAX);
		name_len_target++;	/* trailing null */
		strncpy(data_offset, fromName, name_len_target);
	}

	pSMB->MaxParameterCount = cpu_to_le16(2);
	/* BB find exact max on data count below from sess*/
	pSMB->MaxDataCount = cpu_to_le16(1000);
	pSMB->SetupCount = 1;
	pSMB->Reserved3 = 0;
	pSMB->SubCommand = cpu_to_le16(TRANS2_SET_PATH_INFORMATION);
	byte_count = 3 /* pad */  + params + name_len_target;
	pSMB->ParameterCount = cpu_to_le16(params);
	pSMB->TotalParameterCount = pSMB->ParameterCount;
	pSMB->DataCount = cpu_to_le16(name_len_target);
	pSMB->TotalDataCount = pSMB->DataCount;
	pSMB->ParameterOffset = cpu_to_le16(param_offset);
	pSMB->DataOffset = cpu_to_le16(offset);
	pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_UNIX_HLINK);
	pSMB->Reserved4 = 0;
	inc_rfc1001_len(pSMB, byte_count);
	pSMB->ByteCount = cpu_to_le16(byte_count);
	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
	cifs_stats_inc(&tcon->num_hardlinks);
	if (rc)
		cFYI(1, "Send error in SetPathInfo (hard link) = %d", rc);

	cifs_buf_release(pSMB);
	if (rc == -EAGAIN)
		goto createHardLinkRetry;

	return rc;
}

/* Create a hard link using SMB_COM_NT_RENAME with CREATE_HARD_LINK
   (the non-Unix-extensions path, e.g. against Windows servers). */
int
CIFSCreateHardLink(const int xid, struct cifs_tcon *tcon,
		   const char *fromName, const char *toName,
		   const struct nls_table *nls_codepage, int remap)
{
	int rc = 0;
	NT_RENAME_REQ *pSMB = NULL;
	RENAME_RSP *pSMBr = NULL;
	int bytes_returned;
	int name_len, name_len2;
	__u16 count;

	cFYI(1, "In CIFSCreateHardLink");
winCreateHardLinkRetry:

	rc = smb_init(SMB_COM_NT_RENAME, 4, tcon, (void **) &pSMB,
		      (void **) &pSMBr);
	if (rc)
		return rc;

	pSMB->SearchAttributes =
	    cpu_to_le16(ATTR_READONLY | ATTR_HIDDEN | ATTR_SYSTEM |
			ATTR_DIRECTORY);
	pSMB->Flags = cpu_to_le16(CREATE_HARD_LINK);
	pSMB->ClusterCount = 0;

	pSMB->BufferFormat = 0x04;

	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
		name_len =
		    cifsConvertToUCS((__le16 *) pSMB->OldFileName, fromName,
				     PATH_MAX, nls_codepage, remap);
		name_len++;	/* trailing null */
		name_len *= 2;

		/* protocol specifies ASCII buffer format (0x04) for unicode */
		pSMB->OldFileName[name_len] = 0x04;
		pSMB->OldFileName[name_len + 1] = 0x00;	/* pad */
		name_len2 =
		    cifsConvertToUCS((__le16 *)&pSMB->OldFileName[name_len + 2],
				     toName, PATH_MAX, nls_codepage, remap);
		name_len2 += 1 /* trailing null */  + 1 /* Signature word */ ;
		name_len2 *= 2;	/* convert to bytes */
	} else {	/* BB improve the check for buffer overruns BB */
		name_len = strnlen(fromName, PATH_MAX);
		name_len++;	/* trailing null */
		strncpy(pSMB->OldFileName, fromName, name_len);
		name_len2 = strnlen(toName, PATH_MAX);
		name_len2++;	/* trailing null */
		pSMB->OldFileName[name_len] = 0x04;	/* 2nd buffer format */
		strncpy(&pSMB->OldFileName[name_len + 1], toName, name_len2);
		name_len2++;	/* trailing null */
		name_len2++;	/* signature byte */
	}

	count = 1 /* string type byte */  + name_len + name_len2;
	inc_rfc1001_len(pSMB, count);
	pSMB->ByteCount = cpu_to_le16(count);

	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
	cifs_stats_inc(&tcon->num_hardlinks);
	if (rc)
		cFYI(1, "Send error in hard link (NT rename) = %d", rc);

	cifs_buf_release(pSMB);
	if (rc == -EAGAIN)
		goto winCreateHardLinkRetry;

	return rc;
}

/* Read a symlink target via the CIFS Unix Extensions (TRANS2
   QUERY_PATH_INFORMATION, SMB_QUERY_FILE_UNIX_LINK). On success
   *symlinkinfo is a kmalloc'ed string the caller must free. */
int
CIFSSMBUnixQuerySymLink(const int xid, struct cifs_tcon *tcon,
			const unsigned char *searchName, char **symlinkinfo,
			const struct nls_table *nls_codepage)
{
/* SMB_QUERY_FILE_UNIX_LINK */
	TRANSACTION2_QPI_REQ *pSMB = NULL;
	TRANSACTION2_QPI_RSP *pSMBr = NULL;
	int rc = 0;
	int bytes_returned;
	int name_len;
	__u16 params, byte_count;
	char *data_start;

	cFYI(1, "In QPathSymLinkInfo (Unix) for path %s", searchName);

querySymLinkRetry:
	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
		      (void **) &pSMBr);
	if (rc)
		return rc;

	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
		name_len =
			cifs_strtoUCS((__le16 *) pSMB->FileName, searchName,
					PATH_MAX, nls_codepage);
		name_len++;	/* trailing null */
		name_len *= 2;
	} else {	/* BB improve the check for buffer overruns BB */
		name_len = strnlen(searchName, PATH_MAX);
		name_len++;	/* trailing null */
		strncpy(pSMB->FileName, searchName, name_len);
	}

	params = 2 /* level */  + 4 /* rsrvd */  + name_len /* incl null */ ;
	pSMB->TotalDataCount = 0;
	pSMB->MaxParameterCount = cpu_to_le16(2);
	pSMB->MaxDataCount = cpu_to_le16(CIFSMaxBufSize);
	pSMB->MaxSetupCount = 0;
	pSMB->Reserved = 0;
	pSMB->Flags = 0;
	pSMB->Timeout = 0;
	pSMB->Reserved2 = 0;
	pSMB->ParameterOffset = cpu_to_le16(offsetof(
	struct smb_com_transaction2_qpi_req, InformationLevel) - 4);
	pSMB->DataCount = 0;
	pSMB->DataOffset = 0;
	pSMB->SetupCount = 1;
	pSMB->Reserved3 = 0;
	pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_PATH_INFORMATION);
	byte_count = params + 1 /* pad */ ;
	pSMB->TotalParameterCount = cpu_to_le16(params);
	pSMB->ParameterCount = pSMB->TotalParameterCount;
	pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_FILE_UNIX_LINK);
	pSMB->Reserved4 = 0;
	inc_rfc1001_len(pSMB, byte_count);
	pSMB->ByteCount = cpu_to_le16(byte_count);

	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
	if (rc) {
		cFYI(1, "Send error in QuerySymLinkInfo = %d", rc);
	} else {
		/* decode response */

		rc = validate_t2((struct smb_t2_rsp *)pSMBr);
		/* BB also check enough total bytes returned */
		if (rc || get_bcc(&pSMBr->hdr) < 2)
			rc = -EIO;
		else {
			bool is_unicode;
			u16 count = le16_to_cpu(pSMBr->t2.DataCount);

			data_start = ((char *) &pSMBr->hdr.Protocol) +
					   le16_to_cpu(pSMBr->t2.DataOffset);

			if (pSMBr->hdr.Flags2 & SMBFLG2_UNICODE)
				is_unicode = true;
			else
				is_unicode = false;

			/* BB FIXME investigate remapping reserved chars here */
			*symlinkinfo = cifs_strndup_from_ucs(data_start,
					count, is_unicode, nls_codepage);
			if (!*symlinkinfo)
				rc = -ENOMEM;
		}
	}
	cifs_buf_release(pSMB);
	if (rc == -EAGAIN)
		goto querySymLinkRetry;
	return rc;
}

#ifdef CONFIG_CIFS_SYMLINK_EXPERIMENTAL
/*
 * Recent Windows versions now create symlinks more frequently
 * and they use the "reparse point" mechanism below. We can of course
 * do symlinks nicely to Samba and other servers which support the
 * CIFS Unix Extensions and we can also do SFU symlinks and "client only"
 * "MF" symlinks optionally, but for recent Windows we really need to
 * reenable the code below and fix the cifs_symlink callers to handle this.
 * In the interim this code has been moved to its own config option so
 * it is not compiled in by default until callers fixed up and more tested.
 */
/*
 * Query a Windows reparse-point symlink target by open file handle (fid)
 * using the NT transact FSCTL_GET_REPARSE_POINT ioctl.  The target name is
 * copied into the caller-supplied symlinkinfo buffer of length buflen.
 */
int
CIFSSMBQueryReparseLinkInfo(const int xid, struct cifs_tcon *tcon,
			const unsigned char *searchName,
			char *symlinkinfo, const int buflen, __u16 fid,
			const struct nls_table *nls_codepage)
{
	int rc = 0;
	int bytes_returned;
	struct smb_com_transaction_ioctl_req *pSMB;
	struct smb_com_transaction_ioctl_rsp *pSMBr;

	cFYI(1, "In Windows reparse style QueryLink for path %s", searchName);
	rc = smb_init(SMB_COM_NT_TRANSACT, 23, tcon, (void **) &pSMB,
		      (void **) &pSMBr);
	if (rc)
		return rc;

	pSMB->TotalParameterCount = 0 ;
	pSMB->TotalDataCount = 0;
	pSMB->MaxParameterCount = cpu_to_le32(2);
	/* BB find exact data count max from sess structure BB */
	pSMB->MaxDataCount = cpu_to_le32((tcon->ses->server->maxBuf -
					  MAX_CIFS_HDR_SIZE) & 0xFFFFFF00);
	pSMB->MaxSetupCount = 4;
	pSMB->Reserved = 0;
	pSMB->ParameterOffset = 0;
	pSMB->DataCount = 0;
	pSMB->DataOffset = 0;
	pSMB->SetupCount = 4;
	pSMB->SubCommand = cpu_to_le16(NT_TRANSACT_IOCTL);
	pSMB->ParameterCount = pSMB->TotalParameterCount;
	pSMB->FunctionCode = cpu_to_le32(FSCTL_GET_REPARSE_POINT);
	pSMB->IsFsctl = 1; /* FSCTL */
	pSMB->IsRootFlag = 0;
	pSMB->Fid = fid; /* file handle always le */
	pSMB->ByteCount = 0;

	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
	if (rc) {
		cFYI(1, "Send error in QueryReparseLinkInfo = %d", rc);
	} else {		/* decode response */
		__u32 data_offset = le32_to_cpu(pSMBr->DataOffset);
		__u32 data_count = le32_to_cpu(pSMBr->DataCount);
		if (get_bcc(&pSMBr->hdr) < 2 || data_offset > 512) {
			/* BB also check enough total bytes returned */
			rc = -EIO;	/* bad smb */
			goto qreparse_out;
		}
		if (data_count && (data_count < 2048)) {
			char *end_of_smb = 2 /* sizeof byte count */ +
				get_bcc(&pSMBr->hdr) +
				(char *)&pSMBr->ByteCount;

			struct reparse_data *reparse_buf =
						(struct reparse_data *)
						((char *)&pSMBr->hdr.Protocol
								 + data_offset);
			/* reject reparse data that lies outside the SMB */
			if ((char *)reparse_buf >= end_of_smb) {
				rc = -EIO;
				goto qreparse_out;
			}
			if ((reparse_buf->LinkNamesBuf +
				reparse_buf->TargetNameOffset +
				reparse_buf->TargetNameLen) > end_of_smb) {
				cFYI(1, "reparse buf beyond SMB");
				rc = -EIO;
				goto qreparse_out;
			}

			if (pSMBr->hdr.Flags2 & SMBFLG2_UNICODE) {
				cifs_from_ucs2(symlinkinfo, (__le16 *)
						(reparse_buf->LinkNamesBuf +
						reparse_buf->TargetNameOffset),
						buflen,
						reparse_buf->TargetNameLen,
						nls_codepage, 0);
			} else { /* ASCII names */
				strncpy(symlinkinfo,
					reparse_buf->LinkNamesBuf +
					reparse_buf->TargetNameOffset,
					min_t(const int, buflen,
					   reparse_buf->TargetNameLen));
			}
		} else {
			rc = -EIO;
			cFYI(1, "Invalid return data count on "
				 "get reparse info ioctl");
		}
		/* NOTE(review): writes at index buflen, so the caller must
		   supply a buffer of at least buflen + 1 bytes — verify
		   callers before relying on this */
		symlinkinfo[buflen] = 0; /* just in case so the caller
					does not go off the end of the buffer */
		cFYI(1, "readlink result - %s", symlinkinfo);
	}

qreparse_out:
	cifs_buf_release(pSMB);

	/* Note: On -EAGAIN error only caller can retry on handle based calls
		since file handle passed in no longer valid */

	return rc;
}
#endif /* CIFS_SYMLINK_EXPERIMENTAL */ /* BB temporarily unused */

#ifdef CONFIG_CIFS_POSIX

/*Convert an Access Control Entry from wire format to local POSIX xattr format*/
static void cifs_convert_ace(posix_acl_xattr_entry *ace,
			     struct cifs_posix_ace *cifs_ace)
{
	/* u8 cifs fields do not need le conversion */
	ace->e_perm = cpu_to_le16(cifs_ace->cifs_e_perm);
	ace->e_tag = cpu_to_le16(cifs_ace->cifs_e_tag);
	ace->e_id = cpu_to_le32(le64_to_cpu(cifs_ace->cifs_uid));
	/* cFYI(1, "perm %d tag %d id %d",ace->e_perm,ace->e_tag,ace->e_id); */

	return;
}

/* Convert ACL from CIFS POSIX wire format to local Linux POSIX ACL xattr */
static int cifs_copy_posix_acl(char *trgt, char *src, const int buflen,
			       const int acl_type, const int size_of_data_area)
{
	int size = 0;
	int i;
	__u16 count;
	struct cifs_posix_ace *pACE;
	struct cifs_posix_acl *cifs_acl = (struct cifs_posix_acl *)src;
	posix_acl_xattr_header *local_acl = (posix_acl_xattr_header *)trgt;

	if (le16_to_cpu(cifs_acl->version) != CIFS_ACL_VERSION)
		return -EOPNOTSUPP;

	if (acl_type & ACL_TYPE_ACCESS) {
		count = le16_to_cpu(cifs_acl->access_entry_count);
		pACE = &cifs_acl->ace_array[0];
		size = sizeof(struct cifs_posix_acl);
		size += sizeof(struct cifs_posix_ace) * count;
		/* check if we would go beyond end of SMB */
		if (size_of_data_area < size) {
			cFYI(1, "bad CIFS POSIX ACL size %d vs. %d",
				size_of_data_area, size);
			return -EINVAL;
		}
	} else if (acl_type & ACL_TYPE_DEFAULT) {
		count = le16_to_cpu(cifs_acl->access_entry_count);
		size = sizeof(struct cifs_posix_acl);
		size += sizeof(struct cifs_posix_ace) * count;
/* skip past access ACEs to get to default ACEs */
		pACE = &cifs_acl->ace_array[count];
		count = le16_to_cpu(cifs_acl->default_entry_count);
		size += sizeof(struct cifs_posix_ace) * count;
		/* check if we would go beyond end of SMB */
		if (size_of_data_area < size)
			return -EINVAL;
	} else {
		/* illegal type */
		return -EINVAL;
	}

	size = posix_acl_xattr_size(count);
	if ((buflen == 0) || (local_acl == NULL)) {
		/* used to query ACL EA size */
	} else if (size > buflen) {
		return -ERANGE;
	} else /* buffer big enough */ {
		local_acl->a_version = cpu_to_le32(POSIX_ACL_XATTR_VERSION);
		for (i = 0; i < count ; i++) {
			cifs_convert_ace(&local_acl->a_entries[i], pACE);
			pACE++;
		}
	}
	return size;
}

/* Convert one local POSIX xattr ACE into CIFS wire format; 0 == converted ok */
static __u16 convert_ace_to_cifs_ace(struct cifs_posix_ace *cifs_ace,
				     const posix_acl_xattr_entry *local_ace)
{
	__u16 rc = 0; /* 0 = ACL converted ok */

	cifs_ace->cifs_e_perm = le16_to_cpu(local_ace->e_perm);
	cifs_ace->cifs_e_tag = le16_to_cpu(local_ace->e_tag);
	/* BB is there a better way to handle the large uid?
 */
	if (local_ace->e_id == cpu_to_le32(-1)) {
/* Probably no need to le convert -1 on any arch but can not hurt */
		cifs_ace->cifs_uid = cpu_to_le64(-1);
	} else
		cifs_ace->cifs_uid =
			cpu_to_le64(le32_to_cpu(local_ace->e_id));
	/*cFYI(1, "perm %d tag %d id %d",ace->e_perm,ace->e_tag,ace->e_id);*/
	return rc;
}

/* Convert ACL from local Linux POSIX xattr to CIFS POSIX ACL wire format */
static __u16 ACL_to_cifs_posix(char *parm_data, const char *pACL,
			       const int buflen, const int acl_type)
{
	__u16 rc = 0;
	struct cifs_posix_acl *cifs_acl = (struct cifs_posix_acl *)parm_data;
	posix_acl_xattr_header *local_acl = (posix_acl_xattr_header *)pACL;
	int count;
	int i;

	if ((buflen == 0) || (pACL == NULL) || (cifs_acl == NULL))
		return 0;

	count = posix_acl_xattr_count((size_t)buflen);
	cFYI(1, "setting acl with %d entries from buf of length %d and "
		"version of %d",
		count, buflen, le32_to_cpu(local_acl->a_version));
	if (le32_to_cpu(local_acl->a_version) != 2) {
		cFYI(1, "unknown POSIX ACL version %d",
		     le32_to_cpu(local_acl->a_version));
		return 0;
	}
	cifs_acl->version = cpu_to_le16(1);
	if (acl_type == ACL_TYPE_ACCESS)
		cifs_acl->access_entry_count = cpu_to_le16(count);
	else if (acl_type == ACL_TYPE_DEFAULT)
		cifs_acl->default_entry_count = cpu_to_le16(count);
	else {
		cFYI(1, "unknown ACL type %d", acl_type);
		return 0;
	}
	for (i = 0; i < count; i++) {
		rc = convert_ace_to_cifs_ace(&cifs_acl->ace_array[i],
					&local_acl->a_entries[i]);
		if (rc != 0) {
			/* ACE not converted */
			break;
		}
	}
	if (rc == 0) {
		/* on success return the wire length of the converted ACL */
		rc = (__u16)(count * sizeof(struct cifs_posix_ace));
		rc += sizeof(struct cifs_posix_acl);
		/* BB add check to make sure ACL does not overflow SMB */
	}
	return rc;
}

/*
 * Query POSIX ACL (SMB_QUERY_POSIX_ACL) for a path; the wire-format result
 * is converted into the caller's acl_inf buffer (or just sized when buflen
 * is 0).
 */
int
CIFSSMBGetPosixACL(const int xid, struct cifs_tcon *tcon,
		   const unsigned char *searchName,
		   char *acl_inf, const int buflen, const int acl_type,
		   const struct nls_table *nls_codepage, int remap)
{
/* SMB_QUERY_POSIX_ACL */
	TRANSACTION2_QPI_REQ *pSMB = NULL;
	TRANSACTION2_QPI_RSP *pSMBr = NULL;
	int rc = 0;
	int bytes_returned;
	int name_len;
	__u16 params, byte_count;

	cFYI(1, "In GetPosixACL (Unix) for path %s", searchName);

queryAclRetry:
	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
		(void **) &pSMBr);
	if (rc)
		return rc;

	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
		name_len =
			cifsConvertToUCS((__le16 *) pSMB->FileName, searchName,
					 PATH_MAX, nls_codepage, remap);
		name_len++;	/* trailing null */
		name_len *= 2;
		pSMB->FileName[name_len] = 0;
		pSMB->FileName[name_len+1] = 0;
	} else {	/* BB improve the check for buffer overruns BB */
		name_len = strnlen(searchName, PATH_MAX);
		name_len++;	/* trailing null */
		strncpy(pSMB->FileName, searchName, name_len);
	}

	params = 2 /* level */ + 4 /* rsrvd */ + name_len /* incl null */ ;
	pSMB->TotalDataCount = 0;
	pSMB->MaxParameterCount = cpu_to_le16(2);
	/* BB find exact max data count below from sess structure BB */
	pSMB->MaxDataCount = cpu_to_le16(4000);
	pSMB->MaxSetupCount = 0;
	pSMB->Reserved = 0;
	pSMB->Flags = 0;
	pSMB->Timeout = 0;
	pSMB->Reserved2 = 0;
	pSMB->ParameterOffset = cpu_to_le16(
		offsetof(struct smb_com_transaction2_qpi_req,
			 InformationLevel) - 4);
	pSMB->DataCount = 0;
	pSMB->DataOffset = 0;
	pSMB->SetupCount = 1;
	pSMB->Reserved3 = 0;
	pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_PATH_INFORMATION);
	byte_count = params + 1 /* pad */ ;
	pSMB->TotalParameterCount = cpu_to_le16(params);
	pSMB->ParameterCount = pSMB->TotalParameterCount;
	pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_POSIX_ACL);
	pSMB->Reserved4 = 0;
	inc_rfc1001_len(pSMB, byte_count);
	pSMB->ByteCount = cpu_to_le16(byte_count);

	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
		(struct smb_hdr *) pSMBr, &bytes_returned, 0);
	cifs_stats_inc(&tcon->num_acl_get);
	if (rc) {
		cFYI(1, "Send error in Query POSIX ACL = %d", rc);
	} else {
		/* decode response */

		rc = validate_t2((struct smb_t2_rsp *)pSMBr);
		/* BB also check enough total bytes returned */
		if (rc || get_bcc(&pSMBr->hdr) < 2)
			rc = -EIO;	/* bad smb */
		else {
			__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
			__u16 count =
				le16_to_cpu(pSMBr->t2.DataCount);
			rc = cifs_copy_posix_acl(acl_inf,
				(char *)&pSMBr->hdr.Protocol+data_offset,
				buflen, acl_type, count);
		}
	}
	cifs_buf_release(pSMB);
	if (rc == -EAGAIN)
		goto queryAclRetry;
	return rc;
}

/*
 * Set POSIX ACL (SMB_SET_POSIX_ACL) on a path; local_acl is a Linux
 * xattr-format ACL which is converted to the CIFS wire format in place
 * in the request buffer.
 */
int
CIFSSMBSetPosixACL(const int xid, struct cifs_tcon *tcon,
		   const unsigned char *fileName,
		   const char *local_acl, const int buflen,
		   const int acl_type,
		   const struct nls_table *nls_codepage, int remap)
{
	struct smb_com_transaction2_spi_req *pSMB = NULL;
	struct smb_com_transaction2_spi_rsp *pSMBr = NULL;
	char *parm_data;
	int name_len;
	int rc = 0;
	int bytes_returned = 0;
	__u16 params, byte_count, data_count, param_offset, offset;

	cFYI(1, "In SetPosixACL (Unix) for path %s", fileName);
setAclRetry:
	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
		      (void **) &pSMBr);
	if (rc)
		return rc;
	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
		name_len =
			cifsConvertToUCS((__le16 *) pSMB->FileName, fileName,
					 PATH_MAX, nls_codepage, remap);
		name_len++;	/* trailing null */
		name_len *= 2;
	} else {	/* BB improve the check for buffer overruns BB */
		name_len = strnlen(fileName, PATH_MAX);
		name_len++;	/* trailing null */
		strncpy(pSMB->FileName, fileName, name_len);
	}
	params = 6 + name_len;
	pSMB->MaxParameterCount = cpu_to_le16(2);
	/* BB find max SMB size from sess */
	pSMB->MaxDataCount = cpu_to_le16(1000);
	pSMB->MaxSetupCount = 0;
	pSMB->Reserved = 0;
	pSMB->Flags = 0;
	pSMB->Timeout = 0;
	pSMB->Reserved2 = 0;
	param_offset = offsetof(struct smb_com_transaction2_spi_req,
				InformationLevel) - 4;
	offset = param_offset + params;
	parm_data = ((char *) &pSMB->hdr.Protocol) + offset;
	pSMB->ParameterOffset = cpu_to_le16(param_offset);

	/* convert to on the wire format for POSIX ACL */
	data_count = ACL_to_cifs_posix(parm_data, local_acl, buflen, acl_type);

	if (data_count == 0) {
		rc = -EOPNOTSUPP;
		goto setACLerrorExit;
	}
	pSMB->DataOffset = cpu_to_le16(offset);
	pSMB->SetupCount = 1;
	pSMB->Reserved3 = 0;
	pSMB->SubCommand = cpu_to_le16(TRANS2_SET_PATH_INFORMATION);
	pSMB->InformationLevel = cpu_to_le16(SMB_SET_POSIX_ACL);
	byte_count = 3 /* pad */ + params + data_count;
	pSMB->DataCount = cpu_to_le16(data_count);
	pSMB->TotalDataCount = pSMB->DataCount;
	pSMB->ParameterCount = cpu_to_le16(params);
	pSMB->TotalParameterCount = pSMB->ParameterCount;
	pSMB->Reserved4 = 0;
	inc_rfc1001_len(pSMB, byte_count);
	pSMB->ByteCount = cpu_to_le16(byte_count);
	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
	if (rc)
		cFYI(1, "Set POSIX ACL returned %d", rc);

setACLerrorExit:
	cifs_buf_release(pSMB);
	if (rc == -EAGAIN)
		goto setAclRetry;
	return rc;
}

/* BB fix tabs in this function FIXME BB */
/*
 * Query the extended-attribute flag bits (SMB_QUERY_ATTR_FLAGS) for an
 * already-open file identified by netfid; results are returned through
 * pExtAttrBits and pMask.
 */
int
CIFSGetExtAttr(const int xid, struct cifs_tcon *tcon, const int netfid,
	       __u64 *pExtAttrBits, __u64 *pMask)
{
	int rc = 0;
	struct smb_t2_qfi_req *pSMB = NULL;
	struct smb_t2_qfi_rsp *pSMBr = NULL;
	int bytes_returned;
	__u16 params, byte_count;

	cFYI(1, "In GetExtAttr");
	if (tcon == NULL)
		return -ENODEV;

GetExtAttrRetry:
	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
		      (void **) &pSMBr);
	if (rc)
		return rc;

	params = 2 /* level */ + 2 /* fid */;
	pSMB->t2.TotalDataCount = 0;
	pSMB->t2.MaxParameterCount = cpu_to_le16(4);
	/* BB find exact max data count below from sess structure BB */
	pSMB->t2.MaxDataCount = cpu_to_le16(4000);
	pSMB->t2.MaxSetupCount = 0;
	pSMB->t2.Reserved = 0;
	pSMB->t2.Flags = 0;
	pSMB->t2.Timeout = 0;
	pSMB->t2.Reserved2 = 0;
	pSMB->t2.ParameterOffset = cpu_to_le16(offsetof(struct smb_t2_qfi_req,
					       Fid) - 4);
	pSMB->t2.DataCount = 0;
	pSMB->t2.DataOffset = 0;
	pSMB->t2.SetupCount = 1;
	pSMB->t2.Reserved3 = 0;
	pSMB->t2.SubCommand = cpu_to_le16(TRANS2_QUERY_FILE_INFORMATION);
	byte_count = params + 1 /* pad */ ;
	pSMB->t2.TotalParameterCount = cpu_to_le16(params);
	pSMB->t2.ParameterCount = pSMB->t2.TotalParameterCount;
	pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_ATTR_FLAGS);
	pSMB->Pad = 0;
	pSMB->Fid = netfid;
	inc_rfc1001_len(pSMB, byte_count);
	pSMB->t2.ByteCount = cpu_to_le16(byte_count);

	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
	if (rc) {
		cFYI(1, "error %d in GetExtAttr", rc);
	} else {
		/* decode response */
		rc = validate_t2((struct smb_t2_rsp *)pSMBr);
		/* BB also check enough total bytes returned */
		if (rc || get_bcc(&pSMBr->hdr) < 2)
			/* If rc should we check for EOPNOSUPP and
			   disable the srvino flag? or in caller? */
			rc = -EIO;	/* bad smb */
		else {
			__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
			__u16 count = le16_to_cpu(pSMBr->t2.DataCount);
			struct file_chattr_info *pfinfo;
			/* BB Do we need a cast or hash here ? */
			if (count != 16) {
				cFYI(1, "Illegal size ret in GetExtAttr");
				rc = -EIO;
				goto GetExtAttrOut;
			}
			pfinfo = (struct file_chattr_info *)
				 (data_offset + (char *) &pSMBr->hdr.Protocol);
			*pExtAttrBits = le64_to_cpu(pfinfo->mode);
			*pMask = le64_to_cpu(pfinfo->mask);
		}
	}
GetExtAttrOut:
	cifs_buf_release(pSMB);
	if (rc == -EAGAIN)
		goto GetExtAttrRetry;
	return rc;
}

#endif /* CONFIG_POSIX */

#ifdef CONFIG_CIFS_ACL
/*
 * Initialize NT TRANSACT SMB into small smb request buffer.  This assumes that
 * all NT TRANSACTS that we init here have total parm and data under about 400
 * bytes (to fit in small cifs buffer size), which is the case so far, it
 * easily fits. NB: Setup words themselves and ByteCount MaxSetupCount (size of
 * returned setup area) and MaxParameterCount (returned parms size) must be set
 * by caller
 */
static int
smb_init_nttransact(const __u16 sub_command, const int setup_count,
		   const int parm_len, struct cifs_tcon *tcon,
		   void **ret_buf)
{
	int rc;
	__u32 temp_offset;
	struct smb_com_ntransact_req *pSMB;

	rc = small_smb_init(SMB_COM_NT_TRANSACT, 19 + setup_count, tcon,
				(void **)&pSMB);
	if (rc)
		return rc;
	*ret_buf = (void *)pSMB;
	pSMB->Reserved = 0;
	pSMB->TotalParameterCount = cpu_to_le32(parm_len);
	pSMB->TotalDataCount = 0;
	pSMB->MaxDataCount = cpu_to_le32((tcon->ses->server->maxBuf -
					  MAX_CIFS_HDR_SIZE) & 0xFFFFFF00);
	pSMB->ParameterCount = pSMB->TotalParameterCount;
	pSMB->DataCount = pSMB->TotalDataCount;
	temp_offset = offsetof(struct smb_com_ntransact_req, Parms) +
			(setup_count * 2) - 4 /* for rfc1001 length itself */;
	pSMB->ParameterOffset = cpu_to_le32(temp_offset);
	pSMB->DataOffset = cpu_to_le32(temp_offset + parm_len);
	pSMB->SetupCount = setup_count; /* no need to le convert byte fields */
	pSMB->SubCommand = cpu_to_le16(sub_command);
	return 0;
}

/*
 * Sanity-check the parameter and data areas of an NT TRANSACT response
 * against the byte count; on success return pointers to and lengths of
 * both areas through the pp/plen out parameters.
 */
static int
validate_ntransact(char *buf, char **ppparm, char **ppdata,
		   __u32 *pparmlen, __u32 *pdatalen)
{
	char *end_of_smb;
	__u32 data_count, data_offset, parm_count, parm_offset;
	struct smb_com_ntransact_rsp *pSMBr;
	u16 bcc;

	*pdatalen = 0;
	*pparmlen = 0;
	if (buf == NULL)
		return -EINVAL;

	pSMBr = (struct smb_com_ntransact_rsp *)buf;

	bcc = get_bcc(&pSMBr->hdr);
	end_of_smb = 2 /* sizeof byte count */ + bcc +
			(char *)&pSMBr->ByteCount;

	data_offset = le32_to_cpu(pSMBr->DataOffset);
	data_count = le32_to_cpu(pSMBr->DataCount);
	parm_offset = le32_to_cpu(pSMBr->ParameterOffset);
	parm_count = le32_to_cpu(pSMBr->ParameterCount);

	*ppparm = (char *)&pSMBr->hdr.Protocol + parm_offset;
	*ppdata = (char *)&pSMBr->hdr.Protocol + data_offset;

	/* should we also check that parm and data areas do not overlap?
*/ if (*ppparm > end_of_smb) { cFYI(1, "parms start after end of smb"); return -EINVAL; } else if (parm_count + *ppparm > end_of_smb) { cFYI(1, "parm end after end of smb"); return -EINVAL; } else if (*ppdata > end_of_smb) { cFYI(1, "data starts after end of smb"); return -EINVAL; } else if (data_count + *ppdata > end_of_smb) { cFYI(1, "data %p + count %d (%p) past smb end %p start %p", *ppdata, data_count, (data_count + *ppdata), end_of_smb, pSMBr); return -EINVAL; } else if (parm_count + data_count > bcc) { cFYI(1, "parm count and data count larger than SMB"); return -EINVAL; } *pdatalen = data_count; *pparmlen = parm_count; return 0; } /* Get Security Descriptor (by handle) from remote server for a file or dir */ int CIFSSMBGetCIFSACL(const int xid, struct cifs_tcon *tcon, __u16 fid, struct cifs_ntsd **acl_inf, __u32 *pbuflen) { int rc = 0; int buf_type = 0; QUERY_SEC_DESC_REQ *pSMB; struct kvec iov[1]; cFYI(1, "GetCifsACL"); *pbuflen = 0; *acl_inf = NULL; rc = smb_init_nttransact(NT_TRANSACT_QUERY_SECURITY_DESC, 0, 8 /* parm len */, tcon, (void **) &pSMB); if (rc) return rc; pSMB->MaxParameterCount = cpu_to_le32(4); /* BB TEST with big acls that might need to be e.g. 
larger than 16K */ pSMB->MaxSetupCount = 0; pSMB->Fid = fid; /* file handle always le */ pSMB->AclFlags = cpu_to_le32(CIFS_ACL_OWNER | CIFS_ACL_GROUP | CIFS_ACL_DACL); pSMB->ByteCount = cpu_to_le16(11); /* 3 bytes pad + 8 bytes parm */ inc_rfc1001_len(pSMB, 11); iov[0].iov_base = (char *)pSMB; iov[0].iov_len = be32_to_cpu(pSMB->hdr.smb_buf_length) + 4; rc = SendReceive2(xid, tcon->ses, iov, 1 /* num iovec */, &buf_type, 0); cifs_stats_inc(&tcon->num_acl_get); if (rc) { cFYI(1, "Send error in QuerySecDesc = %d", rc); } else { /* decode response */ __le32 *parm; __u32 parm_len; __u32 acl_len; struct smb_com_ntransact_rsp *pSMBr; char *pdata; /* validate_nttransact */ rc = validate_ntransact(iov[0].iov_base, (char **)&parm, &pdata, &parm_len, pbuflen); if (rc) goto qsec_out; pSMBr = (struct smb_com_ntransact_rsp *)iov[0].iov_base; cFYI(1, "smb %p parm %p data %p", pSMBr, parm, *acl_inf); if (le32_to_cpu(pSMBr->ParameterCount) != 4) { rc = -EIO; /* bad smb */ *pbuflen = 0; goto qsec_out; } /* BB check that data area is minimum length and as big as acl_len */ acl_len = le32_to_cpu(*parm); if (acl_len != *pbuflen) { cERROR(1, "acl length %d does not match %d", acl_len, *pbuflen); if (*pbuflen > acl_len) *pbuflen = acl_len; } /* check if buffer is big enough for the acl header followed by the smallest SID */ if ((*pbuflen < sizeof(struct cifs_ntsd) + 8) || (*pbuflen >= 64 * 1024)) { cERROR(1, "bad acl length %d", *pbuflen); rc = -EINVAL; *pbuflen = 0; } else { *acl_inf = kmalloc(*pbuflen, GFP_KERNEL); if (*acl_inf == NULL) { *pbuflen = 0; rc = -ENOMEM; } memcpy(*acl_inf, pdata, *pbuflen); } } qsec_out: if (buf_type == CIFS_SMALL_BUFFER) cifs_small_buf_release(iov[0].iov_base); else if (buf_type == CIFS_LARGE_BUFFER) cifs_buf_release(iov[0].iov_base); /* cifs_small_buf_release(pSMB); */ /* Freed earlier now in SendReceive2 */ return rc; } int CIFSSMBSetCIFSACL(const int xid, struct cifs_tcon *tcon, __u16 fid, struct cifs_ntsd *pntsd, __u32 acllen) { __u16 byte_count, 
		param_count, data_count, param_offset, data_offset;
	int rc = 0;
	int bytes_returned = 0;
	SET_SEC_DESC_REQ *pSMB = NULL;
	NTRANSACT_RSP *pSMBr = NULL;

setCifsAclRetry:
	rc = smb_init(SMB_COM_NT_TRANSACT, 19, tcon, (void **) &pSMB,
			(void **) &pSMBr);
	if (rc)
		return (rc);

	pSMB->MaxSetupCount = 0;
	pSMB->Reserved = 0;

	param_count = 8;
	param_offset = offsetof(struct smb_com_transaction_ssec_req, Fid) - 4;
	data_count = acllen;
	data_offset = param_offset + param_count;
	byte_count = 3 /* pad */ + param_count;

	pSMB->DataCount = cpu_to_le32(data_count);
	pSMB->TotalDataCount = pSMB->DataCount;
	pSMB->MaxParameterCount = cpu_to_le32(4);
	pSMB->MaxDataCount = cpu_to_le32(16384);
	pSMB->ParameterCount = cpu_to_le32(param_count);
	pSMB->ParameterOffset = cpu_to_le32(param_offset);
	pSMB->TotalParameterCount = pSMB->ParameterCount;
	pSMB->DataOffset = cpu_to_le32(data_offset);
	pSMB->SetupCount = 0;
	pSMB->SubCommand = cpu_to_le16(NT_TRANSACT_SET_SECURITY_DESC);
	pSMB->ByteCount = cpu_to_le16(byte_count+data_count);
	pSMB->Fid = fid; /* file handle always le */
	pSMB->Reserved2 = 0;
	pSMB->AclFlags = cpu_to_le32(CIFS_ACL_DACL);

	if (pntsd && acllen) {
		/* copy the caller's security descriptor into the data area */
		memcpy((char *) &pSMBr->hdr.Protocol + data_offset,
			(char *) pntsd,
			acllen);
		inc_rfc1001_len(pSMB, byte_count + data_count);
	} else
		inc_rfc1001_len(pSMB, byte_count);

	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
		(struct smb_hdr *) pSMBr, &bytes_returned, 0);

	cFYI(1, "SetCIFSACL bytes_returned: %d, rc: %d", bytes_returned, rc);
	if (rc)
		cFYI(1, "Set CIFS ACL returned %d", rc);
	cifs_buf_release(pSMB);

	if (rc == -EAGAIN)
		goto setCifsAclRetry;

	return (rc);
}

#endif /* CONFIG_CIFS_ACL */

/* Legacy Query Path Information call for lookup to old servers such
   as Win9x/WinME */
int SMBQueryInformation(const int xid, struct cifs_tcon *tcon,
			const unsigned char *searchName,
			FILE_ALL_INFO *pFinfo,
			const struct nls_table *nls_codepage, int remap)
{
	QUERY_INFORMATION_REQ *pSMB;
	QUERY_INFORMATION_RSP *pSMBr;
	int rc = 0;
	int bytes_returned;
	int name_len;

	cFYI(1, "In SMBQPath path %s", searchName);
QInfRetry:
	rc = smb_init(SMB_COM_QUERY_INFORMATION, 0, tcon, (void **) &pSMB,
		      (void **) &pSMBr);
	if (rc)
		return rc;

	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
		name_len =
			cifsConvertToUCS((__le16 *) pSMB->FileName,
					 searchName, PATH_MAX, nls_codepage,
					 remap);
		name_len++;	/* trailing null */
		name_len *= 2;
	} else {
		name_len = strnlen(searchName, PATH_MAX);
		name_len++;	/* trailing null */
		strncpy(pSMB->FileName, searchName, name_len);
	}
	pSMB->BufferFormat = 0x04;
	name_len++; /* account for buffer type byte */
	inc_rfc1001_len(pSMB, (__u16)name_len);
	pSMB->ByteCount = cpu_to_le16(name_len);

	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
	if (rc) {
		cFYI(1, "Send error in QueryInfo = %d", rc);
	} else if (pFinfo) {
		struct timespec ts;
		__u32 time = le32_to_cpu(pSMBr->last_write_time);

		/* decode response */
		/* BB FIXME - add time zone adjustment BB */
		memset(pFinfo, 0, sizeof(FILE_ALL_INFO));
		ts.tv_nsec = 0;
		ts.tv_sec = time;
		/* decode time fields */
		pFinfo->ChangeTime = cpu_to_le64(cifs_UnixTimeToNT(ts));
		pFinfo->LastWriteTime = pFinfo->ChangeTime;
		pFinfo->LastAccessTime = 0;
		pFinfo->AllocationSize =
			cpu_to_le64(le32_to_cpu(pSMBr->size));
		pFinfo->EndOfFile = pFinfo->AllocationSize;
		pFinfo->Attributes =
			cpu_to_le32(le16_to_cpu(pSMBr->attr));
	} else
		rc = -EIO; /* bad buffer passed in */

	cifs_buf_release(pSMB);

	if (rc == -EAGAIN)
		goto QInfRetry;

	return rc;
}

/* Query SMB_QUERY_FILE_ALL_INFO for an already-open file by netfid. */
int
CIFSSMBQFileInfo(const int xid, struct cifs_tcon *tcon,
		 u16 netfid, FILE_ALL_INFO *pFindData)
{
	struct smb_t2_qfi_req *pSMB = NULL;
	struct smb_t2_qfi_rsp *pSMBr = NULL;
	int rc = 0;
	int bytes_returned;
	__u16 params, byte_count;

QFileInfoRetry:
	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
		      (void **) &pSMBr);
	if (rc)
		return rc;

	params = 2 /* level */ + 2 /* fid */;
	pSMB->t2.TotalDataCount = 0;
	pSMB->t2.MaxParameterCount = cpu_to_le16(4);
	/* BB find exact max data count below from sess structure BB */
	pSMB->t2.MaxDataCount = cpu_to_le16(CIFSMaxBufSize);
	pSMB->t2.MaxSetupCount = 0;
	pSMB->t2.Reserved = 0;
	pSMB->t2.Flags = 0;
	pSMB->t2.Timeout = 0;
	pSMB->t2.Reserved2 = 0;
	pSMB->t2.ParameterOffset = cpu_to_le16(offsetof(struct smb_t2_qfi_req,
					       Fid) - 4);
	pSMB->t2.DataCount = 0;
	pSMB->t2.DataOffset = 0;
	pSMB->t2.SetupCount = 1;
	pSMB->t2.Reserved3 = 0;
	pSMB->t2.SubCommand = cpu_to_le16(TRANS2_QUERY_FILE_INFORMATION);
	byte_count = params + 1 /* pad */ ;
	pSMB->t2.TotalParameterCount = cpu_to_le16(params);
	pSMB->t2.ParameterCount = pSMB->t2.TotalParameterCount;
	pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_FILE_ALL_INFO);
	pSMB->Pad = 0;
	pSMB->Fid = netfid;
	inc_rfc1001_len(pSMB, byte_count);

	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
	if (rc) {
		cFYI(1, "Send error in QPathInfo = %d", rc);
	} else {		/* decode response */
		rc = validate_t2((struct smb_t2_rsp *)pSMBr);

		if (rc) /* BB add auto retry on EOPNOTSUPP? */
			rc = -EIO;
		else if (get_bcc(&pSMBr->hdr) < 40)
			rc = -EIO;	/* bad smb */
		else if (pFindData) {
			__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
			memcpy((char *) pFindData,
			       (char *) &pSMBr->hdr.Protocol +
			       data_offset, sizeof(FILE_ALL_INFO));
		} else
		    rc = -ENOMEM;
	}
	cifs_buf_release(pSMB);
	if (rc == -EAGAIN)
		goto QFileInfoRetry;

	return rc;
}

/*
 * Query all file information for a path: level 263 SMB_QUERY_FILE_ALL_INFO,
 * or SMB_INFO_STANDARD when legacy is set (for pre-NT servers).
 */
int
CIFSSMBQPathInfo(const int xid, struct cifs_tcon *tcon,
		 const unsigned char *searchName,
		 FILE_ALL_INFO *pFindData,
		 int legacy /* old style infolevel */,
		 const struct nls_table *nls_codepage, int remap)
{
/* level 263 SMB_QUERY_FILE_ALL_INFO */
	TRANSACTION2_QPI_REQ *pSMB = NULL;
	TRANSACTION2_QPI_RSP *pSMBr = NULL;
	int rc = 0;
	int bytes_returned;
	int name_len;
	__u16 params, byte_count;

/* cFYI(1, "In QPathInfo path %s", searchName); */
QPathInfoRetry:
	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
		      (void **) &pSMBr);
	if (rc)
		return rc;

	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
		name_len =
		    cifsConvertToUCS((__le16 *) pSMB->FileName, searchName,
				     PATH_MAX,
				     nls_codepage, remap);
		name_len++;	/* trailing null */
		name_len *= 2;
	} else {	/* BB improve the check for buffer overruns BB */
		name_len = strnlen(searchName, PATH_MAX);
		name_len++;	/* trailing null */
		strncpy(pSMB->FileName, searchName, name_len);
	}

	params = 2 /* level */ + 4 /* reserved */ + name_len /* includes NUL */;
	pSMB->TotalDataCount = 0;
	pSMB->MaxParameterCount = cpu_to_le16(2);
	/* BB find exact max SMB PDU from sess structure BB */
	pSMB->MaxDataCount = cpu_to_le16(4000);
	pSMB->MaxSetupCount = 0;
	pSMB->Reserved = 0;
	pSMB->Flags = 0;
	pSMB->Timeout = 0;
	pSMB->Reserved2 = 0;
	pSMB->ParameterOffset = cpu_to_le16(offsetof(
	struct smb_com_transaction2_qpi_req, InformationLevel) - 4);
	pSMB->DataCount = 0;
	pSMB->DataOffset = 0;
	pSMB->SetupCount = 1;
	pSMB->Reserved3 = 0;
	pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_PATH_INFORMATION);
	byte_count = params + 1 /* pad */ ;
	pSMB->TotalParameterCount = cpu_to_le16(params);
	pSMB->ParameterCount = pSMB->TotalParameterCount;
	if (legacy)
		pSMB->InformationLevel = cpu_to_le16(SMB_INFO_STANDARD);
	else
		pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_FILE_ALL_INFO);
	pSMB->Reserved4 = 0;
	inc_rfc1001_len(pSMB, byte_count);
	pSMB->ByteCount = cpu_to_le16(byte_count);

	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
	if (rc) {
		cFYI(1, "Send error in QPathInfo = %d", rc);
	} else {		/* decode response */
		rc = validate_t2((struct smb_t2_rsp *)pSMBr);

		if (rc) /* BB add auto retry on EOPNOTSUPP? */
			rc = -EIO;
		else if (!legacy && get_bcc(&pSMBr->hdr) < 40)
			rc = -EIO;	/* bad smb */
		else if (legacy && get_bcc(&pSMBr->hdr) < 24)
			rc = -EIO;  /* 24 or 26 expected but we do not read
					last field */
		else if (pFindData) {
			int size;
			__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);

			/* On legacy responses we do not read the last field,
			EAsize, fortunately since it varies by subdialect and
			also note it differs on Set vs. Get, ie two bytes or 4
			bytes depending but we don't care here */
			if (legacy)
				size = sizeof(FILE_INFO_STANDARD);
			else
				size = sizeof(FILE_ALL_INFO);
			memcpy((char *) pFindData,
			       (char *) &pSMBr->hdr.Protocol +
			       data_offset, size);
		} else
		    rc = -ENOMEM;
	}
	cifs_buf_release(pSMB);
	if (rc == -EAGAIN)
		goto QPathInfoRetry;

	return rc;
}

/* Query Unix basic file info (SMB_QUERY_FILE_UNIX_BASIC) by open netfid. */
int
CIFSSMBUnixQFileInfo(const int xid, struct cifs_tcon *tcon,
		 u16 netfid, FILE_UNIX_BASIC_INFO *pFindData)
{
	struct smb_t2_qfi_req *pSMB = NULL;
	struct smb_t2_qfi_rsp *pSMBr = NULL;
	int rc = 0;
	int bytes_returned;
	__u16 params, byte_count;

UnixQFileInfoRetry:
	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
		      (void **) &pSMBr);
	if (rc)
		return rc;

	params = 2 /* level */ + 2 /* fid */;
	pSMB->t2.TotalDataCount = 0;
	pSMB->t2.MaxParameterCount = cpu_to_le16(4);
	/* BB find exact max data count below from sess structure BB */
	pSMB->t2.MaxDataCount = cpu_to_le16(CIFSMaxBufSize);
	pSMB->t2.MaxSetupCount = 0;
	pSMB->t2.Reserved = 0;
	pSMB->t2.Flags = 0;
	pSMB->t2.Timeout = 0;
	pSMB->t2.Reserved2 = 0;
	pSMB->t2.ParameterOffset = cpu_to_le16(offsetof(struct smb_t2_qfi_req,
					       Fid) - 4);
	pSMB->t2.DataCount = 0;
	pSMB->t2.DataOffset = 0;
	pSMB->t2.SetupCount = 1;
	pSMB->t2.Reserved3 = 0;
	pSMB->t2.SubCommand = cpu_to_le16(TRANS2_QUERY_FILE_INFORMATION);
	byte_count = params + 1 /* pad */ ;
	pSMB->t2.TotalParameterCount = cpu_to_le16(params);
	pSMB->t2.ParameterCount = pSMB->t2.TotalParameterCount;
	pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_FILE_UNIX_BASIC);
	pSMB->Pad = 0;
	pSMB->Fid = netfid;
	inc_rfc1001_len(pSMB, byte_count);

	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
	if (rc) {
		cFYI(1, "Send error in QPathInfo = %d", rc);
	} else {		/* decode response */
		rc = validate_t2((struct smb_t2_rsp *)pSMBr);

		if (rc || get_bcc(&pSMBr->hdr) < sizeof(FILE_UNIX_BASIC_INFO)) {
			cERROR(1, "Malformed FILE_UNIX_BASIC_INFO response.\n"
				   "Unix Extensions can be disabled on mount "
				   "by specifying the nosfu mount option.");
			rc = -EIO;	/* bad smb */
		} else {
			__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
			memcpy((char *) pFindData,
			       (char *) &pSMBr->hdr.Protocol +
			       data_offset, sizeof(FILE_UNIX_BASIC_INFO));
		}
	}
	cifs_buf_release(pSMB);
	if (rc == -EAGAIN)
		goto UnixQFileInfoRetry;

	return rc;
}

/* Query Unix basic file info (SMB_QUERY_FILE_UNIX_BASIC) by path name. */
int
CIFSSMBUnixQPathInfo(const int xid, struct cifs_tcon *tcon,
		     const unsigned char *searchName,
		     FILE_UNIX_BASIC_INFO *pFindData,
		     const struct nls_table *nls_codepage, int remap)
{
/* SMB_QUERY_FILE_UNIX_BASIC */
	TRANSACTION2_QPI_REQ *pSMB = NULL;
	TRANSACTION2_QPI_RSP *pSMBr = NULL;
	int rc = 0;
	int bytes_returned = 0;
	int name_len;
	__u16 params, byte_count;

	cFYI(1, "In QPathInfo (Unix) the path %s", searchName);
UnixQPathInfoRetry:
	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
		      (void **) &pSMBr);
	if (rc)
		return rc;

	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
		name_len =
		    cifsConvertToUCS((__le16 *) pSMB->FileName, searchName,
				     PATH_MAX, nls_codepage, remap);
		name_len++;	/* trailing null */
		name_len *= 2;
	} else {	/* BB improve the check for buffer overruns BB */
		name_len = strnlen(searchName, PATH_MAX);
		name_len++;	/* trailing null */
		strncpy(pSMB->FileName, searchName, name_len);
	}

	params = 2 /* level */ + 4 /* reserved */ + name_len /* includes NUL */;
	pSMB->TotalDataCount = 0;
	pSMB->MaxParameterCount = cpu_to_le16(2);
	/* BB find exact max SMB PDU from sess structure BB */
	pSMB->MaxDataCount = cpu_to_le16(4000);
	pSMB->MaxSetupCount = 0;
	pSMB->Reserved = 0;
	pSMB->Flags = 0;
	pSMB->Timeout = 0;
	pSMB->Reserved2 = 0;
	pSMB->ParameterOffset = cpu_to_le16(offsetof(
	struct smb_com_transaction2_qpi_req, InformationLevel) - 4);
	pSMB->DataCount = 0;
	pSMB->DataOffset = 0;
	pSMB->SetupCount = 1;
	pSMB->Reserved3 = 0;
	pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_PATH_INFORMATION);
	byte_count = params + 1 /* pad */ ;
	pSMB->TotalParameterCount = cpu_to_le16(params);
	pSMB->ParameterCount = pSMB->TotalParameterCount;
	pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_FILE_UNIX_BASIC);
	pSMB->Reserved4 = 0;
	inc_rfc1001_len(pSMB, byte_count);
	pSMB->ByteCount = cpu_to_le16(byte_count);

	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
	if (rc) {
		cFYI(1, "Send error in QPathInfo = %d", rc);
	} else {		/* decode response */
		rc = validate_t2((struct smb_t2_rsp *)pSMBr);

		if (rc || get_bcc(&pSMBr->hdr) < sizeof(FILE_UNIX_BASIC_INFO)) {
			cERROR(1, "Malformed FILE_UNIX_BASIC_INFO response.\n"
				   "Unix Extensions can be disabled on mount "
				   "by specifying the nosfu mount option.");
			rc = -EIO;	/* bad smb */
		} else {
			__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
			memcpy((char *) pFindData,
			       (char *) &pSMBr->hdr.Protocol +
			       data_offset, sizeof(FILE_UNIX_BASIC_INFO));
		}
	}
	cifs_buf_release(pSMB);
	if (rc == -EAGAIN)
		goto UnixQPathInfoRetry;

	return rc;
}

/* xid, tcon, searchName and codepage are input parms, rest are returned */
int
CIFSFindFirst(const int xid, struct cifs_tcon *tcon,
	      const char *searchName,
	      const struct nls_table *nls_codepage,
	      __u16 *pnetfid, struct cifs_search_info *psrch_inf,
	      int remap, const char dirsep)
{
/* level 257 SMB_ */
	TRANSACTION2_FFIRST_REQ *pSMB = NULL;
	TRANSACTION2_FFIRST_RSP *pSMBr = NULL;
	T2_FFIRST_RSP_PARMS *parms;
	int rc = 0;
	int bytes_returned = 0;
	int name_len;
	__u16 params, byte_count;

	cFYI(1, "In FindFirst for %s", searchName);

findFirstRetry:
	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
		      (void **) &pSMBr);
	if (rc)
		return rc;

	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
		name_len =
		    cifsConvertToUCS((__le16 *) pSMB->FileName, searchName,
				     PATH_MAX, nls_codepage, remap);
		/* We can not add the asterik earlier in case
		it got remapped to 0xF03A as if it were part of the
		directory name instead of a wildcard */
		name_len *= 2;
		pSMB->FileName[name_len] = dirsep;
		pSMB->FileName[name_len+1] = 0;
		pSMB->FileName[name_len+2] = '*';
		pSMB->FileName[name_len+3] = 0;
		name_len += 4; /* now the trailing null */
		pSMB->FileName[name_len] = 0; /* null terminate just in case */
pSMB->FileName[name_len+1] = 0; name_len += 2; } else { /* BB add check for overrun of SMB buf BB */ name_len = strnlen(searchName, PATH_MAX); /* BB fix here and in unicode clause above ie if (name_len > buffersize-header) free buffer exit; BB */ strncpy(pSMB->FileName, searchName, name_len); pSMB->FileName[name_len] = dirsep; pSMB->FileName[name_len+1] = '*'; pSMB->FileName[name_len+2] = 0; name_len += 3; } params = 12 + name_len /* includes null */ ; pSMB->TotalDataCount = 0; /* no EAs */ pSMB->MaxParameterCount = cpu_to_le16(10); pSMB->MaxDataCount = cpu_to_le16((tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE) & 0xFFFFFF00); pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Timeout = 0; pSMB->Reserved2 = 0; byte_count = params + 1 /* pad */ ; pSMB->TotalParameterCount = cpu_to_le16(params); pSMB->ParameterCount = pSMB->TotalParameterCount; pSMB->ParameterOffset = cpu_to_le16( offsetof(struct smb_com_transaction2_ffirst_req, SearchAttributes) - 4); pSMB->DataCount = 0; pSMB->DataOffset = 0; pSMB->SetupCount = 1; /* one byte, no need to make endian neutral */ pSMB->Reserved3 = 0; pSMB->SubCommand = cpu_to_le16(TRANS2_FIND_FIRST); pSMB->SearchAttributes = cpu_to_le16(ATTR_READONLY | ATTR_HIDDEN | ATTR_SYSTEM | ATTR_DIRECTORY); pSMB->SearchCount = cpu_to_le16(CIFSMaxBufSize/sizeof(FILE_UNIX_INFO)); pSMB->SearchFlags = cpu_to_le16(CIFS_SEARCH_CLOSE_AT_END | CIFS_SEARCH_RETURN_RESUME); pSMB->InformationLevel = cpu_to_le16(psrch_inf->info_level); /* BB what should we set StorageType to? Does it matter? 
BB */ pSMB->SearchStorageType = 0; inc_rfc1001_len(pSMB, byte_count); pSMB->ByteCount = cpu_to_le16(byte_count); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); cifs_stats_inc(&tcon->num_ffirst); if (rc) {/* BB add logic to retry regular search if Unix search rejected unexpectedly by server */ /* BB Add code to handle unsupported level rc */ cFYI(1, "Error in FindFirst = %d", rc); cifs_buf_release(pSMB); /* BB eventually could optimize out free and realloc of buf */ /* for this case */ if (rc == -EAGAIN) goto findFirstRetry; } else { /* decode response */ /* BB remember to free buffer if error BB */ rc = validate_t2((struct smb_t2_rsp *)pSMBr); if (rc == 0) { unsigned int lnoff; if (pSMBr->hdr.Flags2 & SMBFLG2_UNICODE) psrch_inf->unicode = true; else psrch_inf->unicode = false; psrch_inf->ntwrk_buf_start = (char *)pSMBr; psrch_inf->smallBuf = 0; psrch_inf->srch_entries_start = (char *) &pSMBr->hdr.Protocol + le16_to_cpu(pSMBr->t2.DataOffset); parms = (T2_FFIRST_RSP_PARMS *)((char *) &pSMBr->hdr.Protocol + le16_to_cpu(pSMBr->t2.ParameterOffset)); if (parms->EndofSearch) psrch_inf->endOfSearch = true; else psrch_inf->endOfSearch = false; psrch_inf->entries_in_buffer = le16_to_cpu(parms->SearchCount); psrch_inf->index_of_last_entry = 2 /* skip . and .. 
*/ + psrch_inf->entries_in_buffer; lnoff = le16_to_cpu(parms->LastNameOffset); if (tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE < lnoff) { cERROR(1, "ignoring corrupt resume name"); psrch_inf->last_entry = NULL; return rc; } psrch_inf->last_entry = psrch_inf->srch_entries_start + lnoff; *pnetfid = parms->SearchHandle; } else { cifs_buf_release(pSMB); } } return rc; } int CIFSFindNext(const int xid, struct cifs_tcon *tcon, __u16 searchHandle, struct cifs_search_info *psrch_inf) { TRANSACTION2_FNEXT_REQ *pSMB = NULL; TRANSACTION2_FNEXT_RSP *pSMBr = NULL; T2_FNEXT_RSP_PARMS *parms; char *response_data; int rc = 0; int bytes_returned; unsigned int name_len; __u16 params, byte_count; cFYI(1, "In FindNext"); if (psrch_inf->endOfSearch) return -ENOENT; rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; params = 14; /* includes 2 bytes of null string, converted to LE below*/ byte_count = 0; pSMB->TotalDataCount = 0; /* no EAs */ pSMB->MaxParameterCount = cpu_to_le16(8); pSMB->MaxDataCount = cpu_to_le16((tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE) & 0xFFFFFF00); pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Timeout = 0; pSMB->Reserved2 = 0; pSMB->ParameterOffset = cpu_to_le16( offsetof(struct smb_com_transaction2_fnext_req,SearchHandle) - 4); pSMB->DataCount = 0; pSMB->DataOffset = 0; pSMB->SetupCount = 1; pSMB->Reserved3 = 0; pSMB->SubCommand = cpu_to_le16(TRANS2_FIND_NEXT); pSMB->SearchHandle = searchHandle; /* always kept as le */ pSMB->SearchCount = cpu_to_le16(CIFSMaxBufSize / sizeof(FILE_UNIX_INFO)); pSMB->InformationLevel = cpu_to_le16(psrch_inf->info_level); pSMB->ResumeKey = psrch_inf->resume_key; pSMB->SearchFlags = cpu_to_le16(CIFS_SEARCH_CLOSE_AT_END | CIFS_SEARCH_RETURN_RESUME); name_len = psrch_inf->resume_name_len; params += name_len; if (name_len < PATH_MAX) { memcpy(pSMB->ResumeFileName, psrch_inf->presume_name, name_len); byte_count += name_len; /* 14 byte parm len above enough for 
2 byte null terminator */ pSMB->ResumeFileName[name_len] = 0; pSMB->ResumeFileName[name_len+1] = 0; } else { rc = -EINVAL; goto FNext2_err_exit; } byte_count = params + 1 /* pad */ ; pSMB->TotalParameterCount = cpu_to_le16(params); pSMB->ParameterCount = pSMB->TotalParameterCount; inc_rfc1001_len(pSMB, byte_count); pSMB->ByteCount = cpu_to_le16(byte_count); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); cifs_stats_inc(&tcon->num_fnext); if (rc) { if (rc == -EBADF) { psrch_inf->endOfSearch = true; cifs_buf_release(pSMB); rc = 0; /* search probably was closed at end of search*/ } else cFYI(1, "FindNext returned = %d", rc); } else { /* decode response */ rc = validate_t2((struct smb_t2_rsp *)pSMBr); if (rc == 0) { unsigned int lnoff; /* BB fixme add lock for file (srch_info) struct here */ if (pSMBr->hdr.Flags2 & SMBFLG2_UNICODE) psrch_inf->unicode = true; else psrch_inf->unicode = false; response_data = (char *) &pSMBr->hdr.Protocol + le16_to_cpu(pSMBr->t2.ParameterOffset); parms = (T2_FNEXT_RSP_PARMS *)response_data; response_data = (char *)&pSMBr->hdr.Protocol + le16_to_cpu(pSMBr->t2.DataOffset); if (psrch_inf->smallBuf) cifs_small_buf_release( psrch_inf->ntwrk_buf_start); else cifs_buf_release(psrch_inf->ntwrk_buf_start); psrch_inf->srch_entries_start = response_data; psrch_inf->ntwrk_buf_start = (char *)pSMB; psrch_inf->smallBuf = 0; if (parms->EndofSearch) psrch_inf->endOfSearch = true; else psrch_inf->endOfSearch = false; psrch_inf->entries_in_buffer = le16_to_cpu(parms->SearchCount); psrch_inf->index_of_last_entry += psrch_inf->entries_in_buffer; lnoff = le16_to_cpu(parms->LastNameOffset); if (tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE < lnoff) { cERROR(1, "ignoring corrupt resume name"); psrch_inf->last_entry = NULL; return rc; } else psrch_inf->last_entry = psrch_inf->srch_entries_start + lnoff; /* cFYI(1, "fnxt2 entries in buf %d index_of_last %d", psrch_inf->entries_in_buffer, 
		psrch_inf->index_of_last_entry); */
			/* BB fixme add unlock here */
		}
	}

/* BB On error, should we leave previous search buf (and count and
last entry fields) intact or free the previous one? */

/* Note: On -EAGAIN error only caller can retry on handle based calls
since file handle passed in no longer valid */

FNext2_err_exit:
	if (rc != 0)
		cifs_buf_release(pSMB);
	return rc;
}

/*
 * Close a server-side search handle (SMB_COM_FIND_CLOSE2).
 *
 * -EAGAIN from init or send means the session was reconnected; the old
 * handle died with the session, so that case is reported as success.
 */
int
CIFSFindClose(const int xid, struct cifs_tcon *tcon,
	      const __u16 searchHandle)
{
	int rc = 0;
	FINDCLOSE_REQ *pSMB = NULL;

	cFYI(1, "In CIFSSMBFindClose");
	rc = small_smb_init(SMB_COM_FIND_CLOSE2, 1, tcon, (void **)&pSMB);

	/* no sense returning error if session restarted
		as file handle has been closed */
	if (rc == -EAGAIN)
		return 0;
	if (rc)
		return rc;

	pSMB->FileID = searchHandle;
	pSMB->ByteCount = 0;
	rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0);
	if (rc)
		cERROR(1, "Send error in FindClose = %d", rc);

	cifs_stats_inc(&tcon->num_fclose);

	/* Since session is dead, search handle closed on server already */
	if (rc == -EAGAIN)
		rc = 0;

	return rc;
}

/*
 * Fetch the server-assigned unique id for the path in searchName via a
 * TRANS2_QUERY_PATH_INFORMATION request at level
 * SMB_QUERY_FILE_INTERNAL_INFO; the 64-bit result is stored through
 * inode_number.  Retries the whole exchange once more on -EAGAIN
 * (session reconnect).  Returns 0 on success or a negative errno.
 */
int
CIFSGetSrvInodeNumber(const int xid, struct cifs_tcon *tcon,
		const unsigned char *searchName,
		__u64 *inode_number,
		const struct nls_table *nls_codepage, int remap)
{
	int rc = 0;
	TRANSACTION2_QPI_REQ *pSMB = NULL;
	TRANSACTION2_QPI_RSP *pSMBr = NULL;
	int name_len, bytes_returned;
	__u16 params, byte_count;

	cFYI(1, "In GetSrvInodeNum for %s", searchName);
	if (tcon == NULL)
		return -ENODEV;

GetInodeNumberRetry:
	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
		      (void **) &pSMBr);
	if (rc)
		return rc;

	/* path goes in the request either as UCS-2 or in the local codepage,
	   depending on what was negotiated for the session */
	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
		name_len =
			cifsConvertToUCS((__le16 *) pSMB->FileName,
					 searchName, PATH_MAX, nls_codepage,
					 remap);
		name_len++;	/* trailing null */
		name_len *= 2;
	} else {	/* BB improve the check for buffer overruns BB */
		name_len = strnlen(searchName, PATH_MAX);
		name_len++;	/* trailing null */
		strncpy(pSMB->FileName, searchName, name_len);
	}

	params = 2 /* level */ + 4 /* rsrvd */ + name_len /* incl null */ ;
	pSMB->TotalDataCount = 0;
	pSMB->MaxParameterCount = cpu_to_le16(2);
	/* BB find exact max data count below from sess structure BB */
	pSMB->MaxDataCount = cpu_to_le16(4000);
	pSMB->MaxSetupCount = 0;
	pSMB->Reserved = 0;
	pSMB->Flags = 0;
	pSMB->Timeout = 0;
	pSMB->Reserved2 = 0;
	pSMB->ParameterOffset = cpu_to_le16(offsetof(
		struct smb_com_transaction2_qpi_req, InformationLevel) - 4);
	pSMB->DataCount = 0;
	pSMB->DataOffset = 0;
	pSMB->SetupCount = 1;
	pSMB->Reserved3 = 0;
	pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_PATH_INFORMATION);
	byte_count = params + 1 /* pad */ ;
	pSMB->TotalParameterCount = cpu_to_le16(params);
	pSMB->ParameterCount = pSMB->TotalParameterCount;
	pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_FILE_INTERNAL_INFO);
	pSMB->Reserved4 = 0;
	inc_rfc1001_len(pSMB, byte_count);
	pSMB->ByteCount = cpu_to_le16(byte_count);

	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
	if (rc) {
		cFYI(1, "error %d in QueryInternalInfo", rc);
	} else {
		/* decode response */
		rc = validate_t2((struct smb_t2_rsp *)pSMBr);
		/* BB also check enough total bytes returned */
		if (rc || get_bcc(&pSMBr->hdr) < 2)
			/* If rc should we check for EOPNOSUPP and
			   disable the srvino flag? or in caller? */
			rc = -EIO;	/* bad smb */
		else {
			__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
			__u16 count = le16_to_cpu(pSMBr->t2.DataCount);
			struct file_internal_info *pfinfo;
			/* BB Do we need a cast or hash here ?
*/ if (count < 8) { cFYI(1, "Illegal size ret in QryIntrnlInf"); rc = -EIO; goto GetInodeNumOut; } pfinfo = (struct file_internal_info *) (data_offset + (char *) &pSMBr->hdr.Protocol); *inode_number = le64_to_cpu(pfinfo->UniqueId); } } GetInodeNumOut: cifs_buf_release(pSMB); if (rc == -EAGAIN) goto GetInodeNumberRetry; return rc; } /* parses DFS refferal V3 structure * caller is responsible for freeing target_nodes * returns: * on success - 0 * on failure - errno */ static int parse_DFS_referrals(TRANSACTION2_GET_DFS_REFER_RSP *pSMBr, unsigned int *num_of_nodes, struct dfs_info3_param **target_nodes, const struct nls_table *nls_codepage, int remap, const char *searchName) { int i, rc = 0; char *data_end; bool is_unicode; struct dfs_referral_level_3 *ref; if (pSMBr->hdr.Flags2 & SMBFLG2_UNICODE) is_unicode = true; else is_unicode = false; *num_of_nodes = le16_to_cpu(pSMBr->NumberOfReferrals); if (*num_of_nodes < 1) { cERROR(1, "num_referrals: must be at least > 0," "but we get num_referrals = %d\n", *num_of_nodes); rc = -EINVAL; goto parse_DFS_referrals_exit; } ref = (struct dfs_referral_level_3 *) &(pSMBr->referrals); if (ref->VersionNumber != cpu_to_le16(3)) { cERROR(1, "Referrals of V%d version are not supported," "should be V3", le16_to_cpu(ref->VersionNumber)); rc = -EINVAL; goto parse_DFS_referrals_exit; } /* get the upper boundary of the resp buffer */ data_end = (char *)(&(pSMBr->PathConsumed)) + le16_to_cpu(pSMBr->t2.DataCount); cFYI(1, "num_referrals: %d dfs flags: 0x%x ...\n", *num_of_nodes, le32_to_cpu(pSMBr->DFSFlags)); *target_nodes = kzalloc(sizeof(struct dfs_info3_param) * *num_of_nodes, GFP_KERNEL); if (*target_nodes == NULL) { cERROR(1, "Failed to allocate buffer for target_nodes\n"); rc = -ENOMEM; goto parse_DFS_referrals_exit; } /* collect necessary data from referrals */ for (i = 0; i < *num_of_nodes; i++) { char *temp; int max_len; struct dfs_info3_param *node = (*target_nodes)+i; node->flags = le32_to_cpu(pSMBr->DFSFlags); if (is_unicode) { 
__le16 *tmp = kmalloc(strlen(searchName)*2 + 2, GFP_KERNEL); if (tmp == NULL) { rc = -ENOMEM; goto parse_DFS_referrals_exit; } cifsConvertToUCS((__le16 *) tmp, searchName, PATH_MAX, nls_codepage, remap); node->path_consumed = cifs_ucs2_bytes(tmp, le16_to_cpu(pSMBr->PathConsumed), nls_codepage); kfree(tmp); } else node->path_consumed = le16_to_cpu(pSMBr->PathConsumed); node->server_type = le16_to_cpu(ref->ServerType); node->ref_flag = le16_to_cpu(ref->ReferralEntryFlags); /* copy DfsPath */ temp = (char *)ref + le16_to_cpu(ref->DfsPathOffset); max_len = data_end - temp; node->path_name = cifs_strndup_from_ucs(temp, max_len, is_unicode, nls_codepage); if (!node->path_name) { rc = -ENOMEM; goto parse_DFS_referrals_exit; } /* copy link target UNC */ temp = (char *)ref + le16_to_cpu(ref->NetworkAddressOffset); max_len = data_end - temp; node->node_name = cifs_strndup_from_ucs(temp, max_len, is_unicode, nls_codepage); if (!node->node_name) rc = -ENOMEM; } parse_DFS_referrals_exit: if (rc) { free_dfs_info_array(*target_nodes, *num_of_nodes); *target_nodes = NULL; *num_of_nodes = 0; } return rc; } int CIFSGetDFSRefer(const int xid, struct cifs_ses *ses, const unsigned char *searchName, struct dfs_info3_param **target_nodes, unsigned int *num_of_nodes, const struct nls_table *nls_codepage, int remap) { /* TRANS2_GET_DFS_REFERRAL */ TRANSACTION2_GET_DFS_REFER_REQ *pSMB = NULL; TRANSACTION2_GET_DFS_REFER_RSP *pSMBr = NULL; int rc = 0; int bytes_returned; int name_len; __u16 params, byte_count; *num_of_nodes = 0; *target_nodes = NULL; cFYI(1, "In GetDFSRefer the path %s", searchName); if (ses == NULL) return -ENODEV; getDFSRetry: rc = smb_init(SMB_COM_TRANSACTION2, 15, NULL, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; /* server pointer checked in called function, but should never be null here anyway */ pSMB->hdr.Mid = GetNextMid(ses->server); pSMB->hdr.Tid = ses->ipc_tid; pSMB->hdr.Uid = ses->Suid; if (ses->capabilities & CAP_STATUS32) pSMB->hdr.Flags2 |= 
SMBFLG2_ERR_STATUS; if (ses->capabilities & CAP_DFS) pSMB->hdr.Flags2 |= SMBFLG2_DFS; if (ses->capabilities & CAP_UNICODE) { pSMB->hdr.Flags2 |= SMBFLG2_UNICODE; name_len = cifsConvertToUCS((__le16 *) pSMB->RequestFileName, searchName, PATH_MAX, nls_codepage, remap); name_len++; /* trailing null */ name_len *= 2; } else { /* BB improve the check for buffer overruns BB */ name_len = strnlen(searchName, PATH_MAX); name_len++; /* trailing null */ strncpy(pSMB->RequestFileName, searchName, name_len); } if (ses->server) { if (ses->server->sec_mode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) pSMB->hdr.Flags2 |= SMBFLG2_SECURITY_SIGNATURE; } pSMB->hdr.Uid = ses->Suid; params = 2 /* level */ + name_len /*includes null */ ; pSMB->TotalDataCount = 0; pSMB->DataCount = 0; pSMB->DataOffset = 0; pSMB->MaxParameterCount = 0; /* BB find exact max SMB PDU from sess structure BB */ pSMB->MaxDataCount = cpu_to_le16(4000); pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Timeout = 0; pSMB->Reserved2 = 0; pSMB->ParameterOffset = cpu_to_le16(offsetof( struct smb_com_transaction2_get_dfs_refer_req, MaxReferralLevel) - 4); pSMB->SetupCount = 1; pSMB->Reserved3 = 0; pSMB->SubCommand = cpu_to_le16(TRANS2_GET_DFS_REFERRAL); byte_count = params + 3 /* pad */ ; pSMB->ParameterCount = cpu_to_le16(params); pSMB->TotalParameterCount = pSMB->ParameterCount; pSMB->MaxReferralLevel = cpu_to_le16(3); inc_rfc1001_len(pSMB, byte_count); pSMB->ByteCount = cpu_to_le16(byte_count); rc = SendReceive(xid, ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); if (rc) { cFYI(1, "Send error in GetDFSRefer = %d", rc); goto GetDFSRefExit; } rc = validate_t2((struct smb_t2_rsp *)pSMBr); /* BB Also check if enough total bytes returned? 
*/ if (rc || get_bcc(&pSMBr->hdr) < 17) { rc = -EIO; /* bad smb */ goto GetDFSRefExit; } cFYI(1, "Decoding GetDFSRefer response BCC: %d Offset %d", get_bcc(&pSMBr->hdr), le16_to_cpu(pSMBr->t2.DataOffset)); /* parse returned result into more usable form */ rc = parse_DFS_referrals(pSMBr, num_of_nodes, target_nodes, nls_codepage, remap, searchName); GetDFSRefExit: cifs_buf_release(pSMB); if (rc == -EAGAIN) goto getDFSRetry; return rc; } /* Query File System Info such as free space to old servers such as Win 9x */ int SMBOldQFSInfo(const int xid, struct cifs_tcon *tcon, struct kstatfs *FSData) { /* level 0x01 SMB_QUERY_FILE_SYSTEM_INFO */ TRANSACTION2_QFSI_REQ *pSMB = NULL; TRANSACTION2_QFSI_RSP *pSMBr = NULL; FILE_SYSTEM_ALLOC_INFO *response_data; int rc = 0; int bytes_returned = 0; __u16 params, byte_count; cFYI(1, "OldQFSInfo"); oldQFSInfoRetry: rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; params = 2; /* level */ pSMB->TotalDataCount = 0; pSMB->MaxParameterCount = cpu_to_le16(2); pSMB->MaxDataCount = cpu_to_le16(1000); pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Timeout = 0; pSMB->Reserved2 = 0; byte_count = params + 1 /* pad */ ; pSMB->TotalParameterCount = cpu_to_le16(params); pSMB->ParameterCount = pSMB->TotalParameterCount; pSMB->ParameterOffset = cpu_to_le16(offsetof( struct smb_com_transaction2_qfsi_req, InformationLevel) - 4); pSMB->DataCount = 0; pSMB->DataOffset = 0; pSMB->SetupCount = 1; pSMB->Reserved3 = 0; pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_FS_INFORMATION); pSMB->InformationLevel = cpu_to_le16(SMB_INFO_ALLOCATION); inc_rfc1001_len(pSMB, byte_count); pSMB->ByteCount = cpu_to_le16(byte_count); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); if (rc) { cFYI(1, "Send error in QFSInfo = %d", rc); } else { /* decode response */ rc = validate_t2((struct smb_t2_rsp *)pSMBr); if (rc || get_bcc(&pSMBr->hdr) < 18) rc = 
-EIO; /* bad smb */ else { __u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset); cFYI(1, "qfsinf resp BCC: %d Offset %d", get_bcc(&pSMBr->hdr), data_offset); response_data = (FILE_SYSTEM_ALLOC_INFO *) (((char *) &pSMBr->hdr.Protocol) + data_offset); FSData->f_bsize = le16_to_cpu(response_data->BytesPerSector) * le32_to_cpu(response_data-> SectorsPerAllocationUnit); FSData->f_blocks = le32_to_cpu(response_data->TotalAllocationUnits); FSData->f_bfree = FSData->f_bavail = le32_to_cpu(response_data->FreeAllocationUnits); cFYI(1, "Blocks: %lld Free: %lld Block size %ld", (unsigned long long)FSData->f_blocks, (unsigned long long)FSData->f_bfree, FSData->f_bsize); } } cifs_buf_release(pSMB); if (rc == -EAGAIN) goto oldQFSInfoRetry; return rc; } int CIFSSMBQFSInfo(const int xid, struct cifs_tcon *tcon, struct kstatfs *FSData) { /* level 0x103 SMB_QUERY_FILE_SYSTEM_INFO */ TRANSACTION2_QFSI_REQ *pSMB = NULL; TRANSACTION2_QFSI_RSP *pSMBr = NULL; FILE_SYSTEM_INFO *response_data; int rc = 0; int bytes_returned = 0; __u16 params, byte_count; cFYI(1, "In QFSInfo"); QFSInfoRetry: rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; params = 2; /* level */ pSMB->TotalDataCount = 0; pSMB->MaxParameterCount = cpu_to_le16(2); pSMB->MaxDataCount = cpu_to_le16(1000); pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Timeout = 0; pSMB->Reserved2 = 0; byte_count = params + 1 /* pad */ ; pSMB->TotalParameterCount = cpu_to_le16(params); pSMB->ParameterCount = pSMB->TotalParameterCount; pSMB->ParameterOffset = cpu_to_le16(offsetof( struct smb_com_transaction2_qfsi_req, InformationLevel) - 4); pSMB->DataCount = 0; pSMB->DataOffset = 0; pSMB->SetupCount = 1; pSMB->Reserved3 = 0; pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_FS_INFORMATION); pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_FS_SIZE_INFO); inc_rfc1001_len(pSMB, byte_count); pSMB->ByteCount = cpu_to_le16(byte_count); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) 
pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); if (rc) { cFYI(1, "Send error in QFSInfo = %d", rc); } else { /* decode response */ rc = validate_t2((struct smb_t2_rsp *)pSMBr); if (rc || get_bcc(&pSMBr->hdr) < 24) rc = -EIO; /* bad smb */ else { __u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset); response_data = (FILE_SYSTEM_INFO *) (((char *) &pSMBr->hdr.Protocol) + data_offset); FSData->f_bsize = le32_to_cpu(response_data->BytesPerSector) * le32_to_cpu(response_data-> SectorsPerAllocationUnit); FSData->f_blocks = le64_to_cpu(response_data->TotalAllocationUnits); FSData->f_bfree = FSData->f_bavail = le64_to_cpu(response_data->FreeAllocationUnits); cFYI(1, "Blocks: %lld Free: %lld Block size %ld", (unsigned long long)FSData->f_blocks, (unsigned long long)FSData->f_bfree, FSData->f_bsize); } } cifs_buf_release(pSMB); if (rc == -EAGAIN) goto QFSInfoRetry; return rc; } int CIFSSMBQFSAttributeInfo(const int xid, struct cifs_tcon *tcon) { /* level 0x105 SMB_QUERY_FILE_SYSTEM_INFO */ TRANSACTION2_QFSI_REQ *pSMB = NULL; TRANSACTION2_QFSI_RSP *pSMBr = NULL; FILE_SYSTEM_ATTRIBUTE_INFO *response_data; int rc = 0; int bytes_returned = 0; __u16 params, byte_count; cFYI(1, "In QFSAttributeInfo"); QFSAttributeRetry: rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; params = 2; /* level */ pSMB->TotalDataCount = 0; pSMB->MaxParameterCount = cpu_to_le16(2); /* BB find exact max SMB PDU from sess structure BB */ pSMB->MaxDataCount = cpu_to_le16(1000); pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Timeout = 0; pSMB->Reserved2 = 0; byte_count = params + 1 /* pad */ ; pSMB->TotalParameterCount = cpu_to_le16(params); pSMB->ParameterCount = pSMB->TotalParameterCount; pSMB->ParameterOffset = cpu_to_le16(offsetof( struct smb_com_transaction2_qfsi_req, InformationLevel) - 4); pSMB->DataCount = 0; pSMB->DataOffset = 0; pSMB->SetupCount = 1; pSMB->Reserved3 = 0; pSMB->SubCommand = 
cpu_to_le16(TRANS2_QUERY_FS_INFORMATION); pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_FS_ATTRIBUTE_INFO); inc_rfc1001_len(pSMB, byte_count); pSMB->ByteCount = cpu_to_le16(byte_count); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); if (rc) { cERROR(1, "Send error in QFSAttributeInfo = %d", rc); } else { /* decode response */ rc = validate_t2((struct smb_t2_rsp *)pSMBr); if (rc || get_bcc(&pSMBr->hdr) < 13) { /* BB also check if enough bytes returned */ rc = -EIO; /* bad smb */ } else { __u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset); response_data = (FILE_SYSTEM_ATTRIBUTE_INFO *) (((char *) &pSMBr->hdr.Protocol) + data_offset); memcpy(&tcon->fsAttrInfo, response_data, sizeof(FILE_SYSTEM_ATTRIBUTE_INFO)); } } cifs_buf_release(pSMB); if (rc == -EAGAIN) goto QFSAttributeRetry; return rc; } int CIFSSMBQFSDeviceInfo(const int xid, struct cifs_tcon *tcon) { /* level 0x104 SMB_QUERY_FILE_SYSTEM_INFO */ TRANSACTION2_QFSI_REQ *pSMB = NULL; TRANSACTION2_QFSI_RSP *pSMBr = NULL; FILE_SYSTEM_DEVICE_INFO *response_data; int rc = 0; int bytes_returned = 0; __u16 params, byte_count; cFYI(1, "In QFSDeviceInfo"); QFSDeviceRetry: rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; params = 2; /* level */ pSMB->TotalDataCount = 0; pSMB->MaxParameterCount = cpu_to_le16(2); /* BB find exact max SMB PDU from sess structure BB */ pSMB->MaxDataCount = cpu_to_le16(1000); pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Timeout = 0; pSMB->Reserved2 = 0; byte_count = params + 1 /* pad */ ; pSMB->TotalParameterCount = cpu_to_le16(params); pSMB->ParameterCount = pSMB->TotalParameterCount; pSMB->ParameterOffset = cpu_to_le16(offsetof( struct smb_com_transaction2_qfsi_req, InformationLevel) - 4); pSMB->DataCount = 0; pSMB->DataOffset = 0; pSMB->SetupCount = 1; pSMB->Reserved3 = 0; pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_FS_INFORMATION); pSMB->InformationLevel = 
cpu_to_le16(SMB_QUERY_FS_DEVICE_INFO); inc_rfc1001_len(pSMB, byte_count); pSMB->ByteCount = cpu_to_le16(byte_count); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); if (rc) { cFYI(1, "Send error in QFSDeviceInfo = %d", rc); } else { /* decode response */ rc = validate_t2((struct smb_t2_rsp *)pSMBr); if (rc || get_bcc(&pSMBr->hdr) < sizeof(FILE_SYSTEM_DEVICE_INFO)) rc = -EIO; /* bad smb */ else { __u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset); response_data = (FILE_SYSTEM_DEVICE_INFO *) (((char *) &pSMBr->hdr.Protocol) + data_offset); memcpy(&tcon->fsDevInfo, response_data, sizeof(FILE_SYSTEM_DEVICE_INFO)); } } cifs_buf_release(pSMB); if (rc == -EAGAIN) goto QFSDeviceRetry; return rc; } int CIFSSMBQFSUnixInfo(const int xid, struct cifs_tcon *tcon) { /* level 0x200 SMB_QUERY_CIFS_UNIX_INFO */ TRANSACTION2_QFSI_REQ *pSMB = NULL; TRANSACTION2_QFSI_RSP *pSMBr = NULL; FILE_SYSTEM_UNIX_INFO *response_data; int rc = 0; int bytes_returned = 0; __u16 params, byte_count; cFYI(1, "In QFSUnixInfo"); QFSUnixRetry: rc = smb_init_no_reconnect(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; params = 2; /* level */ pSMB->TotalDataCount = 0; pSMB->DataCount = 0; pSMB->DataOffset = 0; pSMB->MaxParameterCount = cpu_to_le16(2); /* BB find exact max SMB PDU from sess structure BB */ pSMB->MaxDataCount = cpu_to_le16(100); pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Timeout = 0; pSMB->Reserved2 = 0; byte_count = params + 1 /* pad */ ; pSMB->ParameterCount = cpu_to_le16(params); pSMB->TotalParameterCount = pSMB->ParameterCount; pSMB->ParameterOffset = cpu_to_le16(offsetof(struct smb_com_transaction2_qfsi_req, InformationLevel) - 4); pSMB->SetupCount = 1; pSMB->Reserved3 = 0; pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_FS_INFORMATION); pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_CIFS_UNIX_INFO); inc_rfc1001_len(pSMB, byte_count); pSMB->ByteCount = 
cpu_to_le16(byte_count); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); if (rc) { cERROR(1, "Send error in QFSUnixInfo = %d", rc); } else { /* decode response */ rc = validate_t2((struct smb_t2_rsp *)pSMBr); if (rc || get_bcc(&pSMBr->hdr) < 13) { rc = -EIO; /* bad smb */ } else { __u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset); response_data = (FILE_SYSTEM_UNIX_INFO *) (((char *) &pSMBr->hdr.Protocol) + data_offset); memcpy(&tcon->fsUnixInfo, response_data, sizeof(FILE_SYSTEM_UNIX_INFO)); } } cifs_buf_release(pSMB); if (rc == -EAGAIN) goto QFSUnixRetry; return rc; } int CIFSSMBSetFSUnixInfo(const int xid, struct cifs_tcon *tcon, __u64 cap) { /* level 0x200 SMB_SET_CIFS_UNIX_INFO */ TRANSACTION2_SETFSI_REQ *pSMB = NULL; TRANSACTION2_SETFSI_RSP *pSMBr = NULL; int rc = 0; int bytes_returned = 0; __u16 params, param_offset, offset, byte_count; cFYI(1, "In SETFSUnixInfo"); SETFSUnixRetry: /* BB switch to small buf init to save memory */ rc = smb_init_no_reconnect(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; params = 4; /* 2 bytes zero followed by info level. */ pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Timeout = 0; pSMB->Reserved2 = 0; param_offset = offsetof(struct smb_com_transaction2_setfsi_req, FileNum) - 4; offset = param_offset + params; pSMB->MaxParameterCount = cpu_to_le16(4); /* BB find exact max SMB PDU from sess structure BB */ pSMB->MaxDataCount = cpu_to_le16(100); pSMB->SetupCount = 1; pSMB->Reserved3 = 0; pSMB->SubCommand = cpu_to_le16(TRANS2_SET_FS_INFORMATION); byte_count = 1 /* pad */ + params + 12; pSMB->DataCount = cpu_to_le16(12); pSMB->ParameterCount = cpu_to_le16(params); pSMB->TotalDataCount = pSMB->DataCount; pSMB->TotalParameterCount = pSMB->ParameterCount; pSMB->ParameterOffset = cpu_to_le16(param_offset); pSMB->DataOffset = cpu_to_le16(offset); /* Params. 
*/ pSMB->FileNum = 0; pSMB->InformationLevel = cpu_to_le16(SMB_SET_CIFS_UNIX_INFO); /* Data. */ pSMB->ClientUnixMajor = cpu_to_le16(CIFS_UNIX_MAJOR_VERSION); pSMB->ClientUnixMinor = cpu_to_le16(CIFS_UNIX_MINOR_VERSION); pSMB->ClientUnixCap = cpu_to_le64(cap); inc_rfc1001_len(pSMB, byte_count); pSMB->ByteCount = cpu_to_le16(byte_count); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); if (rc) { cERROR(1, "Send error in SETFSUnixInfo = %d", rc); } else { /* decode response */ rc = validate_t2((struct smb_t2_rsp *)pSMBr); if (rc) rc = -EIO; /* bad smb */ } cifs_buf_release(pSMB); if (rc == -EAGAIN) goto SETFSUnixRetry; return rc; } int CIFSSMBQFSPosixInfo(const int xid, struct cifs_tcon *tcon, struct kstatfs *FSData) { /* level 0x201 SMB_QUERY_CIFS_POSIX_INFO */ TRANSACTION2_QFSI_REQ *pSMB = NULL; TRANSACTION2_QFSI_RSP *pSMBr = NULL; FILE_SYSTEM_POSIX_INFO *response_data; int rc = 0; int bytes_returned = 0; __u16 params, byte_count; cFYI(1, "In QFSPosixInfo"); QFSPosixRetry: rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; params = 2; /* level */ pSMB->TotalDataCount = 0; pSMB->DataCount = 0; pSMB->DataOffset = 0; pSMB->MaxParameterCount = cpu_to_le16(2); /* BB find exact max SMB PDU from sess structure BB */ pSMB->MaxDataCount = cpu_to_le16(100); pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Timeout = 0; pSMB->Reserved2 = 0; byte_count = params + 1 /* pad */ ; pSMB->ParameterCount = cpu_to_le16(params); pSMB->TotalParameterCount = pSMB->ParameterCount; pSMB->ParameterOffset = cpu_to_le16(offsetof(struct smb_com_transaction2_qfsi_req, InformationLevel) - 4); pSMB->SetupCount = 1; pSMB->Reserved3 = 0; pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_FS_INFORMATION); pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_POSIX_FS_INFO); inc_rfc1001_len(pSMB, byte_count); pSMB->ByteCount = cpu_to_le16(byte_count); rc = SendReceive(xid, tcon->ses, (struct 
smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); if (rc) { cFYI(1, "Send error in QFSUnixInfo = %d", rc); } else { /* decode response */ rc = validate_t2((struct smb_t2_rsp *)pSMBr); if (rc || get_bcc(&pSMBr->hdr) < 13) { rc = -EIO; /* bad smb */ } else { __u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset); response_data = (FILE_SYSTEM_POSIX_INFO *) (((char *) &pSMBr->hdr.Protocol) + data_offset); FSData->f_bsize = le32_to_cpu(response_data->BlockSize); FSData->f_blocks = le64_to_cpu(response_data->TotalBlocks); FSData->f_bfree = le64_to_cpu(response_data->BlocksAvail); if (response_data->UserBlocksAvail == cpu_to_le64(-1)) { FSData->f_bavail = FSData->f_bfree; } else { FSData->f_bavail = le64_to_cpu(response_data->UserBlocksAvail); } if (response_data->TotalFileNodes != cpu_to_le64(-1)) FSData->f_files = le64_to_cpu(response_data->TotalFileNodes); if (response_data->FreeFileNodes != cpu_to_le64(-1)) FSData->f_ffree = le64_to_cpu(response_data->FreeFileNodes); } } cifs_buf_release(pSMB); if (rc == -EAGAIN) goto QFSPosixRetry; return rc; } /* We can not use write of zero bytes trick to set file size due to need for large file support. 
Also note that this SetPathInfo is preferred to SetFileInfo based method in next routine which is only needed to work around a sharing violation bug in Samba which this routine can run into */ int CIFSSMBSetEOF(const int xid, struct cifs_tcon *tcon, const char *fileName, __u64 size, bool SetAllocation, const struct nls_table *nls_codepage, int remap) { struct smb_com_transaction2_spi_req *pSMB = NULL; struct smb_com_transaction2_spi_rsp *pSMBr = NULL; struct file_end_of_file_info *parm_data; int name_len; int rc = 0; int bytes_returned = 0; __u16 params, byte_count, data_count, param_offset, offset; cFYI(1, "In SetEOF"); SetEOFRetry: rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { name_len = cifsConvertToUCS((__le16 *) pSMB->FileName, fileName, PATH_MAX, nls_codepage, remap); name_len++; /* trailing null */ name_len *= 2; } else { /* BB improve the check for buffer overruns BB */ name_len = strnlen(fileName, PATH_MAX); name_len++; /* trailing null */ strncpy(pSMB->FileName, fileName, name_len); } params = 6 + name_len; data_count = sizeof(struct file_end_of_file_info); pSMB->MaxParameterCount = cpu_to_le16(2); pSMB->MaxDataCount = cpu_to_le16(4100); pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Timeout = 0; pSMB->Reserved2 = 0; param_offset = offsetof(struct smb_com_transaction2_spi_req, InformationLevel) - 4; offset = param_offset + params; if (SetAllocation) { if (tcon->ses->capabilities & CAP_INFOLEVEL_PASSTHRU) pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_ALLOCATION_INFO2); else pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_ALLOCATION_INFO); } else /* Set File Size */ { if (tcon->ses->capabilities & CAP_INFOLEVEL_PASSTHRU) pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_END_OF_FILE_INFO2); else pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_END_OF_FILE_INFO); } parm_data = (struct file_end_of_file_info *) (((char *) 
&pSMB->hdr.Protocol) + offset); pSMB->ParameterOffset = cpu_to_le16(param_offset); pSMB->DataOffset = cpu_to_le16(offset); pSMB->SetupCount = 1; pSMB->Reserved3 = 0; pSMB->SubCommand = cpu_to_le16(TRANS2_SET_PATH_INFORMATION); byte_count = 3 /* pad */ + params + data_count; pSMB->DataCount = cpu_to_le16(data_count); pSMB->TotalDataCount = pSMB->DataCount; pSMB->ParameterCount = cpu_to_le16(params); pSMB->TotalParameterCount = pSMB->ParameterCount; pSMB->Reserved4 = 0; inc_rfc1001_len(pSMB, byte_count); parm_data->FileSize = cpu_to_le64(size); pSMB->ByteCount = cpu_to_le16(byte_count); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); if (rc) cFYI(1, "SetPathInfo (file size) returned %d", rc); cifs_buf_release(pSMB); if (rc == -EAGAIN) goto SetEOFRetry; return rc; } int CIFSSMBSetFileSize(const int xid, struct cifs_tcon *tcon, __u64 size, __u16 fid, __u32 pid_of_opener, bool SetAllocation) { struct smb_com_transaction2_sfi_req *pSMB = NULL; struct file_end_of_file_info *parm_data; int rc = 0; __u16 params, param_offset, offset, byte_count, count; cFYI(1, "SetFileSize (via SetFileInfo) %lld", (long long)size); rc = small_smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB); if (rc) return rc; pSMB->hdr.Pid = cpu_to_le16((__u16)pid_of_opener); pSMB->hdr.PidHigh = cpu_to_le16((__u16)(pid_of_opener >> 16)); params = 6; pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Timeout = 0; pSMB->Reserved2 = 0; param_offset = offsetof(struct smb_com_transaction2_sfi_req, Fid) - 4; offset = param_offset + params; count = sizeof(struct file_end_of_file_info); pSMB->MaxParameterCount = cpu_to_le16(2); /* BB find exact max SMB PDU from sess structure BB */ pSMB->MaxDataCount = cpu_to_le16(1000); pSMB->SetupCount = 1; pSMB->Reserved3 = 0; pSMB->SubCommand = cpu_to_le16(TRANS2_SET_FILE_INFORMATION); byte_count = 3 /* pad */ + params + count; pSMB->DataCount = cpu_to_le16(count); pSMB->ParameterCount = 
cpu_to_le16(params); pSMB->TotalDataCount = pSMB->DataCount; pSMB->TotalParameterCount = pSMB->ParameterCount; pSMB->ParameterOffset = cpu_to_le16(param_offset); parm_data = (struct file_end_of_file_info *) (((char *) &pSMB->hdr.Protocol) + offset); pSMB->DataOffset = cpu_to_le16(offset); parm_data->FileSize = cpu_to_le64(size); pSMB->Fid = fid; if (SetAllocation) { if (tcon->ses->capabilities & CAP_INFOLEVEL_PASSTHRU) pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_ALLOCATION_INFO2); else pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_ALLOCATION_INFO); } else /* Set File Size */ { if (tcon->ses->capabilities & CAP_INFOLEVEL_PASSTHRU) pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_END_OF_FILE_INFO2); else pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_END_OF_FILE_INFO); } pSMB->Reserved4 = 0; inc_rfc1001_len(pSMB, byte_count); pSMB->ByteCount = cpu_to_le16(byte_count); rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0); if (rc) { cFYI(1, "Send error in SetFileInfo (SetFileSize) = %d", rc); } /* Note: On -EAGAIN error only caller can retry on handle based calls since file handle passed in no longer valid */ return rc; } /* Some legacy servers such as NT4 require that the file times be set on an open handle, rather than by pathname - this is awkward due to potential access conflicts on the open, but it is unavoidable for these old servers since the only other choice is to go from 100 nanosecond DCE time and resort to the original setpathinfo level which takes the ancient DOS time format with 2 second granularity */ int CIFSSMBSetFileInfo(const int xid, struct cifs_tcon *tcon, const FILE_BASIC_INFO *data, __u16 fid, __u32 pid_of_opener) { struct smb_com_transaction2_sfi_req *pSMB = NULL; char *data_offset; int rc = 0; __u16 params, param_offset, offset, byte_count, count; cFYI(1, "Set Times (via SetFileInfo)"); rc = small_smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB); if (rc) return rc; pSMB->hdr.Pid = 
cpu_to_le16((__u16)pid_of_opener); pSMB->hdr.PidHigh = cpu_to_le16((__u16)(pid_of_opener >> 16)); params = 6; pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Timeout = 0; pSMB->Reserved2 = 0; param_offset = offsetof(struct smb_com_transaction2_sfi_req, Fid) - 4; offset = param_offset + params; data_offset = (char *) (&pSMB->hdr.Protocol) + offset; count = sizeof(FILE_BASIC_INFO); pSMB->MaxParameterCount = cpu_to_le16(2); /* BB find max SMB PDU from sess */ pSMB->MaxDataCount = cpu_to_le16(1000); pSMB->SetupCount = 1; pSMB->Reserved3 = 0; pSMB->SubCommand = cpu_to_le16(TRANS2_SET_FILE_INFORMATION); byte_count = 3 /* pad */ + params + count; pSMB->DataCount = cpu_to_le16(count); pSMB->ParameterCount = cpu_to_le16(params); pSMB->TotalDataCount = pSMB->DataCount; pSMB->TotalParameterCount = pSMB->ParameterCount; pSMB->ParameterOffset = cpu_to_le16(param_offset); pSMB->DataOffset = cpu_to_le16(offset); pSMB->Fid = fid; if (tcon->ses->capabilities & CAP_INFOLEVEL_PASSTHRU) pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_BASIC_INFO2); else pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_BASIC_INFO); pSMB->Reserved4 = 0; inc_rfc1001_len(pSMB, byte_count); pSMB->ByteCount = cpu_to_le16(byte_count); memcpy(data_offset, data, sizeof(FILE_BASIC_INFO)); rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0); if (rc) cFYI(1, "Send error in Set Time (SetFileInfo) = %d", rc); /* Note: On -EAGAIN error only caller can retry on handle based calls since file handle passed in no longer valid */ return rc; } int CIFSSMBSetFileDisposition(const int xid, struct cifs_tcon *tcon, bool delete_file, __u16 fid, __u32 pid_of_opener) { struct smb_com_transaction2_sfi_req *pSMB = NULL; char *data_offset; int rc = 0; __u16 params, param_offset, offset, byte_count, count; cFYI(1, "Set File Disposition (via SetFileInfo)"); rc = small_smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB); if (rc) return rc; pSMB->hdr.Pid = cpu_to_le16((__u16)pid_of_opener); 
pSMB->hdr.PidHigh = cpu_to_le16((__u16)(pid_of_opener >> 16)); params = 6; pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Timeout = 0; pSMB->Reserved2 = 0; param_offset = offsetof(struct smb_com_transaction2_sfi_req, Fid) - 4; offset = param_offset + params; data_offset = (char *) (&pSMB->hdr.Protocol) + offset; count = 1; pSMB->MaxParameterCount = cpu_to_le16(2); /* BB find max SMB PDU from sess */ pSMB->MaxDataCount = cpu_to_le16(1000); pSMB->SetupCount = 1; pSMB->Reserved3 = 0; pSMB->SubCommand = cpu_to_le16(TRANS2_SET_FILE_INFORMATION); byte_count = 3 /* pad */ + params + count; pSMB->DataCount = cpu_to_le16(count); pSMB->ParameterCount = cpu_to_le16(params); pSMB->TotalDataCount = pSMB->DataCount; pSMB->TotalParameterCount = pSMB->ParameterCount; pSMB->ParameterOffset = cpu_to_le16(param_offset); pSMB->DataOffset = cpu_to_le16(offset); pSMB->Fid = fid; pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_DISPOSITION_INFO); pSMB->Reserved4 = 0; inc_rfc1001_len(pSMB, byte_count); pSMB->ByteCount = cpu_to_le16(byte_count); *data_offset = delete_file ? 
1 : 0; rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0); if (rc) cFYI(1, "Send error in SetFileDisposition = %d", rc); return rc; } int CIFSSMBSetPathInfo(const int xid, struct cifs_tcon *tcon, const char *fileName, const FILE_BASIC_INFO *data, const struct nls_table *nls_codepage, int remap) { TRANSACTION2_SPI_REQ *pSMB = NULL; TRANSACTION2_SPI_RSP *pSMBr = NULL; int name_len; int rc = 0; int bytes_returned = 0; char *data_offset; __u16 params, param_offset, offset, byte_count, count; cFYI(1, "In SetTimes"); SetTimesRetry: rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { name_len = cifsConvertToUCS((__le16 *) pSMB->FileName, fileName, PATH_MAX, nls_codepage, remap); name_len++; /* trailing null */ name_len *= 2; } else { /* BB improve the check for buffer overruns BB */ name_len = strnlen(fileName, PATH_MAX); name_len++; /* trailing null */ strncpy(pSMB->FileName, fileName, name_len); } params = 6 + name_len; count = sizeof(FILE_BASIC_INFO); pSMB->MaxParameterCount = cpu_to_le16(2); /* BB find max SMB PDU from sess structure BB */ pSMB->MaxDataCount = cpu_to_le16(1000); pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Timeout = 0; pSMB->Reserved2 = 0; param_offset = offsetof(struct smb_com_transaction2_spi_req, InformationLevel) - 4; offset = param_offset + params; data_offset = (char *) (&pSMB->hdr.Protocol) + offset; pSMB->ParameterOffset = cpu_to_le16(param_offset); pSMB->DataOffset = cpu_to_le16(offset); pSMB->SetupCount = 1; pSMB->Reserved3 = 0; pSMB->SubCommand = cpu_to_le16(TRANS2_SET_PATH_INFORMATION); byte_count = 3 /* pad */ + params + count; pSMB->DataCount = cpu_to_le16(count); pSMB->ParameterCount = cpu_to_le16(params); pSMB->TotalDataCount = pSMB->DataCount; pSMB->TotalParameterCount = pSMB->ParameterCount; if (tcon->ses->capabilities & CAP_INFOLEVEL_PASSTHRU) pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_BASIC_INFO2); 
else pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_BASIC_INFO); pSMB->Reserved4 = 0; inc_rfc1001_len(pSMB, byte_count); memcpy(data_offset, data, sizeof(FILE_BASIC_INFO)); pSMB->ByteCount = cpu_to_le16(byte_count); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); if (rc) cFYI(1, "SetPathInfo (times) returned %d", rc); cifs_buf_release(pSMB); if (rc == -EAGAIN) goto SetTimesRetry; return rc; } /* Can not be used to set time stamps yet (due to old DOS time format) */ /* Can be used to set attributes */ #if 0 /* Possibly not needed - since it turns out that strangely NT4 has a bug handling it anyway and NT4 was what we thought it would be needed for Do not delete it until we prove whether needed for Win9x though */ int CIFSSMBSetAttrLegacy(int xid, struct cifs_tcon *tcon, char *fileName, __u16 dos_attrs, const struct nls_table *nls_codepage) { SETATTR_REQ *pSMB = NULL; SETATTR_RSP *pSMBr = NULL; int rc = 0; int bytes_returned; int name_len; cFYI(1, "In SetAttrLegacy"); SetAttrLgcyRetry: rc = smb_init(SMB_COM_SETATTR, 8, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { name_len = ConvertToUCS((__le16 *) pSMB->fileName, fileName, PATH_MAX, nls_codepage); name_len++; /* trailing null */ name_len *= 2; } else { /* BB improve the check for buffer overruns BB */ name_len = strnlen(fileName, PATH_MAX); name_len++; /* trailing null */ strncpy(pSMB->fileName, fileName, name_len); } pSMB->attr = cpu_to_le16(dos_attrs); pSMB->BufferFormat = 0x04; inc_rfc1001_len(pSMB, name_len + 1); pSMB->ByteCount = cpu_to_le16(name_len + 1); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); if (rc) cFYI(1, "Error in LegacySetAttr = %d", rc); cifs_buf_release(pSMB); if (rc == -EAGAIN) goto SetAttrLgcyRetry; return rc; } #endif /* temporarily unneeded SetAttr legacy function */ static void 
cifs_fill_unix_set_info(FILE_UNIX_BASIC_INFO *data_offset, const struct cifs_unix_set_info_args *args) { u64 mode = args->mode; /* * Samba server ignores set of file size to zero due to bugs in some * older clients, but we should be precise - we use SetFileSize to * set file size and do not want to truncate file size to zero * accidentally as happened on one Samba server beta by putting * zero instead of -1 here */ data_offset->EndOfFile = cpu_to_le64(NO_CHANGE_64); data_offset->NumOfBytes = cpu_to_le64(NO_CHANGE_64); data_offset->LastStatusChange = cpu_to_le64(args->ctime); data_offset->LastAccessTime = cpu_to_le64(args->atime); data_offset->LastModificationTime = cpu_to_le64(args->mtime); data_offset->Uid = cpu_to_le64(args->uid); data_offset->Gid = cpu_to_le64(args->gid); /* better to leave device as zero when it is */ data_offset->DevMajor = cpu_to_le64(MAJOR(args->device)); data_offset->DevMinor = cpu_to_le64(MINOR(args->device)); data_offset->Permissions = cpu_to_le64(mode); if (S_ISREG(mode)) data_offset->Type = cpu_to_le32(UNIX_FILE); else if (S_ISDIR(mode)) data_offset->Type = cpu_to_le32(UNIX_DIR); else if (S_ISLNK(mode)) data_offset->Type = cpu_to_le32(UNIX_SYMLINK); else if (S_ISCHR(mode)) data_offset->Type = cpu_to_le32(UNIX_CHARDEV); else if (S_ISBLK(mode)) data_offset->Type = cpu_to_le32(UNIX_BLOCKDEV); else if (S_ISFIFO(mode)) data_offset->Type = cpu_to_le32(UNIX_FIFO); else if (S_ISSOCK(mode)) data_offset->Type = cpu_to_le32(UNIX_SOCKET); } int CIFSSMBUnixSetFileInfo(const int xid, struct cifs_tcon *tcon, const struct cifs_unix_set_info_args *args, u16 fid, u32 pid_of_opener) { struct smb_com_transaction2_sfi_req *pSMB = NULL; FILE_UNIX_BASIC_INFO *data_offset; int rc = 0; u16 params, param_offset, offset, byte_count, count; cFYI(1, "Set Unix Info (via SetFileInfo)"); rc = small_smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB); if (rc) return rc; pSMB->hdr.Pid = cpu_to_le16((__u16)pid_of_opener); pSMB->hdr.PidHigh = 
cpu_to_le16((__u16)(pid_of_opener >> 16)); params = 6; pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Timeout = 0; pSMB->Reserved2 = 0; param_offset = offsetof(struct smb_com_transaction2_sfi_req, Fid) - 4; offset = param_offset + params; data_offset = (FILE_UNIX_BASIC_INFO *) ((char *)(&pSMB->hdr.Protocol) + offset); count = sizeof(FILE_UNIX_BASIC_INFO); pSMB->MaxParameterCount = cpu_to_le16(2); /* BB find max SMB PDU from sess */ pSMB->MaxDataCount = cpu_to_le16(1000); pSMB->SetupCount = 1; pSMB->Reserved3 = 0; pSMB->SubCommand = cpu_to_le16(TRANS2_SET_FILE_INFORMATION); byte_count = 3 /* pad */ + params + count; pSMB->DataCount = cpu_to_le16(count); pSMB->ParameterCount = cpu_to_le16(params); pSMB->TotalDataCount = pSMB->DataCount; pSMB->TotalParameterCount = pSMB->ParameterCount; pSMB->ParameterOffset = cpu_to_le16(param_offset); pSMB->DataOffset = cpu_to_le16(offset); pSMB->Fid = fid; pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_UNIX_BASIC); pSMB->Reserved4 = 0; inc_rfc1001_len(pSMB, byte_count); pSMB->ByteCount = cpu_to_le16(byte_count); cifs_fill_unix_set_info(data_offset, args); rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0); if (rc) cFYI(1, "Send error in Set Time (SetFileInfo) = %d", rc); /* Note: On -EAGAIN error only caller can retry on handle based calls since file handle passed in no longer valid */ return rc; } int CIFSSMBUnixSetPathInfo(const int xid, struct cifs_tcon *tcon, char *fileName, const struct cifs_unix_set_info_args *args, const struct nls_table *nls_codepage, int remap) { TRANSACTION2_SPI_REQ *pSMB = NULL; TRANSACTION2_SPI_RSP *pSMBr = NULL; int name_len; int rc = 0; int bytes_returned = 0; FILE_UNIX_BASIC_INFO *data_offset; __u16 params, param_offset, offset, count, byte_count; cFYI(1, "In SetUID/GID/Mode"); setPermsRetry: rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { name_len = 
cifsConvertToUCS((__le16 *) pSMB->FileName, fileName, PATH_MAX, nls_codepage, remap); name_len++; /* trailing null */ name_len *= 2; } else { /* BB improve the check for buffer overruns BB */ name_len = strnlen(fileName, PATH_MAX); name_len++; /* trailing null */ strncpy(pSMB->FileName, fileName, name_len); } params = 6 + name_len; count = sizeof(FILE_UNIX_BASIC_INFO); pSMB->MaxParameterCount = cpu_to_le16(2); /* BB find max SMB PDU from sess structure BB */ pSMB->MaxDataCount = cpu_to_le16(1000); pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Timeout = 0; pSMB->Reserved2 = 0; param_offset = offsetof(struct smb_com_transaction2_spi_req, InformationLevel) - 4; offset = param_offset + params; data_offset = (FILE_UNIX_BASIC_INFO *) ((char *) &pSMB->hdr.Protocol + offset); memset(data_offset, 0, count); pSMB->DataOffset = cpu_to_le16(offset); pSMB->ParameterOffset = cpu_to_le16(param_offset); pSMB->SetupCount = 1; pSMB->Reserved3 = 0; pSMB->SubCommand = cpu_to_le16(TRANS2_SET_PATH_INFORMATION); byte_count = 3 /* pad */ + params + count; pSMB->ParameterCount = cpu_to_le16(params); pSMB->DataCount = cpu_to_le16(count); pSMB->TotalParameterCount = pSMB->ParameterCount; pSMB->TotalDataCount = pSMB->DataCount; pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_UNIX_BASIC); pSMB->Reserved4 = 0; inc_rfc1001_len(pSMB, byte_count); cifs_fill_unix_set_info(data_offset, args); pSMB->ByteCount = cpu_to_le16(byte_count); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); if (rc) cFYI(1, "SetPathInfo (perms) returned %d", rc); cifs_buf_release(pSMB); if (rc == -EAGAIN) goto setPermsRetry; return rc; } #ifdef CONFIG_CIFS_XATTR /* * Do a path-based QUERY_ALL_EAS call and parse the result. This is a common * function used by listxattr and getxattr type calls. When ea_name is set, * it looks for that attribute name and stuffs that value into the EAData * buffer. 
When ea_name is NULL, it stuffs a list of attribute names into the * buffer. In both cases, the return value is either the length of the * resulting data or a negative error code. If EAData is a NULL pointer then * the data isn't copied to it, but the length is returned. */ ssize_t CIFSSMBQAllEAs(const int xid, struct cifs_tcon *tcon, const unsigned char *searchName, const unsigned char *ea_name, char *EAData, size_t buf_size, const struct nls_table *nls_codepage, int remap) { /* BB assumes one setup word */ TRANSACTION2_QPI_REQ *pSMB = NULL; TRANSACTION2_QPI_RSP *pSMBr = NULL; int rc = 0; int bytes_returned; int list_len; struct fealist *ea_response_data; struct fea *temp_fea; char *temp_ptr; char *end_of_smb; __u16 params, byte_count, data_offset; cFYI(1, "In Query All EAs path %s", searchName); QAllEAsRetry: rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { list_len = cifsConvertToUCS((__le16 *) pSMB->FileName, searchName, PATH_MAX, nls_codepage, remap); list_len++; /* trailing null */ list_len *= 2; } else { /* BB improve the check for buffer overruns BB */ list_len = strnlen(searchName, PATH_MAX); list_len++; /* trailing null */ strncpy(pSMB->FileName, searchName, list_len); } params = 2 /* level */ + 4 /* reserved */ + list_len /* includes NUL */; pSMB->TotalDataCount = 0; pSMB->MaxParameterCount = cpu_to_le16(2); /* BB find exact max SMB PDU from sess structure BB */ pSMB->MaxDataCount = cpu_to_le16(CIFSMaxBufSize); pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Timeout = 0; pSMB->Reserved2 = 0; pSMB->ParameterOffset = cpu_to_le16(offsetof( struct smb_com_transaction2_qpi_req, InformationLevel) - 4); pSMB->DataCount = 0; pSMB->DataOffset = 0; pSMB->SetupCount = 1; pSMB->Reserved3 = 0; pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_PATH_INFORMATION); byte_count = params + 1 /* pad */ ; pSMB->TotalParameterCount = cpu_to_le16(params); 
pSMB->ParameterCount = pSMB->TotalParameterCount; pSMB->InformationLevel = cpu_to_le16(SMB_INFO_QUERY_ALL_EAS); pSMB->Reserved4 = 0; inc_rfc1001_len(pSMB, byte_count); pSMB->ByteCount = cpu_to_le16(byte_count); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); if (rc) { cFYI(1, "Send error in QueryAllEAs = %d", rc); goto QAllEAsOut; } /* BB also check enough total bytes returned */ /* BB we need to improve the validity checking of these trans2 responses */ rc = validate_t2((struct smb_t2_rsp *)pSMBr); if (rc || get_bcc(&pSMBr->hdr) < 4) { rc = -EIO; /* bad smb */ goto QAllEAsOut; } /* check that length of list is not more than bcc */ /* check that each entry does not go beyond length of list */ /* check that each element of each entry does not go beyond end of list */ /* validate_trans2_offsets() */ /* BB check if start of smb + data_offset > &bcc+ bcc */ data_offset = le16_to_cpu(pSMBr->t2.DataOffset); ea_response_data = (struct fealist *) (((char *) &pSMBr->hdr.Protocol) + data_offset); list_len = le32_to_cpu(ea_response_data->list_len); cFYI(1, "ea length %d", list_len); if (list_len <= 8) { cFYI(1, "empty EA list returned from server"); goto QAllEAsOut; } /* make sure list_len doesn't go past end of SMB */ end_of_smb = (char *)pByteArea(&pSMBr->hdr) + get_bcc(&pSMBr->hdr); if ((char *)ea_response_data + list_len > end_of_smb) { cFYI(1, "EA list appears to go beyond SMB"); rc = -EIO; goto QAllEAsOut; } /* account for ea list len */ list_len -= 4; temp_fea = ea_response_data->list; temp_ptr = (char *)temp_fea; while (list_len > 0) { unsigned int name_len; __u16 value_len; list_len -= 4; temp_ptr += 4; /* make sure we can read name_len and value_len */ if (list_len < 0) { cFYI(1, "EA entry goes beyond length of list"); rc = -EIO; goto QAllEAsOut; } name_len = temp_fea->name_len; value_len = le16_to_cpu(temp_fea->value_len); list_len -= name_len + 1 + value_len; if (list_len < 0) { cFYI(1, "EA entry goes beyond 
length of list"); rc = -EIO; goto QAllEAsOut; } if (ea_name) { if (strncmp(ea_name, temp_ptr, name_len) == 0) { temp_ptr += name_len + 1; rc = value_len; if (buf_size == 0) goto QAllEAsOut; if ((size_t)value_len > buf_size) { rc = -ERANGE; goto QAllEAsOut; } memcpy(EAData, temp_ptr, value_len); goto QAllEAsOut; } } else { /* account for prefix user. and trailing null */ rc += (5 + 1 + name_len); if (rc < (int) buf_size) { memcpy(EAData, "user.", 5); EAData += 5; memcpy(EAData, temp_ptr, name_len); EAData += name_len; /* null terminate name */ *EAData = 0; ++EAData; } else if (buf_size == 0) { /* skip copy - calc size only */ } else { /* stop before overrun buffer */ rc = -ERANGE; break; } } temp_ptr += name_len + 1 + value_len; temp_fea = (struct fea *)temp_ptr; } /* didn't find the named attribute */ if (ea_name) rc = -ENODATA; QAllEAsOut: cifs_buf_release(pSMB); if (rc == -EAGAIN) goto QAllEAsRetry; return (ssize_t)rc; } int CIFSSMBSetEA(const int xid, struct cifs_tcon *tcon, const char *fileName, const char *ea_name, const void *ea_value, const __u16 ea_value_len, const struct nls_table *nls_codepage, int remap) { struct smb_com_transaction2_spi_req *pSMB = NULL; struct smb_com_transaction2_spi_rsp *pSMBr = NULL; struct fealist *parm_data; int name_len; int rc = 0; int bytes_returned = 0; __u16 params, param_offset, byte_count, offset, count; cFYI(1, "In SetEA"); SetEARetry: rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { name_len = cifsConvertToUCS((__le16 *) pSMB->FileName, fileName, PATH_MAX, nls_codepage, remap); name_len++; /* trailing null */ name_len *= 2; } else { /* BB improve the check for buffer overruns BB */ name_len = strnlen(fileName, PATH_MAX); name_len++; /* trailing null */ strncpy(pSMB->FileName, fileName, name_len); } params = 6 + name_len; /* done calculating parms using name_len of file name, now use name_len to calculate length of ea name we are 
going to create in the inode xattrs */ if (ea_name == NULL) name_len = 0; else name_len = strnlen(ea_name, 255); count = sizeof(*parm_data) + ea_value_len + name_len; pSMB->MaxParameterCount = cpu_to_le16(2); /* BB find max SMB PDU from sess */ pSMB->MaxDataCount = cpu_to_le16(1000); pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; pSMB->Timeout = 0; pSMB->Reserved2 = 0; param_offset = offsetof(struct smb_com_transaction2_spi_req, InformationLevel) - 4; offset = param_offset + params; pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_EA); parm_data = (struct fealist *) (((char *) &pSMB->hdr.Protocol) + offset); pSMB->ParameterOffset = cpu_to_le16(param_offset); pSMB->DataOffset = cpu_to_le16(offset); pSMB->SetupCount = 1; pSMB->Reserved3 = 0; pSMB->SubCommand = cpu_to_le16(TRANS2_SET_PATH_INFORMATION); byte_count = 3 /* pad */ + params + count; pSMB->DataCount = cpu_to_le16(count); parm_data->list_len = cpu_to_le32(count); parm_data->list[0].EA_flags = 0; /* we checked above that name len is less than 255 */ parm_data->list[0].name_len = (__u8)name_len; /* EA names are always ASCII */ if (ea_name) strncpy(parm_data->list[0].name, ea_name, name_len); parm_data->list[0].name[name_len] = 0; parm_data->list[0].value_len = cpu_to_le16(ea_value_len); /* caller ensures that ea_value_len is less than 64K but we need to ensure that it fits within the smb */ /*BB add length check to see if it would fit in negotiated SMB buffer size BB */ /* if (ea_value_len > buffer_size - 512 (enough for header)) */ if (ea_value_len) memcpy(parm_data->list[0].name+name_len+1, ea_value, ea_value_len); pSMB->TotalDataCount = pSMB->DataCount; pSMB->ParameterCount = cpu_to_le16(params); pSMB->TotalParameterCount = pSMB->ParameterCount; pSMB->Reserved4 = 0; inc_rfc1001_len(pSMB, byte_count); pSMB->ByteCount = cpu_to_le16(byte_count); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, 0); if (rc) cFYI(1, "SetPathInfo (EA) returned 
%d", rc); cifs_buf_release(pSMB); if (rc == -EAGAIN) goto SetEARetry; return rc; } #endif #ifdef CONFIG_CIFS_DNOTIFY_EXPERIMENTAL /* BB unused temporarily */ /* * Years ago the kernel added a "dnotify" function for Samba server, * to allow network clients (such as Windows) to display updated * lists of files in directory listings automatically when * files are added by one user when another user has the * same directory open on their desktop. The Linux cifs kernel * client hooked into the kernel side of this interface for * the same reason, but ironically when the VFS moved from * "dnotify" to "inotify" it became harder to plug in Linux * network file system clients (the most obvious use case * for notify interfaces is when multiple users can update * the contents of the same directory - exactly what network * file systems can do) although the server (Samba) could * still use it. For the short term we leave the worker * function ifdeffed out (below) until inotify is fixed * in the VFS to make it easier to plug in network file * system clients. If inotify turns out to be permanently * incompatible for network fs clients, we could instead simply * expose this config flag by adding a future cifs (and smb2) notify ioctl. 
*/ int CIFSSMBNotify(const int xid, struct cifs_tcon *tcon, const int notify_subdirs, const __u16 netfid, __u32 filter, struct file *pfile, int multishot, const struct nls_table *nls_codepage) { int rc = 0; struct smb_com_transaction_change_notify_req *pSMB = NULL; struct smb_com_ntransaction_change_notify_rsp *pSMBr = NULL; struct dir_notify_req *dnotify_req; int bytes_returned; cFYI(1, "In CIFSSMBNotify for file handle %d", (int)netfid); rc = smb_init(SMB_COM_NT_TRANSACT, 23, tcon, (void **) &pSMB, (void **) &pSMBr); if (rc) return rc; pSMB->TotalParameterCount = 0 ; pSMB->TotalDataCount = 0; pSMB->MaxParameterCount = cpu_to_le32(2); /* BB find exact data count max from sess structure BB */ pSMB->MaxDataCount = 0; /* same in little endian or be */ /* BB VERIFY verify which is correct for above BB */ pSMB->MaxDataCount = cpu_to_le32((tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE) & 0xFFFFFF00); pSMB->MaxSetupCount = 4; pSMB->Reserved = 0; pSMB->ParameterOffset = 0; pSMB->DataCount = 0; pSMB->DataOffset = 0; pSMB->SetupCount = 4; /* single byte does not need le conversion */ pSMB->SubCommand = cpu_to_le16(NT_TRANSACT_NOTIFY_CHANGE); pSMB->ParameterCount = pSMB->TotalParameterCount; if (notify_subdirs) pSMB->WatchTree = 1; /* one byte - no le conversion needed */ pSMB->Reserved2 = 0; pSMB->CompletionFilter = cpu_to_le32(filter); pSMB->Fid = netfid; /* file handle always le */ pSMB->ByteCount = 0; rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *)pSMBr, &bytes_returned, CIFS_ASYNC_OP); if (rc) { cFYI(1, "Error in Notify = %d", rc); } else { /* Add file to outstanding requests */ /* BB change to kmem cache alloc */ dnotify_req = kmalloc( sizeof(struct dir_notify_req), GFP_KERNEL); if (dnotify_req) { dnotify_req->Pid = pSMB->hdr.Pid; dnotify_req->PidHigh = pSMB->hdr.PidHigh; dnotify_req->Mid = pSMB->hdr.Mid; dnotify_req->Tid = pSMB->hdr.Tid; dnotify_req->Uid = pSMB->hdr.Uid; dnotify_req->netfid = netfid; dnotify_req->pfile = pfile; 
dnotify_req->filter = filter; dnotify_req->multishot = multishot; spin_lock(&GlobalMid_Lock); list_add_tail(&dnotify_req->lhead, &GlobalDnotifyReqList); spin_unlock(&GlobalMid_Lock); } else rc = -ENOMEM; } cifs_buf_release(pSMB); return rc; } #endif /* was needed for dnotify, and will be needed for inotify when VFS fix */
gpl-2.0
Cold-D/linux
arch/sh/kernel/cpu/sh5/setup-sh5.c
1750
2743
/* * SH5-101/SH5-103 CPU Setup * * Copyright (C) 2009 Paul Mundt * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/platform_device.h> #include <linux/init.h> #include <linux/serial.h> #include <linux/serial_sci.h> #include <linux/io.h> #include <linux/mm.h> #include <linux/sh_timer.h> #include <asm/addrspace.h> static struct plat_sci_port scif0_platform_data = { .flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP, .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, .type = PORT_SCIF, }; static struct resource scif0_resources[] = { DEFINE_RES_MEM(PHYS_PERIPHERAL_BLOCK + 0x01030000, 0x100), DEFINE_RES_IRQ(39), DEFINE_RES_IRQ(40), DEFINE_RES_IRQ(42), }; static struct platform_device scif0_device = { .name = "sh-sci", .id = 0, .resource = scif0_resources, .num_resources = ARRAY_SIZE(scif0_resources), .dev = { .platform_data = &scif0_platform_data, }, }; static struct resource rtc_resources[] = { [0] = { .start = PHYS_PERIPHERAL_BLOCK + 0x01040000, .end = PHYS_PERIPHERAL_BLOCK + 0x01040000 + 0x58 - 1, .flags = IORESOURCE_IO, }, [1] = { /* Period IRQ */ .start = IRQ_PRI, .flags = IORESOURCE_IRQ, }, [2] = { /* Carry IRQ */ .start = IRQ_CUI, .flags = IORESOURCE_IRQ, }, [3] = { /* Alarm IRQ */ .start = IRQ_ATI, .flags = IORESOURCE_IRQ, }, }; static struct platform_device rtc_device = { .name = "sh-rtc", .id = -1, .num_resources = ARRAY_SIZE(rtc_resources), .resource = rtc_resources, }; #define TMU_BLOCK_OFF 0x01020000 #define TMU_BASE PHYS_PERIPHERAL_BLOCK + TMU_BLOCK_OFF static struct sh_timer_config tmu0_platform_data = { .channels_mask = 7, }; static struct resource tmu0_resources[] = { DEFINE_RES_MEM(TMU_BASE, 0x30), DEFINE_RES_IRQ(IRQ_TUNI0), DEFINE_RES_IRQ(IRQ_TUNI1), DEFINE_RES_IRQ(IRQ_TUNI2), }; static struct platform_device tmu0_device = { .name = "sh-tmu", .id = 0, .dev = { .platform_data = &tmu0_platform_data, }, .resource = tmu0_resources, 
.num_resources = ARRAY_SIZE(tmu0_resources), }; static struct platform_device *sh5_early_devices[] __initdata = { &scif0_device, &tmu0_device, }; static struct platform_device *sh5_devices[] __initdata = { &rtc_device, }; static int __init sh5_devices_setup(void) { int ret; ret = platform_add_devices(sh5_early_devices, ARRAY_SIZE(sh5_early_devices)); if (unlikely(ret != 0)) return ret; return platform_add_devices(sh5_devices, ARRAY_SIZE(sh5_devices)); } arch_initcall(sh5_devices_setup); void __init plat_early_device_setup(void) { early_platform_add_devices(sh5_early_devices, ARRAY_SIZE(sh5_early_devices)); }
gpl-2.0
Altaf-Mahdi/android_kernel_oneplus_msm8996
net/netfilter/nf_conntrack_amanda.c
2518
6249
/* Amanda extension for IP connection tracking * * (C) 2002 by Brian J. Murrell <netfilter@interlinx.bc.ca> * based on HW's ip_conntrack_irc.c as well as other modules * (C) 2006 Patrick McHardy <kaber@trash.net> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/textsearch.h> #include <linux/skbuff.h> #include <linux/in.h> #include <linux/udp.h> #include <linux/netfilter.h> #include <linux/gfp.h> #include <net/netfilter/nf_conntrack.h> #include <net/netfilter/nf_conntrack_expect.h> #include <net/netfilter/nf_conntrack_ecache.h> #include <net/netfilter/nf_conntrack_helper.h> #include <linux/netfilter/nf_conntrack_amanda.h> static unsigned int master_timeout __read_mostly = 300; static char *ts_algo = "kmp"; MODULE_AUTHOR("Brian J. 
Murrell <netfilter@interlinx.bc.ca>"); MODULE_DESCRIPTION("Amanda connection tracking module"); MODULE_LICENSE("GPL"); MODULE_ALIAS("ip_conntrack_amanda"); MODULE_ALIAS_NFCT_HELPER("amanda"); module_param(master_timeout, uint, 0600); MODULE_PARM_DESC(master_timeout, "timeout for the master connection"); module_param(ts_algo, charp, 0400); MODULE_PARM_DESC(ts_algo, "textsearch algorithm to use (default kmp)"); unsigned int (*nf_nat_amanda_hook)(struct sk_buff *skb, enum ip_conntrack_info ctinfo, unsigned int protoff, unsigned int matchoff, unsigned int matchlen, struct nf_conntrack_expect *exp) __read_mostly; EXPORT_SYMBOL_GPL(nf_nat_amanda_hook); enum amanda_strings { SEARCH_CONNECT, SEARCH_NEWLINE, SEARCH_DATA, SEARCH_MESG, SEARCH_INDEX, }; static struct { const char *string; size_t len; struct ts_config *ts; } search[] __read_mostly = { [SEARCH_CONNECT] = { .string = "CONNECT ", .len = 8, }, [SEARCH_NEWLINE] = { .string = "\n", .len = 1, }, [SEARCH_DATA] = { .string = "DATA ", .len = 5, }, [SEARCH_MESG] = { .string = "MESG ", .len = 5, }, [SEARCH_INDEX] = { .string = "INDEX ", .len = 6, }, }; static int amanda_help(struct sk_buff *skb, unsigned int protoff, struct nf_conn *ct, enum ip_conntrack_info ctinfo) { struct ts_state ts; struct nf_conntrack_expect *exp; struct nf_conntrack_tuple *tuple; unsigned int dataoff, start, stop, off, i; char pbuf[sizeof("65535")], *tmp; u_int16_t len; __be16 port; int ret = NF_ACCEPT; typeof(nf_nat_amanda_hook) nf_nat_amanda; /* Only look at packets from the Amanda server */ if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) return NF_ACCEPT; /* increase the UDP timeout of the master connection as replies from * Amanda clients to the server can be quite delayed */ nf_ct_refresh(ct, skb, master_timeout * HZ); /* No data? 
*/ dataoff = protoff + sizeof(struct udphdr); if (dataoff >= skb->len) { net_err_ratelimited("amanda_help: skblen = %u\n", skb->len); return NF_ACCEPT; } memset(&ts, 0, sizeof(ts)); start = skb_find_text(skb, dataoff, skb->len, search[SEARCH_CONNECT].ts, &ts); if (start == UINT_MAX) goto out; start += dataoff + search[SEARCH_CONNECT].len; memset(&ts, 0, sizeof(ts)); stop = skb_find_text(skb, start, skb->len, search[SEARCH_NEWLINE].ts, &ts); if (stop == UINT_MAX) goto out; stop += start; for (i = SEARCH_DATA; i <= SEARCH_INDEX; i++) { memset(&ts, 0, sizeof(ts)); off = skb_find_text(skb, start, stop, search[i].ts, &ts); if (off == UINT_MAX) continue; off += start + search[i].len; len = min_t(unsigned int, sizeof(pbuf) - 1, stop - off); if (skb_copy_bits(skb, off, pbuf, len)) break; pbuf[len] = '\0'; port = htons(simple_strtoul(pbuf, &tmp, 10)); len = tmp - pbuf; if (port == 0 || len > 5) break; exp = nf_ct_expect_alloc(ct); if (exp == NULL) { nf_ct_helper_log(skb, ct, "cannot alloc expectation"); ret = NF_DROP; goto out; } tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple; nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct), &tuple->src.u3, &tuple->dst.u3, IPPROTO_TCP, NULL, &port); nf_nat_amanda = rcu_dereference(nf_nat_amanda_hook); if (nf_nat_amanda && ct->status & IPS_NAT_MASK) ret = nf_nat_amanda(skb, ctinfo, protoff, off - dataoff, len, exp); else if (nf_ct_expect_related(exp) != 0) { nf_ct_helper_log(skb, ct, "cannot add expectation"); ret = NF_DROP; } nf_ct_expect_put(exp); } out: return ret; } static const struct nf_conntrack_expect_policy amanda_exp_policy = { .max_expected = 3, .timeout = 180, }; static struct nf_conntrack_helper amanda_helper[2] __read_mostly = { { .name = "amanda", .me = THIS_MODULE, .help = amanda_help, .tuple.src.l3num = AF_INET, .tuple.src.u.udp.port = cpu_to_be16(10080), .tuple.dst.protonum = IPPROTO_UDP, .expect_policy = &amanda_exp_policy, }, { .name = "amanda", .me = THIS_MODULE, .help = amanda_help, .tuple.src.l3num 
= AF_INET6, .tuple.src.u.udp.port = cpu_to_be16(10080), .tuple.dst.protonum = IPPROTO_UDP, .expect_policy = &amanda_exp_policy, }, }; static void __exit nf_conntrack_amanda_fini(void) { int i; nf_conntrack_helper_unregister(&amanda_helper[0]); nf_conntrack_helper_unregister(&amanda_helper[1]); for (i = 0; i < ARRAY_SIZE(search); i++) textsearch_destroy(search[i].ts); } static int __init nf_conntrack_amanda_init(void) { int ret, i; for (i = 0; i < ARRAY_SIZE(search); i++) { search[i].ts = textsearch_prepare(ts_algo, search[i].string, search[i].len, GFP_KERNEL, TS_AUTOLOAD); if (IS_ERR(search[i].ts)) { ret = PTR_ERR(search[i].ts); goto err1; } } ret = nf_conntrack_helper_register(&amanda_helper[0]); if (ret < 0) goto err1; ret = nf_conntrack_helper_register(&amanda_helper[1]); if (ret < 0) goto err2; return 0; err2: nf_conntrack_helper_unregister(&amanda_helper[0]); err1: while (--i >= 0) textsearch_destroy(search[i].ts); return ret; } module_init(nf_conntrack_amanda_init); module_exit(nf_conntrack_amanda_fini);
gpl-2.0
Dee-UK/D33_KK_Kernel
drivers/media/dvb/dvb-usb/m920x.c
2774
26644
/* DVB USB compliant linux driver for MSI Mega Sky 580 DVB-T USB2.0 receiver * * Copyright (C) 2006 Aapo Tahkola (aet@rasterburn.org) * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation, version 2. * * see Documentation/dvb/README.dvb-usb for more information */ #include "m920x.h" #include "mt352.h" #include "mt352_priv.h" #include "qt1010.h" #include "tda1004x.h" #include "tda827x.h" #include <media/tuner.h> #include "tuner-simple.h" #include <asm/unaligned.h> /* debug */ static int dvb_usb_m920x_debug; module_param_named(debug,dvb_usb_m920x_debug, int, 0644); MODULE_PARM_DESC(debug, "set debugging level (1=rc (or-able))." DVB_USB_DEBUG_STATUS); DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); static int m920x_set_filter(struct dvb_usb_device *d, int type, int idx, int pid); static inline int m920x_read(struct usb_device *udev, u8 request, u16 value, u16 index, void *data, int size) { int ret; ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), request, USB_TYPE_VENDOR | USB_DIR_IN, value, index, data, size, 2000); if (ret < 0) { printk(KERN_INFO "m920x_read = error: %d\n", ret); return ret; } if (ret != size) { deb("m920x_read = no data\n"); return -EIO; } return 0; } static inline int m920x_write(struct usb_device *udev, u8 request, u16 value, u16 index) { int ret; ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), request, USB_TYPE_VENDOR | USB_DIR_OUT, value, index, NULL, 0, 2000); return ret; } static int m920x_init(struct dvb_usb_device *d, struct m920x_inits *rc_seq) { int ret = 0, i, epi, flags = 0; int adap_enabled[M9206_MAX_ADAPTERS] = { 0 }; /* Remote controller init. 
*/ if (d->props.rc.legacy.rc_query) { deb("Initialising remote control\n"); while (rc_seq->address) { if ((ret = m920x_write(d->udev, M9206_CORE, rc_seq->data, rc_seq->address)) != 0) { deb("Initialising remote control failed\n"); return ret; } rc_seq++; } deb("Initialising remote control success\n"); } for (i = 0; i < d->props.num_adapters; i++) flags |= d->adapter[i].props.caps; /* Some devices(Dposh) might crash if we attempt touch at all. */ if (flags & DVB_USB_ADAP_HAS_PID_FILTER) { for (i = 0; i < d->props.num_adapters; i++) { epi = d->adapter[i].props.stream.endpoint - 0x81; if (epi < 0 || epi >= M9206_MAX_ADAPTERS) { printk(KERN_INFO "m920x: Unexpected adapter endpoint!\n"); return -EINVAL; } adap_enabled[epi] = 1; } for (i = 0; i < M9206_MAX_ADAPTERS; i++) { if (adap_enabled[i]) continue; if ((ret = m920x_set_filter(d, 0x81 + i, 0, 0x0)) != 0) return ret; if ((ret = m920x_set_filter(d, 0x81 + i, 0, 0x02f5)) != 0) return ret; } } return ret; } static int m920x_init_ep(struct usb_interface *intf) { struct usb_device *udev = interface_to_usbdev(intf); struct usb_host_interface *alt; if ((alt = usb_altnum_to_altsetting(intf, 1)) == NULL) { deb("No alt found!\n"); return -ENODEV; } return usb_set_interface(udev, alt->desc.bInterfaceNumber, alt->desc.bAlternateSetting); } static int m920x_rc_query(struct dvb_usb_device *d, u32 *event, int *state) { struct m920x_state *m = d->priv; int i, ret = 0; u8 *rc_state; rc_state = kmalloc(2, GFP_KERNEL); if (!rc_state) return -ENOMEM; if ((ret = m920x_read(d->udev, M9206_CORE, 0x0, M9206_RC_STATE, rc_state, 1)) != 0) goto out; if ((ret = m920x_read(d->udev, M9206_CORE, 0x0, M9206_RC_KEY, rc_state + 1, 1)) != 0) goto out; for (i = 0; i < d->props.rc.legacy.rc_map_size; i++) if (rc5_data(&d->props.rc.legacy.rc_map_table[i]) == rc_state[1]) { *event = d->props.rc.legacy.rc_map_table[i].keycode; switch(rc_state[0]) { case 0x80: *state = REMOTE_NO_KEY_PRESSED; goto out; case 0x88: /* framing error or "invalid code" */ case 
0x99: case 0xc0: case 0xd8: *state = REMOTE_NO_KEY_PRESSED; m->rep_count = 0; goto out; case 0x93: case 0x92: case 0x83: /* pinnacle PCTV310e */ case 0x82: m->rep_count = 0; *state = REMOTE_KEY_PRESSED; goto out; case 0x91: case 0x81: /* pinnacle PCTV310e */ /* prevent immediate auto-repeat */ if (++m->rep_count > 2) *state = REMOTE_KEY_REPEAT; else *state = REMOTE_NO_KEY_PRESSED; goto out; default: deb("Unexpected rc state %02x\n", rc_state[0]); *state = REMOTE_NO_KEY_PRESSED; goto out; } } if (rc_state[1] != 0) deb("Unknown rc key %02x\n", rc_state[1]); *state = REMOTE_NO_KEY_PRESSED; out: kfree(rc_state); return ret; } /* I2C */ static int m920x_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], int num) { struct dvb_usb_device *d = i2c_get_adapdata(adap); int i, j; int ret = 0; if (!num) return -EINVAL; if (mutex_lock_interruptible(&d->i2c_mutex) < 0) return -EAGAIN; for (i = 0; i < num; i++) { if (msg[i].flags & (I2C_M_NO_RD_ACK | I2C_M_IGNORE_NAK | I2C_M_TEN) || msg[i].len == 0) { /* For a 0 byte message, I think sending the address * to index 0x80|0x40 would be the correct thing to * do. However, zero byte messages are only used for * probing, and since we don't know how to get the * slave's ack, we can't probe. */ ret = -ENOTSUPP; goto unlock; } /* Send START & address/RW bit */ if (!(msg[i].flags & I2C_M_NOSTART)) { if ((ret = m920x_write(d->udev, M9206_I2C, (msg[i].addr << 1) | (msg[i].flags & I2C_M_RD ? 0x01 : 0), 0x80)) != 0) goto unlock; /* Should check for ack here, if we knew how. */ } if (msg[i].flags & I2C_M_RD) { for (j = 0; j < msg[i].len; j++) { /* Last byte of transaction? * Send STOP, otherwise send ACK. */ int stop = (i+1 == num && j+1 == msg[i].len) ? 0x40 : 0x01; if ((ret = m920x_read(d->udev, M9206_I2C, 0x0, 0x20 | stop, &msg[i].buf[j], 1)) != 0) goto unlock; } } else { for (j = 0; j < msg[i].len; j++) { /* Last byte of transaction? Then send STOP. */ int stop = (i+1 == num && j+1 == msg[i].len) ? 
0x40 : 0x00; if ((ret = m920x_write(d->udev, M9206_I2C, msg[i].buf[j], stop)) != 0) goto unlock; /* Should check for ack here too. */ } } } ret = num; unlock: mutex_unlock(&d->i2c_mutex); return ret; } static u32 m920x_i2c_func(struct i2c_adapter *adapter) { return I2C_FUNC_I2C; } static struct i2c_algorithm m920x_i2c_algo = { .master_xfer = m920x_i2c_xfer, .functionality = m920x_i2c_func, }; /* pid filter */ static int m920x_set_filter(struct dvb_usb_device *d, int type, int idx, int pid) { int ret = 0; if (pid >= 0x8000) return -EINVAL; pid |= 0x8000; if ((ret = m920x_write(d->udev, M9206_FILTER, pid, (type << 8) | (idx * 4) )) != 0) return ret; if ((ret = m920x_write(d->udev, M9206_FILTER, 0, (type << 8) | (idx * 4) )) != 0) return ret; return ret; } static int m920x_update_filters(struct dvb_usb_adapter *adap) { struct m920x_state *m = adap->dev->priv; int enabled = m->filtering_enabled[adap->id]; int i, ret = 0, filter = 0; int ep = adap->props.stream.endpoint; for (i = 0; i < M9206_MAX_FILTERS; i++) if (m->filters[adap->id][i] == 8192) enabled = 0; /* Disable all filters */ if ((ret = m920x_set_filter(adap->dev, ep, 1, enabled)) != 0) return ret; for (i = 0; i < M9206_MAX_FILTERS; i++) if ((ret = m920x_set_filter(adap->dev, ep, i + 2, 0)) != 0) return ret; /* Set */ if (enabled) { for (i = 0; i < M9206_MAX_FILTERS; i++) { if (m->filters[adap->id][i] == 0) continue; if ((ret = m920x_set_filter(adap->dev, ep, filter + 2, m->filters[adap->id][i])) != 0) return ret; filter++; } } return ret; } static int m920x_pid_filter_ctrl(struct dvb_usb_adapter *adap, int onoff) { struct m920x_state *m = adap->dev->priv; m->filtering_enabled[adap->id] = onoff ? 1 : 0; return m920x_update_filters(adap); } static int m920x_pid_filter(struct dvb_usb_adapter *adap, int index, u16 pid, int onoff) { struct m920x_state *m = adap->dev->priv; m->filters[adap->id][index] = onoff ? 
pid : 0; return m920x_update_filters(adap); } static int m920x_firmware_download(struct usb_device *udev, const struct firmware *fw) { u16 value, index, size; u8 *read, *buff; int i, pass, ret = 0; buff = kmalloc(65536, GFP_KERNEL); if (buff == NULL) return -ENOMEM; read = kmalloc(4, GFP_KERNEL); if (!read) { kfree(buff); return -ENOMEM; } if ((ret = m920x_read(udev, M9206_FILTER, 0x0, 0x8000, read, 4)) != 0) goto done; deb("%x %x %x %x\n", read[0], read[1], read[2], read[3]); if ((ret = m920x_read(udev, M9206_FW, 0x0, 0x0, read, 1)) != 0) goto done; deb("%x\n", read[0]); for (pass = 0; pass < 2; pass++) { for (i = 0; i + (sizeof(u16) * 3) < fw->size;) { value = get_unaligned_le16(fw->data + i); i += sizeof(u16); index = get_unaligned_le16(fw->data + i); i += sizeof(u16); size = get_unaligned_le16(fw->data + i); i += sizeof(u16); if (pass == 1) { /* Will stall if using fw->data ... */ memcpy(buff, fw->data + i, size); ret = usb_control_msg(udev, usb_sndctrlpipe(udev,0), M9206_FW, USB_TYPE_VENDOR | USB_DIR_OUT, value, index, buff, size, 20); if (ret != size) { deb("error while uploading fw!\n"); ret = -EIO; goto done; } msleep(3); } i += size; } if (i != fw->size) { deb("bad firmware file!\n"); ret = -EINVAL; goto done; } } msleep(36); /* m920x will disconnect itself from the bus after this. */ (void) m920x_write(udev, M9206_CORE, 0x01, M9206_FW_GO); deb("firmware uploaded!\n"); done: kfree(read); kfree(buff); return ret; } /* Callbacks for DVB USB */ static int m920x_identify_state(struct usb_device *udev, struct dvb_usb_device_properties *props, struct dvb_usb_device_description **desc, int *cold) { struct usb_host_interface *alt; alt = usb_altnum_to_altsetting(usb_ifnum_to_if(udev, 0), 1); *cold = (alt == NULL) ? 
1 : 0; return 0; } /* demod configurations */ static int m920x_mt352_demod_init(struct dvb_frontend *fe) { int ret; u8 config[] = { CONFIG, 0x3d }; u8 clock[] = { CLOCK_CTL, 0x30 }; u8 reset[] = { RESET, 0x80 }; u8 adc_ctl[] = { ADC_CTL_1, 0x40 }; u8 agc[] = { AGC_TARGET, 0x1c, 0x20 }; u8 sec_agc[] = { 0x69, 0x00, 0xff, 0xff, 0x40, 0xff, 0x00, 0x40, 0x40 }; u8 unk1[] = { 0x93, 0x1a }; u8 unk2[] = { 0xb5, 0x7a }; deb("Demod init!\n"); if ((ret = mt352_write(fe, config, ARRAY_SIZE(config))) != 0) return ret; if ((ret = mt352_write(fe, clock, ARRAY_SIZE(clock))) != 0) return ret; if ((ret = mt352_write(fe, reset, ARRAY_SIZE(reset))) != 0) return ret; if ((ret = mt352_write(fe, adc_ctl, ARRAY_SIZE(adc_ctl))) != 0) return ret; if ((ret = mt352_write(fe, agc, ARRAY_SIZE(agc))) != 0) return ret; if ((ret = mt352_write(fe, sec_agc, ARRAY_SIZE(sec_agc))) != 0) return ret; if ((ret = mt352_write(fe, unk1, ARRAY_SIZE(unk1))) != 0) return ret; if ((ret = mt352_write(fe, unk2, ARRAY_SIZE(unk2))) != 0) return ret; return 0; } static struct mt352_config m920x_mt352_config = { .demod_address = 0x0f, .no_tuner = 1, .demod_init = m920x_mt352_demod_init, }; static struct tda1004x_config m920x_tda10046_08_config = { .demod_address = 0x08, .invert = 0, .invert_oclk = 0, .ts_mode = TDA10046_TS_SERIAL, .xtal_freq = TDA10046_XTAL_16M, .if_freq = TDA10046_FREQ_045, .agc_config = TDA10046_AGC_TDA827X, .gpio_config = TDA10046_GPTRI, .request_firmware = NULL, }; static struct tda1004x_config m920x_tda10046_0b_config = { .demod_address = 0x0b, .invert = 0, .invert_oclk = 0, .ts_mode = TDA10046_TS_SERIAL, .xtal_freq = TDA10046_XTAL_16M, .if_freq = TDA10046_FREQ_045, .agc_config = TDA10046_AGC_TDA827X, .gpio_config = TDA10046_GPTRI, .request_firmware = NULL, /* uses firmware EEPROM */ }; /* tuner configurations */ static struct qt1010_config m920x_qt1010_config = { .i2c_address = 0x62 }; /* Callbacks for DVB USB */ static int m920x_mt352_frontend_attach(struct dvb_usb_adapter *adap) { 
deb("%s\n",__func__); if ((adap->fe = dvb_attach(mt352_attach, &m920x_mt352_config, &adap->dev->i2c_adap)) == NULL) return -EIO; return 0; } static int m920x_tda10046_08_frontend_attach(struct dvb_usb_adapter *adap) { deb("%s\n",__func__); if ((adap->fe = dvb_attach(tda10046_attach, &m920x_tda10046_08_config, &adap->dev->i2c_adap)) == NULL) return -EIO; return 0; } static int m920x_tda10046_0b_frontend_attach(struct dvb_usb_adapter *adap) { deb("%s\n",__func__); if ((adap->fe = dvb_attach(tda10046_attach, &m920x_tda10046_0b_config, &adap->dev->i2c_adap)) == NULL) return -EIO; return 0; } static int m920x_qt1010_tuner_attach(struct dvb_usb_adapter *adap) { deb("%s\n",__func__); if (dvb_attach(qt1010_attach, adap->fe, &adap->dev->i2c_adap, &m920x_qt1010_config) == NULL) return -ENODEV; return 0; } static int m920x_tda8275_60_tuner_attach(struct dvb_usb_adapter *adap) { deb("%s\n",__func__); if (dvb_attach(tda827x_attach, adap->fe, 0x60, &adap->dev->i2c_adap, NULL) == NULL) return -ENODEV; return 0; } static int m920x_tda8275_61_tuner_attach(struct dvb_usb_adapter *adap) { deb("%s\n",__func__); if (dvb_attach(tda827x_attach, adap->fe, 0x61, &adap->dev->i2c_adap, NULL) == NULL) return -ENODEV; return 0; } static int m920x_fmd1216me_tuner_attach(struct dvb_usb_adapter *adap) { dvb_attach(simple_tuner_attach, adap->fe, &adap->dev->i2c_adap, 0x61, TUNER_PHILIPS_FMD1216ME_MK3); return 0; } /* device-specific initialization */ static struct m920x_inits megasky_rc_init [] = { { M9206_RC_INIT2, 0xa8 }, { M9206_RC_INIT1, 0x51 }, { } /* terminating entry */ }; static struct m920x_inits tvwalkertwin_rc_init [] = { { M9206_RC_INIT2, 0x00 }, { M9206_RC_INIT1, 0xef }, { 0xff28, 0x00 }, { 0xff23, 0x00 }, { 0xff21, 0x30 }, { } /* terminating entry */ }; static struct m920x_inits pinnacle310e_init[] = { /* without these the tuner don't work */ { 0xff20, 0x9b }, { 0xff22, 0x70 }, /* rc settings */ { 0xff50, 0x80 }, { M9206_RC_INIT1, 0x00 }, { M9206_RC_INIT2, 0xff }, { } /* terminating 
entry */ }; /* ir keymaps */ static struct rc_map_table rc_map_megasky_table[] = { { 0x0012, KEY_POWER }, { 0x001e, KEY_CYCLEWINDOWS }, /* min/max */ { 0x0002, KEY_CHANNELUP }, { 0x0005, KEY_CHANNELDOWN }, { 0x0003, KEY_VOLUMEUP }, { 0x0006, KEY_VOLUMEDOWN }, { 0x0004, KEY_MUTE }, { 0x0007, KEY_OK }, /* TS */ { 0x0008, KEY_STOP }, { 0x0009, KEY_MENU }, /* swap */ { 0x000a, KEY_REWIND }, { 0x001b, KEY_PAUSE }, { 0x001f, KEY_FASTFORWARD }, { 0x000c, KEY_RECORD }, { 0x000d, KEY_CAMERA }, /* screenshot */ { 0x000e, KEY_COFFEE }, /* "MTS" */ }; static struct rc_map_table rc_map_tvwalkertwin_table[] = { { 0x0001, KEY_ZOOM }, /* Full Screen */ { 0x0002, KEY_CAMERA }, /* snapshot */ { 0x0003, KEY_MUTE }, { 0x0004, KEY_REWIND }, { 0x0005, KEY_PLAYPAUSE }, /* Play/Pause */ { 0x0006, KEY_FASTFORWARD }, { 0x0007, KEY_RECORD }, { 0x0008, KEY_STOP }, { 0x0009, KEY_TIME }, /* Timeshift */ { 0x000c, KEY_COFFEE }, /* Recall */ { 0x000e, KEY_CHANNELUP }, { 0x0012, KEY_POWER }, { 0x0015, KEY_MENU }, /* source */ { 0x0018, KEY_CYCLEWINDOWS }, /* TWIN PIP */ { 0x001a, KEY_CHANNELDOWN }, { 0x001b, KEY_VOLUMEDOWN }, { 0x001e, KEY_VOLUMEUP }, }; static struct rc_map_table rc_map_pinnacle310e_table[] = { { 0x16, KEY_POWER }, { 0x17, KEY_FAVORITES }, { 0x0f, KEY_TEXT }, { 0x48, KEY_PROGRAM }, /* preview */ { 0x1c, KEY_EPG }, { 0x04, KEY_LIST }, /* record list */ { 0x03, KEY_1 }, { 0x01, KEY_2 }, { 0x06, KEY_3 }, { 0x09, KEY_4 }, { 0x1d, KEY_5 }, { 0x1f, KEY_6 }, { 0x0d, KEY_7 }, { 0x19, KEY_8 }, { 0x1b, KEY_9 }, { 0x15, KEY_0 }, { 0x0c, KEY_CANCEL }, { 0x4a, KEY_CLEAR }, { 0x13, KEY_BACK }, { 0x00, KEY_TAB }, { 0x4b, KEY_UP }, { 0x4e, KEY_LEFT }, { 0x52, KEY_RIGHT }, { 0x51, KEY_DOWN }, { 0x4f, KEY_ENTER }, /* could also be KEY_OK */ { 0x1e, KEY_VOLUMEUP }, { 0x0a, KEY_VOLUMEDOWN }, { 0x05, KEY_CHANNELUP }, { 0x02, KEY_CHANNELDOWN }, { 0x11, KEY_RECORD }, { 0x14, KEY_PLAY }, { 0x4c, KEY_PAUSE }, { 0x1a, KEY_STOP }, { 0x40, KEY_REWIND }, { 0x12, KEY_FASTFORWARD }, { 0x41, KEY_PREVIOUSSONG }, 
/* Replay */ { 0x42, KEY_NEXTSONG }, /* Skip */ { 0x54, KEY_CAMERA }, /* Capture */ /* { 0x50, KEY_SAP }, */ /* Sap */ { 0x47, KEY_CYCLEWINDOWS }, /* Pip */ { 0x4d, KEY_SCREEN }, /* FullScreen */ { 0x08, KEY_SUBTITLE }, { 0x0e, KEY_MUTE }, /* { 0x49, KEY_LR }, */ /* L/R */ { 0x07, KEY_SLEEP }, /* Hibernate */ { 0x08, KEY_VIDEO }, /* A/V */ { 0x0e, KEY_MENU }, /* Recall */ { 0x45, KEY_ZOOMIN }, { 0x46, KEY_ZOOMOUT }, { 0x18, KEY_RED }, /* Red */ { 0x53, KEY_GREEN }, /* Green */ { 0x5e, KEY_YELLOW }, /* Yellow */ { 0x5f, KEY_BLUE }, /* Blue */ }; /* DVB USB Driver stuff */ static struct dvb_usb_device_properties megasky_properties; static struct dvb_usb_device_properties digivox_mini_ii_properties; static struct dvb_usb_device_properties tvwalkertwin_properties; static struct dvb_usb_device_properties dposh_properties; static struct dvb_usb_device_properties pinnacle_pctv310e_properties; static int m920x_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct dvb_usb_device *d = NULL; int ret; struct m920x_inits *rc_init_seq = NULL; int bInterfaceNumber = intf->cur_altsetting->desc.bInterfaceNumber; deb("Probing for m920x device at interface %d\n", bInterfaceNumber); if (bInterfaceNumber == 0) { /* Single-tuner device, or first interface on * multi-tuner device */ ret = dvb_usb_device_init(intf, &megasky_properties, THIS_MODULE, &d, adapter_nr); if (ret == 0) { rc_init_seq = megasky_rc_init; goto found; } ret = dvb_usb_device_init(intf, &digivox_mini_ii_properties, THIS_MODULE, &d, adapter_nr); if (ret == 0) { /* No remote control, so no rc_init_seq */ goto found; } /* This configures both tuners on the TV Walker Twin */ ret = dvb_usb_device_init(intf, &tvwalkertwin_properties, THIS_MODULE, &d, adapter_nr); if (ret == 0) { rc_init_seq = tvwalkertwin_rc_init; goto found; } ret = dvb_usb_device_init(intf, &dposh_properties, THIS_MODULE, &d, adapter_nr); if (ret == 0) { /* Remote controller not supported yet. 
*/ goto found; } ret = dvb_usb_device_init(intf, &pinnacle_pctv310e_properties, THIS_MODULE, &d, adapter_nr); if (ret == 0) { rc_init_seq = pinnacle310e_init; goto found; } return ret; } else { /* Another interface on a multi-tuner device */ /* The LifeView TV Walker Twin gets here, but struct * tvwalkertwin_properties already configured both * tuners, so there is nothing for us to do here */ } found: if ((ret = m920x_init_ep(intf)) < 0) return ret; if (d && (ret = m920x_init(d, rc_init_seq)) != 0) return ret; return ret; } static struct usb_device_id m920x_table [] = { { USB_DEVICE(USB_VID_MSI, USB_PID_MSI_MEGASKY580) }, { USB_DEVICE(USB_VID_ANUBIS_ELECTRONIC, USB_PID_MSI_DIGI_VOX_MINI_II) }, { USB_DEVICE(USB_VID_ANUBIS_ELECTRONIC, USB_PID_LIFEVIEW_TV_WALKER_TWIN_COLD) }, { USB_DEVICE(USB_VID_ANUBIS_ELECTRONIC, USB_PID_LIFEVIEW_TV_WALKER_TWIN_WARM) }, { USB_DEVICE(USB_VID_DPOSH, USB_PID_DPOSH_M9206_COLD) }, { USB_DEVICE(USB_VID_DPOSH, USB_PID_DPOSH_M9206_WARM) }, { USB_DEVICE(USB_VID_VISIONPLUS, USB_PID_PINNACLE_PCTV310E) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE (usb, m920x_table); static struct dvb_usb_device_properties megasky_properties = { .caps = DVB_USB_IS_AN_I2C_ADAPTER, .usb_ctrl = DEVICE_SPECIFIC, .firmware = "dvb-usb-megasky-02.fw", .download_firmware = m920x_firmware_download, .rc.legacy = { .rc_interval = 100, .rc_map_table = rc_map_megasky_table, .rc_map_size = ARRAY_SIZE(rc_map_megasky_table), .rc_query = m920x_rc_query, }, .size_of_priv = sizeof(struct m920x_state), .identify_state = m920x_identify_state, .num_adapters = 1, .adapter = {{ .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, .pid_filter_count = 8, .pid_filter = m920x_pid_filter, .pid_filter_ctrl = m920x_pid_filter_ctrl, .frontend_attach = m920x_mt352_frontend_attach, .tuner_attach = m920x_qt1010_tuner_attach, .stream = { .type = USB_BULK, .count = 8, .endpoint = 0x81, .u = { .bulk = { .buffersize = 512, } } }, }}, .i2c_algo = &m920x_i2c_algo, 
.num_device_descs = 1, .devices = { { "MSI Mega Sky 580 DVB-T USB2.0", { &m920x_table[0], NULL }, { NULL }, } } }; static struct dvb_usb_device_properties digivox_mini_ii_properties = { .caps = DVB_USB_IS_AN_I2C_ADAPTER, .usb_ctrl = DEVICE_SPECIFIC, .firmware = "dvb-usb-digivox-02.fw", .download_firmware = m920x_firmware_download, .size_of_priv = sizeof(struct m920x_state), .identify_state = m920x_identify_state, .num_adapters = 1, .adapter = {{ .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, .pid_filter_count = 8, .pid_filter = m920x_pid_filter, .pid_filter_ctrl = m920x_pid_filter_ctrl, .frontend_attach = m920x_tda10046_08_frontend_attach, .tuner_attach = m920x_tda8275_60_tuner_attach, .stream = { .type = USB_BULK, .count = 8, .endpoint = 0x81, .u = { .bulk = { .buffersize = 0x4000, } } }, }}, .i2c_algo = &m920x_i2c_algo, .num_device_descs = 1, .devices = { { "MSI DIGI VOX mini II DVB-T USB2.0", { &m920x_table[1], NULL }, { NULL }, }, } }; /* LifeView TV Walker Twin support by Nick Andrew <nick@nick-andrew.net> * * LifeView TV Walker Twin has 1 x M9206, 2 x TDA10046, 2 x TDA8275A * TDA10046 #0 is located at i2c address 0x08 * TDA10046 #1 is located at i2c address 0x0b * TDA8275A #0 is located at i2c address 0x60 * TDA8275A #1 is located at i2c address 0x61 */ static struct dvb_usb_device_properties tvwalkertwin_properties = { .caps = DVB_USB_IS_AN_I2C_ADAPTER, .usb_ctrl = DEVICE_SPECIFIC, .firmware = "dvb-usb-tvwalkert.fw", .download_firmware = m920x_firmware_download, .rc.legacy = { .rc_interval = 100, .rc_map_table = rc_map_tvwalkertwin_table, .rc_map_size = ARRAY_SIZE(rc_map_tvwalkertwin_table), .rc_query = m920x_rc_query, }, .size_of_priv = sizeof(struct m920x_state), .identify_state = m920x_identify_state, .num_adapters = 2, .adapter = {{ .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, .pid_filter_count = 8, .pid_filter = m920x_pid_filter, .pid_filter_ctrl = m920x_pid_filter_ctrl, 
.frontend_attach = m920x_tda10046_08_frontend_attach, .tuner_attach = m920x_tda8275_60_tuner_attach, .stream = { .type = USB_BULK, .count = 8, .endpoint = 0x81, .u = { .bulk = { .buffersize = 512, } } }},{ .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, .pid_filter_count = 8, .pid_filter = m920x_pid_filter, .pid_filter_ctrl = m920x_pid_filter_ctrl, .frontend_attach = m920x_tda10046_0b_frontend_attach, .tuner_attach = m920x_tda8275_61_tuner_attach, .stream = { .type = USB_BULK, .count = 8, .endpoint = 0x82, .u = { .bulk = { .buffersize = 512, } } }, }}, .i2c_algo = &m920x_i2c_algo, .num_device_descs = 1, .devices = { { .name = "LifeView TV Walker Twin DVB-T USB2.0", .cold_ids = { &m920x_table[2], NULL }, .warm_ids = { &m920x_table[3], NULL }, }, } }; static struct dvb_usb_device_properties dposh_properties = { .caps = DVB_USB_IS_AN_I2C_ADAPTER, .usb_ctrl = DEVICE_SPECIFIC, .firmware = "dvb-usb-dposh-01.fw", .download_firmware = m920x_firmware_download, .size_of_priv = sizeof(struct m920x_state), .identify_state = m920x_identify_state, .num_adapters = 1, .adapter = {{ /* Hardware pid filters don't work with this device/firmware */ .frontend_attach = m920x_mt352_frontend_attach, .tuner_attach = m920x_qt1010_tuner_attach, .stream = { .type = USB_BULK, .count = 8, .endpoint = 0x81, .u = { .bulk = { .buffersize = 512, } } }, }}, .i2c_algo = &m920x_i2c_algo, .num_device_descs = 1, .devices = { { .name = "Dposh DVB-T USB2.0", .cold_ids = { &m920x_table[4], NULL }, .warm_ids = { &m920x_table[5], NULL }, }, } }; static struct dvb_usb_device_properties pinnacle_pctv310e_properties = { .caps = DVB_USB_IS_AN_I2C_ADAPTER, .usb_ctrl = DEVICE_SPECIFIC, .download_firmware = NULL, .rc.legacy = { .rc_interval = 100, .rc_map_table = rc_map_pinnacle310e_table, .rc_map_size = ARRAY_SIZE(rc_map_pinnacle310e_table), .rc_query = m920x_rc_query, }, .size_of_priv = sizeof(struct m920x_state), .identify_state = m920x_identify_state, .num_adapters = 1, .adapter 
= {{ .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, .pid_filter_count = 8, .pid_filter = m920x_pid_filter, .pid_filter_ctrl = m920x_pid_filter_ctrl, .frontend_attach = m920x_mt352_frontend_attach, .tuner_attach = m920x_fmd1216me_tuner_attach, .stream = { .type = USB_ISOC, .count = 5, .endpoint = 0x84, .u = { .isoc = { .framesperurb = 128, .framesize = 564, .interval = 1, } } }, } }, .i2c_algo = &m920x_i2c_algo, .num_device_descs = 1, .devices = { { "Pinnacle PCTV 310e", { &m920x_table[6], NULL }, { NULL }, } } }; static struct usb_driver m920x_driver = { .name = "dvb_usb_m920x", .probe = m920x_probe, .disconnect = dvb_usb_device_exit, .id_table = m920x_table, }; /* module stuff */ static int __init m920x_module_init(void) { int ret; if ((ret = usb_register(&m920x_driver))) { err("usb_register failed. Error number %d", ret); return ret; } return 0; } static void __exit m920x_module_exit(void) { /* deregister this driver from the USB subsystem */ usb_deregister(&m920x_driver); } module_init (m920x_module_init); module_exit (m920x_module_exit); MODULE_AUTHOR("Aapo Tahkola <aet@rasterburn.org>"); MODULE_DESCRIPTION("DVB Driver for ULI M920x"); MODULE_VERSION("0.1"); MODULE_LICENSE("GPL"); /* * Local variables: * c-basic-offset: 8 */
gpl-2.0
Ezekeel/GLaDOS-nexus-7
drivers/media/dvb/dvb-usb/m920x.c
2774
26644
/* DVB USB compliant linux driver for MSI Mega Sky 580 DVB-T USB2.0 receiver * * Copyright (C) 2006 Aapo Tahkola (aet@rasterburn.org) * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation, version 2. * * see Documentation/dvb/README.dvb-usb for more information */ #include "m920x.h" #include "mt352.h" #include "mt352_priv.h" #include "qt1010.h" #include "tda1004x.h" #include "tda827x.h" #include <media/tuner.h> #include "tuner-simple.h" #include <asm/unaligned.h> /* debug */ static int dvb_usb_m920x_debug; module_param_named(debug,dvb_usb_m920x_debug, int, 0644); MODULE_PARM_DESC(debug, "set debugging level (1=rc (or-able))." DVB_USB_DEBUG_STATUS); DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); static int m920x_set_filter(struct dvb_usb_device *d, int type, int idx, int pid); static inline int m920x_read(struct usb_device *udev, u8 request, u16 value, u16 index, void *data, int size) { int ret; ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), request, USB_TYPE_VENDOR | USB_DIR_IN, value, index, data, size, 2000); if (ret < 0) { printk(KERN_INFO "m920x_read = error: %d\n", ret); return ret; } if (ret != size) { deb("m920x_read = no data\n"); return -EIO; } return 0; } static inline int m920x_write(struct usb_device *udev, u8 request, u16 value, u16 index) { int ret; ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), request, USB_TYPE_VENDOR | USB_DIR_OUT, value, index, NULL, 0, 2000); return ret; } static int m920x_init(struct dvb_usb_device *d, struct m920x_inits *rc_seq) { int ret = 0, i, epi, flags = 0; int adap_enabled[M9206_MAX_ADAPTERS] = { 0 }; /* Remote controller init. 
*/ if (d->props.rc.legacy.rc_query) { deb("Initialising remote control\n"); while (rc_seq->address) { if ((ret = m920x_write(d->udev, M9206_CORE, rc_seq->data, rc_seq->address)) != 0) { deb("Initialising remote control failed\n"); return ret; } rc_seq++; } deb("Initialising remote control success\n"); } for (i = 0; i < d->props.num_adapters; i++) flags |= d->adapter[i].props.caps; /* Some devices(Dposh) might crash if we attempt touch at all. */ if (flags & DVB_USB_ADAP_HAS_PID_FILTER) { for (i = 0; i < d->props.num_adapters; i++) { epi = d->adapter[i].props.stream.endpoint - 0x81; if (epi < 0 || epi >= M9206_MAX_ADAPTERS) { printk(KERN_INFO "m920x: Unexpected adapter endpoint!\n"); return -EINVAL; } adap_enabled[epi] = 1; } for (i = 0; i < M9206_MAX_ADAPTERS; i++) { if (adap_enabled[i]) continue; if ((ret = m920x_set_filter(d, 0x81 + i, 0, 0x0)) != 0) return ret; if ((ret = m920x_set_filter(d, 0x81 + i, 0, 0x02f5)) != 0) return ret; } } return ret; } static int m920x_init_ep(struct usb_interface *intf) { struct usb_device *udev = interface_to_usbdev(intf); struct usb_host_interface *alt; if ((alt = usb_altnum_to_altsetting(intf, 1)) == NULL) { deb("No alt found!\n"); return -ENODEV; } return usb_set_interface(udev, alt->desc.bInterfaceNumber, alt->desc.bAlternateSetting); } static int m920x_rc_query(struct dvb_usb_device *d, u32 *event, int *state) { struct m920x_state *m = d->priv; int i, ret = 0; u8 *rc_state; rc_state = kmalloc(2, GFP_KERNEL); if (!rc_state) return -ENOMEM; if ((ret = m920x_read(d->udev, M9206_CORE, 0x0, M9206_RC_STATE, rc_state, 1)) != 0) goto out; if ((ret = m920x_read(d->udev, M9206_CORE, 0x0, M9206_RC_KEY, rc_state + 1, 1)) != 0) goto out; for (i = 0; i < d->props.rc.legacy.rc_map_size; i++) if (rc5_data(&d->props.rc.legacy.rc_map_table[i]) == rc_state[1]) { *event = d->props.rc.legacy.rc_map_table[i].keycode; switch(rc_state[0]) { case 0x80: *state = REMOTE_NO_KEY_PRESSED; goto out; case 0x88: /* framing error or "invalid code" */ case 
0x99: case 0xc0: case 0xd8: *state = REMOTE_NO_KEY_PRESSED; m->rep_count = 0; goto out; case 0x93: case 0x92: case 0x83: /* pinnacle PCTV310e */ case 0x82: m->rep_count = 0; *state = REMOTE_KEY_PRESSED; goto out; case 0x91: case 0x81: /* pinnacle PCTV310e */ /* prevent immediate auto-repeat */ if (++m->rep_count > 2) *state = REMOTE_KEY_REPEAT; else *state = REMOTE_NO_KEY_PRESSED; goto out; default: deb("Unexpected rc state %02x\n", rc_state[0]); *state = REMOTE_NO_KEY_PRESSED; goto out; } } if (rc_state[1] != 0) deb("Unknown rc key %02x\n", rc_state[1]); *state = REMOTE_NO_KEY_PRESSED; out: kfree(rc_state); return ret; } /* I2C */ static int m920x_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], int num) { struct dvb_usb_device *d = i2c_get_adapdata(adap); int i, j; int ret = 0; if (!num) return -EINVAL; if (mutex_lock_interruptible(&d->i2c_mutex) < 0) return -EAGAIN; for (i = 0; i < num; i++) { if (msg[i].flags & (I2C_M_NO_RD_ACK | I2C_M_IGNORE_NAK | I2C_M_TEN) || msg[i].len == 0) { /* For a 0 byte message, I think sending the address * to index 0x80|0x40 would be the correct thing to * do. However, zero byte messages are only used for * probing, and since we don't know how to get the * slave's ack, we can't probe. */ ret = -ENOTSUPP; goto unlock; } /* Send START & address/RW bit */ if (!(msg[i].flags & I2C_M_NOSTART)) { if ((ret = m920x_write(d->udev, M9206_I2C, (msg[i].addr << 1) | (msg[i].flags & I2C_M_RD ? 0x01 : 0), 0x80)) != 0) goto unlock; /* Should check for ack here, if we knew how. */ } if (msg[i].flags & I2C_M_RD) { for (j = 0; j < msg[i].len; j++) { /* Last byte of transaction? * Send STOP, otherwise send ACK. */ int stop = (i+1 == num && j+1 == msg[i].len) ? 0x40 : 0x01; if ((ret = m920x_read(d->udev, M9206_I2C, 0x0, 0x20 | stop, &msg[i].buf[j], 1)) != 0) goto unlock; } } else { for (j = 0; j < msg[i].len; j++) { /* Last byte of transaction? Then send STOP. */ int stop = (i+1 == num && j+1 == msg[i].len) ? 
0x40 : 0x00; if ((ret = m920x_write(d->udev, M9206_I2C, msg[i].buf[j], stop)) != 0) goto unlock; /* Should check for ack here too. */ } } } ret = num; unlock: mutex_unlock(&d->i2c_mutex); return ret; } static u32 m920x_i2c_func(struct i2c_adapter *adapter) { return I2C_FUNC_I2C; } static struct i2c_algorithm m920x_i2c_algo = { .master_xfer = m920x_i2c_xfer, .functionality = m920x_i2c_func, }; /* pid filter */ static int m920x_set_filter(struct dvb_usb_device *d, int type, int idx, int pid) { int ret = 0; if (pid >= 0x8000) return -EINVAL; pid |= 0x8000; if ((ret = m920x_write(d->udev, M9206_FILTER, pid, (type << 8) | (idx * 4) )) != 0) return ret; if ((ret = m920x_write(d->udev, M9206_FILTER, 0, (type << 8) | (idx * 4) )) != 0) return ret; return ret; } static int m920x_update_filters(struct dvb_usb_adapter *adap) { struct m920x_state *m = adap->dev->priv; int enabled = m->filtering_enabled[adap->id]; int i, ret = 0, filter = 0; int ep = adap->props.stream.endpoint; for (i = 0; i < M9206_MAX_FILTERS; i++) if (m->filters[adap->id][i] == 8192) enabled = 0; /* Disable all filters */ if ((ret = m920x_set_filter(adap->dev, ep, 1, enabled)) != 0) return ret; for (i = 0; i < M9206_MAX_FILTERS; i++) if ((ret = m920x_set_filter(adap->dev, ep, i + 2, 0)) != 0) return ret; /* Set */ if (enabled) { for (i = 0; i < M9206_MAX_FILTERS; i++) { if (m->filters[adap->id][i] == 0) continue; if ((ret = m920x_set_filter(adap->dev, ep, filter + 2, m->filters[adap->id][i])) != 0) return ret; filter++; } } return ret; } static int m920x_pid_filter_ctrl(struct dvb_usb_adapter *adap, int onoff) { struct m920x_state *m = adap->dev->priv; m->filtering_enabled[adap->id] = onoff ? 1 : 0; return m920x_update_filters(adap); } static int m920x_pid_filter(struct dvb_usb_adapter *adap, int index, u16 pid, int onoff) { struct m920x_state *m = adap->dev->priv; m->filters[adap->id][index] = onoff ? 
pid : 0; return m920x_update_filters(adap); } static int m920x_firmware_download(struct usb_device *udev, const struct firmware *fw) { u16 value, index, size; u8 *read, *buff; int i, pass, ret = 0; buff = kmalloc(65536, GFP_KERNEL); if (buff == NULL) return -ENOMEM; read = kmalloc(4, GFP_KERNEL); if (!read) { kfree(buff); return -ENOMEM; } if ((ret = m920x_read(udev, M9206_FILTER, 0x0, 0x8000, read, 4)) != 0) goto done; deb("%x %x %x %x\n", read[0], read[1], read[2], read[3]); if ((ret = m920x_read(udev, M9206_FW, 0x0, 0x0, read, 1)) != 0) goto done; deb("%x\n", read[0]); for (pass = 0; pass < 2; pass++) { for (i = 0; i + (sizeof(u16) * 3) < fw->size;) { value = get_unaligned_le16(fw->data + i); i += sizeof(u16); index = get_unaligned_le16(fw->data + i); i += sizeof(u16); size = get_unaligned_le16(fw->data + i); i += sizeof(u16); if (pass == 1) { /* Will stall if using fw->data ... */ memcpy(buff, fw->data + i, size); ret = usb_control_msg(udev, usb_sndctrlpipe(udev,0), M9206_FW, USB_TYPE_VENDOR | USB_DIR_OUT, value, index, buff, size, 20); if (ret != size) { deb("error while uploading fw!\n"); ret = -EIO; goto done; } msleep(3); } i += size; } if (i != fw->size) { deb("bad firmware file!\n"); ret = -EINVAL; goto done; } } msleep(36); /* m920x will disconnect itself from the bus after this. */ (void) m920x_write(udev, M9206_CORE, 0x01, M9206_FW_GO); deb("firmware uploaded!\n"); done: kfree(read); kfree(buff); return ret; } /* Callbacks for DVB USB */ static int m920x_identify_state(struct usb_device *udev, struct dvb_usb_device_properties *props, struct dvb_usb_device_description **desc, int *cold) { struct usb_host_interface *alt; alt = usb_altnum_to_altsetting(usb_ifnum_to_if(udev, 0), 1); *cold = (alt == NULL) ? 
1 : 0; return 0; } /* demod configurations */ static int m920x_mt352_demod_init(struct dvb_frontend *fe) { int ret; u8 config[] = { CONFIG, 0x3d }; u8 clock[] = { CLOCK_CTL, 0x30 }; u8 reset[] = { RESET, 0x80 }; u8 adc_ctl[] = { ADC_CTL_1, 0x40 }; u8 agc[] = { AGC_TARGET, 0x1c, 0x20 }; u8 sec_agc[] = { 0x69, 0x00, 0xff, 0xff, 0x40, 0xff, 0x00, 0x40, 0x40 }; u8 unk1[] = { 0x93, 0x1a }; u8 unk2[] = { 0xb5, 0x7a }; deb("Demod init!\n"); if ((ret = mt352_write(fe, config, ARRAY_SIZE(config))) != 0) return ret; if ((ret = mt352_write(fe, clock, ARRAY_SIZE(clock))) != 0) return ret; if ((ret = mt352_write(fe, reset, ARRAY_SIZE(reset))) != 0) return ret; if ((ret = mt352_write(fe, adc_ctl, ARRAY_SIZE(adc_ctl))) != 0) return ret; if ((ret = mt352_write(fe, agc, ARRAY_SIZE(agc))) != 0) return ret; if ((ret = mt352_write(fe, sec_agc, ARRAY_SIZE(sec_agc))) != 0) return ret; if ((ret = mt352_write(fe, unk1, ARRAY_SIZE(unk1))) != 0) return ret; if ((ret = mt352_write(fe, unk2, ARRAY_SIZE(unk2))) != 0) return ret; return 0; } static struct mt352_config m920x_mt352_config = { .demod_address = 0x0f, .no_tuner = 1, .demod_init = m920x_mt352_demod_init, }; static struct tda1004x_config m920x_tda10046_08_config = { .demod_address = 0x08, .invert = 0, .invert_oclk = 0, .ts_mode = TDA10046_TS_SERIAL, .xtal_freq = TDA10046_XTAL_16M, .if_freq = TDA10046_FREQ_045, .agc_config = TDA10046_AGC_TDA827X, .gpio_config = TDA10046_GPTRI, .request_firmware = NULL, }; static struct tda1004x_config m920x_tda10046_0b_config = { .demod_address = 0x0b, .invert = 0, .invert_oclk = 0, .ts_mode = TDA10046_TS_SERIAL, .xtal_freq = TDA10046_XTAL_16M, .if_freq = TDA10046_FREQ_045, .agc_config = TDA10046_AGC_TDA827X, .gpio_config = TDA10046_GPTRI, .request_firmware = NULL, /* uses firmware EEPROM */ }; /* tuner configurations */ static struct qt1010_config m920x_qt1010_config = { .i2c_address = 0x62 }; /* Callbacks for DVB USB */ static int m920x_mt352_frontend_attach(struct dvb_usb_adapter *adap) { 
deb("%s\n",__func__); if ((adap->fe = dvb_attach(mt352_attach, &m920x_mt352_config, &adap->dev->i2c_adap)) == NULL) return -EIO; return 0; } static int m920x_tda10046_08_frontend_attach(struct dvb_usb_adapter *adap) { deb("%s\n",__func__); if ((adap->fe = dvb_attach(tda10046_attach, &m920x_tda10046_08_config, &adap->dev->i2c_adap)) == NULL) return -EIO; return 0; } static int m920x_tda10046_0b_frontend_attach(struct dvb_usb_adapter *adap) { deb("%s\n",__func__); if ((adap->fe = dvb_attach(tda10046_attach, &m920x_tda10046_0b_config, &adap->dev->i2c_adap)) == NULL) return -EIO; return 0; } static int m920x_qt1010_tuner_attach(struct dvb_usb_adapter *adap) { deb("%s\n",__func__); if (dvb_attach(qt1010_attach, adap->fe, &adap->dev->i2c_adap, &m920x_qt1010_config) == NULL) return -ENODEV; return 0; } static int m920x_tda8275_60_tuner_attach(struct dvb_usb_adapter *adap) { deb("%s\n",__func__); if (dvb_attach(tda827x_attach, adap->fe, 0x60, &adap->dev->i2c_adap, NULL) == NULL) return -ENODEV; return 0; } static int m920x_tda8275_61_tuner_attach(struct dvb_usb_adapter *adap) { deb("%s\n",__func__); if (dvb_attach(tda827x_attach, adap->fe, 0x61, &adap->dev->i2c_adap, NULL) == NULL) return -ENODEV; return 0; } static int m920x_fmd1216me_tuner_attach(struct dvb_usb_adapter *adap) { dvb_attach(simple_tuner_attach, adap->fe, &adap->dev->i2c_adap, 0x61, TUNER_PHILIPS_FMD1216ME_MK3); return 0; } /* device-specific initialization */ static struct m920x_inits megasky_rc_init [] = { { M9206_RC_INIT2, 0xa8 }, { M9206_RC_INIT1, 0x51 }, { } /* terminating entry */ }; static struct m920x_inits tvwalkertwin_rc_init [] = { { M9206_RC_INIT2, 0x00 }, { M9206_RC_INIT1, 0xef }, { 0xff28, 0x00 }, { 0xff23, 0x00 }, { 0xff21, 0x30 }, { } /* terminating entry */ }; static struct m920x_inits pinnacle310e_init[] = { /* without these the tuner don't work */ { 0xff20, 0x9b }, { 0xff22, 0x70 }, /* rc settings */ { 0xff50, 0x80 }, { M9206_RC_INIT1, 0x00 }, { M9206_RC_INIT2, 0xff }, { } /* terminating 
entry */ }; /* ir keymaps */ static struct rc_map_table rc_map_megasky_table[] = { { 0x0012, KEY_POWER }, { 0x001e, KEY_CYCLEWINDOWS }, /* min/max */ { 0x0002, KEY_CHANNELUP }, { 0x0005, KEY_CHANNELDOWN }, { 0x0003, KEY_VOLUMEUP }, { 0x0006, KEY_VOLUMEDOWN }, { 0x0004, KEY_MUTE }, { 0x0007, KEY_OK }, /* TS */ { 0x0008, KEY_STOP }, { 0x0009, KEY_MENU }, /* swap */ { 0x000a, KEY_REWIND }, { 0x001b, KEY_PAUSE }, { 0x001f, KEY_FASTFORWARD }, { 0x000c, KEY_RECORD }, { 0x000d, KEY_CAMERA }, /* screenshot */ { 0x000e, KEY_COFFEE }, /* "MTS" */ }; static struct rc_map_table rc_map_tvwalkertwin_table[] = { { 0x0001, KEY_ZOOM }, /* Full Screen */ { 0x0002, KEY_CAMERA }, /* snapshot */ { 0x0003, KEY_MUTE }, { 0x0004, KEY_REWIND }, { 0x0005, KEY_PLAYPAUSE }, /* Play/Pause */ { 0x0006, KEY_FASTFORWARD }, { 0x0007, KEY_RECORD }, { 0x0008, KEY_STOP }, { 0x0009, KEY_TIME }, /* Timeshift */ { 0x000c, KEY_COFFEE }, /* Recall */ { 0x000e, KEY_CHANNELUP }, { 0x0012, KEY_POWER }, { 0x0015, KEY_MENU }, /* source */ { 0x0018, KEY_CYCLEWINDOWS }, /* TWIN PIP */ { 0x001a, KEY_CHANNELDOWN }, { 0x001b, KEY_VOLUMEDOWN }, { 0x001e, KEY_VOLUMEUP }, }; static struct rc_map_table rc_map_pinnacle310e_table[] = { { 0x16, KEY_POWER }, { 0x17, KEY_FAVORITES }, { 0x0f, KEY_TEXT }, { 0x48, KEY_PROGRAM }, /* preview */ { 0x1c, KEY_EPG }, { 0x04, KEY_LIST }, /* record list */ { 0x03, KEY_1 }, { 0x01, KEY_2 }, { 0x06, KEY_3 }, { 0x09, KEY_4 }, { 0x1d, KEY_5 }, { 0x1f, KEY_6 }, { 0x0d, KEY_7 }, { 0x19, KEY_8 }, { 0x1b, KEY_9 }, { 0x15, KEY_0 }, { 0x0c, KEY_CANCEL }, { 0x4a, KEY_CLEAR }, { 0x13, KEY_BACK }, { 0x00, KEY_TAB }, { 0x4b, KEY_UP }, { 0x4e, KEY_LEFT }, { 0x52, KEY_RIGHT }, { 0x51, KEY_DOWN }, { 0x4f, KEY_ENTER }, /* could also be KEY_OK */ { 0x1e, KEY_VOLUMEUP }, { 0x0a, KEY_VOLUMEDOWN }, { 0x05, KEY_CHANNELUP }, { 0x02, KEY_CHANNELDOWN }, { 0x11, KEY_RECORD }, { 0x14, KEY_PLAY }, { 0x4c, KEY_PAUSE }, { 0x1a, KEY_STOP }, { 0x40, KEY_REWIND }, { 0x12, KEY_FASTFORWARD }, { 0x41, KEY_PREVIOUSSONG }, 
/* Replay */ { 0x42, KEY_NEXTSONG }, /* Skip */ { 0x54, KEY_CAMERA }, /* Capture */ /* { 0x50, KEY_SAP }, */ /* Sap */ { 0x47, KEY_CYCLEWINDOWS }, /* Pip */ { 0x4d, KEY_SCREEN }, /* FullScreen */ { 0x08, KEY_SUBTITLE }, { 0x0e, KEY_MUTE }, /* { 0x49, KEY_LR }, */ /* L/R */ { 0x07, KEY_SLEEP }, /* Hibernate */ { 0x08, KEY_VIDEO }, /* A/V */ { 0x0e, KEY_MENU }, /* Recall */ { 0x45, KEY_ZOOMIN }, { 0x46, KEY_ZOOMOUT }, { 0x18, KEY_RED }, /* Red */ { 0x53, KEY_GREEN }, /* Green */ { 0x5e, KEY_YELLOW }, /* Yellow */ { 0x5f, KEY_BLUE }, /* Blue */ }; /* DVB USB Driver stuff */ static struct dvb_usb_device_properties megasky_properties; static struct dvb_usb_device_properties digivox_mini_ii_properties; static struct dvb_usb_device_properties tvwalkertwin_properties; static struct dvb_usb_device_properties dposh_properties; static struct dvb_usb_device_properties pinnacle_pctv310e_properties; static int m920x_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct dvb_usb_device *d = NULL; int ret; struct m920x_inits *rc_init_seq = NULL; int bInterfaceNumber = intf->cur_altsetting->desc.bInterfaceNumber; deb("Probing for m920x device at interface %d\n", bInterfaceNumber); if (bInterfaceNumber == 0) { /* Single-tuner device, or first interface on * multi-tuner device */ ret = dvb_usb_device_init(intf, &megasky_properties, THIS_MODULE, &d, adapter_nr); if (ret == 0) { rc_init_seq = megasky_rc_init; goto found; } ret = dvb_usb_device_init(intf, &digivox_mini_ii_properties, THIS_MODULE, &d, adapter_nr); if (ret == 0) { /* No remote control, so no rc_init_seq */ goto found; } /* This configures both tuners on the TV Walker Twin */ ret = dvb_usb_device_init(intf, &tvwalkertwin_properties, THIS_MODULE, &d, adapter_nr); if (ret == 0) { rc_init_seq = tvwalkertwin_rc_init; goto found; } ret = dvb_usb_device_init(intf, &dposh_properties, THIS_MODULE, &d, adapter_nr); if (ret == 0) { /* Remote controller not supported yet. 
*/ goto found; } ret = dvb_usb_device_init(intf, &pinnacle_pctv310e_properties, THIS_MODULE, &d, adapter_nr); if (ret == 0) { rc_init_seq = pinnacle310e_init; goto found; } return ret; } else { /* Another interface on a multi-tuner device */ /* The LifeView TV Walker Twin gets here, but struct * tvwalkertwin_properties already configured both * tuners, so there is nothing for us to do here */ } found: if ((ret = m920x_init_ep(intf)) < 0) return ret; if (d && (ret = m920x_init(d, rc_init_seq)) != 0) return ret; return ret; } static struct usb_device_id m920x_table [] = { { USB_DEVICE(USB_VID_MSI, USB_PID_MSI_MEGASKY580) }, { USB_DEVICE(USB_VID_ANUBIS_ELECTRONIC, USB_PID_MSI_DIGI_VOX_MINI_II) }, { USB_DEVICE(USB_VID_ANUBIS_ELECTRONIC, USB_PID_LIFEVIEW_TV_WALKER_TWIN_COLD) }, { USB_DEVICE(USB_VID_ANUBIS_ELECTRONIC, USB_PID_LIFEVIEW_TV_WALKER_TWIN_WARM) }, { USB_DEVICE(USB_VID_DPOSH, USB_PID_DPOSH_M9206_COLD) }, { USB_DEVICE(USB_VID_DPOSH, USB_PID_DPOSH_M9206_WARM) }, { USB_DEVICE(USB_VID_VISIONPLUS, USB_PID_PINNACLE_PCTV310E) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE (usb, m920x_table); static struct dvb_usb_device_properties megasky_properties = { .caps = DVB_USB_IS_AN_I2C_ADAPTER, .usb_ctrl = DEVICE_SPECIFIC, .firmware = "dvb-usb-megasky-02.fw", .download_firmware = m920x_firmware_download, .rc.legacy = { .rc_interval = 100, .rc_map_table = rc_map_megasky_table, .rc_map_size = ARRAY_SIZE(rc_map_megasky_table), .rc_query = m920x_rc_query, }, .size_of_priv = sizeof(struct m920x_state), .identify_state = m920x_identify_state, .num_adapters = 1, .adapter = {{ .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, .pid_filter_count = 8, .pid_filter = m920x_pid_filter, .pid_filter_ctrl = m920x_pid_filter_ctrl, .frontend_attach = m920x_mt352_frontend_attach, .tuner_attach = m920x_qt1010_tuner_attach, .stream = { .type = USB_BULK, .count = 8, .endpoint = 0x81, .u = { .bulk = { .buffersize = 512, } } }, }}, .i2c_algo = &m920x_i2c_algo, 
.num_device_descs = 1, .devices = { { "MSI Mega Sky 580 DVB-T USB2.0", { &m920x_table[0], NULL }, { NULL }, } } }; static struct dvb_usb_device_properties digivox_mini_ii_properties = { .caps = DVB_USB_IS_AN_I2C_ADAPTER, .usb_ctrl = DEVICE_SPECIFIC, .firmware = "dvb-usb-digivox-02.fw", .download_firmware = m920x_firmware_download, .size_of_priv = sizeof(struct m920x_state), .identify_state = m920x_identify_state, .num_adapters = 1, .adapter = {{ .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, .pid_filter_count = 8, .pid_filter = m920x_pid_filter, .pid_filter_ctrl = m920x_pid_filter_ctrl, .frontend_attach = m920x_tda10046_08_frontend_attach, .tuner_attach = m920x_tda8275_60_tuner_attach, .stream = { .type = USB_BULK, .count = 8, .endpoint = 0x81, .u = { .bulk = { .buffersize = 0x4000, } } }, }}, .i2c_algo = &m920x_i2c_algo, .num_device_descs = 1, .devices = { { "MSI DIGI VOX mini II DVB-T USB2.0", { &m920x_table[1], NULL }, { NULL }, }, } }; /* LifeView TV Walker Twin support by Nick Andrew <nick@nick-andrew.net> * * LifeView TV Walker Twin has 1 x M9206, 2 x TDA10046, 2 x TDA8275A * TDA10046 #0 is located at i2c address 0x08 * TDA10046 #1 is located at i2c address 0x0b * TDA8275A #0 is located at i2c address 0x60 * TDA8275A #1 is located at i2c address 0x61 */ static struct dvb_usb_device_properties tvwalkertwin_properties = { .caps = DVB_USB_IS_AN_I2C_ADAPTER, .usb_ctrl = DEVICE_SPECIFIC, .firmware = "dvb-usb-tvwalkert.fw", .download_firmware = m920x_firmware_download, .rc.legacy = { .rc_interval = 100, .rc_map_table = rc_map_tvwalkertwin_table, .rc_map_size = ARRAY_SIZE(rc_map_tvwalkertwin_table), .rc_query = m920x_rc_query, }, .size_of_priv = sizeof(struct m920x_state), .identify_state = m920x_identify_state, .num_adapters = 2, .adapter = {{ .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, .pid_filter_count = 8, .pid_filter = m920x_pid_filter, .pid_filter_ctrl = m920x_pid_filter_ctrl, 
.frontend_attach = m920x_tda10046_08_frontend_attach, .tuner_attach = m920x_tda8275_60_tuner_attach, .stream = { .type = USB_BULK, .count = 8, .endpoint = 0x81, .u = { .bulk = { .buffersize = 512, } } }},{ .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, .pid_filter_count = 8, .pid_filter = m920x_pid_filter, .pid_filter_ctrl = m920x_pid_filter_ctrl, .frontend_attach = m920x_tda10046_0b_frontend_attach, .tuner_attach = m920x_tda8275_61_tuner_attach, .stream = { .type = USB_BULK, .count = 8, .endpoint = 0x82, .u = { .bulk = { .buffersize = 512, } } }, }}, .i2c_algo = &m920x_i2c_algo, .num_device_descs = 1, .devices = { { .name = "LifeView TV Walker Twin DVB-T USB2.0", .cold_ids = { &m920x_table[2], NULL }, .warm_ids = { &m920x_table[3], NULL }, }, } }; static struct dvb_usb_device_properties dposh_properties = { .caps = DVB_USB_IS_AN_I2C_ADAPTER, .usb_ctrl = DEVICE_SPECIFIC, .firmware = "dvb-usb-dposh-01.fw", .download_firmware = m920x_firmware_download, .size_of_priv = sizeof(struct m920x_state), .identify_state = m920x_identify_state, .num_adapters = 1, .adapter = {{ /* Hardware pid filters don't work with this device/firmware */ .frontend_attach = m920x_mt352_frontend_attach, .tuner_attach = m920x_qt1010_tuner_attach, .stream = { .type = USB_BULK, .count = 8, .endpoint = 0x81, .u = { .bulk = { .buffersize = 512, } } }, }}, .i2c_algo = &m920x_i2c_algo, .num_device_descs = 1, .devices = { { .name = "Dposh DVB-T USB2.0", .cold_ids = { &m920x_table[4], NULL }, .warm_ids = { &m920x_table[5], NULL }, }, } }; static struct dvb_usb_device_properties pinnacle_pctv310e_properties = { .caps = DVB_USB_IS_AN_I2C_ADAPTER, .usb_ctrl = DEVICE_SPECIFIC, .download_firmware = NULL, .rc.legacy = { .rc_interval = 100, .rc_map_table = rc_map_pinnacle310e_table, .rc_map_size = ARRAY_SIZE(rc_map_pinnacle310e_table), .rc_query = m920x_rc_query, }, .size_of_priv = sizeof(struct m920x_state), .identify_state = m920x_identify_state, .num_adapters = 1, .adapter 
= {{ .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, .pid_filter_count = 8, .pid_filter = m920x_pid_filter, .pid_filter_ctrl = m920x_pid_filter_ctrl, .frontend_attach = m920x_mt352_frontend_attach, .tuner_attach = m920x_fmd1216me_tuner_attach, .stream = { .type = USB_ISOC, .count = 5, .endpoint = 0x84, .u = { .isoc = { .framesperurb = 128, .framesize = 564, .interval = 1, } } }, } }, .i2c_algo = &m920x_i2c_algo, .num_device_descs = 1, .devices = { { "Pinnacle PCTV 310e", { &m920x_table[6], NULL }, { NULL }, } } }; static struct usb_driver m920x_driver = { .name = "dvb_usb_m920x", .probe = m920x_probe, .disconnect = dvb_usb_device_exit, .id_table = m920x_table, }; /* module stuff */ static int __init m920x_module_init(void) { int ret; if ((ret = usb_register(&m920x_driver))) { err("usb_register failed. Error number %d", ret); return ret; } return 0; } static void __exit m920x_module_exit(void) { /* deregister this driver from the USB subsystem */ usb_deregister(&m920x_driver); } module_init (m920x_module_init); module_exit (m920x_module_exit); MODULE_AUTHOR("Aapo Tahkola <aet@rasterburn.org>"); MODULE_DESCRIPTION("DVB Driver for ULI M920x"); MODULE_VERSION("0.1"); MODULE_LICENSE("GPL"); /* * Local variables: * c-basic-offset: 8 */
gpl-2.0
Flemmard/htc7x30-3.0
drivers/s390/block/dasd_diag.c
3030
18393
/* * File...........: linux/drivers/s390/block/dasd_diag.c * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com> * Based on.......: linux/drivers/s390/block/mdisk.c * ...............: by Hartmunt Penner <hpenner@de.ibm.com> * Bugreports.to..: <Linux390@de.ibm.com> * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000 * */ #define KMSG_COMPONENT "dasd" #include <linux/kernel_stat.h> #include <linux/stddef.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/hdreg.h> #include <linux/bio.h> #include <linux/module.h> #include <linux/init.h> #include <linux/jiffies.h> #include <asm/dasd.h> #include <asm/debug.h> #include <asm/ebcdic.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/vtoc.h> #include <asm/diag.h> #include "dasd_int.h" #include "dasd_diag.h" #define PRINTK_HEADER "dasd(diag):" MODULE_LICENSE("GPL"); /* The maximum number of blocks per request (max_blocks) is dependent on the * amount of storage that is available in the static I/O buffer for each * device. Currently each device gets 2 pages. We want to fit two requests * into the available memory so that we can immediately start the next if one * finishes. */ #define DIAG_MAX_BLOCKS (((2 * PAGE_SIZE - sizeof(struct dasd_ccw_req) - \ sizeof(struct dasd_diag_req)) / \ sizeof(struct dasd_diag_bio)) / 2) #define DIAG_MAX_RETRIES 32 #define DIAG_TIMEOUT 50 static struct dasd_discipline dasd_diag_discipline; struct dasd_diag_private { struct dasd_diag_characteristics rdc_data; struct dasd_diag_rw_io iob; struct dasd_diag_init_io iib; blocknum_t pt_block; struct ccw_dev_id dev_id; }; struct dasd_diag_req { unsigned int block_count; struct dasd_diag_bio bio[0]; }; static const u8 DASD_DIAG_CMS1[] = { 0xc3, 0xd4, 0xe2, 0xf1 };/* EBCDIC CMS1 */ /* Perform DIAG250 call with block I/O parameter list iob (input and output) * and function code cmd. * In case of an exception return 3. Otherwise return result of bitwise OR of * resulting condition code and DIAG return code. 
*/ static inline int dia250(void *iob, int cmd) { register unsigned long reg2 asm ("2") = (unsigned long) iob; typedef union { struct dasd_diag_init_io init_io; struct dasd_diag_rw_io rw_io; } addr_type; int rc; rc = 3; asm volatile( " diag 2,%2,0x250\n" "0: ipm %0\n" " srl %0,28\n" " or %0,3\n" "1:\n" EX_TABLE(0b,1b) : "+d" (rc), "=m" (*(addr_type *) iob) : "d" (cmd), "d" (reg2), "m" (*(addr_type *) iob) : "3", "cc"); return rc; } /* Initialize block I/O to DIAG device using the specified blocksize and * block offset. On success, return zero and set end_block to contain the * number of blocks on the device minus the specified offset. Return non-zero * otherwise. */ static inline int mdsk_init_io(struct dasd_device *device, unsigned int blocksize, blocknum_t offset, blocknum_t *end_block) { struct dasd_diag_private *private; struct dasd_diag_init_io *iib; int rc; private = (struct dasd_diag_private *) device->private; iib = &private->iib; memset(iib, 0, sizeof (struct dasd_diag_init_io)); iib->dev_nr = private->dev_id.devno; iib->block_size = blocksize; iib->offset = offset; iib->flaga = DASD_DIAG_FLAGA_DEFAULT; rc = dia250(iib, INIT_BIO); if ((rc & 3) == 0 && end_block) *end_block = iib->end_block; return rc; } /* Remove block I/O environment for device. Return zero on success, non-zero * otherwise. */ static inline int mdsk_term_io(struct dasd_device * device) { struct dasd_diag_private *private; struct dasd_diag_init_io *iib; int rc; private = (struct dasd_diag_private *) device->private; iib = &private->iib; memset(iib, 0, sizeof (struct dasd_diag_init_io)); iib->dev_nr = private->dev_id.devno; rc = dia250(iib, TERM_BIO); return rc; } /* Error recovery for failed DIAG requests - try to reestablish the DIAG * environment. 
*/ static void dasd_diag_erp(struct dasd_device *device) { int rc; mdsk_term_io(device); rc = mdsk_init_io(device, device->block->bp_block, 0, NULL); if (rc == 4) { if (!(test_and_set_bit(DASD_FLAG_DEVICE_RO, &device->flags))) pr_warning("%s: The access mode of a DIAG device " "changed to read-only\n", dev_name(&device->cdev->dev)); rc = 0; } if (rc) pr_warning("%s: DIAG ERP failed with " "rc=%d\n", dev_name(&device->cdev->dev), rc); } /* Start a given request at the device. Return zero on success, non-zero * otherwise. */ static int dasd_start_diag(struct dasd_ccw_req * cqr) { struct dasd_device *device; struct dasd_diag_private *private; struct dasd_diag_req *dreq; int rc; device = cqr->startdev; if (cqr->retries < 0) { DBF_DEV_EVENT(DBF_ERR, device, "DIAG start_IO: request %p " "- no retry left)", cqr); cqr->status = DASD_CQR_ERROR; return -EIO; } private = (struct dasd_diag_private *) device->private; dreq = (struct dasd_diag_req *) cqr->data; private->iob.dev_nr = private->dev_id.devno; private->iob.key = 0; private->iob.flags = DASD_DIAG_RWFLAG_ASYNC; private->iob.block_count = dreq->block_count; private->iob.interrupt_params = (addr_t) cqr; private->iob.bio_list = dreq->bio; private->iob.flaga = DASD_DIAG_FLAGA_DEFAULT; cqr->startclk = get_clock(); cqr->starttime = jiffies; cqr->retries--; rc = dia250(&private->iob, RW_BIO); switch (rc) { case 0: /* Synchronous I/O finished successfully */ cqr->stopclk = get_clock(); cqr->status = DASD_CQR_SUCCESS; /* Indicate to calling function that only a dasd_schedule_bh() and no timer is needed */ rc = -EACCES; break; case 8: /* Asynchronous I/O was started */ cqr->status = DASD_CQR_IN_IO; rc = 0; break; default: /* Error condition */ cqr->status = DASD_CQR_QUEUED; DBF_DEV_EVENT(DBF_WARNING, device, "dia250 returned rc=%d", rc); dasd_diag_erp(device); rc = -EIO; break; } cqr->intrc = rc; return rc; } /* Terminate given request at the device. 
*/ static int dasd_diag_term_IO(struct dasd_ccw_req * cqr) { struct dasd_device *device; device = cqr->startdev; mdsk_term_io(device); mdsk_init_io(device, device->block->bp_block, 0, NULL); cqr->status = DASD_CQR_CLEAR_PENDING; cqr->stopclk = get_clock(); dasd_schedule_device_bh(device); return 0; } /* Handle external interruption. */ static void dasd_ext_handler(unsigned int ext_int_code, unsigned int param32, unsigned long param64) { struct dasd_ccw_req *cqr, *next; struct dasd_device *device; unsigned long long expires; unsigned long flags; addr_t ip; int rc; switch (ext_int_code >> 24) { case DASD_DIAG_CODE_31BIT: ip = (addr_t) param32; break; case DASD_DIAG_CODE_64BIT: ip = (addr_t) param64; break; default: return; } kstat_cpu(smp_processor_id()).irqs[EXTINT_DSD]++; if (!ip) { /* no intparm: unsolicited interrupt */ DBF_EVENT(DBF_NOTICE, "%s", "caught unsolicited " "interrupt"); return; } cqr = (struct dasd_ccw_req *) ip; device = (struct dasd_device *) cqr->startdev; if (strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) { DBF_DEV_EVENT(DBF_WARNING, device, " magic number of dasd_ccw_req 0x%08X doesn't" " match discipline 0x%08X", cqr->magic, *(int *) (&device->discipline->name)); return; } /* get irq lock to modify request queue */ spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); /* Check for a pending clear operation */ if (cqr->status == DASD_CQR_CLEAR_PENDING) { cqr->status = DASD_CQR_CLEARED; dasd_device_clear_timer(device); dasd_schedule_device_bh(device); spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); return; } cqr->stopclk = get_clock(); expires = 0; if ((ext_int_code & 0xff0000) == 0) { cqr->status = DASD_CQR_SUCCESS; /* Start first request on queue if possible -> fast_io. 
*/ if (!list_empty(&device->ccw_queue)) { next = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); if (next->status == DASD_CQR_QUEUED) { rc = dasd_start_diag(next); if (rc == 0) expires = next->expires; } } } else { cqr->status = DASD_CQR_QUEUED; DBF_DEV_EVENT(DBF_DEBUG, device, "interrupt status for " "request %p was %d (%d retries left)", cqr, (ext_int_code >> 16) & 0xff, cqr->retries); dasd_diag_erp(device); } if (expires != 0) dasd_device_set_timer(device, expires); else dasd_device_clear_timer(device); dasd_schedule_device_bh(device); spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); } /* Check whether device can be controlled by DIAG discipline. Return zero on * success, non-zero otherwise. */ static int dasd_diag_check_device(struct dasd_device *device) { struct dasd_block *block; struct dasd_diag_private *private; struct dasd_diag_characteristics *rdc_data; struct dasd_diag_bio bio; struct vtoc_cms_label *label; blocknum_t end_block; unsigned int sb, bsize; int rc; private = (struct dasd_diag_private *) device->private; if (private == NULL) { private = kzalloc(sizeof(struct dasd_diag_private),GFP_KERNEL); if (private == NULL) { DBF_DEV_EVENT(DBF_WARNING, device, "%s", "Allocating memory for private DASD data " "failed\n"); return -ENOMEM; } ccw_device_get_id(device->cdev, &private->dev_id); device->private = (void *) private; } block = dasd_alloc_block(); if (IS_ERR(block)) { DBF_DEV_EVENT(DBF_WARNING, device, "%s", "could not allocate dasd block structure"); device->private = NULL; kfree(private); return PTR_ERR(block); } device->block = block; block->base = device; /* Read Device Characteristics */ rdc_data = (void *) &(private->rdc_data); rdc_data->dev_nr = private->dev_id.devno; rdc_data->rdc_len = sizeof (struct dasd_diag_characteristics); rc = diag210((struct diag210 *) rdc_data); if (rc) { DBF_DEV_EVENT(DBF_WARNING, device, "failed to retrieve device " "information (rc=%d)", rc); rc = -EOPNOTSUPP; goto out; } 
device->default_expires = DIAG_TIMEOUT; /* Figure out position of label block */ switch (private->rdc_data.vdev_class) { case DEV_CLASS_FBA: private->pt_block = 1; break; case DEV_CLASS_ECKD: private->pt_block = 2; break; default: pr_warning("%s: Device type %d is not supported " "in DIAG mode\n", dev_name(&device->cdev->dev), private->rdc_data.vdev_class); rc = -EOPNOTSUPP; goto out; } DBF_DEV_EVENT(DBF_INFO, device, "%04X: %04X on real %04X/%02X", rdc_data->dev_nr, rdc_data->vdev_type, rdc_data->rdev_type, rdc_data->rdev_model); /* terminate all outstanding operations */ mdsk_term_io(device); /* figure out blocksize of device */ label = (struct vtoc_cms_label *) get_zeroed_page(GFP_KERNEL); if (label == NULL) { DBF_DEV_EVENT(DBF_WARNING, device, "%s", "No memory to allocate initialization request"); rc = -ENOMEM; goto out; } rc = 0; end_block = 0; /* try all sizes - needed for ECKD devices */ for (bsize = 512; bsize <= PAGE_SIZE; bsize <<= 1) { mdsk_init_io(device, bsize, 0, &end_block); memset(&bio, 0, sizeof (struct dasd_diag_bio)); bio.type = MDSK_READ_REQ; bio.block_number = private->pt_block + 1; bio.buffer = label; memset(&private->iob, 0, sizeof (struct dasd_diag_rw_io)); private->iob.dev_nr = rdc_data->dev_nr; private->iob.key = 0; private->iob.flags = 0; /* do synchronous io */ private->iob.block_count = 1; private->iob.interrupt_params = 0; private->iob.bio_list = &bio; private->iob.flaga = DASD_DIAG_FLAGA_DEFAULT; rc = dia250(&private->iob, RW_BIO); if (rc == 3) { pr_warning("%s: A 64-bit DIAG call failed\n", dev_name(&device->cdev->dev)); rc = -EOPNOTSUPP; goto out_label; } mdsk_term_io(device); if (rc == 0) break; } if (bsize > PAGE_SIZE) { pr_warning("%s: Accessing the DASD failed because of an " "incorrect format (rc=%d)\n", dev_name(&device->cdev->dev), rc); rc = -EIO; goto out_label; } /* check for label block */ if (memcmp(label->label_id, DASD_DIAG_CMS1, sizeof(DASD_DIAG_CMS1)) == 0) { /* get formatted blocksize from label block */ bsize = 
(unsigned int) label->block_size; block->blocks = (unsigned long) label->block_count; } else block->blocks = end_block; block->bp_block = bsize; block->s2b_shift = 0; /* bits to shift 512 to get a block */ for (sb = 512; sb < bsize; sb = sb << 1) block->s2b_shift++; rc = mdsk_init_io(device, block->bp_block, 0, NULL); if (rc && (rc != 4)) { pr_warning("%s: DIAG initialization failed with rc=%d\n", dev_name(&device->cdev->dev), rc); rc = -EIO; } else { if (rc == 4) set_bit(DASD_FLAG_DEVICE_RO, &device->flags); pr_info("%s: New DASD with %ld byte/block, total size %ld " "KB%s\n", dev_name(&device->cdev->dev), (unsigned long) block->bp_block, (unsigned long) (block->blocks << block->s2b_shift) >> 1, (rc == 4) ? ", read-only device" : ""); rc = 0; } out_label: free_page((long) label); out: if (rc) { device->block = NULL; dasd_free_block(block); device->private = NULL; kfree(private); } return rc; } /* Fill in virtual disk geometry for device. Return zero on success, non-zero * otherwise. */ static int dasd_diag_fill_geometry(struct dasd_block *block, struct hd_geometry *geo) { if (dasd_check_blocksize(block->bp_block) != 0) return -EINVAL; geo->cylinders = (block->blocks << block->s2b_shift) >> 10; geo->heads = 16; geo->sectors = 128 >> block->s2b_shift; return 0; } static dasd_erp_fn_t dasd_diag_erp_action(struct dasd_ccw_req * cqr) { return dasd_default_erp_action; } static dasd_erp_fn_t dasd_diag_erp_postaction(struct dasd_ccw_req * cqr) { return dasd_default_erp_postaction; } /* Create DASD request from block device request. Return pointer to new * request on success, ERR_PTR otherwise. 
*/ static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev, struct dasd_block *block, struct request *req) { struct dasd_ccw_req *cqr; struct dasd_diag_req *dreq; struct dasd_diag_bio *dbio; struct req_iterator iter; struct bio_vec *bv; char *dst; unsigned int count, datasize; sector_t recid, first_rec, last_rec; unsigned int blksize, off; unsigned char rw_cmd; if (rq_data_dir(req) == READ) rw_cmd = MDSK_READ_REQ; else if (rq_data_dir(req) == WRITE) rw_cmd = MDSK_WRITE_REQ; else return ERR_PTR(-EINVAL); blksize = block->bp_block; /* Calculate record id of first and last block. */ first_rec = blk_rq_pos(req) >> block->s2b_shift; last_rec = (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift; /* Check struct bio and count the number of blocks for the request. */ count = 0; rq_for_each_segment(bv, req, iter) { if (bv->bv_len & (blksize - 1)) /* Fba can only do full blocks. */ return ERR_PTR(-EINVAL); count += bv->bv_len >> (block->s2b_shift + 9); } /* Paranoia. 
*/ if (count != last_rec - first_rec + 1) return ERR_PTR(-EINVAL); /* Build the request */ datasize = sizeof(struct dasd_diag_req) + count*sizeof(struct dasd_diag_bio); cqr = dasd_smalloc_request(DASD_DIAG_MAGIC, 0, datasize, memdev); if (IS_ERR(cqr)) return cqr; dreq = (struct dasd_diag_req *) cqr->data; dreq->block_count = count; dbio = dreq->bio; recid = first_rec; rq_for_each_segment(bv, req, iter) { dst = page_address(bv->bv_page) + bv->bv_offset; for (off = 0; off < bv->bv_len; off += blksize) { memset(dbio, 0, sizeof (struct dasd_diag_bio)); dbio->type = rw_cmd; dbio->block_number = recid + 1; dbio->buffer = dst; dbio++; dst += blksize; recid++; } } cqr->retries = DIAG_MAX_RETRIES; cqr->buildclk = get_clock(); if (blk_noretry_request(req) || block->base->features & DASD_FEATURE_FAILFAST) set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); cqr->startdev = memdev; cqr->memdev = memdev; cqr->block = block; cqr->expires = memdev->default_expires * HZ; cqr->status = DASD_CQR_FILLED; return cqr; } /* Release DASD request. Return non-zero if request was successful, zero * otherwise. */ static int dasd_diag_free_cp(struct dasd_ccw_req *cqr, struct request *req) { int status; status = cqr->status == DASD_CQR_DONE; dasd_sfree_request(cqr, cqr->memdev); return status; } static void dasd_diag_handle_terminated_request(struct dasd_ccw_req *cqr) { cqr->status = DASD_CQR_FILLED; }; /* Fill in IOCTL data for device. 
*/ static int dasd_diag_fill_info(struct dasd_device * device, struct dasd_information2_t * info) { struct dasd_diag_private *private; private = (struct dasd_diag_private *) device->private; info->label_block = (unsigned int) private->pt_block; info->FBA_layout = 1; info->format = DASD_FORMAT_LDL; info->characteristics_size = sizeof (struct dasd_diag_characteristics); memcpy(info->characteristics, &((struct dasd_diag_private *) device->private)->rdc_data, sizeof (struct dasd_diag_characteristics)); info->confdata_size = 0; return 0; } static void dasd_diag_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req, struct irb *stat) { DBF_DEV_EVENT(DBF_WARNING, device, "%s", "dump sense not available for DIAG data"); } static struct dasd_discipline dasd_diag_discipline = { .owner = THIS_MODULE, .name = "DIAG", .ebcname = "DIAG", .max_blocks = DIAG_MAX_BLOCKS, .check_device = dasd_diag_check_device, .verify_path = dasd_generic_verify_path, .fill_geometry = dasd_diag_fill_geometry, .start_IO = dasd_start_diag, .term_IO = dasd_diag_term_IO, .handle_terminated_request = dasd_diag_handle_terminated_request, .erp_action = dasd_diag_erp_action, .erp_postaction = dasd_diag_erp_postaction, .build_cp = dasd_diag_build_cp, .free_cp = dasd_diag_free_cp, .dump_sense = dasd_diag_dump_sense, .fill_info = dasd_diag_fill_info, }; static int __init dasd_diag_init(void) { if (!MACHINE_IS_VM) { pr_info("Discipline %s cannot be used without z/VM\n", dasd_diag_discipline.name); return -ENODEV; } ASCEBC(dasd_diag_discipline.ebcname, 4); service_subclass_irq_register(); register_external_interrupt(0x2603, dasd_ext_handler); dasd_diag_discipline_pointer = &dasd_diag_discipline; return 0; } static void __exit dasd_diag_cleanup(void) { unregister_external_interrupt(0x2603, dasd_ext_handler); service_subclass_irq_unregister(); dasd_diag_discipline_pointer = NULL; } module_init(dasd_diag_init); module_exit(dasd_diag_cleanup);
gpl-2.0
crazyleen/linux-source-3.2
arch/mips/dec/ecc-berr.c
4566
7708
/* * Bus error event handling code for systems equipped with ECC * handling logic, i.e. DECstation/DECsystem 5000/200 (KN02), * 5000/240 (KN03), 5000/260 (KN05) and DECsystem 5900 (KN03), * 5900/260 (KN05) systems. * * Copyright (c) 2003, 2005 Maciej W. Rozycki * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/types.h> #include <asm/addrspace.h> #include <asm/bootinfo.h> #include <asm/cpu.h> #include <asm/irq_regs.h> #include <asm/processor.h> #include <asm/ptrace.h> #include <asm/system.h> #include <asm/traps.h> #include <asm/dec/ecc.h> #include <asm/dec/kn02.h> #include <asm/dec/kn03.h> #include <asm/dec/kn05.h> static volatile u32 *kn0x_erraddr; static volatile u32 *kn0x_chksyn; static inline void dec_ecc_be_ack(void) { *kn0x_erraddr = 0; /* any write clears the IRQ */ iob(); } static int dec_ecc_be_backend(struct pt_regs *regs, int is_fixup, int invoker) { static const char excstr[] = "exception"; static const char intstr[] = "interrupt"; static const char cpustr[] = "CPU"; static const char dmastr[] = "DMA"; static const char readstr[] = "read"; static const char mreadstr[] = "memory read"; static const char writestr[] = "write"; static const char mwritstr[] = "partial memory write"; static const char timestr[] = "timeout"; static const char overstr[] = "overrun"; static const char eccstr[] = "ECC error"; const char *kind, *agent, *cycle, *event; const char *status = "", *xbit = "", *fmt = ""; unsigned long address; u16 syn = 0, sngl; int i = 0; u32 erraddr = *kn0x_erraddr; u32 chksyn = *kn0x_chksyn; int action = MIPS_BE_FATAL; /* For non-ECC ack ASAP, so that any subsequent errors get caught. 
*/ if ((erraddr & (KN0X_EAR_VALID | KN0X_EAR_ECCERR)) == KN0X_EAR_VALID) dec_ecc_be_ack(); kind = invoker ? intstr : excstr; if (!(erraddr & KN0X_EAR_VALID)) { /* No idea what happened. */ printk(KERN_ALERT "Unidentified bus error %s\n", kind); return action; } agent = (erraddr & KN0X_EAR_CPU) ? cpustr : dmastr; if (erraddr & KN0X_EAR_ECCERR) { /* An ECC error on a CPU or DMA transaction. */ cycle = (erraddr & KN0X_EAR_WRITE) ? mwritstr : mreadstr; event = eccstr; } else { /* A CPU timeout or a DMA overrun. */ cycle = (erraddr & KN0X_EAR_WRITE) ? writestr : readstr; event = (erraddr & KN0X_EAR_CPU) ? timestr : overstr; } address = erraddr & KN0X_EAR_ADDRESS; /* For ECC errors on reads adjust for MT pipelining. */ if ((erraddr & (KN0X_EAR_WRITE | KN0X_EAR_ECCERR)) == KN0X_EAR_ECCERR) address = (address & ~0xfffLL) | ((address - 5) & 0xfffLL); address <<= 2; /* Only CPU errors are fixable. */ if (erraddr & KN0X_EAR_CPU && is_fixup) action = MIPS_BE_FIXUP; if (erraddr & KN0X_EAR_ECCERR) { static const u8 data_sbit[32] = { 0x4f, 0x4a, 0x52, 0x54, 0x57, 0x58, 0x5b, 0x5d, 0x23, 0x25, 0x26, 0x29, 0x2a, 0x2c, 0x31, 0x34, 0x0e, 0x0b, 0x13, 0x15, 0x16, 0x19, 0x1a, 0x1c, 0x62, 0x64, 0x67, 0x68, 0x6b, 0x6d, 0x70, 0x75, }; static const u8 data_mbit[25] = { 0x07, 0x0d, 0x1f, 0x2f, 0x32, 0x37, 0x38, 0x3b, 0x3d, 0x3e, 0x43, 0x45, 0x46, 0x49, 0x4c, 0x51, 0x5e, 0x61, 0x6e, 0x73, 0x76, 0x79, 0x7a, 0x7c, 0x7f, }; static const char sbestr[] = "corrected single"; static const char dbestr[] = "uncorrectable double"; static const char mbestr[] = "uncorrectable multiple"; if (!(address & 0x4)) syn = chksyn; /* Low bank. */ else syn = chksyn >> 16; /* High bank. */ if (!(syn & KN0X_ESR_VLDLO)) { /* Ack now, no rewrite will happen. */ dec_ecc_be_ack(); fmt = KERN_ALERT "%s" "invalid\n"; } else { sngl = syn & KN0X_ESR_SNGLO; syn &= KN0X_ESR_SYNLO; /* * Multibit errors may be tagged incorrectly; * check the syndrome explicitly. 
*/ for (i = 0; i < 25; i++) if (syn == data_mbit[i]) break; if (i < 25) { status = mbestr; } else if (!sngl) { status = dbestr; } else { volatile u32 *ptr = (void *)CKSEG1ADDR(address); *ptr = *ptr; /* Rewrite. */ iob(); status = sbestr; action = MIPS_BE_DISCARD; } /* Ack now, now we've rewritten (or not). */ dec_ecc_be_ack(); if (syn && syn == (syn & -syn)) { if (syn == 0x01) { fmt = KERN_ALERT "%s" "%#04x -- %s bit error " "at check bit C%s\n"; xbit = "X"; } else { fmt = KERN_ALERT "%s" "%#04x -- %s bit error " "at check bit C%s%u\n"; } i = syn >> 2; } else { for (i = 0; i < 32; i++) if (syn == data_sbit[i]) break; if (i < 32) fmt = KERN_ALERT "%s" "%#04x -- %s bit error " "at data bit D%s%u\n"; else fmt = KERN_ALERT "%s" "%#04x -- %s bit error\n"; } } } if (action != MIPS_BE_FIXUP) printk(KERN_ALERT "Bus error %s: %s %s %s at %#010lx\n", kind, agent, cycle, event, address); if (action != MIPS_BE_FIXUP && erraddr & KN0X_EAR_ECCERR) printk(fmt, " ECC syndrome ", syn, status, xbit, i); return action; } int dec_ecc_be_handler(struct pt_regs *regs, int is_fixup) { return dec_ecc_be_backend(regs, is_fixup, 0); } irqreturn_t dec_ecc_be_interrupt(int irq, void *dev_id) { struct pt_regs *regs = get_irq_regs(); int action = dec_ecc_be_backend(regs, 0, 1); if (action == MIPS_BE_DISCARD) return IRQ_HANDLED; /* * FIXME: Find the affected processes and kill them, otherwise * we must die. * * The interrupt is asynchronously delivered thus EPC and RA * may be irrelevant, but are printed for a reference. */ printk(KERN_ALERT "Fatal bus interrupt, epc == %08lx, ra == %08lx\n", regs->cp0_epc, regs->regs[31]); die("Unrecoverable bus error", regs); } /* * Initialization differs a bit between KN02 and KN03/KN05, so we * need two variants. Once set up, all systems can be handled the * same way. 
*/ static inline void dec_kn02_be_init(void) { volatile u32 *csr = (void *)CKSEG1ADDR(KN02_SLOT_BASE + KN02_CSR); kn0x_erraddr = (void *)CKSEG1ADDR(KN02_SLOT_BASE + KN02_ERRADDR); kn0x_chksyn = (void *)CKSEG1ADDR(KN02_SLOT_BASE + KN02_CHKSYN); /* Preset write-only bits of the Control Register cache. */ cached_kn02_csr = *csr | KN02_CSR_LEDS; /* Set normal ECC detection and generation. */ cached_kn02_csr &= ~(KN02_CSR_DIAGCHK | KN02_CSR_DIAGGEN); /* Enable ECC correction. */ cached_kn02_csr |= KN02_CSR_CORRECT; *csr = cached_kn02_csr; iob(); } static inline void dec_kn03_be_init(void) { volatile u32 *mcr = (void *)CKSEG1ADDR(KN03_SLOT_BASE + IOASIC_MCR); volatile u32 *mbcs = (void *)CKSEG1ADDR(KN4K_SLOT_BASE + KN4K_MB_CSR); kn0x_erraddr = (void *)CKSEG1ADDR(KN03_SLOT_BASE + IOASIC_ERRADDR); kn0x_chksyn = (void *)CKSEG1ADDR(KN03_SLOT_BASE + IOASIC_CHKSYN); /* * Set normal ECC detection and generation, enable ECC correction. * For KN05 we also need to make sure EE (?) is enabled in the MB. * Otherwise DBE/IBE exceptions would be masked but bus error * interrupts would still arrive, resulting in an inevitable crash * if get_dbe() triggers one. */ *mcr = (*mcr & ~(KN03_MCR_DIAGCHK | KN03_MCR_DIAGGEN)) | KN03_MCR_CORRECT; if (current_cpu_type() == CPU_R4400SC) *mbcs |= KN4K_MB_CSR_EE; fast_iob(); } void __init dec_ecc_be_init(void) { if (mips_machtype == MACH_DS5000_200) dec_kn02_be_init(); else dec_kn03_be_init(); /* Clear any leftover errors from the firmware. */ dec_ecc_be_ack(); }
gpl-2.0
tcreech/tilegx-linux-3.4.68-politestackrehome
fs/ubifs/tnc.c
4822
89184
/* * This file is part of UBIFS. * * Copyright (C) 2006-2008 Nokia Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * * Authors: Adrian Hunter * Artem Bityutskiy (Битюцкий Артём) */ /* * This file implements TNC (Tree Node Cache) which caches indexing nodes of * the UBIFS B-tree. * * At the moment the locking rules of the TNC tree are quite simple and * straightforward. We just have a mutex and lock it when we traverse the * tree. If a znode is not in memory, we read it from flash while still having * the mutex locked. */ #include <linux/crc32.h> #include <linux/slab.h> #include "ubifs.h" /* * Returned codes of 'matches_name()' and 'fallible_matches_name()' functions. * @NAME_LESS: name corresponding to the first argument is less than second * @NAME_MATCHES: names match * @NAME_GREATER: name corresponding to the second argument is greater than * first * @NOT_ON_MEDIA: node referred by zbranch does not exist on the media * * These constants were introduce to improve readability. */ enum { NAME_LESS = 0, NAME_MATCHES = 1, NAME_GREATER = 2, NOT_ON_MEDIA = 3, }; /** * insert_old_idx - record an index node obsoleted since the last commit start. * @c: UBIFS file-system description object * @lnum: LEB number of obsoleted index node * @offs: offset of obsoleted index node * * Returns %0 on success, and a negative error code on failure. 
* * For recovery, there must always be a complete intact version of the index on * flash at all times. That is called the "old index". It is the index as at the * time of the last successful commit. Many of the index nodes in the old index * may be dirty, but they must not be erased until the next successful commit * (at which point that index becomes the old index). * * That means that the garbage collection and the in-the-gaps method of * committing must be able to determine if an index node is in the old index. * Most of the old index nodes can be found by looking up the TNC using the * 'lookup_znode()' function. However, some of the old index nodes may have * been deleted from the current index or may have been changed so much that * they cannot be easily found. In those cases, an entry is added to an RB-tree. * That is what this function does. The RB-tree is ordered by LEB number and * offset because they uniquely identify the old index node. */ static int insert_old_idx(struct ubifs_info *c, int lnum, int offs) { struct ubifs_old_idx *old_idx, *o; struct rb_node **p, *parent = NULL; old_idx = kmalloc(sizeof(struct ubifs_old_idx), GFP_NOFS); if (unlikely(!old_idx)) return -ENOMEM; old_idx->lnum = lnum; old_idx->offs = offs; p = &c->old_idx.rb_node; while (*p) { parent = *p; o = rb_entry(parent, struct ubifs_old_idx, rb); if (lnum < o->lnum) p = &(*p)->rb_left; else if (lnum > o->lnum) p = &(*p)->rb_right; else if (offs < o->offs) p = &(*p)->rb_left; else if (offs > o->offs) p = &(*p)->rb_right; else { ubifs_err("old idx added twice!"); kfree(old_idx); return 0; } } rb_link_node(&old_idx->rb, parent, p); rb_insert_color(&old_idx->rb, &c->old_idx); return 0; } /** * insert_old_idx_znode - record a znode obsoleted since last commit start. * @c: UBIFS file-system description object * @znode: znode of obsoleted index node * * Returns %0 on success, and a negative error code on failure. 
*/ int insert_old_idx_znode(struct ubifs_info *c, struct ubifs_znode *znode) { if (znode->parent) { struct ubifs_zbranch *zbr; zbr = &znode->parent->zbranch[znode->iip]; if (zbr->len) return insert_old_idx(c, zbr->lnum, zbr->offs); } else if (c->zroot.len) return insert_old_idx(c, c->zroot.lnum, c->zroot.offs); return 0; } /** * ins_clr_old_idx_znode - record a znode obsoleted since last commit start. * @c: UBIFS file-system description object * @znode: znode of obsoleted index node * * Returns %0 on success, and a negative error code on failure. */ static int ins_clr_old_idx_znode(struct ubifs_info *c, struct ubifs_znode *znode) { int err; if (znode->parent) { struct ubifs_zbranch *zbr; zbr = &znode->parent->zbranch[znode->iip]; if (zbr->len) { err = insert_old_idx(c, zbr->lnum, zbr->offs); if (err) return err; zbr->lnum = 0; zbr->offs = 0; zbr->len = 0; } } else if (c->zroot.len) { err = insert_old_idx(c, c->zroot.lnum, c->zroot.offs); if (err) return err; c->zroot.lnum = 0; c->zroot.offs = 0; c->zroot.len = 0; } return 0; } /** * destroy_old_idx - destroy the old_idx RB-tree. * @c: UBIFS file-system description object * * During start commit, the old_idx RB-tree is used to avoid overwriting index * nodes that were in the index last commit but have since been deleted. This * is necessary for recovery i.e. the old index must be kept intact until the * new index is successfully written. The old-idx RB-tree is used for the * in-the-gaps method of writing index nodes and is destroyed every commit. 
*/ void destroy_old_idx(struct ubifs_info *c) { struct rb_node *this = c->old_idx.rb_node; struct ubifs_old_idx *old_idx; while (this) { if (this->rb_left) { this = this->rb_left; continue; } else if (this->rb_right) { this = this->rb_right; continue; } old_idx = rb_entry(this, struct ubifs_old_idx, rb); this = rb_parent(this); if (this) { if (this->rb_left == &old_idx->rb) this->rb_left = NULL; else this->rb_right = NULL; } kfree(old_idx); } c->old_idx = RB_ROOT; } /** * copy_znode - copy a dirty znode. * @c: UBIFS file-system description object * @znode: znode to copy * * A dirty znode being committed may not be changed, so it is copied. */ static struct ubifs_znode *copy_znode(struct ubifs_info *c, struct ubifs_znode *znode) { struct ubifs_znode *zn; zn = kmalloc(c->max_znode_sz, GFP_NOFS); if (unlikely(!zn)) return ERR_PTR(-ENOMEM); memcpy(zn, znode, c->max_znode_sz); zn->cnext = NULL; __set_bit(DIRTY_ZNODE, &zn->flags); __clear_bit(COW_ZNODE, &zn->flags); ubifs_assert(!ubifs_zn_obsolete(znode)); __set_bit(OBSOLETE_ZNODE, &znode->flags); if (znode->level != 0) { int i; const int n = zn->child_cnt; /* The children now have new parent */ for (i = 0; i < n; i++) { struct ubifs_zbranch *zbr = &zn->zbranch[i]; if (zbr->znode) zbr->znode->parent = zn; } } atomic_long_inc(&c->dirty_zn_cnt); return zn; } /** * add_idx_dirt - add dirt due to a dirty znode. * @c: UBIFS file-system description object * @lnum: LEB number of index node * @dirt: size of index node * * This function updates lprops dirty space and the new size of the index. */ static int add_idx_dirt(struct ubifs_info *c, int lnum, int dirt) { c->calc_idx_sz -= ALIGN(dirt, 8); return ubifs_add_dirt(c, lnum, dirt); } /** * dirty_cow_znode - ensure a znode is not being committed. * @c: UBIFS file-system description object * @zbr: branch of znode to check * * Returns dirtied znode on success or negative error code on failure. 
*/ static struct ubifs_znode *dirty_cow_znode(struct ubifs_info *c, struct ubifs_zbranch *zbr) { struct ubifs_znode *znode = zbr->znode; struct ubifs_znode *zn; int err; if (!ubifs_zn_cow(znode)) { /* znode is not being committed */ if (!test_and_set_bit(DIRTY_ZNODE, &znode->flags)) { atomic_long_inc(&c->dirty_zn_cnt); atomic_long_dec(&c->clean_zn_cnt); atomic_long_dec(&ubifs_clean_zn_cnt); err = add_idx_dirt(c, zbr->lnum, zbr->len); if (unlikely(err)) return ERR_PTR(err); } return znode; } zn = copy_znode(c, znode); if (IS_ERR(zn)) return zn; if (zbr->len) { err = insert_old_idx(c, zbr->lnum, zbr->offs); if (unlikely(err)) return ERR_PTR(err); err = add_idx_dirt(c, zbr->lnum, zbr->len); } else err = 0; zbr->znode = zn; zbr->lnum = 0; zbr->offs = 0; zbr->len = 0; if (unlikely(err)) return ERR_PTR(err); return zn; } /** * lnc_add - add a leaf node to the leaf node cache. * @c: UBIFS file-system description object * @zbr: zbranch of leaf node * @node: leaf node * * Leaf nodes are non-index nodes directory entry nodes or data nodes. The * purpose of the leaf node cache is to save re-reading the same leaf node over * and over again. Most things are cached by VFS, however the file system must * cache directory entries for readdir and for resolving hash collisions. The * present implementation of the leaf node cache is extremely simple, and * allows for error returns that are not used but that may be needed if a more * complex implementation is created. * * Note, this function does not add the @node object to LNC directly, but * allocates a copy of the object and adds the copy to LNC. The reason for this * is that @node has been allocated outside of the TNC subsystem and will be * used with @c->tnc_mutex unlock upon return from the TNC subsystem. But LNC * may be changed at any time, e.g. freed by the shrinker. 
*/ static int lnc_add(struct ubifs_info *c, struct ubifs_zbranch *zbr, const void *node) { int err; void *lnc_node; const struct ubifs_dent_node *dent = node; ubifs_assert(!zbr->leaf); ubifs_assert(zbr->len != 0); ubifs_assert(is_hash_key(c, &zbr->key)); err = ubifs_validate_entry(c, dent); if (err) { dbg_dump_stack(); dbg_dump_node(c, dent); return err; } lnc_node = kmemdup(node, zbr->len, GFP_NOFS); if (!lnc_node) /* We don't have to have the cache, so no error */ return 0; zbr->leaf = lnc_node; return 0; } /** * lnc_add_directly - add a leaf node to the leaf-node-cache. * @c: UBIFS file-system description object * @zbr: zbranch of leaf node * @node: leaf node * * This function is similar to 'lnc_add()', but it does not create a copy of * @node but inserts @node to TNC directly. */ static int lnc_add_directly(struct ubifs_info *c, struct ubifs_zbranch *zbr, void *node) { int err; ubifs_assert(!zbr->leaf); ubifs_assert(zbr->len != 0); err = ubifs_validate_entry(c, node); if (err) { dbg_dump_stack(); dbg_dump_node(c, node); return err; } zbr->leaf = node; return 0; } /** * lnc_free - remove a leaf node from the leaf node cache. * @zbr: zbranch of leaf node * @node: leaf node */ static void lnc_free(struct ubifs_zbranch *zbr) { if (!zbr->leaf) return; kfree(zbr->leaf); zbr->leaf = NULL; } /** * tnc_read_node_nm - read a "hashed" leaf node. * @c: UBIFS file-system description object * @zbr: key and position of the node * @node: node is returned here * * This function reads a "hashed" node defined by @zbr from the leaf node cache * (in it is there) or from the hash media, in which case the node is also * added to LNC. Returns zero in case of success or a negative negative error * code in case of failure. 
*/ static int tnc_read_node_nm(struct ubifs_info *c, struct ubifs_zbranch *zbr, void *node) { int err; ubifs_assert(is_hash_key(c, &zbr->key)); if (zbr->leaf) { /* Read from the leaf node cache */ ubifs_assert(zbr->len != 0); memcpy(node, zbr->leaf, zbr->len); return 0; } err = ubifs_tnc_read_node(c, zbr, node); if (err) return err; /* Add the node to the leaf node cache */ err = lnc_add(c, zbr, node); return err; } /** * try_read_node - read a node if it is a node. * @c: UBIFS file-system description object * @buf: buffer to read to * @type: node type * @len: node length (not aligned) * @lnum: LEB number of node to read * @offs: offset of node to read * * This function tries to read a node of known type and length, checks it and * stores it in @buf. This function returns %1 if a node is present and %0 if * a node is not present. A negative error code is returned for I/O errors. * This function performs that same function as ubifs_read_node except that * it does not require that there is actually a node present and instead * the return code indicates if a node was read. * * Note, this function does not check CRC of data nodes if @c->no_chk_data_crc * is true (it is controlled by corresponding mount option). However, if * @c->mounting or @c->remounting_rw is true (we are mounting or re-mounting to * R/W mode), @c->no_chk_data_crc is ignored and CRC is checked. This is * because during mounting or re-mounting from R/O mode to R/W mode we may read * journal nodes (when replying the journal or doing the recovery) and the * journal nodes may potentially be corrupted, so checking is required. 
*/ static int try_read_node(const struct ubifs_info *c, void *buf, int type, int len, int lnum, int offs) { int err, node_len; struct ubifs_ch *ch = buf; uint32_t crc, node_crc; dbg_io("LEB %d:%d, %s, length %d", lnum, offs, dbg_ntype(type), len); err = ubifs_leb_read(c, lnum, buf, offs, len, 1); if (err) { ubifs_err("cannot read node type %d from LEB %d:%d, error %d", type, lnum, offs, err); return err; } if (le32_to_cpu(ch->magic) != UBIFS_NODE_MAGIC) return 0; if (ch->node_type != type) return 0; node_len = le32_to_cpu(ch->len); if (node_len != len) return 0; if (type == UBIFS_DATA_NODE && c->no_chk_data_crc && !c->mounting && !c->remounting_rw) return 1; crc = crc32(UBIFS_CRC32_INIT, buf + 8, node_len - 8); node_crc = le32_to_cpu(ch->crc); if (crc != node_crc) return 0; return 1; } /** * fallible_read_node - try to read a leaf node. * @c: UBIFS file-system description object * @key: key of node to read * @zbr: position of node * @node: node returned * * This function tries to read a node and returns %1 if the node is read, %0 * if the node is not present, and a negative error code in the case of error. */ static int fallible_read_node(struct ubifs_info *c, const union ubifs_key *key, struct ubifs_zbranch *zbr, void *node) { int ret; dbg_tnck(key, "LEB %d:%d, key ", zbr->lnum, zbr->offs); ret = try_read_node(c, node, key_type(c, key), zbr->len, zbr->lnum, zbr->offs); if (ret == 1) { union ubifs_key node_key; struct ubifs_dent_node *dent = node; /* All nodes have key in the same place */ key_read(c, &dent->key, &node_key); if (keys_cmp(c, key, &node_key) != 0) ret = 0; } if (ret == 0 && c->replaying) dbg_mntk(key, "dangling branch LEB %d:%d len %d, key ", zbr->lnum, zbr->offs, zbr->len); return ret; } /** * matches_name - determine if a direntry or xattr entry matches a given name. * @c: UBIFS file-system description object * @zbr: zbranch of dent * @nm: name to match * * This function checks if xentry/direntry referred by zbranch @zbr matches name * @nm. 
Returns %NAME_MATCHES if it does, %NAME_LESS if the name referred by * @zbr is less than @nm, and %NAME_GREATER if it is greater than @nm. In case * of failure, a negative error code is returned. */ static int matches_name(struct ubifs_info *c, struct ubifs_zbranch *zbr, const struct qstr *nm) { struct ubifs_dent_node *dent; int nlen, err; /* If possible, match against the dent in the leaf node cache */ if (!zbr->leaf) { dent = kmalloc(zbr->len, GFP_NOFS); if (!dent) return -ENOMEM; err = ubifs_tnc_read_node(c, zbr, dent); if (err) goto out_free; /* Add the node to the leaf node cache */ err = lnc_add_directly(c, zbr, dent); if (err) goto out_free; } else dent = zbr->leaf; nlen = le16_to_cpu(dent->nlen); err = memcmp(dent->name, nm->name, min_t(int, nlen, nm->len)); if (err == 0) { if (nlen == nm->len) return NAME_MATCHES; else if (nlen < nm->len) return NAME_LESS; else return NAME_GREATER; } else if (err < 0) return NAME_LESS; else return NAME_GREATER; out_free: kfree(dent); return err; } /** * get_znode - get a TNC znode that may not be loaded yet. * @c: UBIFS file-system description object * @znode: parent znode * @n: znode branch slot number * * This function returns the znode or a negative error code. */ static struct ubifs_znode *get_znode(struct ubifs_info *c, struct ubifs_znode *znode, int n) { struct ubifs_zbranch *zbr; zbr = &znode->zbranch[n]; if (zbr->znode) znode = zbr->znode; else znode = ubifs_load_znode(c, zbr, znode, n); return znode; } /** * tnc_next - find next TNC entry. * @c: UBIFS file-system description object * @zn: znode is passed and returned here * @n: znode branch slot number is passed and returned here * * This function returns %0 if the next TNC entry is found, %-ENOENT if there is * no next entry, or a negative error code otherwise. 
*/ static int tnc_next(struct ubifs_info *c, struct ubifs_znode **zn, int *n) { struct ubifs_znode *znode = *zn; int nn = *n; nn += 1; if (nn < znode->child_cnt) { *n = nn; return 0; } while (1) { struct ubifs_znode *zp; zp = znode->parent; if (!zp) return -ENOENT; nn = znode->iip + 1; znode = zp; if (nn < znode->child_cnt) { znode = get_znode(c, znode, nn); if (IS_ERR(znode)) return PTR_ERR(znode); while (znode->level != 0) { znode = get_znode(c, znode, 0); if (IS_ERR(znode)) return PTR_ERR(znode); } nn = 0; break; } } *zn = znode; *n = nn; return 0; } /** * tnc_prev - find previous TNC entry. * @c: UBIFS file-system description object * @zn: znode is returned here * @n: znode branch slot number is passed and returned here * * This function returns %0 if the previous TNC entry is found, %-ENOENT if * there is no next entry, or a negative error code otherwise. */ static int tnc_prev(struct ubifs_info *c, struct ubifs_znode **zn, int *n) { struct ubifs_znode *znode = *zn; int nn = *n; if (nn > 0) { *n = nn - 1; return 0; } while (1) { struct ubifs_znode *zp; zp = znode->parent; if (!zp) return -ENOENT; nn = znode->iip - 1; znode = zp; if (nn >= 0) { znode = get_znode(c, znode, nn); if (IS_ERR(znode)) return PTR_ERR(znode); while (znode->level != 0) { nn = znode->child_cnt - 1; znode = get_znode(c, znode, nn); if (IS_ERR(znode)) return PTR_ERR(znode); } nn = znode->child_cnt - 1; break; } } *zn = znode; *n = nn; return 0; } /** * resolve_collision - resolve a collision. * @c: UBIFS file-system description object * @key: key of a directory or extended attribute entry * @zn: znode is returned here * @n: zbranch number is passed and returned here * @nm: name of the entry * * This function is called for "hashed" keys to make sure that the found key * really corresponds to the looked up node (directory or extended attribute * entry). It returns %1 and sets @zn and @n if the collision is resolved. 
 * %0 is returned if @nm is not found and @zn and @n are set to the previous
 * entry, i.e. to the entry after which @nm could follow if it were in TNC.
 * This means that @n may be set to %-1 if the leftmost key in @zn is the
 * previous one. A negative error code is returned on failures.
 */
static int resolve_collision(struct ubifs_info *c, const union ubifs_key *key,
			     struct ubifs_znode **zn, int *n,
			     const struct qstr *nm)
{
	int err;

	err = matches_name(c, &(*zn)->zbranch[*n], nm);
	if (unlikely(err < 0))
		return err;
	if (err == NAME_MATCHES)
		return 1;

	if (err == NAME_GREATER) {
		/* Look left */
		while (1) {
			err = tnc_prev(c, zn, n);
			if (err == -ENOENT) {
				/* Fell off the left edge of the tree */
				ubifs_assert(*n == 0);
				*n = -1;
				return 0;
			}
			if (err < 0)
				return err;
			if (keys_cmp(c, &(*zn)->zbranch[*n].key, key)) {
				/*
				 * We have found the branch after which we would
				 * like to insert, but inserting in this znode
				 * may still be wrong. Consider the following 3
				 * znodes, in the case where we are resolving a
				 * collision with Key2.
				 *
				 *                  znode zp
				 *            ----------------------
				 * level 1     |  Key0  |  Key1  |
				 *            -----------------------
				 *                 |            |
				 *       znode za  |            |  znode zb
				 *          ------------      ------------
				 * level 0  |  Key0  |        |  Key2  |
				 *          ------------      ------------
				 *
				 * The lookup finds Key2 in znode zb. Lets say
				 * there is no match and the name is greater so
				 * we look left. When we find Key0, we end up
				 * here. If we return now, we will insert into
				 * znode za at slot n = 1. But that is invalid
				 * according to the parent's keys. Key2 must
				 * be inserted into znode zb.
				 *
				 * Note, this problem is not relevant for the
				 * case when we go right, because
				 * 'tnc_insert()' would correct the parent key.
				 */
				if (*n == (*zn)->child_cnt - 1) {
					err = tnc_next(c, zn, n);
					if (err) {
						/* Should be impossible */
						ubifs_assert(0);
						if (err == -ENOENT)
							err = -EINVAL;
						return err;
					}
					ubifs_assert(*n == 0);
					*n = -1;
				}
				return 0;
			}
			err = matches_name(c, &(*zn)->zbranch[*n], nm);
			if (err < 0)
				return err;
			if (err == NAME_LESS)
				return 0;
			if (err == NAME_MATCHES)
				return 1;
			/* Keep scanning left while names are still greater */
			ubifs_assert(err == NAME_GREATER);
		}
	} else {
		int nn = *n;
		struct ubifs_znode *znode = *zn;

		/* Look right */
		while (1) {
			err = tnc_next(c, &znode, &nn);
			if (err == -ENOENT)
				return 0;
			if (err < 0)
				return err;
			if (keys_cmp(c, &znode->zbranch[nn].key, key))
				return 0;
			err = matches_name(c, &znode->zbranch[nn], nm);
			if (err < 0)
				return err;
			if (err == NAME_GREATER)
				return 0;
			/* Remember the last entry that was not greater */
			*zn = znode;
			*n = nn;
			if (err == NAME_MATCHES)
				return 1;
			ubifs_assert(err == NAME_LESS);
		}
	}
}

/**
 * fallible_matches_name - determine if a dent matches a given name.
 * @c: UBIFS file-system description object
 * @zbr: zbranch of dent
 * @nm: name to match
 *
 * This is a "fallible" version of 'matches_name()' function which does not
 * panic if the direntry/xentry referred by @zbr does not exist on the media.
 *
 * This function checks if xentry/direntry referred by zbranch @zbr matches name
 * @nm. Returns %NAME_MATCHES it does, %NAME_LESS if the name referred by @zbr
 * is less than @nm, %NAME_GREATER if it is greater than @nm, and @NOT_ON_MEDIA
 * if xentry/direntry referred by @zbr does not exist on the media. A negative
 * error code is returned in case of failure.
 */
static int fallible_matches_name(struct ubifs_info *c,
				 struct ubifs_zbranch *zbr,
				 const struct qstr *nm)
{
	struct ubifs_dent_node *dent;
	int nlen, err;

	/* If possible, match against the dent in the leaf node cache */
	if (!zbr->leaf) {
		dent = kmalloc(zbr->len, GFP_NOFS);
		if (!dent)
			return -ENOMEM;

		/*
		 * 'fallible_read_node()' returns 0 when the node is missing
		 * from the media (e.g. garbage-collected before commit),
		 * which is not an error here - it maps to %NOT_ON_MEDIA.
		 */
		err = fallible_read_node(c, &zbr->key, zbr, dent);
		if (err < 0)
			goto out_free;
		if (err == 0) {
			/* The node was not present */
			err = NOT_ON_MEDIA;
			goto out_free;
		}
		ubifs_assert(err == 1);

		/* On success the LNC owns @dent - do not free it below */
		err = lnc_add_directly(c, zbr, dent);
		if (err)
			goto out_free;
	} else
		dent = zbr->leaf;

	/* Same ordering rules as 'matches_name()' */
	nlen = le16_to_cpu(dent->nlen);
	err = memcmp(dent->name, nm->name, min_t(int, nlen, nm->len));
	if (err == 0) {
		if (nlen == nm->len)
			return NAME_MATCHES;
		else if (nlen < nm->len)
			return NAME_LESS;
		else
			return NAME_GREATER;
	} else if (err < 0)
		return NAME_LESS;
	else
		return NAME_GREATER;

out_free:
	kfree(dent);
	return err;
}

/**
 * fallible_resolve_collision - resolve a collision even if nodes are missing.
 * @c: UBIFS file-system description object
 * @key: key
 * @zn: znode is returned here
 * @n: branch number is passed and returned here
 * @nm: name of directory entry
 * @adding: indicates caller is adding a key to the TNC
 *
 * This is a "fallible" version of the 'resolve_collision()' function which
 * does not panic if one of the nodes referred to by TNC does not exist on the
 * media. This may happen when replaying the journal if a deleted node was
 * Garbage-collected and the commit was not done. A branch that refers to a node
 * that is not present is called a dangling branch. The following are the return
 * codes for this function:
 *  o if @nm was found, %1 is returned and @zn and @n are set to the found
 *    branch;
 *  o if we are @adding and @nm was not found, %0 is returned;
 *  o if we are not @adding and @nm was not found, but a dangling branch was
 *    found, then %1 is returned and @zn and @n are set to the dangling branch;
 *  o a negative error code is returned in case of failure.
 */
static int fallible_resolve_collision(struct ubifs_info *c,
				      const union ubifs_key *key,
				      struct ubifs_znode **zn, int *n,
				      const struct qstr *nm, int adding)
{
	struct ubifs_znode *o_znode = NULL, *znode = *zn;
	int uninitialized_var(o_n), err, cmp, unsure = 0, nn = *n;

	cmp = fallible_matches_name(c, &znode->zbranch[nn], nm);
	if (unlikely(cmp < 0))
		return cmp;
	if (cmp == NAME_MATCHES)
		return 1;
	if (cmp == NOT_ON_MEDIA) {
		o_znode = znode;
		o_n = nn;
		/*
		 * We are unlucky and hit a dangling branch straight away.
		 * Now we do not really know where to go to find the needed
		 * branch - to the left or to the right. Well, let's try left.
		 */
		unsure = 1;
	} else if (!adding)
		unsure = 1; /* Remove a dangling branch wherever it is */

	if (cmp == NAME_GREATER || unsure) {
		/* Look left */
		while (1) {
			err = tnc_prev(c, zn, n);
			if (err == -ENOENT) {
				ubifs_assert(*n == 0);
				*n = -1;
				break;
			}
			if (err < 0)
				return err;
			if (keys_cmp(c, &(*zn)->zbranch[*n].key, key)) {
				/* See comments in 'resolve_collision()' */
				if (*n == (*zn)->child_cnt - 1) {
					err = tnc_next(c, zn, n);
					if (err) {
						/* Should be impossible */
						ubifs_assert(0);
						if (err == -ENOENT)
							err = -EINVAL;
						return err;
					}
					ubifs_assert(*n == 0);
					*n = -1;
				}
				break;
			}
			err = fallible_matches_name(c, &(*zn)->zbranch[*n], nm);
			if (err < 0)
				return err;
			if (err == NAME_MATCHES)
				return 1;
			if (err == NOT_ON_MEDIA) {
				/* Remember the last dangling branch seen */
				o_znode = *zn;
				o_n = *n;
				continue;
			}
			if (!adding)
				continue;
			if (err == NAME_LESS)
				break;
			else
				unsure = 0;
		}
	}

	if (cmp == NAME_LESS || unsure) {
		/* Look right */
		*zn = znode;
		*n = nn;
		while (1) {
			err = tnc_next(c, &znode, &nn);
			if (err == -ENOENT)
				break;
			if (err < 0)
				return err;
			if (keys_cmp(c, &znode->zbranch[nn].key, key))
				break;
			err = fallible_matches_name(c, &znode->zbranch[nn], nm);
			if (err < 0)
				return err;
			if (err == NAME_GREATER)
				break;
			*zn = znode;
			*n = nn;
			if (err == NAME_MATCHES)
				return 1;
			if (err == NOT_ON_MEDIA) {
				o_znode = znode;
				o_n = nn;
			}
		}
	}

	/* Never match a dangling branch when adding */
	if (adding || !o_znode)
		return 0;

	dbg_mntk(key, "dangling match LEB %d:%d len %d key ",
		 o_znode->zbranch[o_n].lnum, o_znode->zbranch[o_n].offs,
		 o_znode->zbranch[o_n].len);
	*zn = o_znode;
	*n = o_n;
	return 1;
}

/**
 * matches_position - determine if a zbranch matches a given position.
 * @zbr: zbranch of dent
 * @lnum: LEB number of dent to match
 * @offs: offset of dent to match
 *
 * This function returns %1 if @lnum:@offs matches, and %0 otherwise.
 */
static int matches_position(struct ubifs_zbranch *zbr, int lnum, int offs)
{
	if (zbr->lnum == lnum && zbr->offs == offs)
		return 1;
	else
		return 0;
}

/**
 * resolve_collision_directly - resolve a collision directly.
 * @c: UBIFS file-system description object
 * @key: key of directory entry
 * @zn: znode is passed and returned here
 * @n: zbranch number is passed and returned here
 * @lnum: LEB number of dent node to match
 * @offs: offset of dent node to match
 *
 * This function is used for "hashed" keys to make sure the found directory or
 * extended attribute entry node is what was looked for. It is used when the
 * flash address of the right node is known (@lnum:@offs) which makes it much
 * easier to resolve collisions (no need to read entries and match full
 * names). This function returns %1 and sets @zn and @n if the collision is
 * resolved, %0 if @lnum:@offs is not found and @zn and @n are set to the
 * previous directory entry. Otherwise a negative error code is returned.
 */
static int resolve_collision_directly(struct ubifs_info *c,
				      const union ubifs_key *key,
				      struct ubifs_znode **zn, int *n,
				      int lnum, int offs)
{
	struct ubifs_znode *znode;
	int nn, err;

	znode = *zn;
	nn = *n;
	if (matches_position(&znode->zbranch[nn], lnum, offs))
		return 1;

	/* Look left */
	while (1) {
		err = tnc_prev(c, &znode, &nn);
		if (err == -ENOENT)
			break;
		if (err < 0)
			return err;
		/* Stop once the key no longer collides */
		if (keys_cmp(c, &znode->zbranch[nn].key, key))
			break;
		if (matches_position(&znode->zbranch[nn], lnum, offs)) {
			*zn = znode;
			*n = nn;
			return 1;
		}
	}

	/* Look right, starting again from the original position */
	znode = *zn;
	nn = *n;
	while (1) {
		err = tnc_next(c, &znode, &nn);
		if (err == -ENOENT)
			return 0;
		if (err < 0)
			return err;
		if (keys_cmp(c, &znode->zbranch[nn].key, key))
			return 0;
		/* Track the previous entry for the not-found return case */
		*zn = znode;
		*n = nn;
		if (matches_position(&znode->zbranch[nn], lnum, offs))
			return 1;
	}
}

/**
 * dirty_cow_bottom_up - dirty a znode and its ancestors.
 * @c: UBIFS file-system description object
 * @znode: znode to dirty
 *
 * If we do not have a unique key that resides in a znode, then we cannot
 * dirty that znode from the top down (i.e. by using lookup_level0_dirty)
 * This function records the path back to the last dirty ancestor, and then
 * dirties the znodes on that path.
 */
static struct ubifs_znode *dirty_cow_bottom_up(struct ubifs_info *c,
					       struct ubifs_znode *znode)
{
	struct ubifs_znode *zp;
	int *path = c->bottom_up_buf, p = 0;

	ubifs_assert(c->zroot.znode);
	ubifs_assert(znode);
	/*
	 * The preallocated path buffer holds BOTTOM_UP_HEIGHT slots; for a
	 * taller tree a buffer of one slot per level is allocated instead.
	 */
	if (c->zroot.znode->level > BOTTOM_UP_HEIGHT) {
		kfree(c->bottom_up_buf);
		c->bottom_up_buf = kmalloc(c->zroot.znode->level * sizeof(int),
					   GFP_NOFS);
		if (!c->bottom_up_buf)
			return ERR_PTR(-ENOMEM);
		path = c->bottom_up_buf;
	}
	if (c->zroot.znode->level) {
		/* Go up until parent is dirty */
		while (1) {
			int n;

			zp = znode->parent;
			if (!zp)
				break;
			n = znode->iip;
			ubifs_assert(p < c->zroot.znode->level);
			/* Record the slot so we can retrace on the way down */
			path[p++] = n;
			if (!zp->cnext && ubifs_zn_dirty(znode))
				break;
			znode = zp;
		}
	}

	/* Come back down, dirtying as we go */
	while (1) {
		struct ubifs_zbranch *zbr;

		zp = znode->parent;
		if (zp) {
			ubifs_assert(path[p - 1] >= 0);
			ubifs_assert(path[p - 1] < zp->child_cnt);
			zbr = &zp->zbranch[path[--p]];
			znode = dirty_cow_znode(c, zbr);
		} else {
			ubifs_assert(znode == c->zroot.znode);
			znode = dirty_cow_znode(c, &c->zroot);
		}
		if (IS_ERR(znode) || !p)
			break;
		ubifs_assert(path[p - 1] >= 0);
		ubifs_assert(path[p - 1] < znode->child_cnt);
		znode = znode->zbranch[path[p - 1]].znode;
	}

	return znode;
}

/**
 * ubifs_lookup_level0 - search for zero-level znode.
 * @c: UBIFS file-system description object
 * @key: key to lookup
 * @zn: znode is returned here
 * @n: znode branch slot number is returned here
 *
 * This function looks up the TNC tree and search for zero-level znode which
 * refers key @key. The found zero-level znode is returned in @zn. There are 3
 * cases:
 *   o exact match, i.e.
the found zero-level znode contains key @key, then %1
 *     is returned and slot number of the matched branch is stored in @n;
 *   o not exact match, which means that zero-level znode does not contain
 *     @key, then %0 is returned and slot number of the closest branch is stored
 *     in @n;
 *   o @key is so small that it is even less than the lowest key of the
 *     leftmost zero-level node, then %0 is returned and %0 is stored in @n.
 *
 * Note, when the TNC tree is traversed, some znodes may be absent, then this
 * function reads corresponding indexing nodes and inserts them to TNC. In
 * case of failure, a negative error code is returned.
 */
int ubifs_lookup_level0(struct ubifs_info *c, const union ubifs_key *key,
			struct ubifs_znode **zn, int *n)
{
	int err, exact;
	struct ubifs_znode *znode;
	unsigned long time = get_seconds();

	dbg_tnck(key, "search key ");
	ubifs_assert(key_type(c, key) < UBIFS_INVALID_KEY);

	znode = c->zroot.znode;
	if (unlikely(!znode)) {
		/* The root is not cached yet - read it from the media */
		znode = ubifs_load_znode(c, &c->zroot, NULL, 0);
		if (IS_ERR(znode))
			return PTR_ERR(znode);
	}

	znode->time = time;

	while (1) {
		struct ubifs_zbranch *zbr;

		exact = ubifs_search_zbranch(c, znode, key, n);

		if (znode->level == 0)
			break;

		if (*n < 0)
			*n = 0;
		zbr = &znode->zbranch[*n];

		if (zbr->znode) {
			znode->time = time;
			znode = zbr->znode;
			continue;
		}

		/* znode is not in TNC cache, load it from the media */
		znode = ubifs_load_znode(c, zbr, znode, *n);
		if (IS_ERR(znode))
			return PTR_ERR(znode);
	}

	*zn = znode;
	if (exact || !is_hash_key(c, key) || *n != -1) {
		dbg_tnc("found %d, lvl %d, n %d", exact, znode->level, *n);
		return exact;
	}

	/*
	 * Here is a tricky place. We have not found the key and this is a
	 * "hashed" key, which may collide. The rest of the code deals with
	 * situations like this:
	 *
	 *                  | 3 | 5 |
	 *                  /       \
	 *          | 3 | 5 |      | 6 | 7 | (x)
	 *
	 * Or more a complex example:
	 *
	 *                | 1 | 5 |
	 *                /       \
	 *       | 1 | 3 |         | 5 | 8 |
	 *              \           /
	 *          | 5 | 5 |   | 6 | 7 | (x)
	 *
	 * In the examples, if we are looking for key "5", we may reach nodes
	 * marked with "(x)". In this case what we have do is to look at the
	 * left and see if there is "5" key there. If there is, we have to
	 * return it.
	 *
	 * Note, this whole situation is possible because we allow to have
	 * elements which are equivalent to the next key in the parent in the
	 * children of current znode. For example, this happens if we split a
	 * znode like this: | 3 | 5 | 5 | 6 | 7 |, which results in something
	 * like this:
	 *                      | 3 | 5 |
	 *                       /     \
	 *                | 3 | 5 |   | 5 | 6 | 7 |
	 *                              ^
	 * And this becomes what is at the first "picture" after key "5" marked
	 * with "^" is removed. What could be done is we could prohibit
	 * splitting in the middle of the colliding sequence. Also, when
	 * removing the leftmost key, we would have to correct the key of the
	 * parent node, which would introduce additional complications. Namely,
	 * if we changed the leftmost key of the parent znode, the garbage
	 * collector would be unable to find it (GC is doing this when GC'ing
	 * indexing LEBs). Although we already have an additional RB-tree where
	 * we save such changed znodes (see 'ins_clr_old_idx_znode()') until
	 * after the commit. But anyway, this does not look easy to implement
	 * so we did not try this.
	 */
	err = tnc_prev(c, &znode, n);
	if (err == -ENOENT) {
		dbg_tnc("found 0, lvl %d, n -1", znode->level);
		*n = -1;
		return 0;
	}
	if (unlikely(err < 0))
		return err;
	if (keys_cmp(c, key, &znode->zbranch[*n].key)) {
		/* The entry to the left has a different key - no collision */
		dbg_tnc("found 0, lvl %d, n -1", znode->level);
		*n = -1;
		return 0;
	}
	dbg_tnc("found 1, lvl %d, n %d", znode->level, *n);
	*zn = znode;
	return 1;
}

/**
 * lookup_level0_dirty - search for zero-level znode dirtying.
 * @c: UBIFS file-system description object
 * @key: key to lookup
 * @zn: znode is returned here
 * @n: znode branch slot number is returned here
 *
 * This function looks up the TNC tree and search for zero-level znode which
 * refers key @key. The found zero-level znode is returned in @zn. There are 3
 * cases:
 *   o exact match, i.e.
the found zero-level znode contains key @key, then %1
 *     is returned and slot number of the matched branch is stored in @n;
 *   o not exact match, which means that zero-level znode does not contain @key
 *     then %0 is returned and slot number of the closed branch is stored in
 *     @n;
 *   o @key is so small that it is even less than the lowest key of the
 *     leftmost zero-level node, then %0 is returned and %-1 is stored in @n.
 *
 * Additionally all znodes in the path from the root to the located zero-level
 * znode are marked as dirty.
 *
 * Note, when the TNC tree is traversed, some znodes may be absent, then this
 * function reads corresponding indexing nodes and inserts them to TNC. In
 * case of failure, a negative error code is returned.
 */
static int lookup_level0_dirty(struct ubifs_info *c, const union ubifs_key *key,
			       struct ubifs_znode **zn, int *n)
{
	int err, exact;
	struct ubifs_znode *znode;
	unsigned long time = get_seconds();

	dbg_tnck(key, "search and dirty key ");

	znode = c->zroot.znode;
	if (unlikely(!znode)) {
		znode = ubifs_load_znode(c, &c->zroot, NULL, 0);
		if (IS_ERR(znode))
			return PTR_ERR(znode);
	}

	/* COW the root first so the whole descent is on dirty znodes */
	znode = dirty_cow_znode(c, &c->zroot);
	if (IS_ERR(znode))
		return PTR_ERR(znode);

	znode->time = time;

	while (1) {
		struct ubifs_zbranch *zbr;

		exact = ubifs_search_zbranch(c, znode, key, n);

		if (znode->level == 0)
			break;

		if (*n < 0)
			*n = 0;
		zbr = &znode->zbranch[*n];

		if (zbr->znode) {
			znode->time = time;
			znode = dirty_cow_znode(c, zbr);
			if (IS_ERR(znode))
				return PTR_ERR(znode);
			continue;
		}

		/* znode is not in TNC cache, load it from the media */
		znode = ubifs_load_znode(c, zbr, znode, *n);
		if (IS_ERR(znode))
			return PTR_ERR(znode);
		znode = dirty_cow_znode(c, zbr);
		if (IS_ERR(znode))
			return PTR_ERR(znode);
	}

	*zn = znode;
	if (exact || !is_hash_key(c, key) || *n != -1) {
		dbg_tnc("found %d, lvl %d, n %d", exact, znode->level, *n);
		return exact;
	}

	/*
	 * See huge comment in 'ubifs_lookup_level0()' what is the rest of the
	 * code.
	 */
	err = tnc_prev(c, &znode, n);
	if (err == -ENOENT) {
		*n = -1;
		dbg_tnc("found 0, lvl %d, n -1", znode->level);
		return 0;
	}
	if (unlikely(err < 0))
		return err;
	if (keys_cmp(c, key, &znode->zbranch[*n].key)) {
		*n = -1;
		dbg_tnc("found 0, lvl %d, n -1", znode->level);
		return 0;
	}

	/*
	 * 'tnc_prev()' may have moved us sideways to a znode that the
	 * top-down COW walk above did not dirty - dirty it bottom-up.
	 */
	if (znode->cnext || !ubifs_zn_dirty(znode)) {
		znode = dirty_cow_bottom_up(c, znode);
		if (IS_ERR(znode))
			return PTR_ERR(znode);
	}

	dbg_tnc("found 1, lvl %d, n %d", znode->level, *n);
	*zn = znode;
	return 1;
}

/**
 * maybe_leb_gced - determine if a LEB may have been garbage collected.
 * @c: UBIFS file-system description object
 * @lnum: LEB number
 * @gc_seq1: garbage collection sequence number
 *
 * This function determines if @lnum may have been garbage collected since
 * sequence number @gc_seq1. If it may have been then %1 is returned, otherwise
 * %0 is returned.
 */
static int maybe_leb_gced(struct ubifs_info *c, int lnum, int gc_seq1)
{
	int gc_seq2, gced_lnum;

	gced_lnum = c->gced_lnum;
	smp_rmb();
	gc_seq2 = c->gc_seq;
	/* Same seq means no GC */
	if (gc_seq1 == gc_seq2)
		return 0;
	/* Different by more than 1 means we don't know */
	if (gc_seq1 + 1 != gc_seq2)
		return 1;
	/*
	 * We have seen the sequence number has increased by 1. Now we need to
	 * be sure we read the right LEB number, so read it again.
	 */
	smp_rmb();
	if (gced_lnum != c->gced_lnum)
		return 1;
	/* Finally we can check lnum */
	if (gced_lnum == lnum)
		return 1;
	return 0;
}

/**
 * ubifs_tnc_locate - look up a file-system node and return it and its location.
 * @c: UBIFS file-system description object
 * @key: node key to lookup
 * @node: the node is returned here
 * @lnum: LEB number is returned here
 * @offs: offset is returned here
 *
 * This function looks up and reads node with key @key. The caller has to make
 * sure the @node buffer is large enough to fit the node. Returns zero in case
 * of success, %-ENOENT if the node was not found, and a negative error code in
 * case of failure. The node location can be returned in @lnum and @offs.
 */
int ubifs_tnc_locate(struct ubifs_info *c, const union ubifs_key *key,
		     void *node, int *lnum, int *offs)
{
	int found, n, err, safely = 0, gc_seq1;
	struct ubifs_znode *znode;
	struct ubifs_zbranch zbr, *zt;

again:
	mutex_lock(&c->tnc_mutex);
	found = ubifs_lookup_level0(c, key, &znode, &n);
	if (!found) {
		err = -ENOENT;
		goto out;
	} else if (found < 0) {
		err = found;
		goto out;
	}
	zt = &znode->zbranch[n];
	if (lnum) {
		*lnum = zt->lnum;
		*offs = zt->offs;
	}
	if (is_hash_key(c, key)) {
		/*
		 * In this case the leaf node cache gets used, so we pass the
		 * address of the zbranch and keep the mutex locked
		 */
		err = tnc_read_node_nm(c, zt, node);
		goto out;
	}
	if (safely) {
		/* Second pass (after a GC race): read under the mutex */
		err = ubifs_tnc_read_node(c, zt, node);
		goto out;
	}
	/* Drop the TNC mutex prematurely and race with garbage collection */
	zbr = znode->zbranch[n];
	gc_seq1 = c->gc_seq;
	mutex_unlock(&c->tnc_mutex);

	if (ubifs_get_wbuf(c, zbr.lnum)) {
		/* We do not GC journal heads */
		err = ubifs_tnc_read_node(c, &zbr, node);
		return err;
	}

	err = fallible_read_node(c, key, &zbr, node);
	if (err <= 0 || maybe_leb_gced(c, zbr.lnum, gc_seq1)) {
		/*
		 * The node may have been GC'ed out from under us so try again
		 * while keeping the TNC mutex locked.
		 */
		safely = 1;
		goto again;
	}
	return 0;

out:
	mutex_unlock(&c->tnc_mutex);
	return err;
}

/**
 * ubifs_tnc_get_bu_keys - lookup keys for bulk-read.
 * @c: UBIFS file-system description object
 * @bu: bulk-read parameters and results
 *
 * Lookup consecutive data node keys for the same inode that reside
 * consecutively in the same LEB. This function returns zero in case of success
 * and a negative error code in case of failure.
 *
 * Note, if the bulk-read buffer length (@bu->buf_len) is known, this function
 * makes sure bulk-read nodes fit the buffer. Otherwise, this function prepares
 * maximum possible amount of nodes for bulk-read.
 */
int ubifs_tnc_get_bu_keys(struct ubifs_info *c, struct bu_info *bu)
{
	int n, err = 0, lnum = -1, uninitialized_var(offs);
	int uninitialized_var(len);
	unsigned int block = key_block(c, &bu->key);
	struct ubifs_znode *znode;

	bu->cnt = 0;
	bu->blk_cnt = 0;
	bu->eof = 0;

	mutex_lock(&c->tnc_mutex);
	/* Find first key */
	err = ubifs_lookup_level0(c, &bu->key, &znode, &n);
	if (err < 0)
		goto out;
	if (err) {
		/* Key found */
		len = znode->zbranch[n].len;
		/* The buffer must be big enough for at least 1 node */
		if (len > bu->buf_len) {
			err = -EINVAL;
			goto out;
		}
		/* Add this key */
		bu->zbranch[bu->cnt++] = znode->zbranch[n];
		bu->blk_cnt += 1;
		lnum = znode->zbranch[n].lnum;
		offs = ALIGN(znode->zbranch[n].offs + len, 8);
	}
	while (1) {
		struct ubifs_zbranch *zbr;
		union ubifs_key *key;
		unsigned int next_block;

		/* Find next key */
		err = tnc_next(c, &znode, &n);
		if (err)
			goto out;
		zbr = &znode->zbranch[n];
		key = &zbr->key;
		/* See if there is another data key for this file */
		if (key_inum(c, key) != key_inum(c, &bu->key) ||
		    key_type(c, key) != UBIFS_DATA_KEY) {
			err = -ENOENT;
			goto out;
		}
		if (lnum < 0) {
			/* First key found */
			lnum = zbr->lnum;
			offs = ALIGN(zbr->offs + zbr->len, 8);
			len = zbr->len;
			if (len > bu->buf_len) {
				err = -EINVAL;
				goto out;
			}
		} else {
			/*
			 * The data nodes must be in consecutive positions in
			 * the same LEB.
			 */
			if (zbr->lnum != lnum || zbr->offs != offs)
				goto out;
			offs += ALIGN(zbr->len, 8);
			len = ALIGN(len, 8) + zbr->len;
			/* Must not exceed buffer length */
			if (len > bu->buf_len)
				goto out;
		}
		/* Allow for holes */
		next_block = key_block(c, key);
		bu->blk_cnt += (next_block - block - 1);
		if (bu->blk_cnt >= UBIFS_MAX_BULK_READ)
			goto out;
		block = next_block;
		/* Add this key */
		bu->zbranch[bu->cnt++] = *zbr;
		bu->blk_cnt += 1;
		/* See if we have room for more */
		if (bu->cnt >= UBIFS_MAX_BULK_READ)
			goto out;
		if (bu->blk_cnt >= UBIFS_MAX_BULK_READ)
			goto out;
	}
out:
	if (err == -ENOENT) {
		/* Running off the end of the data keys means end-of-file */
		bu->eof = 1;
		err = 0;
	}
	bu->gc_seq = c->gc_seq;
	mutex_unlock(&c->tnc_mutex);
	if (err)
		return err;
	/*
	 * An enormous hole could cause bulk-read to encompass too many
	 * page cache pages, so limit the number here.
	 */
	if (bu->blk_cnt > UBIFS_MAX_BULK_READ)
		bu->blk_cnt = UBIFS_MAX_BULK_READ;
	/*
	 * Ensure that bulk-read covers a whole number of page cache
	 * pages.
	 */
	if (UBIFS_BLOCKS_PER_PAGE == 1 ||
	    !(bu->blk_cnt & (UBIFS_BLOCKS_PER_PAGE - 1)))
		return 0;
	if (bu->eof) {
		/* At the end of file we can round up */
		bu->blk_cnt += UBIFS_BLOCKS_PER_PAGE - 1;
		return 0;
	}
	/* Exclude data nodes that do not make up a whole page cache page */
	block = key_block(c, &bu->key) + bu->blk_cnt;
	block &= ~(UBIFS_BLOCKS_PER_PAGE - 1);
	while (bu->cnt) {
		if (key_block(c, &bu->zbranch[bu->cnt - 1].key) < block)
			break;
		bu->cnt -= 1;
	}
	return 0;
}

/**
 * read_wbuf - bulk-read from a LEB with a wbuf.
 * @wbuf: wbuf that may overlap the read
 * @buf: buffer into which to read
 * @len: read length
 * @lnum: LEB number from which to read
 * @offs: offset from which to read
 *
 * This functions returns %0 on success or a negative error code on failure.
 */
static int read_wbuf(struct ubifs_wbuf *wbuf, void *buf, int len, int lnum,
		     int offs)
{
	const struct ubifs_info *c = wbuf->c;
	int rlen, overlap;

	dbg_io("LEB %d:%d, length %d", lnum, offs, len);
	ubifs_assert(wbuf && lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
	ubifs_assert(!(offs & 7) && offs < c->leb_size);
	ubifs_assert(offs + len <= c->leb_size);

	spin_lock(&wbuf->lock);
	overlap = (lnum == wbuf->lnum && offs + len > wbuf->offs);
	if (!overlap) {
		/* We may safely unlock the write-buffer and read the data */
		spin_unlock(&wbuf->lock);
		return ubifs_leb_read(c, lnum, buf, offs, len, 0);
	}

	/* Don't read under wbuf */
	rlen = wbuf->offs - offs;
	if (rlen < 0)
		rlen = 0;

	/* Copy the rest from the write-buffer while still holding its lock */
	memcpy(buf + rlen, wbuf->buf + offs + rlen - wbuf->offs, len - rlen);
	spin_unlock(&wbuf->lock);

	if (rlen > 0)
		/* Read everything that goes before write-buffer */
		return ubifs_leb_read(c, lnum, buf, offs, rlen, 0);

	return 0;
}

/**
 * validate_data_node - validate data nodes for bulk-read.
 * @c: UBIFS file-system description object
 * @buf: buffer containing data node to validate
 * @zbr: zbranch of data node to validate
 *
 * This functions returns %0 on success or a negative error code on failure.
 */
static int validate_data_node(struct ubifs_info *c, void *buf,
			      struct ubifs_zbranch *zbr)
{
	union ubifs_key key1;
	struct ubifs_ch *ch = buf;
	int err, len;

	if (ch->node_type != UBIFS_DATA_NODE) {
		ubifs_err("bad node type (%d but expected %d)",
			  ch->node_type, UBIFS_DATA_NODE);
		goto out_err;
	}

	/* Verify the common header (magic, CRC, etc.) */
	err = ubifs_check_node(c, buf, zbr->lnum, zbr->offs, 0, 0);
	if (err) {
		ubifs_err("expected node type %d", UBIFS_DATA_NODE);
		goto out;
	}

	len = le32_to_cpu(ch->len);
	if (len != zbr->len) {
		ubifs_err("bad node length %d, expected %d", len, zbr->len);
		goto out_err;
	}

	/* Make sure the key of the read node is correct */
	key_read(c, buf + UBIFS_KEY_OFFSET, &key1);
	if (!keys_eq(c, &zbr->key, &key1)) {
		ubifs_err("bad key in node at LEB %d:%d",
			  zbr->lnum, zbr->offs);
		dbg_tnck(&zbr->key, "looked for key ");
		dbg_tnck(&key1, "found node's key ");
		goto out_err;
	}

	return 0;

out_err:
	err = -EINVAL;
out:
	ubifs_err("bad node at LEB %d:%d", zbr->lnum, zbr->offs);
	dbg_dump_node(c, buf);
	dbg_dump_stack();
	return err;
}

/**
 * ubifs_tnc_bulk_read - read a number of data nodes in one go.
 * @c: UBIFS file-system description object
 * @bu: bulk-read parameters and results
 *
 * This functions reads and validates the data nodes that were identified by the
 * 'ubifs_tnc_get_bu_keys()' function. This functions returns %0 on success,
 * -EAGAIN to indicate a race with GC, or another negative error code on
 * failure.
 */
int ubifs_tnc_bulk_read(struct ubifs_info *c, struct bu_info *bu)
{
	int lnum = bu->zbranch[0].lnum, offs = bu->zbranch[0].offs, len, err, i;
	struct ubifs_wbuf *wbuf;
	void *buf;

	/* Total span: from the first node's offset to the end of the last */
	len = bu->zbranch[bu->cnt - 1].offs;
	len += bu->zbranch[bu->cnt - 1].len - offs;
	if (len > bu->buf_len) {
		ubifs_err("buffer too small %d vs %d", bu->buf_len, len);
		return -EINVAL;
	}

	/* Do the read */
	wbuf = ubifs_get_wbuf(c, lnum);
	if (wbuf)
		err = read_wbuf(wbuf, bu->buf, len, lnum, offs);
	else
		err = ubifs_leb_read(c, lnum, bu->buf, offs, len, 0);

	/* Check for a race with GC */
	if (maybe_leb_gced(c, lnum, bu->gc_seq))
		return -EAGAIN;

	if (err && err != -EBADMSG) {
		ubifs_err("failed to read from LEB %d:%d, error %d",
			  lnum, offs, err);
		dbg_dump_stack();
		dbg_tnck(&bu->key, "key ");
		return err;
	}

	/* Validate the nodes read */
	buf = bu->buf;
	for (i = 0; i < bu->cnt; i++) {
		err = validate_data_node(c, buf, &bu->zbranch[i]);
		if (err)
			return err;
		buf = buf + ALIGN(bu->zbranch[i].len, 8);
	}

	return 0;
}

/**
 * do_lookup_nm - look up a "hashed" node.
 * @c: UBIFS file-system description object
 * @key: node key to lookup
 * @node: the node is returned here
 * @nm: node name
 *
 * This function look up and reads a node which contains name hash in the key.
 * Since the hash may have collisions, there may be many nodes with the same
 * key, so we have to sequentially look to all of them until the needed one is
 * found. This function returns zero in case of success, %-ENOENT if the node
 * was not found, and a negative error code in case of failure.
 */
static int do_lookup_nm(struct ubifs_info *c, const union ubifs_key *key,
			void *node, const struct qstr *nm)
{
	int found, n, err;
	struct ubifs_znode *znode;

	dbg_tnck(key, "name '%.*s' key ", nm->len, nm->name);
	mutex_lock(&c->tnc_mutex);
	found = ubifs_lookup_level0(c, key, &znode, &n);
	if (!found) {
		err = -ENOENT;
		goto out_unlock;
	} else if (found < 0) {
		err = found;
		goto out_unlock;
	}

	ubifs_assert(n >= 0);

	/* Scan left/right neighbours with the same key for the real match */
	err = resolve_collision(c, key, &znode, &n, nm);
	dbg_tnc("rc returned %d, znode %p, n %d", err, znode, n);
	if (unlikely(err < 0))
		goto out_unlock;
	if (err == 0) {
		err = -ENOENT;
		goto out_unlock;
	}

	err = tnc_read_node_nm(c, &znode->zbranch[n], node);

out_unlock:
	mutex_unlock(&c->tnc_mutex);
	return err;
}

/**
 * ubifs_tnc_lookup_nm - look up a "hashed" node.
 * @c: UBIFS file-system description object
 * @key: node key to lookup
 * @node: the node is returned here
 * @nm: node name
 *
 * This function look up and reads a node which contains name hash in the key.
 * Since the hash may have collisions, there may be many nodes with the same
 * key, so we have to sequentially look to all of them until the needed one is
 * found. This function returns zero in case of success, %-ENOENT if the node
 * was not found, and a negative error code in case of failure.
 */
int ubifs_tnc_lookup_nm(struct ubifs_info *c, const union ubifs_key *key,
			void *node, const struct qstr *nm)
{
	int err, len;
	const struct ubifs_dent_node *dent = node;

	/*
	 * We assume that in most of the cases there are no name collisions and
	 * 'ubifs_tnc_lookup()' returns us the right direntry.
	 */
	err = ubifs_tnc_lookup(c, key, node);
	if (err)
		return err;

	len = le16_to_cpu(dent->nlen);
	if (nm->len == len && !memcmp(dent->name, nm->name, len))
		return 0;

	/*
	 * Unluckily, there are hash collisions and we have to iterate over
	 * them look at each direntry with colliding name hash sequentially.
	 */
	return do_lookup_nm(c, key, node, nm);
}

/**
 * correct_parent_keys - correct parent znodes' keys.
 * @c: UBIFS file-system description object
 * @znode: znode to correct parent znodes for
 *
 * This is a helper function for 'tnc_insert()'. When the key of the leftmost
 * zbranch changes, keys of parent znodes have to be corrected. This helper
 * function is called in such situations and corrects the keys if needed.
 */
static void correct_parent_keys(const struct ubifs_info *c,
				struct ubifs_znode *znode)
{
	union ubifs_key *key, *key1;

	ubifs_assert(znode->parent);
	ubifs_assert(znode->iip == 0);

	key = &znode->zbranch[0].key;
	key1 = &znode->parent->zbranch[0].key;

	/* Propagate the smaller key upwards while it undercuts the parent */
	while (keys_cmp(c, key, key1) < 0) {
		key_copy(c, key, key1);
		znode = znode->parent;
		znode->alt = 1;
		/* Stop at the root, or when no longer the leftmost child */
		if (!znode->parent || znode->iip)
			break;
		key1 = &znode->parent->zbranch[0].key;
	}
}

/**
 * insert_zbranch - insert a zbranch into a znode.
 * @znode: znode into which to insert
 * @zbr: zbranch to insert
 * @n: slot number to insert to
 *
 * This is a helper function for 'tnc_insert()'. UBIFS does not allow "gaps" in
 * znode's array of zbranches and keeps zbranches consolidated, so when a new
 * zbranch has to be inserted to the @znode->zbranches[]' array at the @n-th
 * slot, zbranches starting from @n have to be moved right.
 */
static void insert_zbranch(struct ubifs_znode *znode,
			   const struct ubifs_zbranch *zbr, int n)
{
	int i;

	ubifs_assert(ubifs_zn_dirty(znode));

	if (znode->level) {
		/*
		 * Non-leaf level: shifted children change slot, so their
		 * 'iip' (index in parent) must be kept in sync.
		 */
		for (i = znode->child_cnt; i > n; i--) {
			znode->zbranch[i] = znode->zbranch[i - 1];
			if (znode->zbranch[i].znode)
				znode->zbranch[i].znode->iip = i;
		}
		if (zbr->znode)
			zbr->znode->iip = n;
	} else
		for (i = znode->child_cnt; i > n; i--)
			znode->zbranch[i] = znode->zbranch[i - 1];

	znode->zbranch[n] = *zbr;
	znode->child_cnt += 1;

	/*
	 * After inserting at slot zero, the lower bound of the key range of
	 * this znode may have changed. If this znode is subsequently split
	 * then the upper bound of the key range may change, and furthermore
	 * it could change to be lower than the original lower bound.
If that * happens, then it will no longer be possible to find this znode in the * TNC using the key from the index node on flash. That is bad because * if it is not found, we will assume it is obsolete and may overwrite * it. Then if there is an unclean unmount, we will start using the * old index which will be broken. * * So we first mark znodes that have insertions at slot zero, and then * if they are split we add their lnum/offs to the old_idx tree. */ if (n == 0) znode->alt = 1; } /** * tnc_insert - insert a node into TNC. * @c: UBIFS file-system description object * @znode: znode to insert into * @zbr: branch to insert * @n: slot number to insert new zbranch to * * This function inserts a new node described by @zbr into znode @znode. If * znode does not have a free slot for new zbranch, it is split. Parent znodes * are splat as well if needed. Returns zero in case of success or a negative * error code in case of failure. */ static int tnc_insert(struct ubifs_info *c, struct ubifs_znode *znode, struct ubifs_zbranch *zbr, int n) { struct ubifs_znode *zn, *zi, *zp; int i, keep, move, appending = 0; union ubifs_key *key = &zbr->key, *key1; ubifs_assert(n >= 0 && n <= c->fanout); /* Implement naive insert for now */ again: zp = znode->parent; if (znode->child_cnt < c->fanout) { ubifs_assert(n != c->fanout); dbg_tnck(key, "inserted at %d level %d, key ", n, znode->level); insert_zbranch(znode, zbr, n); /* Ensure parent's key is correct */ if (n == 0 && zp && znode->iip == 0) correct_parent_keys(c, znode); return 0; } /* * Unfortunately, @znode does not have more empty slots and we have to * split it. */ dbg_tnck(key, "splitting level %d, key ", znode->level); if (znode->alt) /* * We can no longer be sure of finding this znode by key, so we * record it in the old_idx tree. 
*/ ins_clr_old_idx_znode(c, znode); zn = kzalloc(c->max_znode_sz, GFP_NOFS); if (!zn) return -ENOMEM; zn->parent = zp; zn->level = znode->level; /* Decide where to split */ if (znode->level == 0 && key_type(c, key) == UBIFS_DATA_KEY) { /* Try not to split consecutive data keys */ if (n == c->fanout) { key1 = &znode->zbranch[n - 1].key; if (key_inum(c, key1) == key_inum(c, key) && key_type(c, key1) == UBIFS_DATA_KEY) appending = 1; } else goto check_split; } else if (appending && n != c->fanout) { /* Try not to split consecutive data keys */ appending = 0; check_split: if (n >= (c->fanout + 1) / 2) { key1 = &znode->zbranch[0].key; if (key_inum(c, key1) == key_inum(c, key) && key_type(c, key1) == UBIFS_DATA_KEY) { key1 = &znode->zbranch[n].key; if (key_inum(c, key1) != key_inum(c, key) || key_type(c, key1) != UBIFS_DATA_KEY) { keep = n; move = c->fanout - keep; zi = znode; goto do_split; } } } } if (appending) { keep = c->fanout; move = 0; } else { keep = (c->fanout + 1) / 2; move = c->fanout - keep; } /* * Although we don't at present, we could look at the neighbors and see * if we can move some zbranches there. 
*/ if (n < keep) { /* Insert into existing znode */ zi = znode; move += 1; keep -= 1; } else { /* Insert into new znode */ zi = zn; n -= keep; /* Re-parent */ if (zn->level != 0) zbr->znode->parent = zn; } do_split: __set_bit(DIRTY_ZNODE, &zn->flags); atomic_long_inc(&c->dirty_zn_cnt); zn->child_cnt = move; znode->child_cnt = keep; dbg_tnc("moving %d, keeping %d", move, keep); /* Move zbranch */ for (i = 0; i < move; i++) { zn->zbranch[i] = znode->zbranch[keep + i]; /* Re-parent */ if (zn->level != 0) if (zn->zbranch[i].znode) { zn->zbranch[i].znode->parent = zn; zn->zbranch[i].znode->iip = i; } } /* Insert new key and branch */ dbg_tnck(key, "inserting at %d level %d, key ", n, zn->level); insert_zbranch(zi, zbr, n); /* Insert new znode (produced by spitting) into the parent */ if (zp) { if (n == 0 && zi == znode && znode->iip == 0) correct_parent_keys(c, znode); /* Locate insertion point */ n = znode->iip + 1; /* Tail recursion */ zbr->key = zn->zbranch[0].key; zbr->znode = zn; zbr->lnum = 0; zbr->offs = 0; zbr->len = 0; znode = zp; goto again; } /* We have to split root znode */ dbg_tnc("creating new zroot at level %d", znode->level + 1); zi = kzalloc(c->max_znode_sz, GFP_NOFS); if (!zi) return -ENOMEM; zi->child_cnt = 2; zi->level = znode->level + 1; __set_bit(DIRTY_ZNODE, &zi->flags); atomic_long_inc(&c->dirty_zn_cnt); zi->zbranch[0].key = znode->zbranch[0].key; zi->zbranch[0].znode = znode; zi->zbranch[0].lnum = c->zroot.lnum; zi->zbranch[0].offs = c->zroot.offs; zi->zbranch[0].len = c->zroot.len; zi->zbranch[1].key = zn->zbranch[0].key; zi->zbranch[1].znode = zn; c->zroot.lnum = 0; c->zroot.offs = 0; c->zroot.len = 0; c->zroot.znode = zi; zn->parent = zi; zn->iip = 1; znode->parent = zi; znode->iip = 0; return 0; } /** * ubifs_tnc_add - add a node to TNC. * @c: UBIFS file-system description object * @key: key to add * @lnum: LEB number of node * @offs: node offset * @len: node length * * This function adds a node with key @key to TNC. 
 * The node may be new or it may
 * obsolete some existing one. Returns %0 on success or negative error code on
 * failure.
 */
int ubifs_tnc_add(struct ubifs_info *c, const union ubifs_key *key, int lnum,
		  int offs, int len)
{
	int found, n, err = 0;
	struct ubifs_znode *znode;

	mutex_lock(&c->tnc_mutex);
	dbg_tnck(key, "%d:%d, len %d, key ", lnum, offs, len);
	found = lookup_level0_dirty(c, key, &znode, &n);
	if (!found) {
		/* New key — insert a fresh zbranch after slot @n */
		struct ubifs_zbranch zbr;

		zbr.znode = NULL;
		zbr.lnum = lnum;
		zbr.offs = offs;
		zbr.len = len;
		key_copy(c, key, &zbr.key);
		err = tnc_insert(c, znode, &zbr, n + 1);
	} else if (found == 1) {
		/* Key exists — old on-flash node becomes dirt, re-point it */
		struct ubifs_zbranch *zbr = &znode->zbranch[n];

		lnc_free(zbr);
		err = ubifs_add_dirt(c, zbr->lnum, zbr->len);
		zbr->lnum = lnum;
		zbr->offs = offs;
		zbr->len = len;
	} else
		err = found;
	if (!err)
		err = dbg_check_tnc(c, 0);
	mutex_unlock(&c->tnc_mutex);

	return err;
}

/**
 * ubifs_tnc_replace - replace a node in the TNC only if the old node is found.
 * @c: UBIFS file-system description object
 * @key: key to add
 * @old_lnum: LEB number of old node
 * @old_offs: old node offset
 * @lnum: LEB number of node
 * @offs: node offset
 * @len: node length
 *
 * This function replaces a node with key @key in the TNC only if the old node
 * is found. This function is called by garbage collection when nodes are
 * moved. Returns %0 on success or negative error code on failure.
 */
int ubifs_tnc_replace(struct ubifs_info *c, const union ubifs_key *key,
		      int old_lnum, int old_offs, int lnum, int offs, int len)
{
	int found, n, err = 0;
	struct ubifs_znode *znode;

	mutex_lock(&c->tnc_mutex);
	dbg_tnck(key, "old LEB %d:%d, new LEB %d:%d, len %d, key ", old_lnum,
		 old_offs, lnum, offs, len);
	found = lookup_level0_dirty(c, key, &znode, &n);
	if (found < 0) {
		err = found;
		goto out_unlock;
	}

	if (found == 1) {
		struct ubifs_zbranch *zbr = &znode->zbranch[n];

		found = 0;
		if (zbr->lnum == old_lnum && zbr->offs == old_offs) {
			/* Exact old position matched — replace in place */
			lnc_free(zbr);
			err = ubifs_add_dirt(c, zbr->lnum, zbr->len);
			if (err)
				goto out_unlock;
			zbr->lnum = lnum;
			zbr->offs = offs;
			zbr->len = len;
			found = 1;
		} else if (is_hash_key(c, key)) {
			/*
			 * Hashed keys collide; search colliding entries by the
			 * old address instead of by name.
			 */
			found = resolve_collision_directly(c, key, &znode, &n,
							   old_lnum, old_offs);
			dbg_tnc("rc returned %d, znode %p, n %d, LEB %d:%d",
				found, znode, n, old_lnum, old_offs);
			if (found < 0) {
				err = found;
				goto out_unlock;
			}

			if (found) {
				/* Ensure the znode is dirtied */
				if (znode->cnext || !ubifs_zn_dirty(znode)) {
					znode = dirty_cow_bottom_up(c, znode);
					if (IS_ERR(znode)) {
						err = PTR_ERR(znode);
						goto out_unlock;
					}
				}
				zbr = &znode->zbranch[n];
				lnc_free(zbr);
				err = ubifs_add_dirt(c, zbr->lnum,
						     zbr->len);
				if (err)
					goto out_unlock;
				zbr->lnum = lnum;
				zbr->offs = offs;
				zbr->len = len;
			}
		}
	}

	if (!found)
		/* Old node not referenced — the moved copy is dirt */
		err = ubifs_add_dirt(c, lnum, len);

	if (!err)
		err = dbg_check_tnc(c, 0);

out_unlock:
	mutex_unlock(&c->tnc_mutex);
	return err;
}

/**
 * ubifs_tnc_add_nm - add a "hashed" node to TNC.
 * @c: UBIFS file-system description object
 * @key: key to add
 * @lnum: LEB number of node
 * @offs: node offset
 * @len: node length
 * @nm: node name
 *
 * This is the same as 'ubifs_tnc_add()' but it should be used with keys which
 * may have collisions, like directory entry keys.
 */
int ubifs_tnc_add_nm(struct ubifs_info *c, const union ubifs_key *key,
		     int lnum, int offs, int len, const struct qstr *nm)
{
	int found, n, err = 0;
	struct ubifs_znode *znode;

	mutex_lock(&c->tnc_mutex);
	dbg_tnck(key, "LEB %d:%d, name '%.*s', key ",
		 lnum, offs, nm->len, nm->name);
	found = lookup_level0_dirty(c, key, &znode, &n);
	if (found < 0) {
		err = found;
		goto out_unlock;
	}

	if (found == 1) {
		/* Hash matched — resolve by name among colliding entries */
		if (c->replaying)
			found = fallible_resolve_collision(c, key, &znode, &n,
							   nm, 1);
		else
			found = resolve_collision(c, key, &znode, &n, nm);
		dbg_tnc("rc returned %d, znode %p, n %d", found, znode, n);
		if (found < 0) {
			err = found;
			goto out_unlock;
		}

		/* Ensure the znode is dirtied */
		if (znode->cnext || !ubifs_zn_dirty(znode)) {
			znode = dirty_cow_bottom_up(c, znode);
			if (IS_ERR(znode)) {
				err = PTR_ERR(znode);
				goto out_unlock;
			}
		}

		if (found == 1) {
			/* Same name exists — overwrite its zbranch */
			struct ubifs_zbranch *zbr = &znode->zbranch[n];

			lnc_free(zbr);
			err = ubifs_add_dirt(c, zbr->lnum, zbr->len);
			zbr->lnum = lnum;
			zbr->offs = offs;
			zbr->len = len;
			goto out_unlock;
		}
	}

	if (!found) {
		struct ubifs_zbranch zbr;

		zbr.znode = NULL;
		zbr.lnum = lnum;
		zbr.offs = offs;
		zbr.len = len;
		key_copy(c, key, &zbr.key);
		err = tnc_insert(c, znode, &zbr, n + 1);
		if (err)
			goto out_unlock;
		if (c->replaying) {
			/*
			 * We did not find it in the index so there may be a
			 * dangling branch still in the index. So we remove it
			 * by passing 'ubifs_tnc_remove_nm()' the same key but
			 * an unmatchable name.
			 */
			struct qstr noname = { .len = 0, .name = "" };

			err = dbg_check_tnc(c, 0);
			mutex_unlock(&c->tnc_mutex);
			if (err)
				return err;
			return ubifs_tnc_remove_nm(c, key, &noname);
		}
	}

out_unlock:
	if (!err)
		err = dbg_check_tnc(c, 0);
	mutex_unlock(&c->tnc_mutex);
	return err;
}

/**
 * tnc_delete - delete a znode from TNC.
 * @c: UBIFS file-system description object
 * @znode: znode to delete from
 * @n: zbranch slot number to delete
 *
 * This function deletes a leaf node from @n-th slot of @znode.
 * Returns zero in
 * case of success and a negative error code in case of failure.
 */
static int tnc_delete(struct ubifs_info *c, struct ubifs_znode *znode, int n)
{
	struct ubifs_zbranch *zbr;
	struct ubifs_znode *zp;
	int i, err;

	/* Delete without merge for now */
	ubifs_assert(znode->level == 0);
	ubifs_assert(n >= 0 && n < c->fanout);
	dbg_tnck(&znode->zbranch[n].key, "deleting key ");

	zbr = &znode->zbranch[n];
	lnc_free(zbr);

	err = ubifs_add_dirt(c, zbr->lnum, zbr->len);
	if (err) {
		dbg_dump_znode(c, znode);
		return err;
	}

	/* We do not "gap" zbranch slots */
	for (i = n; i < znode->child_cnt - 1; i++)
		znode->zbranch[i] = znode->zbranch[i + 1];
	znode->child_cnt -= 1;

	if (znode->child_cnt > 0)
		return 0;

	/*
	 * This was the last zbranch, we have to delete this znode from the
	 * parent.
	 */

	do {
		ubifs_assert(!ubifs_zn_obsolete(znode));
		ubifs_assert(ubifs_zn_dirty(znode));

		zp = znode->parent;
		n = znode->iip;

		atomic_long_dec(&c->dirty_zn_cnt);

		err = insert_old_idx_znode(c, znode);
		if (err)
			return err;

		if (znode->cnext) {
			/* Commit still references it — just mark obsolete */
			__set_bit(OBSOLETE_ZNODE, &znode->flags);
			atomic_long_inc(&c->clean_zn_cnt);
			atomic_long_inc(&ubifs_clean_zn_cnt);
		} else
			kfree(znode);
		znode = zp;
	} while (znode->child_cnt == 1); /* while removing last child */

	/* Remove from znode, entry n - 1 */
	znode->child_cnt -= 1;
	ubifs_assert(znode->level != 0);
	for (i = n; i < znode->child_cnt; i++) {
		znode->zbranch[i] = znode->zbranch[i + 1];
		if (znode->zbranch[i].znode)
			znode->zbranch[i].znode->iip = i;
	}

	/*
	 * If this is the root and it has only 1 child then
	 * collapse the tree.
	 */
	if (!znode->parent) {
		while (znode->child_cnt == 1 && znode->level != 0) {
			zp = znode;
			zbr = &znode->zbranch[0];
			znode = get_znode(c, znode, 0);
			if (IS_ERR(znode))
				return PTR_ERR(znode);
			znode = dirty_cow_znode(c, zbr);
			if (IS_ERR(znode))
				return PTR_ERR(znode);
			znode->parent = NULL;
			znode->iip = 0;
			if (c->zroot.len) {
				err = insert_old_idx(c, c->zroot.lnum,
						     c->zroot.offs);
				if (err)
					return err;
			}
			c->zroot.lnum = zbr->lnum;
			c->zroot.offs = zbr->offs;
			c->zroot.len = zbr->len;
			c->zroot.znode = znode;
			ubifs_assert(!ubifs_zn_obsolete(zp));
			ubifs_assert(ubifs_zn_dirty(zp));
			atomic_long_dec(&c->dirty_zn_cnt);

			if (zp->cnext) {
				__set_bit(OBSOLETE_ZNODE, &zp->flags);
				atomic_long_inc(&c->clean_zn_cnt);
				atomic_long_inc(&ubifs_clean_zn_cnt);
			} else
				kfree(zp);
		}
	}

	return 0;
}

/**
 * ubifs_tnc_remove - remove an index entry of a node.
 * @c: UBIFS file-system description object
 * @key: key of node
 *
 * Returns %0 on success or negative error code on failure.
 */
int ubifs_tnc_remove(struct ubifs_info *c, const union ubifs_key *key)
{
	int found, n, err = 0;
	struct ubifs_znode *znode;

	mutex_lock(&c->tnc_mutex);
	dbg_tnck(key, "key ");
	found = lookup_level0_dirty(c, key, &znode, &n);
	if (found < 0) {
		err = found;
		goto out_unlock;
	}
	if (found == 1)
		err = tnc_delete(c, znode, n);
	if (!err)
		err = dbg_check_tnc(c, 0);

out_unlock:
	mutex_unlock(&c->tnc_mutex);
	return err;
}

/**
 * ubifs_tnc_remove_nm - remove an index entry for a "hashed" node.
 * @c: UBIFS file-system description object
 * @key: key of node
 * @nm: directory entry name
 *
 * Returns %0 on success or negative error code on failure.
 */
int ubifs_tnc_remove_nm(struct ubifs_info *c, const union ubifs_key *key,
			const struct qstr *nm)
{
	int n, err;
	struct ubifs_znode *znode;

	mutex_lock(&c->tnc_mutex);
	dbg_tnck(key, "%.*s, key ", nm->len, nm->name);
	err = lookup_level0_dirty(c, key, &znode, &n);
	if (err < 0)
		goto out_unlock;

	if (err) {
		/* Hash matched — resolve collisions by the entry name */
		if (c->replaying)
			err = fallible_resolve_collision(c, key, &znode, &n,
							 nm, 0);
		else
			err = resolve_collision(c, key, &znode, &n, nm);
		dbg_tnc("rc returned %d, znode %p, n %d", err, znode, n);
		if (err < 0)
			goto out_unlock;
		if (err) {
			/* Ensure the znode is dirtied */
			if (znode->cnext || !ubifs_zn_dirty(znode)) {
				znode = dirty_cow_bottom_up(c, znode);
				if (IS_ERR(znode)) {
					err = PTR_ERR(znode);
					goto out_unlock;
				}
			}
			err = tnc_delete(c, znode, n);
		}
	}

out_unlock:
	if (!err)
		err = dbg_check_tnc(c, 0);
	mutex_unlock(&c->tnc_mutex);
	return err;
}

/**
 * key_in_range - determine if a key falls within a range of keys.
 * @c: UBIFS file-system description object
 * @key: key to check
 * @from_key: lowest key in range
 * @to_key: highest key in range
 *
 * This function returns %1 if the key is in range and %0 otherwise.
 */
static int key_in_range(struct ubifs_info *c, union ubifs_key *key,
			union ubifs_key *from_key, union ubifs_key *to_key)
{
	if (keys_cmp(c, key, from_key) < 0)
		return 0;
	if (keys_cmp(c, key, to_key) > 0)
		return 0;
	return 1;
}

/**
 * ubifs_tnc_remove_range - remove index entries in range.
 * @c: UBIFS file-system description object
 * @from_key: lowest key to remove
 * @to_key: highest key to remove
 *
 * This function removes index entries starting at @from_key and ending at
 * @to_key. This function returns zero in case of success and a negative error
 * code in case of failure.
 */
int ubifs_tnc_remove_range(struct ubifs_info *c, union ubifs_key *from_key,
			   union ubifs_key *to_key)
{
	int i, n, k, err = 0;
	struct ubifs_znode *znode;
	union ubifs_key *key;

	mutex_lock(&c->tnc_mutex);
	while (1) {
		/* Find first level 0 znode that contains keys to remove */
		err = ubifs_lookup_level0(c, from_key, &znode, &n);
		if (err < 0)
			goto out_unlock;

		if (err)
			key = from_key;
		else {
			/* @from_key itself absent — step to the next key */
			err = tnc_next(c, &znode, &n);
			if (err == -ENOENT) {
				err = 0;
				goto out_unlock;
			}
			if (err < 0)
				goto out_unlock;
			key = &znode->zbranch[n].key;
			if (!key_in_range(c, key, from_key, to_key)) {
				err = 0;
				goto out_unlock;
			}
		}

		/* Ensure the znode is dirtied */
		if (znode->cnext || !ubifs_zn_dirty(znode)) {
			znode = dirty_cow_bottom_up(c, znode);
			if (IS_ERR(znode)) {
				err = PTR_ERR(znode);
				goto out_unlock;
			}
		}

		/* Remove all keys in range except the first */
		for (i = n + 1, k = 0; i < znode->child_cnt; i++, k++) {
			key = &znode->zbranch[i].key;
			if (!key_in_range(c, key, from_key, to_key))
				break;
			lnc_free(&znode->zbranch[i]);
			err = ubifs_add_dirt(c, znode->zbranch[i].lnum,
					     znode->zbranch[i].len);
			if (err) {
				dbg_dump_znode(c, znode);
				goto out_unlock;
			}
			dbg_tnck(key, "removing key ");
		}
		if (k) {
			/* Close the gap left by the removed zbranches */
			for (i = n + 1 + k; i < znode->child_cnt; i++)
				znode->zbranch[i - k] = znode->zbranch[i];
			znode->child_cnt -= k;
		}

		/* Now delete the first */
		err = tnc_delete(c, znode, n);
		if (err)
			goto out_unlock;
	}

out_unlock:
	if (!err)
		err = dbg_check_tnc(c, 0);
	mutex_unlock(&c->tnc_mutex);
	return err;
}

/**
 * ubifs_tnc_remove_ino - remove an inode from TNC.
 * @c: UBIFS file-system description object
 * @inum: inode number to remove
 *
 * This function removes inode @inum and all the extended attributes associated
 * with the inode from TNC and returns zero in case of success or a negative
 * error code in case of failure.
 */
int ubifs_tnc_remove_ino(struct ubifs_info *c, ino_t inum)
{
	union ubifs_key key1, key2;
	struct ubifs_dent_node *xent, *pxent = NULL;
	struct qstr nm = { .name = NULL };

	dbg_tnc("ino %lu", (unsigned long)inum);

	/*
	 * Walk all extended attribute entries and remove them together with
	 * corresponding extended attribute inodes.
	 */
	lowest_xent_key(c, &key1, inum);
	while (1) {
		ino_t xattr_inum;
		int err;

		xent = ubifs_tnc_next_ent(c, &key1, &nm);
		if (IS_ERR(xent)) {
			err = PTR_ERR(xent);
			if (err == -ENOENT)
				break;	/* No more xattr entries */
			return err;
		}

		xattr_inum = le64_to_cpu(xent->inum);
		dbg_tnc("xent '%s', ino %lu", xent->name,
			(unsigned long)xattr_inum);

		nm.name = xent->name;
		nm.len = le16_to_cpu(xent->nlen);
		err = ubifs_tnc_remove_nm(c, &key1, &nm);
		if (err) {
			kfree(xent);
			return err;
		}

		/* Remove the xattr inode's own index entries too */
		lowest_ino_key(c, &key1, xattr_inum);
		highest_ino_key(c, &key2, xattr_inum);
		err = ubifs_tnc_remove_range(c, &key1, &key2);
		if (err) {
			kfree(xent);
			return err;
		}

		/* Free the previous entry only after advancing past it */
		kfree(pxent);
		pxent = xent;
		key_read(c, &xent->key, &key1);
	}

	kfree(pxent);
	lowest_ino_key(c, &key1, inum);
	highest_ino_key(c, &key2, inum);

	return ubifs_tnc_remove_range(c, &key1, &key2);
}

/**
 * ubifs_tnc_next_ent - walk directory or extended attribute entries.
 * @c: UBIFS file-system description object
 * @key: key of last entry
 * @nm: name of last entry found or %NULL
 *
 * This function finds and reads the next directory or extended attribute entry
 * after the given key (@key) if there is one. @nm is used to resolve
 * collisions.
 *
 * If the name of the current entry is not known and only the key is known,
 * @nm->name has to be %NULL. In this case the semantics of this function is a
 * little bit different and it returns the entry corresponding to this key, not
 * the next one. If the key was not found, the closest "right" entry is
 * returned.
 *
 * If the first entry has to be found, @key has to contain the lowest possible
 * key value for this inode and @name has to be %NULL.
 *
 * This function returns the found directory or extended attribute entry node
 * in case of success, %-ENOENT is returned if no entry was found, and a
 * negative error code is returned in case of failure.
 */
struct ubifs_dent_node *ubifs_tnc_next_ent(struct ubifs_info *c,
					   union ubifs_key *key,
					   const struct qstr *nm)
{
	int n, err, type = key_type(c, key);
	struct ubifs_znode *znode;
	struct ubifs_dent_node *dent;
	struct ubifs_zbranch *zbr;
	union ubifs_key *dkey;

	dbg_tnck(key, "%s ", nm->name ? (char *)nm->name : "(lowest)");
	ubifs_assert(is_hash_key(c, key));

	mutex_lock(&c->tnc_mutex);
	err = ubifs_lookup_level0(c, key, &znode, &n);
	if (unlikely(err < 0))
		goto out_unlock;

	if (nm->name) {
		if (err) {
			/* Handle collisions */
			err = resolve_collision(c, key, &znode, &n, nm);
			dbg_tnc("rc returned %d, znode %p, n %d",
				err, znode, n);
			if (unlikely(err < 0))
				goto out_unlock;
		}

		/* Now find next entry */
		err = tnc_next(c, &znode, &n);
		if (unlikely(err))
			goto out_unlock;
	} else {
		/*
		 * The full name of the entry was not given, in which case the
		 * behavior of this function is a little different and it
		 * returns current entry, not the next one.
		 */
		if (!err) {
			/*
			 * However, the given key does not exist in the TNC
			 * tree and @znode/@n variables contain the closest
			 * "preceding" element. Switch to the next one.
			 */
			err = tnc_next(c, &znode, &n);
			if (err)
				goto out_unlock;
		}
	}

	zbr = &znode->zbranch[n];
	dent = kmalloc(zbr->len, GFP_NOFS);
	if (unlikely(!dent)) {
		err = -ENOMEM;
		goto out_unlock;
	}

	/*
	 * The above 'tnc_next()' call could lead us to the next inode, check
	 * this.
	 */
	dkey = &zbr->key;
	if (key_inum(c, dkey) != key_inum(c, key) ||
	    key_type(c, dkey) != type) {
		err = -ENOENT;
		goto out_free;
	}

	err = tnc_read_node_nm(c, zbr, dent);
	if (unlikely(err))
		goto out_free;

	mutex_unlock(&c->tnc_mutex);
	return dent;

out_free:
	kfree(dent);
out_unlock:
	mutex_unlock(&c->tnc_mutex);
	return ERR_PTR(err);
}

/**
 * tnc_destroy_cnext - destroy left-over obsolete znodes from a failed commit.
 * @c: UBIFS file-system description object
 *
 * Destroy left-over obsolete znodes from a failed commit.
 */
static void tnc_destroy_cnext(struct ubifs_info *c)
{
	struct ubifs_znode *cnext;

	if (!c->cnext)
		return;
	ubifs_assert(c->cmt_state == COMMIT_BROKEN);
	cnext = c->cnext;
	/* Walk the circular commit list, freeing only obsolete znodes */
	do {
		struct ubifs_znode *znode = cnext;

		cnext = cnext->cnext;
		if (ubifs_zn_obsolete(znode))
			kfree(znode);
	} while (cnext && cnext != c->cnext);
}

/**
 * ubifs_tnc_close - close TNC subsystem and free all related resources.
 * @c: UBIFS file-system description object
 */
void ubifs_tnc_close(struct ubifs_info *c)
{
	tnc_destroy_cnext(c);
	if (c->zroot.znode) {
		long n;

		ubifs_destroy_tnc_subtree(c->zroot.znode);
		/* Drop this FS's clean znodes from the global counter */
		n = atomic_long_read(&c->clean_zn_cnt);
		atomic_long_sub(n, &ubifs_clean_zn_cnt);
	}
	kfree(c->gap_lebs);
	kfree(c->ilebs);
	destroy_old_idx(c);
}

/**
 * left_znode - get the znode to the left.
 * @c: UBIFS file-system description object
 * @znode: znode
 *
 * This function returns a pointer to the znode to the left of @znode or NULL if
 * there is not one. A negative error code is returned on failure.
 */
static struct ubifs_znode *left_znode(struct ubifs_info *c,
				      struct ubifs_znode *znode)
{
	int level = znode->level;

	while (1) {
		int n = znode->iip - 1;

		/* Go up until we can go left */
		znode = znode->parent;
		if (!znode)
			return NULL;
		if (n >= 0) {
			/* Now go down the rightmost branch to 'level' */
			znode = get_znode(c, znode, n);
			if (IS_ERR(znode))
				return znode;
			while (znode->level != level) {
				n = znode->child_cnt - 1;
				znode = get_znode(c, znode, n);
				if (IS_ERR(znode))
					return znode;
			}
			break;
		}
	}
	return znode;
}

/**
 * right_znode - get the znode to the right.
 * @c: UBIFS file-system description object
 * @znode: znode
 *
 * This function returns a pointer to the znode to the right of @znode or NULL
 * if there is not one. A negative error code is returned on failure.
*/ static struct ubifs_znode *right_znode(struct ubifs_info *c, struct ubifs_znode *znode) { int level = znode->level; while (1) { int n = znode->iip + 1; /* Go up until we can go right */ znode = znode->parent; if (!znode) return NULL; if (n < znode->child_cnt) { /* Now go down the leftmost branch to 'level' */ znode = get_znode(c, znode, n); if (IS_ERR(znode)) return znode; while (znode->level != level) { znode = get_znode(c, znode, 0); if (IS_ERR(znode)) return znode; } break; } } return znode; } /** * lookup_znode - find a particular indexing node from TNC. * @c: UBIFS file-system description object * @key: index node key to lookup * @level: index node level * @lnum: index node LEB number * @offs: index node offset * * This function searches an indexing node by its first key @key and its * address @lnum:@offs. It looks up the indexing tree by pulling all indexing * nodes it traverses to TNC. This function is called for indexing nodes which * were found on the media by scanning, for example when garbage-collecting or * when doing in-the-gaps commit. This means that the indexing node which is * looked for does not have to have exactly the same leftmost key @key, because * the leftmost key may have been changed, in which case TNC will contain a * dirty znode which still refers the same @lnum:@offs. This function is clever * enough to recognize such indexing nodes. * * Note, if a znode was deleted or changed too much, then this function will * not find it. For situations like this UBIFS has the old index RB-tree * (indexed by @lnum:@offs). * * This function returns a pointer to the znode found or %NULL if it is not * found. A negative error code is returned on failure. 
 */
static struct ubifs_znode *lookup_znode(struct ubifs_info *c,
					union ubifs_key *key, int level,
					int lnum, int offs)
{
	struct ubifs_znode *znode, *zn;
	int n, nn;

	ubifs_assert(key_type(c, key) < UBIFS_INVALID_KEY);

	/*
	 * The arguments have probably been read off flash, so don't assume
	 * they are valid.
	 */
	if (level < 0)
		return ERR_PTR(-EINVAL);

	/* Get the root znode */
	znode = c->zroot.znode;
	if (!znode) {
		znode = ubifs_load_znode(c, &c->zroot, NULL, 0);
		if (IS_ERR(znode))
			return znode;
	}
	/* Check if it is the one we are looking for */
	if (c->zroot.lnum == lnum && c->zroot.offs == offs)
		return znode;
	/* Descend to the parent level i.e. (level + 1) */
	if (level >= znode->level)
		return NULL;
	while (1) {
		ubifs_search_zbranch(c, znode, key, &n);
		if (n < 0) {
			/*
			 * We reached a znode where the leftmost key is greater
			 * than the key we are searching for. This is the same
			 * situation as the one described in a huge comment at
			 * the end of the 'ubifs_lookup_level0()' function. And
			 * for exactly the same reasons we have to try to look
			 * left before giving up.
			 */
			znode = left_znode(c, znode);
			if (!znode)
				return NULL;
			if (IS_ERR(znode))
				return znode;
			ubifs_search_zbranch(c, znode, key, &n);
			ubifs_assert(n >= 0);
		}
		if (znode->level == level + 1)
			break;
		znode = get_znode(c, znode, n);
		if (IS_ERR(znode))
			return znode;
	}
	/* Check if the child is the one we are looking for */
	if (znode->zbranch[n].lnum == lnum && znode->zbranch[n].offs == offs)
		return get_znode(c, znode, n);
	/* If the key is unique, there is nowhere else to look */
	if (!is_hash_key(c, key))
		return NULL;
	/*
	 * The key is not unique and so may be also in the znodes to either
	 * side.
	 */
	zn = znode;
	nn = n;
	/* Look left */
	while (1) {
		/* Move one branch to the left */
		if (n)
			n -= 1;
		else {
			znode = left_znode(c, znode);
			if (!znode)
				break;
			if (IS_ERR(znode))
				return znode;
			n = znode->child_cnt - 1;
		}
		/* Check it */
		if (znode->zbranch[n].lnum == lnum &&
		    znode->zbranch[n].offs == offs)
			return get_znode(c, znode, n);
		/* Stop if the key is less than the one we are looking for */
		if (keys_cmp(c, &znode->zbranch[n].key, key) < 0)
			break;
	}
	/* Back to the middle */
	znode = zn;
	n = nn;
	/* Look right */
	while (1) {
		/* Move one branch to the right */
		if (++n >= znode->child_cnt) {
			znode = right_znode(c, znode);
			if (!znode)
				break;
			if (IS_ERR(znode))
				return znode;
			n = 0;
		}
		/* Check it */
		if (znode->zbranch[n].lnum == lnum &&
		    znode->zbranch[n].offs == offs)
			return get_znode(c, znode, n);
		/* Stop if the key is greater than the one we are looking for */
		if (keys_cmp(c, &znode->zbranch[n].key, key) > 0)
			break;
	}
	return NULL;
}

/**
 * is_idx_node_in_tnc - determine if an index node is in the TNC.
 * @c: UBIFS file-system description object
 * @key: key of index node
 * @level: index node level
 * @lnum: LEB number of index node
 * @offs: offset of index node
 *
 * This function returns %0 if the index node is not referred to in the TNC, %1
 * if the index node is referred to in the TNC and the corresponding znode is
 * dirty, %2 if an index node is referred to in the TNC and the corresponding
 * znode is clean, and a negative error code in case of failure.
 *
 * Note, the @key argument has to be the key of the first child. Also note,
 * this function relies on the fact that 0:0 is never a valid LEB number and
 * offset for a main-area node.
 */
int is_idx_node_in_tnc(struct ubifs_info *c, union ubifs_key *key, int level,
		       int lnum, int offs)
{
	struct ubifs_znode *znode;

	znode = lookup_znode(c, key, level, lnum, offs);
	if (!znode)
		return 0;
	if (IS_ERR(znode))
		return PTR_ERR(znode);

	return ubifs_zn_dirty(znode) ? 1 : 2;
}

/**
 * is_leaf_node_in_tnc - determine if a non-indexing node is in the TNC.
 * @c: UBIFS file-system description object
 * @key: node key
 * @lnum: node LEB number
 * @offs: node offset
 *
 * This function returns %1 if the node is referred to in the TNC, %0 if it is
 * not, and a negative error code in case of failure.
 *
 * Note, this function relies on the fact that 0:0 is never a valid LEB number
 * and offset for a main-area node.
 */
static int is_leaf_node_in_tnc(struct ubifs_info *c, union ubifs_key *key,
			       int lnum, int offs)
{
	struct ubifs_zbranch *zbr;
	struct ubifs_znode *znode, *zn;
	int n, found, err, nn;
	const int unique = !is_hash_key(c, key);

	found = ubifs_lookup_level0(c, key, &znode, &n);
	if (found < 0)
		return found; /* Error code */
	if (!found)
		return 0;
	zbr = &znode->zbranch[n];
	if (lnum == zbr->lnum && offs == zbr->offs)
		return 1; /* Found it */
	if (unique)
		return 0;
	/*
	 * Because the key is not unique, we have to look left
	 * and right as well
	 */
	zn = znode;
	nn = n;
	/* Look left */
	while (1) {
		err = tnc_prev(c, &znode, &n);
		if (err == -ENOENT)
			break;
		if (err)
			return err;
		if (keys_cmp(c, key, &znode->zbranch[n].key))
			break;
		zbr = &znode->zbranch[n];
		if (lnum == zbr->lnum && offs == zbr->offs)
			return 1; /* Found it */
	}
	/* Look right */
	znode = zn;
	n = nn;
	while (1) {
		err = tnc_next(c, &znode, &n);
		if (err) {
			if (err == -ENOENT)
				return 0;
			return err;
		}
		if (keys_cmp(c, key, &znode->zbranch[n].key))
			break;
		zbr = &znode->zbranch[n];
		if (lnum == zbr->lnum && offs == zbr->offs)
			return 1; /* Found it */
	}
	return 0;
}

/**
 * ubifs_tnc_has_node - determine whether a node is in the TNC.
 * @c: UBIFS file-system description object
 * @key: node key
 * @level: index node level (if it is an index node)
 * @lnum: node LEB number
 * @offs: node offset
 * @is_idx: non-zero if the node is an index node
 *
 * This function returns %1 if the node is in the TNC, %0 if it is not, and a
 * negative error code in case of failure.
 * For index nodes, @key has to be the
 * key of the first child. An index node is considered to be in the TNC only if
 * the corresponding znode is clean or has not been loaded.
 */
int ubifs_tnc_has_node(struct ubifs_info *c, union ubifs_key *key, int level,
		       int lnum, int offs, int is_idx)
{
	int err;

	mutex_lock(&c->tnc_mutex);
	if (is_idx) {
		err = is_idx_node_in_tnc(c, key, level, lnum, offs);
		if (err < 0)
			goto out_unlock;
		if (err == 1)
			/* The index node was found but it was dirty */
			err = 0;
		else if (err == 2)
			/* The index node was found and it was clean */
			err = 1;
		else
			BUG_ON(err != 0);
	} else
		err = is_leaf_node_in_tnc(c, key, lnum, offs);

out_unlock:
	mutex_unlock(&c->tnc_mutex);
	return err;
}

/**
 * ubifs_dirty_idx_node - dirty an index node.
 * @c: UBIFS file-system description object
 * @key: index node key
 * @level: index node level
 * @lnum: index node LEB number
 * @offs: index node offset
 *
 * This function loads and dirties an index node so that it can be garbage
 * collected. The @key argument has to be the key of the first child. This
 * function relies on the fact that 0:0 is never a valid LEB number and offset
 * for a main-area node. Returns %0 on success and a negative error code on
 * failure.
 */
int ubifs_dirty_idx_node(struct ubifs_info *c, union ubifs_key *key, int level,
			 int lnum, int offs)
{
	struct ubifs_znode *znode;
	int err = 0;

	mutex_lock(&c->tnc_mutex);
	znode = lookup_znode(c, key, level, lnum, offs);
	if (!znode)
		goto out_unlock;	/* Not in TNC — nothing to dirty */
	if (IS_ERR(znode)) {
		err = PTR_ERR(znode);
		goto out_unlock;
	}
	znode = dirty_cow_bottom_up(c, znode);
	if (IS_ERR(znode)) {
		err = PTR_ERR(znode);
		goto out_unlock;
	}

out_unlock:
	mutex_unlock(&c->tnc_mutex);
	return err;
}

#ifdef CONFIG_UBIFS_FS_DEBUG

/**
 * dbg_check_inode_size - check if inode size is correct.
 * @c: UBIFS file-system description object
 * @inode: inode to check
 * @size: inode size
 *
 * This function makes sure that the inode size (@size) is correct and it does
 * not have any pages beyond @size.
 * Returns zero if the inode is OK, %-EINVAL
 * if it has a data page beyond @size, and other negative error code in case of
 * other errors.
 */
int dbg_check_inode_size(struct ubifs_info *c, const struct inode *inode,
			 loff_t size)
{
	int err, n;
	union ubifs_key from_key, to_key, *key;
	struct ubifs_znode *znode;
	unsigned int block;

	if (!S_ISREG(inode->i_mode))
		return 0;
	if (!dbg_is_chk_gen(c))
		return 0;

	/* First block index that must not contain data */
	block = (size + UBIFS_BLOCK_SIZE - 1) >> UBIFS_BLOCK_SHIFT;
	data_key_init(c, &from_key, inode->i_ino, block);
	highest_data_key(c, &to_key, inode->i_ino);

	mutex_lock(&c->tnc_mutex);
	err = ubifs_lookup_level0(c, &from_key, &znode, &n);
	if (err < 0)
		goto out_unlock;

	if (err) {
		/* A data node exactly at the boundary block — bad */
		err = -EINVAL;
		key = &from_key;
		goto out_dump;
	}

	err = tnc_next(c, &znode, &n);
	if (err == -ENOENT) {
		err = 0;
		goto out_unlock;
	}
	if (err < 0)
		goto out_unlock;

	ubifs_assert(err == 0);
	key = &znode->zbranch[n].key;
	if (!key_in_range(c, key, &from_key, &to_key))
		goto out_unlock;

out_dump:
	block = key_block(c, key);
	ubifs_err("inode %lu has size %lld, but there are data at offset %lld",
		  (unsigned long)inode->i_ino, size,
		  ((loff_t)block) << UBIFS_BLOCK_SHIFT);
	mutex_unlock(&c->tnc_mutex);
	dbg_dump_inode(c, inode);
	dbg_dump_stack();
	return -EINVAL;

out_unlock:
	mutex_unlock(&c->tnc_mutex);
	return err;
}

#endif /* CONFIG_UBIFS_FS_DEBUG */
gpl-2.0
xenon1978/sony_t3
drivers/mfd/tps65217.c
4822
5964
/* * tps65217.c * * TPS65217 chip family multi-function driver * * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/ * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation version 2. * * This program is distributed "as is" WITHOUT ANY WARRANTY of any * kind, whether express or implied; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/kernel.h> #include <linux/device.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/init.h> #include <linux/i2c.h> #include <linux/slab.h> #include <linux/regmap.h> #include <linux/err.h> #include <linux/mfd/core.h> #include <linux/mfd/tps65217.h> /** * tps65217_reg_read: Read a single tps65217 register. * * @tps: Device to read from. * @reg: Register to read. * @val: Contians the value */ int tps65217_reg_read(struct tps65217 *tps, unsigned int reg, unsigned int *val) { return regmap_read(tps->regmap, reg, val); } EXPORT_SYMBOL_GPL(tps65217_reg_read); /** * tps65217_reg_write: Write a single tps65217 register. * * @tps65217: Device to write to. * @reg: Register to write to. * @val: Value to write. 
* @level: Password protected level */ int tps65217_reg_write(struct tps65217 *tps, unsigned int reg, unsigned int val, unsigned int level) { int ret; unsigned int xor_reg_val; switch (level) { case TPS65217_PROTECT_NONE: return regmap_write(tps->regmap, reg, val); case TPS65217_PROTECT_L1: xor_reg_val = reg ^ TPS65217_PASSWORD_REGS_UNLOCK; ret = regmap_write(tps->regmap, TPS65217_REG_PASSWORD, xor_reg_val); if (ret < 0) return ret; return regmap_write(tps->regmap, reg, val); case TPS65217_PROTECT_L2: xor_reg_val = reg ^ TPS65217_PASSWORD_REGS_UNLOCK; ret = regmap_write(tps->regmap, TPS65217_REG_PASSWORD, xor_reg_val); if (ret < 0) return ret; ret = regmap_write(tps->regmap, reg, val); if (ret < 0) return ret; ret = regmap_write(tps->regmap, TPS65217_REG_PASSWORD, xor_reg_val); if (ret < 0) return ret; return regmap_write(tps->regmap, reg, val); default: return -EINVAL; } } EXPORT_SYMBOL_GPL(tps65217_reg_write); /** * tps65217_update_bits: Modify bits w.r.t mask, val and level. * * @tps65217: Device to write to. * @reg: Register to read-write to. * @mask: Mask. * @val: Value to write. 
* @level: Password protected level */ int tps65217_update_bits(struct tps65217 *tps, unsigned int reg, unsigned int mask, unsigned int val, unsigned int level) { int ret; unsigned int data; ret = tps65217_reg_read(tps, reg, &data); if (ret) { dev_err(tps->dev, "Read from reg 0x%x failed\n", reg); return ret; } data &= ~mask; data |= val & mask; ret = tps65217_reg_write(tps, reg, data, level); if (ret) dev_err(tps->dev, "Write for reg 0x%x failed\n", reg); return ret; } int tps65217_set_bits(struct tps65217 *tps, unsigned int reg, unsigned int mask, unsigned int val, unsigned int level) { return tps65217_update_bits(tps, reg, mask, val, level); } EXPORT_SYMBOL_GPL(tps65217_set_bits); int tps65217_clear_bits(struct tps65217 *tps, unsigned int reg, unsigned int mask, unsigned int level) { return tps65217_update_bits(tps, reg, mask, 0, level); } EXPORT_SYMBOL_GPL(tps65217_clear_bits); static struct regmap_config tps65217_regmap_config = { .reg_bits = 8, .val_bits = 8, }; static int __devinit tps65217_probe(struct i2c_client *client, const struct i2c_device_id *ids) { struct tps65217 *tps; struct tps65217_board *pdata = client->dev.platform_data; int i, ret; unsigned int version; tps = devm_kzalloc(&client->dev, sizeof(*tps), GFP_KERNEL); if (!tps) return -ENOMEM; tps->pdata = pdata; tps->regmap = regmap_init_i2c(client, &tps65217_regmap_config); if (IS_ERR(tps->regmap)) { ret = PTR_ERR(tps->regmap); dev_err(tps->dev, "Failed to allocate register map: %d\n", ret); return ret; } i2c_set_clientdata(client, tps); tps->dev = &client->dev; ret = tps65217_reg_read(tps, TPS65217_REG_CHIPID, &version); if (ret < 0) { dev_err(tps->dev, "Failed to read revision" " register: %d\n", ret); goto err_regmap; } dev_info(tps->dev, "TPS65217 ID %#x version 1.%d\n", (version & TPS65217_CHIPID_CHIP_MASK) >> 4, version & TPS65217_CHIPID_REV_MASK); for (i = 0; i < TPS65217_NUM_REGULATOR; i++) { struct platform_device *pdev; pdev = platform_device_alloc("tps65217-pmic", i); if (!pdev) { 
dev_err(tps->dev, "Cannot create regulator %d\n", i); continue; } pdev->dev.parent = tps->dev; platform_device_add_data(pdev, &pdata->tps65217_init_data[i], sizeof(pdata->tps65217_init_data[i])); tps->regulator_pdev[i] = pdev; platform_device_add(pdev); } return 0; err_regmap: regmap_exit(tps->regmap); return ret; } static int __devexit tps65217_remove(struct i2c_client *client) { struct tps65217 *tps = i2c_get_clientdata(client); int i; for (i = 0; i < TPS65217_NUM_REGULATOR; i++) platform_device_unregister(tps->regulator_pdev[i]); regmap_exit(tps->regmap); return 0; } static const struct i2c_device_id tps65217_id_table[] = { {"tps65217", 0xF0}, {/* end of list */} }; MODULE_DEVICE_TABLE(i2c, tps65217_id_table); static struct i2c_driver tps65217_driver = { .driver = { .name = "tps65217", }, .id_table = tps65217_id_table, .probe = tps65217_probe, .remove = __devexit_p(tps65217_remove), }; static int __init tps65217_init(void) { return i2c_add_driver(&tps65217_driver); } subsys_initcall(tps65217_init); static void __exit tps65217_exit(void) { i2c_del_driver(&tps65217_driver); } module_exit(tps65217_exit); MODULE_AUTHOR("AnilKumar Ch <anilkumar@ti.com>"); MODULE_DESCRIPTION("TPS65217 chip family multi-function driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
denseye73/mykernel
arch/x86/kernel/i8259.c
4822
10356
#include <linux/linkage.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/syscore_ops.h>
#include <linux/bitops.h>
#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/delay.h>

#include <linux/atomic.h>
#include <asm/timer.h>
#include <asm/hw_irq.h>
#include <asm/pgtable.h>
#include <asm/desc.h>
#include <asm/apic.h>
#include <asm/i8259.h>

/*
 * This is the 'legacy' 8259A Programmable Interrupt Controller,
 * present in the majority of PC/AT boxes.
 * plus some generic x86 specific things if generic specifics makes
 * any sense at all.
 */
static void init_8259A(int auto_eoi);

static int i8259A_auto_eoi;
DEFINE_RAW_SPINLOCK(i8259A_lock);

/*
 * 8259A PIC functions to handle ISA devices:
 */

/*
 * This contains the irq mask for both 8259A irq controllers,
 */
unsigned int cached_irq_mask = 0xffff;

/*
 * Not all IRQs can be routed through the IO-APIC, eg. on certain (older)
 * boards the timer interrupt is not really connected to any IO-APIC pin,
 * it's fed to the master 8259A's IR0 line only.
 *
 * Any '1' bit in this mask means the IRQ is routed through the IO-APIC.
 * this 'mixed mode' IRQ handling costs nothing because it's only used
 * at IRQ setup time.
 */
unsigned long io_apic_irqs;

static void mask_8259A_irq(unsigned int irq)
{
	unsigned int mask = 1 << irq;
	unsigned long flags;

	raw_spin_lock_irqsave(&i8259A_lock, flags);
	cached_irq_mask |= mask;
	/* IRQs 8-15 live on the slave PIC */
	if (irq & 8)
		outb(cached_slave_mask, PIC_SLAVE_IMR);
	else
		outb(cached_master_mask, PIC_MASTER_IMR);
	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
}

static void disable_8259A_irq(struct irq_data *data)
{
	mask_8259A_irq(data->irq);
}

static void unmask_8259A_irq(unsigned int irq)
{
	unsigned int mask = ~(1 << irq);
	unsigned long flags;

	raw_spin_lock_irqsave(&i8259A_lock, flags);
	cached_irq_mask &= mask;
	if (irq & 8)
		outb(cached_slave_mask, PIC_SLAVE_IMR);
	else
		outb(cached_master_mask, PIC_MASTER_IMR);
	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
}

static void enable_8259A_irq(struct irq_data *data)
{
	unmask_8259A_irq(data->irq);
}

static int i8259A_irq_pending(unsigned int irq)
{
	unsigned int mask = 1<<irq;
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&i8259A_lock, flags);
	if (irq < 8)
		ret = inb(PIC_MASTER_CMD) & mask;
	else
		ret = inb(PIC_SLAVE_CMD) & (mask >> 8);
	raw_spin_unlock_irqrestore(&i8259A_lock, flags);

	return ret;
}

static void make_8259A_irq(unsigned int irq)
{
	disable_irq_nosync(irq);
	/* Route this IRQ through the 8259A instead of the IO-APIC */
	io_apic_irqs &= ~(1<<irq);
	irq_set_chip_and_handler_name(irq, &i8259A_chip, handle_level_irq,
				      i8259A_chip.name);
	enable_irq(irq);
}

/*
 * This function assumes to be called rarely. Switching between
 * 8259A registers is slow.
 * This has to be protected by the irq controller spinlock
 * before being called.
 */
static inline int i8259A_irq_real(unsigned int irq)
{
	int value;
	int irqmask = 1<<irq;

	if (irq < 8) {
		outb(0x0B, PIC_MASTER_CMD);	/* ISR register */
		value = inb(PIC_MASTER_CMD) & irqmask;
		outb(0x0A, PIC_MASTER_CMD);	/* back to the IRR register */
		return value;
	}
	outb(0x0B, PIC_SLAVE_CMD);	/* ISR register */
	value = inb(PIC_SLAVE_CMD) & (irqmask >> 8);
	outb(0x0A, PIC_SLAVE_CMD);	/* back to the IRR register */
	return value;
}

/*
 * Careful! The 8259A is a fragile beast, it pretty
 * much _has_ to be done exactly like this (mask it
 * first, _then_ send the EOI, and the order of EOI
 * to the two 8259s is important!
 */
static void mask_and_ack_8259A(struct irq_data *data)
{
	unsigned int irq = data->irq;
	unsigned int irqmask = 1 << irq;
	unsigned long flags;

	raw_spin_lock_irqsave(&i8259A_lock, flags);
	/*
	 * Lightweight spurious IRQ detection. We do not want
	 * to overdo spurious IRQ handling - it's usually a sign
	 * of hardware problems, so we only do the checks we can
	 * do without slowing down good hardware unnecessarily.
	 *
	 * Note that IRQ7 and IRQ15 (the two spurious IRQs
	 * usually resulting from the 8259A-1|2 PICs) occur
	 * even if the IRQ is masked in the 8259A. Thus we
	 * can check spurious 8259A IRQs without doing the
	 * quite slow i8259A_irq_real() call for every IRQ.
	 * This does not cover 100% of spurious interrupts,
	 * but should be enough to warn the user that there
	 * is something bad going on ...
	 */
	if (cached_irq_mask & irqmask)
		goto spurious_8259A_irq;
	cached_irq_mask |= irqmask;

handle_real_irq:
	if (irq & 8) {
		inb(PIC_SLAVE_IMR);	/* DUMMY - (do we need this?) */
		outb(cached_slave_mask, PIC_SLAVE_IMR);
		/* 'Specific EOI' to slave */
		outb(0x60+(irq&7), PIC_SLAVE_CMD);
		 /* 'Specific EOI' to master-IRQ2 */
		outb(0x60+PIC_CASCADE_IR, PIC_MASTER_CMD);
	} else {
		inb(PIC_MASTER_IMR);	/* DUMMY - (do we need this?) */
		outb(cached_master_mask, PIC_MASTER_IMR);
		outb(0x60+irq, PIC_MASTER_CMD);	/* 'Specific EOI to master */
	}
	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
	return;

spurious_8259A_irq:
	/*
	 * this is the slow path - should happen rarely.
	 */
	if (i8259A_irq_real(irq))
		/*
		 * oops, the IRQ _is_ in service according to the
		 * 8259A - not spurious, go handle it.
		 */
		goto handle_real_irq;

	{
		static int spurious_irq_mask;
		/*
		 * At this point we can be sure the IRQ is spurious,
		 * lets ACK and report it. [once per IRQ]
		 */
		if (!(spurious_irq_mask & irqmask)) {
			printk(KERN_DEBUG
			       "spurious 8259A interrupt: IRQ%d.\n", irq);
			spurious_irq_mask |= irqmask;
		}
		atomic_inc(&irq_err_count);
		/*
		 * Theoretically we do not have to handle this IRQ,
		 * but in Linux this does not cause problems and is
		 * simpler for us.
		 */
		goto handle_real_irq;
	}
}

struct irq_chip i8259A_chip = {
	.name		= "XT-PIC",
	.irq_mask	= disable_8259A_irq,
	.irq_disable	= disable_8259A_irq,
	.irq_unmask	= enable_8259A_irq,
	.irq_mask_ack	= mask_and_ack_8259A,
};

static char irq_trigger[2];
/**
 * ELCR registers (0x4d0, 0x4d1) control edge/level of IRQ
 */
static void restore_ELCR(char *trigger)
{
	outb(trigger[0], 0x4d0);
	outb(trigger[1], 0x4d1);
}

static void save_ELCR(char *trigger)
{
	/* IRQ 0,1,2,8,13 are marked as reserved */
	trigger[0] = inb(0x4d0) & 0xF8;
	trigger[1] = inb(0x4d1) & 0xDE;
}

static void i8259A_resume(void)
{
	init_8259A(i8259A_auto_eoi);
	restore_ELCR(irq_trigger);
}

static int i8259A_suspend(void)
{
	save_ELCR(irq_trigger);
	return 0;
}

static void i8259A_shutdown(void)
{
	/* Put the i8259A into a quiescent state that
	 * the kernel initialization code can get it
	 * out of.
	 */
	outb(0xff, PIC_MASTER_IMR);	/* mask all of 8259A-1 */
	outb(0xff, PIC_SLAVE_IMR);	/* mask all of 8259A-2 */
}

static struct syscore_ops i8259_syscore_ops = {
	.suspend = i8259A_suspend,
	.resume = i8259A_resume,
	.shutdown = i8259A_shutdown,
};

static void mask_8259A(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&i8259A_lock, flags);

	outb(0xff, PIC_MASTER_IMR);	/* mask all of 8259A-1 */
	outb(0xff, PIC_SLAVE_IMR);	/* mask all of 8259A-2 */

	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
}

static void unmask_8259A(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&i8259A_lock, flags);

	outb(cached_master_mask, PIC_MASTER_IMR); /* restore master IRQ mask */
	outb(cached_slave_mask, PIC_SLAVE_IMR);	  /* restore slave IRQ mask */

	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
}

static void init_8259A(int auto_eoi)
{
	unsigned long flags;

	i8259A_auto_eoi = auto_eoi;

	raw_spin_lock_irqsave(&i8259A_lock, flags);

	outb(0xff, PIC_MASTER_IMR);	/* mask all of 8259A-1 */
	outb(0xff, PIC_SLAVE_IMR);	/* mask all of 8259A-2 */

	/*
	 * outb_pic - this has to work on a wide range of PC hardware.
	 */
	outb_pic(0x11, PIC_MASTER_CMD);	/* ICW1: select 8259A-1 init */

	/* ICW2: 8259A-1 IR0-7 mapped to 0x30-0x37 on x86-64,
	   to 0x20-0x27 on i386 */
	outb_pic(IRQ0_VECTOR, PIC_MASTER_IMR);

	/* 8259A-1 (the master) has a slave on IR2 */
	outb_pic(1U << PIC_CASCADE_IR, PIC_MASTER_IMR);

	if (auto_eoi)	/* master does Auto EOI */
		outb_pic(MASTER_ICW4_DEFAULT | PIC_ICW4_AEOI, PIC_MASTER_IMR);
	else		/* master expects normal EOI */
		outb_pic(MASTER_ICW4_DEFAULT, PIC_MASTER_IMR);

	outb_pic(0x11, PIC_SLAVE_CMD);	/* ICW1: select 8259A-2 init */

	/* ICW2: 8259A-2 IR0-7 mapped to IRQ8_VECTOR */
	outb_pic(IRQ8_VECTOR, PIC_SLAVE_IMR);
	/* 8259A-2 is a slave on master's IR2 */
	outb_pic(PIC_CASCADE_IR, PIC_SLAVE_IMR);
	/* (slave's support for AEOI in flat mode is to be investigated) */
	outb_pic(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR);

	if (auto_eoi)
		/*
		 * In AEOI mode we just have to mask the interrupt
		 * when acking.
		 */
		i8259A_chip.irq_mask_ack = disable_8259A_irq;
	else
		i8259A_chip.irq_mask_ack = mask_and_ack_8259A;

	udelay(100);		/* wait for 8259A to initialize */

	outb(cached_master_mask, PIC_MASTER_IMR); /* restore master IRQ mask */
	outb(cached_slave_mask, PIC_SLAVE_IMR);	  /* restore slave IRQ mask */

	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
}

/*
 * make i8259 a driver so that we can select pic functions at run time. the goal
 * is to make x86 binary compatible among pc compatible and non-pc compatible
 * platforms, such as x86 MID.
 */

static void legacy_pic_noop(void) { };
static void legacy_pic_uint_noop(unsigned int unused) { };
static void legacy_pic_int_noop(int unused) { };
static int legacy_pic_irq_pending_noop(unsigned int irq)
{
	return 0;
}

struct legacy_pic null_legacy_pic = {
	.nr_legacy_irqs = 0,
	.chip = &dummy_irq_chip,
	.mask = legacy_pic_uint_noop,
	.unmask = legacy_pic_uint_noop,
	.mask_all = legacy_pic_noop,
	.restore_mask = legacy_pic_noop,
	.init = legacy_pic_int_noop,
	.irq_pending = legacy_pic_irq_pending_noop,
	.make_irq = legacy_pic_uint_noop,
};

struct legacy_pic default_legacy_pic = {
	.nr_legacy_irqs = NR_IRQS_LEGACY,
	.chip  = &i8259A_chip,
	.mask = mask_8259A_irq,
	.unmask = unmask_8259A_irq,
	.mask_all = mask_8259A,
	.restore_mask = unmask_8259A,
	.init = init_8259A,
	.irq_pending = i8259A_irq_pending,
	.make_irq = make_8259A_irq,
};

struct legacy_pic *legacy_pic = &default_legacy_pic;

static int __init i8259A_init_ops(void)
{
	if (legacy_pic == &default_legacy_pic)
		register_syscore_ops(&i8259_syscore_ops);

	return 0;
}

device_initcall(i8259A_init_ops);
gpl-2.0
p12tic/tf700-kernel
drivers/leds/leds-netxbig.c
4822
10899
/*
 * leds-netxbig.c - Driver for the 2Big and 5Big Network series LEDs
 *
 * Copyright (C) 2010 LaCie
 *
 * Author: Simon Guinot <sguinot@lacie.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
#include <linux/leds.h>
#include <mach/leds-netxbig.h>

/*
 * GPIO extension bus.
 */

static DEFINE_SPINLOCK(gpio_ext_lock);

static void gpio_ext_set_addr(struct netxbig_gpio_ext *gpio_ext, int addr)
{
	int pin;

	/* Drive each address line with the matching bit of @addr */
	for (pin = 0; pin < gpio_ext->num_addr; pin++)
		gpio_set_value(gpio_ext->addr[pin], (addr >> pin) & 1);
}

static void gpio_ext_set_data(struct netxbig_gpio_ext *gpio_ext, int data)
{
	int pin;

	for (pin = 0; pin < gpio_ext->num_data; pin++)
		gpio_set_value(gpio_ext->data[pin], (data >> pin) & 1);
}

static void gpio_ext_enable_select(struct netxbig_gpio_ext *gpio_ext)
{
	/* Enable select is done on the rising edge. */
	gpio_set_value(gpio_ext->enable, 0);
	gpio_set_value(gpio_ext->enable, 1);
}

/* Latch @value into extension-bus register @addr (atomic w.r.t. other
 * writers thanks to gpio_ext_lock). */
static void gpio_ext_set_value(struct netxbig_gpio_ext *gpio_ext,
			       int addr, int value)
{
	unsigned long flags;

	spin_lock_irqsave(&gpio_ext_lock, flags);
	gpio_ext_set_addr(gpio_ext, addr);
	gpio_ext_set_data(gpio_ext, value);
	gpio_ext_enable_select(gpio_ext);
	spin_unlock_irqrestore(&gpio_ext_lock, flags);
}

static int __devinit gpio_ext_init(struct netxbig_gpio_ext *gpio_ext)
{
	int err;
	int i;

	if (unlikely(!gpio_ext))
		return -EINVAL;

	/* Configure address GPIOs. */
	for (i = 0; i < gpio_ext->num_addr; i++) {
		err = gpio_request_one(gpio_ext->addr[i], GPIOF_OUT_INIT_LOW,
				       "GPIO extension addr");
		if (err)
			goto err_free_addr;
	}
	/* Configure data GPIOs. */
	for (i = 0; i < gpio_ext->num_data; i++) {
		err = gpio_request_one(gpio_ext->data[i], GPIOF_OUT_INIT_LOW,
				       "GPIO extension data");
		if (err)
			goto err_free_data;
	}
	/* Configure "enable select" GPIO. */
	err = gpio_request_one(gpio_ext->enable, GPIOF_OUT_INIT_LOW,
			       "GPIO extension enable");
	if (err)
		goto err_free_data;

	return 0;

	/* Roll back only the GPIOs requested so far (i is the failed index) */
err_free_data:
	for (i = i - 1; i >= 0; i--)
		gpio_free(gpio_ext->data[i]);
	i = gpio_ext->num_addr;
err_free_addr:
	for (i = i - 1; i >= 0; i--)
		gpio_free(gpio_ext->addr[i]);

	return err;
}

static void gpio_ext_free(struct netxbig_gpio_ext *gpio_ext)
{
	int i;

	gpio_free(gpio_ext->enable);
	for (i = gpio_ext->num_addr - 1; i >= 0; i--)
		gpio_free(gpio_ext->addr[i]);
	for (i = gpio_ext->num_data - 1; i >= 0; i--)
		gpio_free(gpio_ext->data[i]);
}

/*
 * Class LED driver.
 */

struct netxbig_led_data {
	struct netxbig_gpio_ext	*gpio_ext;	/* GPIO extension bus */
	struct led_classdev	cdev;
	int			mode_addr;	/* mode register address */
	int			*mode_val;	/* register value per LED mode */
	int			bright_addr;	/* brightness register address */
	int			bright_max;
	struct netxbig_led_timer *timer;	/* hardware blink modes */
	int			num_timer;
	enum netxbig_led_mode	mode;		/* software copy of current mode */
	int			sata;		/* SATA activity blink enabled */
	spinlock_t		lock;		/* protects mode/sata state */
};

/* Find the hardware timer mode matching the requested on/off delays;
 * returns -EINVAL if no exact match exists. */
static int netxbig_led_get_timer_mode(enum netxbig_led_mode *mode,
				      unsigned long delay_on,
				      unsigned long delay_off,
				      struct netxbig_led_timer *timer,
				      int num_timer)
{
	int i;

	for (i = 0; i < num_timer; i++) {
		if (timer[i].delay_on == delay_on &&
		    timer[i].delay_off == delay_off) {
			*mode = timer[i].mode;
			return 0;
		}
	}
	return -EINVAL;
}

static int netxbig_led_blink_set(struct led_classdev *led_cdev,
				 unsigned long *delay_on,
				 unsigned long *delay_off)
{
	struct netxbig_led_data *led_dat =
		container_of(led_cdev, struct netxbig_led_data, cdev);
	enum netxbig_led_mode mode;
	int mode_val;
	int ret;

	/* Look for a LED mode with the requested timer frequency. */
	ret = netxbig_led_get_timer_mode(&mode, *delay_on, *delay_off,
					 led_dat->timer, led_dat->num_timer);
	if (ret < 0)
		return ret;

	mode_val = led_dat->mode_val[mode];
	if (mode_val == NETXBIG_LED_INVALID_MODE)
		return -EINVAL;

	spin_lock_irq(&led_dat->lock);

	gpio_ext_set_value(led_dat->gpio_ext, led_dat->mode_addr, mode_val);
	led_dat->mode = mode;

	spin_unlock_irq(&led_dat->lock);

	return 0;
}

static void netxbig_led_set(struct led_classdev *led_cdev,
			    enum led_brightness value)
{
	struct netxbig_led_data *led_dat =
		container_of(led_cdev, struct netxbig_led_data, cdev);
	enum netxbig_led_mode mode;
	int mode_val, bright_val;
	int set_brightness = 1;
	unsigned long flags;

	spin_lock_irqsave(&led_dat->lock, flags);

	if (value == LED_OFF) {
		mode = NETXBIG_LED_OFF;
		set_brightness = 0;
	} else {
		if (led_dat->sata)
			mode = NETXBIG_LED_SATA;
		else if (led_dat->mode == NETXBIG_LED_OFF)
			mode = NETXBIG_LED_ON;
		else /* Keep 'timer' mode. */
			mode = led_dat->mode;
	}
	mode_val = led_dat->mode_val[mode];

	gpio_ext_set_value(led_dat->gpio_ext, led_dat->mode_addr, mode_val);
	led_dat->mode = mode;
	/*
	 * Note that the brightness register is shared between all the
	 * SATA LEDs. So, change the brightness setting for a single
	 * SATA LED will affect all the others.
	 */
	if (set_brightness) {
		bright_val = DIV_ROUND_UP(value * led_dat->bright_max,
					  LED_FULL);
		gpio_ext_set_value(led_dat->gpio_ext,
				   led_dat->bright_addr, bright_val);
	}

	spin_unlock_irqrestore(&led_dat->lock, flags);
}

static ssize_t netxbig_led_sata_store(struct device *dev,
				      struct device_attribute *attr,
				      const char *buff, size_t count)
{
	struct led_classdev *led_cdev = dev_get_drvdata(dev);
	struct netxbig_led_data *led_dat =
		container_of(led_cdev, struct netxbig_led_data, cdev);
	unsigned long enable;
	enum netxbig_led_mode mode;
	int mode_val;
	int ret;

	/* NOTE(review): strict_strtoul is the pre-kstrtoul API kept for
	 * this kernel generation. */
	ret = strict_strtoul(buff, 10, &enable);
	if (ret < 0)
		return ret;

	enable = !!enable;

	spin_lock_irq(&led_dat->lock);

	if (led_dat->sata == enable) {
		ret = count;
		goto exit_unlock;
	}

	if (led_dat->mode != NETXBIG_LED_ON &&
	    led_dat->mode != NETXBIG_LED_SATA)
		mode = led_dat->mode; /* Keep modes 'off' and 'timer'. */
	else if (enable)
		mode = NETXBIG_LED_SATA;
	else
		mode = NETXBIG_LED_ON;

	mode_val = led_dat->mode_val[mode];
	if (mode_val == NETXBIG_LED_INVALID_MODE) {
		ret = -EINVAL;
		goto exit_unlock;
	}

	gpio_ext_set_value(led_dat->gpio_ext, led_dat->mode_addr, mode_val);
	led_dat->mode = mode;
	led_dat->sata = enable;

	ret = count;

exit_unlock:
	spin_unlock_irq(&led_dat->lock);

	return ret;
}

static ssize_t netxbig_led_sata_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct led_classdev *led_cdev = dev_get_drvdata(dev);
	struct netxbig_led_data *led_dat =
		container_of(led_cdev, struct netxbig_led_data, cdev);

	return sprintf(buf, "%d\n", led_dat->sata);
}

static DEVICE_ATTR(sata, 0644, netxbig_led_sata_show, netxbig_led_sata_store);

static void delete_netxbig_led(struct netxbig_led_data *led_dat)
{
	if (led_dat->mode_val[NETXBIG_LED_SATA] != NETXBIG_LED_INVALID_MODE)
		device_remove_file(led_dat->cdev.dev, &dev_attr_sata);
	led_classdev_unregister(&led_dat->cdev);
}

static int __devinit
create_netxbig_led(struct platform_device *pdev,
		   struct netxbig_led_data *led_dat,
		   const struct netxbig_led *template)
{
	struct netxbig_led_platform_data *pdata = pdev->dev.platform_data;
	int ret;

	spin_lock_init(&led_dat->lock);
	led_dat->gpio_ext = pdata->gpio_ext;
	led_dat->cdev.name = template->name;
	led_dat->cdev.default_trigger = template->default_trigger;
	led_dat->cdev.blink_set = netxbig_led_blink_set;
	led_dat->cdev.brightness_set = netxbig_led_set;
	/*
	 * Because the GPIO extension bus don't allow to read registers
	 * value, there is no way to probe the LED initial state.
	 * So, the initial sysfs LED value for the "brightness" and "sata"
	 * attributes are inconsistent.
	 *
	 * Note that the initial LED state can't be reconfigured.
	 * The reason is that the LED behaviour must stay uniform during
	 * the whole boot process (bootloader+linux).
	 */
	led_dat->sata = 0;
	led_dat->cdev.brightness = LED_OFF;
	led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME;
	led_dat->mode_addr = template->mode_addr;
	led_dat->mode_val = template->mode_val;
	led_dat->bright_addr = template->bright_addr;
	led_dat->bright_max = (1 << pdata->gpio_ext->num_data) - 1;
	led_dat->timer = pdata->timer;
	led_dat->num_timer = pdata->num_timer;

	ret = led_classdev_register(&pdev->dev, &led_dat->cdev);
	if (ret < 0)
		return ret;

	/*
	 * If available, expose the SATA activity blink capability through
	 * a "sata" sysfs attribute.
	 */
	if (led_dat->mode_val[NETXBIG_LED_SATA] != NETXBIG_LED_INVALID_MODE) {
		ret = device_create_file(led_dat->cdev.dev, &dev_attr_sata);
		if (ret)
			led_classdev_unregister(&led_dat->cdev);
	}

	return ret;
}

static int __devinit netxbig_led_probe(struct platform_device *pdev)
{
	struct netxbig_led_platform_data *pdata = pdev->dev.platform_data;
	struct netxbig_led_data *leds_data;
	int i;
	int ret;

	if (!pdata)
		return -EINVAL;

	leds_data = kzalloc(sizeof(struct netxbig_led_data) * pdata->num_leds,
			    GFP_KERNEL);
	if (!leds_data)
		return -ENOMEM;

	ret = gpio_ext_init(pdata->gpio_ext);
	if (ret < 0)
		goto err_free_data;

	for (i = 0; i < pdata->num_leds; i++) {
		ret = create_netxbig_led(pdev, &leds_data[i], &pdata->leds[i]);
		if (ret < 0)
			goto err_free_leds;
	}

	platform_set_drvdata(pdev, leds_data);

	return 0;

	/* Unwind only the LEDs created so far (i is the failed index) */
err_free_leds:
	for (i = i - 1; i >= 0; i--)
		delete_netxbig_led(&leds_data[i]);

	gpio_ext_free(pdata->gpio_ext);
err_free_data:
	kfree(leds_data);

	return ret;
}

static int __devexit netxbig_led_remove(struct platform_device *pdev)
{
	struct netxbig_led_platform_data *pdata = pdev->dev.platform_data;
	struct netxbig_led_data *leds_data;
	int i;

	leds_data = platform_get_drvdata(pdev);

	for (i = 0; i < pdata->num_leds; i++)
		delete_netxbig_led(&leds_data[i]);

	gpio_ext_free(pdata->gpio_ext);
	kfree(leds_data);

	return 0;
}

static struct platform_driver netxbig_led_driver = {
	.probe		= netxbig_led_probe,
	.remove		= __devexit_p(netxbig_led_remove),
	.driver		= {
		.name	= "leds-netxbig",
		.owner	= THIS_MODULE,
	},
};

module_platform_driver(netxbig_led_driver);

MODULE_AUTHOR("Simon Guinot <sguinot@lacie.com>");
MODULE_DESCRIPTION("LED driver for LaCie xBig Network boards");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:leds-netxbig");
gpl-2.0
spegelius/android_kernel_samsung_jf
fs/ubifs/tnc.c
4822
89184
/* * This file is part of UBIFS. * * Copyright (C) 2006-2008 Nokia Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * * Authors: Adrian Hunter * Artem Bityutskiy (Битюцкий Артём) */ /* * This file implements TNC (Tree Node Cache) which caches indexing nodes of * the UBIFS B-tree. * * At the moment the locking rules of the TNC tree are quite simple and * straightforward. We just have a mutex and lock it when we traverse the * tree. If a znode is not in memory, we read it from flash while still having * the mutex locked. */ #include <linux/crc32.h> #include <linux/slab.h> #include "ubifs.h" /* * Returned codes of 'matches_name()' and 'fallible_matches_name()' functions. * @NAME_LESS: name corresponding to the first argument is less than second * @NAME_MATCHES: names match * @NAME_GREATER: name corresponding to the second argument is greater than * first * @NOT_ON_MEDIA: node referred by zbranch does not exist on the media * * These constants were introduce to improve readability. */ enum { NAME_LESS = 0, NAME_MATCHES = 1, NAME_GREATER = 2, NOT_ON_MEDIA = 3, }; /** * insert_old_idx - record an index node obsoleted since the last commit start. * @c: UBIFS file-system description object * @lnum: LEB number of obsoleted index node * @offs: offset of obsoleted index node * * Returns %0 on success, and a negative error code on failure. 
* * For recovery, there must always be a complete intact version of the index on * flash at all times. That is called the "old index". It is the index as at the * time of the last successful commit. Many of the index nodes in the old index * may be dirty, but they must not be erased until the next successful commit * (at which point that index becomes the old index). * * That means that the garbage collection and the in-the-gaps method of * committing must be able to determine if an index node is in the old index. * Most of the old index nodes can be found by looking up the TNC using the * 'lookup_znode()' function. However, some of the old index nodes may have * been deleted from the current index or may have been changed so much that * they cannot be easily found. In those cases, an entry is added to an RB-tree. * That is what this function does. The RB-tree is ordered by LEB number and * offset because they uniquely identify the old index node. */ static int insert_old_idx(struct ubifs_info *c, int lnum, int offs) { struct ubifs_old_idx *old_idx, *o; struct rb_node **p, *parent = NULL; old_idx = kmalloc(sizeof(struct ubifs_old_idx), GFP_NOFS); if (unlikely(!old_idx)) return -ENOMEM; old_idx->lnum = lnum; old_idx->offs = offs; p = &c->old_idx.rb_node; while (*p) { parent = *p; o = rb_entry(parent, struct ubifs_old_idx, rb); if (lnum < o->lnum) p = &(*p)->rb_left; else if (lnum > o->lnum) p = &(*p)->rb_right; else if (offs < o->offs) p = &(*p)->rb_left; else if (offs > o->offs) p = &(*p)->rb_right; else { ubifs_err("old idx added twice!"); kfree(old_idx); return 0; } } rb_link_node(&old_idx->rb, parent, p); rb_insert_color(&old_idx->rb, &c->old_idx); return 0; } /** * insert_old_idx_znode - record a znode obsoleted since last commit start. * @c: UBIFS file-system description object * @znode: znode of obsoleted index node * * Returns %0 on success, and a negative error code on failure. 
*/ int insert_old_idx_znode(struct ubifs_info *c, struct ubifs_znode *znode) { if (znode->parent) { struct ubifs_zbranch *zbr; zbr = &znode->parent->zbranch[znode->iip]; if (zbr->len) return insert_old_idx(c, zbr->lnum, zbr->offs); } else if (c->zroot.len) return insert_old_idx(c, c->zroot.lnum, c->zroot.offs); return 0; } /** * ins_clr_old_idx_znode - record a znode obsoleted since last commit start. * @c: UBIFS file-system description object * @znode: znode of obsoleted index node * * Returns %0 on success, and a negative error code on failure. */ static int ins_clr_old_idx_znode(struct ubifs_info *c, struct ubifs_znode *znode) { int err; if (znode->parent) { struct ubifs_zbranch *zbr; zbr = &znode->parent->zbranch[znode->iip]; if (zbr->len) { err = insert_old_idx(c, zbr->lnum, zbr->offs); if (err) return err; zbr->lnum = 0; zbr->offs = 0; zbr->len = 0; } } else if (c->zroot.len) { err = insert_old_idx(c, c->zroot.lnum, c->zroot.offs); if (err) return err; c->zroot.lnum = 0; c->zroot.offs = 0; c->zroot.len = 0; } return 0; } /** * destroy_old_idx - destroy the old_idx RB-tree. * @c: UBIFS file-system description object * * During start commit, the old_idx RB-tree is used to avoid overwriting index * nodes that were in the index last commit but have since been deleted. This * is necessary for recovery i.e. the old index must be kept intact until the * new index is successfully written. The old-idx RB-tree is used for the * in-the-gaps method of writing index nodes and is destroyed every commit. 
*/ void destroy_old_idx(struct ubifs_info *c) { struct rb_node *this = c->old_idx.rb_node; struct ubifs_old_idx *old_idx; while (this) { if (this->rb_left) { this = this->rb_left; continue; } else if (this->rb_right) { this = this->rb_right; continue; } old_idx = rb_entry(this, struct ubifs_old_idx, rb); this = rb_parent(this); if (this) { if (this->rb_left == &old_idx->rb) this->rb_left = NULL; else this->rb_right = NULL; } kfree(old_idx); } c->old_idx = RB_ROOT; } /** * copy_znode - copy a dirty znode. * @c: UBIFS file-system description object * @znode: znode to copy * * A dirty znode being committed may not be changed, so it is copied. */ static struct ubifs_znode *copy_znode(struct ubifs_info *c, struct ubifs_znode *znode) { struct ubifs_znode *zn; zn = kmalloc(c->max_znode_sz, GFP_NOFS); if (unlikely(!zn)) return ERR_PTR(-ENOMEM); memcpy(zn, znode, c->max_znode_sz); zn->cnext = NULL; __set_bit(DIRTY_ZNODE, &zn->flags); __clear_bit(COW_ZNODE, &zn->flags); ubifs_assert(!ubifs_zn_obsolete(znode)); __set_bit(OBSOLETE_ZNODE, &znode->flags); if (znode->level != 0) { int i; const int n = zn->child_cnt; /* The children now have new parent */ for (i = 0; i < n; i++) { struct ubifs_zbranch *zbr = &zn->zbranch[i]; if (zbr->znode) zbr->znode->parent = zn; } } atomic_long_inc(&c->dirty_zn_cnt); return zn; } /** * add_idx_dirt - add dirt due to a dirty znode. * @c: UBIFS file-system description object * @lnum: LEB number of index node * @dirt: size of index node * * This function updates lprops dirty space and the new size of the index. */ static int add_idx_dirt(struct ubifs_info *c, int lnum, int dirt) { c->calc_idx_sz -= ALIGN(dirt, 8); return ubifs_add_dirt(c, lnum, dirt); } /** * dirty_cow_znode - ensure a znode is not being committed. * @c: UBIFS file-system description object * @zbr: branch of znode to check * * Returns dirtied znode on success or negative error code on failure. 
 */
static struct ubifs_znode *dirty_cow_znode(struct ubifs_info *c,
					   struct ubifs_zbranch *zbr)
{
	struct ubifs_znode *znode = zbr->znode;
	struct ubifs_znode *zn;
	int err;

	if (!ubifs_zn_cow(znode)) {
		/* znode is not being committed */
		if (!test_and_set_bit(DIRTY_ZNODE, &znode->flags)) {
			atomic_long_inc(&c->dirty_zn_cnt);
			atomic_long_dec(&c->clean_zn_cnt);
			atomic_long_dec(&ubifs_clean_zn_cnt);
			err = add_idx_dirt(c, zbr->lnum, zbr->len);
			if (unlikely(err))
				return ERR_PTR(err);
		}
		return znode;
	}

	/* znode is being committed - copy-on-write it instead */
	zn = copy_znode(c, znode);
	if (IS_ERR(zn))
		return zn;

	if (zbr->len) {
		err = insert_old_idx(c, zbr->lnum, zbr->offs);
		if (unlikely(err))
			return ERR_PTR(err);
		err = add_idx_dirt(c, zbr->lnum, zbr->len);
	} else
		err = 0;

	zbr->znode = zn;
	zbr->lnum = 0;
	zbr->offs = 0;
	zbr->len = 0;

	if (unlikely(err))
		return ERR_PTR(err);
	return zn;
}

/**
 * lnc_add - add a leaf node to the leaf node cache.
 * @c: UBIFS file-system description object
 * @zbr: zbranch of leaf node
 * @node: leaf node
 *
 * Leaf nodes are non-index nodes directory entry nodes or data nodes. The
 * purpose of the leaf node cache is to save re-reading the same leaf node over
 * and over again. Most things are cached by VFS, however the file system must
 * cache directory entries for readdir and for resolving hash collisions. The
 * present implementation of the leaf node cache is extremely simple, and
 * allows for error returns that are not used but that may be needed if a more
 * complex implementation is created.
 *
 * Note, this function does not add the @node object to LNC directly, but
 * allocates a copy of the object and adds the copy to LNC. The reason for this
 * is that @node has been allocated outside of the TNC subsystem and will be
 * used with @c->tnc_mutex unlock upon return from the TNC subsystem. But LNC
 * may be changed at any time, e.g. freed by the shrinker.
 */
static int lnc_add(struct ubifs_info *c, struct ubifs_zbranch *zbr,
		   const void *node)
{
	int err;
	void *lnc_node;
	const struct ubifs_dent_node *dent = node;

	ubifs_assert(!zbr->leaf);
	ubifs_assert(zbr->len != 0);
	ubifs_assert(is_hash_key(c, &zbr->key));

	err = ubifs_validate_entry(c, dent);
	if (err) {
		dbg_dump_stack();
		dbg_dump_node(c, dent);
		return err;
	}

	lnc_node = kmemdup(node, zbr->len, GFP_NOFS);
	if (!lnc_node)
		/* We don't have to have the cache, so no error */
		return 0;

	zbr->leaf = lnc_node;
	return 0;
}

/**
 * lnc_add_directly - add a leaf node to the leaf-node-cache.
 * @c: UBIFS file-system description object
 * @zbr: zbranch of leaf node
 * @node: leaf node
 *
 * This function is similar to 'lnc_add()', but it does not create a copy of
 * @node but inserts @node to TNC directly.
 */
static int lnc_add_directly(struct ubifs_info *c, struct ubifs_zbranch *zbr,
			    void *node)
{
	int err;

	ubifs_assert(!zbr->leaf);
	ubifs_assert(zbr->len != 0);

	err = ubifs_validate_entry(c, node);
	if (err) {
		dbg_dump_stack();
		dbg_dump_node(c, node);
		return err;
	}

	zbr->leaf = node;
	return 0;
}

/**
 * lnc_free - remove a leaf node from the leaf node cache.
 * @zbr: zbranch of leaf node
 */
static void lnc_free(struct ubifs_zbranch *zbr)
{
	if (!zbr->leaf)
		return;
	kfree(zbr->leaf);
	zbr->leaf = NULL;
}

/**
 * tnc_read_node_nm - read a "hashed" leaf node.
 * @c: UBIFS file-system description object
 * @zbr: key and position of the node
 * @node: node is returned here
 *
 * This function reads a "hashed" node defined by @zbr from the leaf node cache
 * (if it is there) or from the flash media, in which case the node is also
 * added to LNC. Returns zero in case of success or a negative error
 * code in case of failure.
 */
static int tnc_read_node_nm(struct ubifs_info *c, struct ubifs_zbranch *zbr,
			    void *node)
{
	int err;

	ubifs_assert(is_hash_key(c, &zbr->key));

	if (zbr->leaf) {
		/* Read from the leaf node cache */
		ubifs_assert(zbr->len != 0);
		memcpy(node, zbr->leaf, zbr->len);
		return 0;
	}

	err = ubifs_tnc_read_node(c, zbr, node);
	if (err)
		return err;

	/* Add the node to the leaf node cache */
	err = lnc_add(c, zbr, node);
	return err;
}

/**
 * try_read_node - read a node if it is a node.
 * @c: UBIFS file-system description object
 * @buf: buffer to read to
 * @type: node type
 * @len: node length (not aligned)
 * @lnum: LEB number of node to read
 * @offs: offset of node to read
 *
 * This function tries to read a node of known type and length, checks it and
 * stores it in @buf. This function returns %1 if a node is present and %0 if
 * a node is not present. A negative error code is returned for I/O errors.
 * This function performs the same function as ubifs_read_node except that
 * it does not require that there is actually a node present and instead
 * the return code indicates if a node was read.
 *
 * Note, this function does not check CRC of data nodes if @c->no_chk_data_crc
 * is true (it is controlled by corresponding mount option). However, if
 * @c->mounting or @c->remounting_rw is true (we are mounting or re-mounting to
 * R/W mode), @c->no_chk_data_crc is ignored and CRC is checked. This is
 * because during mounting or re-mounting from R/O mode to R/W mode we may read
 * journal nodes (when replaying the journal or doing the recovery) and the
 * journal nodes may potentially be corrupted, so checking is required.
 */
static int try_read_node(const struct ubifs_info *c, void *buf, int type,
			 int len, int lnum, int offs)
{
	int err, node_len;
	struct ubifs_ch *ch = buf;
	uint32_t crc, node_crc;

	dbg_io("LEB %d:%d, %s, length %d", lnum, offs, dbg_ntype(type), len);

	err = ubifs_leb_read(c, lnum, buf, offs, len, 1);
	if (err) {
		ubifs_err("cannot read node type %d from LEB %d:%d, error %d",
			  type, lnum, offs, err);
		return err;
	}

	/* A mismatch in any of these checks means "no node here", not error */
	if (le32_to_cpu(ch->magic) != UBIFS_NODE_MAGIC)
		return 0;

	if (ch->node_type != type)
		return 0;

	node_len = le32_to_cpu(ch->len);
	if (node_len != len)
		return 0;

	if (type == UBIFS_DATA_NODE && c->no_chk_data_crc && !c->mounting &&
	    !c->remounting_rw)
		return 1;

	crc = crc32(UBIFS_CRC32_INIT, buf + 8, node_len - 8);
	node_crc = le32_to_cpu(ch->crc);
	if (crc != node_crc)
		return 0;

	return 1;
}

/**
 * fallible_read_node - try to read a leaf node.
 * @c: UBIFS file-system description object
 * @key: key of node to read
 * @zbr: position of node
 * @node: node returned
 *
 * This function tries to read a node and returns %1 if the node is read, %0
 * if the node is not present, and a negative error code in the case of error.
 */
static int fallible_read_node(struct ubifs_info *c, const union ubifs_key *key,
			      struct ubifs_zbranch *zbr, void *node)
{
	int ret;

	dbg_tnck(key, "LEB %d:%d, key ", zbr->lnum, zbr->offs);

	ret = try_read_node(c, node, key_type(c, key), zbr->len, zbr->lnum,
			    zbr->offs);
	if (ret == 1) {
		union ubifs_key node_key;
		struct ubifs_dent_node *dent = node;

		/* All nodes have key in the same place */
		key_read(c, &dent->key, &node_key);
		if (keys_cmp(c, key, &node_key) != 0)
			ret = 0;
	}
	if (ret == 0 && c->replaying)
		dbg_mntk(key, "dangling branch LEB %d:%d len %d, key ",
			 zbr->lnum, zbr->offs, zbr->len);
	return ret;
}

/**
 * matches_name - determine if a direntry or xattr entry matches a given name.
 * @c: UBIFS file-system description object
 * @zbr: zbranch of dent
 * @nm: name to match
 *
 * This function checks if xentry/direntry referred by zbranch @zbr matches name
 * @nm.
 * Returns %NAME_MATCHES if it does, %NAME_LESS if the name referred by
 * @zbr is less than @nm, and %NAME_GREATER if it is greater than @nm. In case
 * of failure, a negative error code is returned.
 */
static int matches_name(struct ubifs_info *c, struct ubifs_zbranch *zbr,
			const struct qstr *nm)
{
	struct ubifs_dent_node *dent;
	int nlen, err;

	/* If possible, match against the dent in the leaf node cache */
	if (!zbr->leaf) {
		dent = kmalloc(zbr->len, GFP_NOFS);
		if (!dent)
			return -ENOMEM;

		err = ubifs_tnc_read_node(c, zbr, dent);
		if (err)
			goto out_free;

		/* Add the node to the leaf node cache */
		err = lnc_add_directly(c, zbr, dent);
		if (err)
			goto out_free;
	} else
		dent = zbr->leaf;

	nlen = le16_to_cpu(dent->nlen);
	err = memcmp(dent->name, nm->name, min_t(int, nlen, nm->len));
	if (err == 0) {
		if (nlen == nm->len)
			return NAME_MATCHES;
		else if (nlen < nm->len)
			return NAME_LESS;
		else
			return NAME_GREATER;
	} else if (err < 0)
		return NAME_LESS;
	else
		return NAME_GREATER;

out_free:
	kfree(dent);
	return err;
}

/**
 * get_znode - get a TNC znode that may not be loaded yet.
 * @c: UBIFS file-system description object
 * @znode: parent znode
 * @n: znode branch slot number
 *
 * This function returns the znode or a negative error code.
 */
static struct ubifs_znode *get_znode(struct ubifs_info *c,
				     struct ubifs_znode *znode, int n)
{
	struct ubifs_zbranch *zbr;

	zbr = &znode->zbranch[n];
	if (zbr->znode)
		znode = zbr->znode;
	else
		znode = ubifs_load_znode(c, zbr, znode, n);
	return znode;
}

/**
 * tnc_next - find next TNC entry.
 * @c: UBIFS file-system description object
 * @zn: znode is passed and returned here
 * @n: znode branch slot number is passed and returned here
 *
 * This function returns %0 if the next TNC entry is found, %-ENOENT if there is
 * no next entry, or a negative error code otherwise.
 */
static int tnc_next(struct ubifs_info *c, struct ubifs_znode **zn, int *n)
{
	struct ubifs_znode *znode = *zn;
	int nn = *n;

	nn += 1;
	if (nn < znode->child_cnt) {
		*n = nn;
		return 0;
	}
	/* Climb up until a parent has a next sibling, then descend leftmost */
	while (1) {
		struct ubifs_znode *zp;

		zp = znode->parent;
		if (!zp)
			return -ENOENT;
		nn = znode->iip + 1;
		znode = zp;
		if (nn < znode->child_cnt) {
			znode = get_znode(c, znode, nn);
			if (IS_ERR(znode))
				return PTR_ERR(znode);
			while (znode->level != 0) {
				znode = get_znode(c, znode, 0);
				if (IS_ERR(znode))
					return PTR_ERR(znode);
			}
			nn = 0;
			break;
		}
	}
	*zn = znode;
	*n = nn;
	return 0;
}

/**
 * tnc_prev - find previous TNC entry.
 * @c: UBIFS file-system description object
 * @zn: znode is returned here
 * @n: znode branch slot number is passed and returned here
 *
 * This function returns %0 if the previous TNC entry is found, %-ENOENT if
 * there is no previous entry, or a negative error code otherwise.
 */
static int tnc_prev(struct ubifs_info *c, struct ubifs_znode **zn, int *n)
{
	struct ubifs_znode *znode = *zn;
	int nn = *n;

	if (nn > 0) {
		*n = nn - 1;
		return 0;
	}
	/* Climb up until a parent has a previous sibling, descend rightmost */
	while (1) {
		struct ubifs_znode *zp;

		zp = znode->parent;
		if (!zp)
			return -ENOENT;
		nn = znode->iip - 1;
		znode = zp;
		if (nn >= 0) {
			znode = get_znode(c, znode, nn);
			if (IS_ERR(znode))
				return PTR_ERR(znode);
			while (znode->level != 0) {
				nn = znode->child_cnt - 1;
				znode = get_znode(c, znode, nn);
				if (IS_ERR(znode))
					return PTR_ERR(znode);
			}
			nn = znode->child_cnt - 1;
			break;
		}
	}
	*zn = znode;
	*n = nn;
	return 0;
}

/**
 * resolve_collision - resolve a collision.
 * @c: UBIFS file-system description object
 * @key: key of a directory or extended attribute entry
 * @zn: znode is returned here
 * @n: zbranch number is passed and returned here
 * @nm: name of the entry
 *
 * This function is called for "hashed" keys to make sure that the found key
 * really corresponds to the looked up node (directory or extended attribute
 * entry). It returns %1 and sets @zn and @n if the collision is resolved.
 * %0 is returned if @nm is not found and @zn and @n are set to the previous
 * entry, i.e. to the entry after which @nm could follow if it were in TNC.
 * This means that @n may be set to %-1 if the leftmost key in @zn is the
 * previous one. A negative error code is returned on failures.
 */
static int resolve_collision(struct ubifs_info *c, const union ubifs_key *key,
			     struct ubifs_znode **zn, int *n,
			     const struct qstr *nm)
{
	int err;

	err = matches_name(c, &(*zn)->zbranch[*n], nm);
	if (unlikely(err < 0))
		return err;
	if (err == NAME_MATCHES)
		return 1;

	if (err == NAME_GREATER) {
		/* Look left */
		while (1) {
			err = tnc_prev(c, zn, n);
			if (err == -ENOENT) {
				ubifs_assert(*n == 0);
				*n = -1;
				return 0;
			}
			if (err < 0)
				return err;
			if (keys_cmp(c, &(*zn)->zbranch[*n].key, key)) {
				/*
				 * We have found the branch after which we would
				 * like to insert, but inserting in this znode
				 * may still be wrong. Consider the following 3
				 * znodes, in the case where we are resolving a
				 * collision with Key2.
				 *
				 *                  znode zp
				 *            ----------------------
				 * level 1     |  Key0  |  Key1  |
				 *            -----------------------
				 *                 |            |
				 *       znode za  |            |  znode zb
				 *          ------------      ------------
				 * level 0  |  Key0  |        |  Key2  |
				 *          ------------      ------------
				 *
				 * The lookup finds Key2 in znode zb. Lets say
				 * there is no match and the name is greater so
				 * we look left. When we find Key0, we end up
				 * here. If we return now, we will insert into
				 * znode za at slot n = 1. But that is invalid
				 * according to the parent's keys. Key2 must
				 * be inserted into znode zb.
				 *
				 * Note, this problem is not relevant for the
				 * case when we go right, because
				 * 'tnc_insert()' would correct the parent key.
				 */
				if (*n == (*zn)->child_cnt - 1) {
					err = tnc_next(c, zn, n);
					if (err) {
						/* Should be impossible */
						ubifs_assert(0);
						if (err == -ENOENT)
							err = -EINVAL;
						return err;
					}
					ubifs_assert(*n == 0);
					*n = -1;
				}
				return 0;
			}
			err = matches_name(c, &(*zn)->zbranch[*n], nm);
			if (err < 0)
				return err;
			if (err == NAME_LESS)
				return 0;
			if (err == NAME_MATCHES)
				return 1;
			ubifs_assert(err == NAME_GREATER);
		}
	} else {
		int nn = *n;
		struct ubifs_znode *znode = *zn;

		/* Look right */
		while (1) {
			err = tnc_next(c, &znode, &nn);
			if (err == -ENOENT)
				return 0;
			if (err < 0)
				return err;
			if (keys_cmp(c, &znode->zbranch[nn].key, key))
				return 0;
			err = matches_name(c, &znode->zbranch[nn], nm);
			if (err < 0)
				return err;
			if (err == NAME_GREATER)
				return 0;
			*zn = znode;
			*n = nn;
			if (err == NAME_MATCHES)
				return 1;
			ubifs_assert(err == NAME_LESS);
		}
	}
}

/**
 * fallible_matches_name - determine if a dent matches a given name.
 * @c: UBIFS file-system description object
 * @zbr: zbranch of dent
 * @nm: name to match
 *
 * This is a "fallible" version of 'matches_name()' function which does not
 * panic if the direntry/xentry referred by @zbr does not exist on the media.
 *
 * This function checks if xentry/direntry referred by zbranch @zbr matches name
 * @nm. Returns %NAME_MATCHES if it does, %NAME_LESS if the name referred by
 * @zbr is less than @nm, %NAME_GREATER if it is greater than @nm, and
 * %NOT_ON_MEDIA if the xentry/direntry referred by @zbr does not exist on the
 * media. A negative error code is returned in case of failure.
 */
static int fallible_matches_name(struct ubifs_info *c,
				 struct ubifs_zbranch *zbr,
				 const struct qstr *nm)
{
	struct ubifs_dent_node *dent;
	int nlen, err;

	/* If possible, match against the dent in the leaf node cache */
	if (!zbr->leaf) {
		dent = kmalloc(zbr->len, GFP_NOFS);
		if (!dent)
			return -ENOMEM;

		err = fallible_read_node(c, &zbr->key, zbr, dent);
		if (err < 0)
			goto out_free;
		if (err == 0) {
			/* The node was not present */
			err = NOT_ON_MEDIA;
			goto out_free;
		}
		ubifs_assert(err == 1);

		err = lnc_add_directly(c, zbr, dent);
		if (err)
			goto out_free;
	} else
		dent = zbr->leaf;

	nlen = le16_to_cpu(dent->nlen);
	err = memcmp(dent->name, nm->name, min_t(int, nlen, nm->len));
	if (err == 0) {
		if (nlen == nm->len)
			return NAME_MATCHES;
		else if (nlen < nm->len)
			return NAME_LESS;
		else
			return NAME_GREATER;
	} else if (err < 0)
		return NAME_LESS;
	else
		return NAME_GREATER;

out_free:
	kfree(dent);
	return err;
}

/**
 * fallible_resolve_collision - resolve a collision even if nodes are missing.
 * @c: UBIFS file-system description object
 * @key: key
 * @zn: znode is returned here
 * @n: branch number is passed and returned here
 * @nm: name of directory entry
 * @adding: indicates caller is adding a key to the TNC
 *
 * This is a "fallible" version of the 'resolve_collision()' function which
 * does not panic if one of the nodes referred to by TNC does not exist on the
 * media. This may happen when replaying the journal if a deleted node was
 * Garbage-collected and the commit was not done. A branch that refers to a node
 * that is not present is called a dangling branch. The following are the return
 * codes for this function:
 *  o if @nm was found, %1 is returned and @zn and @n are set to the found
 *    branch;
 *  o if we are @adding and @nm was not found, %0 is returned;
 *  o if we are not @adding and @nm was not found, but a dangling branch was
 *    found, then %1 is returned and @zn and @n are set to the dangling branch;
 *  o a negative error code is returned in case of failure.
 */
static int fallible_resolve_collision(struct ubifs_info *c,
				      const union ubifs_key *key,
				      struct ubifs_znode **zn, int *n,
				      const struct qstr *nm, int adding)
{
	struct ubifs_znode *o_znode = NULL, *znode = *zn;
	int uninitialized_var(o_n), err, cmp, unsure = 0, nn = *n;

	cmp = fallible_matches_name(c, &znode->zbranch[nn], nm);
	if (unlikely(cmp < 0))
		return cmp;
	if (cmp == NAME_MATCHES)
		return 1;
	if (cmp == NOT_ON_MEDIA) {
		o_znode = znode;
		o_n = nn;
		/*
		 * We are unlucky and hit a dangling branch straight away.
		 * Now we do not really know where to go to find the needed
		 * branch - to the left or to the right. Well, let's try left.
		 */
		unsure = 1;
	} else if (!adding)
		unsure = 1; /* Remove a dangling branch wherever it is */

	if (cmp == NAME_GREATER || unsure) {
		/* Look left */
		while (1) {
			err = tnc_prev(c, zn, n);
			if (err == -ENOENT) {
				ubifs_assert(*n == 0);
				*n = -1;
				break;
			}
			if (err < 0)
				return err;
			if (keys_cmp(c, &(*zn)->zbranch[*n].key, key)) {
				/* See comments in 'resolve_collision()' */
				if (*n == (*zn)->child_cnt - 1) {
					err = tnc_next(c, zn, n);
					if (err) {
						/* Should be impossible */
						ubifs_assert(0);
						if (err == -ENOENT)
							err = -EINVAL;
						return err;
					}
					ubifs_assert(*n == 0);
					*n = -1;
				}
				break;
			}
			err = fallible_matches_name(c, &(*zn)->zbranch[*n], nm);
			if (err < 0)
				return err;
			if (err == NAME_MATCHES)
				return 1;
			if (err == NOT_ON_MEDIA) {
				o_znode = *zn;
				o_n = *n;
				continue;
			}
			if (!adding)
				continue;
			if (err == NAME_LESS)
				break;
			else
				unsure = 0;
		}
	}

	if (cmp == NAME_LESS || unsure) {
		/* Look right */
		*zn = znode;
		*n = nn;
		while (1) {
			err = tnc_next(c, &znode, &nn);
			if (err == -ENOENT)
				break;
			if (err < 0)
				return err;
			if (keys_cmp(c, &znode->zbranch[nn].key, key))
				break;
			err = fallible_matches_name(c, &znode->zbranch[nn], nm);
			if (err < 0)
				return err;
			if (err == NAME_GREATER)
				break;
			*zn = znode;
			*n = nn;
			if (err == NAME_MATCHES)
				return 1;
			if (err == NOT_ON_MEDIA) {
				o_znode = znode;
				o_n = nn;
			}
		}
	}

	/* Never match a dangling branch when adding */
	if (adding || !o_znode)
		return 0;
	dbg_mntk(key, "dangling match LEB %d:%d len %d key ",
		o_znode->zbranch[o_n].lnum, o_znode->zbranch[o_n].offs,
		o_znode->zbranch[o_n].len);
	*zn = o_znode;
	*n = o_n;
	return 1;
}

/**
 * matches_position - determine if a zbranch matches a given position.
 * @zbr: zbranch of dent
 * @lnum: LEB number of dent to match
 * @offs: offset of dent to match
 *
 * This function returns %1 if @lnum:@offs matches, and %0 otherwise.
 */
static int matches_position(struct ubifs_zbranch *zbr, int lnum, int offs)
{
	if (zbr->lnum == lnum && zbr->offs == offs)
		return 1;
	else
		return 0;
}

/**
 * resolve_collision_directly - resolve a collision directly.
 * @c: UBIFS file-system description object
 * @key: key of directory entry
 * @zn: znode is passed and returned here
 * @n: zbranch number is passed and returned here
 * @lnum: LEB number of dent node to match
 * @offs: offset of dent node to match
 *
 * This function is used for "hashed" keys to make sure the found directory or
 * extended attribute entry node is what was looked for. It is used when the
 * flash address of the right node is known (@lnum:@offs) which makes it much
 * easier to resolve collisions (no need to read entries and match full
 * names). This function returns %1 and sets @zn and @n if the collision is
 * resolved, %0 if @lnum:@offs is not found and @zn and @n are set to the
 * previous directory entry. Otherwise a negative error code is returned.
 */
static int resolve_collision_directly(struct ubifs_info *c,
				      const union ubifs_key *key,
				      struct ubifs_znode **zn, int *n,
				      int lnum, int offs)
{
	struct ubifs_znode *znode;
	int nn, err;

	znode = *zn;
	nn = *n;
	if (matches_position(&znode->zbranch[nn], lnum, offs))
		return 1;

	/* Look left */
	while (1) {
		err = tnc_prev(c, &znode, &nn);
		if (err == -ENOENT)
			break;
		if (err < 0)
			return err;
		if (keys_cmp(c, &znode->zbranch[nn].key, key))
			break;
		if (matches_position(&znode->zbranch[nn], lnum, offs)) {
			*zn = znode;
			*n = nn;
			return 1;
		}
	}

	/* Look right */
	znode = *zn;
	nn = *n;
	while (1) {
		err = tnc_next(c, &znode, &nn);
		if (err == -ENOENT)
			return 0;
		if (err < 0)
			return err;
		if (keys_cmp(c, &znode->zbranch[nn].key, key))
			return 0;
		*zn = znode;
		*n = nn;
		if (matches_position(&znode->zbranch[nn], lnum, offs))
			return 1;
	}
}

/**
 * dirty_cow_bottom_up - dirty a znode and its ancestors.
 * @c: UBIFS file-system description object
 * @znode: znode to dirty
 *
 * If we do not have a unique key that resides in a znode, then we cannot
 * dirty that znode from the top down (i.e. by using lookup_level0_dirty)
 * This function records the path back to the last dirty ancestor, and then
 * dirties the znodes on that path.
 */
static struct ubifs_znode *dirty_cow_bottom_up(struct ubifs_info *c,
					       struct ubifs_znode *znode)
{
	struct ubifs_znode *zp;
	int *path = c->bottom_up_buf, p = 0;

	ubifs_assert(c->zroot.znode);
	ubifs_assert(znode);
	/* The pre-allocated buffer may be too small for a very tall tree */
	if (c->zroot.znode->level > BOTTOM_UP_HEIGHT) {
		kfree(c->bottom_up_buf);
		c->bottom_up_buf = kmalloc(c->zroot.znode->level * sizeof(int),
					   GFP_NOFS);
		if (!c->bottom_up_buf)
			return ERR_PTR(-ENOMEM);
		path = c->bottom_up_buf;
	}
	if (c->zroot.znode->level) {
		/* Go up until parent is dirty */
		while (1) {
			int n;

			zp = znode->parent;
			if (!zp)
				break;
			n = znode->iip;
			ubifs_assert(p < c->zroot.znode->level);
			path[p++] = n;
			if (!zp->cnext && ubifs_zn_dirty(znode))
				break;
			znode = zp;
		}
	}

	/* Come back down, dirtying as we go */
	while (1) {
		struct ubifs_zbranch *zbr;

		zp = znode->parent;
		if (zp) {
			ubifs_assert(path[p - 1] >= 0);
			ubifs_assert(path[p - 1] < zp->child_cnt);
			zbr = &zp->zbranch[path[--p]];
			znode = dirty_cow_znode(c, zbr);
		} else {
			ubifs_assert(znode == c->zroot.znode);
			znode = dirty_cow_znode(c, &c->zroot);
		}
		if (IS_ERR(znode) || !p)
			break;
		ubifs_assert(path[p - 1] >= 0);
		ubifs_assert(path[p - 1] < znode->child_cnt);
		znode = znode->zbranch[path[p - 1]].znode;
	}

	return znode;
}

/**
 * ubifs_lookup_level0 - search for zero-level znode.
 * @c: UBIFS file-system description object
 * @key: key to lookup
 * @zn: znode is returned here
 * @n: znode branch slot number is returned here
 *
 * This function looks up the TNC tree and search for zero-level znode which
 * refers key @key. The found zero-level znode is returned in @zn. There are 3
 * cases:
 * o exact match, i.e.
 *   the found zero-level znode contains key @key, then %1
 *   is returned and slot number of the matched branch is stored in @n;
 * o not exact match, which means that zero-level znode does not contain
 *   @key, then %0 is returned and slot number of the closest branch is stored
 *   in @n;
 * o @key is so small that it is even less than the lowest key of the
 *   leftmost zero-level node, then %0 is returned and %-1 is stored in @n.
 *
 * Note, when the TNC tree is traversed, some znodes may be absent, then this
 * function reads corresponding indexing nodes and inserts them to TNC. In
 * case of failure, a negative error code is returned.
 */
int ubifs_lookup_level0(struct ubifs_info *c, const union ubifs_key *key,
			struct ubifs_znode **zn, int *n)
{
	int err, exact;
	struct ubifs_znode *znode;
	unsigned long time = get_seconds();

	dbg_tnck(key, "search key ");
	ubifs_assert(key_type(c, key) < UBIFS_INVALID_KEY);

	znode = c->zroot.znode;
	if (unlikely(!znode)) {
		znode = ubifs_load_znode(c, &c->zroot, NULL, 0);
		if (IS_ERR(znode))
			return PTR_ERR(znode);
	}

	znode->time = time;

	while (1) {
		struct ubifs_zbranch *zbr;

		exact = ubifs_search_zbranch(c, znode, key, n);

		if (znode->level == 0)
			break;

		if (*n < 0)
			*n = 0;
		zbr = &znode->zbranch[*n];

		if (zbr->znode) {
			znode->time = time;
			znode = zbr->znode;
			continue;
		}

		/* znode is not in TNC cache, load it from the media */
		znode = ubifs_load_znode(c, zbr, znode, *n);
		if (IS_ERR(znode))
			return PTR_ERR(znode);
	}

	*zn = znode;
	if (exact || !is_hash_key(c, key) || *n != -1) {
		dbg_tnc("found %d, lvl %d, n %d", exact, znode->level, *n);
		return exact;
	}

	/*
	 * Here is a tricky place. We have not found the key and this is a
	 * "hashed" key, which may collide. The rest of the code deals with
	 * situations like this:
	 *
	 *                  | 3 | 5 |
	 *                  /       \
	 *          | 3 | 5 |      | 6 | 7 | (x)
	 *
	 * Or more a complex example:
	 *
	 *                | 1 | 5 |
	 *                /       \
	 *       | 1 | 3 |         | 5 | 8 |
	 *              \           /
	 *     | 5 | 5 |   | 6 | 7 | (x)
	 *
	 * In the examples, if we are looking for key "5", we may reach nodes
	 * marked with "(x)". In this case what we have to do is to look at the
	 * left and see if there is "5" key there. If there is, we have to
	 * return it.
	 *
	 * Note, this whole situation is possible because we allow to have
	 * elements which are equivalent to the next key in the parent in the
	 * children of current znode. For example, this happens if we split a
	 * znode like this: | 3 | 5 | 5 | 6 | 7 |, which results in something
	 * like this:
	 *                      | 3 | 5 |
	 *                       /     \
	 *  | 3 | 5 |   | 5 | 6 | 7 |
	 *              ^
	 * And this becomes what is at the first "picture" after key "5" marked
	 * with "^" is removed. What could be done is we could prohibit
	 * splitting in the middle of the colliding sequence. Also, when
	 * removing the leftmost key, we would have to correct the key of the
	 * parent node, which would introduce additional complications. Namely,
	 * if we changed the leftmost key of the parent znode, the garbage
	 * collector would be unable to find it (GC is doing this when GC'ing
	 * indexing LEBs). Although we already have an additional RB-tree where
	 * we save such changed znodes (see 'ins_clr_old_idx_znode()') until
	 * after the commit. But anyway, this does not look easy to implement
	 * so we did not try this.
	 */
	err = tnc_prev(c, &znode, n);
	if (err == -ENOENT) {
		dbg_tnc("found 0, lvl %d, n -1", znode->level);
		*n = -1;
		return 0;
	}
	if (unlikely(err < 0))
		return err;
	if (keys_cmp(c, key, &znode->zbranch[*n].key)) {
		dbg_tnc("found 0, lvl %d, n -1", znode->level);
		*n = -1;
		return 0;
	}
	dbg_tnc("found 1, lvl %d, n %d", znode->level, *n);
	*zn = znode;
	return 1;
}

/**
 * lookup_level0_dirty - search for zero-level znode dirtying.
 * @c: UBIFS file-system description object
 * @key: key to lookup
 * @zn: znode is returned here
 * @n: znode branch slot number is returned here
 *
 * This function looks up the TNC tree and search for zero-level znode which
 * refers key @key. The found zero-level znode is returned in @zn. There are 3
 * cases:
 * o exact match, i.e.
 *   the found zero-level znode contains key @key, then %1
 *   is returned and slot number of the matched branch is stored in @n;
 * o not exact match, which means that zero-level znode does not contain @key,
 *   then %0 is returned and slot number of the closest branch is stored in
 *   @n;
 * o @key is so small that it is even less than the lowest key of the
 *   leftmost zero-level node, then %0 is returned and %-1 is stored in @n.
 *
 * Additionally all znodes in the path from the root to the located zero-level
 * znode are marked as dirty.
 *
 * Note, when the TNC tree is traversed, some znodes may be absent, then this
 * function reads corresponding indexing nodes and inserts them to TNC. In
 * case of failure, a negative error code is returned.
 */
static int lookup_level0_dirty(struct ubifs_info *c, const union ubifs_key *key,
			       struct ubifs_znode **zn, int *n)
{
	int err, exact;
	struct ubifs_znode *znode;
	unsigned long time = get_seconds();

	dbg_tnck(key, "search and dirty key ");

	znode = c->zroot.znode;
	if (unlikely(!znode)) {
		znode = ubifs_load_znode(c, &c->zroot, NULL, 0);
		if (IS_ERR(znode))
			return PTR_ERR(znode);
	}

	znode = dirty_cow_znode(c, &c->zroot);
	if (IS_ERR(znode))
		return PTR_ERR(znode);

	znode->time = time;

	while (1) {
		struct ubifs_zbranch *zbr;

		exact = ubifs_search_zbranch(c, znode, key, n);

		if (znode->level == 0)
			break;

		if (*n < 0)
			*n = 0;
		zbr = &znode->zbranch[*n];

		if (zbr->znode) {
			znode->time = time;
			znode = dirty_cow_znode(c, zbr);
			if (IS_ERR(znode))
				return PTR_ERR(znode);
			continue;
		}

		/* znode is not in TNC cache, load it from the media */
		znode = ubifs_load_znode(c, zbr, znode, *n);
		if (IS_ERR(znode))
			return PTR_ERR(znode);
		znode = dirty_cow_znode(c, zbr);
		if (IS_ERR(znode))
			return PTR_ERR(znode);
	}

	*zn = znode;
	if (exact || !is_hash_key(c, key) || *n != -1) {
		dbg_tnc("found %d, lvl %d, n %d", exact, znode->level, *n);
		return exact;
	}

	/*
	 * See the huge comment in 'ubifs_lookup_level0()' for what the rest
	 * of the code does.
	 */
	err = tnc_prev(c, &znode, n);
	if (err == -ENOENT) {
		*n = -1;
		dbg_tnc("found 0, lvl %d, n -1", znode->level);
		return 0;
	}
	if (unlikely(err < 0))
		return err;
	if (keys_cmp(c, key, &znode->zbranch[*n].key)) {
		*n = -1;
		dbg_tnc("found 0, lvl %d, n -1", znode->level);
		return 0;
	}

	if (znode->cnext || !ubifs_zn_dirty(znode)) {
		znode = dirty_cow_bottom_up(c, znode);
		if (IS_ERR(znode))
			return PTR_ERR(znode);
	}

	dbg_tnc("found 1, lvl %d, n %d", znode->level, *n);
	*zn = znode;
	return 1;
}

/**
 * maybe_leb_gced - determine if a LEB may have been garbage collected.
 * @c: UBIFS file-system description object
 * @lnum: LEB number
 * @gc_seq1: garbage collection sequence number
 *
 * This function determines if @lnum may have been garbage collected since
 * sequence number @gc_seq1. If it may have been then %1 is returned, otherwise
 * %0 is returned.
 */
static int maybe_leb_gced(struct ubifs_info *c, int lnum, int gc_seq1)
{
	int gc_seq2, gced_lnum;

	gced_lnum = c->gced_lnum;
	smp_rmb();
	gc_seq2 = c->gc_seq;
	/* Same seq means no GC */
	if (gc_seq1 == gc_seq2)
		return 0;
	/* Different by more than 1 means we don't know */
	if (gc_seq1 + 1 != gc_seq2)
		return 1;
	/*
	 * We have seen the sequence number has increased by 1. Now we need to
	 * be sure we read the right LEB number, so read it again.
	 */
	smp_rmb();
	if (gced_lnum != c->gced_lnum)
		return 1;
	/* Finally we can check lnum */
	if (gced_lnum == lnum)
		return 1;
	return 0;
}

/**
 * ubifs_tnc_locate - look up a file-system node and return it and its location.
 * @c: UBIFS file-system description object
 * @key: node key to lookup
 * @node: the node is returned here
 * @lnum: LEB number is returned here
 * @offs: offset is returned here
 *
 * This function looks up and reads node with key @key. The caller has to make
 * sure the @node buffer is large enough to fit the node. Returns zero in case
 * of success, %-ENOENT if the node was not found, and a negative error code in
 * case of failure. The node location can be returned in @lnum and @offs.
 */
int ubifs_tnc_locate(struct ubifs_info *c, const union ubifs_key *key,
		     void *node, int *lnum, int *offs)
{
	int found, n, err, safely = 0, gc_seq1;
	struct ubifs_znode *znode;
	struct ubifs_zbranch zbr, *zt;

again:
	mutex_lock(&c->tnc_mutex);
	found = ubifs_lookup_level0(c, key, &znode, &n);
	if (!found) {
		err = -ENOENT;
		goto out;
	} else if (found < 0) {
		err = found;
		goto out;
	}
	zt = &znode->zbranch[n];
	if (lnum) {
		*lnum = zt->lnum;
		*offs = zt->offs;
	}
	if (is_hash_key(c, key)) {
		/*
		 * In this case the leaf node cache gets used, so we pass the
		 * address of the zbranch and keep the mutex locked
		 */
		err = tnc_read_node_nm(c, zt, node);
		goto out;
	}
	if (safely) {
		/* Second attempt: read while holding the TNC mutex */
		err = ubifs_tnc_read_node(c, zt, node);
		goto out;
	}
	/* Drop the TNC mutex prematurely and race with garbage collection */
	zbr = znode->zbranch[n];
	gc_seq1 = c->gc_seq;
	mutex_unlock(&c->tnc_mutex);

	if (ubifs_get_wbuf(c, zbr.lnum)) {
		/* We do not GC journal heads */
		err = ubifs_tnc_read_node(c, &zbr, node);
		return err;
	}

	err = fallible_read_node(c, key, &zbr, node);
	if (err <= 0 || maybe_leb_gced(c, zbr.lnum, gc_seq1)) {
		/*
		 * The node may have been GC'ed out from under us so try again
		 * while keeping the TNC mutex locked.
		 */
		safely = 1;
		goto again;
	}
	return 0;

out:
	mutex_unlock(&c->tnc_mutex);
	return err;
}

/**
 * ubifs_tnc_get_bu_keys - lookup keys for bulk-read.
 * @c: UBIFS file-system description object
 * @bu: bulk-read parameters and results
 *
 * Lookup consecutive data node keys for the same inode that reside
 * consecutively in the same LEB. This function returns zero in case of success
 * and a negative error code in case of failure.
 *
 * Note, if the bulk-read buffer length (@bu->buf_len) is known, this function
 * makes sure bulk-read nodes fit the buffer. Otherwise, this function prepares
 * maximum possible amount of nodes for bulk-read.
 */
int ubifs_tnc_get_bu_keys(struct ubifs_info *c, struct bu_info *bu)
{
	int n, err = 0, lnum = -1, uninitialized_var(offs);
	int uninitialized_var(len);
	unsigned int block = key_block(c, &bu->key);
	struct ubifs_znode *znode;

	bu->cnt = 0;
	bu->blk_cnt = 0;
	bu->eof = 0;

	mutex_lock(&c->tnc_mutex);
	/* Find first key */
	err = ubifs_lookup_level0(c, &bu->key, &znode, &n);
	if (err < 0)
		goto out;
	if (err) {
		/* Key found */
		len = znode->zbranch[n].len;
		/* The buffer must be big enough for at least 1 node */
		if (len > bu->buf_len) {
			err = -EINVAL;
			goto out;
		}
		/* Add this key */
		bu->zbranch[bu->cnt++] = znode->zbranch[n];
		bu->blk_cnt += 1;
		lnum = znode->zbranch[n].lnum;
		offs = ALIGN(znode->zbranch[n].offs + len, 8);
	}
	while (1) {
		struct ubifs_zbranch *zbr;
		union ubifs_key *key;
		unsigned int next_block;

		/* Find next key */
		err = tnc_next(c, &znode, &n);
		if (err)
			goto out;
		zbr = &znode->zbranch[n];
		key = &zbr->key;
		/* See if there is another data key for this file */
		if (key_inum(c, key) != key_inum(c, &bu->key) ||
		    key_type(c, key) != UBIFS_DATA_KEY) {
			err = -ENOENT;
			goto out;
		}
		if (lnum < 0) {
			/* First key found */
			lnum = zbr->lnum;
			offs = ALIGN(zbr->offs + zbr->len, 8);
			len = zbr->len;
			if (len > bu->buf_len) {
				err = -EINVAL;
				goto out;
			}
		} else {
			/*
			 * The data nodes must be in consecutive positions in
			 * the same LEB.
			 */
			if (zbr->lnum != lnum || zbr->offs != offs)
				goto out;
			offs += ALIGN(zbr->len, 8);
			len = ALIGN(len, 8) + zbr->len;
			/* Must not exceed buffer length */
			if (len > bu->buf_len)
				goto out;
		}
		/* Allow for holes */
		next_block = key_block(c, key);
		bu->blk_cnt += (next_block - block - 1);
		if (bu->blk_cnt >= UBIFS_MAX_BULK_READ)
			goto out;
		block = next_block;
		/* Add this key */
		bu->zbranch[bu->cnt++] = *zbr;
		bu->blk_cnt += 1;
		/* See if we have room for more */
		if (bu->cnt >= UBIFS_MAX_BULK_READ)
			goto out;
		if (bu->blk_cnt >= UBIFS_MAX_BULK_READ)
			goto out;
	}
out:
	if (err == -ENOENT) {
		bu->eof = 1;
		err = 0;
	}
	bu->gc_seq = c->gc_seq;
	mutex_unlock(&c->tnc_mutex);
	if (err)
		return err;
	/*
	 * An enormous hole could cause bulk-read to encompass too many
	 * page cache pages, so limit the number here.
	 */
	if (bu->blk_cnt > UBIFS_MAX_BULK_READ)
		bu->blk_cnt = UBIFS_MAX_BULK_READ;
	/*
	 * Ensure that bulk-read covers a whole number of page cache
	 * pages.
	 */
	if (UBIFS_BLOCKS_PER_PAGE == 1 ||
	    !(bu->blk_cnt & (UBIFS_BLOCKS_PER_PAGE - 1)))
		return 0;
	if (bu->eof) {
		/* At the end of file we can round up */
		bu->blk_cnt += UBIFS_BLOCKS_PER_PAGE - 1;
		return 0;
	}
	/* Exclude data nodes that do not make up a whole page cache page */
	block = key_block(c, &bu->key) + bu->blk_cnt;
	block &= ~(UBIFS_BLOCKS_PER_PAGE - 1);
	while (bu->cnt) {
		if (key_block(c, &bu->zbranch[bu->cnt - 1].key) < block)
			break;
		bu->cnt -= 1;
	}
	return 0;
}

/**
 * read_wbuf - bulk-read from a LEB with a wbuf.
 * @wbuf: wbuf that may overlap the read
 * @buf: buffer into which to read
 * @len: read length
 * @lnum: LEB number from which to read
 * @offs: offset from which to read
 *
 * This function returns %0 on success or a negative error code on failure.
*/ static int read_wbuf(struct ubifs_wbuf *wbuf, void *buf, int len, int lnum, int offs) { const struct ubifs_info *c = wbuf->c; int rlen, overlap; dbg_io("LEB %d:%d, length %d", lnum, offs, len); ubifs_assert(wbuf && lnum >= 0 && lnum < c->leb_cnt && offs >= 0); ubifs_assert(!(offs & 7) && offs < c->leb_size); ubifs_assert(offs + len <= c->leb_size); spin_lock(&wbuf->lock); overlap = (lnum == wbuf->lnum && offs + len > wbuf->offs); if (!overlap) { /* We may safely unlock the write-buffer and read the data */ spin_unlock(&wbuf->lock); return ubifs_leb_read(c, lnum, buf, offs, len, 0); } /* Don't read under wbuf */ rlen = wbuf->offs - offs; if (rlen < 0) rlen = 0; /* Copy the rest from the write-buffer */ memcpy(buf + rlen, wbuf->buf + offs + rlen - wbuf->offs, len - rlen); spin_unlock(&wbuf->lock); if (rlen > 0) /* Read everything that goes before write-buffer */ return ubifs_leb_read(c, lnum, buf, offs, rlen, 0); return 0; } /** * validate_data_node - validate data nodes for bulk-read. * @c: UBIFS file-system description object * @buf: buffer containing data node to validate * @zbr: zbranch of data node to validate * * This functions returns %0 on success or a negative error code on failure. 
 */
static int validate_data_node(struct ubifs_info *c, void *buf,
			      struct ubifs_zbranch *zbr)
{
	union ubifs_key key1;
	struct ubifs_ch *ch = buf;
	int err, len;

	/* Check node type, CRC/magic, length and key against the zbranch */
	if (ch->node_type != UBIFS_DATA_NODE) {
		ubifs_err("bad node type (%d but expected %d)",
			  ch->node_type, UBIFS_DATA_NODE);
		goto out_err;
	}

	err = ubifs_check_node(c, buf, zbr->lnum, zbr->offs, 0, 0);
	if (err) {
		ubifs_err("expected node type %d", UBIFS_DATA_NODE);
		goto out;
	}

	len = le32_to_cpu(ch->len);
	if (len != zbr->len) {
		ubifs_err("bad node length %d, expected %d", len, zbr->len);
		goto out_err;
	}

	/* Make sure the key of the read node is correct */
	key_read(c, buf + UBIFS_KEY_OFFSET, &key1);
	if (!keys_eq(c, &zbr->key, &key1)) {
		ubifs_err("bad key in node at LEB %d:%d",
			  zbr->lnum, zbr->offs);
		dbg_tnck(&zbr->key, "looked for key ");
		dbg_tnck(&key1, "found node's key ");
		goto out_err;
	}

	return 0;

out_err:
	err = -EINVAL;
out:
	/* Common failure reporting: dump the offending node and a backtrace */
	ubifs_err("bad node at LEB %d:%d", zbr->lnum, zbr->offs);
	dbg_dump_node(c, buf);
	dbg_dump_stack();
	return err;
}

/**
 * ubifs_tnc_bulk_read - read a number of data nodes in one go.
 * @c: UBIFS file-system description object
 * @bu: bulk-read parameters and results
 *
 * This function reads and validates the data nodes that were identified by the
 * 'ubifs_tnc_get_bu_keys()' function. This function returns %0 on success,
 * -EAGAIN to indicate a race with GC, or another negative error code on
 * failure.
 */
int ubifs_tnc_bulk_read(struct ubifs_info *c, struct bu_info *bu)
{
	int lnum = bu->zbranch[0].lnum, offs = bu->zbranch[0].offs, len, err, i;
	struct ubifs_wbuf *wbuf;
	void *buf;

	/* Total span: from first node's start to last node's end */
	len = bu->zbranch[bu->cnt - 1].offs;
	len += bu->zbranch[bu->cnt - 1].len - offs;
	if (len > bu->buf_len) {
		ubifs_err("buffer too small %d vs %d", bu->buf_len, len);
		return -EINVAL;
	}

	/* Do the read */
	wbuf = ubifs_get_wbuf(c, lnum);
	if (wbuf)
		/* A journal head covers this LEB: merge in un-flushed data */
		err = read_wbuf(wbuf, bu->buf, len, lnum, offs);
	else
		err = ubifs_leb_read(c, lnum, bu->buf, offs, len, 0);

	/* Check for a race with GC */
	if (maybe_leb_gced(c, lnum, bu->gc_seq))
		return -EAGAIN;

	if (err && err != -EBADMSG) {
		ubifs_err("failed to read from LEB %d:%d, error %d",
			  lnum, offs, err);
		dbg_dump_stack();
		dbg_tnck(&bu->key, "key ");
		return err;
	}

	/* Validate the nodes read */
	buf = bu->buf;
	for (i = 0; i < bu->cnt; i++) {
		err = validate_data_node(c, buf, &bu->zbranch[i]);
		if (err)
			return err;
		/* Nodes are packed at 8-byte alignment in the buffer */
		buf = buf + ALIGN(bu->zbranch[i].len, 8);
	}

	return 0;
}

/**
 * do_lookup_nm - look up a "hashed" node.
 * @c: UBIFS file-system description object
 * @key: node key to lookup
 * @node: the node is returned here
 * @nm: node name
 *
 * This function looks up and reads a node which contains name hash in the key.
 * Since the hash may have collisions, there may be many nodes with the same
 * key, so we have to sequentially look to all of them until the needed one is
 * found. This function returns zero in case of success, %-ENOENT if the node
 * was not found, and a negative error code in case of failure.
 */
static int do_lookup_nm(struct ubifs_info *c, const union ubifs_key *key,
			void *node, const struct qstr *nm)
{
	int found, n, err;
	struct ubifs_znode *znode;

	dbg_tnck(key, "name '%.*s' key ", nm->len, nm->name);
	mutex_lock(&c->tnc_mutex);
	found = ubifs_lookup_level0(c, key, &znode, &n);
	if (!found) {
		err = -ENOENT;
		goto out_unlock;
	} else if (found < 0) {
		err = found;
		goto out_unlock;
	}

	ubifs_assert(n >= 0);

	/* Walk sibling entries with the same hash until the name matches */
	err = resolve_collision(c, key, &znode, &n, nm);
	dbg_tnc("rc returned %d, znode %p, n %d", err, znode, n);
	if (unlikely(err < 0))
		goto out_unlock;
	if (err == 0) {
		err = -ENOENT;
		goto out_unlock;
	}

	err = tnc_read_node_nm(c, &znode->zbranch[n], node);

out_unlock:
	mutex_unlock(&c->tnc_mutex);
	return err;
}

/**
 * ubifs_tnc_lookup_nm - look up a "hashed" node.
 * @c: UBIFS file-system description object
 * @key: node key to lookup
 * @node: the node is returned here
 * @nm: node name
 *
 * This function looks up and reads a node which contains name hash in the key.
 * Since the hash may have collisions, there may be many nodes with the same
 * key, so we have to sequentially look to all of them until the needed one is
 * found. This function returns zero in case of success, %-ENOENT if the node
 * was not found, and a negative error code in case of failure.
 */
int ubifs_tnc_lookup_nm(struct ubifs_info *c, const union ubifs_key *key,
			void *node, const struct qstr *nm)
{
	int err, len;
	const struct ubifs_dent_node *dent = node;

	/*
	 * We assume that in most of the cases there are no name collisions and
	 * 'ubifs_tnc_lookup()' returns us the right direntry.
	 */
	err = ubifs_tnc_lookup(c, key, node);
	if (err)
		return err;

	len = le16_to_cpu(dent->nlen);
	if (nm->len == len && !memcmp(dent->name, nm->name, len))
		return 0;

	/*
	 * Unluckily, there are hash collisions and we have to iterate over
	 * them look at each direntry with colliding name hash sequentially.
	 */
	return do_lookup_nm(c, key, node, nm);
}

/**
 * correct_parent_keys - correct parent znodes' keys.
 * @c: UBIFS file-system description object
 * @znode: znode to correct parent znodes for
 *
 * This is a helper function for 'tnc_insert()'. When the key of the leftmost
 * zbranch changes, keys of parent znodes have to be corrected. This helper
 * function is called in such situations and corrects the keys if needed.
 */
static void correct_parent_keys(const struct ubifs_info *c,
				struct ubifs_znode *znode)
{
	union ubifs_key *key, *key1;

	ubifs_assert(znode->parent);
	ubifs_assert(znode->iip == 0);

	key = &znode->zbranch[0].key;
	key1 = &znode->parent->zbranch[0].key;

	/* Propagate the smaller key upwards while it undercuts the parent */
	while (keys_cmp(c, key, key1) < 0) {
		key_copy(c, key, key1);
		znode = znode->parent;
		/* Key range changed - mark so splits record it in old_idx */
		znode->alt = 1;
		if (!znode->parent || znode->iip)
			break;
		key1 = &znode->parent->zbranch[0].key;
	}
}

/**
 * insert_zbranch - insert a zbranch into a znode.
 * @znode: znode into which to insert
 * @zbr: zbranch to insert
 * @n: slot number to insert to
 *
 * This is a helper function for 'tnc_insert()'. UBIFS does not allow "gaps" in
 * znode's array of zbranches and keeps zbranches consolidated, so when a new
 * zbranch has to be inserted to the @znode->zbranches[]' array at the @n-th
 * slot, zbranches starting from @n have to be moved right.
 */
static void insert_zbranch(struct ubifs_znode *znode,
			   const struct ubifs_zbranch *zbr, int n)
{
	int i;

	ubifs_assert(ubifs_zn_dirty(znode));

	if (znode->level) {
		/* Internal level: shifted children must have @iip updated */
		for (i = znode->child_cnt; i > n; i--) {
			znode->zbranch[i] = znode->zbranch[i - 1];
			if (znode->zbranch[i].znode)
				znode->zbranch[i].znode->iip = i;
		}
		if (zbr->znode)
			zbr->znode->iip = n;
	} else
		/* Leaf level: no child znodes, just shift the slots */
		for (i = znode->child_cnt; i > n; i--)
			znode->zbranch[i] = znode->zbranch[i - 1];

	znode->zbranch[n] = *zbr;
	znode->child_cnt += 1;

	/*
	 * After inserting at slot zero, the lower bound of the key range of
	 * this znode may have changed. If this znode is subsequently split
	 * then the upper bound of the key range may change, and furthermore
	 * it could change to be lower than the original lower bound. If that
	 * happens, then it will no longer be possible to find this znode in the
	 * TNC using the key from the index node on flash. That is bad because
	 * if it is not found, we will assume it is obsolete and may overwrite
	 * it. Then if there is an unclean unmount, we will start using the
	 * old index which will be broken.
	 *
	 * So we first mark znodes that have insertions at slot zero, and then
	 * if they are split we add their lnum/offs to the old_idx tree.
	 */
	if (n == 0)
		znode->alt = 1;
}

/**
 * tnc_insert - insert a node into TNC.
 * @c: UBIFS file-system description object
 * @znode: znode to insert into
 * @zbr: branch to insert
 * @n: slot number to insert new zbranch to
 *
 * This function inserts a new node described by @zbr into znode @znode. If
 * znode does not have a free slot for new zbranch, it is split. Parent znodes
 * are splat as well if needed. Returns zero in case of success or a negative
 * error code in case of failure.
 */
static int tnc_insert(struct ubifs_info *c, struct ubifs_znode *znode,
		      struct ubifs_zbranch *zbr, int n)
{
	struct ubifs_znode *zn, *zi, *zp;
	int i, keep, move, appending = 0;
	union ubifs_key *key = &zbr->key, *key1;

	ubifs_assert(n >= 0 && n <= c->fanout);

	/* Implement naive insert for now */
again:
	zp = znode->parent;
	if (znode->child_cnt < c->fanout) {
		/* There is room - no split needed at this level */
		ubifs_assert(n != c->fanout);
		dbg_tnck(key, "inserted at %d level %d, key ", n,
			 znode->level);

		insert_zbranch(znode, zbr, n);

		/* Ensure parent's key is correct */
		if (n == 0 && zp && znode->iip == 0)
			correct_parent_keys(c, znode);

		return 0;
	}

	/*
	 * Unfortunately, @znode does not have more empty slots and we have to
	 * split it.
	 */
	dbg_tnck(key, "splitting level %d, key ", znode->level);

	if (znode->alt)
		/*
		 * We can no longer be sure of finding this znode by key, so we
		 * record it in the old_idx tree.
		 */
		ins_clr_old_idx_znode(c, znode);

	zn = kzalloc(c->max_znode_sz, GFP_NOFS);
	if (!zn)
		return -ENOMEM;
	zn->parent = zp;
	zn->level = znode->level;

	/* Decide where to split */
	if (znode->level == 0 && key_type(c, key) == UBIFS_DATA_KEY) {
		/* Try not to split consecutive data keys */
		if (n == c->fanout) {
			key1 = &znode->zbranch[n - 1].key;
			if (key_inum(c, key1) == key_inum(c, key) &&
			    key_type(c, key1) == UBIFS_DATA_KEY)
				appending = 1;
		} else
			goto check_split;
	} else if (appending && n != c->fanout) {
		/* Try not to split consecutive data keys */
		appending = 0;
check_split:
		if (n >= (c->fanout + 1) / 2) {
			key1 = &znode->zbranch[0].key;
			if (key_inum(c, key1) == key_inum(c, key) &&
			    key_type(c, key1) == UBIFS_DATA_KEY) {
				key1 = &znode->zbranch[n].key;
				if (key_inum(c, key1) != key_inum(c, key) ||
				    key_type(c, key1) != UBIFS_DATA_KEY) {
					keep = n;
					move = c->fanout - keep;
					zi = znode;
					goto do_split;
				}
			}
		}
	}

	if (appending) {
		/* Appending: keep the full old znode, new one starts empty */
		keep = c->fanout;
		move = 0;
	} else {
		keep = (c->fanout + 1) / 2;
		move = c->fanout - keep;
	}

	/*
	 * Although we don't at present, we could look at the neighbors and see
	 * if we can move some zbranches there.
	 */
	if (n < keep) {
		/* Insert into existing znode */
		zi = znode;
		move += 1;
		keep -= 1;
	} else {
		/* Insert into new znode */
		zi = zn;
		n -= keep;
		/* Re-parent */
		if (zn->level != 0)
			zbr->znode->parent = zn;
	}

do_split:

	__set_bit(DIRTY_ZNODE, &zn->flags);
	atomic_long_inc(&c->dirty_zn_cnt);

	zn->child_cnt = move;
	znode->child_cnt = keep;

	dbg_tnc("moving %d, keeping %d", move, keep);

	/* Move zbranch */
	for (i = 0; i < move; i++) {
		zn->zbranch[i] = znode->zbranch[keep + i];
		/* Re-parent */
		if (zn->level != 0)
			if (zn->zbranch[i].znode) {
				zn->zbranch[i].znode->parent = zn;
				zn->zbranch[i].znode->iip = i;
			}
	}

	/* Insert new key and branch */
	dbg_tnck(key, "inserting at %d level %d, key ", n, zn->level);

	insert_zbranch(zi, zbr, n);

	/* Insert new znode (produced by splitting) into the parent */
	if (zp) {
		if (n == 0 && zi == znode && znode->iip == 0)
			correct_parent_keys(c, znode);

		/* Locate insertion point */
		n = znode->iip + 1;

		/* Tail recursion */
		zbr->key = zn->zbranch[0].key;
		zbr->znode = zn;
		zbr->lnum = 0;
		zbr->offs = 0;
		zbr->len = 0;
		znode = zp;

		goto again;
	}

	/* We have to split root znode */
	dbg_tnc("creating new zroot at level %d", znode->level + 1);

	zi = kzalloc(c->max_znode_sz, GFP_NOFS);
	if (!zi)
		return -ENOMEM;

	zi->child_cnt = 2;
	zi->level = znode->level + 1;

	__set_bit(DIRTY_ZNODE, &zi->flags);
	atomic_long_inc(&c->dirty_zn_cnt);

	zi->zbranch[0].key = znode->zbranch[0].key;
	zi->zbranch[0].znode = znode;
	zi->zbranch[0].lnum = c->zroot.lnum;
	zi->zbranch[0].offs = c->zroot.offs;
	zi->zbranch[0].len = c->zroot.len;
	zi->zbranch[1].key = zn->zbranch[0].key;
	zi->zbranch[1].znode = zn;

	/* The new root is dirty and in memory only */
	c->zroot.lnum = 0;
	c->zroot.offs = 0;
	c->zroot.len = 0;
	c->zroot.znode = zi;

	zn->parent = zi;
	zn->iip = 1;
	znode->parent = zi;
	znode->iip = 0;

	return 0;
}

/**
 * ubifs_tnc_add - add a node to TNC.
 * @c: UBIFS file-system description object
 * @key: key to add
 * @lnum: LEB number of node
 * @offs: node offset
 * @len: node length
 *
 * This function adds a node with key @key to TNC.
 * The node may be new or it may
 * obsolete some existing one. Returns %0 on success or negative error code on
 * failure.
 */
int ubifs_tnc_add(struct ubifs_info *c, const union ubifs_key *key, int lnum,
		  int offs, int len)
{
	int found, n, err = 0;
	struct ubifs_znode *znode;

	mutex_lock(&c->tnc_mutex);
	dbg_tnck(key, "%d:%d, len %d, key ", lnum, offs, len);
	found = lookup_level0_dirty(c, key, &znode, &n);
	if (!found) {
		/* New key: build a zbranch and insert it after slot @n */
		struct ubifs_zbranch zbr;

		zbr.znode = NULL;
		zbr.lnum = lnum;
		zbr.offs = offs;
		zbr.len = len;
		key_copy(c, key, &zbr.key);
		err = tnc_insert(c, znode, &zbr, n + 1);
	} else if (found == 1) {
		/* Key exists: the old on-flash node becomes dirt */
		struct ubifs_zbranch *zbr = &znode->zbranch[n];

		lnc_free(zbr);
		err = ubifs_add_dirt(c, zbr->lnum, zbr->len);
		zbr->lnum = lnum;
		zbr->offs = offs;
		zbr->len = len;
	} else
		err = found;
	if (!err)
		err = dbg_check_tnc(c, 0);
	mutex_unlock(&c->tnc_mutex);

	return err;
}

/**
 * ubifs_tnc_replace - replace a node in the TNC only if the old node is found.
 * @c: UBIFS file-system description object
 * @key: key to add
 * @old_lnum: LEB number of old node
 * @old_offs: old node offset
 * @lnum: LEB number of node
 * @offs: node offset
 * @len: node length
 *
 * This function replaces a node with key @key in the TNC only if the old node
 * is found. This function is called by garbage collection when node are moved.
 * Returns %0 on success or negative error code on failure.
 */
int ubifs_tnc_replace(struct ubifs_info *c, const union ubifs_key *key,
		      int old_lnum, int old_offs, int lnum, int offs, int len)
{
	int found, n, err = 0;
	struct ubifs_znode *znode;

	mutex_lock(&c->tnc_mutex);
	dbg_tnck(key, "old LEB %d:%d, new LEB %d:%d, len %d, key ", old_lnum,
		 old_offs, lnum, offs, len);
	found = lookup_level0_dirty(c, key, &znode, &n);
	if (found < 0) {
		err = found;
		goto out_unlock;
	}

	if (found == 1) {
		struct ubifs_zbranch *zbr = &znode->zbranch[n];

		found = 0;
		if (zbr->lnum == old_lnum && zbr->offs == old_offs) {
			/* TNC still points at the old copy - retarget it */
			lnc_free(zbr);
			err = ubifs_add_dirt(c, zbr->lnum, zbr->len);
			if (err)
				goto out_unlock;
			zbr->lnum = lnum;
			zbr->offs = offs;
			zbr->len = len;
			found = 1;
		} else if (is_hash_key(c, key)) {
			/*
			 * Hashed keys may collide; search siblings for a
			 * zbranch that references exactly old_lnum:old_offs.
			 */
			found = resolve_collision_directly(c, key, &znode, &n,
							   old_lnum, old_offs);
			dbg_tnc("rc returned %d, znode %p, n %d, LEB %d:%d",
				found, znode, n, old_lnum, old_offs);
			if (found < 0) {
				err = found;
				goto out_unlock;
			}

			if (found) {
				/* Ensure the znode is dirtied */
				if (znode->cnext || !ubifs_zn_dirty(znode)) {
					znode = dirty_cow_bottom_up(c, znode);
					if (IS_ERR(znode)) {
						err = PTR_ERR(znode);
						goto out_unlock;
					}
				}
				zbr = &znode->zbranch[n];
				lnc_free(zbr);
				err = ubifs_add_dirt(c, zbr->lnum,
						     zbr->len);
				if (err)
					goto out_unlock;
				zbr->lnum = lnum;
				zbr->offs = offs;
				zbr->len = len;
			}
		}
	}

	if (!found)
		/* Old node already obsolete: the moved copy is dirt too */
		err = ubifs_add_dirt(c, lnum, len);

	if (!err)
		err = dbg_check_tnc(c, 0);

out_unlock:
	mutex_unlock(&c->tnc_mutex);
	return err;
}

/**
 * ubifs_tnc_add_nm - add a "hashed" node to TNC.
 * @c: UBIFS file-system description object
 * @key: key to add
 * @lnum: LEB number of node
 * @offs: node offset
 * @len: node length
 * @nm: node name
 *
 * This is the same as 'ubifs_tnc_add()' but it should be used with keys which
 * may have collisions, like directory entry keys.
 */
int ubifs_tnc_add_nm(struct ubifs_info *c, const union ubifs_key *key,
		     int lnum, int offs, int len, const struct qstr *nm)
{
	int found, n, err = 0;
	struct ubifs_znode *znode;

	mutex_lock(&c->tnc_mutex);
	dbg_tnck(key, "LEB %d:%d, name '%.*s', key ",
		 lnum, offs, nm->len, nm->name);
	found = lookup_level0_dirty(c, key, &znode, &n);
	if (found < 0) {
		err = found;
		goto out_unlock;
	}

	if (found == 1) {
		if (c->replaying)
			/* During replay nodes may be missing - be fallible */
			found = fallible_resolve_collision(c, key, &znode, &n,
							   nm, 1);
		else
			found = resolve_collision(c, key, &znode, &n, nm);
		dbg_tnc("rc returned %d, znode %p, n %d", found, znode, n);
		if (found < 0) {
			err = found;
			goto out_unlock;
		}

		/* Ensure the znode is dirtied */
		if (znode->cnext || !ubifs_zn_dirty(znode)) {
			znode = dirty_cow_bottom_up(c, znode);
			if (IS_ERR(znode)) {
				err = PTR_ERR(znode);
				goto out_unlock;
			}
		}

		if (found == 1) {
			/* Exact name match: overwrite the existing entry */
			struct ubifs_zbranch *zbr = &znode->zbranch[n];

			lnc_free(zbr);
			err = ubifs_add_dirt(c, zbr->lnum, zbr->len);
			zbr->lnum = lnum;
			zbr->offs = offs;
			zbr->len = len;
			goto out_unlock;
		}
	}

	if (!found) {
		struct ubifs_zbranch zbr;

		zbr.znode = NULL;
		zbr.lnum = lnum;
		zbr.offs = offs;
		zbr.len = len;
		key_copy(c, key, &zbr.key);
		err = tnc_insert(c, znode, &zbr, n + 1);
		if (err)
			goto out_unlock;
		if (c->replaying) {
			/*
			 * We did not find it in the index so there may be a
			 * dangling branch still in the index. So we remove it
			 * by passing 'ubifs_tnc_remove_nm()' the same key but
			 * an unmatchable name.
			 */
			struct qstr noname = { .len = 0, .name = "" };

			err = dbg_check_tnc(c, 0);
			mutex_unlock(&c->tnc_mutex);
			if (err)
				return err;
			return ubifs_tnc_remove_nm(c, key, &noname);
		}
	}

out_unlock:
	if (!err)
		err = dbg_check_tnc(c, 0);
	mutex_unlock(&c->tnc_mutex);
	return err;
}

/**
 * tnc_delete - delete a znode from TNC.
 * @c: UBIFS file-system description object
 * @znode: znode to delete from
 * @n: zbranch slot number to delete
 *
 * This function deletes a leaf node from @n-th slot of @znode.
 * Returns zero in
 * case of success and a negative error code in case of failure.
 */
static int tnc_delete(struct ubifs_info *c, struct ubifs_znode *znode, int n)
{
	struct ubifs_zbranch *zbr;
	struct ubifs_znode *zp;
	int i, err;

	/* Delete without merge for now */
	ubifs_assert(znode->level == 0);
	ubifs_assert(n >= 0 && n < c->fanout);
	dbg_tnck(&znode->zbranch[n].key, "deleting key ");

	zbr = &znode->zbranch[n];
	lnc_free(zbr);

	/* The deleted node's flash space becomes reclaimable dirt */
	err = ubifs_add_dirt(c, zbr->lnum, zbr->len);
	if (err) {
		dbg_dump_znode(c, znode);
		return err;
	}

	/* We do not "gap" zbranch slots */
	for (i = n; i < znode->child_cnt - 1; i++)
		znode->zbranch[i] = znode->zbranch[i + 1];
	znode->child_cnt -= 1;

	if (znode->child_cnt > 0)
		return 0;

	/*
	 * This was the last zbranch, we have to delete this znode from the
	 * parent.
	 */

	do {
		ubifs_assert(!ubifs_zn_obsolete(znode));
		ubifs_assert(ubifs_zn_dirty(znode));

		zp = znode->parent;
		n = znode->iip;

		atomic_long_dec(&c->dirty_zn_cnt);

		err = insert_old_idx_znode(c, znode);
		if (err)
			return err;

		if (znode->cnext) {
			/* Being committed - mark obsolete instead of freeing */
			__set_bit(OBSOLETE_ZNODE, &znode->flags);
			atomic_long_inc(&c->clean_zn_cnt);
			atomic_long_inc(&ubifs_clean_zn_cnt);
		} else
			kfree(znode);
		znode = zp;
	} while (znode->child_cnt == 1); /* while removing last child */

	/* Remove from znode, entry n - 1 */
	znode->child_cnt -= 1;
	ubifs_assert(znode->level != 0);
	for (i = n; i < znode->child_cnt; i++) {
		znode->zbranch[i] = znode->zbranch[i + 1];
		if (znode->zbranch[i].znode)
			znode->zbranch[i].znode->iip = i;
	}

	/*
	 * If this is the root and it has only 1 child then
	 * collapse the tree.
	 */
	if (!znode->parent) {
		while (znode->child_cnt == 1 && znode->level != 0) {
			zp = znode;
			zbr = &znode->zbranch[0];
			znode = get_znode(c, znode, 0);
			if (IS_ERR(znode))
				return PTR_ERR(znode);
			znode = dirty_cow_znode(c, zbr);
			if (IS_ERR(znode))
				return PTR_ERR(znode);
			znode->parent = NULL;
			znode->iip = 0;
			if (c->zroot.len) {
				/* Remember the old root position in old_idx */
				err = insert_old_idx(c, c->zroot.lnum,
						     c->zroot.offs);
				if (err)
					return err;
			}
			c->zroot.lnum = zbr->lnum;
			c->zroot.offs = zbr->offs;
			c->zroot.len = zbr->len;
			c->zroot.znode = znode;
			ubifs_assert(!ubifs_zn_obsolete(zp));
			ubifs_assert(ubifs_zn_dirty(zp));
			atomic_long_dec(&c->dirty_zn_cnt);

			if (zp->cnext) {
				__set_bit(OBSOLETE_ZNODE, &zp->flags);
				atomic_long_inc(&c->clean_zn_cnt);
				atomic_long_inc(&ubifs_clean_zn_cnt);
			} else
				kfree(zp);
		}
	}

	return 0;
}

/**
 * ubifs_tnc_remove - remove an index entry of a node.
 * @c: UBIFS file-system description object
 * @key: key of node
 *
 * Returns %0 on success or negative error code on failure.
 */
int ubifs_tnc_remove(struct ubifs_info *c, const union ubifs_key *key)
{
	int found, n, err = 0;
	struct ubifs_znode *znode;

	mutex_lock(&c->tnc_mutex);
	dbg_tnck(key, "key ");
	found = lookup_level0_dirty(c, key, &znode, &n);
	if (found < 0) {
		err = found;
		goto out_unlock;
	}
	if (found == 1)
		err = tnc_delete(c, znode, n);
	if (!err)
		err = dbg_check_tnc(c, 0);

out_unlock:
	mutex_unlock(&c->tnc_mutex);
	return err;
}

/**
 * ubifs_tnc_remove_nm - remove an index entry for a "hashed" node.
 * @c: UBIFS file-system description object
 * @key: key of node
 * @nm: directory entry name
 *
 * Returns %0 on success or negative error code on failure.
 */
int ubifs_tnc_remove_nm(struct ubifs_info *c, const union ubifs_key *key,
			const struct qstr *nm)
{
	int n, err;
	struct ubifs_znode *znode;

	mutex_lock(&c->tnc_mutex);
	dbg_tnck(key, "%.*s, key ", nm->len, nm->name);
	err = lookup_level0_dirty(c, key, &znode, &n);
	if (err < 0)
		goto out_unlock;

	if (err) {
		if (c->replaying)
			/* During replay the entry may be missing on flash */
			err = fallible_resolve_collision(c, key, &znode, &n,
							 nm, 0);
		else
			err = resolve_collision(c, key, &znode, &n, nm);
		dbg_tnc("rc returned %d, znode %p, n %d", err, znode, n);
		if (err < 0)
			goto out_unlock;
		if (err) {
			/* Ensure the znode is dirtied */
			if (znode->cnext || !ubifs_zn_dirty(znode)) {
				znode = dirty_cow_bottom_up(c, znode);
				if (IS_ERR(znode)) {
					err = PTR_ERR(znode);
					goto out_unlock;
				}
			}
			err = tnc_delete(c, znode, n);
		}
	}

out_unlock:
	if (!err)
		err = dbg_check_tnc(c, 0);
	mutex_unlock(&c->tnc_mutex);
	return err;
}

/**
 * key_in_range - determine if a key falls within a range of keys.
 * @c: UBIFS file-system description object
 * @key: key to check
 * @from_key: lowest key in range
 * @to_key: highest key in range
 *
 * This function returns %1 if the key is in range and %0 otherwise.
 */
static int key_in_range(struct ubifs_info *c, union ubifs_key *key,
			union ubifs_key *from_key, union ubifs_key *to_key)
{
	/* Inclusive on both ends: from_key <= key <= to_key */
	if (keys_cmp(c, key, from_key) < 0)
		return 0;
	if (keys_cmp(c, key, to_key) > 0)
		return 0;
	return 1;
}

/**
 * ubifs_tnc_remove_range - remove index entries in range.
 * @c: UBIFS file-system description object
 * @from_key: lowest key to remove
 * @to_key: highest key to remove
 *
 * This function removes index entries starting at @from_key and ending at
 * @to_key. This function returns zero in case of success and a negative error
 * code in case of failure.
 */
int ubifs_tnc_remove_range(struct ubifs_info *c, union ubifs_key *from_key,
			   union ubifs_key *to_key)
{
	int i, n, k, err = 0;
	struct ubifs_znode *znode;
	union ubifs_key *key;

	mutex_lock(&c->tnc_mutex);
	while (1) {
		/* Find first level 0 znode that contains keys to remove */
		err = ubifs_lookup_level0(c, from_key, &znode, &n);
		if (err < 0)
			goto out_unlock;

		if (err)
			key = from_key;
		else {
			/* from_key itself is absent - step to the next key */
			err = tnc_next(c, &znode, &n);
			if (err == -ENOENT) {
				err = 0;
				goto out_unlock;
			}
			if (err < 0)
				goto out_unlock;
			key = &znode->zbranch[n].key;
			if (!key_in_range(c, key, from_key, to_key)) {
				err = 0;
				goto out_unlock;
			}
		}

		/* Ensure the znode is dirtied */
		if (znode->cnext || !ubifs_zn_dirty(znode)) {
			znode = dirty_cow_bottom_up(c, znode);
			if (IS_ERR(znode)) {
				err = PTR_ERR(znode);
				goto out_unlock;
			}
		}

		/* Remove all keys in range except the first */
		for (i = n + 1, k = 0; i < znode->child_cnt; i++, k++) {
			key = &znode->zbranch[i].key;
			if (!key_in_range(c, key, from_key, to_key))
				break;
			lnc_free(&znode->zbranch[i]);
			err = ubifs_add_dirt(c, znode->zbranch[i].lnum,
					     znode->zbranch[i].len);
			if (err) {
				dbg_dump_znode(c, znode);
				goto out_unlock;
			}
			dbg_tnck(key, "removing key ");
		}
		if (k) {
			/* Close the gap left by the k removed zbranches */
			for (i = n + 1 + k; i < znode->child_cnt; i++)
				znode->zbranch[i - k] = znode->zbranch[i];
			znode->child_cnt -= k;
		}

		/* Now delete the first */
		err = tnc_delete(c, znode, n);
		if (err)
			goto out_unlock;
	}

out_unlock:
	if (!err)
		err = dbg_check_tnc(c, 0);
	mutex_unlock(&c->tnc_mutex);
	return err;
}

/**
 * ubifs_tnc_remove_ino - remove an inode from TNC.
 * @c: UBIFS file-system description object
 * @inum: inode number to remove
 *
 * This function removes inode @inum and all the extended attributes associated
 * with the inode from TNC and returns zero in case of success or a negative
 * error code in case of failure.
 */
int ubifs_tnc_remove_ino(struct ubifs_info *c, ino_t inum)
{
	union ubifs_key key1, key2;
	struct ubifs_dent_node *xent, *pxent = NULL;
	struct qstr nm = { .name = NULL };

	dbg_tnc("ino %lu", (unsigned long)inum);

	/*
	 * Walk all extended attribute entries and remove them together with
	 * corresponding extended attribute inodes.
	 */
	lowest_xent_key(c, &key1, inum);
	while (1) {
		ino_t xattr_inum;
		int err;

		xent = ubifs_tnc_next_ent(c, &key1, &nm);
		if (IS_ERR(xent)) {
			err = PTR_ERR(xent);
			if (err == -ENOENT)
				/* No more xattr entries - done walking */
				break;
			return err;
		}

		xattr_inum = le64_to_cpu(xent->inum);
		dbg_tnc("xent '%s', ino %lu", xent->name,
			(unsigned long)xattr_inum);

		nm.name = xent->name;
		nm.len = le16_to_cpu(xent->nlen);
		err = ubifs_tnc_remove_nm(c, &key1, &nm);
		if (err) {
			kfree(xent);
			return err;
		}

		/* Remove the xattr inode's own index entries */
		lowest_ino_key(c, &key1, xattr_inum);
		highest_ino_key(c, &key2, xattr_inum);
		err = ubifs_tnc_remove_range(c, &key1, &key2);
		if (err) {
			kfree(xent);
			return err;
		}

		/*
		 * Free the previous entry only now: @nm still points into
		 * @xent's name buffer for the next lookup.
		 */
		kfree(pxent);
		pxent = xent;
		key_read(c, &xent->key, &key1);
	}

	kfree(pxent);
	lowest_ino_key(c, &key1, inum);
	highest_ino_key(c, &key2, inum);

	return ubifs_tnc_remove_range(c, &key1, &key2);
}

/**
 * ubifs_tnc_next_ent - walk directory or extended attribute entries.
 * @c: UBIFS file-system description object
 * @key: key of last entry
 * @nm: name of last entry found or %NULL
 *
 * This function finds and reads the next directory or extended attribute entry
 * after the given key (@key) if there is one. @nm is used to resolve
 * collisions.
 *
 * If the name of the current entry is not known and only the key is known,
 * @nm->name has to be %NULL. In this case the semantics of this function is a
 * little bit different and it returns the entry corresponding to this key, not
 * the next one. If the key was not found, the closest "right" entry is
 * returned.
 *
 * If the fist entry has to be found, @key has to contain the lowest possible
 * key value for this inode and @name has to be %NULL.
 *
 * This function returns the found directory or extended attribute entry node
 * in case of success, %-ENOENT is returned if no entry was found, and a
 * negative error code is returned in case of failure.
 */
struct ubifs_dent_node *ubifs_tnc_next_ent(struct ubifs_info *c,
					   union ubifs_key *key,
					   const struct qstr *nm)
{
	int n, err, type = key_type(c, key);
	struct ubifs_znode *znode;
	struct ubifs_dent_node *dent;
	struct ubifs_zbranch *zbr;
	union ubifs_key *dkey;

	dbg_tnck(key, "%s ", nm->name ? (char *)nm->name : "(lowest)");
	ubifs_assert(is_hash_key(c, key));

	mutex_lock(&c->tnc_mutex);
	err = ubifs_lookup_level0(c, key, &znode, &n);
	if (unlikely(err < 0))
		goto out_unlock;

	if (nm->name) {
		if (err) {
			/* Handle collisions */
			err = resolve_collision(c, key, &znode, &n, nm);
			dbg_tnc("rc returned %d, znode %p, n %d",
				err, znode, n);
			if (unlikely(err < 0))
				goto out_unlock;
		}

		/* Now find next entry */
		err = tnc_next(c, &znode, &n);
		if (unlikely(err))
			goto out_unlock;
	} else {
		/*
		 * The full name of the entry was not given, in which case the
		 * behavior of this function is a little different and it
		 * returns current entry, not the next one.
		 */
		if (!err) {
			/*
			 * However, the given key does not exist in the TNC
			 * tree and @znode/@n variables contain the closest
			 * "preceding" element. Switch to the next one.
			 */
			err = tnc_next(c, &znode, &n);
			if (err)
				goto out_unlock;
		}
	}

	zbr = &znode->zbranch[n];
	dent = kmalloc(zbr->len, GFP_NOFS);
	if (unlikely(!dent)) {
		err = -ENOMEM;
		goto out_unlock;
	}

	/*
	 * The above 'tnc_next()' call could lead us to the next inode, check
	 * this.
	 */
	dkey = &zbr->key;
	if (key_inum(c, dkey) != key_inum(c, key) ||
	    key_type(c, dkey) != type) {
		err = -ENOENT;
		goto out_free;
	}

	err = tnc_read_node_nm(c, zbr, dent);
	if (unlikely(err))
		goto out_free;

	/* Success: caller owns @dent and must kfree() it */
	mutex_unlock(&c->tnc_mutex);
	return dent;

out_free:
	kfree(dent);
out_unlock:
	mutex_unlock(&c->tnc_mutex);
	return ERR_PTR(err);
}

/**
 * tnc_destroy_cnext - destroy left-over obsolete znodes from a failed commit.
 * @c: UBIFS file-system description object
 *
 * Destroy left-over obsolete znodes from a failed commit.
 */
static void tnc_destroy_cnext(struct ubifs_info *c)
{
	struct ubifs_znode *cnext;

	if (!c->cnext)
		return;
	ubifs_assert(c->cmt_state == COMMIT_BROKEN);
	cnext = c->cnext;
	/* Walk the circular commit list, freeing only obsolete znodes */
	do {
		struct ubifs_znode *znode = cnext;

		cnext = cnext->cnext;
		if (ubifs_zn_obsolete(znode))
			kfree(znode);
	} while (cnext && cnext != c->cnext);
}

/**
 * ubifs_tnc_close - close TNC subsystem and free all related resources.
 * @c: UBIFS file-system description object
 */
void ubifs_tnc_close(struct ubifs_info *c)
{
	tnc_destroy_cnext(c);
	if (c->zroot.znode) {
		long n;

		ubifs_destroy_tnc_subtree(c->zroot.znode);
		/* Subtract this FS's clean znodes from the global counter */
		n = atomic_long_read(&c->clean_zn_cnt);
		atomic_long_sub(n, &ubifs_clean_zn_cnt);
	}
	kfree(c->gap_lebs);
	kfree(c->ilebs);
	destroy_old_idx(c);
}

/**
 * left_znode - get the znode to the left.
 * @c: UBIFS file-system description object
 * @znode: znode
 *
 * This function returns a pointer to the znode to the left of @znode or NULL if
 * there is not one. A negative error code is returned on failure.
 */
static struct ubifs_znode *left_znode(struct ubifs_info *c,
				      struct ubifs_znode *znode)
{
	int level = znode->level;

	while (1) {
		int n = znode->iip - 1;

		/* Go up until we can go left */
		znode = znode->parent;
		if (!znode)
			return NULL;
		if (n >= 0) {
			/* Now go down the rightmost branch to 'level' */
			znode = get_znode(c, znode, n);
			if (IS_ERR(znode))
				return znode;
			while (znode->level != level) {
				n = znode->child_cnt - 1;
				znode = get_znode(c, znode, n);
				if (IS_ERR(znode))
					return znode;
			}
			break;
		}
	}
	return znode;
}

/**
 * right_znode - get the znode to the right.
 * @c: UBIFS file-system description object
 * @znode: znode
 *
 * This function returns a pointer to the znode to the right of @znode or NULL
 * if there is not one. A negative error code is returned on failure.
*/ static struct ubifs_znode *right_znode(struct ubifs_info *c, struct ubifs_znode *znode) { int level = znode->level; while (1) { int n = znode->iip + 1; /* Go up until we can go right */ znode = znode->parent; if (!znode) return NULL; if (n < znode->child_cnt) { /* Now go down the leftmost branch to 'level' */ znode = get_znode(c, znode, n); if (IS_ERR(znode)) return znode; while (znode->level != level) { znode = get_znode(c, znode, 0); if (IS_ERR(znode)) return znode; } break; } } return znode; } /** * lookup_znode - find a particular indexing node from TNC. * @c: UBIFS file-system description object * @key: index node key to lookup * @level: index node level * @lnum: index node LEB number * @offs: index node offset * * This function searches an indexing node by its first key @key and its * address @lnum:@offs. It looks up the indexing tree by pulling all indexing * nodes it traverses to TNC. This function is called for indexing nodes which * were found on the media by scanning, for example when garbage-collecting or * when doing in-the-gaps commit. This means that the indexing node which is * looked for does not have to have exactly the same leftmost key @key, because * the leftmost key may have been changed, in which case TNC will contain a * dirty znode which still refers the same @lnum:@offs. This function is clever * enough to recognize such indexing nodes. * * Note, if a znode was deleted or changed too much, then this function will * not find it. For situations like this UBIFS has the old index RB-tree * (indexed by @lnum:@offs). * * This function returns a pointer to the znode found or %NULL if it is not * found. A negative error code is returned on failure. 
*/ static struct ubifs_znode *lookup_znode(struct ubifs_info *c, union ubifs_key *key, int level, int lnum, int offs) { struct ubifs_znode *znode, *zn; int n, nn; ubifs_assert(key_type(c, key) < UBIFS_INVALID_KEY); /* * The arguments have probably been read off flash, so don't assume * they are valid. */ if (level < 0) return ERR_PTR(-EINVAL); /* Get the root znode */ znode = c->zroot.znode; if (!znode) { znode = ubifs_load_znode(c, &c->zroot, NULL, 0); if (IS_ERR(znode)) return znode; } /* Check if it is the one we are looking for */ if (c->zroot.lnum == lnum && c->zroot.offs == offs) return znode; /* Descend to the parent level i.e. (level + 1) */ if (level >= znode->level) return NULL; while (1) { ubifs_search_zbranch(c, znode, key, &n); if (n < 0) { /* * We reached a znode where the leftmost key is greater * than the key we are searching for. This is the same * situation as the one described in a huge comment at * the end of the 'ubifs_lookup_level0()' function. And * for exactly the same reasons we have to try to look * left before giving up. */ znode = left_znode(c, znode); if (!znode) return NULL; if (IS_ERR(znode)) return znode; ubifs_search_zbranch(c, znode, key, &n); ubifs_assert(n >= 0); } if (znode->level == level + 1) break; znode = get_znode(c, znode, n); if (IS_ERR(znode)) return znode; } /* Check if the child is the one we are looking for */ if (znode->zbranch[n].lnum == lnum && znode->zbranch[n].offs == offs) return get_znode(c, znode, n); /* If the key is unique, there is nowhere else to look */ if (!is_hash_key(c, key)) return NULL; /* * The key is not unique and so may be also in the znodes to either * side. 
*/ zn = znode; nn = n; /* Look left */ while (1) { /* Move one branch to the left */ if (n) n -= 1; else { znode = left_znode(c, znode); if (!znode) break; if (IS_ERR(znode)) return znode; n = znode->child_cnt - 1; } /* Check it */ if (znode->zbranch[n].lnum == lnum && znode->zbranch[n].offs == offs) return get_znode(c, znode, n); /* Stop if the key is less than the one we are looking for */ if (keys_cmp(c, &znode->zbranch[n].key, key) < 0) break; } /* Back to the middle */ znode = zn; n = nn; /* Look right */ while (1) { /* Move one branch to the right */ if (++n >= znode->child_cnt) { znode = right_znode(c, znode); if (!znode) break; if (IS_ERR(znode)) return znode; n = 0; } /* Check it */ if (znode->zbranch[n].lnum == lnum && znode->zbranch[n].offs == offs) return get_znode(c, znode, n); /* Stop if the key is greater than the one we are looking for */ if (keys_cmp(c, &znode->zbranch[n].key, key) > 0) break; } return NULL; } /** * is_idx_node_in_tnc - determine if an index node is in the TNC. * @c: UBIFS file-system description object * @key: key of index node * @level: index node level * @lnum: LEB number of index node * @offs: offset of index node * * This function returns %0 if the index node is not referred to in the TNC, %1 * if the index node is referred to in the TNC and the corresponding znode is * dirty, %2 if an index node is referred to in the TNC and the corresponding * znode is clean, and a negative error code in case of failure. * * Note, the @key argument has to be the key of the first child. Also note, * this function relies on the fact that 0:0 is never a valid LEB number and * offset for a main-area node. */ int is_idx_node_in_tnc(struct ubifs_info *c, union ubifs_key *key, int level, int lnum, int offs) { struct ubifs_znode *znode; znode = lookup_znode(c, key, level, lnum, offs); if (!znode) return 0; if (IS_ERR(znode)) return PTR_ERR(znode); return ubifs_zn_dirty(znode) ? 
1 : 2; } /** * is_leaf_node_in_tnc - determine if a non-indexing not is in the TNC. * @c: UBIFS file-system description object * @key: node key * @lnum: node LEB number * @offs: node offset * * This function returns %1 if the node is referred to in the TNC, %0 if it is * not, and a negative error code in case of failure. * * Note, this function relies on the fact that 0:0 is never a valid LEB number * and offset for a main-area node. */ static int is_leaf_node_in_tnc(struct ubifs_info *c, union ubifs_key *key, int lnum, int offs) { struct ubifs_zbranch *zbr; struct ubifs_znode *znode, *zn; int n, found, err, nn; const int unique = !is_hash_key(c, key); found = ubifs_lookup_level0(c, key, &znode, &n); if (found < 0) return found; /* Error code */ if (!found) return 0; zbr = &znode->zbranch[n]; if (lnum == zbr->lnum && offs == zbr->offs) return 1; /* Found it */ if (unique) return 0; /* * Because the key is not unique, we have to look left * and right as well */ zn = znode; nn = n; /* Look left */ while (1) { err = tnc_prev(c, &znode, &n); if (err == -ENOENT) break; if (err) return err; if (keys_cmp(c, key, &znode->zbranch[n].key)) break; zbr = &znode->zbranch[n]; if (lnum == zbr->lnum && offs == zbr->offs) return 1; /* Found it */ } /* Look right */ znode = zn; n = nn; while (1) { err = tnc_next(c, &znode, &n); if (err) { if (err == -ENOENT) return 0; return err; } if (keys_cmp(c, key, &znode->zbranch[n].key)) break; zbr = &znode->zbranch[n]; if (lnum == zbr->lnum && offs == zbr->offs) return 1; /* Found it */ } return 0; } /** * ubifs_tnc_has_node - determine whether a node is in the TNC. * @c: UBIFS file-system description object * @key: node key * @level: index node level (if it is an index node) * @lnum: node LEB number * @offs: node offset * @is_idx: non-zero if the node is an index node * * This function returns %1 if the node is in the TNC, %0 if it is not, and a * negative error code in case of failure. 
For index nodes, @key has to be the * key of the first child. An index node is considered to be in the TNC only if * the corresponding znode is clean or has not been loaded. */ int ubifs_tnc_has_node(struct ubifs_info *c, union ubifs_key *key, int level, int lnum, int offs, int is_idx) { int err; mutex_lock(&c->tnc_mutex); if (is_idx) { err = is_idx_node_in_tnc(c, key, level, lnum, offs); if (err < 0) goto out_unlock; if (err == 1) /* The index node was found but it was dirty */ err = 0; else if (err == 2) /* The index node was found and it was clean */ err = 1; else BUG_ON(err != 0); } else err = is_leaf_node_in_tnc(c, key, lnum, offs); out_unlock: mutex_unlock(&c->tnc_mutex); return err; } /** * ubifs_dirty_idx_node - dirty an index node. * @c: UBIFS file-system description object * @key: index node key * @level: index node level * @lnum: index node LEB number * @offs: index node offset * * This function loads and dirties an index node so that it can be garbage * collected. The @key argument has to be the key of the first child. This * function relies on the fact that 0:0 is never a valid LEB number and offset * for a main-area node. Returns %0 on success and a negative error code on * failure. */ int ubifs_dirty_idx_node(struct ubifs_info *c, union ubifs_key *key, int level, int lnum, int offs) { struct ubifs_znode *znode; int err = 0; mutex_lock(&c->tnc_mutex); znode = lookup_znode(c, key, level, lnum, offs); if (!znode) goto out_unlock; if (IS_ERR(znode)) { err = PTR_ERR(znode); goto out_unlock; } znode = dirty_cow_bottom_up(c, znode); if (IS_ERR(znode)) { err = PTR_ERR(znode); goto out_unlock; } out_unlock: mutex_unlock(&c->tnc_mutex); return err; } #ifdef CONFIG_UBIFS_FS_DEBUG /** * dbg_check_inode_size - check if inode size is correct. * @c: UBIFS file-system description object * @inum: inode number * @size: inode size * * This function makes sure that the inode size (@size) is correct and it does * not have any pages beyond @size. 
Returns zero if the inode is OK, %-EINVAL * if it has a data page beyond @size, and other negative error code in case of * other errors. */ int dbg_check_inode_size(struct ubifs_info *c, const struct inode *inode, loff_t size) { int err, n; union ubifs_key from_key, to_key, *key; struct ubifs_znode *znode; unsigned int block; if (!S_ISREG(inode->i_mode)) return 0; if (!dbg_is_chk_gen(c)) return 0; block = (size + UBIFS_BLOCK_SIZE - 1) >> UBIFS_BLOCK_SHIFT; data_key_init(c, &from_key, inode->i_ino, block); highest_data_key(c, &to_key, inode->i_ino); mutex_lock(&c->tnc_mutex); err = ubifs_lookup_level0(c, &from_key, &znode, &n); if (err < 0) goto out_unlock; if (err) { err = -EINVAL; key = &from_key; goto out_dump; } err = tnc_next(c, &znode, &n); if (err == -ENOENT) { err = 0; goto out_unlock; } if (err < 0) goto out_unlock; ubifs_assert(err == 0); key = &znode->zbranch[n].key; if (!key_in_range(c, key, &from_key, &to_key)) goto out_unlock; out_dump: block = key_block(c, key); ubifs_err("inode %lu has size %lld, but there are data at offset %lld", (unsigned long)inode->i_ino, size, ((loff_t)block) << UBIFS_BLOCK_SHIFT); mutex_unlock(&c->tnc_mutex); dbg_dump_inode(c, inode); dbg_dump_stack(); return -EINVAL; out_unlock: mutex_unlock(&c->tnc_mutex); return err; } #endif /* CONFIG_UBIFS_FS_DEBUG */
gpl-2.0
ztemt/Z5S_NX503A_130_kernel
drivers/media/dvb/dvb-usb/az6027.c
5078
27717
/* DVB USB compliant Linux driver for the AZUREWAVE DVB-S/S2 USB2.0 (AZ6027) * receiver. * * Copyright (C) 2009 Adams.Xu <adams.xu@azwave.com.cn> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation, version 2. * * see Documentation/dvb/README.dvb-usb for more information */ #include "az6027.h" #include "stb0899_drv.h" #include "stb0899_reg.h" #include "stb0899_cfg.h" #include "stb6100.h" #include "stb6100_cfg.h" #include "dvb_ca_en50221.h" int dvb_usb_az6027_debug; module_param_named(debug, dvb_usb_az6027_debug, int, 0644); MODULE_PARM_DESC(debug, "set debugging level (1=info,xfer=2,rc=4 (or-able))." DVB_USB_DEBUG_STATUS); DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); struct az6027_device_state { struct dvb_ca_en50221 ca; struct mutex ca_mutex; u8 power_state; }; static const struct stb0899_s1_reg az6027_stb0899_s1_init_1[] = { /* 0x0000000b, SYSREG */ { STB0899_DEV_ID , 0x30 }, { STB0899_DISCNTRL1 , 0x32 }, { STB0899_DISCNTRL2 , 0x80 }, { STB0899_DISRX_ST0 , 0x04 }, { STB0899_DISRX_ST1 , 0x00 }, { STB0899_DISPARITY , 0x00 }, { STB0899_DISSTATUS , 0x20 }, { STB0899_DISF22 , 0x99 }, { STB0899_DISF22RX , 0xa8 }, /* SYSREG ? 
*/ { STB0899_ACRPRESC , 0x11 }, { STB0899_ACRDIV1 , 0x0a }, { STB0899_ACRDIV2 , 0x05 }, { STB0899_DACR1 , 0x00 }, { STB0899_DACR2 , 0x00 }, { STB0899_OUTCFG , 0x00 }, { STB0899_MODECFG , 0x00 }, { STB0899_IRQSTATUS_3 , 0xfe }, { STB0899_IRQSTATUS_2 , 0x03 }, { STB0899_IRQSTATUS_1 , 0x7c }, { STB0899_IRQSTATUS_0 , 0xf4 }, { STB0899_IRQMSK_3 , 0xf3 }, { STB0899_IRQMSK_2 , 0xfc }, { STB0899_IRQMSK_1 , 0xff }, { STB0899_IRQMSK_0 , 0xff }, { STB0899_IRQCFG , 0x00 }, { STB0899_I2CCFG , 0x88 }, { STB0899_I2CRPT , 0x58 }, { STB0899_IOPVALUE5 , 0x00 }, { STB0899_IOPVALUE4 , 0x33 }, { STB0899_IOPVALUE3 , 0x6d }, { STB0899_IOPVALUE2 , 0x90 }, { STB0899_IOPVALUE1 , 0x60 }, { STB0899_IOPVALUE0 , 0x00 }, { STB0899_GPIO00CFG , 0x82 }, { STB0899_GPIO01CFG , 0x82 }, { STB0899_GPIO02CFG , 0x82 }, { STB0899_GPIO03CFG , 0x82 }, { STB0899_GPIO04CFG , 0x82 }, { STB0899_GPIO05CFG , 0x82 }, { STB0899_GPIO06CFG , 0x82 }, { STB0899_GPIO07CFG , 0x82 }, { STB0899_GPIO08CFG , 0x82 }, { STB0899_GPIO09CFG , 0x82 }, { STB0899_GPIO10CFG , 0x82 }, { STB0899_GPIO11CFG , 0x82 }, { STB0899_GPIO12CFG , 0x82 }, { STB0899_GPIO13CFG , 0x82 }, { STB0899_GPIO14CFG , 0x82 }, { STB0899_GPIO15CFG , 0x82 }, { STB0899_GPIO16CFG , 0x82 }, { STB0899_GPIO17CFG , 0x82 }, { STB0899_GPIO18CFG , 0x82 }, { STB0899_GPIO19CFG , 0x82 }, { STB0899_GPIO20CFG , 0x82 }, { STB0899_SDATCFG , 0xb8 }, { STB0899_SCLTCFG , 0xba }, { STB0899_AGCRFCFG , 0x1c }, /* 0x11 */ { STB0899_GPIO22 , 0x82 }, /* AGCBB2CFG */ { STB0899_GPIO21 , 0x91 }, /* AGCBB1CFG */ { STB0899_DIRCLKCFG , 0x82 }, { STB0899_CLKOUT27CFG , 0x7e }, { STB0899_STDBYCFG , 0x82 }, { STB0899_CS0CFG , 0x82 }, { STB0899_CS1CFG , 0x82 }, { STB0899_DISEQCOCFG , 0x20 }, { STB0899_GPIO32CFG , 0x82 }, { STB0899_GPIO33CFG , 0x82 }, { STB0899_GPIO34CFG , 0x82 }, { STB0899_GPIO35CFG , 0x82 }, { STB0899_GPIO36CFG , 0x82 }, { STB0899_GPIO37CFG , 0x82 }, { STB0899_GPIO38CFG , 0x82 }, { STB0899_GPIO39CFG , 0x82 }, { STB0899_NCOARSE , 0x17 }, /* 0x15 = 27 Mhz Clock, F/3 = 198MHz, F/6 = 
99MHz */ { STB0899_SYNTCTRL , 0x02 }, /* 0x00 = CLK from CLKI, 0x02 = CLK from XTALI */ { STB0899_FILTCTRL , 0x00 }, { STB0899_SYSCTRL , 0x01 }, { STB0899_STOPCLK1 , 0x20 }, { STB0899_STOPCLK2 , 0x00 }, { STB0899_INTBUFSTATUS , 0x00 }, { STB0899_INTBUFCTRL , 0x0a }, { 0xffff , 0xff }, }; static const struct stb0899_s1_reg az6027_stb0899_s1_init_3[] = { { STB0899_DEMOD , 0x00 }, { STB0899_RCOMPC , 0xc9 }, { STB0899_AGC1CN , 0x01 }, { STB0899_AGC1REF , 0x10 }, { STB0899_RTC , 0x23 }, { STB0899_TMGCFG , 0x4e }, { STB0899_AGC2REF , 0x34 }, { STB0899_TLSR , 0x84 }, { STB0899_CFD , 0xf7 }, { STB0899_ACLC , 0x87 }, { STB0899_BCLC , 0x94 }, { STB0899_EQON , 0x41 }, { STB0899_LDT , 0xf1 }, { STB0899_LDT2 , 0xe3 }, { STB0899_EQUALREF , 0xb4 }, { STB0899_TMGRAMP , 0x10 }, { STB0899_TMGTHD , 0x30 }, { STB0899_IDCCOMP , 0xfd }, { STB0899_QDCCOMP , 0xff }, { STB0899_POWERI , 0x0c }, { STB0899_POWERQ , 0x0f }, { STB0899_RCOMP , 0x6c }, { STB0899_AGCIQIN , 0x80 }, { STB0899_AGC2I1 , 0x06 }, { STB0899_AGC2I2 , 0x00 }, { STB0899_TLIR , 0x30 }, { STB0899_RTF , 0x7f }, { STB0899_DSTATUS , 0x00 }, { STB0899_LDI , 0xbc }, { STB0899_CFRM , 0xea }, { STB0899_CFRL , 0x31 }, { STB0899_NIRM , 0x2b }, { STB0899_NIRL , 0x80 }, { STB0899_ISYMB , 0x1d }, { STB0899_QSYMB , 0xa6 }, { STB0899_SFRH , 0x2f }, { STB0899_SFRM , 0x68 }, { STB0899_SFRL , 0x40 }, { STB0899_SFRUPH , 0x2f }, { STB0899_SFRUPM , 0x68 }, { STB0899_SFRUPL , 0x40 }, { STB0899_EQUAI1 , 0x02 }, { STB0899_EQUAQ1 , 0xff }, { STB0899_EQUAI2 , 0x04 }, { STB0899_EQUAQ2 , 0x05 }, { STB0899_EQUAI3 , 0x02 }, { STB0899_EQUAQ3 , 0xfd }, { STB0899_EQUAI4 , 0x03 }, { STB0899_EQUAQ4 , 0x07 }, { STB0899_EQUAI5 , 0x08 }, { STB0899_EQUAQ5 , 0xf5 }, { STB0899_DSTATUS2 , 0x00 }, { STB0899_VSTATUS , 0x00 }, { STB0899_VERROR , 0x86 }, { STB0899_IQSWAP , 0x2a }, { STB0899_ECNT1M , 0x00 }, { STB0899_ECNT1L , 0x00 }, { STB0899_ECNT2M , 0x00 }, { STB0899_ECNT2L , 0x00 }, { STB0899_ECNT3M , 0x0a }, { STB0899_ECNT3L , 0xad }, { STB0899_FECAUTO1 , 0x06 }, { 
STB0899_FECM , 0x01 }, { STB0899_VTH12 , 0xb0 }, { STB0899_VTH23 , 0x7a }, { STB0899_VTH34 , 0x58 }, { STB0899_VTH56 , 0x38 }, { STB0899_VTH67 , 0x34 }, { STB0899_VTH78 , 0x24 }, { STB0899_PRVIT , 0xff }, { STB0899_VITSYNC , 0x19 }, { STB0899_RSULC , 0xb1 }, /* DVB = 0xb1, DSS = 0xa1 */ { STB0899_TSULC , 0x42 }, { STB0899_RSLLC , 0x41 }, { STB0899_TSLPL , 0x12 }, { STB0899_TSCFGH , 0x0c }, { STB0899_TSCFGM , 0x00 }, { STB0899_TSCFGL , 0x00 }, { STB0899_TSOUT , 0x69 }, /* 0x0d for CAM */ { STB0899_RSSYNCDEL , 0x00 }, { STB0899_TSINHDELH , 0x02 }, { STB0899_TSINHDELM , 0x00 }, { STB0899_TSINHDELL , 0x00 }, { STB0899_TSLLSTKM , 0x1b }, { STB0899_TSLLSTKL , 0xb3 }, { STB0899_TSULSTKM , 0x00 }, { STB0899_TSULSTKL , 0x00 }, { STB0899_PCKLENUL , 0xbc }, { STB0899_PCKLENLL , 0xcc }, { STB0899_RSPCKLEN , 0xbd }, { STB0899_TSSTATUS , 0x90 }, { STB0899_ERRCTRL1 , 0xb6 }, { STB0899_ERRCTRL2 , 0x95 }, { STB0899_ERRCTRL3 , 0x8d }, { STB0899_DMONMSK1 , 0x27 }, { STB0899_DMONMSK0 , 0x03 }, { STB0899_DEMAPVIT , 0x5c }, { STB0899_PLPARM , 0x19 }, { STB0899_PDELCTRL , 0x48 }, { STB0899_PDELCTRL2 , 0x00 }, { STB0899_BBHCTRL1 , 0x00 }, { STB0899_BBHCTRL2 , 0x00 }, { STB0899_HYSTTHRESH , 0x77 }, { STB0899_MATCSTM , 0x00 }, { STB0899_MATCSTL , 0x00 }, { STB0899_UPLCSTM , 0x00 }, { STB0899_UPLCSTL , 0x00 }, { STB0899_DFLCSTM , 0x00 }, { STB0899_DFLCSTL , 0x00 }, { STB0899_SYNCCST , 0x00 }, { STB0899_SYNCDCSTM , 0x00 }, { STB0899_SYNCDCSTL , 0x00 }, { STB0899_ISI_ENTRY , 0x00 }, { STB0899_ISI_BIT_EN , 0x00 }, { STB0899_MATSTRM , 0xf0 }, { STB0899_MATSTRL , 0x02 }, { STB0899_UPLSTRM , 0x45 }, { STB0899_UPLSTRL , 0x60 }, { STB0899_DFLSTRM , 0xe3 }, { STB0899_DFLSTRL , 0x00 }, { STB0899_SYNCSTR , 0x47 }, { STB0899_SYNCDSTRM , 0x05 }, { STB0899_SYNCDSTRL , 0x18 }, { STB0899_CFGPDELSTATUS1 , 0x19 }, { STB0899_CFGPDELSTATUS2 , 0x2b }, { STB0899_BBFERRORM , 0x00 }, { STB0899_BBFERRORL , 0x01 }, { STB0899_UPKTERRORM , 0x00 }, { STB0899_UPKTERRORL , 0x00 }, { 0xffff , 0xff }, }; struct 
stb0899_config az6027_stb0899_config = { .init_dev = az6027_stb0899_s1_init_1, .init_s2_demod = stb0899_s2_init_2, .init_s1_demod = az6027_stb0899_s1_init_3, .init_s2_fec = stb0899_s2_init_4, .init_tst = stb0899_s1_init_5, .demod_address = 0xd0, /* 0x68, 0xd0 >> 1 */ .xtal_freq = 27000000, .inversion = IQ_SWAP_ON, /* 1 */ .lo_clk = 76500000, .hi_clk = 99000000, .esno_ave = STB0899_DVBS2_ESNO_AVE, .esno_quant = STB0899_DVBS2_ESNO_QUANT, .avframes_coarse = STB0899_DVBS2_AVFRAMES_COARSE, .avframes_fine = STB0899_DVBS2_AVFRAMES_FINE, .miss_threshold = STB0899_DVBS2_MISS_THRESHOLD, .uwp_threshold_acq = STB0899_DVBS2_UWP_THRESHOLD_ACQ, .uwp_threshold_track = STB0899_DVBS2_UWP_THRESHOLD_TRACK, .uwp_threshold_sof = STB0899_DVBS2_UWP_THRESHOLD_SOF, .sof_search_timeout = STB0899_DVBS2_SOF_SEARCH_TIMEOUT, .btr_nco_bits = STB0899_DVBS2_BTR_NCO_BITS, .btr_gain_shift_offset = STB0899_DVBS2_BTR_GAIN_SHIFT_OFFSET, .crl_nco_bits = STB0899_DVBS2_CRL_NCO_BITS, .ldpc_max_iter = STB0899_DVBS2_LDPC_MAX_ITER, .tuner_get_frequency = stb6100_get_frequency, .tuner_set_frequency = stb6100_set_frequency, .tuner_set_bandwidth = stb6100_set_bandwidth, .tuner_get_bandwidth = stb6100_get_bandwidth, .tuner_set_rfsiggain = NULL, }; struct stb6100_config az6027_stb6100_config = { .tuner_address = 0xc0, .refclock = 27000000, }; /* check for mutex FIXME */ int az6027_usb_in_op(struct dvb_usb_device *d, u8 req, u16 value, u16 index, u8 *b, int blen) { int ret = -1; if (mutex_lock_interruptible(&d->usb_mutex)) return -EAGAIN; ret = usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev, 0), req, USB_TYPE_VENDOR | USB_DIR_IN, value, index, b, blen, 2000); if (ret < 0) { warn("usb in operation failed. (%d)", ret); ret = -EIO; } else ret = 0; deb_xfer("in: req. 
%02x, val: %04x, ind: %04x, buffer: ", req, value, index); debug_dump(b, blen, deb_xfer); mutex_unlock(&d->usb_mutex); return ret; } static int az6027_usb_out_op(struct dvb_usb_device *d, u8 req, u16 value, u16 index, u8 *b, int blen) { int ret; deb_xfer("out: req. %02x, val: %04x, ind: %04x, buffer: ", req, value, index); debug_dump(b, blen, deb_xfer); if (mutex_lock_interruptible(&d->usb_mutex)) return -EAGAIN; ret = usb_control_msg(d->udev, usb_sndctrlpipe(d->udev, 0), req, USB_TYPE_VENDOR | USB_DIR_OUT, value, index, b, blen, 2000); if (ret != blen) { warn("usb out operation failed. (%d)", ret); mutex_unlock(&d->usb_mutex); return -EIO; } else{ mutex_unlock(&d->usb_mutex); return 0; } } static int az6027_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff) { int ret; u8 req; u16 value; u16 index; int blen; deb_info("%s %d", __func__, onoff); req = 0xBC; value = onoff; index = 0; blen = 0; ret = az6027_usb_out_op(adap->dev, req, value, index, NULL, blen); if (ret != 0) warn("usb out operation failed. 
(%d)", ret); return ret; } /* keys for the enclosed remote control */ static struct rc_map_table rc_map_az6027_table[] = { { 0x01, KEY_1 }, { 0x02, KEY_2 }, }; /* remote control stuff (does not work with my box) */ static int az6027_rc_query(struct dvb_usb_device *d, u32 *event, int *state) { return 0; } /* int az6027_power_ctrl(struct dvb_usb_device *d, int onoff) { u8 v = onoff; return az6027_usb_out_op(d,0xBC,v,3,NULL,1); } */ static int az6027_ci_read_attribute_mem(struct dvb_ca_en50221 *ca, int slot, int address) { struct dvb_usb_device *d = (struct dvb_usb_device *)ca->data; struct az6027_device_state *state = (struct az6027_device_state *)d->priv; int ret; u8 req; u16 value; u16 index; int blen; u8 *b; if (slot != 0) return -EINVAL; b = kmalloc(12, GFP_KERNEL); if (!b) return -ENOMEM; mutex_lock(&state->ca_mutex); req = 0xC1; value = address; index = 0; blen = 1; ret = az6027_usb_in_op(d, req, value, index, b, blen); if (ret < 0) { warn("usb in operation failed. (%d)", ret); ret = -EINVAL; } else { ret = b[0]; } mutex_unlock(&state->ca_mutex); kfree(b); return ret; } static int az6027_ci_write_attribute_mem(struct dvb_ca_en50221 *ca, int slot, int address, u8 value) { struct dvb_usb_device *d = (struct dvb_usb_device *)ca->data; struct az6027_device_state *state = (struct az6027_device_state *)d->priv; int ret; u8 req; u16 value1; u16 index; int blen; deb_info("%s %d", __func__, slot); if (slot != 0) return -EINVAL; mutex_lock(&state->ca_mutex); req = 0xC2; value1 = address; index = value; blen = 0; ret = az6027_usb_out_op(d, req, value1, index, NULL, blen); if (ret != 0) warn("usb out operation failed. 
(%d)", ret); mutex_unlock(&state->ca_mutex); return ret; } static int az6027_ci_read_cam_control(struct dvb_ca_en50221 *ca, int slot, u8 address) { struct dvb_usb_device *d = (struct dvb_usb_device *)ca->data; struct az6027_device_state *state = (struct az6027_device_state *)d->priv; int ret; u8 req; u16 value; u16 index; int blen; u8 *b; if (slot != 0) return -EINVAL; b = kmalloc(12, GFP_KERNEL); if (!b) return -ENOMEM; mutex_lock(&state->ca_mutex); req = 0xC3; value = address; index = 0; blen = 2; ret = az6027_usb_in_op(d, req, value, index, b, blen); if (ret < 0) { warn("usb in operation failed. (%d)", ret); ret = -EINVAL; } else { if (b[0] == 0) warn("Read CI IO error"); ret = b[1]; deb_info("read cam data = %x from 0x%x", b[1], value); } mutex_unlock(&state->ca_mutex); kfree(b); return ret; } static int az6027_ci_write_cam_control(struct dvb_ca_en50221 *ca, int slot, u8 address, u8 value) { struct dvb_usb_device *d = (struct dvb_usb_device *)ca->data; struct az6027_device_state *state = (struct az6027_device_state *)d->priv; int ret; u8 req; u16 value1; u16 index; int blen; if (slot != 0) return -EINVAL; mutex_lock(&state->ca_mutex); req = 0xC4; value1 = address; index = value; blen = 0; ret = az6027_usb_out_op(d, req, value1, index, NULL, blen); if (ret != 0) { warn("usb out operation failed. (%d)", ret); goto failed; } failed: mutex_unlock(&state->ca_mutex); return ret; } static int CI_CamReady(struct dvb_ca_en50221 *ca, int slot) { struct dvb_usb_device *d = (struct dvb_usb_device *)ca->data; int ret; u8 req; u16 value; u16 index; int blen; u8 *b; b = kmalloc(12, GFP_KERNEL); if (!b) return -ENOMEM; req = 0xC8; value = 0; index = 0; blen = 1; ret = az6027_usb_in_op(d, req, value, index, b, blen); if (ret < 0) { warn("usb in operation failed. 
(%d)", ret); ret = -EIO; } else{ ret = b[0]; } kfree(b); return ret; } static int az6027_ci_slot_reset(struct dvb_ca_en50221 *ca, int slot) { struct dvb_usb_device *d = (struct dvb_usb_device *)ca->data; struct az6027_device_state *state = (struct az6027_device_state *)d->priv; int ret, i; u8 req; u16 value; u16 index; int blen; mutex_lock(&state->ca_mutex); req = 0xC6; value = 1; index = 0; blen = 0; ret = az6027_usb_out_op(d, req, value, index, NULL, blen); if (ret != 0) { warn("usb out operation failed. (%d)", ret); goto failed; } msleep(500); req = 0xC6; value = 0; index = 0; blen = 0; ret = az6027_usb_out_op(d, req, value, index, NULL, blen); if (ret != 0) { warn("usb out operation failed. (%d)", ret); goto failed; } for (i = 0; i < 15; i++) { msleep(100); if (CI_CamReady(ca, slot)) { deb_info("CAM Ready"); break; } } msleep(5000); failed: mutex_unlock(&state->ca_mutex); return ret; } static int az6027_ci_slot_shutdown(struct dvb_ca_en50221 *ca, int slot) { return 0; } static int az6027_ci_slot_ts_enable(struct dvb_ca_en50221 *ca, int slot) { struct dvb_usb_device *d = (struct dvb_usb_device *)ca->data; struct az6027_device_state *state = (struct az6027_device_state *)d->priv; int ret; u8 req; u16 value; u16 index; int blen; deb_info("%s", __func__); mutex_lock(&state->ca_mutex); req = 0xC7; value = 1; index = 0; blen = 0; ret = az6027_usb_out_op(d, req, value, index, NULL, blen); if (ret != 0) { warn("usb out operation failed. 
(%d)", ret); goto failed; } failed: mutex_unlock(&state->ca_mutex); return ret; } static int az6027_ci_poll_slot_status(struct dvb_ca_en50221 *ca, int slot, int open) { struct dvb_usb_device *d = (struct dvb_usb_device *)ca->data; struct az6027_device_state *state = (struct az6027_device_state *)d->priv; int ret; u8 req; u16 value; u16 index; int blen; u8 *b; b = kmalloc(12, GFP_KERNEL); if (!b) return -ENOMEM; mutex_lock(&state->ca_mutex); req = 0xC5; value = 0; index = 0; blen = 1; ret = az6027_usb_in_op(d, req, value, index, b, blen); if (ret < 0) { warn("usb in operation failed. (%d)", ret); ret = -EIO; } else ret = 0; if (!ret && b[0] == 1) { ret = DVB_CA_EN50221_POLL_CAM_PRESENT | DVB_CA_EN50221_POLL_CAM_READY; } mutex_unlock(&state->ca_mutex); kfree(b); return ret; } static void az6027_ci_uninit(struct dvb_usb_device *d) { struct az6027_device_state *state; deb_info("%s", __func__); if (NULL == d) return; state = (struct az6027_device_state *)d->priv; if (NULL == state) return; if (NULL == state->ca.data) return; dvb_ca_en50221_release(&state->ca); memset(&state->ca, 0, sizeof(state->ca)); } static int az6027_ci_init(struct dvb_usb_adapter *a) { struct dvb_usb_device *d = a->dev; struct az6027_device_state *state = (struct az6027_device_state *)d->priv; int ret; deb_info("%s", __func__); mutex_init(&state->ca_mutex); state->ca.owner = THIS_MODULE; state->ca.read_attribute_mem = az6027_ci_read_attribute_mem; state->ca.write_attribute_mem = az6027_ci_write_attribute_mem; state->ca.read_cam_control = az6027_ci_read_cam_control; state->ca.write_cam_control = az6027_ci_write_cam_control; state->ca.slot_reset = az6027_ci_slot_reset; state->ca.slot_shutdown = az6027_ci_slot_shutdown; state->ca.slot_ts_enable = az6027_ci_slot_ts_enable; state->ca.poll_slot_status = az6027_ci_poll_slot_status; state->ca.data = d; ret = dvb_ca_en50221_init(&a->dvb_adap, &state->ca, 0, /* flags */ 1);/* n_slots */ if (ret != 0) { err("Cannot initialize CI: Error %d.", ret); 
memset(&state->ca, 0, sizeof(state->ca)); return ret; } deb_info("CI initialized."); return 0; } /* static int az6027_read_mac_addr(struct dvb_usb_device *d, u8 mac[6]) { az6027_usb_in_op(d, 0xb7, 6, 0, &mac[0], 6); return 0; } */ static int az6027_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage) { u8 buf; struct dvb_usb_adapter *adap = fe->dvb->priv; struct i2c_msg i2c_msg = { .addr = 0x99, .flags = 0, .buf = &buf, .len = 1 }; /* * 2 --18v * 1 --13v * 0 --off */ switch (voltage) { case SEC_VOLTAGE_13: buf = 1; i2c_transfer(&adap->dev->i2c_adap, &i2c_msg, 1); break; case SEC_VOLTAGE_18: buf = 2; i2c_transfer(&adap->dev->i2c_adap, &i2c_msg, 1); break; case SEC_VOLTAGE_OFF: buf = 0; i2c_transfer(&adap->dev->i2c_adap, &i2c_msg, 1); break; default: return -EINVAL; } return 0; } static int az6027_frontend_poweron(struct dvb_usb_adapter *adap) { int ret; u8 req; u16 value; u16 index; int blen; req = 0xBC; value = 1; /* power on */ index = 3; blen = 0; ret = az6027_usb_out_op(adap->dev, req, value, index, NULL, blen); if (ret != 0) return -EIO; return 0; } static int az6027_frontend_reset(struct dvb_usb_adapter *adap) { int ret; u8 req; u16 value; u16 index; int blen; /* reset demodulator */ req = 0xC0; value = 1; /* high */ index = 3; blen = 0; ret = az6027_usb_out_op(adap->dev, req, value, index, NULL, blen); if (ret != 0) return -EIO; req = 0xC0; value = 0; /* low */ index = 3; blen = 0; msleep_interruptible(200); ret = az6027_usb_out_op(adap->dev, req, value, index, NULL, blen); if (ret != 0) return -EIO; msleep_interruptible(200); req = 0xC0; value = 1; /*high */ index = 3; blen = 0; ret = az6027_usb_out_op(adap->dev, req, value, index, NULL, blen); if (ret != 0) return -EIO; msleep_interruptible(200); return 0; } static int az6027_frontend_tsbypass(struct dvb_usb_adapter *adap, int onoff) { int ret; u8 req; u16 value; u16 index; int blen; /* TS passthrough */ req = 0xC7; value = onoff; index = 0; blen = 0; ret = az6027_usb_out_op(adap->dev, req, value, 
index, NULL, blen); if (ret != 0) return -EIO; return 0; } static int az6027_frontend_attach(struct dvb_usb_adapter *adap) { az6027_frontend_poweron(adap); az6027_frontend_reset(adap); deb_info("adap = %p, dev = %p\n", adap, adap->dev); adap->fe_adap[0].fe = stb0899_attach(&az6027_stb0899_config, &adap->dev->i2c_adap); if (adap->fe_adap[0].fe) { deb_info("found STB0899 DVB-S/DVB-S2 frontend @0x%02x", az6027_stb0899_config.demod_address); if (stb6100_attach(adap->fe_adap[0].fe, &az6027_stb6100_config, &adap->dev->i2c_adap)) { deb_info("found STB6100 DVB-S/DVB-S2 frontend @0x%02x", az6027_stb6100_config.tuner_address); adap->fe_adap[0].fe->ops.set_voltage = az6027_set_voltage; az6027_ci_init(adap); } else { adap->fe_adap[0].fe = NULL; } } else warn("no front-end attached\n"); az6027_frontend_tsbypass(adap, 0); return 0; } static struct dvb_usb_device_properties az6027_properties; static void az6027_usb_disconnect(struct usb_interface *intf) { struct dvb_usb_device *d = usb_get_intfdata(intf); az6027_ci_uninit(d); dvb_usb_device_exit(intf); } static int az6027_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) { return dvb_usb_device_init(intf, &az6027_properties, THIS_MODULE, NULL, adapter_nr); } /* I2C */ static int az6027_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], int num) { struct dvb_usb_device *d = i2c_get_adapdata(adap); int i = 0, j = 0, len = 0; u16 index; u16 value; int length; u8 req; u8 *data; data = kmalloc(256, GFP_KERNEL); if (!data) return -ENOMEM; if (mutex_lock_interruptible(&d->i2c_mutex) < 0) { kfree(data); return -EAGAIN; } if (num > 2) warn("more than 2 i2c messages at a time is not handled yet. 
TODO."); for (i = 0; i < num; i++) { if (msg[i].addr == 0x99) { req = 0xBE; index = 0; value = msg[i].buf[0] & 0x00ff; length = 1; az6027_usb_out_op(d, req, value, index, data, length); } if (msg[i].addr == 0xd0) { /* write/read request */ if (i + 1 < num && (msg[i + 1].flags & I2C_M_RD)) { req = 0xB9; index = (((msg[i].buf[0] << 8) & 0xff00) | (msg[i].buf[1] & 0x00ff)); value = msg[i].addr + (msg[i].len << 8); length = msg[i + 1].len + 6; az6027_usb_in_op(d, req, value, index, data, length); len = msg[i + 1].len; for (j = 0; j < len; j++) msg[i + 1].buf[j] = data[j + 5]; i++; } else { /* demod 16bit addr */ req = 0xBD; index = (((msg[i].buf[0] << 8) & 0xff00) | (msg[i].buf[1] & 0x00ff)); value = msg[i].addr + (2 << 8); length = msg[i].len - 2; len = msg[i].len - 2; for (j = 0; j < len; j++) data[j] = msg[i].buf[j + 2]; az6027_usb_out_op(d, req, value, index, data, length); } } if (msg[i].addr == 0xc0) { if (msg[i].flags & I2C_M_RD) { req = 0xB9; index = 0x0; value = msg[i].addr; length = msg[i].len + 6; az6027_usb_in_op(d, req, value, index, data, length); len = msg[i].len; for (j = 0; j < len; j++) msg[i].buf[j] = data[j + 5]; } else { req = 0xBD; index = msg[i].buf[0] & 0x00FF; value = msg[i].addr + (1 << 8); length = msg[i].len - 1; len = msg[i].len - 1; for (j = 0; j < len; j++) data[j] = msg[i].buf[j + 1]; az6027_usb_out_op(d, req, value, index, data, length); } } } mutex_unlock(&d->i2c_mutex); kfree(data); return i; } static u32 az6027_i2c_func(struct i2c_adapter *adapter) { return I2C_FUNC_I2C; } static struct i2c_algorithm az6027_i2c_algo = { .master_xfer = az6027_i2c_xfer, .functionality = az6027_i2c_func, }; int az6027_identify_state(struct usb_device *udev, struct dvb_usb_device_properties *props, struct dvb_usb_device_description **desc, int *cold) { u8 *b; s16 ret; b = kmalloc(16, GFP_KERNEL); if (!b) return -ENOMEM; ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 0xb7, USB_TYPE_VENDOR | USB_DIR_IN, 6, 0, b, 6, USB_CTRL_GET_TIMEOUT); *cold = ret 
<= 0; kfree(b); deb_info("cold: %d\n", *cold); return 0; } static struct usb_device_id az6027_usb_table[] = { { USB_DEVICE(USB_VID_AZUREWAVE, USB_PID_AZUREWAVE_AZ6027) }, { USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_DVBS2CI_V1) }, { USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_DVBS2CI_V2) }, { USB_DEVICE(USB_VID_TECHNISAT, USB_PID_TECHNISAT_USB2_HDCI_V1) }, { USB_DEVICE(USB_VID_TECHNISAT, USB_PID_TECHNISAT_USB2_HDCI_V2) }, { USB_DEVICE(USB_VID_ELGATO, USB_PID_ELGATO_EYETV_SAT) }, { }, }; MODULE_DEVICE_TABLE(usb, az6027_usb_table); static struct dvb_usb_device_properties az6027_properties = { .caps = DVB_USB_IS_AN_I2C_ADAPTER, .usb_ctrl = CYPRESS_FX2, .firmware = "dvb-usb-az6027-03.fw", .no_reconnect = 1, .size_of_priv = sizeof(struct az6027_device_state), .identify_state = az6027_identify_state, .num_adapters = 1, .adapter = { { .num_frontends = 1, .fe = {{ .streaming_ctrl = az6027_streaming_ctrl, .frontend_attach = az6027_frontend_attach, /* parameter for the MPEG2-data transfer */ .stream = { .type = USB_BULK, .count = 10, .endpoint = 0x02, .u = { .bulk = { .buffersize = 4096, } } }, }}, } }, /* .power_ctrl = az6027_power_ctrl, .read_mac_address = az6027_read_mac_addr, */ .rc.legacy = { .rc_map_table = rc_map_az6027_table, .rc_map_size = ARRAY_SIZE(rc_map_az6027_table), .rc_interval = 400, .rc_query = az6027_rc_query, }, .i2c_algo = &az6027_i2c_algo, .num_device_descs = 6, .devices = { { .name = "AZUREWAVE DVB-S/S2 USB2.0 (AZ6027)", .cold_ids = { &az6027_usb_table[0], NULL }, .warm_ids = { NULL }, }, { .name = "TERRATEC S7", .cold_ids = { &az6027_usb_table[1], NULL }, .warm_ids = { NULL }, }, { .name = "TERRATEC S7 MKII", .cold_ids = { &az6027_usb_table[2], NULL }, .warm_ids = { NULL }, }, { .name = "Technisat SkyStar USB 2 HD CI", .cold_ids = { &az6027_usb_table[3], NULL }, .warm_ids = { NULL }, }, { .name = "Technisat SkyStar USB 2 HD CI", .cold_ids = { &az6027_usb_table[4], NULL }, .warm_ids = { NULL }, }, { .name = "Elgato EyeTV Sat", .cold_ids = { 
&az6027_usb_table[5], NULL }, .warm_ids = { NULL }, }, { NULL }, } }; /* usb specific object needed to register this driver with the usb subsystem */ static struct usb_driver az6027_usb_driver = { .name = "dvb_usb_az6027", .probe = az6027_usb_probe, .disconnect = az6027_usb_disconnect, .id_table = az6027_usb_table, }; module_usb_driver(az6027_usb_driver); MODULE_AUTHOR("Adams Xu <Adams.xu@azwave.com.cn>"); MODULE_DESCRIPTION("Driver for AZUREWAVE DVB-S/S2 USB2.0 (AZ6027)"); MODULE_VERSION("1.0"); MODULE_LICENSE("GPL");
gpl-2.0
darkknight1812/d851_kernel
drivers/watchdog/alim7101_wdt.c
7382
10763
/* * ALi M7101 PMU Computer Watchdog Timer driver * * Based on w83877f_wdt.c by Scott Jennings <linuxdrivers@oro.net> * and the Cobalt kernel WDT timer driver by Tim Hockin * <thockin@cobaltnet.com> * * (c)2002 Steve Hill <steve@navaho.co.uk> * * This WDT driver is different from most other Linux WDT * drivers in that the driver will ping the watchdog by itself, * because this particular WDT has a very short timeout (1.6 * seconds) and it would be insane to count on any userspace * daemon always getting scheduled within that time frame. * * Additions: * Aug 23, 2004 - Added use_gpio module parameter for use on revision a1d PMUs * found on very old cobalt hardware. * -- Mike Waychison <michael.waychison@sun.com> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/types.h> #include <linux/timer.h> #include <linux/miscdevice.h> #include <linux/watchdog.h> #include <linux/ioport.h> #include <linux/notifier.h> #include <linux/reboot.h> #include <linux/init.h> #include <linux/fs.h> #include <linux/pci.h> #include <linux/io.h> #include <linux/uaccess.h> #define WDT_ENABLE 0x9C #define WDT_DISABLE 0x8C #define ALI_7101_WDT 0x92 #define ALI_7101_GPIO 0x7D #define ALI_7101_GPIO_O 0x7E #define ALI_WDT_ARM 0x01 /* * We're going to use a 1 second timeout. * If we reset the watchdog every ~250ms we should be safe. */ #define WDT_INTERVAL (HZ/4+1) /* * We must not require too good response from the userspace daemon. * Here we require the userspace daemon to send us a heartbeat * char to /dev/watchdog every 30 seconds. */ #define WATCHDOG_TIMEOUT 30 /* 30 sec default timeout */ /* in seconds, will be multiplied by HZ to get seconds to wait for a ping */ static int timeout = WATCHDOG_TIMEOUT; module_param(timeout, int, 0); MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds. 
(1<=timeout<=3600, default=" __MODULE_STRING(WATCHDOG_TIMEOUT) ")"); static int use_gpio; /* Use the pic (for a1d revision alim7101) */ module_param(use_gpio, int, 0); MODULE_PARM_DESC(use_gpio, "Use the gpio watchdog (required by old cobalt boards)."); static void wdt_timer_ping(unsigned long); static DEFINE_TIMER(timer, wdt_timer_ping, 0, 1); static unsigned long next_heartbeat; static unsigned long wdt_is_open; static char wdt_expect_close; static struct pci_dev *alim7101_pmu; static bool nowayout = WATCHDOG_NOWAYOUT; module_param(nowayout, bool, 0); MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); /* * Whack the dog */ static void wdt_timer_ping(unsigned long data) { /* If we got a heartbeat pulse within the WDT_US_INTERVAL * we agree to ping the WDT */ char tmp; if (time_before(jiffies, next_heartbeat)) { /* Ping the WDT (this is actually a disarm/arm sequence) */ pci_read_config_byte(alim7101_pmu, 0x92, &tmp); pci_write_config_byte(alim7101_pmu, ALI_7101_WDT, (tmp & ~ALI_WDT_ARM)); pci_write_config_byte(alim7101_pmu, ALI_7101_WDT, (tmp | ALI_WDT_ARM)); if (use_gpio) { pci_read_config_byte(alim7101_pmu, ALI_7101_GPIO_O, &tmp); pci_write_config_byte(alim7101_pmu, ALI_7101_GPIO_O, tmp | 0x20); pci_write_config_byte(alim7101_pmu, ALI_7101_GPIO_O, tmp & ~0x20); } } else { pr_warn("Heartbeat lost! 
Will not ping the watchdog\n"); } /* Re-set the timer interval */ mod_timer(&timer, jiffies + WDT_INTERVAL); } /* * Utility routines */ static void wdt_change(int writeval) { char tmp; pci_read_config_byte(alim7101_pmu, ALI_7101_WDT, &tmp); if (writeval == WDT_ENABLE) { pci_write_config_byte(alim7101_pmu, ALI_7101_WDT, (tmp | ALI_WDT_ARM)); if (use_gpio) { pci_read_config_byte(alim7101_pmu, ALI_7101_GPIO_O, &tmp); pci_write_config_byte(alim7101_pmu, ALI_7101_GPIO_O, tmp & ~0x20); } } else { pci_write_config_byte(alim7101_pmu, ALI_7101_WDT, (tmp & ~ALI_WDT_ARM)); if (use_gpio) { pci_read_config_byte(alim7101_pmu, ALI_7101_GPIO_O, &tmp); pci_write_config_byte(alim7101_pmu, ALI_7101_GPIO_O, tmp | 0x20); } } } static void wdt_startup(void) { next_heartbeat = jiffies + (timeout * HZ); /* We must enable before we kick off the timer in case the timer occurs as we ping it */ wdt_change(WDT_ENABLE); /* Start the timer */ mod_timer(&timer, jiffies + WDT_INTERVAL); pr_info("Watchdog timer is now enabled\n"); } static void wdt_turnoff(void) { /* Stop the timer */ del_timer_sync(&timer); wdt_change(WDT_DISABLE); pr_info("Watchdog timer is now disabled...\n"); } static void wdt_keepalive(void) { /* user land ping */ next_heartbeat = jiffies + (timeout * HZ); } /* * /dev/watchdog handling */ static ssize_t fop_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { /* See if we got the magic character 'V' and reload the timer */ if (count) { if (!nowayout) { size_t ofs; /* note: just in case someone wrote the magic character * five months ago... */ wdt_expect_close = 0; /* now scan */ for (ofs = 0; ofs != count; ofs++) { char c; if (get_user(c, buf + ofs)) return -EFAULT; if (c == 'V') wdt_expect_close = 42; } } /* someone wrote to us, we should restart timer */ wdt_keepalive(); } return count; } static int fop_open(struct inode *inode, struct file *file) { /* Just in case we're already talking to someone... 
*/ if (test_and_set_bit(0, &wdt_is_open)) return -EBUSY; /* Good, fire up the show */ wdt_startup(); return nonseekable_open(inode, file); } static int fop_close(struct inode *inode, struct file *file) { if (wdt_expect_close == 42) wdt_turnoff(); else { /* wim: shouldn't there be a: del_timer(&timer); */ pr_crit("device file closed unexpectedly. Will not stop the WDT!\n"); } clear_bit(0, &wdt_is_open); wdt_expect_close = 0; return 0; } static long fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { void __user *argp = (void __user *)arg; int __user *p = argp; static const struct watchdog_info ident = { .options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE, .firmware_version = 1, .identity = "ALiM7101", }; switch (cmd) { case WDIOC_GETSUPPORT: return copy_to_user(argp, &ident, sizeof(ident)) ? -EFAULT : 0; case WDIOC_GETSTATUS: case WDIOC_GETBOOTSTATUS: return put_user(0, p); case WDIOC_SETOPTIONS: { int new_options, retval = -EINVAL; if (get_user(new_options, p)) return -EFAULT; if (new_options & WDIOS_DISABLECARD) { wdt_turnoff(); retval = 0; } if (new_options & WDIOS_ENABLECARD) { wdt_startup(); retval = 0; } return retval; } case WDIOC_KEEPALIVE: wdt_keepalive(); return 0; case WDIOC_SETTIMEOUT: { int new_timeout; if (get_user(new_timeout, p)) return -EFAULT; /* arbitrary upper limit */ if (new_timeout < 1 || new_timeout > 3600) return -EINVAL; timeout = new_timeout; wdt_keepalive(); /* Fall through */ } case WDIOC_GETTIMEOUT: return put_user(timeout, p); default: return -ENOTTY; } } static const struct file_operations wdt_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .write = fop_write, .open = fop_open, .release = fop_close, .unlocked_ioctl = fop_ioctl, }; static struct miscdevice wdt_miscdev = { .minor = WATCHDOG_MINOR, .name = "watchdog", .fops = &wdt_fops, }; /* * Notifier for system down */ static int wdt_notify_sys(struct notifier_block *this, unsigned long code, void *unused) { if (code == SYS_DOWN || code == 
SYS_HALT) wdt_turnoff(); if (code == SYS_RESTART) { /* * Cobalt devices have no way of rebooting themselves other * than getting the watchdog to pull reset, so we restart the * watchdog on reboot with no heartbeat */ wdt_change(WDT_ENABLE); pr_info("Watchdog timer is now enabled with no heartbeat - should reboot in ~1 second\n"); } return NOTIFY_DONE; } /* * The WDT needs to learn about soft shutdowns in order to * turn the timebomb registers off. */ static struct notifier_block wdt_notifier = { .notifier_call = wdt_notify_sys, }; static void __exit alim7101_wdt_unload(void) { wdt_turnoff(); /* Deregister */ misc_deregister(&wdt_miscdev); unregister_reboot_notifier(&wdt_notifier); pci_dev_put(alim7101_pmu); } static int __init alim7101_wdt_init(void) { int rc = -EBUSY; struct pci_dev *ali1543_south; char tmp; pr_info("Steve Hill <steve@navaho.co.uk>\n"); alim7101_pmu = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101, NULL); if (!alim7101_pmu) { pr_info("ALi M7101 PMU not present - WDT not set\n"); return -EBUSY; } /* Set the WDT in the PMU to 1 second */ pci_write_config_byte(alim7101_pmu, ALI_7101_WDT, 0x02); ali1543_south = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL); if (!ali1543_south) { pr_info("ALi 1543 South-Bridge not present - WDT not set\n"); goto err_out; } pci_read_config_byte(ali1543_south, 0x5e, &tmp); pci_dev_put(ali1543_south); if ((tmp & 0x1e) == 0x00) { if (!use_gpio) { pr_info("Detected old alim7101 revision 'a1d'. If this is a cobalt board, set the 'use_gpio' module parameter.\n"); goto err_out; } nowayout = 1; } else if ((tmp & 0x1e) != 0x12 && (tmp & 0x1e) != 0x00) { pr_info("ALi 1543 South-Bridge does not have the correct revision number (???1001?) 
- WDT not set\n"); goto err_out; } if (timeout < 1 || timeout > 3600) { /* arbitrary upper limit */ timeout = WATCHDOG_TIMEOUT; pr_info("timeout value must be 1 <= x <= 3600, using %d\n", timeout); } rc = register_reboot_notifier(&wdt_notifier); if (rc) { pr_err("cannot register reboot notifier (err=%d)\n", rc); goto err_out; } rc = misc_register(&wdt_miscdev); if (rc) { pr_err("cannot register miscdev on minor=%d (err=%d)\n", wdt_miscdev.minor, rc); goto err_out_reboot; } if (nowayout) __module_get(THIS_MODULE); pr_info("WDT driver for ALi M7101 initialised. timeout=%d sec (nowayout=%d)\n", timeout, nowayout); return 0; err_out_reboot: unregister_reboot_notifier(&wdt_notifier); err_out: pci_dev_put(alim7101_pmu); return rc; } module_init(alim7101_wdt_init); module_exit(alim7101_wdt_unload); static DEFINE_PCI_DEVICE_TABLE(alim7101_pci_tbl) __used = { { PCI_DEVICE(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533) }, { PCI_DEVICE(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101) }, { } }; MODULE_DEVICE_TABLE(pci, alim7101_pci_tbl); MODULE_AUTHOR("Steve Hill"); MODULE_DESCRIPTION("ALi M7101 PMU Computer Watchdog Timer driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
gpl-2.0
MROM/android_kernel_bn_encore
arch/sh/drivers/dma/dma-api.c
7894
9355
/*
 * arch/sh/drivers/dma/dma-api.c
 *
 * SuperH-specific DMA management API
 *
 * Copyright (C) 2003, 2004, 2005  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/proc_fs.h>
#include <linux/list.h>
#include <linux/platform_device.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <asm/dma.h>

DEFINE_SPINLOCK(dma_spin_lock);
static LIST_HEAD(registered_dmac_list);

/*
 * Find the DMAC that owns virtual channel number 'chan', or NULL if no
 * registered controller covers that range.
 */
struct dma_info *get_dma_info(unsigned int chan)
{
	struct dma_info *info;

	/*
	 * Look for each DMAC's range to determine who the owner of
	 * the channel is.
	 */
	list_for_each_entry(info, &registered_dmac_list, list) {
		if ((chan <  info->first_vchannel_nr) ||
		    (chan >= info->first_vchannel_nr + info->nr_channels))
			continue;

		return info;
	}

	return NULL;
}
EXPORT_SYMBOL(get_dma_info);

/*
 * Look a DMAC up by name. If dmac_name is NULL the first registered DMAC
 * is returned (the strcmp guard is skipped).
 */
struct dma_info *get_dma_info_by_name(const char *dmac_name)
{
	struct dma_info *info;

	list_for_each_entry(info, &registered_dmac_list, list) {
		if (dmac_name && (strcmp(dmac_name, info->name) != 0))
			continue;
		else
			return info;
	}

	return NULL;
}
EXPORT_SYMBOL(get_dma_info_by_name);

/* Total number of channels across all registered DMACs. */
static unsigned int get_nr_channels(void)
{
	struct dma_info *info;
	unsigned int nr = 0;

	if (unlikely(list_empty(&registered_dmac_list)))
		return nr;

	list_for_each_entry(info, &registered_dmac_list, list)
		nr += info->nr_channels;

	return nr;
}

/*
 * Map a virtual channel number to its struct dma_channel.
 *
 * NOTE(review): returns ERR_PTR(-EINVAL) when no DMAC owns the channel but
 * NULL when the DMAC exists and the vchan is simply not found -- callers
 * must treat both as failure.
 */
struct dma_channel *get_dma_channel(unsigned int chan)
{
	struct dma_info *info = get_dma_info(chan);
	struct dma_channel *channel;
	int i;

	if (unlikely(!info))
		return ERR_PTR(-EINVAL);

	for (i = 0; i < info->nr_channels; i++) {
		channel = &info->channels[i];
		if (channel->vchan == chan)
			return channel;
	}

	return NULL;
}
EXPORT_SYMBOL(get_dma_channel);

/*
 * Remaining transfer count for 'chan', or 0 if the controller does not
 * report residue (or the channel cannot be resolved).
 */
int get_dma_residue(unsigned int chan)
{
	struct dma_info *info = get_dma_info(chan);
	struct dma_channel *channel = get_dma_channel(chan);

	/* Guard against an unknown channel before touching info/channel */
	if (unlikely(!info || IS_ERR_OR_NULL(channel)))
		return 0;

	if (info->ops->get_residue)
		return info->ops->get_residue(channel);

	return 0;
}
EXPORT_SYMBOL(get_dma_residue);

/* Return 1 if 'needle' appears in the NULL-terminated 'haystack'. */
static int search_cap(const char **haystack, const char *needle)
{
	const char **p;

	for (p = haystack; *p; p++)
		if (strcmp(*p, needle) == 0)
			return 1;

	return 0;
}

/**
 * request_dma_bycap - Allocate a DMA channel based on its capabilities
 * @dmac: List of DMA controllers to search
 * @caps: List of capabilities
 *
 * Search all channels of all DMA controllers to find a channel which
 * matches the requested capabilities. The result is the channel
 * number if a match is found, or %-ENODEV if no match is found.
 *
 * Note that not all DMA controllers export capabilities, in which
 * case they can never be allocated using this API, and so
 * request_dma() must be used specifying the channel number.
 */
int request_dma_bycap(const char **dmac, const char **caps, const char *dev_id)
{
	unsigned int found = 0;
	struct dma_info *info;
	const char **p;
	int i;

	BUG_ON(!dmac || !caps);

	list_for_each_entry(info, &registered_dmac_list, list)
		if (strcmp(*dmac, info->name) == 0) {
			found = 1;
			break;
		}

	if (!found)
		return -ENODEV;

	for (i = 0; i < info->nr_channels; i++) {
		struct dma_channel *channel = &info->channels[i];

		if (unlikely(!channel->caps))
			continue;

		for (p = caps; *p; p++) {
			if (!search_cap(channel->caps, *p))
				break;
			if (request_dma(channel->chan, dev_id) == 0)
				return channel->chan;
		}
	}

	return -EINVAL;
}
EXPORT_SYMBOL(request_dma_bycap);

/*
 * Grab the first idle channel of the first registered DMAC for 'dev_id'.
 * Returns the channel number, -ENODEV when there is no DMAC or no idle
 * channel, -ENOSYS when the DMAC has no request op, or the op's error.
 */
int dmac_search_free_channel(const char *dev_id)
{
	struct dma_channel *channel = NULL;
	struct dma_info *info = get_dma_info(0);
	int i;

	if (unlikely(!info))
		return -ENODEV;

	for (i = 0; i < info->nr_channels; i++) {
		channel = &info->channels[i];
		if (atomic_read(&channel->busy) == 0)
			break;
	}

	/* Every channel was busy: don't hand out a channel in use */
	if (i == info->nr_channels)
		return -ENODEV;

	if (info->ops->request) {
		int result = info->ops->request(channel);
		if (result)
			return result;

		atomic_set(&channel->busy, 1);
		return channel->chan;
	}

	return -ENOSYS;
}

/*
 * Claim virtual channel 'chan' for 'dev_id'. Returns 0 on success,
 * -EINVAL for an unknown channel, -EBUSY if already claimed, or the
 * controller request op's error (channel released again on failure).
 */
int request_dma(unsigned int chan, const char *dev_id)
{
	struct dma_channel *channel = NULL;
	struct dma_info *info = get_dma_info(chan);
	int result;

	channel = get_dma_channel(chan);
	/* get_dma_channel() can hand back NULL or an ERR_PTR */
	if (unlikely(!info || IS_ERR_OR_NULL(channel)))
		return -EINVAL;

	if (atomic_xchg(&channel->busy, 1))
		return -EBUSY;

	strlcpy(channel->dev_id, dev_id, sizeof(channel->dev_id));

	if (info->ops->request) {
		result = info->ops->request(channel);
		if (result)
			atomic_set(&channel->busy, 0);

		return result;
	}

	return 0;
}
EXPORT_SYMBOL(request_dma);

/* Release a previously requested channel and mark it idle. */
void free_dma(unsigned int chan)
{
	struct dma_info *info = get_dma_info(chan);
	struct dma_channel *channel = get_dma_channel(chan);

	if (info->ops->free)
		info->ops->free(channel);

	atomic_set(&channel->busy, 0);
}
EXPORT_SYMBOL(free_dma);

/*
 * Block until the transfer on 'chan' completes: sleep on the channel's
 * wait queue when it signals transfer-end interrupts, busy-wait otherwise.
 */
void dma_wait_for_completion(unsigned int chan)
{
	struct dma_info *info = get_dma_info(chan);
	struct dma_channel *channel = get_dma_channel(chan);

	if (channel->flags & DMA_TEI_CAPABLE) {
		wait_event(channel->wait_queue,
			   (info->ops->get_residue(channel) == 0));
		return;
	}

	while (info->ops->get_residue(channel))
		cpu_relax();
}
EXPORT_SYMBOL(dma_wait_for_completion);

/*
 * Attach a capability list to each channel of the named DMAC. The caps
 * array must be ordered to match the DMAC's physical channel numbering.
 */
int register_chan_caps(const char *dmac, struct dma_chan_caps *caps)
{
	struct dma_info *info;
	unsigned int found = 0;
	int i;

	list_for_each_entry(info, &registered_dmac_list, list)
		if (strcmp(dmac, info->name) == 0) {
			found = 1;
			break;
		}

	if (unlikely(!found))
		return -ENODEV;

	for (i = 0; i < info->nr_channels; i++, caps++) {
		struct dma_channel *channel;

		if ((info->first_channel_nr + i) != caps->ch_num)
			return -EINVAL;

		channel = &info->channels[i];
		channel->caps = caps->caplist;
	}

	return 0;
}
EXPORT_SYMBOL(register_chan_caps);

/* Pass controller-specific configuration flags down to the DMAC. */
void dma_configure_channel(unsigned int chan, unsigned long flags)
{
	struct dma_info *info = get_dma_info(chan);
	struct dma_channel *channel = get_dma_channel(chan);

	if (info->ops->configure)
		info->ops->configure(channel, flags);
}
EXPORT_SYMBOL(dma_configure_channel);

/* Program a transfer (src, dst, size, mode) and kick it off. */
int dma_xfer(unsigned int chan, unsigned long from, unsigned long to,
	     size_t size, unsigned int mode)
{
	struct dma_info *info = get_dma_info(chan);
	struct dma_channel *channel = get_dma_channel(chan);

	channel->sar	= from;
	channel->dar	= to;
	channel->count	= size;
	channel->mode	= mode;

	return info->ops->xfer(channel);
}
EXPORT_SYMBOL(dma_xfer);

/* Invoke a controller-specific extension operation, if any. */
int dma_extend(unsigned int chan, unsigned long op, void *param)
{
	struct dma_info *info = get_dma_info(chan);
	struct dma_channel *channel = get_dma_channel(chan);

	if (info->ops->extend)
		return info->ops->extend(channel, op, param);

	return -ENOSYS;
}
EXPORT_SYMBOL(dma_extend);

/* /proc/dma read handler: one line per configured channel. */
static int dma_read_proc(char *buf, char **start, off_t off,
			 int len, int *eof, void *data)
{
	struct dma_info *info;
	char *p = buf;

	if (list_empty(&registered_dmac_list))
		return 0;

	/*
	 * Iterate over each registered DMAC
	 */
	list_for_each_entry(info, &registered_dmac_list, list) {
		int i;

		/*
		 * Iterate over each channel
		 */
		for (i = 0; i < info->nr_channels; i++) {
			struct dma_channel *channel = info->channels + i;

			if (!(channel->flags & DMA_CONFIGURED))
				continue;

			p += sprintf(p, "%2d: %14s    %s\n", i,
				     info->name, channel->dev_id);
		}
	}

	return p - buf;
}

/*
 * Register a DMAC with the API: allocate per-channel state (unless the
 * controller pre-configured its channels), assign virtual channel numbers
 * after the ones already registered, and create sysfs entries.
 */
int register_dmac(struct dma_info *info)
{
	unsigned int total_channels, i;

	INIT_LIST_HEAD(&info->list);

	printk(KERN_INFO "DMA: Registering %s handler (%d channel%s).\n",
	       info->name, info->nr_channels, info->nr_channels > 1 ? "s" : "");

	BUG_ON((info->flags & DMAC_CHANNELS_CONFIGURED) && !info->channels);

	info->pdev = platform_device_register_simple(info->name, -1,
						     NULL, 0);
	if (IS_ERR(info->pdev))
		return PTR_ERR(info->pdev);

	/*
	 * Don't touch pre-configured channels
	 */
	if (!(info->flags & DMAC_CHANNELS_CONFIGURED)) {
		unsigned int size;

		size = sizeof(struct dma_channel) * info->nr_channels;

		info->channels = kzalloc(size, GFP_KERNEL);
		if (!info->channels)
			return -ENOMEM;
	}

	total_channels = get_nr_channels();
	info->first_vchannel_nr = total_channels;
	for (i = 0; i < info->nr_channels; i++) {
		struct dma_channel *chan = &info->channels[i];

		atomic_set(&chan->busy, 0);

		chan->chan  = info->first_channel_nr + i;
		/* NOTE(review): vchan also carries first_channel_nr, so it
		 * may differ from first_vchannel_nr + i -- confirm intent */
		chan->vchan = info->first_channel_nr + i + total_channels;

		memcpy(chan->dev_id, "Unused", 7);

		if (info->flags & DMAC_CHANNELS_TEI_CAPABLE)
			chan->flags |= DMA_TEI_CAPABLE;

		init_waitqueue_head(&chan->wait_queue);
		dma_create_sysfs_files(chan, info);
	}

	list_add(&info->list, &registered_dmac_list);

	return 0;
}
EXPORT_SYMBOL(register_dmac);

/* Undo register_dmac(): remove sysfs files, free channels we allocated,
 * drop the DMAC from the list and unregister its platform device. */
void unregister_dmac(struct dma_info *info)
{
	unsigned int i;

	for (i = 0; i < info->nr_channels; i++)
		dma_remove_sysfs_files(info->channels + i, info);

	if (!(info->flags & DMAC_CHANNELS_CONFIGURED))
		kfree(info->channels);

	list_del(&info->list);
	platform_device_unregister(info->pdev);
}
EXPORT_SYMBOL(unregister_dmac);

static int __init dma_api_init(void)
{
	printk(KERN_NOTICE "DMA: Registering DMA API.\n");
	return create_proc_read_entry("dma", 0, 0, dma_read_proc, 0)
		   ? 0 : -ENOMEM;
}
subsys_initcall(dma_api_init);

MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>");
MODULE_DESCRIPTION("DMA API for SuperH");
MODULE_LICENSE("GPL");
gpl-2.0
RaymanFX/kernel_samsung_lt03wifi
fs/btrfs/tree-defrag.c
8150
3604
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/sched.h>
#include "ctree.h"
#include "disk-io.h"
#include "print-tree.h"
#include "transaction.h"
#include "locking.h"

/* defrag all the leaves in a given btree.  If cache_only == 1, don't read
 * things from disk, otherwise read all the leaves and try to get key order to
 * better reflect disk order
 *
 * Progress is carried across calls in root->defrag_progress; a return of
 * -EAGAIN means "not finished, call again", 0 means done (or nothing to do).
 */
int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
			struct btrfs_root *root, int cache_only)
{
	struct btrfs_path *path = NULL;
	struct btrfs_key key;
	int ret = 0;
	int wret;
	int level;
	int is_extent = 0;
	int next_key_ret = 0;
	u64 last_ret = 0;
	u64 min_trans = 0;

	/* cache_only mode is a no-op in this version: bail out immediately */
	if (cache_only)
		goto out;

	if (root->fs_info->extent_root == root) {
		/*
		 * there's recursion here right now in the tree locking,
		 * we can't defrag the extent root without deadlock
		 */
		goto out;
	}

	/* non-reference-counted roots are not defragged */
	if (root->ref_cows == 0 && !is_extent)
		goto out;

	/* on SSDs, on-disk key order doesn't matter: skip the work */
	if (btrfs_test_opt(root, SSD))
		goto out;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	level = btrfs_header_level(root->node);

	/* a root that is itself a leaf has nothing to reorder */
	if (level == 0)
		goto out;

	if (root->defrag_progress.objectid == 0) {
		/* first pass: remember the last key in the root node so we
		 * can tell later whether the cursor ran past the end */
		struct extent_buffer *root_node;
		u32 nritems;

		root_node = btrfs_lock_root_node(root);
		btrfs_set_lock_blocking(root_node);
		nritems = btrfs_header_nritems(root_node);
		root->defrag_max.objectid = 0;
		/* from above we know this is not a leaf */
		btrfs_node_key_to_cpu(root_node, &root->defrag_max,
				      nritems - 1);
		btrfs_tree_unlock(root_node);
		free_extent_buffer(root_node);
		memset(&key, 0, sizeof(key));
	} else {
		/* resume from where the previous call stopped */
		memcpy(&key, &root->defrag_progress, sizeof(key));
	}

	path->keep_locks = 1;
	if (cache_only)
		min_trans = root->defrag_trans_start;

	/* find the next leaf at/after the cursor key */
	ret = btrfs_search_forward(root, &key, NULL, path,
				   cache_only, min_trans);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		/* nothing left past the cursor: we're done */
		ret = 0;
		goto out;
	}
	btrfs_release_path(path);
	/* re-search with the transaction handle so we hold write locks */
	wret = btrfs_search_slot(trans, root, &key, path, 0, 1);

	if (wret < 0) {
		ret = wret;
		goto out;
	}
	if (!path->nodes[1]) {
		/* no level-1 node means the tree is only a leaf: done */
		ret = 0;
		goto out;
	}
	/* point past the last slot so find_next_key looks at the next node */
	path->slots[1] = btrfs_header_nritems(path->nodes[1]);
	next_key_ret = btrfs_find_next_key(root, path, &key, 1, cache_only,
					   min_trans);
	/* reallocate the children of this node to improve disk order */
	ret = btrfs_realloc_node(trans, root,
				 path->nodes[1], 0,
				 cache_only, &last_ret,
				 &root->defrag_progress);
	if (ret) {
		WARN_ON(ret == -EAGAIN);
		goto out;
	}
	if (next_key_ret == 0) {
		/* more tree to process: save the cursor and ask the caller
		 * to come back */
		memcpy(&root->defrag_progress, &key, sizeof(key));
		ret = -EAGAIN;
	}
out:
	if (path)
		btrfs_free_path(path);
	if (ret == -EAGAIN) {
		/* if the cursor has moved past defrag_max we actually
		 * finished; downgrade -EAGAIN to done */
		if (root->defrag_max.objectid > root->defrag_progress.objectid)
			goto done;
		if (root->defrag_max.type > root->defrag_progress.type)
			goto done;
		if (root->defrag_max.offset > root->defrag_progress.offset)
			goto done;
		ret = 0;
	}
done:
	if (ret != -EAGAIN) {
		/* finished (or fatal error): reset the cursor and stamp the
		 * transaction we completed in */
		memset(&root->defrag_progress, 0,
		       sizeof(root->defrag_progress));
		root->defrag_trans_start = trans->transid;
	}
	return ret;
}
gpl-2.0
WhiteNeo-/NeoKernel-L
drivers/net/wireless/b43/debugfs.c
9174
19466
/* Broadcom B43 wireless driver debugfs driver debugging code Copyright (c) 2005-2007 Michael Buesch <m@bues.ch> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; see the file COPYING. If not, write to the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, Boston, MA 02110-1301, USA. */ #include <linux/fs.h> #include <linux/debugfs.h> #include <linux/slab.h> #include <linux/netdevice.h> #include <linux/pci.h> #include <linux/mutex.h> #include "b43.h" #include "main.h" #include "debugfs.h" #include "dma.h" #include "xmit.h" /* The root directory. */ static struct dentry *rootdir; struct b43_debugfs_fops { ssize_t (*read)(struct b43_wldev *dev, char *buf, size_t bufsize); int (*write)(struct b43_wldev *dev, const char *buf, size_t count); struct file_operations fops; /* Offset of struct b43_dfs_file in struct b43_dfsentry */ size_t file_struct_offset; }; static inline struct b43_dfs_file *fops_to_dfs_file(struct b43_wldev *dev, const struct b43_debugfs_fops *dfops) { void *p; p = dev->dfsentry; p += dfops->file_struct_offset; return p; } #define fappend(fmt, x...) \ do { \ if (bufsize - count) \ count += snprintf(buf + count, \ bufsize - count, \ fmt , ##x); \ else \ printk(KERN_ERR "b43: fappend overflow\n"); \ } while (0) /* The biggest address values for SHM access from the debugfs files. 
*/ #define B43_MAX_SHM_ROUTING 4 #define B43_MAX_SHM_ADDR 0xFFFF static ssize_t shm16read__read_file(struct b43_wldev *dev, char *buf, size_t bufsize) { ssize_t count = 0; unsigned int routing, addr; u16 val; routing = dev->dfsentry->shm16read_routing_next; addr = dev->dfsentry->shm16read_addr_next; if ((routing > B43_MAX_SHM_ROUTING) || (addr > B43_MAX_SHM_ADDR)) return -EDESTADDRREQ; val = b43_shm_read16(dev, routing, addr); fappend("0x%04X\n", val); return count; } static int shm16read__write_file(struct b43_wldev *dev, const char *buf, size_t count) { unsigned int routing, addr; int res; res = sscanf(buf, "0x%X 0x%X", &routing, &addr); if (res != 2) return -EINVAL; if (routing > B43_MAX_SHM_ROUTING) return -EADDRNOTAVAIL; if (addr > B43_MAX_SHM_ADDR) return -EADDRNOTAVAIL; if (routing == B43_SHM_SHARED) { if ((addr % 2) != 0) return -EADDRNOTAVAIL; } dev->dfsentry->shm16read_routing_next = routing; dev->dfsentry->shm16read_addr_next = addr; return 0; } static int shm16write__write_file(struct b43_wldev *dev, const char *buf, size_t count) { unsigned int routing, addr, mask, set; u16 val; int res; res = sscanf(buf, "0x%X 0x%X 0x%X 0x%X", &routing, &addr, &mask, &set); if (res != 4) return -EINVAL; if (routing > B43_MAX_SHM_ROUTING) return -EADDRNOTAVAIL; if (addr > B43_MAX_SHM_ADDR) return -EADDRNOTAVAIL; if (routing == B43_SHM_SHARED) { if ((addr % 2) != 0) return -EADDRNOTAVAIL; } if ((mask > 0xFFFF) || (set > 0xFFFF)) return -E2BIG; if (mask == 0) val = 0; else val = b43_shm_read16(dev, routing, addr); val &= mask; val |= set; b43_shm_write16(dev, routing, addr, val); return 0; } static ssize_t shm32read__read_file(struct b43_wldev *dev, char *buf, size_t bufsize) { ssize_t count = 0; unsigned int routing, addr; u32 val; routing = dev->dfsentry->shm32read_routing_next; addr = dev->dfsentry->shm32read_addr_next; if ((routing > B43_MAX_SHM_ROUTING) || (addr > B43_MAX_SHM_ADDR)) return -EDESTADDRREQ; val = b43_shm_read32(dev, routing, addr); fappend("0x%08X\n", 
val); return count; } static int shm32read__write_file(struct b43_wldev *dev, const char *buf, size_t count) { unsigned int routing, addr; int res; res = sscanf(buf, "0x%X 0x%X", &routing, &addr); if (res != 2) return -EINVAL; if (routing > B43_MAX_SHM_ROUTING) return -EADDRNOTAVAIL; if (addr > B43_MAX_SHM_ADDR) return -EADDRNOTAVAIL; if (routing == B43_SHM_SHARED) { if ((addr % 2) != 0) return -EADDRNOTAVAIL; } dev->dfsentry->shm32read_routing_next = routing; dev->dfsentry->shm32read_addr_next = addr; return 0; } static int shm32write__write_file(struct b43_wldev *dev, const char *buf, size_t count) { unsigned int routing, addr, mask, set; u32 val; int res; res = sscanf(buf, "0x%X 0x%X 0x%X 0x%X", &routing, &addr, &mask, &set); if (res != 4) return -EINVAL; if (routing > B43_MAX_SHM_ROUTING) return -EADDRNOTAVAIL; if (addr > B43_MAX_SHM_ADDR) return -EADDRNOTAVAIL; if (routing == B43_SHM_SHARED) { if ((addr % 2) != 0) return -EADDRNOTAVAIL; } if ((mask > 0xFFFFFFFF) || (set > 0xFFFFFFFF)) return -E2BIG; if (mask == 0) val = 0; else val = b43_shm_read32(dev, routing, addr); val &= mask; val |= set; b43_shm_write32(dev, routing, addr, val); return 0; } /* The biggest MMIO address that we allow access to from the debugfs files. 
*/ #define B43_MAX_MMIO_ACCESS (0xF00 - 1) static ssize_t mmio16read__read_file(struct b43_wldev *dev, char *buf, size_t bufsize) { ssize_t count = 0; unsigned int addr; u16 val; addr = dev->dfsentry->mmio16read_next; if (addr > B43_MAX_MMIO_ACCESS) return -EDESTADDRREQ; val = b43_read16(dev, addr); fappend("0x%04X\n", val); return count; } static int mmio16read__write_file(struct b43_wldev *dev, const char *buf, size_t count) { unsigned int addr; int res; res = sscanf(buf, "0x%X", &addr); if (res != 1) return -EINVAL; if (addr > B43_MAX_MMIO_ACCESS) return -EADDRNOTAVAIL; if ((addr % 2) != 0) return -EINVAL; dev->dfsentry->mmio16read_next = addr; return 0; } static int mmio16write__write_file(struct b43_wldev *dev, const char *buf, size_t count) { unsigned int addr, mask, set; int res; u16 val; res = sscanf(buf, "0x%X 0x%X 0x%X", &addr, &mask, &set); if (res != 3) return -EINVAL; if (addr > B43_MAX_MMIO_ACCESS) return -EADDRNOTAVAIL; if ((mask > 0xFFFF) || (set > 0xFFFF)) return -E2BIG; if ((addr % 2) != 0) return -EINVAL; if (mask == 0) val = 0; else val = b43_read16(dev, addr); val &= mask; val |= set; b43_write16(dev, addr, val); return 0; } static ssize_t mmio32read__read_file(struct b43_wldev *dev, char *buf, size_t bufsize) { ssize_t count = 0; unsigned int addr; u32 val; addr = dev->dfsentry->mmio32read_next; if (addr > B43_MAX_MMIO_ACCESS) return -EDESTADDRREQ; val = b43_read32(dev, addr); fappend("0x%08X\n", val); return count; } static int mmio32read__write_file(struct b43_wldev *dev, const char *buf, size_t count) { unsigned int addr; int res; res = sscanf(buf, "0x%X", &addr); if (res != 1) return -EINVAL; if (addr > B43_MAX_MMIO_ACCESS) return -EADDRNOTAVAIL; if ((addr % 4) != 0) return -EINVAL; dev->dfsentry->mmio32read_next = addr; return 0; } static int mmio32write__write_file(struct b43_wldev *dev, const char *buf, size_t count) { unsigned int addr, mask, set; int res; u32 val; res = sscanf(buf, "0x%X 0x%X 0x%X", &addr, &mask, &set); if (res != 3) 
return -EINVAL; if (addr > B43_MAX_MMIO_ACCESS) return -EADDRNOTAVAIL; if ((mask > 0xFFFFFFFF) || (set > 0xFFFFFFFF)) return -E2BIG; if ((addr % 4) != 0) return -EINVAL; if (mask == 0) val = 0; else val = b43_read32(dev, addr); val &= mask; val |= set; b43_write32(dev, addr, val); return 0; } static ssize_t txstat_read_file(struct b43_wldev *dev, char *buf, size_t bufsize) { struct b43_txstatus_log *log = &dev->dfsentry->txstatlog; ssize_t count = 0; int i, idx; struct b43_txstatus *stat; if (log->end < 0) { fappend("Nothing transmitted, yet\n"); goto out; } fappend("b43 TX status reports:\n\n" "index | cookie | seq | phy_stat | frame_count | " "rts_count | supp_reason | pm_indicated | " "intermediate | for_ampdu | acked\n" "---\n"); i = log->end + 1; idx = 0; while (1) { if (i == B43_NR_LOGGED_TXSTATUS) i = 0; stat = &(log->log[i]); if (stat->cookie) { fappend("%03d | " "0x%04X | 0x%04X | 0x%02X | " "0x%X | 0x%X | " "%u | %u | " "%u | %u | %u\n", idx, stat->cookie, stat->seq, stat->phy_stat, stat->frame_count, stat->rts_count, stat->supp_reason, stat->pm_indicated, stat->intermediate, stat->for_ampdu, stat->acked); idx++; } if (i == log->end) break; i++; } out: return count; } static int restart_write_file(struct b43_wldev *dev, const char *buf, size_t count) { int err = 0; if (count > 0 && buf[0] == '1') { b43_controller_restart(dev, "manually restarted"); } else err = -EINVAL; return err; } static unsigned long calc_expire_secs(unsigned long now, unsigned long time, unsigned long expire) { expire = time + expire; if (time_after(now, expire)) return 0; /* expired */ if (expire < now) { /* jiffies wrapped */ expire -= MAX_JIFFY_OFFSET; now -= MAX_JIFFY_OFFSET; } B43_WARN_ON(expire < now); return (expire - now) / HZ; } static ssize_t loctls_read_file(struct b43_wldev *dev, char *buf, size_t bufsize) { ssize_t count = 0; struct b43_txpower_lo_control *lo; int i, err = 0; struct b43_lo_calib *cal; unsigned long now = jiffies; struct b43_phy *phy = &dev->phy; if 
(phy->type != B43_PHYTYPE_G) { fappend("Device is not a G-PHY\n"); err = -ENODEV; goto out; } lo = phy->g->lo_control; fappend("-- Local Oscillator calibration data --\n\n"); fappend("HW-power-control enabled: %d\n", dev->phy.hardware_power_control); fappend("TX Bias: 0x%02X, TX Magn: 0x%02X (expire in %lu sec)\n", lo->tx_bias, lo->tx_magn, calc_expire_secs(now, lo->txctl_measured_time, B43_LO_TXCTL_EXPIRE)); fappend("Power Vector: 0x%08X%08X (expires in %lu sec)\n", (unsigned int)((lo->power_vector & 0xFFFFFFFF00000000ULL) >> 32), (unsigned int)(lo->power_vector & 0x00000000FFFFFFFFULL), calc_expire_secs(now, lo->pwr_vec_read_time, B43_LO_PWRVEC_EXPIRE)); fappend("\nCalibrated settings:\n"); list_for_each_entry(cal, &lo->calib_list, list) { bool active; active = (b43_compare_bbatt(&cal->bbatt, &phy->g->bbatt) && b43_compare_rfatt(&cal->rfatt, &phy->g->rfatt)); fappend("BB(%d), RF(%d,%d) -> I=%d, Q=%d " "(expires in %lu sec)%s\n", cal->bbatt.att, cal->rfatt.att, cal->rfatt.with_padmix, cal->ctl.i, cal->ctl.q, calc_expire_secs(now, cal->calib_time, B43_LO_CALIB_EXPIRE), active ? " ACTIVE" : ""); } fappend("\nUsed RF attenuation values: Value(WithPadmix flag)\n"); for (i = 0; i < lo->rfatt_list.len; i++) { fappend("%u(%d), ", lo->rfatt_list.list[i].att, lo->rfatt_list.list[i].with_padmix); } fappend("\n"); fappend("\nUsed Baseband attenuation values:\n"); for (i = 0; i < lo->bbatt_list.len; i++) { fappend("%u, ", lo->bbatt_list.list[i].att); } fappend("\n"); out: return err ? 
err : count; } #undef fappend static ssize_t b43_debugfs_read(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) { struct b43_wldev *dev; struct b43_debugfs_fops *dfops; struct b43_dfs_file *dfile; ssize_t uninitialized_var(ret); char *buf; const size_t bufsize = 1024 * 16; /* 16 kiB buffer */ const size_t buforder = get_order(bufsize); int err = 0; if (!count) return 0; dev = file->private_data; if (!dev) return -ENODEV; mutex_lock(&dev->wl->mutex); if (b43_status(dev) < B43_STAT_INITIALIZED) { err = -ENODEV; goto out_unlock; } dfops = container_of(file->f_op, struct b43_debugfs_fops, fops); if (!dfops->read) { err = -ENOSYS; goto out_unlock; } dfile = fops_to_dfs_file(dev, dfops); if (!dfile->buffer) { buf = (char *)__get_free_pages(GFP_KERNEL, buforder); if (!buf) { err = -ENOMEM; goto out_unlock; } memset(buf, 0, bufsize); ret = dfops->read(dev, buf, bufsize); if (ret <= 0) { free_pages((unsigned long)buf, buforder); err = ret; goto out_unlock; } dfile->data_len = ret; dfile->buffer = buf; } ret = simple_read_from_buffer(userbuf, count, ppos, dfile->buffer, dfile->data_len); if (*ppos >= dfile->data_len) { free_pages((unsigned long)dfile->buffer, buforder); dfile->buffer = NULL; dfile->data_len = 0; } out_unlock: mutex_unlock(&dev->wl->mutex); return err ? 
err : ret; } static ssize_t b43_debugfs_write(struct file *file, const char __user *userbuf, size_t count, loff_t *ppos) { struct b43_wldev *dev; struct b43_debugfs_fops *dfops; char *buf; int err = 0; if (!count) return 0; if (count > PAGE_SIZE) return -E2BIG; dev = file->private_data; if (!dev) return -ENODEV; mutex_lock(&dev->wl->mutex); if (b43_status(dev) < B43_STAT_INITIALIZED) { err = -ENODEV; goto out_unlock; } dfops = container_of(file->f_op, struct b43_debugfs_fops, fops); if (!dfops->write) { err = -ENOSYS; goto out_unlock; } buf = (char *)get_zeroed_page(GFP_KERNEL); if (!buf) { err = -ENOMEM; goto out_unlock; } if (copy_from_user(buf, userbuf, count)) { err = -EFAULT; goto out_freepage; } err = dfops->write(dev, buf, count); if (err) goto out_freepage; out_freepage: free_page((unsigned long)buf); out_unlock: mutex_unlock(&dev->wl->mutex); return err ? err : count; } #define B43_DEBUGFS_FOPS(name, _read, _write) \ static struct b43_debugfs_fops fops_##name = { \ .read = _read, \ .write = _write, \ .fops = { \ .open = simple_open, \ .read = b43_debugfs_read, \ .write = b43_debugfs_write, \ .llseek = generic_file_llseek, \ }, \ .file_struct_offset = offsetof(struct b43_dfsentry, \ file_##name), \ } B43_DEBUGFS_FOPS(shm16read, shm16read__read_file, shm16read__write_file); B43_DEBUGFS_FOPS(shm16write, NULL, shm16write__write_file); B43_DEBUGFS_FOPS(shm32read, shm32read__read_file, shm32read__write_file); B43_DEBUGFS_FOPS(shm32write, NULL, shm32write__write_file); B43_DEBUGFS_FOPS(mmio16read, mmio16read__read_file, mmio16read__write_file); B43_DEBUGFS_FOPS(mmio16write, NULL, mmio16write__write_file); B43_DEBUGFS_FOPS(mmio32read, mmio32read__read_file, mmio32read__write_file); B43_DEBUGFS_FOPS(mmio32write, NULL, mmio32write__write_file); B43_DEBUGFS_FOPS(txstat, txstat_read_file, NULL); B43_DEBUGFS_FOPS(restart, NULL, restart_write_file); B43_DEBUGFS_FOPS(loctls, loctls_read_file, NULL); bool b43_debug(struct b43_wldev *dev, enum b43_dyndbg feature) { bool 
enabled; enabled = (dev->dfsentry && dev->dfsentry->dyn_debug[feature]); if (unlikely(enabled)) { /* Force full debugging messages, if the user enabled * some dynamic debugging feature. */ b43_modparam_verbose = B43_VERBOSITY_MAX; } return enabled; } static void b43_remove_dynamic_debug(struct b43_wldev *dev) { struct b43_dfsentry *e = dev->dfsentry; int i; for (i = 0; i < __B43_NR_DYNDBG; i++) debugfs_remove(e->dyn_debug_dentries[i]); } static void b43_add_dynamic_debug(struct b43_wldev *dev) { struct b43_dfsentry *e = dev->dfsentry; struct dentry *d; #define add_dyn_dbg(name, id, initstate) do { \ e->dyn_debug[id] = (initstate); \ d = debugfs_create_bool(name, 0600, e->subdir, \ &(e->dyn_debug[id])); \ if (!IS_ERR(d)) \ e->dyn_debug_dentries[id] = d; \ } while (0) add_dyn_dbg("debug_xmitpower", B43_DBG_XMITPOWER, 0); add_dyn_dbg("debug_dmaoverflow", B43_DBG_DMAOVERFLOW, 0); add_dyn_dbg("debug_dmaverbose", B43_DBG_DMAVERBOSE, 0); add_dyn_dbg("debug_pwork_fast", B43_DBG_PWORK_FAST, 0); add_dyn_dbg("debug_pwork_stop", B43_DBG_PWORK_STOP, 0); add_dyn_dbg("debug_lo", B43_DBG_LO, 0); add_dyn_dbg("debug_firmware", B43_DBG_FIRMWARE, 0); add_dyn_dbg("debug_keys", B43_DBG_KEYS, 0); add_dyn_dbg("debug_verbose_stats", B43_DBG_VERBOSESTATS, 0); #undef add_dyn_dbg } void b43_debugfs_add_device(struct b43_wldev *dev) { struct b43_dfsentry *e; struct b43_txstatus_log *log; char devdir[16]; B43_WARN_ON(!dev); e = kzalloc(sizeof(*e), GFP_KERNEL); if (!e) { b43err(dev->wl, "debugfs: add device OOM\n"); return; } e->dev = dev; log = &e->txstatlog; log->log = kcalloc(B43_NR_LOGGED_TXSTATUS, sizeof(struct b43_txstatus), GFP_KERNEL); if (!log->log) { b43err(dev->wl, "debugfs: add device txstatus OOM\n"); kfree(e); return; } log->end = -1; dev->dfsentry = e; snprintf(devdir, sizeof(devdir), "%s", wiphy_name(dev->wl->hw->wiphy)); e->subdir = debugfs_create_dir(devdir, rootdir); if (!e->subdir || IS_ERR(e->subdir)) { if (e->subdir == ERR_PTR(-ENODEV)) { b43dbg(dev->wl, "DebugFS 
(CONFIG_DEBUG_FS) not " "enabled in kernel config\n"); } else { b43err(dev->wl, "debugfs: cannot create %s directory\n", devdir); } dev->dfsentry = NULL; kfree(log->log); kfree(e); return; } e->mmio16read_next = 0xFFFF; /* invalid address */ e->mmio32read_next = 0xFFFF; /* invalid address */ e->shm16read_routing_next = 0xFFFFFFFF; /* invalid routing */ e->shm16read_addr_next = 0xFFFFFFFF; /* invalid address */ e->shm32read_routing_next = 0xFFFFFFFF; /* invalid routing */ e->shm32read_addr_next = 0xFFFFFFFF; /* invalid address */ #define ADD_FILE(name, mode) \ do { \ struct dentry *d; \ d = debugfs_create_file(__stringify(name), \ mode, e->subdir, dev, \ &fops_##name.fops); \ e->file_##name.dentry = NULL; \ if (!IS_ERR(d)) \ e->file_##name.dentry = d; \ } while (0) ADD_FILE(shm16read, 0600); ADD_FILE(shm16write, 0200); ADD_FILE(shm32read, 0600); ADD_FILE(shm32write, 0200); ADD_FILE(mmio16read, 0600); ADD_FILE(mmio16write, 0200); ADD_FILE(mmio32read, 0600); ADD_FILE(mmio32write, 0200); ADD_FILE(txstat, 0400); ADD_FILE(restart, 0200); ADD_FILE(loctls, 0400); #undef ADD_FILE b43_add_dynamic_debug(dev); } void b43_debugfs_remove_device(struct b43_wldev *dev) { struct b43_dfsentry *e; if (!dev) return; e = dev->dfsentry; if (!e) return; b43_remove_dynamic_debug(dev); debugfs_remove(e->file_shm16read.dentry); debugfs_remove(e->file_shm16write.dentry); debugfs_remove(e->file_shm32read.dentry); debugfs_remove(e->file_shm32write.dentry); debugfs_remove(e->file_mmio16read.dentry); debugfs_remove(e->file_mmio16write.dentry); debugfs_remove(e->file_mmio32read.dentry); debugfs_remove(e->file_mmio32write.dentry); debugfs_remove(e->file_txstat.dentry); debugfs_remove(e->file_restart.dentry); debugfs_remove(e->file_loctls.dentry); debugfs_remove(e->subdir); kfree(e->txstatlog.log); kfree(e); } void b43_debugfs_log_txstat(struct b43_wldev *dev, const struct b43_txstatus *status) { struct b43_dfsentry *e = dev->dfsentry; struct b43_txstatus_log *log; struct b43_txstatus *cur; int i; 
if (!e) return; log = &e->txstatlog; i = log->end + 1; if (i == B43_NR_LOGGED_TXSTATUS) i = 0; log->end = i; cur = &(log->log[i]); memcpy(cur, status, sizeof(*cur)); } void b43_debugfs_init(void) { rootdir = debugfs_create_dir(KBUILD_MODNAME, NULL); if (IS_ERR(rootdir)) rootdir = NULL; } void b43_debugfs_exit(void) { debugfs_remove(rootdir); }
gpl-2.0
kannu1994/sgs2_kernel
net/mac80211/wpa.c
727
15955
/* * Copyright 2002-2004, Instant802 Networks, Inc. * Copyright 2008, Jouni Malinen <j@w1.fi> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/netdevice.h> #include <linux/types.h> #include <linux/skbuff.h> #include <linux/compiler.h> #include <linux/ieee80211.h> #include <linux/gfp.h> #include <asm/unaligned.h> #include <net/mac80211.h> #include "ieee80211_i.h" #include "michael.h" #include "tkip.h" #include "aes_ccm.h" #include "aes_cmac.h" #include "wpa.h" ieee80211_tx_result ieee80211_tx_h_michael_mic_add(struct ieee80211_tx_data *tx) { u8 *data, *key, *mic; size_t data_len; unsigned int hdrlen; struct ieee80211_hdr *hdr; struct sk_buff *skb = tx->skb; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); int tail; hdr = (struct ieee80211_hdr *)skb->data; if (!tx->key || tx->key->conf.cipher != WLAN_CIPHER_SUITE_TKIP || skb->len < 24 || !ieee80211_is_data_present(hdr->frame_control)) return TX_CONTINUE; hdrlen = ieee80211_hdrlen(hdr->frame_control); if (skb->len < hdrlen) return TX_DROP; data = skb->data + hdrlen; data_len = skb->len - hdrlen; if (unlikely(info->flags & IEEE80211_TX_INTFL_TKIP_MIC_FAILURE)) { /* Need to use software crypto for the test */ info->control.hw_key = NULL; } if (info->control.hw_key && !(tx->flags & IEEE80211_TX_FRAGMENTED) && !(tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC)) { /* hwaccel - with no need for SW-generated MMIC */ return TX_CONTINUE; } tail = MICHAEL_MIC_LEN; if (!info->control.hw_key) tail += TKIP_ICV_LEN; if (WARN_ON(skb_tailroom(skb) < tail || skb_headroom(skb) < TKIP_IV_LEN)) return TX_DROP; key = &tx->key->conf.key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY]; mic = skb_put(skb, MICHAEL_MIC_LEN); michael_mic(key, hdr, data, data_len, mic); if (unlikely(info->flags & IEEE80211_TX_INTFL_TKIP_MIC_FAILURE)) mic[0]++; return TX_CONTINUE; } ieee80211_rx_result 
ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx) { u8 *data, *key = NULL; size_t data_len; unsigned int hdrlen; u8 mic[MICHAEL_MIC_LEN]; struct sk_buff *skb = rx->skb; struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; int queue = rx->queue; /* otherwise, TKIP is vulnerable to TID 0 vs. non-QoS replays */ if (rx->queue == NUM_RX_DATA_QUEUES - 1) queue = 0; /* * it makes no sense to check for MIC errors on anything other * than data frames. */ if (!ieee80211_is_data_present(hdr->frame_control)) return RX_CONTINUE; /* * No way to verify the MIC if the hardware stripped it or * the IV with the key index. In this case we have solely rely * on the driver to set RX_FLAG_MMIC_ERROR in the event of a * MIC failure report. */ if (status->flag & (RX_FLAG_MMIC_STRIPPED | RX_FLAG_IV_STRIPPED)) { if (status->flag & RX_FLAG_MMIC_ERROR) goto mic_fail; if (!(status->flag & RX_FLAG_IV_STRIPPED) && rx->key) goto update_iv; return RX_CONTINUE; } /* * Some hardware seems to generate Michael MIC failure reports; even * though, the frame was not encrypted with TKIP and therefore has no * MIC. Ignore the flag them to avoid triggering countermeasures. */ if (!rx->key || rx->key->conf.cipher != WLAN_CIPHER_SUITE_TKIP || !(status->flag & RX_FLAG_DECRYPTED)) return RX_CONTINUE; if (rx->sdata->vif.type == NL80211_IFTYPE_AP && rx->key->conf.keyidx) { /* * APs with pairwise keys should never receive Michael MIC * errors for non-zero keyidx because these are reserved for * group keys and only the AP is sending real multicast * frames in the BSS. 
( */ return RX_DROP_UNUSABLE; } if (status->flag & RX_FLAG_MMIC_ERROR) goto mic_fail; hdrlen = ieee80211_hdrlen(hdr->frame_control); if (skb->len < hdrlen + MICHAEL_MIC_LEN) return RX_DROP_UNUSABLE; data = skb->data + hdrlen; data_len = skb->len - hdrlen - MICHAEL_MIC_LEN; key = &rx->key->conf.key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY]; michael_mic(key, hdr, data, data_len, mic); if (memcmp(mic, data + data_len, MICHAEL_MIC_LEN) != 0) goto mic_fail; /* remove Michael MIC from payload */ skb_trim(skb, skb->len - MICHAEL_MIC_LEN); update_iv: /* update IV in key information to be able to detect replays */ rx->key->u.tkip.rx[queue].iv32 = rx->tkip_iv32; rx->key->u.tkip.rx[queue].iv16 = rx->tkip_iv16; return RX_CONTINUE; mic_fail: /* * In some cases the key can be unset - e.g. a multicast packet, in * a driver that supports HW encryption. Send up the key idx only if * the key is set. */ mac80211_ev_michael_mic_failure(rx->sdata, rx->key ? rx->key->conf.keyidx : -1, (void *) skb->data, NULL, GFP_ATOMIC); return RX_DROP_UNUSABLE; } static int tkip_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; struct ieee80211_key *key = tx->key; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); unsigned int hdrlen; int len, tail; u8 *pos; if (info->control.hw_key && !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV)) { /* hwaccel - with no need for software-generated IV */ return 0; } hdrlen = ieee80211_hdrlen(hdr->frame_control); len = skb->len - hdrlen; if (info->control.hw_key) tail = 0; else tail = TKIP_ICV_LEN; if (WARN_ON(skb_tailroom(skb) < tail || skb_headroom(skb) < TKIP_IV_LEN)) return -1; pos = skb_push(skb, TKIP_IV_LEN); memmove(pos, pos + TKIP_IV_LEN, hdrlen); pos += hdrlen; /* Increase IV for the frame */ key->u.tkip.tx.iv16++; if (key->u.tkip.tx.iv16 == 0) key->u.tkip.tx.iv32++; pos = ieee80211_tkip_add_iv(pos, key, key->u.tkip.tx.iv16); /* hwaccel - with software IV */ if 
(info->control.hw_key) return 0; /* Add room for ICV */ skb_put(skb, TKIP_ICV_LEN); hdr = (struct ieee80211_hdr *) skb->data; return ieee80211_tkip_encrypt_data(tx->local->wep_tx_tfm, key, pos, len, hdr->addr2); } ieee80211_tx_result ieee80211_crypto_tkip_encrypt(struct ieee80211_tx_data *tx) { struct sk_buff *skb = tx->skb; ieee80211_tx_set_protected(tx); do { if (tkip_encrypt_skb(tx, skb) < 0) return TX_DROP; } while ((skb = skb->next)); return TX_CONTINUE; } ieee80211_rx_result ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data; int hdrlen, res, hwaccel = 0; struct ieee80211_key *key = rx->key; struct sk_buff *skb = rx->skb; struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); int queue = rx->queue; /* otherwise, TKIP is vulnerable to TID 0 vs. non-QoS replays */ if (rx->queue == NUM_RX_DATA_QUEUES - 1) queue = 0; hdrlen = ieee80211_hdrlen(hdr->frame_control); if (!ieee80211_is_data(hdr->frame_control)) return RX_CONTINUE; if (!rx->sta || skb->len - hdrlen < 12) return RX_DROP_UNUSABLE; /* * Let TKIP code verify IV, but skip decryption. 
* In the case where hardware checks the IV as well, * we don't even get here, see ieee80211_rx_h_decrypt() */ if (status->flag & RX_FLAG_DECRYPTED) hwaccel = 1; res = ieee80211_tkip_decrypt_data(rx->local->wep_rx_tfm, key, skb->data + hdrlen, skb->len - hdrlen, rx->sta->sta.addr, hdr->addr1, hwaccel, queue, &rx->tkip_iv32, &rx->tkip_iv16); if (res != TKIP_DECRYPT_OK) return RX_DROP_UNUSABLE; /* Trim ICV */ skb_trim(skb, skb->len - TKIP_ICV_LEN); /* Remove IV */ memmove(skb->data + TKIP_IV_LEN, skb->data, hdrlen); skb_pull(skb, TKIP_IV_LEN); return RX_CONTINUE; } static void ccmp_special_blocks(struct sk_buff *skb, u8 *pn, u8 *scratch, int encrypted) { __le16 mask_fc; int a4_included, mgmt; u8 qos_tid; u8 *b_0, *aad; u16 data_len, len_a; unsigned int hdrlen; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; b_0 = scratch + 3 * AES_BLOCK_LEN; aad = scratch + 4 * AES_BLOCK_LEN; /* * Mask FC: zero subtype b4 b5 b6 (if not mgmt) * Retry, PwrMgt, MoreData; set Protected */ mgmt = ieee80211_is_mgmt(hdr->frame_control); mask_fc = hdr->frame_control; mask_fc &= ~cpu_to_le16(IEEE80211_FCTL_RETRY | IEEE80211_FCTL_PM | IEEE80211_FCTL_MOREDATA); if (!mgmt) mask_fc &= ~cpu_to_le16(0x0070); mask_fc |= cpu_to_le16(IEEE80211_FCTL_PROTECTED); hdrlen = ieee80211_hdrlen(hdr->frame_control); len_a = hdrlen - 2; a4_included = ieee80211_has_a4(hdr->frame_control); if (ieee80211_is_data_qos(hdr->frame_control)) qos_tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK; else qos_tid = 0; data_len = skb->len - hdrlen - CCMP_HDR_LEN; if (encrypted) data_len -= CCMP_MIC_LEN; /* First block, b_0 */ b_0[0] = 0x59; /* flags: Adata: 1, M: 011, L: 001 */ /* Nonce: Nonce Flags | A2 | PN * Nonce Flags: Priority (b0..b3) | Management (b4) | Reserved (b5..b7) */ b_0[1] = qos_tid | (mgmt << 4); memcpy(&b_0[2], hdr->addr2, ETH_ALEN); memcpy(&b_0[8], pn, CCMP_PN_LEN); /* l(m) */ put_unaligned_be16(data_len, &b_0[14]); /* AAD (extra authenticate-only data) / masked 802.11 header * FC 
| A1 | A2 | A3 | SC | [A4] | [QC] */ put_unaligned_be16(len_a, &aad[0]); put_unaligned(mask_fc, (__le16 *)&aad[2]); memcpy(&aad[4], &hdr->addr1, 3 * ETH_ALEN); /* Mask Seq#, leave Frag# */ aad[22] = *((u8 *) &hdr->seq_ctrl) & 0x0f; aad[23] = 0; if (a4_included) { memcpy(&aad[24], hdr->addr4, ETH_ALEN); aad[30] = qos_tid; aad[31] = 0; } else { memset(&aad[24], 0, ETH_ALEN + IEEE80211_QOS_CTL_LEN); aad[24] = qos_tid; } } static inline void ccmp_pn2hdr(u8 *hdr, u8 *pn, int key_id) { hdr[0] = pn[5]; hdr[1] = pn[4]; hdr[2] = 0; hdr[3] = 0x20 | (key_id << 6); hdr[4] = pn[3]; hdr[5] = pn[2]; hdr[6] = pn[1]; hdr[7] = pn[0]; } static inline void ccmp_hdr2pn(u8 *pn, u8 *hdr) { pn[0] = hdr[7]; pn[1] = hdr[6]; pn[2] = hdr[5]; pn[3] = hdr[4]; pn[4] = hdr[1]; pn[5] = hdr[0]; } static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; struct ieee80211_key *key = tx->key; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); int hdrlen, len, tail; u8 *pos, *pn; int i; if (info->control.hw_key && !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV)) { /* * hwaccel has no need for preallocated room for CCMP * header or MIC fields */ return 0; } hdrlen = ieee80211_hdrlen(hdr->frame_control); len = skb->len - hdrlen; if (info->control.hw_key) tail = 0; else tail = CCMP_MIC_LEN; if (WARN_ON(skb_tailroom(skb) < tail || skb_headroom(skb) < CCMP_HDR_LEN)) return -1; pos = skb_push(skb, CCMP_HDR_LEN); memmove(pos, pos + CCMP_HDR_LEN, hdrlen); hdr = (struct ieee80211_hdr *) pos; pos += hdrlen; /* PN = PN + 1 */ pn = key->u.ccmp.tx_pn; for (i = CCMP_PN_LEN - 1; i >= 0; i--) { pn[i]++; if (pn[i]) break; } ccmp_pn2hdr(pos, pn, key->conf.keyidx); /* hwaccel - with software CCMP header */ if (info->control.hw_key) return 0; pos += CCMP_HDR_LEN; ccmp_special_blocks(skb, pn, key->u.ccmp.tx_crypto_buf, 0); ieee80211_aes_ccm_encrypt(key->u.ccmp.tfm, key->u.ccmp.tx_crypto_buf, pos, len, pos, 
skb_put(skb, CCMP_MIC_LEN)); return 0; } ieee80211_tx_result ieee80211_crypto_ccmp_encrypt(struct ieee80211_tx_data *tx) { struct sk_buff *skb = tx->skb; ieee80211_tx_set_protected(tx); do { if (ccmp_encrypt_skb(tx, skb) < 0) return TX_DROP; } while ((skb = skb->next)); return TX_CONTINUE; } ieee80211_rx_result ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; int hdrlen; struct ieee80211_key *key = rx->key; struct sk_buff *skb = rx->skb; struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); u8 pn[CCMP_PN_LEN]; int data_len; int queue; hdrlen = ieee80211_hdrlen(hdr->frame_control); if (!ieee80211_is_data(hdr->frame_control) && !ieee80211_is_robust_mgmt_frame(hdr)) return RX_CONTINUE; data_len = skb->len - hdrlen - CCMP_HDR_LEN - CCMP_MIC_LEN; if (!rx->sta || data_len < 0) return RX_DROP_UNUSABLE; ccmp_hdr2pn(pn, skb->data + hdrlen); queue = ieee80211_is_mgmt(hdr->frame_control) ? NUM_RX_DATA_QUEUES : rx->queue; if (memcmp(pn, key->u.ccmp.rx_pn[queue], CCMP_PN_LEN) <= 0) { key->u.ccmp.replays++; return RX_DROP_UNUSABLE; } if (!(status->flag & RX_FLAG_DECRYPTED)) { /* hardware didn't decrypt/verify MIC */ ccmp_special_blocks(skb, pn, key->u.ccmp.rx_crypto_buf, 1); if (ieee80211_aes_ccm_decrypt( key->u.ccmp.tfm, key->u.ccmp.rx_crypto_buf, skb->data + hdrlen + CCMP_HDR_LEN, data_len, skb->data + skb->len - CCMP_MIC_LEN, skb->data + hdrlen + CCMP_HDR_LEN)) return RX_DROP_UNUSABLE; } memcpy(key->u.ccmp.rx_pn[queue], pn, CCMP_PN_LEN); /* Remove CCMP header and MIC */ skb_trim(skb, skb->len - CCMP_MIC_LEN); memmove(skb->data + CCMP_HDR_LEN, skb->data, hdrlen); skb_pull(skb, CCMP_HDR_LEN); return RX_CONTINUE; } static void bip_aad(struct sk_buff *skb, u8 *aad) { /* BIP AAD: FC(masked) || A1 || A2 || A3 */ /* FC type/subtype */ aad[0] = skb->data[0]; /* Mask FC Retry, PwrMgt, MoreData flags to zero */ aad[1] = skb->data[1] & ~(BIT(4) | BIT(5) | BIT(6)); /* A1 || A2 || A3 */ memcpy(aad + 2, 
skb->data + 4, 3 * ETH_ALEN); } static inline void bip_ipn_swap(u8 *d, const u8 *s) { *d++ = s[5]; *d++ = s[4]; *d++ = s[3]; *d++ = s[2]; *d++ = s[1]; *d = s[0]; } ieee80211_tx_result ieee80211_crypto_aes_cmac_encrypt(struct ieee80211_tx_data *tx) { struct sk_buff *skb = tx->skb; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_key *key = tx->key; struct ieee80211_mmie *mmie; u8 *pn, aad[20]; int i; if (info->control.hw_key) return 0; if (WARN_ON(skb_tailroom(skb) < sizeof(*mmie))) return TX_DROP; mmie = (struct ieee80211_mmie *) skb_put(skb, sizeof(*mmie)); mmie->element_id = WLAN_EID_MMIE; mmie->length = sizeof(*mmie) - 2; mmie->key_id = cpu_to_le16(key->conf.keyidx); /* PN = PN + 1 */ pn = key->u.aes_cmac.tx_pn; for (i = sizeof(key->u.aes_cmac.tx_pn) - 1; i >= 0; i--) { pn[i]++; if (pn[i]) break; } bip_ipn_swap(mmie->sequence_number, pn); bip_aad(skb, aad); /* * MIC = AES-128-CMAC(IGTK, AAD || Management Frame Body || MMIE, 64) */ ieee80211_aes_cmac(key->u.aes_cmac.tfm, key->u.aes_cmac.tx_crypto_buf, aad, skb->data + 24, skb->len - 24, mmie->mic); return TX_CONTINUE; } ieee80211_rx_result ieee80211_crypto_aes_cmac_decrypt(struct ieee80211_rx_data *rx) { struct sk_buff *skb = rx->skb; struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); struct ieee80211_key *key = rx->key; struct ieee80211_mmie *mmie; u8 aad[20], mic[8], ipn[6]; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; if (!ieee80211_is_mgmt(hdr->frame_control)) return RX_CONTINUE; if (skb->len < 24 + sizeof(*mmie)) return RX_DROP_UNUSABLE; mmie = (struct ieee80211_mmie *) (skb->data + skb->len - sizeof(*mmie)); if (mmie->element_id != WLAN_EID_MMIE || mmie->length != sizeof(*mmie) - 2) return RX_DROP_UNUSABLE; /* Invalid MMIE */ bip_ipn_swap(ipn, mmie->sequence_number); if (memcmp(ipn, key->u.aes_cmac.rx_pn, 6) <= 0) { key->u.aes_cmac.replays++; return RX_DROP_UNUSABLE; } if (!(status->flag & RX_FLAG_DECRYPTED)) { /* hardware didn't decrypt/verify MIC */ 
bip_aad(skb, aad); ieee80211_aes_cmac(key->u.aes_cmac.tfm, key->u.aes_cmac.rx_crypto_buf, aad, skb->data + 24, skb->len - 24, mic); if (memcmp(mic, mmie->mic, sizeof(mmie->mic)) != 0) { key->u.aes_cmac.icverrors++; return RX_DROP_UNUSABLE; } } memcpy(key->u.aes_cmac.rx_pn, ipn, 6); /* Remove MMIE */ skb_trim(skb, skb->len - sizeof(*mmie)); return RX_CONTINUE; }
gpl-2.0
stevezuo/ak98_kernel
arch/sh/kernel/cpu/sh2a/fpu.c
727
15108
/* * Save/restore floating point context for signal handlers. * * Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * FIXME! These routines can be optimized in big endian case. */ #include <linux/sched.h> #include <linux/signal.h> #include <asm/processor.h> #include <asm/io.h> #include <asm/fpu.h> /* The PR (precision) bit in the FP Status Register must be clear when * an frchg instruction is executed, otherwise the instruction is undefined. * Executing frchg with PR set causes a trap on some SH4 implementations. */ #define FPSCR_RCHG 0x00000000 /* * Save FPU registers onto task structure. * Assume called with FPU enabled (SR.FD=0). */ void save_fpu(struct task_struct *tsk, struct pt_regs *regs) { unsigned long dummy; clear_tsk_thread_flag(tsk, TIF_USEDFPU); enable_fpu(); asm volatile("sts.l fpul, @-%0\n\t" "sts.l fpscr, @-%0\n\t" "fmov.s fr15, @-%0\n\t" "fmov.s fr14, @-%0\n\t" "fmov.s fr13, @-%0\n\t" "fmov.s fr12, @-%0\n\t" "fmov.s fr11, @-%0\n\t" "fmov.s fr10, @-%0\n\t" "fmov.s fr9, @-%0\n\t" "fmov.s fr8, @-%0\n\t" "fmov.s fr7, @-%0\n\t" "fmov.s fr6, @-%0\n\t" "fmov.s fr5, @-%0\n\t" "fmov.s fr4, @-%0\n\t" "fmov.s fr3, @-%0\n\t" "fmov.s fr2, @-%0\n\t" "fmov.s fr1, @-%0\n\t" "fmov.s fr0, @-%0\n\t" "lds %3, fpscr\n\t" : "=r" (dummy) : "0" ((char *)(&tsk->thread.fpu.hard.status)), "r" (FPSCR_RCHG), "r" (FPSCR_INIT) : "memory"); disable_fpu(); release_fpu(regs); } static void restore_fpu(struct task_struct *tsk) { unsigned long dummy; enable_fpu(); asm volatile("fmov.s @%0+, fr0\n\t" "fmov.s @%0+, fr1\n\t" "fmov.s @%0+, fr2\n\t" "fmov.s @%0+, fr3\n\t" "fmov.s @%0+, fr4\n\t" "fmov.s @%0+, fr5\n\t" "fmov.s @%0+, fr6\n\t" "fmov.s @%0+, fr7\n\t" "fmov.s @%0+, fr8\n\t" "fmov.s @%0+, fr9\n\t" "fmov.s @%0+, fr10\n\t" "fmov.s @%0+, fr11\n\t" "fmov.s @%0+, fr12\n\t" "fmov.s @%0+, fr13\n\t" "fmov.s @%0+, 
fr14\n\t" "fmov.s @%0+, fr15\n\t" "lds.l @%0+, fpscr\n\t" "lds.l @%0+, fpul\n\t" : "=r" (dummy) : "0" (&tsk->thread.fpu), "r" (FPSCR_RCHG) : "memory"); disable_fpu(); } /* * Load the FPU with signalling NANS. This bit pattern we're using * has the property that no matter wether considered as single or as * double precission represents signaling NANS. */ static void fpu_init(void) { enable_fpu(); asm volatile("lds %0, fpul\n\t" "fsts fpul, fr0\n\t" "fsts fpul, fr1\n\t" "fsts fpul, fr2\n\t" "fsts fpul, fr3\n\t" "fsts fpul, fr4\n\t" "fsts fpul, fr5\n\t" "fsts fpul, fr6\n\t" "fsts fpul, fr7\n\t" "fsts fpul, fr8\n\t" "fsts fpul, fr9\n\t" "fsts fpul, fr10\n\t" "fsts fpul, fr11\n\t" "fsts fpul, fr12\n\t" "fsts fpul, fr13\n\t" "fsts fpul, fr14\n\t" "fsts fpul, fr15\n\t" "lds %2, fpscr\n\t" : /* no output */ : "r" (0), "r" (FPSCR_RCHG), "r" (FPSCR_INIT)); disable_fpu(); } /* * Emulate arithmetic ops on denormalized number for some FPU insns. */ /* denormalized float * float */ static int denormal_mulf(int hx, int hy) { unsigned int ix, iy; unsigned long long m, n; int exp, w; ix = hx & 0x7fffffff; iy = hy & 0x7fffffff; if (iy < 0x00800000 || ix == 0) return ((hx ^ hy) & 0x80000000); exp = (iy & 0x7f800000) >> 23; ix &= 0x007fffff; iy = (iy & 0x007fffff) | 0x00800000; m = (unsigned long long)ix * iy; n = m; w = -1; while (n) { n >>= 1; w++; } /* FIXME: use guard bits */ exp += w - 126 - 46; if (exp > 0) ix = ((int) (m >> (w - 23)) & 0x007fffff) | (exp << 23); else if (exp + 22 >= 0) ix = (int) (m >> (w - 22 - exp)) & 0x007fffff; else ix = 0; ix |= (hx ^ hy) & 0x80000000; return ix; } /* denormalized double * double */ static void mult64(unsigned long long x, unsigned long long y, unsigned long long *highp, unsigned long long *lowp) { unsigned long long sub0, sub1, sub2, sub3; unsigned long long high, low; sub0 = (x >> 32) * (unsigned long) (y >> 32); sub1 = (x & 0xffffffffLL) * (unsigned long) (y >> 32); sub2 = (x >> 32) * (unsigned long) (y & 0xffffffffLL); sub3 = (x & 
0xffffffffLL) * (unsigned long) (y & 0xffffffffLL); low = sub3; high = 0LL; sub3 += (sub1 << 32); if (low > sub3) high++; low = sub3; sub3 += (sub2 << 32); if (low > sub3) high++; low = sub3; high += (sub1 >> 32) + (sub2 >> 32); high += sub0; *lowp = low; *highp = high; } static inline long long rshift64(unsigned long long mh, unsigned long long ml, int n) { if (n >= 64) return mh >> (n - 64); return (mh << (64 - n)) | (ml >> n); } static long long denormal_muld(long long hx, long long hy) { unsigned long long ix, iy; unsigned long long mh, ml, nh, nl; int exp, w; ix = hx & 0x7fffffffffffffffLL; iy = hy & 0x7fffffffffffffffLL; if (iy < 0x0010000000000000LL || ix == 0) return ((hx ^ hy) & 0x8000000000000000LL); exp = (iy & 0x7ff0000000000000LL) >> 52; ix &= 0x000fffffffffffffLL; iy = (iy & 0x000fffffffffffffLL) | 0x0010000000000000LL; mult64(ix, iy, &mh, &ml); nh = mh; nl = ml; w = -1; if (nh) { while (nh) { nh >>= 1; w++;} w += 64; } else while (nl) { nl >>= 1; w++;} /* FIXME: use guard bits */ exp += w - 1022 - 52 * 2; if (exp > 0) ix = (rshift64(mh, ml, w - 52) & 0x000fffffffffffffLL) | ((long long)exp << 52); else if (exp + 51 >= 0) ix = rshift64(mh, ml, w - 51 - exp) & 0x000fffffffffffffLL; else ix = 0; ix |= (hx ^ hy) & 0x8000000000000000LL; return ix; } /* ix - iy where iy: denormal and ix, iy >= 0 */ static int denormal_subf1(unsigned int ix, unsigned int iy) { int frac; int exp; if (ix < 0x00800000) return ix - iy; exp = (ix & 0x7f800000) >> 23; if (exp - 1 > 31) return ix; iy >>= exp - 1; if (iy == 0) return ix; frac = (ix & 0x007fffff) | 0x00800000; frac -= iy; while (frac < 0x00800000) { if (--exp == 0) return frac; frac <<= 1; } return (exp << 23) | (frac & 0x007fffff); } /* ix + iy where iy: denormal and ix, iy >= 0 */ static int denormal_addf1(unsigned int ix, unsigned int iy) { int frac; int exp; if (ix < 0x00800000) return ix + iy; exp = (ix & 0x7f800000) >> 23; if (exp - 1 > 31) return ix; iy >>= exp - 1; if (iy == 0) return ix; frac = (ix & 
0x007fffff) | 0x00800000; frac += iy; if (frac >= 0x01000000) { frac >>= 1; ++exp; } return (exp << 23) | (frac & 0x007fffff); } static int denormal_addf(int hx, int hy) { unsigned int ix, iy; int sign; if ((hx ^ hy) & 0x80000000) { sign = hx & 0x80000000; ix = hx & 0x7fffffff; iy = hy & 0x7fffffff; if (iy < 0x00800000) { ix = denormal_subf1(ix, iy); if ((int) ix < 0) { ix = -ix; sign ^= 0x80000000; } } else { ix = denormal_subf1(iy, ix); sign ^= 0x80000000; } } else { sign = hx & 0x80000000; ix = hx & 0x7fffffff; iy = hy & 0x7fffffff; if (iy < 0x00800000) ix = denormal_addf1(ix, iy); else ix = denormal_addf1(iy, ix); } return sign | ix; } /* ix - iy where iy: denormal and ix, iy >= 0 */ static long long denormal_subd1(unsigned long long ix, unsigned long long iy) { long long frac; int exp; if (ix < 0x0010000000000000LL) return ix - iy; exp = (ix & 0x7ff0000000000000LL) >> 52; if (exp - 1 > 63) return ix; iy >>= exp - 1; if (iy == 0) return ix; frac = (ix & 0x000fffffffffffffLL) | 0x0010000000000000LL; frac -= iy; while (frac < 0x0010000000000000LL) { if (--exp == 0) return frac; frac <<= 1; } return ((long long)exp << 52) | (frac & 0x000fffffffffffffLL); } /* ix + iy where iy: denormal and ix, iy >= 0 */ static long long denormal_addd1(unsigned long long ix, unsigned long long iy) { long long frac; long long exp; if (ix < 0x0010000000000000LL) return ix + iy; exp = (ix & 0x7ff0000000000000LL) >> 52; if (exp - 1 > 63) return ix; iy >>= exp - 1; if (iy == 0) return ix; frac = (ix & 0x000fffffffffffffLL) | 0x0010000000000000LL; frac += iy; if (frac >= 0x0020000000000000LL) { frac >>= 1; ++exp; } return (exp << 52) | (frac & 0x000fffffffffffffLL); } static long long denormal_addd(long long hx, long long hy) { unsigned long long ix, iy; long long sign; if ((hx ^ hy) & 0x8000000000000000LL) { sign = hx & 0x8000000000000000LL; ix = hx & 0x7fffffffffffffffLL; iy = hy & 0x7fffffffffffffffLL; if (iy < 0x0010000000000000LL) { ix = denormal_subd1(ix, iy); if ((int) ix < 0) { 
ix = -ix; sign ^= 0x8000000000000000LL; } } else { ix = denormal_subd1(iy, ix); sign ^= 0x8000000000000000LL; } } else { sign = hx & 0x8000000000000000LL; ix = hx & 0x7fffffffffffffffLL; iy = hy & 0x7fffffffffffffffLL; if (iy < 0x0010000000000000LL) ix = denormal_addd1(ix, iy); else ix = denormal_addd1(iy, ix); } return sign | ix; } /** * denormal_to_double - Given denormalized float number, * store double float * * @fpu: Pointer to sh_fpu_hard structure * @n: Index to FP register */ static void denormal_to_double (struct sh_fpu_hard_struct *fpu, int n) { unsigned long du, dl; unsigned long x = fpu->fpul; int exp = 1023 - 126; if (x != 0 && (x & 0x7f800000) == 0) { du = (x & 0x80000000); while ((x & 0x00800000) == 0) { x <<= 1; exp--; } x &= 0x007fffff; du |= (exp << 20) | (x >> 3); dl = x << 29; fpu->fp_regs[n] = du; fpu->fp_regs[n+1] = dl; } } /** * ieee_fpe_handler - Handle denormalized number exception * * @regs: Pointer to register structure * * Returns 1 when it's handled (should not cause exception). 
*/ static int ieee_fpe_handler (struct pt_regs *regs) { unsigned short insn = *(unsigned short *) regs->pc; unsigned short finsn; unsigned long nextpc; int nib[4] = { (insn >> 12) & 0xf, (insn >> 8) & 0xf, (insn >> 4) & 0xf, insn & 0xf}; if (nib[0] == 0xb || (nib[0] == 0x4 && nib[2] == 0x0 && nib[3] == 0xb)) /* bsr & jsr */ regs->pr = regs->pc + 4; if (nib[0] == 0xa || nib[0] == 0xb) { /* bra & bsr */ nextpc = regs->pc + 4 + ((short) ((insn & 0xfff) << 4) >> 3); finsn = *(unsigned short *) (regs->pc + 2); } else if (nib[0] == 0x8 && nib[1] == 0xd) { /* bt/s */ if (regs->sr & 1) nextpc = regs->pc + 4 + ((char) (insn & 0xff) << 1); else nextpc = regs->pc + 4; finsn = *(unsigned short *) (regs->pc + 2); } else if (nib[0] == 0x8 && nib[1] == 0xf) { /* bf/s */ if (regs->sr & 1) nextpc = regs->pc + 4; else nextpc = regs->pc + 4 + ((char) (insn & 0xff) << 1); finsn = *(unsigned short *) (regs->pc + 2); } else if (nib[0] == 0x4 && nib[3] == 0xb && (nib[2] == 0x0 || nib[2] == 0x2)) { /* jmp & jsr */ nextpc = regs->regs[nib[1]]; finsn = *(unsigned short *) (regs->pc + 2); } else if (nib[0] == 0x0 && nib[3] == 0x3 && (nib[2] == 0x0 || nib[2] == 0x2)) { /* braf & bsrf */ nextpc = regs->pc + 4 + regs->regs[nib[1]]; finsn = *(unsigned short *) (regs->pc + 2); } else if (insn == 0x000b) { /* rts */ nextpc = regs->pr; finsn = *(unsigned short *) (regs->pc + 2); } else { nextpc = regs->pc + 2; finsn = insn; } #define FPSCR_FPU_ERROR (1 << 17) if ((finsn & 0xf1ff) == 0xf0ad) { /* fcnvsd */ struct task_struct *tsk = current; if ((tsk->thread.fpu.hard.fpscr & FPSCR_FPU_ERROR)) { /* FPU error */ denormal_to_double (&tsk->thread.fpu.hard, (finsn >> 8) & 0xf); } else return 0; regs->pc = nextpc; return 1; } else if ((finsn & 0xf00f) == 0xf002) { /* fmul */ struct task_struct *tsk = current; int fpscr; int n, m, prec; unsigned int hx, hy; n = (finsn >> 8) & 0xf; m = (finsn >> 4) & 0xf; hx = tsk->thread.fpu.hard.fp_regs[n]; hy = tsk->thread.fpu.hard.fp_regs[m]; fpscr = 
tsk->thread.fpu.hard.fpscr; prec = fpscr & (1 << 19); if ((fpscr & FPSCR_FPU_ERROR) && (prec && ((hx & 0x7fffffff) < 0x00100000 || (hy & 0x7fffffff) < 0x00100000))) { long long llx, lly; /* FPU error because of denormal */ llx = ((long long) hx << 32) | tsk->thread.fpu.hard.fp_regs[n+1]; lly = ((long long) hy << 32) | tsk->thread.fpu.hard.fp_regs[m+1]; if ((hx & 0x7fffffff) >= 0x00100000) llx = denormal_muld(lly, llx); else llx = denormal_muld(llx, lly); tsk->thread.fpu.hard.fp_regs[n] = llx >> 32; tsk->thread.fpu.hard.fp_regs[n+1] = llx & 0xffffffff; } else if ((fpscr & FPSCR_FPU_ERROR) && (!prec && ((hx & 0x7fffffff) < 0x00800000 || (hy & 0x7fffffff) < 0x00800000))) { /* FPU error because of denormal */ if ((hx & 0x7fffffff) >= 0x00800000) hx = denormal_mulf(hy, hx); else hx = denormal_mulf(hx, hy); tsk->thread.fpu.hard.fp_regs[n] = hx; } else return 0; regs->pc = nextpc; return 1; } else if ((finsn & 0xf00e) == 0xf000) { /* fadd, fsub */ struct task_struct *tsk = current; int fpscr; int n, m, prec; unsigned int hx, hy; n = (finsn >> 8) & 0xf; m = (finsn >> 4) & 0xf; hx = tsk->thread.fpu.hard.fp_regs[n]; hy = tsk->thread.fpu.hard.fp_regs[m]; fpscr = tsk->thread.fpu.hard.fpscr; prec = fpscr & (1 << 19); if ((fpscr & FPSCR_FPU_ERROR) && (prec && ((hx & 0x7fffffff) < 0x00100000 || (hy & 0x7fffffff) < 0x00100000))) { long long llx, lly; /* FPU error because of denormal */ llx = ((long long) hx << 32) | tsk->thread.fpu.hard.fp_regs[n+1]; lly = ((long long) hy << 32) | tsk->thread.fpu.hard.fp_regs[m+1]; if ((finsn & 0xf00f) == 0xf000) llx = denormal_addd(llx, lly); else llx = denormal_addd(llx, lly ^ (1LL << 63)); tsk->thread.fpu.hard.fp_regs[n] = llx >> 32; tsk->thread.fpu.hard.fp_regs[n+1] = llx & 0xffffffff; } else if ((fpscr & FPSCR_FPU_ERROR) && (!prec && ((hx & 0x7fffffff) < 0x00800000 || (hy & 0x7fffffff) < 0x00800000))) { /* FPU error because of denormal */ if ((finsn & 0xf00f) == 0xf000) hx = denormal_addf(hx, hy); else hx = denormal_addf(hx, hy ^ 0x80000000); 
tsk->thread.fpu.hard.fp_regs[n] = hx; } else return 0; regs->pc = nextpc; return 1; } return 0; } BUILD_TRAP_HANDLER(fpu_error) { struct task_struct *tsk = current; TRAP_HANDLER_DECL; save_fpu(tsk, regs); if (ieee_fpe_handler(regs)) { tsk->thread.fpu.hard.fpscr &= ~(FPSCR_CAUSE_MASK | FPSCR_FLAG_MASK); grab_fpu(regs); restore_fpu(tsk); set_tsk_thread_flag(tsk, TIF_USEDFPU); return; } force_sig(SIGFPE, tsk); } BUILD_TRAP_HANDLER(fpu_state_restore) { struct task_struct *tsk = current; TRAP_HANDLER_DECL; grab_fpu(regs); if (!user_mode(regs)) { printk(KERN_ERR "BUG: FPU is used in kernel mode.\n"); return; } if (used_math()) { /* Using the FPU again. */ restore_fpu(tsk); } else { /* First time FPU user. */ fpu_init(); set_used_math(); } set_tsk_thread_flag(tsk, TIF_USEDFPU); }
gpl-2.0
bhundven/blastoff_kernel_samsung_galaxys4g
arch/arm/mach-s5pv210/setup-fb-24bpp.c
727
1852
/* linux/arch/arm/plat-s5pv210/setup-fb-24bpp.c * * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd. * http://www.samsung.com/ * * Base s5pv210 setup information for 24bpp LCD framebuffer * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/fb.h> #include <mach/regs-fb.h> #include <mach/gpio.h> #include <mach/map.h> #include <plat/fb.h> #include <mach/regs-clock.h> #include <plat/gpio-cfg.h> void s5pv210_fb_gpio_setup_24bpp(void) { unsigned int gpio = 0; for (gpio = S5PV210_GPF0(0); gpio <= S5PV210_GPF0(7); gpio++) { s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(2)); s3c_gpio_setpull(gpio, S3C_GPIO_PULL_NONE); s5p_gpio_set_drvstr(gpio, S5P_GPIO_DRVSTR_LV4); } for (gpio = S5PV210_GPF1(0); gpio <= S5PV210_GPF1(7); gpio++) { s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(2)); s3c_gpio_setpull(gpio, S3C_GPIO_PULL_NONE); s5p_gpio_set_drvstr(gpio, S5P_GPIO_DRVSTR_LV4); } for (gpio = S5PV210_GPF2(0); gpio <= S5PV210_GPF2(7); gpio++) { s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(2)); s3c_gpio_setpull(gpio, S3C_GPIO_PULL_NONE); s5p_gpio_set_drvstr(gpio, S5P_GPIO_DRVSTR_LV4); } for (gpio = S5PV210_GPF3(0); gpio <= S5PV210_GPF3(3); gpio++) { s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(2)); s3c_gpio_setpull(gpio, S3C_GPIO_PULL_NONE); s5p_gpio_set_drvstr(gpio, S5P_GPIO_DRVSTR_LV4); } /* Set DISPLAY_CONTROL register for Display path selection. * * ouput | RGB | I80 | ITU * ----------------------------------- * 00 | MIE | FIMD | FIMD * 01 | MDNIE | MDNIE | FIMD * 10 | FIMD | FIMD | FIMD * 11 | FIMD | FIMD | FIMD */ writel(0x2, S5P_MDNIE_SEL); }
gpl-2.0
loverlucia/linux-3.10.101
arch/arm/mach-ks8695/time.c
1239
4865
/*
 * arch/arm/mach-ks8695/time.c
 *
 * Copyright (C) 2006 Ben Dooks <ben@simtec.co.uk>
 * Copyright (C) 2006 Simtec Electronics
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/io.h>
#include <linux/clockchips.h>

#include <asm/mach/time.h>
#include <asm/system_misc.h>

#include <mach/regs-irq.h>

#include "generic.h"

#define KS8695_TMR_OFFSET	(0xF0000 + 0xE400)
#define KS8695_TMR_VA		(KS8695_IO_VA + KS8695_TMR_OFFSET)
#define KS8695_TMR_PA		(KS8695_IO_PA + KS8695_TMR_OFFSET)

/*
 * Timer registers
 */
#define KS8695_TMCON		(0x00)		/* Timer Control Register */
#define KS8695_T1TC		(0x04)		/* Timer 1 Timeout Count Register */
#define KS8695_T0TC		(0x08)		/* Timer 0 Timeout Count Register */
#define KS8695_T1PD		(0x0C)		/* Timer 1 Pulse Count Register */
#define KS8695_T0PD		(0x10)		/* Timer 0 Pulse Count Register */

/* Timer Control Register */
#define TMCON_T1EN		(1 << 1)	/* Timer 1 Enable */
#define TMCON_T0EN		(1 << 0)	/* Timer 0 Enable */

/* Timer0 Timeout Counter Register */
#define T0TC_WATCHDOG		(0xff)		/* Enable watchdog mode */

/*
 * clockevent set_mode hook: program timer 1 for the periodic tick.
 *
 * Fix: the original tested "mode == CLOCK_EVT_FEAT_PERIODIC".
 * CLOCK_EVT_FEAT_PERIODIC is a *features* bitmask constant (value 1),
 * not a member of enum clock_event_mode; numerically it equals
 * CLOCK_EVT_MODE_SHUTDOWN, so the periodic rate was programmed on
 * shutdown and never when periodic mode was actually requested.
 * Compare against CLOCK_EVT_MODE_PERIODIC instead.
 */
static void ks8695_set_mode(enum clock_event_mode mode,
			    struct clock_event_device *evt)
{
	u32 tmcon;

	if (mode == CLOCK_EVT_MODE_PERIODIC) {
		u32 rate = DIV_ROUND_CLOSEST(KS8695_CLOCK_RATE, HZ);
		/* The interval is split across the timeout and pulse counters */
		u32 half = DIV_ROUND_CLOSEST(rate, 2);

		/* Disable timer 1 */
		tmcon = readl_relaxed(KS8695_TMR_VA + KS8695_TMCON);
		tmcon &= ~TMCON_T1EN;
		writel_relaxed(tmcon, KS8695_TMR_VA + KS8695_TMCON);

		/* Both registers need to count down */
		writel_relaxed(half, KS8695_TMR_VA + KS8695_T1TC);
		writel_relaxed(half, KS8695_TMR_VA + KS8695_T1PD);

		/* Re-enable timer1 */
		tmcon |= TMCON_T1EN;
		writel_relaxed(tmcon, KS8695_TMR_VA + KS8695_TMCON);
	}
}

/*
 * clockevent set_next_event hook: fire one interrupt after @cycles.
 * As above, half the interval goes into each of T1TC and T1PD.
 */
static int ks8695_set_next_event(unsigned long cycles,
				 struct clock_event_device *evt)
{
	u32 half = DIV_ROUND_CLOSEST(cycles, 2);
	u32 tmcon;

	/* Disable timer 1 */
	tmcon = readl_relaxed(KS8695_TMR_VA + KS8695_TMCON);
	tmcon &= ~TMCON_T1EN;
	writel_relaxed(tmcon, KS8695_TMR_VA + KS8695_TMCON);

	/* Both registers need to count down */
	writel_relaxed(half, KS8695_TMR_VA + KS8695_T1TC);
	writel_relaxed(half, KS8695_TMR_VA + KS8695_T1PD);

	/* Re-enable timer1 */
	tmcon |= TMCON_T1EN;
	writel_relaxed(tmcon, KS8695_TMR_VA + KS8695_TMCON);

	return 0;
}

static struct clock_event_device clockevent_ks8695 = {
	.name		= "ks8695_t1tc",
	/* Reasonably fast and accurate clock event */
	.rating		= 300,
	.features	= CLOCK_EVT_FEAT_ONESHOT |
			  CLOCK_EVT_FEAT_PERIODIC,
	.set_next_event	= ks8695_set_next_event,
	.set_mode	= ks8695_set_mode,
};

/*
 * IRQ handler for the timer.
 */
static irqreturn_t ks8695_timer_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *evt = &clockevent_ks8695;

	evt->event_handler(evt);
	return IRQ_HANDLED;
}

static struct irqaction ks8695_timer_irq = {
	.name		= "ks8695_tick",
	.flags		= IRQF_DISABLED | IRQF_TIMER,
	.handler	= ks8695_timer_interrupt,
};

static void ks8695_timer_setup(void)
{
	unsigned long tmcon;

	/* Disable timer 0 and 1 */
	tmcon = readl_relaxed(KS8695_TMR_VA + KS8695_TMCON);
	tmcon &= ~TMCON_T0EN;
	tmcon &= ~TMCON_T1EN;
	writel_relaxed(tmcon, KS8695_TMR_VA + KS8695_TMCON);

	/*
	 * Use timer 1 to fire IRQs on the timeline, minimum 2 cycles
	 * (one on each counter) maximum 2*2^32, but the API will only
	 * accept up to a 32bit full word (0xFFFFFFFFU).
	 */
	clockevents_config_and_register(&clockevent_ks8695,
					KS8695_CLOCK_RATE, 2,
					0xFFFFFFFFU);
}

void __init ks8695_timer_init(void)
{
	ks8695_timer_setup();

	/* Enable timer interrupts */
	setup_irq(KS8695_IRQ_TIMER1, &ks8695_timer_irq);
}

/*
 * Machine restart: 's' requests a soft restart; otherwise arm timer 0
 * as a watchdog with a short timeout so the SoC resets itself.
 */
void ks8695_restart(char mode, const char *cmd)
{
	unsigned int reg;

	if (mode == 's')
		soft_restart(0);

	/* disable timer0 */
	reg = readl_relaxed(KS8695_TMR_VA + KS8695_TMCON);
	writel_relaxed(reg & ~TMCON_T0EN, KS8695_TMR_VA + KS8695_TMCON);

	/* enable watchdog mode */
	writel_relaxed((10 << 8) | T0TC_WATCHDOG, KS8695_TMR_VA + KS8695_T0TC);

	/* re-enable timer0 */
	writel_relaxed(reg | TMCON_T0EN, KS8695_TMR_VA + KS8695_TMCON);
}
gpl-2.0
sssemil/kernel_sniper-ICS
fs/ocfs2/alloc.c
1239
191459
/* -*- mode: c; c-basic-offset: 8; -*- * vim: noexpandtab sw=8 ts=8 sts=0: * * alloc.c * * Extent allocs and frees * * Copyright (C) 2002, 2004 Oracle. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 021110-1307, USA. */ #include <linux/fs.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/highmem.h> #include <linux/swap.h> #include <linux/quotaops.h> #include <linux/blkdev.h> #include <cluster/masklog.h> #include "ocfs2.h" #include "alloc.h" #include "aops.h" #include "blockcheck.h" #include "dlmglue.h" #include "extent_map.h" #include "inode.h" #include "journal.h" #include "localalloc.h" #include "suballoc.h" #include "sysfile.h" #include "file.h" #include "super.h" #include "uptodate.h" #include "xattr.h" #include "refcounttree.h" #include "ocfs2_trace.h" #include "buffer_head_io.h" enum ocfs2_contig_type { CONTIG_NONE = 0, CONTIG_LEFT, CONTIG_RIGHT, CONTIG_LEFTRIGHT, }; static enum ocfs2_contig_type ocfs2_extent_rec_contig(struct super_block *sb, struct ocfs2_extent_rec *ext, struct ocfs2_extent_rec *insert_rec); /* * Operations for a specific extent tree type. * * To implement an on-disk btree (extent tree) type in ocfs2, add * an ocfs2_extent_tree_operations structure and the matching * ocfs2_init_<thingy>_extent_tree() function. 
That's pretty much it * for the allocation portion of the extent tree. */ struct ocfs2_extent_tree_operations { /* * last_eb_blk is the block number of the right most leaf extent * block. Most on-disk structures containing an extent tree store * this value for fast access. The ->eo_set_last_eb_blk() and * ->eo_get_last_eb_blk() operations access this value. They are * both required. */ void (*eo_set_last_eb_blk)(struct ocfs2_extent_tree *et, u64 blkno); u64 (*eo_get_last_eb_blk)(struct ocfs2_extent_tree *et); /* * The on-disk structure usually keeps track of how many total * clusters are stored in this extent tree. This function updates * that value. new_clusters is the delta, and must be * added to the total. Required. */ void (*eo_update_clusters)(struct ocfs2_extent_tree *et, u32 new_clusters); /* * If this extent tree is supported by an extent map, insert * a record into the map. */ void (*eo_extent_map_insert)(struct ocfs2_extent_tree *et, struct ocfs2_extent_rec *rec); /* * If this extent tree is supported by an extent map, truncate the * map to clusters, */ void (*eo_extent_map_truncate)(struct ocfs2_extent_tree *et, u32 clusters); /* * If ->eo_insert_check() exists, it is called before rec is * inserted into the extent tree. It is optional. */ int (*eo_insert_check)(struct ocfs2_extent_tree *et, struct ocfs2_extent_rec *rec); int (*eo_sanity_check)(struct ocfs2_extent_tree *et); /* * -------------------------------------------------------------- * The remaining are internal to ocfs2_extent_tree and don't have * accessor functions */ /* * ->eo_fill_root_el() takes et->et_object and sets et->et_root_el. * It is required. */ void (*eo_fill_root_el)(struct ocfs2_extent_tree *et); /* * ->eo_fill_max_leaf_clusters sets et->et_max_leaf_clusters if * it exists. If it does not, et->et_max_leaf_clusters is set * to 0 (unlimited). Optional. 
*/ void (*eo_fill_max_leaf_clusters)(struct ocfs2_extent_tree *et); /* * ->eo_extent_contig test whether the 2 ocfs2_extent_rec * are contiguous or not. Optional. Don't need to set it if use * ocfs2_extent_rec as the tree leaf. */ enum ocfs2_contig_type (*eo_extent_contig)(struct ocfs2_extent_tree *et, struct ocfs2_extent_rec *ext, struct ocfs2_extent_rec *insert_rec); }; /* * Pre-declare ocfs2_dinode_et_ops so we can use it as a sanity check * in the methods. */ static u64 ocfs2_dinode_get_last_eb_blk(struct ocfs2_extent_tree *et); static void ocfs2_dinode_set_last_eb_blk(struct ocfs2_extent_tree *et, u64 blkno); static void ocfs2_dinode_update_clusters(struct ocfs2_extent_tree *et, u32 clusters); static void ocfs2_dinode_extent_map_insert(struct ocfs2_extent_tree *et, struct ocfs2_extent_rec *rec); static void ocfs2_dinode_extent_map_truncate(struct ocfs2_extent_tree *et, u32 clusters); static int ocfs2_dinode_insert_check(struct ocfs2_extent_tree *et, struct ocfs2_extent_rec *rec); static int ocfs2_dinode_sanity_check(struct ocfs2_extent_tree *et); static void ocfs2_dinode_fill_root_el(struct ocfs2_extent_tree *et); static struct ocfs2_extent_tree_operations ocfs2_dinode_et_ops = { .eo_set_last_eb_blk = ocfs2_dinode_set_last_eb_blk, .eo_get_last_eb_blk = ocfs2_dinode_get_last_eb_blk, .eo_update_clusters = ocfs2_dinode_update_clusters, .eo_extent_map_insert = ocfs2_dinode_extent_map_insert, .eo_extent_map_truncate = ocfs2_dinode_extent_map_truncate, .eo_insert_check = ocfs2_dinode_insert_check, .eo_sanity_check = ocfs2_dinode_sanity_check, .eo_fill_root_el = ocfs2_dinode_fill_root_el, }; static void ocfs2_dinode_set_last_eb_blk(struct ocfs2_extent_tree *et, u64 blkno) { struct ocfs2_dinode *di = et->et_object; BUG_ON(et->et_ops != &ocfs2_dinode_et_ops); di->i_last_eb_blk = cpu_to_le64(blkno); } static u64 ocfs2_dinode_get_last_eb_blk(struct ocfs2_extent_tree *et) { struct ocfs2_dinode *di = et->et_object; BUG_ON(et->et_ops != &ocfs2_dinode_et_ops); return 
le64_to_cpu(di->i_last_eb_blk); } static void ocfs2_dinode_update_clusters(struct ocfs2_extent_tree *et, u32 clusters) { struct ocfs2_inode_info *oi = cache_info_to_inode(et->et_ci); struct ocfs2_dinode *di = et->et_object; le32_add_cpu(&di->i_clusters, clusters); spin_lock(&oi->ip_lock); oi->ip_clusters = le32_to_cpu(di->i_clusters); spin_unlock(&oi->ip_lock); } static void ocfs2_dinode_extent_map_insert(struct ocfs2_extent_tree *et, struct ocfs2_extent_rec *rec) { struct inode *inode = &cache_info_to_inode(et->et_ci)->vfs_inode; ocfs2_extent_map_insert_rec(inode, rec); } static void ocfs2_dinode_extent_map_truncate(struct ocfs2_extent_tree *et, u32 clusters) { struct inode *inode = &cache_info_to_inode(et->et_ci)->vfs_inode; ocfs2_extent_map_trunc(inode, clusters); } static int ocfs2_dinode_insert_check(struct ocfs2_extent_tree *et, struct ocfs2_extent_rec *rec) { struct ocfs2_inode_info *oi = cache_info_to_inode(et->et_ci); struct ocfs2_super *osb = OCFS2_SB(oi->vfs_inode.i_sb); BUG_ON(oi->ip_dyn_features & OCFS2_INLINE_DATA_FL); mlog_bug_on_msg(!ocfs2_sparse_alloc(osb) && (oi->ip_clusters != le32_to_cpu(rec->e_cpos)), "Device %s, asking for sparse allocation: inode %llu, " "cpos %u, clusters %u\n", osb->dev_str, (unsigned long long)oi->ip_blkno, rec->e_cpos, oi->ip_clusters); return 0; } static int ocfs2_dinode_sanity_check(struct ocfs2_extent_tree *et) { struct ocfs2_dinode *di = et->et_object; BUG_ON(et->et_ops != &ocfs2_dinode_et_ops); BUG_ON(!OCFS2_IS_VALID_DINODE(di)); return 0; } static void ocfs2_dinode_fill_root_el(struct ocfs2_extent_tree *et) { struct ocfs2_dinode *di = et->et_object; et->et_root_el = &di->id2.i_list; } static void ocfs2_xattr_value_fill_root_el(struct ocfs2_extent_tree *et) { struct ocfs2_xattr_value_buf *vb = et->et_object; et->et_root_el = &vb->vb_xv->xr_list; } static void ocfs2_xattr_value_set_last_eb_blk(struct ocfs2_extent_tree *et, u64 blkno) { struct ocfs2_xattr_value_buf *vb = et->et_object; vb->vb_xv->xr_last_eb_blk = 
cpu_to_le64(blkno); } static u64 ocfs2_xattr_value_get_last_eb_blk(struct ocfs2_extent_tree *et) { struct ocfs2_xattr_value_buf *vb = et->et_object; return le64_to_cpu(vb->vb_xv->xr_last_eb_blk); } static void ocfs2_xattr_value_update_clusters(struct ocfs2_extent_tree *et, u32 clusters) { struct ocfs2_xattr_value_buf *vb = et->et_object; le32_add_cpu(&vb->vb_xv->xr_clusters, clusters); } static struct ocfs2_extent_tree_operations ocfs2_xattr_value_et_ops = { .eo_set_last_eb_blk = ocfs2_xattr_value_set_last_eb_blk, .eo_get_last_eb_blk = ocfs2_xattr_value_get_last_eb_blk, .eo_update_clusters = ocfs2_xattr_value_update_clusters, .eo_fill_root_el = ocfs2_xattr_value_fill_root_el, }; static void ocfs2_xattr_tree_fill_root_el(struct ocfs2_extent_tree *et) { struct ocfs2_xattr_block *xb = et->et_object; et->et_root_el = &xb->xb_attrs.xb_root.xt_list; } static void ocfs2_xattr_tree_fill_max_leaf_clusters(struct ocfs2_extent_tree *et) { struct super_block *sb = ocfs2_metadata_cache_get_super(et->et_ci); et->et_max_leaf_clusters = ocfs2_clusters_for_bytes(sb, OCFS2_MAX_XATTR_TREE_LEAF_SIZE); } static void ocfs2_xattr_tree_set_last_eb_blk(struct ocfs2_extent_tree *et, u64 blkno) { struct ocfs2_xattr_block *xb = et->et_object; struct ocfs2_xattr_tree_root *xt = &xb->xb_attrs.xb_root; xt->xt_last_eb_blk = cpu_to_le64(blkno); } static u64 ocfs2_xattr_tree_get_last_eb_blk(struct ocfs2_extent_tree *et) { struct ocfs2_xattr_block *xb = et->et_object; struct ocfs2_xattr_tree_root *xt = &xb->xb_attrs.xb_root; return le64_to_cpu(xt->xt_last_eb_blk); } static void ocfs2_xattr_tree_update_clusters(struct ocfs2_extent_tree *et, u32 clusters) { struct ocfs2_xattr_block *xb = et->et_object; le32_add_cpu(&xb->xb_attrs.xb_root.xt_clusters, clusters); } static struct ocfs2_extent_tree_operations ocfs2_xattr_tree_et_ops = { .eo_set_last_eb_blk = ocfs2_xattr_tree_set_last_eb_blk, .eo_get_last_eb_blk = ocfs2_xattr_tree_get_last_eb_blk, .eo_update_clusters = ocfs2_xattr_tree_update_clusters, 
.eo_fill_root_el = ocfs2_xattr_tree_fill_root_el, .eo_fill_max_leaf_clusters = ocfs2_xattr_tree_fill_max_leaf_clusters, }; static void ocfs2_dx_root_set_last_eb_blk(struct ocfs2_extent_tree *et, u64 blkno) { struct ocfs2_dx_root_block *dx_root = et->et_object; dx_root->dr_last_eb_blk = cpu_to_le64(blkno); } static u64 ocfs2_dx_root_get_last_eb_blk(struct ocfs2_extent_tree *et) { struct ocfs2_dx_root_block *dx_root = et->et_object; return le64_to_cpu(dx_root->dr_last_eb_blk); } static void ocfs2_dx_root_update_clusters(struct ocfs2_extent_tree *et, u32 clusters) { struct ocfs2_dx_root_block *dx_root = et->et_object; le32_add_cpu(&dx_root->dr_clusters, clusters); } static int ocfs2_dx_root_sanity_check(struct ocfs2_extent_tree *et) { struct ocfs2_dx_root_block *dx_root = et->et_object; BUG_ON(!OCFS2_IS_VALID_DX_ROOT(dx_root)); return 0; } static void ocfs2_dx_root_fill_root_el(struct ocfs2_extent_tree *et) { struct ocfs2_dx_root_block *dx_root = et->et_object; et->et_root_el = &dx_root->dr_list; } static struct ocfs2_extent_tree_operations ocfs2_dx_root_et_ops = { .eo_set_last_eb_blk = ocfs2_dx_root_set_last_eb_blk, .eo_get_last_eb_blk = ocfs2_dx_root_get_last_eb_blk, .eo_update_clusters = ocfs2_dx_root_update_clusters, .eo_sanity_check = ocfs2_dx_root_sanity_check, .eo_fill_root_el = ocfs2_dx_root_fill_root_el, }; static void ocfs2_refcount_tree_fill_root_el(struct ocfs2_extent_tree *et) { struct ocfs2_refcount_block *rb = et->et_object; et->et_root_el = &rb->rf_list; } static void ocfs2_refcount_tree_set_last_eb_blk(struct ocfs2_extent_tree *et, u64 blkno) { struct ocfs2_refcount_block *rb = et->et_object; rb->rf_last_eb_blk = cpu_to_le64(blkno); } static u64 ocfs2_refcount_tree_get_last_eb_blk(struct ocfs2_extent_tree *et) { struct ocfs2_refcount_block *rb = et->et_object; return le64_to_cpu(rb->rf_last_eb_blk); } static void ocfs2_refcount_tree_update_clusters(struct ocfs2_extent_tree *et, u32 clusters) { struct ocfs2_refcount_block *rb = et->et_object; 
	le32_add_cpu(&rb->rf_clusters, clusters);
}

/*
 * Refcount tree records are never merged with their neighbours, so the
 * contiguity check always reports CONTIG_NONE.
 */
static enum ocfs2_contig_type
ocfs2_refcount_tree_extent_contig(struct ocfs2_extent_tree *et,
				  struct ocfs2_extent_rec *ext,
				  struct ocfs2_extent_rec *insert_rec)
{
	return CONTIG_NONE;
}

static struct ocfs2_extent_tree_operations ocfs2_refcount_tree_et_ops = {
	.eo_set_last_eb_blk	= ocfs2_refcount_tree_set_last_eb_blk,
	.eo_get_last_eb_blk	= ocfs2_refcount_tree_get_last_eb_blk,
	.eo_update_clusters	= ocfs2_refcount_tree_update_clusters,
	.eo_fill_root_el	= ocfs2_refcount_tree_fill_root_el,
	.eo_extent_contig	= ocfs2_refcount_tree_extent_contig,
};

/*
 * Common initializer behind all the ocfs2_init_*_extent_tree() wrappers
 * below.  Records the root buffer, caching info, journal access function
 * and operations vector, then has the ops fill in the root extent list
 * (and, when provided, the max leaf cluster count).
 *
 * If @obj is NULL the tree object defaults to the root buffer's data.
 */
static void __ocfs2_init_extent_tree(struct ocfs2_extent_tree *et,
				     struct ocfs2_caching_info *ci,
				     struct buffer_head *bh,
				     ocfs2_journal_access_func access,
				     void *obj,
				     struct ocfs2_extent_tree_operations *ops)
{
	et->et_ops = ops;
	et->et_root_bh = bh;
	et->et_ci = ci;
	et->et_root_journal_access = access;
	if (!obj)
		obj = (void *)bh->b_data;
	et->et_object = obj;

	et->et_ops->eo_fill_root_el(et);
	if (!et->et_ops->eo_fill_max_leaf_clusters)
		et->et_max_leaf_clusters = 0;
	else
		et->et_ops->eo_fill_max_leaf_clusters(et);
}

void ocfs2_init_dinode_extent_tree(struct ocfs2_extent_tree *et,
				   struct ocfs2_caching_info *ci,
				   struct buffer_head *bh)
{
	__ocfs2_init_extent_tree(et, ci, bh, ocfs2_journal_access_di,
				 NULL, &ocfs2_dinode_et_ops);
}

void ocfs2_init_xattr_tree_extent_tree(struct ocfs2_extent_tree *et,
				       struct ocfs2_caching_info *ci,
				       struct buffer_head *bh)
{
	__ocfs2_init_extent_tree(et, ci, bh, ocfs2_journal_access_xb,
				 NULL, &ocfs2_xattr_tree_et_ops);
}

void ocfs2_init_xattr_value_extent_tree(struct ocfs2_extent_tree *et,
					struct ocfs2_caching_info *ci,
					struct ocfs2_xattr_value_buf *vb)
{
	/* xattr values supply their own buffer and journal access func. */
	__ocfs2_init_extent_tree(et, ci, vb->vb_bh, vb->vb_access, vb,
				 &ocfs2_xattr_value_et_ops);
}

void ocfs2_init_dx_root_extent_tree(struct ocfs2_extent_tree *et,
				    struct ocfs2_caching_info *ci,
				    struct buffer_head *bh)
{
	__ocfs2_init_extent_tree(et, ci, bh, ocfs2_journal_access_dr,
				 NULL, &ocfs2_dx_root_et_ops);
}

void ocfs2_init_refcount_extent_tree(struct ocfs2_extent_tree *et,
				     struct ocfs2_caching_info *ci,
				     struct buffer_head *bh)
{
	__ocfs2_init_extent_tree(et, ci, bh, ocfs2_journal_access_rb,
				 NULL, &ocfs2_refcount_tree_et_ops);
}

/* Thin dispatchers into the per-tree-type operations vector. */
static inline void ocfs2_et_set_last_eb_blk(struct ocfs2_extent_tree *et,
					    u64 new_last_eb_blk)
{
	et->et_ops->eo_set_last_eb_blk(et, new_last_eb_blk);
}

static inline u64 ocfs2_et_get_last_eb_blk(struct ocfs2_extent_tree *et)
{
	return et->et_ops->eo_get_last_eb_blk(et);
}

static inline void ocfs2_et_update_clusters(struct ocfs2_extent_tree *et,
					    u32 clusters)
{
	et->et_ops->eo_update_clusters(et, clusters);
}

/* Optional op: only some tree types mirror changes into an extent map. */
static inline void ocfs2_et_extent_map_insert(struct ocfs2_extent_tree *et,
					      struct ocfs2_extent_rec *rec)
{
	if (et->et_ops->eo_extent_map_insert)
		et->et_ops->eo_extent_map_insert(et, rec);
}

static inline void ocfs2_et_extent_map_truncate(struct ocfs2_extent_tree *et,
						u32 clusters)
{
	if (et->et_ops->eo_extent_map_truncate)
		et->et_ops->eo_extent_map_truncate(et, clusters);
}

static inline int ocfs2_et_root_journal_access(handle_t *handle,
					       struct ocfs2_extent_tree *et,
					       int type)
{
	return et->et_root_journal_access(handle, et->et_ci, et->et_root_bh,
					  type);
}

/*
 * Use the tree-specific contiguity check when one exists, otherwise
 * fall back to the generic leaf-record comparison.
 */
static inline enum ocfs2_contig_type
ocfs2_et_extent_contig(struct ocfs2_extent_tree *et,
		       struct ocfs2_extent_rec *rec,
		       struct ocfs2_extent_rec *insert_rec)
{
	if (et->et_ops->eo_extent_contig)
		return et->et_ops->eo_extent_contig(et, rec, insert_rec);

	return ocfs2_extent_rec_contig(
				ocfs2_metadata_cache_get_super(et->et_ci),
				rec, insert_rec);
}

static inline int ocfs2_et_insert_check(struct ocfs2_extent_tree *et,
					struct ocfs2_extent_rec *rec)
{
	int ret = 0;

	if (et->et_ops->eo_insert_check)
		ret = et->et_ops->eo_insert_check(et, rec);
	return ret;
}

static inline int ocfs2_et_sanity_check(struct ocfs2_extent_tree *et)
{
	int ret = 0;

	if (et->et_ops->eo_sanity_check)
		ret = et->et_ops->eo_sanity_check(et);
	return ret;
}

static int ocfs2_cache_extent_block_free(struct ocfs2_cached_dealloc_ctxt *ctxt,
					 struct ocfs2_extent_block *eb);
static void ocfs2_adjust_rightmost_records(handle_t *handle,
					   struct ocfs2_extent_tree *et,
					   struct ocfs2_path *path,
					   struct ocfs2_extent_rec *insert_rec);
/*
 * Reset the actual path elements so that we can re-use the structure
 * to build another path. Generally, this involves freeing the buffer
 * heads.
 */
void ocfs2_reinit_path(struct ocfs2_path *path, int keep_root)
{
	int i, start = 0, depth = 0;
	struct ocfs2_path_item *node;

	if (keep_root)
		start = 1;

	for(i = start; i < path_num_items(path); i++) {
		node = &path->p_node[i];

		brelse(node->bh);
		node->bh = NULL;
		node->el = NULL;
	}

	/*
	 * Tree depth may change during truncate, or insert. If we're
	 * keeping the root extent list, then make sure that our path
	 * structure reflects the proper depth.
	 */
	if (keep_root)
		depth = le16_to_cpu(path_root_el(path)->l_tree_depth);
	else
		path_root_access(path) = NULL;

	path->p_tree_depth = depth;
}

/* Drop all buffer head references held by @path and free it. */
void ocfs2_free_path(struct ocfs2_path *path)
{
	if (path) {
		ocfs2_reinit_path(path, 0);
		kfree(path);
	}
}

/*
 * All the elements of src into dest. After this call, src could be freed
 * without affecting dest.
 *
 * Both paths should have the same root. Any non-root elements of dest
 * will be freed.
 */
static void ocfs2_cp_path(struct ocfs2_path *dest, struct ocfs2_path *src)
{
	int i;

	BUG_ON(path_root_bh(dest) != path_root_bh(src));
	BUG_ON(path_root_el(dest) != path_root_el(src));
	BUG_ON(path_root_access(dest) != path_root_access(src));

	ocfs2_reinit_path(dest, 1);

	for(i = 1; i < OCFS2_MAX_PATH_DEPTH; i++) {
		dest->p_node[i].bh = src->p_node[i].bh;
		dest->p_node[i].el = src->p_node[i].el;

		/* dest takes its own reference on each shared bh. */
		if (dest->p_node[i].bh)
			get_bh(dest->p_node[i].bh);
	}
}

/*
 * Make the *dest path the same as src and re-initialize src path to
 * have a root only.
 */
static void ocfs2_mv_path(struct ocfs2_path *dest, struct ocfs2_path *src)
{
	int i;

	BUG_ON(path_root_bh(dest) != path_root_bh(src));
	BUG_ON(path_root_access(dest) != path_root_access(src));

	for(i = 1; i < OCFS2_MAX_PATH_DEPTH; i++) {
		/* Reference moves from src to dest; drop dest's old one. */
		brelse(dest->p_node[i].bh);

		dest->p_node[i].bh = src->p_node[i].bh;
		dest->p_node[i].el = src->p_node[i].el;

		src->p_node[i].bh = NULL;
		src->p_node[i].el = NULL;
	}
}

/*
 * Insert an extent block at given index.
 *
 * This will not take an additional reference on eb_bh.
 */
static inline void ocfs2_path_insert_eb(struct ocfs2_path *path, int index,
					struct buffer_head *eb_bh)
{
	struct ocfs2_extent_block *eb = (struct ocfs2_extent_block *)eb_bh->b_data;

	/*
	 * Right now, no root bh is an extent block, so this helps
	 * catch code errors with dinode trees. The assertion can be
	 * safely removed if we ever need to insert extent block
	 * structures at the root.
	 */
	BUG_ON(index == 0);

	path->p_node[index].bh = eb_bh;
	path->p_node[index].el = &eb->h_list;
}

/*
 * Allocate a fresh path rooted at @root_bh (takes a reference on it).
 * Returns NULL on allocation failure.
 */
static struct ocfs2_path *ocfs2_new_path(struct buffer_head *root_bh,
					 struct ocfs2_extent_list *root_el,
					 ocfs2_journal_access_func access)
{
	struct ocfs2_path *path;

	BUG_ON(le16_to_cpu(root_el->l_tree_depth) >= OCFS2_MAX_PATH_DEPTH);

	path = kzalloc(sizeof(*path), GFP_NOFS);
	if (path) {
		path->p_tree_depth = le16_to_cpu(root_el->l_tree_depth);
		get_bh(root_bh);
		path_root_bh(path) = root_bh;
		path_root_el(path) = root_el;
		path_root_access(path) = access;
	}

	return path;
}

struct ocfs2_path *ocfs2_new_path_from_path(struct ocfs2_path *path)
{
	return ocfs2_new_path(path_root_bh(path), path_root_el(path),
			      path_root_access(path));
}

struct ocfs2_path *ocfs2_new_path_from_et(struct ocfs2_extent_tree *et)
{
	return ocfs2_new_path(et->et_root_bh, et->et_root_el,
			      et->et_root_journal_access);
}

/*
 * Journal the buffer at depth idx. All idx>0 are extent_blocks,
 * otherwise it's the root_access function.
 *
 * I don't like the way this function's name looks next to
 * ocfs2_journal_access_path(), but I don't have a better one.
 */
int ocfs2_path_bh_journal_access(handle_t *handle,
				 struct ocfs2_caching_info *ci,
				 struct ocfs2_path *path,
				 int idx)
{
	ocfs2_journal_access_func access = path_root_access(path);

	if (!access)
		access = ocfs2_journal_access;

	if (idx)
		access = ocfs2_journal_access_eb;

	return access(handle, ci, path->p_node[idx].bh,
		      OCFS2_JOURNAL_ACCESS_WRITE);
}

/*
 * Convenience function to journal all components in a path.
 */
int ocfs2_journal_access_path(struct ocfs2_caching_info *ci,
			      handle_t *handle,
			      struct ocfs2_path *path)
{
	int i, ret = 0;

	if (!path)
		goto out;

	for(i = 0; i < path_num_items(path); i++) {
		ret = ocfs2_path_bh_journal_access(handle, ci, path, i);
		if (ret < 0) {
			mlog_errno(ret);
			goto out;
		}
	}

out:
	return ret;
}

/*
 * Return the index of the extent record which contains cluster #v_cluster.
 * -1 is returned if it was not found.
 *
 * Should work fine on interior and exterior nodes.
 */
int ocfs2_search_extent_list(struct ocfs2_extent_list *el, u32 v_cluster)
{
	int ret = -1;
	int i;
	struct ocfs2_extent_rec *rec;
	u32 rec_end, rec_start, clusters;

	for(i = 0; i < le16_to_cpu(el->l_next_free_rec); i++) {
		rec = &el->l_recs[i];

		rec_start = le32_to_cpu(rec->e_cpos);
		clusters = ocfs2_rec_clusters(el, rec);

		rec_end = rec_start + clusters;

		/* Half-open interval: [rec_start, rec_end). */
		if (v_cluster >= rec_start && v_cluster < rec_end) {
			ret = i;
			break;
		}
	}

	return ret;
}

/*
 * NOTE: ocfs2_block_extent_contig(), ocfs2_extents_adjacent() and
 * ocfs2_extent_rec_contig only work properly against leaf nodes!
 */
/* Does @blkno begin exactly where the extent @ext ends on disk? */
static int ocfs2_block_extent_contig(struct super_block *sb,
				     struct ocfs2_extent_rec *ext,
				     u64 blkno)
{
	u64 blk_end = le64_to_cpu(ext->e_blkno);

	blk_end += ocfs2_clusters_to_blocks(sb,
				    le16_to_cpu(ext->e_leaf_clusters));

	return blkno == blk_end;
}

/* Does @right start at the cluster immediately after @left ends? */
static int ocfs2_extents_adjacent(struct ocfs2_extent_rec *left,
				  struct ocfs2_extent_rec *right)
{
	u32 left_range;

	left_range = le32_to_cpu(left->e_cpos) +
		le16_to_cpu(left->e_leaf_clusters);

	return (left_range == le32_to_cpu(right->e_cpos));
}

/*
 * Report whether @insert_rec can be coalesced onto @ext, and on which
 * side.  Both logical (cpos) and physical (blkno) adjacency is required.
 */
static enum ocfs2_contig_type
	ocfs2_extent_rec_contig(struct super_block *sb,
				struct ocfs2_extent_rec *ext,
				struct ocfs2_extent_rec *insert_rec)
{
	u64 blkno = le64_to_cpu(insert_rec->e_blkno);

	/*
	 * Refuse to coalesce extent records with different flag
	 * fields - we don't want to mix unwritten extents with user
	 * data.
	 */
	if (ext->e_flags != insert_rec->e_flags)
		return CONTIG_NONE;

	if (ocfs2_extents_adjacent(ext, insert_rec) &&
	    ocfs2_block_extent_contig(sb, ext, blkno))
			return CONTIG_RIGHT;

	blkno = le64_to_cpu(ext->e_blkno);
	if (ocfs2_extents_adjacent(insert_rec, ext) &&
	    ocfs2_block_extent_contig(sb, insert_rec, blkno))
		return CONTIG_LEFT;

	return CONTIG_NONE;
}

/*
 * NOTE: We can have pretty much any combination of contiguousness and
 * appending.
 *
 * The usefulness of APPEND_TAIL is more in that it lets us know that
 * we'll have to update the path to that leaf.
 */
enum ocfs2_append_type {
	APPEND_NONE = 0,
	APPEND_TAIL,
};

enum ocfs2_split_type {
	SPLIT_NONE = 0,
	SPLIT_LEFT,
	SPLIT_RIGHT,
};

/* Describes how an extent record will be inserted into a tree. */
struct ocfs2_insert_type {
	enum ocfs2_split_type	ins_split;
	enum ocfs2_append_type	ins_appending;
	enum ocfs2_contig_type	ins_contig;
	int			ins_contig_index;
	int			ins_tree_depth;
};

/* State carried through a record-merge operation. */
struct ocfs2_merge_ctxt {
	enum ocfs2_contig_type	c_contig_type;
	int			c_has_empty_extent;
	int			c_split_covers_rec;
};

/*
 * Validate an extent block read from disk: metadata ECC first, then
 * signature, self-referential block number, and fs generation.
 */
static int ocfs2_validate_extent_block(struct super_block *sb,
				       struct buffer_head *bh)
{
	int rc;
	struct ocfs2_extent_block *eb =
		(struct ocfs2_extent_block *)bh->b_data;

	trace_ocfs2_validate_extent_block((unsigned long long)bh->b_blocknr);

	BUG_ON(!buffer_uptodate(bh));

	/*
	 * If the ecc fails, we return the error but otherwise
	 * leave the filesystem running. We know any error is
	 * local to this block.
	 */
	rc = ocfs2_validate_meta_ecc(sb, bh->b_data, &eb->h_check);
	if (rc) {
		mlog(ML_ERROR, "Checksum failed for extent block %llu\n",
		     (unsigned long long)bh->b_blocknr);
		return rc;
	}

	/*
	 * Errors after here are fatal.
	 */

	if (!OCFS2_IS_VALID_EXTENT_BLOCK(eb)) {
		ocfs2_error(sb,
			    "Extent block #%llu has bad signature %.*s",
			    (unsigned long long)bh->b_blocknr, 7,
			    eb->h_signature);
		return -EINVAL;
	}

	if (le64_to_cpu(eb->h_blkno) != bh->b_blocknr) {
		ocfs2_error(sb,
			    "Extent block #%llu has an invalid h_blkno "
			    "of %llu",
			    (unsigned long long)bh->b_blocknr,
			    (unsigned long long)le64_to_cpu(eb->h_blkno));
		return -EINVAL;
	}

	if (le32_to_cpu(eb->h_fs_generation) != OCFS2_SB(sb)->fs_generation) {
		ocfs2_error(sb,
			    "Extent block #%llu has an invalid "
			    "h_fs_generation of #%u",
			    (unsigned long long)bh->b_blocknr,
			    le32_to_cpu(eb->h_fs_generation));
		return -EINVAL;
	}

	return 0;
}

/*
 * Read and validate the extent block at @eb_blkno through the metadata
 * cache.  *bh may be NULL on entry, in which case a fresh buffer head
 * is returned.
 */
int ocfs2_read_extent_block(struct ocfs2_caching_info *ci, u64 eb_blkno,
			    struct buffer_head **bh)
{
	int rc;
	struct buffer_head *tmp = *bh;

	rc = ocfs2_read_block(ci, eb_blkno, &tmp,
			      ocfs2_validate_extent_block);

	/* If ocfs2_read_block() got us a new bh, pass it up.
*/ if (!rc && !*bh) *bh = tmp; return rc; } /* * How many free extents have we got before we need more meta data? */ int ocfs2_num_free_extents(struct ocfs2_super *osb, struct ocfs2_extent_tree *et) { int retval; struct ocfs2_extent_list *el = NULL; struct ocfs2_extent_block *eb; struct buffer_head *eb_bh = NULL; u64 last_eb_blk = 0; el = et->et_root_el; last_eb_blk = ocfs2_et_get_last_eb_blk(et); if (last_eb_blk) { retval = ocfs2_read_extent_block(et->et_ci, last_eb_blk, &eb_bh); if (retval < 0) { mlog_errno(retval); goto bail; } eb = (struct ocfs2_extent_block *) eb_bh->b_data; el = &eb->h_list; } BUG_ON(el->l_tree_depth != 0); retval = le16_to_cpu(el->l_count) - le16_to_cpu(el->l_next_free_rec); bail: brelse(eb_bh); trace_ocfs2_num_free_extents(retval); return retval; } /* expects array to already be allocated * * sets h_signature, h_blkno, h_suballoc_bit, h_suballoc_slot, and * l_count for you */ static int ocfs2_create_new_meta_bhs(handle_t *handle, struct ocfs2_extent_tree *et, int wanted, struct ocfs2_alloc_context *meta_ac, struct buffer_head *bhs[]) { int count, status, i; u16 suballoc_bit_start; u32 num_got; u64 suballoc_loc, first_blkno; struct ocfs2_super *osb = OCFS2_SB(ocfs2_metadata_cache_get_super(et->et_ci)); struct ocfs2_extent_block *eb; count = 0; while (count < wanted) { status = ocfs2_claim_metadata(handle, meta_ac, wanted - count, &suballoc_loc, &suballoc_bit_start, &num_got, &first_blkno); if (status < 0) { mlog_errno(status); goto bail; } for(i = count; i < (num_got + count); i++) { bhs[i] = sb_getblk(osb->sb, first_blkno); if (bhs[i] == NULL) { status = -EIO; mlog_errno(status); goto bail; } ocfs2_set_new_buffer_uptodate(et->et_ci, bhs[i]); status = ocfs2_journal_access_eb(handle, et->et_ci, bhs[i], OCFS2_JOURNAL_ACCESS_CREATE); if (status < 0) { mlog_errno(status); goto bail; } memset(bhs[i]->b_data, 0, osb->sb->s_blocksize); eb = (struct ocfs2_extent_block *) bhs[i]->b_data; /* Ok, setup the minimal stuff here. 
*/ strcpy(eb->h_signature, OCFS2_EXTENT_BLOCK_SIGNATURE); eb->h_blkno = cpu_to_le64(first_blkno); eb->h_fs_generation = cpu_to_le32(osb->fs_generation); eb->h_suballoc_slot = cpu_to_le16(meta_ac->ac_alloc_slot); eb->h_suballoc_loc = cpu_to_le64(suballoc_loc); eb->h_suballoc_bit = cpu_to_le16(suballoc_bit_start); eb->h_list.l_count = cpu_to_le16(ocfs2_extent_recs_per_eb(osb->sb)); suballoc_bit_start++; first_blkno++; /* We'll also be dirtied by the caller, so * this isn't absolutely necessary. */ ocfs2_journal_dirty(handle, bhs[i]); } count += num_got; } status = 0; bail: if (status < 0) { for(i = 0; i < wanted; i++) { brelse(bhs[i]); bhs[i] = NULL; } mlog_errno(status); } return status; } /* * Helper function for ocfs2_add_branch() and ocfs2_shift_tree_depth(). * * Returns the sum of the rightmost extent rec logical offset and * cluster count. * * ocfs2_add_branch() uses this to determine what logical cluster * value should be populated into the leftmost new branch records. * * ocfs2_shift_tree_depth() uses this to determine the # clusters * value for the new topmost tree record. */ static inline u32 ocfs2_sum_rightmost_rec(struct ocfs2_extent_list *el) { int i; i = le16_to_cpu(el->l_next_free_rec) - 1; return le32_to_cpu(el->l_recs[i].e_cpos) + ocfs2_rec_clusters(el, &el->l_recs[i]); } /* * Change range of the branches in the right most path according to the leaf * extent block's rightmost record. 
*/ static int ocfs2_adjust_rightmost_branch(handle_t *handle, struct ocfs2_extent_tree *et) { int status; struct ocfs2_path *path = NULL; struct ocfs2_extent_list *el; struct ocfs2_extent_rec *rec; path = ocfs2_new_path_from_et(et); if (!path) { status = -ENOMEM; return status; } status = ocfs2_find_path(et->et_ci, path, UINT_MAX); if (status < 0) { mlog_errno(status); goto out; } status = ocfs2_extend_trans(handle, path_num_items(path)); if (status < 0) { mlog_errno(status); goto out; } status = ocfs2_journal_access_path(et->et_ci, handle, path); if (status < 0) { mlog_errno(status); goto out; } el = path_leaf_el(path); rec = &el->l_recs[le32_to_cpu(el->l_next_free_rec) - 1]; ocfs2_adjust_rightmost_records(handle, et, path, rec); out: ocfs2_free_path(path); return status; } /* * Add an entire tree branch to our inode. eb_bh is the extent block * to start at, if we don't want to start the branch at the root * structure. * * last_eb_bh is required as we have to update it's next_leaf pointer * for the new last extent block. * * the new branch will be 'empty' in the sense that every block will * contain a single record with cluster count == 0. */ static int ocfs2_add_branch(handle_t *handle, struct ocfs2_extent_tree *et, struct buffer_head *eb_bh, struct buffer_head **last_eb_bh, struct ocfs2_alloc_context *meta_ac) { int status, new_blocks, i; u64 next_blkno, new_last_eb_blk; struct buffer_head *bh; struct buffer_head **new_eb_bhs = NULL; struct ocfs2_extent_block *eb; struct ocfs2_extent_list *eb_el; struct ocfs2_extent_list *el; u32 new_cpos, root_end; BUG_ON(!last_eb_bh || !*last_eb_bh); if (eb_bh) { eb = (struct ocfs2_extent_block *) eb_bh->b_data; el = &eb->h_list; } else el = et->et_root_el; /* we never add a branch to a leaf. 
	 */
	BUG_ON(!el->l_tree_depth);

	/* One new block per tree level below this list. */
	new_blocks = le16_to_cpu(el->l_tree_depth);

	eb = (struct ocfs2_extent_block *)(*last_eb_bh)->b_data;
	new_cpos = ocfs2_sum_rightmost_rec(&eb->h_list);
	root_end = ocfs2_sum_rightmost_rec(et->et_root_el);

	/*
	 * If there is a gap before the root end and the real end
	 * of the righmost leaf block, we need to remove the gap
	 * between new_cpos and root_end first so that the tree
	 * is consistent after we add a new branch(it will start
	 * from new_cpos).
	 */
	if (root_end > new_cpos) {
		trace_ocfs2_adjust_rightmost_branch(
			(unsigned long long)
			ocfs2_metadata_cache_owner(et->et_ci),
			root_end, new_cpos);

		status = ocfs2_adjust_rightmost_branch(handle, et);
		if (status) {
			mlog_errno(status);
			goto bail;
		}
	}

	/* allocate the number of new eb blocks we need */
	new_eb_bhs = kcalloc(new_blocks, sizeof(struct buffer_head *),
			     GFP_KERNEL);
	if (!new_eb_bhs) {
		status = -ENOMEM;
		mlog_errno(status);
		goto bail;
	}

	status = ocfs2_create_new_meta_bhs(handle, et, new_blocks,
					   meta_ac, new_eb_bhs);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	/* Note: new_eb_bhs[new_blocks - 1] is the guy which will be
	 * linked with the rest of the tree.
	 * conversly, new_eb_bhs[0] is the new bottommost leaf.
	 *
	 * when we leave the loop, new_last_eb_blk will point to the
	 * newest leaf, and next_blkno will point to the topmost extent
	 * block. */
	next_blkno = new_last_eb_blk = 0;
	for(i = 0; i < new_blocks; i++) {
		bh = new_eb_bhs[i];
		eb = (struct ocfs2_extent_block *) bh->b_data;
		/* ocfs2_create_new_meta_bhs() should create it right!
		 */
		BUG_ON(!OCFS2_IS_VALID_EXTENT_BLOCK(eb));
		eb_el = &eb->h_list;

		status = ocfs2_journal_access_eb(handle, et->et_ci, bh,
						 OCFS2_JOURNAL_ACCESS_CREATE);
		if (status < 0) {
			mlog_errno(status);
			goto bail;
		}

		eb->h_next_leaf_blk = 0;
		eb_el->l_tree_depth = cpu_to_le16(i);
		eb_el->l_next_free_rec = cpu_to_le16(1);
		/*
		 * This actually counts as an empty extent as
		 * c_clusters == 0
		 */
		eb_el->l_recs[0].e_cpos = cpu_to_le32(new_cpos);
		eb_el->l_recs[0].e_blkno = cpu_to_le64(next_blkno);
		/*
		 * eb_el isn't always an interior node, but even leaf
		 * nodes want a zero'd flags and reserved field so
		 * this gets the whole 32 bits regardless of use.
		 */
		eb_el->l_recs[0].e_int_clusters = cpu_to_le32(0);
		if (!eb_el->l_tree_depth)
			new_last_eb_blk = le64_to_cpu(eb->h_blkno);

		ocfs2_journal_dirty(handle, bh);
		next_blkno = le64_to_cpu(eb->h_blkno);
	}

	/* This is a bit hairy. We want to update up to three blocks
	 * here without leaving any of them in an inconsistent state
	 * in case of error. We don't have to worry about
	 * journal_dirty erroring as it won't unless we've aborted the
	 * handle (in which case we would never be here) so reserving
	 * the write with journal_access is all we need to do. */
	status = ocfs2_journal_access_eb(handle, et->et_ci, *last_eb_bh,
					 OCFS2_JOURNAL_ACCESS_WRITE);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}
	status = ocfs2_et_root_journal_access(handle, et,
					      OCFS2_JOURNAL_ACCESS_WRITE);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}
	if (eb_bh) {
		status = ocfs2_journal_access_eb(handle, et->et_ci, eb_bh,
						 OCFS2_JOURNAL_ACCESS_WRITE);
		if (status < 0) {
			mlog_errno(status);
			goto bail;
		}
	}

	/* Link the new branch into the rest of the tree (el will
	 * either be on the root_bh, or the extent block passed in.
	 */
	i = le16_to_cpu(el->l_next_free_rec);
	el->l_recs[i].e_blkno = cpu_to_le64(next_blkno);
	el->l_recs[i].e_cpos = cpu_to_le32(new_cpos);
	el->l_recs[i].e_int_clusters = 0;
	le16_add_cpu(&el->l_next_free_rec, 1);

	/* fe needs a new last extent block pointer, as does the
	 * next_leaf on the previously last-extent-block. */
	ocfs2_et_set_last_eb_blk(et, new_last_eb_blk);

	eb = (struct ocfs2_extent_block *) (*last_eb_bh)->b_data;
	eb->h_next_leaf_blk = cpu_to_le64(new_last_eb_blk);

	ocfs2_journal_dirty(handle, *last_eb_bh);
	ocfs2_journal_dirty(handle, et->et_root_bh);
	if (eb_bh)
		ocfs2_journal_dirty(handle, eb_bh);

	/*
	 * Some callers want to track the rightmost leaf so pass it
	 * back here.
	 */
	brelse(*last_eb_bh);
	get_bh(new_eb_bhs[0]);
	*last_eb_bh = new_eb_bhs[0];

	status = 0;
bail:
	if (new_eb_bhs) {
		for (i = 0; i < new_blocks; i++)
			brelse(new_eb_bhs[i]);
		kfree(new_eb_bhs);
	}

	return status;
}

/*
 * adds another level to the allocation tree.
 * returns back the new extent block so you can add a branch to it
 * after this call.
 */
static int ocfs2_shift_tree_depth(handle_t *handle,
				  struct ocfs2_extent_tree *et,
				  struct ocfs2_alloc_context *meta_ac,
				  struct buffer_head **ret_new_eb_bh)
{
	int status, i;
	u32 new_clusters;
	struct buffer_head *new_eb_bh = NULL;
	struct ocfs2_extent_block *eb;
	struct ocfs2_extent_list  *root_el;
	struct ocfs2_extent_list  *eb_el;

	status = ocfs2_create_new_meta_bhs(handle, et, 1, meta_ac,
					   &new_eb_bh);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	eb = (struct ocfs2_extent_block *) new_eb_bh->b_data;
	/* ocfs2_create_new_meta_bhs() should create it right!
	 */
	BUG_ON(!OCFS2_IS_VALID_EXTENT_BLOCK(eb));

	eb_el = &eb->h_list;
	root_el = et->et_root_el;

	status = ocfs2_journal_access_eb(handle, et->et_ci, new_eb_bh,
					 OCFS2_JOURNAL_ACCESS_CREATE);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	/* copy the root extent list data into the new extent block */
	eb_el->l_tree_depth = root_el->l_tree_depth;
	eb_el->l_next_free_rec = root_el->l_next_free_rec;
	for (i = 0; i < le16_to_cpu(root_el->l_next_free_rec); i++)
		eb_el->l_recs[i] = root_el->l_recs[i];

	ocfs2_journal_dirty(handle, new_eb_bh);

	status = ocfs2_et_root_journal_access(handle, et,
					      OCFS2_JOURNAL_ACCESS_WRITE);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	new_clusters = ocfs2_sum_rightmost_rec(eb_el);

	/* update root_bh now */
	le16_add_cpu(&root_el->l_tree_depth, 1);
	root_el->l_recs[0].e_cpos = 0;
	root_el->l_recs[0].e_blkno = eb->h_blkno;
	root_el->l_recs[0].e_int_clusters = cpu_to_le32(new_clusters);
	for (i = 1; i < le16_to_cpu(root_el->l_next_free_rec); i++)
		memset(&root_el->l_recs[i], 0, sizeof(struct ocfs2_extent_rec));
	root_el->l_next_free_rec = cpu_to_le16(1);

	/* If this is our 1st tree depth shift, then last_eb_blk
	 * becomes the allocated extent block */
	if (root_el->l_tree_depth == cpu_to_le16(1))
		ocfs2_et_set_last_eb_blk(et, le64_to_cpu(eb->h_blkno));

	ocfs2_journal_dirty(handle, et->et_root_bh);

	*ret_new_eb_bh = new_eb_bh;
	new_eb_bh = NULL;
	status = 0;
bail:
	brelse(new_eb_bh);

	return status;
}

/*
 * Should only be called when there is no space left in any of the
 * leaf nodes. What we want to do is find the lowest tree depth
 * non-leaf extent block with room for new records. There are three
 * valid results of this search:
 *
 * 1) a lowest extent block is found, then we pass it back in
 *    *lowest_eb_bh and return '0'
 *
 * 2) the search fails to find anything, but the root_el has room. We
 *    pass NULL back in *lowest_eb_bh, but still return '0'
 *
 * 3) the search fails to find anything AND the root_el is full, in
 *    which case we return > 0
 *
 * return status < 0 indicates an error.
 */
static int ocfs2_find_branch_target(struct ocfs2_extent_tree *et,
				    struct buffer_head **target_bh)
{
	int status = 0, i;
	u64 blkno;
	struct ocfs2_extent_block *eb;
	struct ocfs2_extent_list  *el;
	struct buffer_head *bh = NULL;
	struct buffer_head *lowest_bh = NULL;

	*target_bh = NULL;

	el = et->et_root_el;

	/* Walk the rightmost branch down to depth 1 (leaf parents). */
	while(le16_to_cpu(el->l_tree_depth) > 1) {
		if (le16_to_cpu(el->l_next_free_rec) == 0) {
			ocfs2_error(ocfs2_metadata_cache_get_super(et->et_ci),
				    "Owner %llu has empty "
				    "extent list (next_free_rec == 0)",
				    (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci));
			status = -EIO;
			goto bail;
		}
		i = le16_to_cpu(el->l_next_free_rec) - 1;
		blkno = le64_to_cpu(el->l_recs[i].e_blkno);
		if (!blkno) {
			ocfs2_error(ocfs2_metadata_cache_get_super(et->et_ci),
				    "Owner %llu has extent "
				    "list where extent # %d has no physical "
				    "block start",
				    (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci), i);
			status = -EIO;
			goto bail;
		}

		brelse(bh);
		bh = NULL;

		status = ocfs2_read_extent_block(et->et_ci, blkno, &bh);
		if (status < 0) {
			mlog_errno(status);
			goto bail;
		}

		eb = (struct ocfs2_extent_block *) bh->b_data;
		el = &eb->h_list;

		/* Remember the deepest block seen so far with free room. */
		if (le16_to_cpu(el->l_next_free_rec) <
		    le16_to_cpu(el->l_count)) {
			brelse(lowest_bh);
			lowest_bh = bh;
			get_bh(lowest_bh);
		}
	}

	/* If we didn't find one and the fe doesn't have any room,
	 * then return '1' */
	el = et->et_root_el;
	if (!lowest_bh && (el->l_next_free_rec == el->l_count))
		status = 1;

	*target_bh = lowest_bh;
bail:
	brelse(bh);

	return status;
}

/*
 * Grow a b-tree so that it has more records.
 *
 * We might shift the tree depth in which case existing paths should
 * be considered invalid.
 *
 * Tree depth after the grow is returned via *final_depth.
 *
 * *last_eb_bh will be updated by ocfs2_add_branch().
 */
static int ocfs2_grow_tree(handle_t *handle, struct ocfs2_extent_tree *et,
			   int *final_depth, struct buffer_head **last_eb_bh,
			   struct ocfs2_alloc_context *meta_ac)
{
	int ret, shift;
	struct ocfs2_extent_list *el = et->et_root_el;
	int depth = le16_to_cpu(el->l_tree_depth);
	struct buffer_head *bh = NULL;

	BUG_ON(meta_ac == NULL);

	shift = ocfs2_find_branch_target(et, &bh);
	if (shift < 0) {
		ret = shift;
		mlog_errno(ret);
		goto out;
	}

	/* We traveled all the way to the bottom of the allocation tree
	 * and didn't find room for any more extents - we need to add
	 * another tree level */
	if (shift) {
		BUG_ON(bh);
		trace_ocfs2_grow_tree(
			(unsigned long long)
			ocfs2_metadata_cache_owner(et->et_ci),
			depth);

		/* ocfs2_shift_tree_depth will return us a buffer with
		 * the new extent block (so we can pass that to
		 * ocfs2_add_branch). */
		ret = ocfs2_shift_tree_depth(handle, et, meta_ac, &bh);
		if (ret < 0) {
			mlog_errno(ret);
			goto out;
		}
		depth++;
		if (depth == 1) {
			/*
			 * Special case: we have room now if we shifted from
			 * tree_depth 0, so no more work needs to be done.
			 *
			 * We won't be calling add_branch, so pass
			 * back *last_eb_bh as the new leaf. At depth
			 * zero, it should always be null so there's
			 * no reason to brelse.
			 */
			BUG_ON(*last_eb_bh);
			get_bh(bh);
			*last_eb_bh = bh;
			goto out;
		}
	}

	/* call ocfs2_add_branch to add the final part of the tree with
	 * the new data. */
	ret = ocfs2_add_branch(handle, et, bh, last_eb_bh,
			       meta_ac);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

out:
	if (final_depth)
		*final_depth = depth;
	brelse(bh);
	return ret;
}

/*
 * This function will discard the rightmost extent record.
 */
static void ocfs2_shift_records_right(struct ocfs2_extent_list *el)
{
	int next_free = le16_to_cpu(el->l_next_free_rec);
	int count = le16_to_cpu(el->l_count);
	unsigned int num_bytes;

	BUG_ON(!next_free);
	/* This will cause us to go off the end of our extent list.
	 */
	BUG_ON(next_free >= count);

	num_bytes = sizeof(struct ocfs2_extent_rec) * next_free;

	memmove(&el->l_recs[1], &el->l_recs[0], num_bytes);
}

/*
 * Insert @insert_rec into leaf list @el at the position sorted by
 * e_cpos, shifting later records right.  An existing empty extent at
 * index 0 is consumed to make room.
 */
static void ocfs2_rotate_leaf(struct ocfs2_extent_list *el,
			      struct ocfs2_extent_rec *insert_rec)
{
	int i, insert_index, next_free, has_empty, num_bytes;
	u32 insert_cpos = le32_to_cpu(insert_rec->e_cpos);
	struct ocfs2_extent_rec *rec;

	next_free = le16_to_cpu(el->l_next_free_rec);
	has_empty = ocfs2_is_empty_extent(&el->l_recs[0]);

	BUG_ON(!next_free);

	/* The tree code before us didn't allow enough room in the leaf. */
	BUG_ON(el->l_next_free_rec == el->l_count && !has_empty);

	/*
	 * The easiest way to approach this is to just remove the
	 * empty extent and temporarily decrement next_free.
	 */
	if (has_empty) {
		/*
		 * If next_free was 1 (only an empty extent), this
		 * loop won't execute, which is fine. We still want
		 * the decrement above to happen.
		 */
		for(i = 0; i < (next_free - 1); i++)
			el->l_recs[i] = el->l_recs[i+1];

		next_free--;
	}

	/*
	 * Figure out what the new record index should be.
	 */
	for(i = 0; i < next_free; i++) {
		rec = &el->l_recs[i];

		if (insert_cpos < le32_to_cpu(rec->e_cpos))
			break;
	}
	insert_index = i;

	trace_ocfs2_rotate_leaf(insert_cpos, insert_index,
				has_empty, next_free,
				le16_to_cpu(el->l_count));

	BUG_ON(insert_index < 0);
	BUG_ON(insert_index >= le16_to_cpu(el->l_count));
	BUG_ON(insert_index > next_free);

	/*
	 * No need to memmove if we're just adding to the tail.
	 */
	if (insert_index != next_free) {
		BUG_ON(next_free >= le16_to_cpu(el->l_count));

		num_bytes = next_free - insert_index;
		num_bytes *= sizeof(struct ocfs2_extent_rec);
		memmove(&el->l_recs[insert_index + 1],
			&el->l_recs[insert_index],
			num_bytes);
	}

	/*
	 * Either we had an empty extent, and need to re-increment or
	 * there was no empty extent on a non full rightmost leaf node,
	 * in which case we still need to increment.
	 */
	next_free++;
	el->l_next_free_rec = cpu_to_le16(next_free);
	/*
	 * Make sure none of the math above just messed up our tree.
	 */
	BUG_ON(le16_to_cpu(el->l_next_free_rec) > le16_to_cpu(el->l_count));

	el->l_recs[insert_index] = *insert_rec;
}

/* Drop the empty extent at index 0, if present, shifting records left. */
static void ocfs2_remove_empty_extent(struct ocfs2_extent_list *el)
{
	int size, num_recs = le16_to_cpu(el->l_next_free_rec);

	BUG_ON(num_recs == 0);

	if (ocfs2_is_empty_extent(&el->l_recs[0])) {
		num_recs--;
		size = num_recs * sizeof(struct ocfs2_extent_rec);
		memmove(&el->l_recs[0], &el->l_recs[1], size);
		memset(&el->l_recs[num_recs], 0,
		       sizeof(struct ocfs2_extent_rec));
		el->l_next_free_rec = cpu_to_le16(num_recs);
	}
}

/*
 * Create an empty extent record .
 *
 * l_next_free_rec may be updated.
 *
 * If an empty extent already exists do nothing.
 */
static void ocfs2_create_empty_extent(struct ocfs2_extent_list *el)
{
	int next_free = le16_to_cpu(el->l_next_free_rec);

	BUG_ON(le16_to_cpu(el->l_tree_depth) != 0);

	if (next_free == 0)
		goto set_and_inc;

	if (ocfs2_is_empty_extent(&el->l_recs[0]))
		return;

	mlog_bug_on_msg(el->l_count == el->l_next_free_rec,
			"Asked to create an empty extent in a full list:\n"
			"count = %u, tree depth = %u",
			le16_to_cpu(el->l_count),
			le16_to_cpu(el->l_tree_depth));

	ocfs2_shift_records_right(el);

set_and_inc:
	le16_add_cpu(&el->l_next_free_rec, 1);
	memset(&el->l_recs[0], 0, sizeof(struct ocfs2_extent_rec));
}

/*
 * For a rotation which involves two leaf nodes, the "root node" is
 * the lowest level tree node which contains a path to both leafs. This
 * resulting set of information can be used to form a complete "subtree"
 *
 * This function is passed two full paths from the dinode down to a
 * pair of adjacent leaves. It's task is to figure out which path
 * index contains the subtree root - this can be the root index itself
 * in a worst-case rotation.
 *
 * The array index of the subtree root is passed back.
 */
int ocfs2_find_subtree_root(struct ocfs2_extent_tree *et,
			    struct ocfs2_path *left,
			    struct ocfs2_path *right)
{
	int i = 0;

	/*
	 * Check that the caller passed in two paths from the same tree.
	 */
	BUG_ON(path_root_bh(left) != path_root_bh(right));

	/* Descend until the two paths first diverge. */
	do {
		i++;

		/*
		 * The caller didn't pass two adjacent paths.
		 */
		mlog_bug_on_msg(i > left->p_tree_depth,
				"Owner %llu, left depth %u, right depth %u\n"
				"left leaf blk %llu, right leaf blk %llu\n",
				(unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
				left->p_tree_depth, right->p_tree_depth,
				(unsigned long long)path_leaf_bh(left)->b_blocknr,
				(unsigned long long)path_leaf_bh(right)->b_blocknr);
	} while (left->p_node[i].bh->b_blocknr ==
		 right->p_node[i].bh->b_blocknr);

	return i - 1;
}

typedef void (path_insert_t)(void *, struct buffer_head *);

/*
 * Traverse a btree path in search of cpos, starting at root_el.
 *
 * This code can be called with a cpos larger than the tree, in which
 * case it will return the rightmost path.
 */
static int __ocfs2_find_path(struct ocfs2_caching_info *ci,
			     struct ocfs2_extent_list *root_el, u32 cpos,
			     path_insert_t *func, void *data)
{
	int i, ret = 0;
	u32 range;
	u64 blkno;
	struct buffer_head *bh = NULL;
	struct ocfs2_extent_block *eb;
	struct ocfs2_extent_list *el;
	struct ocfs2_extent_rec *rec;

	el = root_el;
	while (el->l_tree_depth) {
		if (le16_to_cpu(el->l_next_free_rec) == 0) {
			ocfs2_error(ocfs2_metadata_cache_get_super(ci),
				    "Owner %llu has empty extent list at "
				    "depth %u\n",
				    (unsigned long long)ocfs2_metadata_cache_owner(ci),
				    le16_to_cpu(el->l_tree_depth));
			ret = -EROFS;
			goto out;

		}

		for(i = 0; i < le16_to_cpu(el->l_next_free_rec) - 1; i++) {
			rec = &el->l_recs[i];

			/*
			 * In the case that cpos is off the allocation
			 * tree, this should just wind up returning the
			 * rightmost record.
			 */
			range = le32_to_cpu(rec->e_cpos) +
				ocfs2_rec_clusters(el, rec);
			if (cpos >= le32_to_cpu(rec->e_cpos) && cpos < range)
			    break;
		}

		blkno = le64_to_cpu(el->l_recs[i].e_blkno);
		if (blkno == 0) {
			ocfs2_error(ocfs2_metadata_cache_get_super(ci),
				    "Owner %llu has bad blkno in extent list "
				    "at depth %u (index %d)\n",
				    (unsigned long long)ocfs2_metadata_cache_owner(ci),
				    le16_to_cpu(el->l_tree_depth), i);
			ret = -EROFS;
			goto out;
		}

		brelse(bh);
		bh = NULL;
		ret = ocfs2_read_extent_block(ci, blkno, &bh);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		eb = (struct ocfs2_extent_block *) bh->b_data;
		el = &eb->h_list;

		if (le16_to_cpu(el->l_next_free_rec) >
		    le16_to_cpu(el->l_count)) {
			ocfs2_error(ocfs2_metadata_cache_get_super(ci),
				    "Owner %llu has bad count in extent list "
				    "at block %llu (next free=%u, count=%u)\n",
				    (unsigned long long)ocfs2_metadata_cache_owner(ci),
				    (unsigned long long)bh->b_blocknr,
				    le16_to_cpu(el->l_next_free_rec),
				    le16_to_cpu(el->l_count));
			ret = -EROFS;
			goto out;
		}

		/* Let the caller record each extent block we visit. */
		if (func)
			func(data, bh);
	}

out:
	/*
	 * Catch any trailing bh that the loop didn't handle.
	 */
	brelse(bh);

	return ret;
}

/*
 * Given an initialized path (that is, it has a valid root extent
 * list), this function will traverse the btree in search of the path
 * which would contain cpos.
 *
 * The path traveled is recorded in the path structure.
 *
 * Note that this will not do any comparisons on leaf node extent
 * records, so it will work fine in the case that we just added a tree
 * branch.
 */
struct find_path_data {
	int index;
	struct ocfs2_path *path;
};

/* Callback for __ocfs2_find_path(): record each visited eb in the path. */
static void find_path_ins(void *data, struct buffer_head *bh)
{
	struct find_path_data *fp = data;

	get_bh(bh);
	ocfs2_path_insert_eb(fp->path, fp->index, bh);
	fp->index++;
}

int ocfs2_find_path(struct ocfs2_caching_info *ci,
		    struct ocfs2_path *path, u32 cpos)
{
	struct find_path_data data;

	data.index = 1;
	data.path = path;
	return __ocfs2_find_path(ci, path_root_el(path), cpos,
				 find_path_ins, &data);
}

/* Callback for __ocfs2_find_path(): keep a reference to the leaf only. */
static void find_leaf_ins(void *data, struct buffer_head *bh)
{
	struct ocfs2_extent_block *eb =(struct ocfs2_extent_block *)bh->b_data;
	struct ocfs2_extent_list *el = &eb->h_list;
	struct buffer_head **ret = data;

	/* We want to retain only the leaf block. */
	if (le16_to_cpu(el->l_tree_depth) == 0) {
		get_bh(bh);
		*ret = bh;
	}
}
/*
 * Find the leaf block in the tree which would contain cpos. No
 * checking of the actual leaf is done.
 *
 * Some paths want to call this instead of allocating a path structure
 * and calling ocfs2_find_path().
 *
 * This function doesn't handle non btree extent lists.
 */
int ocfs2_find_leaf(struct ocfs2_caching_info *ci,
		    struct ocfs2_extent_list *root_el, u32 cpos,
		    struct buffer_head **leaf_bh)
{
	int ret;
	struct buffer_head *bh = NULL;

	ret = __ocfs2_find_path(ci, root_el, cpos, find_leaf_ins, &bh);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	*leaf_bh = bh;
out:
	return ret;
}

/*
 * Adjust the adjacent records (left_rec, right_rec) involved in a rotation.
 *
 * Basically, we've moved stuff around at the bottom of the tree and
 * we need to fix up the extent records above the changes to reflect
 * the new changes.
 *
 * left_rec: the record on the left.
 * left_child_el: is the child list pointed to by left_rec
 * right_rec: the record to the right of left_rec
 * right_child_el: is the child list pointed to by right_rec
 *
 * By definition, this only works on interior nodes.
 */
static void ocfs2_adjust_adjacent_records(struct ocfs2_extent_rec *left_rec,
					  struct ocfs2_extent_list *left_child_el,
					  struct ocfs2_extent_rec *right_rec,
					  struct ocfs2_extent_list *right_child_el)
{
	u32 left_clusters, right_end;

	/*
	 * Interior nodes never have holes. Their cpos is the cpos of
	 * the leftmost record in their child list. Their cluster
	 * count covers the full theoretical range of their child list
	 * - the range between their cpos and the cpos of the record
	 * immediately to their right.
	 */
	left_clusters = le32_to_cpu(right_child_el->l_recs[0].e_cpos);
	if (!ocfs2_rec_clusters(right_child_el, &right_child_el->l_recs[0])) {
		/*
		 * The right child's record 0 is an empty extent (zero
		 * clusters); that can only happen at the leaf level,
		 * so key off the first real record instead.
		 */
		BUG_ON(right_child_el->l_tree_depth);
		BUG_ON(le16_to_cpu(right_child_el->l_next_free_rec) <= 1);
		left_clusters = le32_to_cpu(right_child_el->l_recs[1].e_cpos);
	}
	left_clusters -= le32_to_cpu(left_rec->e_cpos);
	left_rec->e_int_clusters = cpu_to_le32(left_clusters);

	/*
	 * Calculate the rightmost cluster count boundary before
	 * moving cpos - we will need to adjust clusters after
	 * updating e_cpos to keep the same highest cluster count.
	 */
	right_end = le32_to_cpu(right_rec->e_cpos);
	right_end += le32_to_cpu(right_rec->e_int_clusters);

	/* Shift the right record's start up to the end of the left record. */
	right_rec->e_cpos = left_rec->e_cpos;
	le32_add_cpu(&right_rec->e_cpos, left_clusters);

	/* Shrink its cluster count so its end position is unchanged. */
	right_end -= le32_to_cpu(right_rec->e_cpos);
	right_rec->e_int_clusters = cpu_to_le32(right_end);
}

/*
 * Adjust the adjacent root node records involved in a
 * rotation. left_el_blkno is passed in as a key so that we can easily
 * find its index in the root list.
 */
static void ocfs2_adjust_root_records(struct ocfs2_extent_list *root_el,
				      struct ocfs2_extent_list *left_el,
				      struct ocfs2_extent_list *right_el,
				      u64 left_el_blkno)
{
	int i;

	/* Only valid when root_el is strictly above the child lists. */
	BUG_ON(le16_to_cpu(root_el->l_tree_depth) <=
	       le16_to_cpu(left_el->l_tree_depth));

	/* Locate the root record which points at the left child block. */
	for(i = 0; i < le16_to_cpu(root_el->l_next_free_rec) - 1; i++) {
		if (le64_to_cpu(root_el->l_recs[i].e_blkno) == left_el_blkno)
			break;
	}

	/*
	 * The path walking code should have never returned a root and
	 * two paths which are not adjacent.
	 */
	BUG_ON(i >= (le16_to_cpu(root_el->l_next_free_rec) - 1));

	ocfs2_adjust_adjacent_records(&root_el->l_recs[i], left_el,
				      &root_el->l_recs[i + 1], right_el);
}

/*
 * We've changed a leaf block (in right_path) and need to reflect that
 * change back up the subtree.
 *
 * This happens in multiple places:
 *   - When we've moved an extent record from the left path leaf to the right
 *     path leaf to make room for an empty extent in the left path leaf.
 *   - When our insert into the right path leaf is at the leftmost edge
 *     and requires an update of the path immediately to it's left. This
 *     can occur at the end of some types of rotation and appending inserts.
 *   - When we've adjusted the last extent record in the left path leaf and the
 *     1st extent record in the right path leaf during cross extent block merge.
 */
static void ocfs2_complete_edge_insert(handle_t *handle,
				       struct ocfs2_path *left_path,
				       struct ocfs2_path *right_path,
				       int subtree_index)
{
	int i, idx;
	struct ocfs2_extent_list *el, *left_el, *right_el;
	struct ocfs2_extent_rec *left_rec, *right_rec;
	struct buffer_head *root_bh = left_path->p_node[subtree_index].bh;

	/*
	 * Update the counts and position values within all the
	 * interior nodes to reflect the leaf rotation we just did.
	 *
	 * The root node is handled below the loop.
	 *
	 * We begin the loop with right_el and left_el pointing to the
	 * leaf lists and work our way up.
	 *
	 * NOTE: within this loop, left_el and right_el always refer
	 * to the *child* lists.
	 */
	left_el = path_leaf_el(left_path);
	right_el = path_leaf_el(right_path);
	for(i = left_path->p_tree_depth - 1; i > subtree_index; i--) {
		trace_ocfs2_complete_edge_insert(i);

		/*
		 * One nice property of knowing that all of these
		 * nodes are below the root is that we only deal with
		 * the leftmost right node record and the rightmost
		 * left node record.
		 */
		el = left_path->p_node[i].el;
		idx = le16_to_cpu(left_el->l_next_free_rec) - 1;
		left_rec = &el->l_recs[idx];

		el = right_path->p_node[i].el;
		right_rec = &el->l_recs[0];

		ocfs2_adjust_adjacent_records(left_rec, left_el, right_rec,
					      right_el);

		ocfs2_journal_dirty(handle, left_path->p_node[i].bh);
		ocfs2_journal_dirty(handle, right_path->p_node[i].bh);

		/*
		 * Setup our list pointers now so that the current
		 * parents become children in the next iteration.
		 */
		left_el = left_path->p_node[i].el;
		right_el = right_path->p_node[i].el;
	}

	/*
	 * At the root node, adjust the two adjacent records which
	 * begin our path to the leaves.
	 */
	el = left_path->p_node[subtree_index].el;
	left_el = left_path->p_node[subtree_index + 1].el;
	right_el = right_path->p_node[subtree_index + 1].el;

	ocfs2_adjust_root_records(el, left_el, right_el,
				  left_path->p_node[subtree_index + 1].bh->b_blocknr);

	root_bh = left_path->p_node[subtree_index].bh;

	ocfs2_journal_dirty(handle, root_bh);
}

/*
 * Rotate one record from the rightmost slot of the left leaf into
 * slot 0 of the right leaf, leaving an empty extent at the front of
 * the left leaf. All journal accesses on the affected subtree are
 * taken here; interior nodes are fixed up via
 * ocfs2_complete_edge_insert().
 */
static int ocfs2_rotate_subtree_right(handle_t *handle,
				      struct ocfs2_extent_tree *et,
				      struct ocfs2_path *left_path,
				      struct ocfs2_path *right_path,
				      int subtree_index)
{
	int ret, i;
	struct buffer_head *right_leaf_bh;
	struct buffer_head *left_leaf_bh = NULL;
	struct buffer_head *root_bh;
	struct ocfs2_extent_list *right_el, *left_el;
	struct ocfs2_extent_rec move_rec;

	left_leaf_bh = path_leaf_bh(left_path);
	left_el = path_leaf_el(left_path);

	if (left_el->l_next_free_rec != left_el->l_count) {
		ocfs2_error(ocfs2_metadata_cache_get_super(et->et_ci),
			    "Inode %llu has non-full interior leaf node %llu"
			    "(next free = %u)",
			    (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
			    (unsigned long long)left_leaf_bh->b_blocknr,
			    le16_to_cpu(left_el->l_next_free_rec));
		return -EROFS;
	}

	/*
	 * This extent block may already have an empty record, so we
	 * return early if so.
	 */
	if (ocfs2_is_empty_extent(&left_el->l_recs[0]))
		return 0;

	root_bh = left_path->p_node[subtree_index].bh;
	BUG_ON(root_bh != right_path->p_node[subtree_index].bh);

	/* Journal every block we are about to modify, root downward. */
	ret = ocfs2_path_bh_journal_access(handle, et->et_ci, right_path,
					   subtree_index);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	for(i = subtree_index + 1; i < path_num_items(right_path); i++) {
		ret = ocfs2_path_bh_journal_access(handle, et->et_ci,
						   right_path, i);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		ret = ocfs2_path_bh_journal_access(handle, et->et_ci,
						   left_path, i);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

	right_leaf_bh = path_leaf_bh(right_path);
	right_el = path_leaf_el(right_path);

	/* This is a code error, not a disk corruption. */
	mlog_bug_on_msg(!right_el->l_next_free_rec, "Inode %llu: Rotate fails "
			"because rightmost leaf block %llu is empty\n",
			(unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
			(unsigned long long)right_leaf_bh->b_blocknr);

	ocfs2_create_empty_extent(right_el);

	ocfs2_journal_dirty(handle, right_leaf_bh);

	/* Do the copy now. */
	i = le16_to_cpu(left_el->l_next_free_rec) - 1;
	move_rec = left_el->l_recs[i];
	right_el->l_recs[0] = move_rec;

	/*
	 * Clear out the record we just copied and shift everything
	 * over, leaving an empty extent in the left leaf.
	 *
	 * We temporarily subtract from next_free_rec so that the
	 * shift will lose the tail record (which is now defunct).
	 */
	le16_add_cpu(&left_el->l_next_free_rec, -1);
	ocfs2_shift_records_right(left_el);
	memset(&left_el->l_recs[0], 0, sizeof(struct ocfs2_extent_rec));
	le16_add_cpu(&left_el->l_next_free_rec, 1);

	ocfs2_journal_dirty(handle, left_leaf_bh);

	/* Propagate the new record boundaries up the interior nodes. */
	ocfs2_complete_edge_insert(handle, left_path, right_path,
				   subtree_index);

out:
	return ret;
}

/*
 * Given a full path, determine what cpos value would return us a path
 * containing the leaf immediately to the left of the current one.
 *
 * Will return zero if the path passed in is already the leftmost path.
 */
int ocfs2_find_cpos_for_left_leaf(struct super_block *sb,
				  struct ocfs2_path *path, u32 *cpos)
{
	int i, j, ret = 0;
	u64 blkno;
	struct ocfs2_extent_list *el;

	BUG_ON(path->p_tree_depth == 0);

	*cpos = 0;

	blkno = path_leaf_bh(path)->b_blocknr;

	/* Start at the tree node just above the leaf and work our way up. */
	i = path->p_tree_depth - 1;
	while (i >= 0) {
		el = path->p_node[i].el;

		/*
		 * Find the extent record just before the one in our
		 * path.
		 */
		for(j = 0; j < le16_to_cpu(el->l_next_free_rec); j++) {
			if (le64_to_cpu(el->l_recs[j].e_blkno) == blkno) {
				if (j == 0) {
					if (i == 0) {
						/*
						 * We've determined that the
						 * path specified is already
						 * the leftmost one - return a
						 * cpos of zero.
						 */
						goto out;
					}

					/*
					 * The leftmost record points to our
					 * leaf - we need to travel up the
					 * tree one level.
					 */
					goto next_node;
				}

				/*
				 * Answer is the last cluster covered
				 * by the record to our left.
				 */
				*cpos = le32_to_cpu(el->l_recs[j - 1].e_cpos);
				*cpos = *cpos + ocfs2_rec_clusters(el,
							   &el->l_recs[j - 1]);
				*cpos = *cpos - 1;
				goto out;
			}
		}

		/*
		 * If we got here, we never found a valid node where
		 * the tree indicated one should be.
		 */
		ocfs2_error(sb,
			    "Invalid extent tree at extent block %llu\n",
			    (unsigned long long)blkno);
		ret = -EROFS;
		goto out;

next_node:
		blkno = path->p_node[i].bh->b_blocknr;
		i--;
	}

out:
	return ret;
}

/*
 * Extend the transaction by enough credits to complete the rotation,
 * and still leave at least the original number of credits allocated
 * to this transaction.
 */
static int ocfs2_extend_rotate_transaction(handle_t *handle, int subtree_depth,
					   int op_credits,
					   struct ocfs2_path *path)
{
	int ret = 0;
	/*
	 * Two blocks (left and right) per tree level below the
	 * subtree root, plus one for the root itself, plus whatever
	 * the operation needs on top.
	 */
	int credits = (path->p_tree_depth - subtree_depth) * 2 + 1 + op_credits;

	if (handle->h_buffer_credits < credits)
		ret = ocfs2_extend_trans(handle,
					 credits - handle->h_buffer_credits);

	return ret;
}

/*
 * Trap the case where we're inserting into the theoretical range past
 * the _actual_ left leaf range. Otherwise, we'll rotate a record
 * whose cpos is less than ours into the right leaf.
 *
 * It's only necessary to look at the rightmost record of the left
 * leaf because the logic that calls us should ensure that the
 * theoretical ranges in the path components above the leaves are
 * correct.
 */
static int ocfs2_rotate_requires_path_adjustment(struct ocfs2_path *left_path,
						 u32 insert_cpos)
{
	struct ocfs2_extent_list *left_el;
	struct ocfs2_extent_rec *rec;
	int next_free;

	left_el = path_leaf_el(left_path);
	next_free = le16_to_cpu(left_el->l_next_free_rec);
	rec = &left_el->l_recs[next_free - 1];

	/* Insert lands past the start of the left leaf's last record. */
	if (insert_cpos > le32_to_cpu(rec->e_cpos))
		return 1;
	return 0;
}

/*
 * Return 1 if cpos falls within the range of the leftmost non-empty
 * record in 'el', 0 otherwise.
 */
static int ocfs2_leftmost_rec_contains(struct ocfs2_extent_list *el, u32 cpos)
{
	int next_free = le16_to_cpu(el->l_next_free_rec);
	unsigned int range;
	struct ocfs2_extent_rec *rec;

	if (next_free == 0)
		return 0;

	rec = &el->l_recs[0];
	if (ocfs2_is_empty_extent(rec)) {
		/* Empty list. */
		if (next_free == 1)
			return 0;
		rec = &el->l_recs[1];
	}

	range = le32_to_cpu(rec->e_cpos) + ocfs2_rec_clusters(el, rec);
	if (cpos >= le32_to_cpu(rec->e_cpos) && cpos < range)
		return 1;
	return 0;
}

/*
 * Rotate all the records in a btree right one record, starting at insert_cpos.
 *
 * The path to the rightmost leaf should be passed in.
 *
 * The array is assumed to be large enough to hold an entire path (tree depth).
 *
 * Upon successful return from this function:
 *
 * - The 'right_path' array will contain a path to the leaf block
 *   whose range contains e_cpos.
 * - That leaf block will have a single empty extent in list index 0.
 * - In the case that the rotation requires a post-insert update,
 *   *ret_left_path will contain a valid path which can be passed to
 *   ocfs2_insert_path().
 */
static int ocfs2_rotate_tree_right(handle_t *handle,
				   struct ocfs2_extent_tree *et,
				   enum ocfs2_split_type split,
				   u32 insert_cpos,
				   struct ocfs2_path *right_path,
				   struct ocfs2_path **ret_left_path)
{
	int ret, start, orig_credits = handle->h_buffer_credits;
	u32 cpos;
	struct ocfs2_path *left_path = NULL;
	struct super_block *sb = ocfs2_metadata_cache_get_super(et->et_ci);

	*ret_left_path = NULL;

	left_path = ocfs2_new_path_from_path(right_path);
	if (!left_path) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_find_cpos_for_left_leaf(sb, right_path, &cpos);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	trace_ocfs2_rotate_tree_right(
		(unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
		insert_cpos, cpos);

	/*
	 * What we want to do here is:
	 *
	 * 1) Start with the rightmost path.
	 *
	 * 2) Determine a path to the leaf block directly to the left
	 *    of that leaf.
	 *
	 * 3) Determine the 'subtree root' - the lowest level tree node
	 *    which contains a path to both leaves.
	 *
	 * 4) Rotate the subtree.
	 *
	 * 5) Find the next subtree by considering the left path to be
	 *    the new right path.
	 *
	 * The check at the top of this while loop also accepts
	 * insert_cpos == cpos because cpos is only a _theoretical_
	 * value to get us the left path - insert_cpos might very well
	 * be filling that hole.
	 *
	 * Stop at a cpos of '0' because we either started at the
	 * leftmost branch (i.e., a tree with one branch and a
	 * rotation inside of it), or we've gone as far as we can in
	 * rotating subtrees.
	 */
	while (cpos && insert_cpos <= cpos) {
		trace_ocfs2_rotate_tree_right(
			(unsigned long long)
			ocfs2_metadata_cache_owner(et->et_ci),
			insert_cpos, cpos);

		ret = ocfs2_find_path(et->et_ci, left_path, cpos);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		mlog_bug_on_msg(path_leaf_bh(left_path) ==
				path_leaf_bh(right_path),
				"Owner %llu: error during insert of %u "
				"(left path cpos %u) results in two identical "
				"paths ending at %llu\n",
				(unsigned long long)
				ocfs2_metadata_cache_owner(et->et_ci),
				insert_cpos, cpos,
				(unsigned long long)
				path_leaf_bh(left_path)->b_blocknr);

		if (split == SPLIT_NONE &&
		    ocfs2_rotate_requires_path_adjustment(left_path,
							  insert_cpos)) {

			/*
			 * We've rotated the tree as much as we
			 * should. The rest is up to
			 * ocfs2_insert_path() to complete, after the
			 * record insertion. We indicate this
			 * situation by returning the left path.
			 *
			 * The reason we don't adjust the records here
			 * before the record insert is that an error
			 * later might break the rule where a parent
			 * record e_cpos will reflect the actual
			 * e_cpos of the 1st nonempty record of the
			 * child list.
			 */
			*ret_left_path = left_path;
			goto out_ret_path;
		}

		start = ocfs2_find_subtree_root(et, left_path, right_path);

		trace_ocfs2_rotate_subtree(start,
			(unsigned long long)
			right_path->p_node[start].bh->b_blocknr,
			right_path->p_tree_depth);

		ret = ocfs2_extend_rotate_transaction(handle, start,
						      orig_credits, right_path);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		ret = ocfs2_rotate_subtree_right(handle, et, left_path,
						 right_path, start);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		if (split != SPLIT_NONE &&
		    ocfs2_leftmost_rec_contains(path_leaf_el(right_path),
						insert_cpos)) {
			/*
			 * A rotate moves the rightmost left leaf
			 * record over to the leftmost right leaf
			 * slot. If we're doing an extent split
			 * instead of a real insert, then we have to
			 * check that the extent to be split wasn't
			 * just moved over. If it was, then we can
			 * exit here, passing left_path back -
			 * ocfs2_split_extent() is smart enough to
			 * search both leaves.
			 */
			*ret_left_path = left_path;
			goto out_ret_path;
		}

		/*
		 * There is no need to re-read the next right path
		 * as we know that it'll be our current left
		 * path. Optimize by copying values instead.
		 */
		ocfs2_mv_path(right_path, left_path);

		ret = ocfs2_find_cpos_for_left_leaf(sb, right_path, &cpos);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

out:
	ocfs2_free_path(left_path);

out_ret_path:
	return ret;
}

/*
 * After removing the rightmost subtree, recompute e_int_clusters on
 * the rightmost record of every interior node in 'path' so that each
 * level ends at the (new) rightmost leaf's last cluster.
 */
static int ocfs2_update_edge_lengths(handle_t *handle,
				     struct ocfs2_extent_tree *et,
				     int subtree_index, struct ocfs2_path *path)
{
	int i, idx, ret;
	struct ocfs2_extent_rec *rec;
	struct ocfs2_extent_list *el;
	struct ocfs2_extent_block *eb;
	u32 range;

	/*
	 * In normal tree rotation process, we will never touch the
	 * tree branch above subtree_index and ocfs2_extend_rotate_transaction
	 * doesn't reserve the credits for them either.
	 *
	 * But we do have a special case here which will update the rightmost
	 * records for all the bh in the path.
	 * So we have to allocate extra credits and access them.
	 */
	ret = ocfs2_extend_trans(handle, subtree_index);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_journal_access_path(et->et_ci, handle, path);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/* Path should always be rightmost. */
	eb = (struct ocfs2_extent_block *)path_leaf_bh(path)->b_data;
	BUG_ON(eb->h_next_leaf_blk != 0ULL);

	el = &eb->h_list;
	BUG_ON(le16_to_cpu(el->l_next_free_rec) == 0);
	idx = le16_to_cpu(el->l_next_free_rec) - 1;
	rec = &el->l_recs[idx];
	/* End-of-tree cpos: one past the last cluster in the leaf. */
	range = le32_to_cpu(rec->e_cpos) + ocfs2_rec_clusters(el, rec);

	for (i = 0; i < path->p_tree_depth; i++) {
		el = path->p_node[i].el;
		idx = le16_to_cpu(el->l_next_free_rec) - 1;
		rec = &el->l_recs[idx];

		/* New length = end-of-tree cpos minus this record's cpos. */
		rec->e_int_clusters = cpu_to_le32(range);
		le32_add_cpu(&rec->e_int_clusters, -le32_to_cpu(rec->e_cpos));

		ocfs2_journal_dirty(handle, path->p_node[i].bh);
	}
out:
	return ret;
}

/*
 * Zero out and free (via the dealloc context) every extent block in
 * 'path' from level 'unlink_start' down to the leaf, dropping them
 * from the metadata cache.
 */
static void ocfs2_unlink_path(handle_t *handle,
			      struct ocfs2_extent_tree *et,
			      struct ocfs2_cached_dealloc_ctxt *dealloc,
			      struct ocfs2_path *path, int unlink_start)
{
	int ret, i;
	struct ocfs2_extent_block *eb;
	struct ocfs2_extent_list *el;
	struct buffer_head *bh;

	for(i = unlink_start; i < path_num_items(path); i++) {
		bh = path->p_node[i].bh;

		eb = (struct ocfs2_extent_block *)bh->b_data;
		/*
		 * Not all nodes might have had their final count
		 * decremented by the caller - handle this here.
		 */
		el = &eb->h_list;
		if (le16_to_cpu(el->l_next_free_rec) > 1) {
			mlog(ML_ERROR,
			     "Inode %llu, attempted to remove extent block "
			     "%llu with %u records\n",
			     (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
			     (unsigned long long)le64_to_cpu(eb->h_blkno),
			     le16_to_cpu(el->l_next_free_rec));

			ocfs2_journal_dirty(handle, bh);
			ocfs2_remove_from_cache(et->et_ci, bh);
			continue;
		}

		el->l_next_free_rec = 0;
		memset(&el->l_recs[0], 0, sizeof(struct ocfs2_extent_rec));

		ocfs2_journal_dirty(handle, bh);

		ret = ocfs2_cache_extent_block_free(dealloc, eb);
		if (ret)
			mlog_errno(ret);

		ocfs2_remove_from_cache(et->et_ci, bh);
	}
}

/*
 * Detach right_path's branch below subtree_index from the tree:
 * remove its record from the subtree root, terminate the leaf chain
 * at left_path's leaf, and unlink/free the orphaned blocks.
 */
static void ocfs2_unlink_subtree(handle_t *handle,
				 struct ocfs2_extent_tree *et,
				 struct ocfs2_path *left_path,
				 struct ocfs2_path *right_path,
				 int subtree_index,
				 struct ocfs2_cached_dealloc_ctxt *dealloc)
{
	int i;
	struct buffer_head *root_bh = left_path->p_node[subtree_index].bh;
	struct ocfs2_extent_list *root_el = left_path->p_node[subtree_index].el;
	struct ocfs2_extent_list *el;
	struct ocfs2_extent_block *eb;

	el = path_leaf_el(left_path);

	eb = (struct ocfs2_extent_block *)right_path->p_node[subtree_index + 1].bh->b_data;

	/* Find the root record pointing at the right branch's child. */
	for(i = 1; i < le16_to_cpu(root_el->l_next_free_rec); i++)
		if (root_el->l_recs[i].e_blkno == eb->h_blkno)
			break;

	BUG_ON(i >= le16_to_cpu(root_el->l_next_free_rec));

	memset(&root_el->l_recs[i], 0, sizeof(struct ocfs2_extent_rec));
	le16_add_cpu(&root_el->l_next_free_rec, -1);

	/* Left leaf becomes the new rightmost leaf. */
	eb = (struct ocfs2_extent_block *)path_leaf_bh(left_path)->b_data;
	eb->h_next_leaf_blk = 0;

	ocfs2_journal_dirty(handle, root_bh);
	ocfs2_journal_dirty(handle, path_leaf_bh(left_path));

	ocfs2_unlink_path(handle, et, dealloc, right_path,
			  subtree_index + 1);
}

/*
 * Rotate one record leftward between the two leaf paths, possibly
 * deleting the right branch entirely when it becomes empty (in which
 * case *deleted is set). Returns -EAGAIN when the right leaf is a
 * non-rightmost leaf holding an empty extent - the caller must fix
 * that leaf up first and retry.
 */
static int ocfs2_rotate_subtree_left(handle_t *handle,
				     struct ocfs2_extent_tree *et,
				     struct ocfs2_path *left_path,
				     struct ocfs2_path *right_path,
				     int subtree_index,
				     struct ocfs2_cached_dealloc_ctxt *dealloc,
				     int *deleted)
{
	int ret, i, del_right_subtree = 0, right_has_empty = 0;
	struct buffer_head *root_bh, *et_root_bh = path_root_bh(right_path);
	struct ocfs2_extent_list *right_leaf_el, *left_leaf_el;
	struct ocfs2_extent_block *eb;

	*deleted = 0;

	right_leaf_el = path_leaf_el(right_path);
	left_leaf_el = path_leaf_el(left_path);
	root_bh = left_path->p_node[subtree_index].bh;
	BUG_ON(root_bh != right_path->p_node[subtree_index].bh);

	if (!ocfs2_is_empty_extent(&left_leaf_el->l_recs[0]))
		return 0;

	eb = (struct ocfs2_extent_block *)path_leaf_bh(right_path)->b_data;
	if (ocfs2_is_empty_extent(&right_leaf_el->l_recs[0])) {
		/*
		 * It's legal for us to proceed if the right leaf is
		 * the rightmost one and it has an empty extent. There
		 * are two cases to handle - whether the leaf will be
		 * empty after removal or not. If the leaf isn't empty
		 * then just remove the empty extent up front. The
		 * next block will handle empty leaves by flagging
		 * them for unlink.
		 *
		 * Non rightmost leaves will throw -EAGAIN and the
		 * caller can manually move the subtree and retry.
		 */

		if (eb->h_next_leaf_blk != 0ULL)
			return -EAGAIN;

		if (le16_to_cpu(right_leaf_el->l_next_free_rec) > 1) {
			ret = ocfs2_journal_access_eb(handle, et->et_ci,
						      path_leaf_bh(right_path),
						      OCFS2_JOURNAL_ACCESS_WRITE);
			if (ret) {
				mlog_errno(ret);
				goto out;
			}

			ocfs2_remove_empty_extent(right_leaf_el);
		} else
			right_has_empty = 1;
	}

	if (eb->h_next_leaf_blk == 0ULL &&
	    le16_to_cpu(right_leaf_el->l_next_free_rec) == 1) {
		/*
		 * We have to update i_last_eb_blk during the meta
		 * data delete.
		 */
		ret = ocfs2_et_root_journal_access(handle, et,
						   OCFS2_JOURNAL_ACCESS_WRITE);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		del_right_subtree = 1;
	}

	/*
	 * Getting here with an empty extent in the right path implies
	 * that it's the rightmost path and will be deleted.
	 */
	BUG_ON(right_has_empty && !del_right_subtree);

	ret = ocfs2_path_bh_journal_access(handle, et->et_ci, right_path,
					   subtree_index);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	for(i = subtree_index + 1; i < path_num_items(right_path); i++) {
		ret = ocfs2_path_bh_journal_access(handle, et->et_ci,
						   right_path, i);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		ret = ocfs2_path_bh_journal_access(handle, et->et_ci,
						   left_path, i);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

	if (!right_has_empty) {
		/*
		 * Only do this if we're moving a real
		 * record. Otherwise, the action is delayed until
		 * after removal of the right path in which case we
		 * can do a simple shift to remove the empty extent.
		 */
		ocfs2_rotate_leaf(left_leaf_el, &right_leaf_el->l_recs[0]);
		memset(&right_leaf_el->l_recs[0], 0,
		       sizeof(struct ocfs2_extent_rec));
	}
	if (eb->h_next_leaf_blk == 0ULL) {
		/*
		 * Move recs over to get rid of empty extent, decrease
		 * next_free. This is allowed to remove the last
		 * extent in our leaf (setting l_next_free_rec to
		 * zero) - the delete code below won't care.
		 */
		ocfs2_remove_empty_extent(right_leaf_el);
	}

	ocfs2_journal_dirty(handle, path_leaf_bh(left_path));
	ocfs2_journal_dirty(handle, path_leaf_bh(right_path));

	if (del_right_subtree) {
		ocfs2_unlink_subtree(handle, et, left_path, right_path,
				     subtree_index, dealloc);
		ret = ocfs2_update_edge_lengths(handle, et, subtree_index,
						left_path);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		eb = (struct ocfs2_extent_block *)path_leaf_bh(left_path)->b_data;
		ocfs2_et_set_last_eb_blk(et, le64_to_cpu(eb->h_blkno));

		/*
		 * Removal of the extent in the left leaf was skipped
		 * above so we could delete the right path
		 * 1st.
		 */
		if (right_has_empty)
			ocfs2_remove_empty_extent(left_leaf_el);

		ocfs2_journal_dirty(handle, et_root_bh);

		*deleted = 1;
	} else
		ocfs2_complete_edge_insert(handle, left_path, right_path,
					   subtree_index);

out:
	return ret;
}

/*
 * Given a full path, determine what cpos value would return us a path
 * containing the leaf immediately to the right of the current one.
 *
 * Will return zero if the path passed in is already the rightmost path.
 *
 * This looks similar, but is subtly different to
 * ocfs2_find_cpos_for_left_leaf().
 */
int ocfs2_find_cpos_for_right_leaf(struct super_block *sb,
				   struct ocfs2_path *path, u32 *cpos)
{
	int i, j, ret = 0;
	u64 blkno;
	struct ocfs2_extent_list *el;

	*cpos = 0;

	if (path->p_tree_depth == 0)
		return 0;

	blkno = path_leaf_bh(path)->b_blocknr;

	/* Start at the tree node just above the leaf and work our way up. */
	i = path->p_tree_depth - 1;
	while (i >= 0) {
		int next_free;

		el = path->p_node[i].el;

		/*
		 * Find the extent record just after the one in our
		 * path.
		 */
		next_free = le16_to_cpu(el->l_next_free_rec);
		for(j = 0; j < le16_to_cpu(el->l_next_free_rec); j++) {
			if (le64_to_cpu(el->l_recs[j].e_blkno) == blkno) {
				if (j == (next_free - 1)) {
					if (i == 0) {
						/*
						 * We've determined that the
						 * path specified is already
						 * the rightmost one - return a
						 * cpos of zero.
						 */
						goto out;
					}

					/*
					 * The rightmost record points to our
					 * leaf - we need to travel up the
					 * tree one level.
					 */
					goto next_node;
				}

				*cpos = le32_to_cpu(el->l_recs[j + 1].e_cpos);
				goto out;
			}
		}

		/*
		 * If we got here, we never found a valid node where
		 * the tree indicated one should be.
		 */
		ocfs2_error(sb,
			    "Invalid extent tree at extent block %llu\n",
			    (unsigned long long)blkno);
		ret = -EROFS;
		goto out;

next_node:
		blkno = path->p_node[i].bh->b_blocknr;
		i--;
	}

out:
	return ret;
}

/*
 * Remove the empty extent at the front of the rightmost leaf, if one
 * exists. No-op (returns 0) when the leaf has no empty extent.
 */
static int ocfs2_rotate_rightmost_leaf_left(handle_t *handle,
					    struct ocfs2_extent_tree *et,
					    struct ocfs2_path *path)
{
	int ret;
	struct buffer_head *bh = path_leaf_bh(path);
	struct ocfs2_extent_list *el = path_leaf_el(path);

	if (!ocfs2_is_empty_extent(&el->l_recs[0]))
		return 0;

	ret = ocfs2_path_bh_journal_access(handle, et->et_ci, path,
					   path_num_items(path) - 1);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ocfs2_remove_empty_extent(el);
	ocfs2_journal_dirty(handle, bh);

out:
	return ret;
}

/*
 * One pass of left rotation: starting from 'path' (which must hold an
 * empty extent in its leaf), rotate records leftward subtree by
 * subtree toward the rightmost edge. On -EAGAIN from a subtree
 * rotation, ownership of the offending right path is handed back via
 * *empty_extent_path for the caller to fix up and retry.
 */
static int __ocfs2_rotate_tree_left(handle_t *handle,
				    struct ocfs2_extent_tree *et,
				    int orig_credits,
				    struct ocfs2_path *path,
				    struct ocfs2_cached_dealloc_ctxt *dealloc,
				    struct ocfs2_path **empty_extent_path)
{
	int ret, subtree_root, deleted;
	u32 right_cpos;
	struct ocfs2_path *left_path = NULL;
	struct ocfs2_path *right_path = NULL;
	struct super_block *sb = ocfs2_metadata_cache_get_super(et->et_ci);

	BUG_ON(!ocfs2_is_empty_extent(&(path_leaf_el(path)->l_recs[0])));

	*empty_extent_path = NULL;

	ret = ocfs2_find_cpos_for_right_leaf(sb, path, &right_cpos);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	left_path = ocfs2_new_path_from_path(path);
	if (!left_path) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}

	ocfs2_cp_path(left_path, path);

	right_path = ocfs2_new_path_from_path(path);
	if (!right_path) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}

	while (right_cpos) {
		ret = ocfs2_find_path(et->et_ci, right_path, right_cpos);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		subtree_root = ocfs2_find_subtree_root(et, left_path,
						       right_path);

		trace_ocfs2_rotate_subtree(subtree_root,
		     (unsigned long long)
		     right_path->p_node[subtree_root].bh->b_blocknr,
		     right_path->p_tree_depth);

		ret = ocfs2_extend_rotate_transaction(handle, subtree_root,
						      orig_credits, left_path);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		/*
		 * Caller might still want to make changes to the
		 * tree root, so re-add it to the journal here.
		 */
		ret = ocfs2_path_bh_journal_access(handle, et->et_ci,
						   left_path, 0);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		ret = ocfs2_rotate_subtree_left(handle, et, left_path,
						right_path, subtree_root,
						dealloc, &deleted);
		if (ret == -EAGAIN) {
			/*
			 * The rotation has to temporarily stop due to
			 * the right subtree having an empty
			 * extent. Pass it back to the caller for a
			 * fixup.
			 */
			*empty_extent_path = right_path;
			right_path = NULL;
			goto out;
		}
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		/*
		 * The subtree rotate might have removed records on
		 * the rightmost edge. If so, then rotation is
		 * complete.
		 */
		if (deleted)
			break;

		ocfs2_mv_path(left_path, right_path);

		ret = ocfs2_find_cpos_for_right_leaf(sb, left_path,
						     &right_cpos);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

out:
	ocfs2_free_path(right_path);
	ocfs2_free_path(left_path);

	return ret;
}

/*
 * Delete the rightmost branch of the tree (whose leaf has become
 * empty). If a branch exists to the left it becomes the new rightmost
 * one; otherwise the root is reverted to in-line extents.
 */
static int ocfs2_remove_rightmost_path(handle_t *handle,
				struct ocfs2_extent_tree *et,
				struct ocfs2_path *path,
				struct ocfs2_cached_dealloc_ctxt *dealloc)
{
	int ret, subtree_index;
	u32 cpos;
	struct ocfs2_path *left_path = NULL;
	struct ocfs2_extent_block *eb;
	struct ocfs2_extent_list *el;

	ret = ocfs2_et_sanity_check(et);
	if (ret)
		goto out;
	/*
	 * There's two ways we handle this depending on
	 * whether path is the only existing one.
	 */
	ret = ocfs2_extend_rotate_transaction(handle, 0,
					      handle->h_buffer_credits,
					      path);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_journal_access_path(et->et_ci, handle, path);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_find_cpos_for_left_leaf(ocfs2_metadata_cache_get_super(et->et_ci),
					    path, &cpos);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	if (cpos) {
		/*
		 * We have a path to the left of this one - it needs
		 * an update too.
		 */
		left_path = ocfs2_new_path_from_path(path);
		if (!left_path) {
			ret = -ENOMEM;
			mlog_errno(ret);
			goto out;
		}

		ret = ocfs2_find_path(et->et_ci, left_path, cpos);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		ret = ocfs2_journal_access_path(et->et_ci, handle, left_path);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		subtree_index = ocfs2_find_subtree_root(et, left_path, path);

		ocfs2_unlink_subtree(handle, et, left_path, path,
				     subtree_index,
				     dealloc);
		ret = ocfs2_update_edge_lengths(handle, et, subtree_index,
						left_path);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		/* The left branch's leaf is now the last extent block. */
		eb = (struct ocfs2_extent_block *)path_leaf_bh(left_path)->b_data;
		ocfs2_et_set_last_eb_blk(et, le64_to_cpu(eb->h_blkno));
	} else {
		/*
		 * 'path' is also the leftmost path which
		 * means it must be the only one. This gets
		 * handled differently because we want to
		 * revert the root back to having extents
		 * in-line.
		 */
		ocfs2_unlink_path(handle, et, dealloc, path, 1);

		el = et->et_root_el;
		el->l_tree_depth = 0;
		el->l_next_free_rec = 0;
		memset(&el->l_recs[0], 0, sizeof(struct ocfs2_extent_rec));

		ocfs2_et_set_last_eb_blk(et, 0);
	}

	ocfs2_journal_dirty(handle, path_root_bh(path));

out:
	ocfs2_free_path(left_path);
	return ret;
}

/*
 * Left rotation of btree records.
 *
 * In many ways, this is (unsurprisingly) the opposite of right
 * rotation. We start at some non-rightmost path containing an empty
 * extent in the leaf block. The code works its way to the rightmost
 * path by rotating records to the left in every subtree.
 *
 * This is used by any code which reduces the number of extent records
 * in a leaf. After removal, an empty record should be placed in the
 * leftmost list position.
 *
 * This won't handle a length update of the rightmost path records if
 * the rightmost tree leaf record is removed so the caller is
 * responsible for detecting and correcting that.
 */
static int ocfs2_rotate_tree_left(handle_t *handle,
				  struct ocfs2_extent_tree *et,
				  struct ocfs2_path *path,
				  struct ocfs2_cached_dealloc_ctxt *dealloc)
{
	int ret, orig_credits = handle->h_buffer_credits;
	struct ocfs2_path *tmp_path = NULL, *restart_path = NULL;
	struct ocfs2_extent_block *eb;
	struct ocfs2_extent_list *el;

	el = path_leaf_el(path);
	if (!ocfs2_is_empty_extent(&el->l_recs[0]))
		return 0;

	if (path->p_tree_depth == 0) {
rightmost_no_delete:
		/*
		 * Inline extents. This is trivially handled, so do
		 * it up front.
		 */
		ret = ocfs2_rotate_rightmost_leaf_left(handle, et, path);
		if (ret)
			mlog_errno(ret);
		goto out;
	}

	/*
	 * Handle rightmost branch now. There's several cases:
	 *  1) simple rotation leaving records in there. That's trivial.
	 *  2) rotation requiring a branch delete - there's no more
	 *     records left. Two cases of this:
	 *     a) There are branches to the left.
	 *     b) This is also the leftmost (the only) branch.
	 *
	 *  1) is handled via ocfs2_rotate_rightmost_leaf_left()
	 *  2a) we need the left branch so that we can update it with the unlink
	 *  2b) we need to bring the root back to inline extents.
	 */

	eb = (struct ocfs2_extent_block *)path_leaf_bh(path)->b_data;
	el = &eb->h_list;
	if (eb->h_next_leaf_blk == 0) {
		/*
		 * This gets a bit tricky if we're going to delete the
		 * rightmost path. Get the other cases out of the way
		 * 1st.
		 */
		if (le16_to_cpu(el->l_next_free_rec) > 1)
			goto rightmost_no_delete;

		if (le16_to_cpu(el->l_next_free_rec) == 0) {
			ret = -EIO;
			ocfs2_error(ocfs2_metadata_cache_get_super(et->et_ci),
				    "Owner %llu has empty extent block at %llu",
				    (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
				    (unsigned long long)le64_to_cpu(eb->h_blkno));
			goto out;
		}

		/*
		 * XXX: The caller can not trust "path" any more after
		 * this as it will have been deleted. What do we do?
		 *
		 * In theory the rotate-for-merge code will never get
		 * here because it'll always ask for a rotate in a
		 * nonempty list.
		 */

		ret = ocfs2_remove_rightmost_path(handle, et, path,
						  dealloc);
		if (ret)
			mlog_errno(ret);
		goto out;
	}

	/*
	 * Now we can loop, remembering the path we get from -EAGAIN
	 * and restarting from there.
	 */
try_rotate:
	ret = __ocfs2_rotate_tree_left(handle, et, orig_credits, path,
				       dealloc, &restart_path);
	if (ret && ret != -EAGAIN) {
		mlog_errno(ret);
		goto out;
	}

	while (ret == -EAGAIN) {
		tmp_path = restart_path;
		restart_path = NULL;

		ret = __ocfs2_rotate_tree_left(handle, et, orig_credits,
					       tmp_path, dealloc,
					       &restart_path);
		if (ret && ret != -EAGAIN) {
			mlog_errno(ret);
			goto out;
		}

		ocfs2_free_path(tmp_path);
		tmp_path = NULL;

		if (ret == 0)
			goto try_rotate;
	}

out:
	ocfs2_free_path(tmp_path);
	ocfs2_free_path(restart_path);
	return ret;
}

/*
 * After a merge has drained the record at 'index' down to zero
 * clusters, turn it into a proper empty extent in list position 0.
 */
static void ocfs2_cleanup_merge(struct ocfs2_extent_list *el,
				int index)
{
	struct ocfs2_extent_rec *rec = &el->l_recs[index];
	unsigned int size;

	if (rec->e_leaf_clusters == 0) {
		/*
		 * We consumed all of the merged-from record. An empty
		 * extent cannot exist anywhere but the 1st array
		 * position, so move things over if the merged-from
		 * record doesn't occupy that position.
		 *
		 * This creates a new empty extent so the caller
		 * should be smart enough to have removed any existing
		 * ones.
		 */
		if (index > 0) {
			BUG_ON(ocfs2_is_empty_extent(&el->l_recs[0]));
			size = index * sizeof(struct ocfs2_extent_rec);
			memmove(&el->l_recs[1], &el->l_recs[0], size);
		}

		/*
		 * Always memset - the caller doesn't check whether it
		 * created an empty extent, so there could be junk in
		 * the other fields.
		 */
		memset(&el->l_recs[0], 0, sizeof(struct ocfs2_extent_rec));
	}
}

/*
 * Build and return (via *ret_right_path) the path to the leaf
 * immediately right of left_path. On failure nothing is returned and
 * any partial path is freed.
 */
static int ocfs2_get_right_path(struct ocfs2_extent_tree *et,
				struct ocfs2_path *left_path,
				struct ocfs2_path **ret_right_path)
{
	int ret;
	u32 right_cpos;
	struct ocfs2_path *right_path = NULL;
	struct ocfs2_extent_list *left_el;

	*ret_right_path = NULL;

	/* This function shouldn't be called for non-trees. */
	BUG_ON(left_path->p_tree_depth == 0);

	left_el = path_leaf_el(left_path);
	BUG_ON(left_el->l_next_free_rec != left_el->l_count);

	ret = ocfs2_find_cpos_for_right_leaf(ocfs2_metadata_cache_get_super(et->et_ci),
					     left_path, &right_cpos);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/* This function shouldn't be called for the rightmost leaf. */
	BUG_ON(right_cpos == 0);

	right_path = ocfs2_new_path_from_path(left_path);
	if (!right_path) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_find_path(et->et_ci, right_path, right_cpos);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	*ret_right_path = right_path;
out:
	if (ret)
		ocfs2_free_path(right_path);
	return ret;
}

/*
 * Remove split_rec clusters from the record at index and merge them
 * onto the beginning of the record "next" to it.
 * For index < l_count - 1, the next means the extent rec at index + 1.
 * For index == l_count - 1, the "next" means the 1st extent rec of the
 * next extent block.
 */
static int ocfs2_merge_rec_right(struct ocfs2_path *left_path,
				 handle_t *handle,
				 struct ocfs2_extent_tree *et,
				 struct ocfs2_extent_rec *split_rec,
				 int index)
{
	int ret, next_free, i;
	unsigned int split_clusters = le16_to_cpu(split_rec->e_leaf_clusters);
	struct ocfs2_extent_rec *left_rec;
	struct ocfs2_extent_rec *right_rec;
	struct ocfs2_extent_list *right_el;
	struct ocfs2_path *right_path = NULL;
	int subtree_index = 0;
	struct ocfs2_extent_list *el = path_leaf_el(left_path);
	struct buffer_head *bh = path_leaf_bh(left_path);
	struct buffer_head *root_bh = NULL;

	BUG_ON(index >= le16_to_cpu(el->l_next_free_rec));
	left_rec = &el->l_recs[index];

	if (index == le16_to_cpu(el->l_next_free_rec) - 1 &&
	    le16_to_cpu(el->l_next_free_rec) == le16_to_cpu(el->l_count)) {
		/* we meet with a cross extent block merge. */
		ret = ocfs2_get_right_path(et, left_path, &right_path);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		right_el = path_leaf_el(right_path);
		next_free = le16_to_cpu(right_el->l_next_free_rec);
		BUG_ON(next_free <= 0);
		right_rec = &right_el->l_recs[0];
		if (ocfs2_is_empty_extent(right_rec)) {
			BUG_ON(next_free <= 1);
			right_rec = &right_el->l_recs[1];
		}

		/* The two records must be contiguous for a merge. */
		BUG_ON(le32_to_cpu(left_rec->e_cpos) +
		       le16_to_cpu(left_rec->e_leaf_clusters) !=
		       le32_to_cpu(right_rec->e_cpos));

		subtree_index = ocfs2_find_subtree_root(et, left_path,
							right_path);

		ret = ocfs2_extend_rotate_transaction(handle, subtree_index,
						      handle->h_buffer_credits,
						      right_path);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		root_bh = left_path->p_node[subtree_index].bh;
		BUG_ON(root_bh != right_path->p_node[subtree_index].bh);

		ret = ocfs2_path_bh_journal_access(handle, et->et_ci, right_path,
						   subtree_index);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		for (i = subtree_index + 1; i < path_num_items(right_path); i++) {
			ret = ocfs2_path_bh_journal_access(handle, et->et_ci,
							   right_path, i);
			if (ret) {
				mlog_errno(ret);
				goto out;
			}

			ret = ocfs2_path_bh_journal_access(handle, et->et_ci,
							   left_path, i);
			if (ret) {
				mlog_errno(ret);
				goto out;
			}
		}

	} else {
		BUG_ON(index == le16_to_cpu(el->l_next_free_rec) - 1);
		right_rec = &el->l_recs[index + 1];
	}

	ret = ocfs2_path_bh_journal_access(handle, et->et_ci, left_path,
					   path_num_items(left_path) - 1);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/* Shrink the left record and grow the right record downward. */
	le16_add_cpu(&left_rec->e_leaf_clusters, -split_clusters);

	le32_add_cpu(&right_rec->e_cpos, -split_clusters);
	le64_add_cpu(&right_rec->e_blkno,
		     -ocfs2_clusters_to_blocks(ocfs2_metadata_cache_get_super(et->et_ci),
					       split_clusters));
	le16_add_cpu(&right_rec->e_leaf_clusters, split_clusters);

	ocfs2_cleanup_merge(el, index);

	ocfs2_journal_dirty(handle, bh);
	if (right_path) {
		ocfs2_journal_dirty(handle, path_leaf_bh(right_path));
		ocfs2_complete_edge_insert(handle, left_path, right_path,
					   subtree_index);
	}
out:
	if (right_path)
		ocfs2_free_path(right_path);
	return ret;
}

static int
ocfs2_get_left_path(struct ocfs2_extent_tree *et,
		    struct ocfs2_path *right_path,
		    struct ocfs2_path **ret_left_path)
{
	int ret;
	u32 left_cpos;
	struct ocfs2_path *left_path = NULL;

	*ret_left_path = NULL;

	/* This function shouldn't be called for non-trees. */
	BUG_ON(right_path->p_tree_depth == 0);

	ret = ocfs2_find_cpos_for_left_leaf(ocfs2_metadata_cache_get_super(et->et_ci),
					    right_path, &left_cpos);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/* This function shouldn't be called for the leftmost leaf. */
	BUG_ON(left_cpos == 0);

	left_path = ocfs2_new_path_from_path(right_path);
	if (!left_path) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_find_path(et->et_ci, left_path, left_cpos);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/* Caller owns the returned path and must free it. */
	*ret_left_path = left_path;
out:
	if (ret)
		ocfs2_free_path(left_path);
	return ret;
}

/*
 * Remove split_rec clusters from the record at index and merge them
 * onto the tail of the record "before" it.
 * For index > 0, the "before" means the extent rec at index - 1.
 *
 * For index == 0, the "before" means the last record of the previous
 * extent block. And there is also a situation that we may need to
 * remove the rightmost leaf extent block in the right_path and change
 * the right path to indicate the new rightmost path.
 */
static int ocfs2_merge_rec_left(struct ocfs2_path *right_path,
				handle_t *handle,
				struct ocfs2_extent_tree *et,
				struct ocfs2_extent_rec *split_rec,
				struct ocfs2_cached_dealloc_ctxt *dealloc,
				int index)
{
	int ret, i, subtree_index = 0, has_empty_extent = 0;
	unsigned int split_clusters = le16_to_cpu(split_rec->e_leaf_clusters);
	struct ocfs2_extent_rec *left_rec;
	struct ocfs2_extent_rec *right_rec;
	struct ocfs2_extent_list *el = path_leaf_el(right_path);
	struct buffer_head *bh = path_leaf_bh(right_path);
	struct buffer_head *root_bh = NULL;
	struct ocfs2_path *left_path = NULL;
	struct ocfs2_extent_list *left_el;

	BUG_ON(index < 0);

	right_rec = &el->l_recs[index];
	if (index == 0) {
		/* we meet with a cross extent block merge. */
		ret = ocfs2_get_left_path(et, right_path, &left_path);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		left_el = path_leaf_el(left_path);
		BUG_ON(le16_to_cpu(left_el->l_next_free_rec) !=
		       le16_to_cpu(left_el->l_count));

		left_rec = &left_el->l_recs[
				le16_to_cpu(left_el->l_next_free_rec) - 1];
		/* The merge target must be logically contiguous. */
		BUG_ON(le32_to_cpu(left_rec->e_cpos) +
		       le16_to_cpu(left_rec->e_leaf_clusters) !=
		       le32_to_cpu(split_rec->e_cpos));

		subtree_index = ocfs2_find_subtree_root(et, left_path,
							right_path);

		ret = ocfs2_extend_rotate_transaction(handle, subtree_index,
						      handle->h_buffer_credits,
						      left_path);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		root_bh = left_path->p_node[subtree_index].bh;
		BUG_ON(root_bh != right_path->p_node[subtree_index].bh);

		ret = ocfs2_path_bh_journal_access(handle, et->et_ci, right_path,
						   subtree_index);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		/* Journal every block on both paths below the subtree root. */
		for (i = subtree_index + 1; i < path_num_items(right_path); i++) {
			ret = ocfs2_path_bh_journal_access(handle, et->et_ci,
							   right_path, i);
			if (ret) {
				mlog_errno(ret);
				goto out;
			}

			ret = ocfs2_path_bh_journal_access(handle, et->et_ci,
							   left_path, i);
			if (ret) {
				mlog_errno(ret);
				goto out;
			}
		}
	} else {
		left_rec = &el->l_recs[index - 1];
		if (ocfs2_is_empty_extent(&el->l_recs[0]))
			has_empty_extent = 1;
	}

	ret = ocfs2_path_bh_journal_access(handle, et->et_ci, right_path,
					   path_num_items(right_path) - 1);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	if (has_empty_extent && index == 1) {
		/*
		 * The easy case - we can just plop the record right in.
		 */
		*left_rec = *split_rec;

		has_empty_extent = 0;
	} else
		le16_add_cpu(&left_rec->e_leaf_clusters, split_clusters);

	/* Shrink the right record from its left edge. */
	le32_add_cpu(&right_rec->e_cpos, split_clusters);
	le64_add_cpu(&right_rec->e_blkno,
		     ocfs2_clusters_to_blocks(ocfs2_metadata_cache_get_super(et->et_ci),
					      split_clusters));
	le16_add_cpu(&right_rec->e_leaf_clusters, -split_clusters);

	ocfs2_cleanup_merge(el, index);

	ocfs2_journal_dirty(handle, bh);
	if (left_path) {
		ocfs2_journal_dirty(handle, path_leaf_bh(left_path));

		/*
		 * In the situation that the right_rec is empty and the extent
		 * block is empty also, ocfs2_complete_edge_insert can't handle
		 * it and we need to delete the right extent block.
		 */
		if (le16_to_cpu(right_rec->e_leaf_clusters) == 0 &&
		    le16_to_cpu(el->l_next_free_rec) == 1) {

			ret = ocfs2_remove_rightmost_path(handle, et,
							  right_path,
							  dealloc);
			if (ret) {
				mlog_errno(ret);
				goto out;
			}

			/* Now the rightmost extent block has been deleted.
			 * So we use the new rightmost path.
			 */
			ocfs2_mv_path(right_path, left_path);
			left_path = NULL;
		} else
			ocfs2_complete_edge_insert(handle, left_path,
						   right_path, subtree_index);
	}
out:
	if (left_path)
		ocfs2_free_path(left_path);
	return ret;
}

/*
 * Merge split_rec into its neighbour(s) as directed by
 * ctxt->c_contig_type, rotating away any empty extents that the
 * merges create.
 */
static int ocfs2_try_to_merge_extent(handle_t *handle,
				     struct ocfs2_extent_tree *et,
				     struct ocfs2_path *path,
				     int split_index,
				     struct ocfs2_extent_rec *split_rec,
				     struct ocfs2_cached_dealloc_ctxt *dealloc,
				     struct ocfs2_merge_ctxt *ctxt)
{
	int ret = 0;
	struct ocfs2_extent_list *el = path_leaf_el(path);
	struct ocfs2_extent_rec *rec = &el->l_recs[split_index];

	BUG_ON(ctxt->c_contig_type == CONTIG_NONE);

	if (ctxt->c_split_covers_rec && ctxt->c_has_empty_extent) {
		/*
		 * The merge code will need to create an empty
		 * extent to take the place of the newly
		 * emptied slot. Remove any pre-existing empty
		 * extents - having more than one in a leaf is
		 * illegal.
		 */
		ret = ocfs2_rotate_tree_left(handle, et, path, dealloc);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
		/* The rotate shifted everything left by one slot. */
		split_index--;
		rec = &el->l_recs[split_index];
	}

	if (ctxt->c_contig_type == CONTIG_LEFTRIGHT) {
		/*
		 * Left-right contig implies this.
		 */
		BUG_ON(!ctxt->c_split_covers_rec);

		/*
		 * Since the leftright insert always covers the entire
		 * extent, this call will delete the insert record
		 * entirely, resulting in an empty extent record added to
		 * the extent block.
		 *
		 * Since the adding of an empty extent shifts
		 * everything back to the right, there's no need to
		 * update split_index here.
		 *
		 * When the split_index is zero, we need to merge it to the
		 * previous extent block. It is more efficient and easier
		 * if we do merge_right first and merge_left later.
		 */
		ret = ocfs2_merge_rec_right(path, handle, et, split_rec,
					    split_index);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		/*
		 * We can only get this from logic error above.
		 */
		BUG_ON(!ocfs2_is_empty_extent(&el->l_recs[0]));

		/* The merge left us with an empty extent, remove it. */
		ret = ocfs2_rotate_tree_left(handle, et, path, dealloc);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		rec = &el->l_recs[split_index];

		/*
		 * Note that we don't pass split_rec here on purpose -
		 * we've merged it into the rec already.
		 */
		ret = ocfs2_merge_rec_left(path, handle, et, rec,
					   dealloc, split_index);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		ret = ocfs2_rotate_tree_left(handle, et, path, dealloc);
		/*
		 * Error from this last rotate is not critical, so
		 * print but don't bubble it up.
		 */
		if (ret)
			mlog_errno(ret);
		ret = 0;
	} else {
		/*
		 * Merge a record to the left or right.
		 *
		 * 'contig_type' is relative to the existing record,
		 * so for example, if we're "right contig", it's to
		 * the record on the left (hence the left merge).
		 */
		if (ctxt->c_contig_type == CONTIG_RIGHT) {
			ret = ocfs2_merge_rec_left(path, handle, et,
						   split_rec, dealloc,
						   split_index);
			if (ret) {
				mlog_errno(ret);
				goto out;
			}
		} else {
			ret = ocfs2_merge_rec_right(path, handle,
						    et, split_rec,
						    split_index);
			if (ret) {
				mlog_errno(ret);
				goto out;
			}
		}

		if (ctxt->c_split_covers_rec) {
			/*
			 * The merge may have left an empty extent in
			 * our leaf. Try to rotate it away.
			 */
			ret = ocfs2_rotate_tree_left(handle, et, path,
						     dealloc);
			if (ret)
				mlog_errno(ret);
			ret = 0;
		}
	}

out:
	return ret;
}

/*
 * Shrink @rec by the clusters covered by @split_rec, trimming either
 * the left or the right edge depending on @split.
 */
static void ocfs2_subtract_from_rec(struct super_block *sb,
				    enum ocfs2_split_type split,
				    struct ocfs2_extent_rec *rec,
				    struct ocfs2_extent_rec *split_rec)
{
	u64 len_blocks;

	len_blocks = ocfs2_clusters_to_blocks(sb,
				le16_to_cpu(split_rec->e_leaf_clusters));

	if (split == SPLIT_LEFT) {
		/*
		 * Region is on the left edge of the existing
		 * record.
		 */
		le32_add_cpu(&rec->e_cpos,
			     le16_to_cpu(split_rec->e_leaf_clusters));
		le64_add_cpu(&rec->e_blkno, len_blocks);
		le16_add_cpu(&rec->e_leaf_clusters,
			     -le16_to_cpu(split_rec->e_leaf_clusters));
	} else {
		/*
		 * Region is on the right edge of the existing
		 * record.
		 */
		le16_add_cpu(&rec->e_leaf_clusters,
			     -le16_to_cpu(split_rec->e_leaf_clusters));
	}
}

/*
 * Do the final bits of extent record insertion at the target leaf
 * list. If this leaf is part of an allocation tree, it is assumed
 * that the tree above has been prepared.
 */
static void ocfs2_insert_at_leaf(struct ocfs2_extent_tree *et,
				 struct ocfs2_extent_rec *insert_rec,
				 struct ocfs2_extent_list *el,
				 struct ocfs2_insert_type *insert)
{
	int i = insert->ins_contig_index;
	unsigned int range;
	struct ocfs2_extent_rec *rec;

	BUG_ON(le16_to_cpu(el->l_tree_depth) != 0);

	if (insert->ins_split != SPLIT_NONE) {
		i = ocfs2_search_extent_list(el, le32_to_cpu(insert_rec->e_cpos));
		BUG_ON(i == -1);
		rec = &el->l_recs[i];
		ocfs2_subtract_from_rec(ocfs2_metadata_cache_get_super(et->et_ci),
					insert->ins_split, rec,
					insert_rec);
		goto rotate;
	}

	/*
	 * Contiguous insert - either left or right.
	 */
	if (insert->ins_contig != CONTIG_NONE) {
		rec = &el->l_recs[i];
		if (insert->ins_contig == CONTIG_LEFT) {
			rec->e_blkno = insert_rec->e_blkno;
			rec->e_cpos = insert_rec->e_cpos;
		}
		le16_add_cpu(&rec->e_leaf_clusters,
			     le16_to_cpu(insert_rec->e_leaf_clusters));
		return;
	}

	/*
	 * Handle insert into an empty leaf.
	 */
	if (le16_to_cpu(el->l_next_free_rec) == 0 ||
	    ((le16_to_cpu(el->l_next_free_rec) == 1) &&
	     ocfs2_is_empty_extent(&el->l_recs[0]))) {
		el->l_recs[0] = *insert_rec;
		el->l_next_free_rec = cpu_to_le16(1);
		return;
	}

	/*
	 * Appending insert.
	 */
	if (insert->ins_appending == APPEND_TAIL) {
		i = le16_to_cpu(el->l_next_free_rec) - 1;
		rec = &el->l_recs[i];
		range = le32_to_cpu(rec->e_cpos)
			+ le16_to_cpu(rec->e_leaf_clusters);
		BUG_ON(le32_to_cpu(insert_rec->e_cpos) < range);

		mlog_bug_on_msg(le16_to_cpu(el->l_next_free_rec) >=
				le16_to_cpu(el->l_count),
				"owner %llu, depth %u, count %u, next free %u, "
				"rec.cpos %u, rec.clusters %u, "
				"insert.cpos %u, insert.clusters %u\n",
				ocfs2_metadata_cache_owner(et->et_ci),
				le16_to_cpu(el->l_tree_depth),
				le16_to_cpu(el->l_count),
				le16_to_cpu(el->l_next_free_rec),
				le32_to_cpu(el->l_recs[i].e_cpos),
				le16_to_cpu(el->l_recs[i].e_leaf_clusters),
				le32_to_cpu(insert_rec->e_cpos),
				le16_to_cpu(insert_rec->e_leaf_clusters));
		i++;
		el->l_recs[i] = *insert_rec;
		le16_add_cpu(&el->l_next_free_rec, 1);
		return;
	}

rotate:
	/*
	 * Ok, we have to rotate.
	 *
	 * At this point, it is safe to assume that inserting into an
	 * empty leaf and appending to a leaf have both been handled
	 * above.
	 *
	 * This leaf needs to have space, either by the empty 1st
	 * extent record, or by virtue of an l_next_rec < l_count.
	 */
	ocfs2_rotate_leaf(el, insert_rec);
}

/*
 * Extend e_int_clusters on the rightmost record of every interior
 * node in @path to cover @insert_rec. The leaf itself is not touched.
 *
 * NOTE(review): on a corrupt (empty) extent list this sets ret = -EIO
 * and returns, but the function is void so the error is silently
 * dropped and callers cannot observe it - TODO confirm whether this
 * should propagate an error code.
 */
static void ocfs2_adjust_rightmost_records(handle_t *handle,
					   struct ocfs2_extent_tree *et,
					   struct ocfs2_path *path,
					   struct ocfs2_extent_rec *insert_rec)
{
	int ret, i, next_free;
	struct buffer_head *bh;
	struct ocfs2_extent_list *el;
	struct ocfs2_extent_rec *rec;

	/*
	 * Update everything except the leaf block.
	 */
	for (i = 0; i < path->p_tree_depth; i++) {
		bh = path->p_node[i].bh;
		el = path->p_node[i].el;

		next_free = le16_to_cpu(el->l_next_free_rec);
		if (next_free == 0) {
			ocfs2_error(ocfs2_metadata_cache_get_super(et->et_ci),
				    "Owner %llu has a bad extent list",
				    (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci));
			ret = -EIO;
			return;
		}

		rec = &el->l_recs[next_free - 1];

		/* e_int_clusters = end cpos of insert_rec - rec start cpos */
		rec->e_int_clusters = insert_rec->e_cpos;
		le32_add_cpu(&rec->e_int_clusters,
			     le16_to_cpu(insert_rec->e_leaf_clusters));
		le32_add_cpu(&rec->e_int_clusters,
			     -le32_to_cpu(rec->e_cpos));

		ocfs2_journal_dirty(handle, bh);
	}
}

/*
 * Prepare an append into the rightmost leaf: journal @right_path and
 * stretch its interior records over @insert_rec. If the append lands
 * on an (effectively) empty leaf, the neighbouring left path is looked
 * up and handed back via *ret_left_path for a later edge update.
 */
static int ocfs2_append_rec_to_path(handle_t *handle,
				    struct ocfs2_extent_tree *et,
				    struct ocfs2_extent_rec *insert_rec,
				    struct ocfs2_path *right_path,
				    struct ocfs2_path **ret_left_path)
{
	int ret, next_free;
	struct ocfs2_extent_list *el;
	struct ocfs2_path *left_path = NULL;

	*ret_left_path = NULL;

	/*
	 * This shouldn't happen for non-trees. The extent rec cluster
	 * count manipulation below only works for interior nodes.
	 */
	BUG_ON(right_path->p_tree_depth == 0);

	/*
	 * If our appending insert is at the leftmost edge of a leaf,
	 * then we might need to update the rightmost records of the
	 * neighboring path.
	 */
	el = path_leaf_el(right_path);
	next_free = le16_to_cpu(el->l_next_free_rec);
	if (next_free == 0 ||
	    (next_free == 1 && ocfs2_is_empty_extent(&el->l_recs[0]))) {
		u32 left_cpos;

		ret = ocfs2_find_cpos_for_left_leaf(ocfs2_metadata_cache_get_super(et->et_ci),
						    right_path, &left_cpos);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		trace_ocfs2_append_rec_to_path(
			(unsigned long long)
			ocfs2_metadata_cache_owner(et->et_ci),
			le32_to_cpu(insert_rec->e_cpos),
			left_cpos);

		/*
		 * No need to worry if the append is already in the
		 * leftmost leaf.
		 */
		if (left_cpos) {
			left_path = ocfs2_new_path_from_path(right_path);
			if (!left_path) {
				ret = -ENOMEM;
				mlog_errno(ret);
				goto out;
			}

			ret = ocfs2_find_path(et->et_ci, left_path,
					      left_cpos);
			if (ret) {
				mlog_errno(ret);
				goto out;
			}

			/*
			 * ocfs2_insert_path() will pass the left_path to the
			 * journal for us.
			 */
		}
	}

	ret = ocfs2_journal_access_path(et->et_ci, handle, right_path);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ocfs2_adjust_rightmost_records(handle, et, right_path, insert_rec);

	*ret_left_path = left_path;
	ret = 0;
out:
	if (ret != 0)
		ocfs2_free_path(left_path);

	return ret;
}

/*
 * Carve @split_rec out of the record that covers it in the right
 * (or, after rotation, the left) leaf and rotate the split record
 * into place. Pure in-memory list manipulation - the caller journals.
 */
static void ocfs2_split_record(struct ocfs2_extent_tree *et,
			       struct ocfs2_path *left_path,
			       struct ocfs2_path *right_path,
			       struct ocfs2_extent_rec *split_rec,
			       enum ocfs2_split_type split)
{
	int index;
	u32 cpos = le32_to_cpu(split_rec->e_cpos);
	struct ocfs2_extent_list *left_el = NULL, *right_el, *insert_el, *el;
	struct ocfs2_extent_rec *rec, *tmprec;

	right_el = path_leaf_el(right_path);
	if (left_path)
		left_el = path_leaf_el(left_path);

	el = right_el;
	insert_el = right_el;
	index = ocfs2_search_extent_list(el, cpos);
	if (index != -1) {
		if (index == 0 && left_path) {
			BUG_ON(ocfs2_is_empty_extent(&el->l_recs[0]));

			/*
			 * This typically means that the record
			 * started in the left path but moved to the
			 * right as a result of rotation. We either
			 * move the existing record to the left, or we
			 * do the later insert there.
			 *
			 * In this case, the left path should always
			 * exist as the rotate code will have passed
			 * it back for a post-insert update.
			 */

			if (split == SPLIT_LEFT) {
				/*
				 * It's a left split. Since we know
				 * that the rotate code gave us an
				 * empty extent in the left path, we
				 * can just do the insert there.
				 */
				insert_el = left_el;
			} else {
				/*
				 * Right split - we have to move the
				 * existing record over to the left
				 * leaf. The insert will be into the
				 * newly created empty extent in the
				 * right leaf.
				 */
				tmprec = &right_el->l_recs[index];
				ocfs2_rotate_leaf(left_el, tmprec);
				el = left_el;

				memset(tmprec, 0, sizeof(*tmprec));
				index = ocfs2_search_extent_list(left_el, cpos);
				BUG_ON(index == -1);
			}
		}
	} else {
		BUG_ON(!left_path);
		BUG_ON(!ocfs2_is_empty_extent(&left_el->l_recs[0]));
		/*
		 * Left path is easy - we can just allow the insert to
		 * happen.
		 */
		el = left_el;
		insert_el = left_el;
		index = ocfs2_search_extent_list(el, cpos);
		BUG_ON(index == -1);
	}

	rec = &el->l_recs[index];
	ocfs2_subtract_from_rec(ocfs2_metadata_cache_get_super(et->et_ci),
				split, rec, split_rec);
	ocfs2_rotate_leaf(insert_el, split_rec);
}

/*
 * This function only does inserts on an allocation b-tree. For tree
 * depth = 0, ocfs2_insert_at_leaf() is called directly.
 *
 * right_path is the path we want to do the actual insert
 * in. left_path should only be passed in if we need to update that
 * portion of the tree after an edge insert.
 */
static int ocfs2_insert_path(handle_t *handle,
			     struct ocfs2_extent_tree *et,
			     struct ocfs2_path *left_path,
			     struct ocfs2_path *right_path,
			     struct ocfs2_extent_rec *insert_rec,
			     struct ocfs2_insert_type *insert)
{
	int ret, subtree_index;
	struct buffer_head *leaf_bh = path_leaf_bh(right_path);

	if (left_path) {
		/*
		 * There's a chance that left_path got passed back to
		 * us without being accounted for in the
		 * journal. Extend our transaction here to be sure we
		 * can change those blocks.
		 */
		ret = ocfs2_extend_trans(handle, left_path->p_tree_depth);
		if (ret < 0) {
			mlog_errno(ret);
			goto out;
		}

		ret = ocfs2_journal_access_path(et->et_ci, handle, left_path);
		if (ret < 0) {
			mlog_errno(ret);
			goto out;
		}
	}

	/*
	 * Pass both paths to the journal. The majority of inserts
	 * will be touching all components anyway.
	 */
	ret = ocfs2_journal_access_path(et->et_ci, handle, right_path);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

	if (insert->ins_split != SPLIT_NONE) {
		/*
		 * We could call ocfs2_insert_at_leaf() for some types
		 * of splits, but it's easier to just let one separate
		 * function sort it all out.
		 */
		ocfs2_split_record(et, left_path, right_path,
				   insert_rec, insert->ins_split);

		/*
		 * Split might have modified either leaf and we don't
		 * have a guarantee that the later edge insert will
		 * dirty this for us.
		 */
		if (left_path)
			ocfs2_journal_dirty(handle,
					    path_leaf_bh(left_path));
	} else
		ocfs2_insert_at_leaf(et, insert_rec, path_leaf_el(right_path),
				     insert);

	ocfs2_journal_dirty(handle, leaf_bh);

	if (left_path) {
		/*
		 * The rotate code has indicated that we need to fix
		 * up portions of the tree after the insert.
		 *
		 * XXX: Should we extend the transaction here?
		 */
		subtree_index = ocfs2_find_subtree_root(et, left_path,
							right_path);
		ocfs2_complete_edge_insert(handle, left_path, right_path,
					   subtree_index);
	}

	ret = 0;
out:
	return ret;
}

/*
 * Route an insert to the proper leaf: handle depth-0 trees inline,
 * otherwise find the target (or rightmost) path and dispatch to the
 * rotate/append/plain-insert machinery.
 */
static int ocfs2_do_insert_extent(handle_t *handle,
				  struct ocfs2_extent_tree *et,
				  struct ocfs2_extent_rec *insert_rec,
				  struct ocfs2_insert_type *type)
{
	int ret, rotate = 0;
	u32 cpos;
	struct ocfs2_path *right_path = NULL;
	struct ocfs2_path *left_path = NULL;
	struct ocfs2_extent_list *el;

	el = et->et_root_el;

	ret = ocfs2_et_root_journal_access(handle, et,
					   OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	if (le16_to_cpu(el->l_tree_depth) == 0) {
		ocfs2_insert_at_leaf(et, insert_rec, el, type);
		goto out_update_clusters;
	}

	right_path = ocfs2_new_path_from_et(et);
	if (!right_path) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}

	/*
	 * Determine the path to start with. Rotations need the
	 * rightmost path, everything else can go directly to the
	 * target leaf.
	 */
	cpos = le32_to_cpu(insert_rec->e_cpos);
	if (type->ins_appending == APPEND_NONE &&
	    type->ins_contig == CONTIG_NONE) {
		rotate = 1;
		cpos = UINT_MAX;
	}

	ret = ocfs2_find_path(et->et_ci, right_path, cpos);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/*
	 * Rotations and appends need special treatment - they modify
	 * parts of the tree's above them.
	 *
	 * Both might pass back a path immediate to the left of the
	 * one being inserted to. This will be cause
	 * ocfs2_insert_path() to modify the rightmost records of
	 * left_path to account for an edge insert.
	 *
	 * XXX: When modifying this code, keep in mind that an insert
	 * can wind up skipping both of these two special cases...
	 */
	if (rotate) {
		ret = ocfs2_rotate_tree_right(handle, et, type->ins_split,
					      le32_to_cpu(insert_rec->e_cpos),
					      right_path, &left_path);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		/*
		 * ocfs2_rotate_tree_right() might have extended the
		 * transaction without re-journaling our tree root.
		 */
		ret = ocfs2_et_root_journal_access(handle, et,
						   OCFS2_JOURNAL_ACCESS_WRITE);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	} else if (type->ins_appending == APPEND_TAIL
		   && type->ins_contig != CONTIG_LEFT) {
		ret = ocfs2_append_rec_to_path(handle, et, insert_rec,
					       right_path, &left_path);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

	ret = ocfs2_insert_path(handle, et, left_path, right_path,
				insert_rec, type);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

out_update_clusters:
	if (type->ins_split == SPLIT_NONE)
		ocfs2_et_update_clusters(et,
					 le16_to_cpu(insert_rec->e_leaf_clusters));

	ocfs2_journal_dirty(handle, et->et_root_bh);

out:
	ocfs2_free_path(left_path);
	ocfs2_free_path(right_path);

	return ret;
}

/*
 * Work out whether split_rec is contiguous with the records around
 * position @index, following into the neighbouring leaf to the left
 * (index == 0) or right (last slot of a full list) when necessary.
 * Returns CONTIG_NONE/LEFT/RIGHT/LEFTRIGHT; lookup errors collapse to
 * CONTIG_NONE since the result is only an optimization hint.
 */
static enum ocfs2_contig_type
ocfs2_figure_merge_contig_type(struct ocfs2_extent_tree *et,
			       struct ocfs2_path *path,
			       struct ocfs2_extent_list *el, int index,
			       struct ocfs2_extent_rec *split_rec)
{
	int status;
	enum ocfs2_contig_type ret = CONTIG_NONE;
	u32 left_cpos, right_cpos;
	struct ocfs2_extent_rec *rec = NULL;
	struct ocfs2_extent_list *new_el;
	struct ocfs2_path *left_path = NULL, *right_path = NULL;
	struct buffer_head *bh;
	struct ocfs2_extent_block *eb;
	struct super_block *sb = ocfs2_metadata_cache_get_super(et->et_ci);

	if (index > 0) {
		rec = &el->l_recs[index - 1];
	} else if (path->p_tree_depth > 0) {
		status = ocfs2_find_cpos_for_left_leaf(sb, path, &left_cpos);
		if (status)
			goto out;

		if (left_cpos != 0) {
			left_path = ocfs2_new_path_from_path(path);
			if (!left_path)
				goto out;

			status = ocfs2_find_path(et->et_ci, left_path,
						 left_cpos);
			if (status)
				goto out;

			new_el = path_leaf_el(left_path);

			if (le16_to_cpu(new_el->l_next_free_rec) !=
			    le16_to_cpu(new_el->l_count)) {
				bh = path_leaf_bh(left_path);
				eb = (struct ocfs2_extent_block *)bh->b_data;
				ocfs2_error(sb,
					    "Extent block #%llu has an "
					    "invalid l_next_free_rec of "
					    "%d. It should have "
					    "matched the l_count of %d",
					    (unsigned long long)le64_to_cpu(eb->h_blkno),
					    le16_to_cpu(new_el->l_next_free_rec),
					    le16_to_cpu(new_el->l_count));
				status = -EINVAL;
				goto out;
			}
			rec = &new_el->l_recs[
				le16_to_cpu(new_el->l_next_free_rec) - 1];
		}
	}

	/*
	 * We're careful to check for an empty extent record here -
	 * the merge code will know what to do if it sees one.
	 */
	if (rec) {
		if (index == 1 && ocfs2_is_empty_extent(rec)) {
			if (split_rec->e_cpos == el->l_recs[index].e_cpos)
				ret = CONTIG_RIGHT;
		} else {
			ret = ocfs2_et_extent_contig(et, rec, split_rec);
		}
	}

	rec = NULL;
	if (index < (le16_to_cpu(el->l_next_free_rec) - 1))
		rec = &el->l_recs[index + 1];
	else if (le16_to_cpu(el->l_next_free_rec) == le16_to_cpu(el->l_count) &&
		 path->p_tree_depth > 0) {
		status = ocfs2_find_cpos_for_right_leaf(sb, path, &right_cpos);
		if (status)
			goto out;

		if (right_cpos == 0)
			goto out;

		right_path = ocfs2_new_path_from_path(path);
		if (!right_path)
			goto out;

		status = ocfs2_find_path(et->et_ci, right_path, right_cpos);
		if (status)
			goto out;

		new_el = path_leaf_el(right_path);
		rec = &new_el->l_recs[0];
		if (ocfs2_is_empty_extent(rec)) {
			if (le16_to_cpu(new_el->l_next_free_rec) <= 1) {
				bh = path_leaf_bh(right_path);
				eb = (struct ocfs2_extent_block *)bh->b_data;
				ocfs2_error(sb,
					    "Extent block #%llu has an "
					    "invalid l_next_free_rec of %d",
					    (unsigned long long)le64_to_cpu(eb->h_blkno),
					    le16_to_cpu(new_el->l_next_free_rec));
				status = -EINVAL;
				goto out;
			}
			rec = &new_el->l_recs[1];
		}
	}

	if (rec) {
		enum ocfs2_contig_type contig_type;

		contig_type = ocfs2_et_extent_contig(et, rec, split_rec);

		if (contig_type == CONTIG_LEFT && ret == CONTIG_RIGHT)
			ret = CONTIG_LEFTRIGHT;
		else if (ret == CONTIG_NONE)
			ret = contig_type;
	}

out:
	if (left_path)
		ocfs2_free_path(left_path);
	if (right_path)
		ocfs2_free_path(right_path);

	return ret;
}

/*
 * Scan a depth-0 extent list for a record contiguous with
 * @insert_rec and record the result in @insert (ins_contig and
 * ins_contig_index), honoring the tree's max-leaf-clusters limit.
 */
static void ocfs2_figure_contig_type(struct ocfs2_extent_tree *et,
				     struct ocfs2_insert_type *insert,
				     struct ocfs2_extent_list *el,
				     struct ocfs2_extent_rec *insert_rec)
{
	int i;
	enum ocfs2_contig_type contig_type = CONTIG_NONE;

	BUG_ON(le16_to_cpu(el->l_tree_depth) != 0);

	for (i = 0; i < le16_to_cpu(el->l_next_free_rec); i++) {
		contig_type = ocfs2_et_extent_contig(et, &el->l_recs[i],
						     insert_rec);
		if (contig_type != CONTIG_NONE) {
			insert->ins_contig_index = i;
			break;
		}
	}
	insert->ins_contig = contig_type;

	if (insert->ins_contig != CONTIG_NONE) {
		struct ocfs2_extent_rec *rec =
				&el->l_recs[insert->ins_contig_index];
		unsigned int len = le16_to_cpu(rec->e_leaf_clusters) +
				   le16_to_cpu(insert_rec->e_leaf_clusters);

		/*
		 * Caller might want us to limit the size of extents, don't
		 * calculate contiguousness if we might exceed that limit.
		 */
		if (et->et_max_leaf_clusters &&
		    (len > et->et_max_leaf_clusters))
			insert->ins_contig = CONTIG_NONE;
	}
}

/*
 * This should only be called against the rightmost leaf extent list.
 *
 * ocfs2_figure_appending_type() will figure out whether we'll have to
 * insert at the tail of the rightmost leaf.
 *
 * This should also work against the root extent list for tree's with 0
 * depth. If we consider the root extent list to be the rightmost leaf node
 * then the logic here makes sense.
 */
static void ocfs2_figure_appending_type(struct ocfs2_insert_type *insert,
					struct ocfs2_extent_list *el,
					struct ocfs2_extent_rec *insert_rec)
{
	int i;
	u32 cpos = le32_to_cpu(insert_rec->e_cpos);
	struct ocfs2_extent_rec *rec;

	insert->ins_appending = APPEND_NONE;

	BUG_ON(le16_to_cpu(el->l_tree_depth) != 0);

	if (!el->l_next_free_rec)
		goto set_tail_append;

	if (ocfs2_is_empty_extent(&el->l_recs[0])) {
		/* Were all records empty? */
		if (le16_to_cpu(el->l_next_free_rec) == 1)
			goto set_tail_append;
	}

	i = le16_to_cpu(el->l_next_free_rec) - 1;
	rec = &el->l_recs[i];

	if (cpos >=
	    (le32_to_cpu(rec->e_cpos) + le16_to_cpu(rec->e_leaf_clusters)))
		goto set_tail_append;

	return;

set_tail_append:
	insert->ins_appending = APPEND_TAIL;
}

/*
 * Helper function called at the beginning of an insert.
 *
 * This computes a few things that are commonly used in the process of
 * inserting into the btree:
 *   - Whether the new extent is contiguous with an existing one.
 *   - The current tree depth.
 *   - Whether the insert is an appending one.
 *   - The total # of free records in the tree.
 *
 * All of the information is stored on the ocfs2_insert_type
 * structure.
 */
static int ocfs2_figure_insert_type(struct ocfs2_extent_tree *et,
				    struct buffer_head **last_eb_bh,
				    struct ocfs2_extent_rec *insert_rec,
				    int *free_records,
				    struct ocfs2_insert_type *insert)
{
	int ret;
	struct ocfs2_extent_block *eb;
	struct ocfs2_extent_list *el;
	struct ocfs2_path *path = NULL;
	struct buffer_head *bh = NULL;

	insert->ins_split = SPLIT_NONE;

	el = et->et_root_el;
	insert->ins_tree_depth = le16_to_cpu(el->l_tree_depth);

	if (el->l_tree_depth) {
		/*
		 * If we have tree depth, we read in the
		 * rightmost extent block ahead of time as
		 * ocfs2_figure_insert_type() and ocfs2_add_branch()
		 * may want it later.
		 */
		ret = ocfs2_read_extent_block(et->et_ci,
					      ocfs2_et_get_last_eb_blk(et),
					      &bh);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
		eb = (struct ocfs2_extent_block *) bh->b_data;
		el = &eb->h_list;
	}

	/*
	 * Unless we have a contiguous insert, we'll need to know if
	 * there is room left in our allocation tree for another
	 * extent record.
	 *
	 * XXX: This test is simplistic, we can search for empty
	 * extent records too.
	 */
	*free_records = le16_to_cpu(el->l_count) -
		le16_to_cpu(el->l_next_free_rec);

	if (!insert->ins_tree_depth) {
		ocfs2_figure_contig_type(et, insert, el, insert_rec);
		ocfs2_figure_appending_type(insert, el, insert_rec);
		return 0;
	}

	path = ocfs2_new_path_from_et(et);
	if (!path) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}

	/*
	 * In the case that we're inserting past what the tree
	 * currently accounts for, ocfs2_find_path() will return for
	 * us the rightmost tree path. This is accounted for below in
	 * the appending code.
	 */
	ret = ocfs2_find_path(et->et_ci, path, le32_to_cpu(insert_rec->e_cpos));
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	el = path_leaf_el(path);

	/*
	 * Now that we have the path, there's two things we want to determine:
	 * 1) Contiguousness (also set contig_index if this is so)
	 *
	 * 2) Are we doing an append? We can trivially break this up
	 *     into two types of appends: simple record append, or a
	 *     rotate inside the tail leaf.
	 */
	ocfs2_figure_contig_type(et, insert, el, insert_rec);

	/*
	 * The insert code isn't quite ready to deal with all cases of
	 * left contiguousness. Specifically, if it's an insert into
	 * the 1st record in a leaf, it will require the adjustment of
	 * cluster count on the last record of the path directly to it's
	 * left. For now, just catch that case and fool the layers
	 * above us. This works just fine for tree_depth == 0, which
	 * is why we allow that above.
	 */
	if (insert->ins_contig == CONTIG_LEFT &&
	    insert->ins_contig_index == 0)
		insert->ins_contig = CONTIG_NONE;

	/*
	 * Ok, so we can simply compare against last_eb to figure out
	 * whether the path doesn't exist. This will only happen in
	 * the case that we're doing a tail append, so maybe we can
	 * take advantage of that information somehow.
	 */
	if (ocfs2_et_get_last_eb_blk(et) ==
	    path_leaf_bh(path)->b_blocknr) {
		/*
		 * Ok, ocfs2_find_path() returned us the rightmost
		 * tree path. This might be an appending insert. There are
		 * two cases:
		 *    1) We're doing a true append at the tail:
		 *	-This might even be off the end of the leaf
		 *    2) We're "appending" by rotating in the tail
		 */
		ocfs2_figure_appending_type(insert, el, insert_rec);
	}

out:
	ocfs2_free_path(path);

	/*
	 * On success, ownership of the rightmost extent block buffer
	 * passes to the caller via *last_eb_bh (caller must brelse);
	 * on error we drop our reference here.
	 */
	if (ret == 0)
		*last_eb_bh = bh;
	else
		brelse(bh);
	return ret;
}

/*
 * Insert an extent into a btree.
 *
 * The caller needs to update the owning btree's cluster count.
 */
int ocfs2_insert_extent(handle_t *handle,
			struct ocfs2_extent_tree *et,
			u32 cpos,
			u64 start_blk,
			u32 new_clusters,
			u8 flags,
			struct ocfs2_alloc_context *meta_ac)
{
	int status;
	/* set by ocfs2_figure_insert_type() before first use */
	int uninitialized_var(free_records);
	struct buffer_head *last_eb_bh = NULL;
	struct ocfs2_insert_type insert = {0, };
	struct ocfs2_extent_rec rec;

	trace_ocfs2_insert_extent_start(
		(unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
		cpos, new_clusters);

	/* Build the on-disk (little-endian) record for the new extent. */
	memset(&rec, 0, sizeof(rec));
	rec.e_cpos = cpu_to_le32(cpos);
	rec.e_blkno = cpu_to_le64(start_blk);
	rec.e_leaf_clusters = cpu_to_le16(new_clusters);
	rec.e_flags = flags;
	status = ocfs2_et_insert_check(et, &rec);
	if (status) {
		mlog_errno(status);
		goto bail;
	}

	status = ocfs2_figure_insert_type(et, &last_eb_bh, &rec,
					  &free_records, &insert);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	trace_ocfs2_insert_extent(insert.ins_appending, insert.ins_contig,
				  insert.ins_contig_index, free_records,
				  insert.ins_tree_depth);

	/*
	 * A non-contiguous insert needs a free record slot; grow the
	 * tree (using meta_ac for the new blocks) when none is left.
	 */
	if (insert.ins_contig == CONTIG_NONE && free_records == 0) {
		status = ocfs2_grow_tree(handle, et,
					 &insert.ins_tree_depth, &last_eb_bh,
					 meta_ac);
		if (status) {
			mlog_errno(status);
			goto bail;
		}
	}

	/* Finally, we can add clusters. This might rotate the tree for us. */
	status = ocfs2_do_insert_extent(handle, et, &rec, &insert);
	if (status < 0)
		mlog_errno(status);
	else
		ocfs2_et_extent_map_insert(et, &rec);

bail:
	brelse(last_eb_bh);

	return status;
}

/*
 * Allocate and add clusters into the extent b-tree.
 * The new clusters(clusters_to_add) will be inserted at logical_offset.
 * The extent b-tree's root is specified by et, and
 * it is not limited to the file storage. Any extent tree can use this
 * function if it implements the proper ocfs2_extent_tree.
 */
int ocfs2_add_clusters_in_btree(handle_t *handle,
				struct ocfs2_extent_tree *et,
				u32 *logical_offset,
				u32 clusters_to_add,
				int mark_unwritten,
				struct ocfs2_alloc_context *data_ac,
				struct ocfs2_alloc_context *meta_ac,
				enum ocfs2_alloc_restarted *reason_ret)
{
	int status = 0, err = 0;
	int free_extents;
	enum ocfs2_alloc_restarted reason = RESTART_NONE;
	u32 bit_off, num_bits;
	u64 block;
	u8 flags = 0;
	struct ocfs2_super *osb =
		OCFS2_SB(ocfs2_metadata_cache_get_super(et->et_ci));

	BUG_ON(!clusters_to_add);

	if (mark_unwritten)
		flags = OCFS2_EXT_UNWRITTEN;

	free_extents = ocfs2_num_free_extents(osb, et);
	if (free_extents < 0) {
		status = free_extents;
		mlog_errno(status);
		goto leave;
	}

	/* there are two cases which could cause us to EAGAIN in the
	 * we-need-more-metadata case:
	 * 1) we haven't reserved *any*
	 * 2) we are so fragmented, we've needed to add metadata too
	 *    many times. */
	if (!free_extents && !meta_ac) {
		err = -1;
		status = -EAGAIN;
		reason = RESTART_META;
		goto leave;
	} else if ((!free_extents)
		   && (ocfs2_alloc_context_bits_left(meta_ac)
		       < ocfs2_extend_meta_needed(et->et_root_el))) {
		err = -2;
		status = -EAGAIN;
		reason = RESTART_META;
		goto leave;
	}

	/* May claim fewer than clusters_to_add bits (partial success). */
	status = __ocfs2_claim_clusters(handle, data_ac, 1,
					clusters_to_add, &bit_off, &num_bits);
	if (status < 0) {
		if (status != -ENOSPC)
			mlog_errno(status);
		goto leave;
	}

	BUG_ON(num_bits > clusters_to_add);

	/* reserve our write early -- insert_extent may update the tree root */
	status = ocfs2_et_root_journal_access(handle, et,
					      OCFS2_JOURNAL_ACCESS_WRITE);
	if (status < 0) {
		mlog_errno(status);
		goto leave;
	}

	block = ocfs2_clusters_to_blocks(osb->sb, bit_off);
	trace_ocfs2_add_clusters_in_btree(
	     (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
	     bit_off, num_bits);
	status = ocfs2_insert_extent(handle, et, *logical_offset, block,
				     num_bits, flags, meta_ac);
	if (status < 0) {
		mlog_errno(status);
		goto leave;
	}

	ocfs2_journal_dirty(handle, et->et_root_bh);

	clusters_to_add -= num_bits;
	*logical_offset += num_bits;

	/*
	 * Not everything fit in this transaction; tell the caller to
	 * restart (err carries the remaining cluster count for tracing).
	 */
	if (clusters_to_add) {
		err = clusters_to_add;
		status = -EAGAIN;
		reason = RESTART_TRANS;
	}

leave:
	if (reason_ret)
		*reason_ret = reason;
	trace_ocfs2_add_clusters_in_btree_ret(status, reason, err);
	return status;
}

/*
 * Build the record describing the right-hand part of *rec when it is
 * split at cluster offset cpos: [cpos, end-of-rec), with the block
 * number advanced to match and the flags copied.
 */
static void ocfs2_make_right_split_rec(struct super_block *sb,
				       struct ocfs2_extent_rec *split_rec,
				       u32 cpos,
				       struct ocfs2_extent_rec *rec)
{
	u32 rec_cpos = le32_to_cpu(rec->e_cpos);
	u32 rec_range = rec_cpos + le16_to_cpu(rec->e_leaf_clusters);

	memset(split_rec, 0, sizeof(struct ocfs2_extent_rec));

	split_rec->e_cpos = cpu_to_le32(cpos);
	split_rec->e_leaf_clusters = cpu_to_le16(rec_range - cpos);

	split_rec->e_blkno = rec->e_blkno;
	le64_add_cpu(&split_rec->e_blkno,
		     ocfs2_clusters_to_blocks(sb, cpos - rec_cpos));

	split_rec->e_flags = rec->e_flags;
}

/*
 * Insert *orig_split_rec by splitting the record at split_index.  A
 * split in the middle of the record is done as a right split followed
 * by a second (left) pass via the leftright label.
 */
static int ocfs2_split_and_insert(handle_t *handle,
				  struct ocfs2_extent_tree *et,
				  struct ocfs2_path *path,
				  struct buffer_head **last_eb_bh,
				  int split_index,
				  struct ocfs2_extent_rec *orig_split_rec,
				  struct ocfs2_alloc_context *meta_ac)
{
	int ret = 0, depth;
	unsigned int insert_range, rec_range, do_leftright = 0;
	struct ocfs2_extent_rec tmprec;
	struct ocfs2_extent_list *rightmost_el;
	struct ocfs2_extent_rec rec;
	struct ocfs2_extent_rec split_rec = *orig_split_rec;
	struct ocfs2_insert_type insert;
	struct ocfs2_extent_block *eb;

leftright:
	/*
	 * Store a copy of the record on the stack - it might move
	 * around as the tree is manipulated below.
	 */
	rec = path_leaf_el(path)->l_recs[split_index];

	rightmost_el = et->et_root_el;

	depth = le16_to_cpu(rightmost_el->l_tree_depth);
	if (depth) {
		BUG_ON(!(*last_eb_bh));
		eb = (struct ocfs2_extent_block *) (*last_eb_bh)->b_data;
		rightmost_el = &eb->h_list;
	}

	/* The rightmost leaf is full - grow the tree before inserting. */
	if (le16_to_cpu(rightmost_el->l_next_free_rec) ==
	    le16_to_cpu(rightmost_el->l_count)) {
		ret = ocfs2_grow_tree(handle, et,
				      &depth, last_eb_bh, meta_ac);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

	memset(&insert, 0, sizeof(struct ocfs2_insert_type));
	insert.ins_appending = APPEND_NONE;
	insert.ins_contig = CONTIG_NONE;
	insert.ins_tree_depth = depth;

	insert_range = le32_to_cpu(split_rec.e_cpos) +
		le16_to_cpu(split_rec.e_leaf_clusters);
	rec_range = le32_to_cpu(rec.e_cpos) +
		le16_to_cpu(rec.e_leaf_clusters);

	if (split_rec.e_cpos == rec.e_cpos) {
		insert.ins_split = SPLIT_LEFT;
	} else if (insert_range == rec_range) {
		insert.ins_split = SPLIT_RIGHT;
	} else {
		/*
		 * Left/right split. We fake this as a right split
		 * first and then make a second pass as a left split.
		 */
		insert.ins_split = SPLIT_RIGHT;

		ocfs2_make_right_split_rec(ocfs2_metadata_cache_get_super(et->et_ci),
					   &tmprec, insert_range, &rec);

		split_rec = tmprec;

		BUG_ON(do_leftright);
		do_leftright = 1;
	}

	ret = ocfs2_do_insert_extent(handle, et, &split_rec, &insert);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	if (do_leftright == 1) {
		u32 cpos;
		struct ocfs2_extent_list *el;

		/* Second pass: re-find the (possibly moved) record and
		 * insert the original request as a left split. */
		do_leftright++;
		split_rec = *orig_split_rec;

		ocfs2_reinit_path(path, 1);

		cpos = le32_to_cpu(split_rec.e_cpos);
		ret = ocfs2_find_path(et->et_ci, path, cpos);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		el = path_leaf_el(path);
		split_index = ocfs2_search_extent_list(el, cpos);
		goto leftright;
	}
out:

	return ret;
}

/*
 * Overwrite the record at split_index in the journalled leaf with
 * *split_rec (used when the split exactly covers the old record).
 */
static int ocfs2_replace_extent_rec(handle_t *handle,
				    struct ocfs2_extent_tree *et,
				    struct ocfs2_path *path,
				    struct ocfs2_extent_list *el,
				    int split_index,
				    struct ocfs2_extent_rec *split_rec)
{
	int ret;

	ret = ocfs2_path_bh_journal_access(handle, et->et_ci, path,
					   path_num_items(path) - 1);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	el->l_recs[split_index] = *split_rec;

	ocfs2_journal_dirty(handle, path_leaf_bh(path));
out:
	return ret;
}

/*
 * Split part or all of the extent record at split_index in the leaf
 * pointed to by path. Merge with the contiguous extent record if needed.
 *
 * Care is taken to handle contiguousness so as to not grow the tree.
 *
 * meta_ac is not strictly necessary - we only truly need it if growth
 * of the tree is required. All other cases will degrade into a less
 * optimal tree layout.
 *
 * last_eb_bh should be the rightmost leaf block for any extent
 * btree. Since a split may grow the tree or a merge might shrink it,
 * the caller cannot trust the contents of that buffer after this call.
 *
 * This code is optimized for readability - several passes might be
 * made over certain portions of the tree. All of those blocks will
 * have been brought into cache (and pinned via the journal), so the
 * extra overhead is not expressed in terms of disk reads.
*/ int ocfs2_split_extent(handle_t *handle, struct ocfs2_extent_tree *et, struct ocfs2_path *path, int split_index, struct ocfs2_extent_rec *split_rec, struct ocfs2_alloc_context *meta_ac, struct ocfs2_cached_dealloc_ctxt *dealloc) { int ret = 0; struct ocfs2_extent_list *el = path_leaf_el(path); struct buffer_head *last_eb_bh = NULL; struct ocfs2_extent_rec *rec = &el->l_recs[split_index]; struct ocfs2_merge_ctxt ctxt; struct ocfs2_extent_list *rightmost_el; if (le32_to_cpu(rec->e_cpos) > le32_to_cpu(split_rec->e_cpos) || ((le32_to_cpu(rec->e_cpos) + le16_to_cpu(rec->e_leaf_clusters)) < (le32_to_cpu(split_rec->e_cpos) + le16_to_cpu(split_rec->e_leaf_clusters)))) { ret = -EIO; mlog_errno(ret); goto out; } ctxt.c_contig_type = ocfs2_figure_merge_contig_type(et, path, el, split_index, split_rec); /* * The core merge / split code wants to know how much room is * left in this allocation tree, so we pass the * rightmost extent list. */ if (path->p_tree_depth) { struct ocfs2_extent_block *eb; ret = ocfs2_read_extent_block(et->et_ci, ocfs2_et_get_last_eb_blk(et), &last_eb_bh); if (ret) { mlog_errno(ret); goto out; } eb = (struct ocfs2_extent_block *) last_eb_bh->b_data; rightmost_el = &eb->h_list; } else rightmost_el = path_root_el(path); if (rec->e_cpos == split_rec->e_cpos && rec->e_leaf_clusters == split_rec->e_leaf_clusters) ctxt.c_split_covers_rec = 1; else ctxt.c_split_covers_rec = 0; ctxt.c_has_empty_extent = ocfs2_is_empty_extent(&el->l_recs[0]); trace_ocfs2_split_extent(split_index, ctxt.c_contig_type, ctxt.c_has_empty_extent, ctxt.c_split_covers_rec); if (ctxt.c_contig_type == CONTIG_NONE) { if (ctxt.c_split_covers_rec) ret = ocfs2_replace_extent_rec(handle, et, path, el, split_index, split_rec); else ret = ocfs2_split_and_insert(handle, et, path, &last_eb_bh, split_index, split_rec, meta_ac); if (ret) mlog_errno(ret); } else { ret = ocfs2_try_to_merge_extent(handle, et, path, split_index, split_rec, dealloc, &ctxt); if (ret) mlog_errno(ret); } out: 
brelse(last_eb_bh); return ret; } /* * Change the flags of the already-existing extent at cpos for len clusters. * * new_flags: the flags we want to set. * clear_flags: the flags we want to clear. * phys: the new physical offset we want this new extent starts from. * * If the existing extent is larger than the request, initiate a * split. An attempt will be made at merging with adjacent extents. * * The caller is responsible for passing down meta_ac if we'll need it. */ int ocfs2_change_extent_flag(handle_t *handle, struct ocfs2_extent_tree *et, u32 cpos, u32 len, u32 phys, struct ocfs2_alloc_context *meta_ac, struct ocfs2_cached_dealloc_ctxt *dealloc, int new_flags, int clear_flags) { int ret, index; struct super_block *sb = ocfs2_metadata_cache_get_super(et->et_ci); u64 start_blkno = ocfs2_clusters_to_blocks(sb, phys); struct ocfs2_extent_rec split_rec; struct ocfs2_path *left_path = NULL; struct ocfs2_extent_list *el; struct ocfs2_extent_rec *rec; left_path = ocfs2_new_path_from_et(et); if (!left_path) { ret = -ENOMEM; mlog_errno(ret); goto out; } ret = ocfs2_find_path(et->et_ci, left_path, cpos); if (ret) { mlog_errno(ret); goto out; } el = path_leaf_el(left_path); index = ocfs2_search_extent_list(el, cpos); if (index == -1 || index >= le16_to_cpu(el->l_next_free_rec)) { ocfs2_error(sb, "Owner %llu has an extent at cpos %u which can no " "longer be found.\n", (unsigned long long) ocfs2_metadata_cache_owner(et->et_ci), cpos); ret = -EROFS; goto out; } ret = -EIO; rec = &el->l_recs[index]; if (new_flags && (rec->e_flags & new_flags)) { mlog(ML_ERROR, "Owner %llu tried to set %d flags on an " "extent that already had them", (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci), new_flags); goto out; } if (clear_flags && !(rec->e_flags & clear_flags)) { mlog(ML_ERROR, "Owner %llu tried to clear %d flags on an " "extent that didn't have them", (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci), clear_flags); goto out; } memset(&split_rec, 0, sizeof(struct 
ocfs2_extent_rec)); split_rec.e_cpos = cpu_to_le32(cpos); split_rec.e_leaf_clusters = cpu_to_le16(len); split_rec.e_blkno = cpu_to_le64(start_blkno); split_rec.e_flags = rec->e_flags; if (new_flags) split_rec.e_flags |= new_flags; if (clear_flags) split_rec.e_flags &= ~clear_flags; ret = ocfs2_split_extent(handle, et, left_path, index, &split_rec, meta_ac, dealloc); if (ret) mlog_errno(ret); out: ocfs2_free_path(left_path); return ret; } /* * Mark the already-existing extent at cpos as written for len clusters. * This removes the unwritten extent flag. * * If the existing extent is larger than the request, initiate a * split. An attempt will be made at merging with adjacent extents. * * The caller is responsible for passing down meta_ac if we'll need it. */ int ocfs2_mark_extent_written(struct inode *inode, struct ocfs2_extent_tree *et, handle_t *handle, u32 cpos, u32 len, u32 phys, struct ocfs2_alloc_context *meta_ac, struct ocfs2_cached_dealloc_ctxt *dealloc) { int ret; trace_ocfs2_mark_extent_written( (unsigned long long)OCFS2_I(inode)->ip_blkno, cpos, len, phys); if (!ocfs2_writes_unwritten_extents(OCFS2_SB(inode->i_sb))) { ocfs2_error(inode->i_sb, "Inode %llu has unwritten extents " "that are being written to, but the feature bit " "is not set in the super block.", (unsigned long long)OCFS2_I(inode)->ip_blkno); ret = -EROFS; goto out; } /* * XXX: This should be fixed up so that we just re-insert the * next extent records. 
*/ ocfs2_et_extent_map_truncate(et, 0); ret = ocfs2_change_extent_flag(handle, et, cpos, len, phys, meta_ac, dealloc, 0, OCFS2_EXT_UNWRITTEN); if (ret) mlog_errno(ret); out: return ret; } static int ocfs2_split_tree(handle_t *handle, struct ocfs2_extent_tree *et, struct ocfs2_path *path, int index, u32 new_range, struct ocfs2_alloc_context *meta_ac) { int ret, depth, credits; struct buffer_head *last_eb_bh = NULL; struct ocfs2_extent_block *eb; struct ocfs2_extent_list *rightmost_el, *el; struct ocfs2_extent_rec split_rec; struct ocfs2_extent_rec *rec; struct ocfs2_insert_type insert; /* * Setup the record to split before we grow the tree. */ el = path_leaf_el(path); rec = &el->l_recs[index]; ocfs2_make_right_split_rec(ocfs2_metadata_cache_get_super(et->et_ci), &split_rec, new_range, rec); depth = path->p_tree_depth; if (depth > 0) { ret = ocfs2_read_extent_block(et->et_ci, ocfs2_et_get_last_eb_blk(et), &last_eb_bh); if (ret < 0) { mlog_errno(ret); goto out; } eb = (struct ocfs2_extent_block *) last_eb_bh->b_data; rightmost_el = &eb->h_list; } else rightmost_el = path_leaf_el(path); credits = path->p_tree_depth + ocfs2_extend_meta_needed(et->et_root_el); ret = ocfs2_extend_trans(handle, credits); if (ret) { mlog_errno(ret); goto out; } if (le16_to_cpu(rightmost_el->l_next_free_rec) == le16_to_cpu(rightmost_el->l_count)) { ret = ocfs2_grow_tree(handle, et, &depth, &last_eb_bh, meta_ac); if (ret) { mlog_errno(ret); goto out; } } memset(&insert, 0, sizeof(struct ocfs2_insert_type)); insert.ins_appending = APPEND_NONE; insert.ins_contig = CONTIG_NONE; insert.ins_split = SPLIT_RIGHT; insert.ins_tree_depth = depth; ret = ocfs2_do_insert_extent(handle, et, &split_rec, &insert); if (ret) mlog_errno(ret); out: brelse(last_eb_bh); return ret; } static int ocfs2_truncate_rec(handle_t *handle, struct ocfs2_extent_tree *et, struct ocfs2_path *path, int index, struct ocfs2_cached_dealloc_ctxt *dealloc, u32 cpos, u32 len) { int ret; u32 left_cpos, rec_range, trunc_range; int 
wants_rotate = 0, is_rightmost_tree_rec = 0; struct super_block *sb = ocfs2_metadata_cache_get_super(et->et_ci); struct ocfs2_path *left_path = NULL; struct ocfs2_extent_list *el = path_leaf_el(path); struct ocfs2_extent_rec *rec; struct ocfs2_extent_block *eb; if (ocfs2_is_empty_extent(&el->l_recs[0]) && index > 0) { ret = ocfs2_rotate_tree_left(handle, et, path, dealloc); if (ret) { mlog_errno(ret); goto out; } index--; } if (index == (le16_to_cpu(el->l_next_free_rec) - 1) && path->p_tree_depth) { /* * Check whether this is the rightmost tree record. If * we remove all of this record or part of its right * edge then an update of the record lengths above it * will be required. */ eb = (struct ocfs2_extent_block *)path_leaf_bh(path)->b_data; if (eb->h_next_leaf_blk == 0) is_rightmost_tree_rec = 1; } rec = &el->l_recs[index]; if (index == 0 && path->p_tree_depth && le32_to_cpu(rec->e_cpos) == cpos) { /* * Changing the leftmost offset (via partial or whole * record truncate) of an interior (or rightmost) path * means we have to update the subtree that is formed * by this leaf and the one to it's left. * * There are two cases we can skip: * 1) Path is the leftmost one in our btree. * 2) The leaf is rightmost and will be empty after * we remove the extent record - the rotate code * knows how to update the newly formed edge. 
*/ ret = ocfs2_find_cpos_for_left_leaf(sb, path, &left_cpos); if (ret) { mlog_errno(ret); goto out; } if (left_cpos && le16_to_cpu(el->l_next_free_rec) > 1) { left_path = ocfs2_new_path_from_path(path); if (!left_path) { ret = -ENOMEM; mlog_errno(ret); goto out; } ret = ocfs2_find_path(et->et_ci, left_path, left_cpos); if (ret) { mlog_errno(ret); goto out; } } } ret = ocfs2_extend_rotate_transaction(handle, 0, handle->h_buffer_credits, path); if (ret) { mlog_errno(ret); goto out; } ret = ocfs2_journal_access_path(et->et_ci, handle, path); if (ret) { mlog_errno(ret); goto out; } ret = ocfs2_journal_access_path(et->et_ci, handle, left_path); if (ret) { mlog_errno(ret); goto out; } rec_range = le32_to_cpu(rec->e_cpos) + ocfs2_rec_clusters(el, rec); trunc_range = cpos + len; if (le32_to_cpu(rec->e_cpos) == cpos && rec_range == trunc_range) { int next_free; memset(rec, 0, sizeof(*rec)); ocfs2_cleanup_merge(el, index); wants_rotate = 1; next_free = le16_to_cpu(el->l_next_free_rec); if (is_rightmost_tree_rec && next_free > 1) { /* * We skip the edge update if this path will * be deleted by the rotate code. */ rec = &el->l_recs[next_free - 1]; ocfs2_adjust_rightmost_records(handle, et, path, rec); } } else if (le32_to_cpu(rec->e_cpos) == cpos) { /* Remove leftmost portion of the record. */ le32_add_cpu(&rec->e_cpos, len); le64_add_cpu(&rec->e_blkno, ocfs2_clusters_to_blocks(sb, len)); le16_add_cpu(&rec->e_leaf_clusters, -len); } else if (rec_range == trunc_range) { /* Remove rightmost portion of the record */ le16_add_cpu(&rec->e_leaf_clusters, -len); if (is_rightmost_tree_rec) ocfs2_adjust_rightmost_records(handle, et, path, rec); } else { /* Caller should have trapped this. 
*/ mlog(ML_ERROR, "Owner %llu: Invalid record truncate: (%u, %u) " "(%u, %u)\n", (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci), le32_to_cpu(rec->e_cpos), le16_to_cpu(rec->e_leaf_clusters), cpos, len); BUG(); } if (left_path) { int subtree_index; subtree_index = ocfs2_find_subtree_root(et, left_path, path); ocfs2_complete_edge_insert(handle, left_path, path, subtree_index); } ocfs2_journal_dirty(handle, path_leaf_bh(path)); ret = ocfs2_rotate_tree_left(handle, et, path, dealloc); if (ret) { mlog_errno(ret); goto out; } out: ocfs2_free_path(left_path); return ret; } int ocfs2_remove_extent(handle_t *handle, struct ocfs2_extent_tree *et, u32 cpos, u32 len, struct ocfs2_alloc_context *meta_ac, struct ocfs2_cached_dealloc_ctxt *dealloc) { int ret, index; u32 rec_range, trunc_range; struct ocfs2_extent_rec *rec; struct ocfs2_extent_list *el; struct ocfs2_path *path = NULL; /* * XXX: Why are we truncating to 0 instead of wherever this * affects us? */ ocfs2_et_extent_map_truncate(et, 0); path = ocfs2_new_path_from_et(et); if (!path) { ret = -ENOMEM; mlog_errno(ret); goto out; } ret = ocfs2_find_path(et->et_ci, path, cpos); if (ret) { mlog_errno(ret); goto out; } el = path_leaf_el(path); index = ocfs2_search_extent_list(el, cpos); if (index == -1 || index >= le16_to_cpu(el->l_next_free_rec)) { ocfs2_error(ocfs2_metadata_cache_get_super(et->et_ci), "Owner %llu has an extent at cpos %u which can no " "longer be found.\n", (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci), cpos); ret = -EROFS; goto out; } /* * We have 3 cases of extent removal: * 1) Range covers the entire extent rec * 2) Range begins or ends on one edge of the extent rec * 3) Range is in the middle of the extent rec (no shared edges) * * For case 1 we remove the extent rec and left rotate to * fill the hole. * * For case 2 we just shrink the existing extent rec, with a * tree update if the shrinking edge is also the edge of an * extent block. 
* * For case 3 we do a right split to turn the extent rec into * something case 2 can handle. */ rec = &el->l_recs[index]; rec_range = le32_to_cpu(rec->e_cpos) + ocfs2_rec_clusters(el, rec); trunc_range = cpos + len; BUG_ON(cpos < le32_to_cpu(rec->e_cpos) || trunc_range > rec_range); trace_ocfs2_remove_extent( (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci), cpos, len, index, le32_to_cpu(rec->e_cpos), ocfs2_rec_clusters(el, rec)); if (le32_to_cpu(rec->e_cpos) == cpos || rec_range == trunc_range) { ret = ocfs2_truncate_rec(handle, et, path, index, dealloc, cpos, len); if (ret) { mlog_errno(ret); goto out; } } else { ret = ocfs2_split_tree(handle, et, path, index, trunc_range, meta_ac); if (ret) { mlog_errno(ret); goto out; } /* * The split could have manipulated the tree enough to * move the record location, so we have to look for it again. */ ocfs2_reinit_path(path, 1); ret = ocfs2_find_path(et->et_ci, path, cpos); if (ret) { mlog_errno(ret); goto out; } el = path_leaf_el(path); index = ocfs2_search_extent_list(el, cpos); if (index == -1 || index >= le16_to_cpu(el->l_next_free_rec)) { ocfs2_error(ocfs2_metadata_cache_get_super(et->et_ci), "Owner %llu: split at cpos %u lost record.", (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci), cpos); ret = -EROFS; goto out; } /* * Double check our values here. If anything is fishy, * it's easier to catch it at the top level. 
*/ rec = &el->l_recs[index]; rec_range = le32_to_cpu(rec->e_cpos) + ocfs2_rec_clusters(el, rec); if (rec_range != trunc_range) { ocfs2_error(ocfs2_metadata_cache_get_super(et->et_ci), "Owner %llu: error after split at cpos %u" "trunc len %u, existing record is (%u,%u)", (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci), cpos, len, le32_to_cpu(rec->e_cpos), ocfs2_rec_clusters(el, rec)); ret = -EROFS; goto out; } ret = ocfs2_truncate_rec(handle, et, path, index, dealloc, cpos, len); if (ret) { mlog_errno(ret); goto out; } } out: ocfs2_free_path(path); return ret; } /* * ocfs2_reserve_blocks_for_rec_trunc() would look basically the * same as ocfs2_lock_alloctors(), except for it accepts a blocks * number to reserve some extra blocks, and it only handles meta * data allocations. * * Currently, only ocfs2_remove_btree_range() uses it for truncating * and punching holes. */ static int ocfs2_reserve_blocks_for_rec_trunc(struct inode *inode, struct ocfs2_extent_tree *et, u32 extents_to_split, struct ocfs2_alloc_context **ac, int extra_blocks) { int ret = 0, num_free_extents; unsigned int max_recs_needed = 2 * extents_to_split; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); *ac = NULL; num_free_extents = ocfs2_num_free_extents(osb, et); if (num_free_extents < 0) { ret = num_free_extents; mlog_errno(ret); goto out; } if (!num_free_extents || (ocfs2_sparse_alloc(osb) && num_free_extents < max_recs_needed)) extra_blocks += ocfs2_extend_meta_needed(et->et_root_el); if (extra_blocks) { ret = ocfs2_reserve_new_metadata_blocks(osb, extra_blocks, ac); if (ret < 0) { if (ret != -ENOSPC) mlog_errno(ret); goto out; } } out: if (ret) { if (*ac) { ocfs2_free_alloc_context(*ac); *ac = NULL; } } return ret; } int ocfs2_remove_btree_range(struct inode *inode, struct ocfs2_extent_tree *et, u32 cpos, u32 phys_cpos, u32 len, int flags, struct ocfs2_cached_dealloc_ctxt *dealloc, u64 refcount_loc) { int ret, credits = 0, extra_blocks = 0; u64 phys_blkno = 
ocfs2_clusters_to_blocks(inode->i_sb, phys_cpos); struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); struct inode *tl_inode = osb->osb_tl_inode; handle_t *handle; struct ocfs2_alloc_context *meta_ac = NULL; struct ocfs2_refcount_tree *ref_tree = NULL; if ((flags & OCFS2_EXT_REFCOUNTED) && len) { BUG_ON(!(OCFS2_I(inode)->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL)); ret = ocfs2_lock_refcount_tree(osb, refcount_loc, 1, &ref_tree, NULL); if (ret) { mlog_errno(ret); goto out; } ret = ocfs2_prepare_refcount_change_for_del(inode, refcount_loc, phys_blkno, len, &credits, &extra_blocks); if (ret < 0) { mlog_errno(ret); goto out; } } ret = ocfs2_reserve_blocks_for_rec_trunc(inode, et, 1, &meta_ac, extra_blocks); if (ret) { mlog_errno(ret); return ret; } mutex_lock(&tl_inode->i_mutex); if (ocfs2_truncate_log_needs_flush(osb)) { ret = __ocfs2_flush_truncate_log(osb); if (ret < 0) { mlog_errno(ret); goto out; } } handle = ocfs2_start_trans(osb, ocfs2_remove_extent_credits(osb->sb) + credits); if (IS_ERR(handle)) { ret = PTR_ERR(handle); mlog_errno(ret); goto out; } ret = ocfs2_et_root_journal_access(handle, et, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out; } dquot_free_space_nodirty(inode, ocfs2_clusters_to_bytes(inode->i_sb, len)); ret = ocfs2_remove_extent(handle, et, cpos, len, meta_ac, dealloc); if (ret) { mlog_errno(ret); goto out_commit; } ocfs2_et_update_clusters(et, -len); ocfs2_journal_dirty(handle, et->et_root_bh); if (phys_blkno) { if (flags & OCFS2_EXT_REFCOUNTED) ret = ocfs2_decrease_refcount(inode, handle, ocfs2_blocks_to_clusters(osb->sb, phys_blkno), len, meta_ac, dealloc, 1); else ret = ocfs2_truncate_log_append(osb, handle, phys_blkno, len); if (ret) mlog_errno(ret); } out_commit: ocfs2_commit_trans(osb, handle); out: mutex_unlock(&tl_inode->i_mutex); if (meta_ac) ocfs2_free_alloc_context(meta_ac); if (ref_tree) ocfs2_unlock_refcount_tree(osb, ref_tree, 1); return ret; } int ocfs2_truncate_log_needs_flush(struct ocfs2_super *osb) { struct 
buffer_head *tl_bh = osb->osb_tl_bh; struct ocfs2_dinode *di; struct ocfs2_truncate_log *tl; di = (struct ocfs2_dinode *) tl_bh->b_data; tl = &di->id2.i_dealloc; mlog_bug_on_msg(le16_to_cpu(tl->tl_used) > le16_to_cpu(tl->tl_count), "slot %d, invalid truncate log parameters: used = " "%u, count = %u\n", osb->slot_num, le16_to_cpu(tl->tl_used), le16_to_cpu(tl->tl_count)); return le16_to_cpu(tl->tl_used) == le16_to_cpu(tl->tl_count); } static int ocfs2_truncate_log_can_coalesce(struct ocfs2_truncate_log *tl, unsigned int new_start) { unsigned int tail_index; unsigned int current_tail; /* No records, nothing to coalesce */ if (!le16_to_cpu(tl->tl_used)) return 0; tail_index = le16_to_cpu(tl->tl_used) - 1; current_tail = le32_to_cpu(tl->tl_recs[tail_index].t_start); current_tail += le32_to_cpu(tl->tl_recs[tail_index].t_clusters); return current_tail == new_start; } int ocfs2_truncate_log_append(struct ocfs2_super *osb, handle_t *handle, u64 start_blk, unsigned int num_clusters) { int status, index; unsigned int start_cluster, tl_count; struct inode *tl_inode = osb->osb_tl_inode; struct buffer_head *tl_bh = osb->osb_tl_bh; struct ocfs2_dinode *di; struct ocfs2_truncate_log *tl; BUG_ON(mutex_trylock(&tl_inode->i_mutex)); start_cluster = ocfs2_blocks_to_clusters(osb->sb, start_blk); di = (struct ocfs2_dinode *) tl_bh->b_data; /* tl_bh is loaded from ocfs2_truncate_log_init(). It's validated * by the underlying call to ocfs2_read_inode_block(), so any * corruption is a code bug */ BUG_ON(!OCFS2_IS_VALID_DINODE(di)); tl = &di->id2.i_dealloc; tl_count = le16_to_cpu(tl->tl_count); mlog_bug_on_msg(tl_count > ocfs2_truncate_recs_per_inode(osb->sb) || tl_count == 0, "Truncate record count on #%llu invalid " "wanted %u, actual %u\n", (unsigned long long)OCFS2_I(tl_inode)->ip_blkno, ocfs2_truncate_recs_per_inode(osb->sb), le16_to_cpu(tl->tl_count)); /* Caller should have known to flush before calling us. 
*/ index = le16_to_cpu(tl->tl_used); if (index >= tl_count) { status = -ENOSPC; mlog_errno(status); goto bail; } status = ocfs2_journal_access_di(handle, INODE_CACHE(tl_inode), tl_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (status < 0) { mlog_errno(status); goto bail; } trace_ocfs2_truncate_log_append( (unsigned long long)OCFS2_I(tl_inode)->ip_blkno, index, start_cluster, num_clusters); if (ocfs2_truncate_log_can_coalesce(tl, start_cluster)) { /* * Move index back to the record we are coalescing with. * ocfs2_truncate_log_can_coalesce() guarantees nonzero */ index--; num_clusters += le32_to_cpu(tl->tl_recs[index].t_clusters); trace_ocfs2_truncate_log_append( (unsigned long long)OCFS2_I(tl_inode)->ip_blkno, index, le32_to_cpu(tl->tl_recs[index].t_start), num_clusters); } else { tl->tl_recs[index].t_start = cpu_to_le32(start_cluster); tl->tl_used = cpu_to_le16(index + 1); } tl->tl_recs[index].t_clusters = cpu_to_le32(num_clusters); ocfs2_journal_dirty(handle, tl_bh); osb->truncated_clusters += num_clusters; bail: return status; } static int ocfs2_replay_truncate_records(struct ocfs2_super *osb, handle_t *handle, struct inode *data_alloc_inode, struct buffer_head *data_alloc_bh) { int status = 0; int i; unsigned int num_clusters; u64 start_blk; struct ocfs2_truncate_rec rec; struct ocfs2_dinode *di; struct ocfs2_truncate_log *tl; struct inode *tl_inode = osb->osb_tl_inode; struct buffer_head *tl_bh = osb->osb_tl_bh; di = (struct ocfs2_dinode *) tl_bh->b_data; tl = &di->id2.i_dealloc; i = le16_to_cpu(tl->tl_used) - 1; while (i >= 0) { /* Caller has given us at least enough credits to * update the truncate log dinode */ status = ocfs2_journal_access_di(handle, INODE_CACHE(tl_inode), tl_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (status < 0) { mlog_errno(status); goto bail; } tl->tl_used = cpu_to_le16(i); ocfs2_journal_dirty(handle, tl_bh); /* TODO: Perhaps we can calculate the bulk of the * credits up front rather than extending like * this. 
*/ status = ocfs2_extend_trans(handle, OCFS2_TRUNCATE_LOG_FLUSH_ONE_REC); if (status < 0) { mlog_errno(status); goto bail; } rec = tl->tl_recs[i]; start_blk = ocfs2_clusters_to_blocks(data_alloc_inode->i_sb, le32_to_cpu(rec.t_start)); num_clusters = le32_to_cpu(rec.t_clusters); /* if start_blk is not set, we ignore the record as * invalid. */ if (start_blk) { trace_ocfs2_replay_truncate_records( (unsigned long long)OCFS2_I(tl_inode)->ip_blkno, i, le32_to_cpu(rec.t_start), num_clusters); status = ocfs2_free_clusters(handle, data_alloc_inode, data_alloc_bh, start_blk, num_clusters); if (status < 0) { mlog_errno(status); goto bail; } } i--; } osb->truncated_clusters = 0; bail: return status; } /* Expects you to already be holding tl_inode->i_mutex */ int __ocfs2_flush_truncate_log(struct ocfs2_super *osb) { int status; unsigned int num_to_flush; handle_t *handle; struct inode *tl_inode = osb->osb_tl_inode; struct inode *data_alloc_inode = NULL; struct buffer_head *tl_bh = osb->osb_tl_bh; struct buffer_head *data_alloc_bh = NULL; struct ocfs2_dinode *di; struct ocfs2_truncate_log *tl; BUG_ON(mutex_trylock(&tl_inode->i_mutex)); di = (struct ocfs2_dinode *) tl_bh->b_data; /* tl_bh is loaded from ocfs2_truncate_log_init(). 
It's validated * by the underlying call to ocfs2_read_inode_block(), so any * corruption is a code bug */ BUG_ON(!OCFS2_IS_VALID_DINODE(di)); tl = &di->id2.i_dealloc; num_to_flush = le16_to_cpu(tl->tl_used); trace_ocfs2_flush_truncate_log( (unsigned long long)OCFS2_I(tl_inode)->ip_blkno, num_to_flush); if (!num_to_flush) { status = 0; goto out; } data_alloc_inode = ocfs2_get_system_file_inode(osb, GLOBAL_BITMAP_SYSTEM_INODE, OCFS2_INVALID_SLOT); if (!data_alloc_inode) { status = -EINVAL; mlog(ML_ERROR, "Could not get bitmap inode!\n"); goto out; } mutex_lock(&data_alloc_inode->i_mutex); status = ocfs2_inode_lock(data_alloc_inode, &data_alloc_bh, 1); if (status < 0) { mlog_errno(status); goto out_mutex; } handle = ocfs2_start_trans(osb, OCFS2_TRUNCATE_LOG_UPDATE); if (IS_ERR(handle)) { status = PTR_ERR(handle); mlog_errno(status); goto out_unlock; } status = ocfs2_replay_truncate_records(osb, handle, data_alloc_inode, data_alloc_bh); if (status < 0) mlog_errno(status); ocfs2_commit_trans(osb, handle); out_unlock: brelse(data_alloc_bh); ocfs2_inode_unlock(data_alloc_inode, 1); out_mutex: mutex_unlock(&data_alloc_inode->i_mutex); iput(data_alloc_inode); out: return status; } int ocfs2_flush_truncate_log(struct ocfs2_super *osb) { int status; struct inode *tl_inode = osb->osb_tl_inode; mutex_lock(&tl_inode->i_mutex); status = __ocfs2_flush_truncate_log(osb); mutex_unlock(&tl_inode->i_mutex); return status; } static void ocfs2_truncate_log_worker(struct work_struct *work) { int status; struct ocfs2_super *osb = container_of(work, struct ocfs2_super, osb_truncate_log_wq.work); status = ocfs2_flush_truncate_log(osb); if (status < 0) mlog_errno(status); else ocfs2_init_steal_slots(osb); } #define OCFS2_TRUNCATE_LOG_FLUSH_INTERVAL (2 * HZ) void ocfs2_schedule_truncate_log_flush(struct ocfs2_super *osb, int cancel) { if (osb->osb_tl_inode) { /* We want to push off log flushes while truncates are * still running. 
*/ if (cancel) cancel_delayed_work(&osb->osb_truncate_log_wq); queue_delayed_work(ocfs2_wq, &osb->osb_truncate_log_wq, OCFS2_TRUNCATE_LOG_FLUSH_INTERVAL); } } static int ocfs2_get_truncate_log_info(struct ocfs2_super *osb, int slot_num, struct inode **tl_inode, struct buffer_head **tl_bh) { int status; struct inode *inode = NULL; struct buffer_head *bh = NULL; inode = ocfs2_get_system_file_inode(osb, TRUNCATE_LOG_SYSTEM_INODE, slot_num); if (!inode) { status = -EINVAL; mlog(ML_ERROR, "Could not get load truncate log inode!\n"); goto bail; } status = ocfs2_read_inode_block(inode, &bh); if (status < 0) { iput(inode); mlog_errno(status); goto bail; } *tl_inode = inode; *tl_bh = bh; bail: return status; } /* called during the 1st stage of node recovery. we stamp a clean * truncate log and pass back a copy for processing later. if the * truncate log does not require processing, a *tl_copy is set to * NULL. */ int ocfs2_begin_truncate_log_recovery(struct ocfs2_super *osb, int slot_num, struct ocfs2_dinode **tl_copy) { int status; struct inode *tl_inode = NULL; struct buffer_head *tl_bh = NULL; struct ocfs2_dinode *di; struct ocfs2_truncate_log *tl; *tl_copy = NULL; trace_ocfs2_begin_truncate_log_recovery(slot_num); status = ocfs2_get_truncate_log_info(osb, slot_num, &tl_inode, &tl_bh); if (status < 0) { mlog_errno(status); goto bail; } di = (struct ocfs2_dinode *) tl_bh->b_data; /* tl_bh is loaded from ocfs2_get_truncate_log_info(). It's * validated by the underlying call to ocfs2_read_inode_block(), * so any corruption is a code bug */ BUG_ON(!OCFS2_IS_VALID_DINODE(di)); tl = &di->id2.i_dealloc; if (le16_to_cpu(tl->tl_used)) { trace_ocfs2_truncate_log_recovery_num(le16_to_cpu(tl->tl_used)); *tl_copy = kmalloc(tl_bh->b_size, GFP_KERNEL); if (!(*tl_copy)) { status = -ENOMEM; mlog_errno(status); goto bail; } /* Assuming the write-out below goes well, this copy * will be passed back to recovery for processing. 
*/ memcpy(*tl_copy, tl_bh->b_data, tl_bh->b_size); /* All we need to do to clear the truncate log is set * tl_used. */ tl->tl_used = 0; ocfs2_compute_meta_ecc(osb->sb, tl_bh->b_data, &di->i_check); status = ocfs2_write_block(osb, tl_bh, INODE_CACHE(tl_inode)); if (status < 0) { mlog_errno(status); goto bail; } } bail: if (tl_inode) iput(tl_inode); brelse(tl_bh); if (status < 0 && (*tl_copy)) { kfree(*tl_copy); *tl_copy = NULL; mlog_errno(status); } return status; } int ocfs2_complete_truncate_log_recovery(struct ocfs2_super *osb, struct ocfs2_dinode *tl_copy) { int status = 0; int i; unsigned int clusters, num_recs, start_cluster; u64 start_blk; handle_t *handle; struct inode *tl_inode = osb->osb_tl_inode; struct ocfs2_truncate_log *tl; if (OCFS2_I(tl_inode)->ip_blkno == le64_to_cpu(tl_copy->i_blkno)) { mlog(ML_ERROR, "Asked to recover my own truncate log!\n"); return -EINVAL; } tl = &tl_copy->id2.i_dealloc; num_recs = le16_to_cpu(tl->tl_used); trace_ocfs2_complete_truncate_log_recovery( (unsigned long long)le64_to_cpu(tl_copy->i_blkno), num_recs); mutex_lock(&tl_inode->i_mutex); for(i = 0; i < num_recs; i++) { if (ocfs2_truncate_log_needs_flush(osb)) { status = __ocfs2_flush_truncate_log(osb); if (status < 0) { mlog_errno(status); goto bail_up; } } handle = ocfs2_start_trans(osb, OCFS2_TRUNCATE_LOG_UPDATE); if (IS_ERR(handle)) { status = PTR_ERR(handle); mlog_errno(status); goto bail_up; } clusters = le32_to_cpu(tl->tl_recs[i].t_clusters); start_cluster = le32_to_cpu(tl->tl_recs[i].t_start); start_blk = ocfs2_clusters_to_blocks(osb->sb, start_cluster); status = ocfs2_truncate_log_append(osb, handle, start_blk, clusters); ocfs2_commit_trans(osb, handle); if (status < 0) { mlog_errno(status); goto bail_up; } } bail_up: mutex_unlock(&tl_inode->i_mutex); return status; } void ocfs2_truncate_log_shutdown(struct ocfs2_super *osb) { int status; struct inode *tl_inode = osb->osb_tl_inode; if (tl_inode) { cancel_delayed_work(&osb->osb_truncate_log_wq); 
flush_workqueue(ocfs2_wq); status = ocfs2_flush_truncate_log(osb); if (status < 0) mlog_errno(status); brelse(osb->osb_tl_bh); iput(osb->osb_tl_inode); } } int ocfs2_truncate_log_init(struct ocfs2_super *osb) { int status; struct inode *tl_inode = NULL; struct buffer_head *tl_bh = NULL; status = ocfs2_get_truncate_log_info(osb, osb->slot_num, &tl_inode, &tl_bh); if (status < 0) mlog_errno(status); /* ocfs2_truncate_log_shutdown keys on the existence of * osb->osb_tl_inode so we don't set any of the osb variables * until we're sure all is well. */ INIT_DELAYED_WORK(&osb->osb_truncate_log_wq, ocfs2_truncate_log_worker); osb->osb_tl_bh = tl_bh; osb->osb_tl_inode = tl_inode; return status; } /* * Delayed de-allocation of suballocator blocks. * * Some sets of block de-allocations might involve multiple suballocator inodes. * * The locking for this can get extremely complicated, especially when * the suballocator inodes to delete from aren't known until deep * within an unrelated codepath. * * ocfs2_extent_block structures are a good example of this - an inode * btree could have been grown by any number of nodes each allocating * out of their own suballoc inode. * * These structures allow the delay of block de-allocation until a * later time, when locking of multiple cluster inodes won't cause * deadlock. */ /* * Describe a single bit freed from a suballocator. For the block * suballocators, it represents one block. For the global cluster * allocator, it represents some clusters and free_bit indicates * clusters number. 
*/ struct ocfs2_cached_block_free { struct ocfs2_cached_block_free *free_next; u64 free_bg; u64 free_blk; unsigned int free_bit; }; struct ocfs2_per_slot_free_list { struct ocfs2_per_slot_free_list *f_next_suballocator; int f_inode_type; int f_slot; struct ocfs2_cached_block_free *f_first; }; static int ocfs2_free_cached_blocks(struct ocfs2_super *osb, int sysfile_type, int slot, struct ocfs2_cached_block_free *head) { int ret; u64 bg_blkno; handle_t *handle; struct inode *inode; struct buffer_head *di_bh = NULL; struct ocfs2_cached_block_free *tmp; inode = ocfs2_get_system_file_inode(osb, sysfile_type, slot); if (!inode) { ret = -EINVAL; mlog_errno(ret); goto out; } mutex_lock(&inode->i_mutex); ret = ocfs2_inode_lock(inode, &di_bh, 1); if (ret) { mlog_errno(ret); goto out_mutex; } handle = ocfs2_start_trans(osb, OCFS2_SUBALLOC_FREE); if (IS_ERR(handle)) { ret = PTR_ERR(handle); mlog_errno(ret); goto out_unlock; } while (head) { if (head->free_bg) bg_blkno = head->free_bg; else bg_blkno = ocfs2_which_suballoc_group(head->free_blk, head->free_bit); trace_ocfs2_free_cached_blocks( (unsigned long long)head->free_blk, head->free_bit); ret = ocfs2_free_suballoc_bits(handle, inode, di_bh, head->free_bit, bg_blkno, 1); if (ret) { mlog_errno(ret); goto out_journal; } ret = ocfs2_extend_trans(handle, OCFS2_SUBALLOC_FREE); if (ret) { mlog_errno(ret); goto out_journal; } tmp = head; head = head->free_next; kfree(tmp); } out_journal: ocfs2_commit_trans(osb, handle); out_unlock: ocfs2_inode_unlock(inode, 1); brelse(di_bh); out_mutex: mutex_unlock(&inode->i_mutex); iput(inode); out: while(head) { /* Premature exit may have left some dangling items. 
*/ tmp = head; head = head->free_next; kfree(tmp); } return ret; } int ocfs2_cache_cluster_dealloc(struct ocfs2_cached_dealloc_ctxt *ctxt, u64 blkno, unsigned int bit) { int ret = 0; struct ocfs2_cached_block_free *item; item = kzalloc(sizeof(*item), GFP_NOFS); if (item == NULL) { ret = -ENOMEM; mlog_errno(ret); return ret; } trace_ocfs2_cache_cluster_dealloc((unsigned long long)blkno, bit); item->free_blk = blkno; item->free_bit = bit; item->free_next = ctxt->c_global_allocator; ctxt->c_global_allocator = item; return ret; } static int ocfs2_free_cached_clusters(struct ocfs2_super *osb, struct ocfs2_cached_block_free *head) { struct ocfs2_cached_block_free *tmp; struct inode *tl_inode = osb->osb_tl_inode; handle_t *handle; int ret = 0; mutex_lock(&tl_inode->i_mutex); while (head) { if (ocfs2_truncate_log_needs_flush(osb)) { ret = __ocfs2_flush_truncate_log(osb); if (ret < 0) { mlog_errno(ret); break; } } handle = ocfs2_start_trans(osb, OCFS2_TRUNCATE_LOG_UPDATE); if (IS_ERR(handle)) { ret = PTR_ERR(handle); mlog_errno(ret); break; } ret = ocfs2_truncate_log_append(osb, handle, head->free_blk, head->free_bit); ocfs2_commit_trans(osb, handle); tmp = head; head = head->free_next; kfree(tmp); if (ret < 0) { mlog_errno(ret); break; } } mutex_unlock(&tl_inode->i_mutex); while (head) { /* Premature exit may have left some dangling items. 
*/ tmp = head; head = head->free_next; kfree(tmp); } return ret; } int ocfs2_run_deallocs(struct ocfs2_super *osb, struct ocfs2_cached_dealloc_ctxt *ctxt) { int ret = 0, ret2; struct ocfs2_per_slot_free_list *fl; if (!ctxt) return 0; while (ctxt->c_first_suballocator) { fl = ctxt->c_first_suballocator; if (fl->f_first) { trace_ocfs2_run_deallocs(fl->f_inode_type, fl->f_slot); ret2 = ocfs2_free_cached_blocks(osb, fl->f_inode_type, fl->f_slot, fl->f_first); if (ret2) mlog_errno(ret2); if (!ret) ret = ret2; } ctxt->c_first_suballocator = fl->f_next_suballocator; kfree(fl); } if (ctxt->c_global_allocator) { ret2 = ocfs2_free_cached_clusters(osb, ctxt->c_global_allocator); if (ret2) mlog_errno(ret2); if (!ret) ret = ret2; ctxt->c_global_allocator = NULL; } return ret; } static struct ocfs2_per_slot_free_list * ocfs2_find_per_slot_free_list(int type, int slot, struct ocfs2_cached_dealloc_ctxt *ctxt) { struct ocfs2_per_slot_free_list *fl = ctxt->c_first_suballocator; while (fl) { if (fl->f_inode_type == type && fl->f_slot == slot) return fl; fl = fl->f_next_suballocator; } fl = kmalloc(sizeof(*fl), GFP_NOFS); if (fl) { fl->f_inode_type = type; fl->f_slot = slot; fl->f_first = NULL; fl->f_next_suballocator = ctxt->c_first_suballocator; ctxt->c_first_suballocator = fl; } return fl; } int ocfs2_cache_block_dealloc(struct ocfs2_cached_dealloc_ctxt *ctxt, int type, int slot, u64 suballoc, u64 blkno, unsigned int bit) { int ret; struct ocfs2_per_slot_free_list *fl; struct ocfs2_cached_block_free *item; fl = ocfs2_find_per_slot_free_list(type, slot, ctxt); if (fl == NULL) { ret = -ENOMEM; mlog_errno(ret); goto out; } item = kzalloc(sizeof(*item), GFP_NOFS); if (item == NULL) { ret = -ENOMEM; mlog_errno(ret); goto out; } trace_ocfs2_cache_block_dealloc(type, slot, (unsigned long long)suballoc, (unsigned long long)blkno, bit); item->free_bg = suballoc; item->free_blk = blkno; item->free_bit = bit; item->free_next = fl->f_first; fl->f_first = item; ret = 0; out: return ret; } 
static int ocfs2_cache_extent_block_free(struct ocfs2_cached_dealloc_ctxt *ctxt, struct ocfs2_extent_block *eb) { return ocfs2_cache_block_dealloc(ctxt, EXTENT_ALLOC_SYSTEM_INODE, le16_to_cpu(eb->h_suballoc_slot), le64_to_cpu(eb->h_suballoc_loc), le64_to_cpu(eb->h_blkno), le16_to_cpu(eb->h_suballoc_bit)); } static int ocfs2_zero_func(handle_t *handle, struct buffer_head *bh) { set_buffer_uptodate(bh); mark_buffer_dirty(bh); return 0; } void ocfs2_map_and_dirty_page(struct inode *inode, handle_t *handle, unsigned int from, unsigned int to, struct page *page, int zero, u64 *phys) { int ret, partial = 0; ret = ocfs2_map_page_blocks(page, phys, inode, from, to, 0); if (ret) mlog_errno(ret); if (zero) zero_user_segment(page, from, to); /* * Need to set the buffers we zero'd into uptodate * here if they aren't - ocfs2_map_page_blocks() * might've skipped some */ ret = walk_page_buffers(handle, page_buffers(page), from, to, &partial, ocfs2_zero_func); if (ret < 0) mlog_errno(ret); else if (ocfs2_should_order_data(inode)) { ret = ocfs2_jbd2_file_inode(handle, inode); if (ret < 0) mlog_errno(ret); } if (!partial) SetPageUptodate(page); flush_dcache_page(page); } static void ocfs2_zero_cluster_pages(struct inode *inode, loff_t start, loff_t end, struct page **pages, int numpages, u64 phys, handle_t *handle) { int i; struct page *page; unsigned int from, to = PAGE_CACHE_SIZE; struct super_block *sb = inode->i_sb; BUG_ON(!ocfs2_sparse_alloc(OCFS2_SB(sb))); if (numpages == 0) goto out; to = PAGE_CACHE_SIZE; for(i = 0; i < numpages; i++) { page = pages[i]; from = start & (PAGE_CACHE_SIZE - 1); if ((end >> PAGE_CACHE_SHIFT) == page->index) to = end & (PAGE_CACHE_SIZE - 1); BUG_ON(from > PAGE_CACHE_SIZE); BUG_ON(to > PAGE_CACHE_SIZE); ocfs2_map_and_dirty_page(inode, handle, from, to, page, 1, &phys); start = (page->index + 1) << PAGE_CACHE_SHIFT; } out: if (pages) ocfs2_unlock_and_free_pages(pages, numpages); } int ocfs2_grab_pages(struct inode *inode, loff_t start, loff_t end, 
struct page **pages, int *num) { int numpages, ret = 0; struct address_space *mapping = inode->i_mapping; unsigned long index; loff_t last_page_bytes; BUG_ON(start > end); numpages = 0; last_page_bytes = PAGE_ALIGN(end); index = start >> PAGE_CACHE_SHIFT; do { pages[numpages] = find_or_create_page(mapping, index, GFP_NOFS); if (!pages[numpages]) { ret = -ENOMEM; mlog_errno(ret); goto out; } numpages++; index++; } while (index < (last_page_bytes >> PAGE_CACHE_SHIFT)); out: if (ret != 0) { if (pages) ocfs2_unlock_and_free_pages(pages, numpages); numpages = 0; } *num = numpages; return ret; } static int ocfs2_grab_eof_pages(struct inode *inode, loff_t start, loff_t end, struct page **pages, int *num) { struct super_block *sb = inode->i_sb; BUG_ON(start >> OCFS2_SB(sb)->s_clustersize_bits != (end - 1) >> OCFS2_SB(sb)->s_clustersize_bits); return ocfs2_grab_pages(inode, start, end, pages, num); } /* * Zero the area past i_size but still within an allocated * cluster. This avoids exposing nonzero data on subsequent file * extends. * * We need to call this before i_size is updated on the inode because * otherwise block_write_full_page() will skip writeout of pages past * i_size. The new_i_size parameter is passed for this reason. */ int ocfs2_zero_range_for_truncate(struct inode *inode, handle_t *handle, u64 range_start, u64 range_end) { int ret = 0, numpages; struct page **pages = NULL; u64 phys; unsigned int ext_flags; struct super_block *sb = inode->i_sb; /* * File systems which don't support sparse files zero on every * extend. */ if (!ocfs2_sparse_alloc(OCFS2_SB(sb))) return 0; pages = kcalloc(ocfs2_pages_per_cluster(sb), sizeof(struct page *), GFP_NOFS); if (pages == NULL) { ret = -ENOMEM; mlog_errno(ret); goto out; } if (range_start == range_end) goto out; ret = ocfs2_extent_map_get_blocks(inode, range_start >> sb->s_blocksize_bits, &phys, NULL, &ext_flags); if (ret) { mlog_errno(ret); goto out; } /* * Tail is a hole, or is marked unwritten. 
In either case, we * can count on read and write to return/push zero's. */ if (phys == 0 || ext_flags & OCFS2_EXT_UNWRITTEN) goto out; ret = ocfs2_grab_eof_pages(inode, range_start, range_end, pages, &numpages); if (ret) { mlog_errno(ret); goto out; } ocfs2_zero_cluster_pages(inode, range_start, range_end, pages, numpages, phys, handle); /* * Initiate writeout of the pages we zero'd here. We don't * wait on them - the truncate_inode_pages() call later will * do that for us. */ ret = filemap_fdatawrite_range(inode->i_mapping, range_start, range_end - 1); if (ret) mlog_errno(ret); out: if (pages) kfree(pages); return ret; } static void ocfs2_zero_dinode_id2_with_xattr(struct inode *inode, struct ocfs2_dinode *di) { unsigned int blocksize = 1 << inode->i_sb->s_blocksize_bits; unsigned int xattrsize = le16_to_cpu(di->i_xattr_inline_size); if (le16_to_cpu(di->i_dyn_features) & OCFS2_INLINE_XATTR_FL) memset(&di->id2, 0, blocksize - offsetof(struct ocfs2_dinode, id2) - xattrsize); else memset(&di->id2, 0, blocksize - offsetof(struct ocfs2_dinode, id2)); } void ocfs2_dinode_new_extent_list(struct inode *inode, struct ocfs2_dinode *di) { ocfs2_zero_dinode_id2_with_xattr(inode, di); di->id2.i_list.l_tree_depth = 0; di->id2.i_list.l_next_free_rec = 0; di->id2.i_list.l_count = cpu_to_le16( ocfs2_extent_recs_per_inode_with_xattr(inode->i_sb, di)); } void ocfs2_set_inode_data_inline(struct inode *inode, struct ocfs2_dinode *di) { struct ocfs2_inode_info *oi = OCFS2_I(inode); struct ocfs2_inline_data *idata = &di->id2.i_data; spin_lock(&oi->ip_lock); oi->ip_dyn_features |= OCFS2_INLINE_DATA_FL; di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features); spin_unlock(&oi->ip_lock); /* * We clear the entire i_data structure here so that all * fields can be properly initialized. 
*/ ocfs2_zero_dinode_id2_with_xattr(inode, di); idata->id_count = cpu_to_le16( ocfs2_max_inline_data_with_xattr(inode->i_sb, di)); } int ocfs2_convert_inline_data_to_extents(struct inode *inode, struct buffer_head *di_bh) { int ret, i, has_data, num_pages = 0; handle_t *handle; u64 uninitialized_var(block); struct ocfs2_inode_info *oi = OCFS2_I(inode); struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; struct ocfs2_alloc_context *data_ac = NULL; struct page **pages = NULL; loff_t end = osb->s_clustersize; struct ocfs2_extent_tree et; int did_quota = 0; has_data = i_size_read(inode) ? 1 : 0; if (has_data) { pages = kcalloc(ocfs2_pages_per_cluster(osb->sb), sizeof(struct page *), GFP_NOFS); if (pages == NULL) { ret = -ENOMEM; mlog_errno(ret); goto out; } ret = ocfs2_reserve_clusters(osb, 1, &data_ac); if (ret) { mlog_errno(ret); goto out; } } handle = ocfs2_start_trans(osb, ocfs2_inline_to_extents_credits(osb->sb)); if (IS_ERR(handle)) { ret = PTR_ERR(handle); mlog_errno(ret); goto out_unlock; } ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out_commit; } if (has_data) { u32 bit_off, num; unsigned int page_end; u64 phys; ret = dquot_alloc_space_nodirty(inode, ocfs2_clusters_to_bytes(osb->sb, 1)); if (ret) goto out_commit; did_quota = 1; data_ac->ac_resv = &OCFS2_I(inode)->ip_la_data_resv; ret = ocfs2_claim_clusters(handle, data_ac, 1, &bit_off, &num); if (ret) { mlog_errno(ret); goto out_commit; } /* * Save two copies, one for insert, and one that can * be changed by ocfs2_map_and_dirty_page() below. */ block = phys = ocfs2_clusters_to_blocks(inode->i_sb, bit_off); /* * Non sparse file systems zero on extend, so no need * to do that now. 
*/ if (!ocfs2_sparse_alloc(osb) && PAGE_CACHE_SIZE < osb->s_clustersize) end = PAGE_CACHE_SIZE; ret = ocfs2_grab_eof_pages(inode, 0, end, pages, &num_pages); if (ret) { mlog_errno(ret); goto out_commit; } /* * This should populate the 1st page for us and mark * it up to date. */ ret = ocfs2_read_inline_data(inode, pages[0], di_bh); if (ret) { mlog_errno(ret); goto out_commit; } page_end = PAGE_CACHE_SIZE; if (PAGE_CACHE_SIZE > osb->s_clustersize) page_end = osb->s_clustersize; for (i = 0; i < num_pages; i++) ocfs2_map_and_dirty_page(inode, handle, 0, page_end, pages[i], i > 0, &phys); } spin_lock(&oi->ip_lock); oi->ip_dyn_features &= ~OCFS2_INLINE_DATA_FL; di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features); spin_unlock(&oi->ip_lock); ocfs2_dinode_new_extent_list(inode, di); ocfs2_journal_dirty(handle, di_bh); if (has_data) { /* * An error at this point should be extremely rare. If * this proves to be false, we could always re-build * the in-inode data from our pages. */ ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), di_bh); ret = ocfs2_insert_extent(handle, &et, 0, block, 1, 0, NULL); if (ret) { mlog_errno(ret); goto out_commit; } inode->i_blocks = ocfs2_inode_sector_count(inode); } out_commit: if (ret < 0 && did_quota) dquot_free_space_nodirty(inode, ocfs2_clusters_to_bytes(osb->sb, 1)); ocfs2_commit_trans(osb, handle); out_unlock: if (data_ac) ocfs2_free_alloc_context(data_ac); out: if (pages) { ocfs2_unlock_and_free_pages(pages, num_pages); kfree(pages); } return ret; } /* * It is expected, that by the time you call this function, * inode->i_size and fe->i_size have been adjusted. 
* * WARNING: This will kfree the truncate context */ int ocfs2_commit_truncate(struct ocfs2_super *osb, struct inode *inode, struct buffer_head *di_bh) { int status = 0, i, flags = 0; u32 new_highest_cpos, range, trunc_cpos, trunc_len, phys_cpos, coff; u64 blkno = 0; struct ocfs2_extent_list *el; struct ocfs2_extent_rec *rec; struct ocfs2_path *path = NULL; struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; struct ocfs2_extent_list *root_el = &(di->id2.i_list); u64 refcount_loc = le64_to_cpu(di->i_refcount_loc); struct ocfs2_extent_tree et; struct ocfs2_cached_dealloc_ctxt dealloc; ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), di_bh); ocfs2_init_dealloc_ctxt(&dealloc); new_highest_cpos = ocfs2_clusters_for_bytes(osb->sb, i_size_read(inode)); path = ocfs2_new_path(di_bh, &di->id2.i_list, ocfs2_journal_access_di); if (!path) { status = -ENOMEM; mlog_errno(status); goto bail; } ocfs2_extent_map_trunc(inode, new_highest_cpos); start: /* * Check that we still have allocation to delete. */ if (OCFS2_I(inode)->ip_clusters == 0) { status = 0; goto bail; } /* * Truncate always works against the rightmost tree branch. */ status = ocfs2_find_path(INODE_CACHE(inode), path, UINT_MAX); if (status) { mlog_errno(status); goto bail; } trace_ocfs2_commit_truncate( (unsigned long long)OCFS2_I(inode)->ip_blkno, new_highest_cpos, OCFS2_I(inode)->ip_clusters, path->p_tree_depth); /* * By now, el will point to the extent list on the bottom most * portion of this tree. Only the tail record is considered in * each pass. 
* * We handle the following cases, in order: * - empty extent: delete the remaining branch * - remove the entire record * - remove a partial record * - no record needs to be removed (truncate has completed) */ el = path_leaf_el(path); if (le16_to_cpu(el->l_next_free_rec) == 0) { ocfs2_error(inode->i_sb, "Inode %llu has empty extent block at %llu\n", (unsigned long long)OCFS2_I(inode)->ip_blkno, (unsigned long long)path_leaf_bh(path)->b_blocknr); status = -EROFS; goto bail; } i = le16_to_cpu(el->l_next_free_rec) - 1; rec = &el->l_recs[i]; flags = rec->e_flags; range = le32_to_cpu(rec->e_cpos) + ocfs2_rec_clusters(el, rec); if (i == 0 && ocfs2_is_empty_extent(rec)) { /* * Lower levels depend on this never happening, but it's best * to check it up here before changing the tree. */ if (root_el->l_tree_depth && rec->e_int_clusters == 0) { ocfs2_error(inode->i_sb, "Inode %lu has an empty " "extent record, depth %u\n", inode->i_ino, le16_to_cpu(root_el->l_tree_depth)); status = -EROFS; goto bail; } trunc_cpos = le32_to_cpu(rec->e_cpos); trunc_len = 0; blkno = 0; } else if (le32_to_cpu(rec->e_cpos) >= new_highest_cpos) { /* * Truncate entire record. */ trunc_cpos = le32_to_cpu(rec->e_cpos); trunc_len = ocfs2_rec_clusters(el, rec); blkno = le64_to_cpu(rec->e_blkno); } else if (range > new_highest_cpos) { /* * Partial truncate. it also should be * the last truncate we're doing. */ trunc_cpos = new_highest_cpos; trunc_len = range - new_highest_cpos; coff = new_highest_cpos - le32_to_cpu(rec->e_cpos); blkno = le64_to_cpu(rec->e_blkno) + ocfs2_clusters_to_blocks(inode->i_sb, coff); } else { /* * Truncate completed, leave happily. 
*/ status = 0; goto bail; } phys_cpos = ocfs2_blocks_to_clusters(inode->i_sb, blkno); status = ocfs2_remove_btree_range(inode, &et, trunc_cpos, phys_cpos, trunc_len, flags, &dealloc, refcount_loc); if (status < 0) { mlog_errno(status); goto bail; } ocfs2_reinit_path(path, 1); /* * The check above will catch the case where we've truncated * away all allocation. */ goto start; bail: ocfs2_schedule_truncate_log_flush(osb, 1); ocfs2_run_deallocs(osb, &dealloc); ocfs2_free_path(path); return status; } /* * 'start' is inclusive, 'end' is not. */ int ocfs2_truncate_inline(struct inode *inode, struct buffer_head *di_bh, unsigned int start, unsigned int end, int trunc) { int ret; unsigned int numbytes; handle_t *handle; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; struct ocfs2_inline_data *idata = &di->id2.i_data; if (end > i_size_read(inode)) end = i_size_read(inode); BUG_ON(start >= end); if (!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) || !(le16_to_cpu(di->i_dyn_features) & OCFS2_INLINE_DATA_FL) || !ocfs2_supports_inline_data(osb)) { ocfs2_error(inode->i_sb, "Inline data flags for inode %llu don't agree! " "Disk: 0x%x, Memory: 0x%x, Superblock: 0x%x\n", (unsigned long long)OCFS2_I(inode)->ip_blkno, le16_to_cpu(di->i_dyn_features), OCFS2_I(inode)->ip_dyn_features, osb->s_feature_incompat); ret = -EROFS; goto out; } handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS); if (IS_ERR(handle)) { ret = PTR_ERR(handle); mlog_errno(ret); goto out; } ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out_commit; } numbytes = end - start; memset(idata->id_data + start, 0, numbytes); /* * No need to worry about the data page here - it's been * truncated already and inline data doesn't need it for * pushing zero's to disk, so we'll let readpage pick it up * later. 
*/ if (trunc) { i_size_write(inode, start); di->i_size = cpu_to_le64(start); } inode->i_blocks = ocfs2_inode_sector_count(inode); inode->i_ctime = inode->i_mtime = CURRENT_TIME; di->i_ctime = di->i_mtime = cpu_to_le64(inode->i_ctime.tv_sec); di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec); ocfs2_journal_dirty(handle, di_bh); out_commit: ocfs2_commit_trans(osb, handle); out: return ret; } static int ocfs2_trim_extent(struct super_block *sb, struct ocfs2_group_desc *gd, u32 start, u32 count) { u64 discard, bcount; bcount = ocfs2_clusters_to_blocks(sb, count); discard = le64_to_cpu(gd->bg_blkno) + ocfs2_clusters_to_blocks(sb, start); trace_ocfs2_trim_extent(sb, (unsigned long long)discard, bcount); return sb_issue_discard(sb, discard, bcount, GFP_NOFS, 0); } static int ocfs2_trim_group(struct super_block *sb, struct ocfs2_group_desc *gd, u32 start, u32 max, u32 minbits) { int ret = 0, count = 0, next; void *bitmap = gd->bg_bitmap; if (le16_to_cpu(gd->bg_free_bits_count) < minbits) return 0; trace_ocfs2_trim_group((unsigned long long)le64_to_cpu(gd->bg_blkno), start, max, minbits); while (start < max) { start = ocfs2_find_next_zero_bit(bitmap, max, start); if (start >= max) break; next = ocfs2_find_next_bit(bitmap, max, start); if ((next - start) >= minbits) { ret = ocfs2_trim_extent(sb, gd, start, next - start); if (ret < 0) { mlog_errno(ret); break; } count += next - start; } start = next + 1; if (fatal_signal_pending(current)) { count = -ERESTARTSYS; break; } if ((le16_to_cpu(gd->bg_free_bits_count) - count) < minbits) break; } if (ret < 0) count = ret; return count; } int ocfs2_trim_fs(struct super_block *sb, struct fstrim_range *range) { struct ocfs2_super *osb = OCFS2_SB(sb); u64 start, len, trimmed, first_group, last_group, group; int ret, cnt; u32 first_bit, last_bit, minlen; struct buffer_head *main_bm_bh = NULL; struct inode *main_bm_inode = NULL; struct buffer_head *gd_bh = NULL; struct ocfs2_dinode *main_bm; struct ocfs2_group_desc 
*gd = NULL; start = range->start >> osb->s_clustersize_bits; len = range->len >> osb->s_clustersize_bits; minlen = range->minlen >> osb->s_clustersize_bits; trimmed = 0; if (!len) { range->len = 0; return 0; } if (minlen >= osb->bitmap_cpg) return -EINVAL; main_bm_inode = ocfs2_get_system_file_inode(osb, GLOBAL_BITMAP_SYSTEM_INODE, OCFS2_INVALID_SLOT); if (!main_bm_inode) { ret = -EIO; mlog_errno(ret); goto out; } mutex_lock(&main_bm_inode->i_mutex); ret = ocfs2_inode_lock(main_bm_inode, &main_bm_bh, 0); if (ret < 0) { mlog_errno(ret); goto out_mutex; } main_bm = (struct ocfs2_dinode *)main_bm_bh->b_data; if (start >= le32_to_cpu(main_bm->i_clusters)) { ret = -EINVAL; goto out_unlock; } if (start + len > le32_to_cpu(main_bm->i_clusters)) len = le32_to_cpu(main_bm->i_clusters) - start; trace_ocfs2_trim_fs(start, len, minlen); /* Determine first and last group to examine based on start and len */ first_group = ocfs2_which_cluster_group(main_bm_inode, start); if (first_group == osb->first_cluster_group_blkno) first_bit = start; else first_bit = start - ocfs2_blocks_to_clusters(sb, first_group); last_group = ocfs2_which_cluster_group(main_bm_inode, start + len - 1); last_bit = osb->bitmap_cpg; for (group = first_group; group <= last_group;) { if (first_bit + len >= osb->bitmap_cpg) last_bit = osb->bitmap_cpg; else last_bit = first_bit + len; ret = ocfs2_read_group_descriptor(main_bm_inode, main_bm, group, &gd_bh); if (ret < 0) { mlog_errno(ret); break; } gd = (struct ocfs2_group_desc *)gd_bh->b_data; cnt = ocfs2_trim_group(sb, gd, first_bit, last_bit, minlen); brelse(gd_bh); gd_bh = NULL; if (cnt < 0) { ret = cnt; mlog_errno(ret); break; } trimmed += cnt; len -= osb->bitmap_cpg - first_bit; first_bit = 0; if (group == osb->first_cluster_group_blkno) group = ocfs2_clusters_to_blocks(sb, osb->bitmap_cpg); else group += ocfs2_clusters_to_blocks(sb, osb->bitmap_cpg); } range->len = trimmed * sb->s_blocksize; out_unlock: ocfs2_inode_unlock(main_bm_inode, 0); 
brelse(main_bm_bh); out_mutex: mutex_unlock(&main_bm_inode->i_mutex); iput(main_bm_inode); out: return ret; }
gpl-2.0
synexxus/synnix
drivers/mfd/tps6105x.c
1239
5604
/* * Core driver for TPS61050/61052 boost converters, used for while LED * driving, audio power amplification, white LED flash, and generic * boost conversion. Additionally it provides a 1-bit GPIO pin (out or in) * and a flash synchronization pin to synchronize flash events when used as * flashgun. * * Copyright (C) 2011 ST-Ericsson SA * Written on behalf of Linaro for ST-Ericsson * * Author: Linus Walleij <linus.walleij@linaro.org> * * License terms: GNU General Public License (GPL) version 2 */ #include <linux/module.h> #include <linux/init.h> #include <linux/i2c.h> #include <linux/mutex.h> #include <linux/gpio.h> #include <linux/spinlock.h> #include <linux/slab.h> #include <linux/err.h> #include <linux/regulator/driver.h> #include <linux/mfd/core.h> #include <linux/mfd/tps6105x.h> int tps6105x_set(struct tps6105x *tps6105x, u8 reg, u8 value) { int ret; ret = mutex_lock_interruptible(&tps6105x->lock); if (ret) return ret; ret = i2c_smbus_write_byte_data(tps6105x->client, reg, value); mutex_unlock(&tps6105x->lock); if (ret < 0) return ret; return 0; } EXPORT_SYMBOL(tps6105x_set); int tps6105x_get(struct tps6105x *tps6105x, u8 reg, u8 *buf) { int ret; ret = mutex_lock_interruptible(&tps6105x->lock); if (ret) return ret; ret = i2c_smbus_read_byte_data(tps6105x->client, reg); mutex_unlock(&tps6105x->lock); if (ret < 0) return ret; *buf = ret; return 0; } EXPORT_SYMBOL(tps6105x_get); /* * Masks off the bits in the mask and sets the bits in the bitvalues * parameter in one atomic operation */ int tps6105x_mask_and_set(struct tps6105x *tps6105x, u8 reg, u8 bitmask, u8 bitvalues) { int ret; u8 regval; ret = mutex_lock_interruptible(&tps6105x->lock); if (ret) return ret; ret = i2c_smbus_read_byte_data(tps6105x->client, reg); if (ret < 0) goto fail; regval = ret; regval = (~bitmask & regval) | (bitmask & bitvalues); ret = i2c_smbus_write_byte_data(tps6105x->client, reg, regval); fail: mutex_unlock(&tps6105x->lock); if (ret < 0) return ret; return 0; } 
EXPORT_SYMBOL(tps6105x_mask_and_set); static int tps6105x_startup(struct tps6105x *tps6105x) { int ret; u8 regval; ret = tps6105x_get(tps6105x, TPS6105X_REG_0, &regval); if (ret) return ret; switch (regval >> TPS6105X_REG0_MODE_SHIFT) { case TPS6105X_REG0_MODE_SHUTDOWN: dev_info(&tps6105x->client->dev, "TPS6105x found in SHUTDOWN mode\n"); break; case TPS6105X_REG0_MODE_TORCH: dev_info(&tps6105x->client->dev, "TPS6105x found in TORCH mode\n"); break; case TPS6105X_REG0_MODE_TORCH_FLASH: dev_info(&tps6105x->client->dev, "TPS6105x found in FLASH mode\n"); break; case TPS6105X_REG0_MODE_VOLTAGE: dev_info(&tps6105x->client->dev, "TPS6105x found in VOLTAGE mode\n"); break; default: break; } return ret; } /* * MFD cells - we have one cell which is selected operation * mode, and we always have a GPIO cell. */ static struct mfd_cell tps6105x_cells[] = { { /* name will be runtime assigned */ .id = -1, }, { .name = "tps6105x-gpio", .id = -1, }, }; static int tps6105x_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct tps6105x *tps6105x; struct tps6105x_platform_data *pdata; int ret; int i; tps6105x = devm_kmalloc(&client->dev, sizeof(*tps6105x), GFP_KERNEL); if (!tps6105x) return -ENOMEM; i2c_set_clientdata(client, tps6105x); tps6105x->client = client; pdata = dev_get_platdata(&client->dev); tps6105x->pdata = pdata; mutex_init(&tps6105x->lock); ret = tps6105x_startup(tps6105x); if (ret) { dev_err(&client->dev, "chip initialization failed\n"); return ret; } /* Remove warning texts when you implement new cell drivers */ switch (pdata->mode) { case TPS6105X_MODE_SHUTDOWN: dev_info(&client->dev, "present, not used for anything, only GPIO\n"); break; case TPS6105X_MODE_TORCH: tps6105x_cells[0].name = "tps6105x-leds"; dev_warn(&client->dev, "torch mode is unsupported\n"); break; case TPS6105X_MODE_TORCH_FLASH: tps6105x_cells[0].name = "tps6105x-flash"; dev_warn(&client->dev, "flash mode is unsupported\n"); break; case TPS6105X_MODE_VOLTAGE: 
tps6105x_cells[0].name ="tps6105x-regulator"; break; default: break; } /* Set up and register the platform devices. */ for (i = 0; i < ARRAY_SIZE(tps6105x_cells); i++) { /* One state holder for all drivers, this is simple */ tps6105x_cells[i].platform_data = tps6105x; tps6105x_cells[i].pdata_size = sizeof(*tps6105x); } return mfd_add_devices(&client->dev, 0, tps6105x_cells, ARRAY_SIZE(tps6105x_cells), NULL, 0, NULL); } static int tps6105x_remove(struct i2c_client *client) { struct tps6105x *tps6105x = i2c_get_clientdata(client); mfd_remove_devices(&client->dev); /* Put chip in shutdown mode */ tps6105x_mask_and_set(tps6105x, TPS6105X_REG_0, TPS6105X_REG0_MODE_MASK, TPS6105X_MODE_SHUTDOWN << TPS6105X_REG0_MODE_SHIFT); return 0; } static const struct i2c_device_id tps6105x_id[] = { { "tps61050", 0 }, { "tps61052", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, tps6105x_id); static struct i2c_driver tps6105x_driver = { .driver = { .name = "tps6105x", }, .probe = tps6105x_probe, .remove = tps6105x_remove, .id_table = tps6105x_id, }; static int __init tps6105x_init(void) { return i2c_add_driver(&tps6105x_driver); } subsys_initcall(tps6105x_init); static void __exit tps6105x_exit(void) { i2c_del_driver(&tps6105x_driver); } module_exit(tps6105x_exit); MODULE_AUTHOR("Linus Walleij"); MODULE_DESCRIPTION("TPS6105x White LED Boost Converter Driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
dan-and/linux-sunxi
drivers/acpi/sleep.c
1495
25095
/* * sleep.c - ACPI sleep support. * * Copyright (c) 2005 Alexey Starikovskiy <alexey.y.starikovskiy@intel.com> * Copyright (c) 2004 David Shaohua Li <shaohua.li@intel.com> * Copyright (c) 2000-2003 Patrick Mochel * Copyright (c) 2003 Open Source Development Lab * * This file is released under the GPLv2. * */ #include <linux/delay.h> #include <linux/irq.h> #include <linux/dmi.h> #include <linux/device.h> #include <linux/suspend.h> #include <linux/reboot.h> #include <linux/acpi.h> #include <linux/module.h> #include <linux/pm_runtime.h> #include <asm/io.h> #include <acpi/acpi_bus.h> #include <acpi/acpi_drivers.h> #include "internal.h" #include "sleep.h" u8 wake_sleep_flags = ACPI_NO_OPTIONAL_METHODS; static unsigned int gts, bfs; static int set_param_wake_flag(const char *val, struct kernel_param *kp) { int ret = param_set_int(val, kp); if (ret) return ret; if (kp->arg == (const char *)&gts) { if (gts) wake_sleep_flags |= ACPI_EXECUTE_GTS; else wake_sleep_flags &= ~ACPI_EXECUTE_GTS; } if (kp->arg == (const char *)&bfs) { if (bfs) wake_sleep_flags |= ACPI_EXECUTE_BFS; else wake_sleep_flags &= ~ACPI_EXECUTE_BFS; } return ret; } module_param_call(gts, set_param_wake_flag, param_get_int, &gts, 0644); module_param_call(bfs, set_param_wake_flag, param_get_int, &bfs, 0644); MODULE_PARM_DESC(gts, "Enable evaluation of _GTS on suspend."); MODULE_PARM_DESC(bfs, "Enable evaluation of _BFS on resume".); static u8 sleep_states[ACPI_S_STATE_COUNT]; static void acpi_sleep_tts_switch(u32 acpi_state) { union acpi_object in_arg = { ACPI_TYPE_INTEGER }; struct acpi_object_list arg_list = { 1, &in_arg }; acpi_status status = AE_OK; in_arg.integer.value = acpi_state; status = acpi_evaluate_object(NULL, "\\_TTS", &arg_list, NULL); if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) { /* * OS can't evaluate the _TTS object correctly. Some warning * message will be printed. But it won't break anything. 
*/ printk(KERN_NOTICE "Failure in evaluating _TTS object\n"); } } static int tts_notify_reboot(struct notifier_block *this, unsigned long code, void *x) { acpi_sleep_tts_switch(ACPI_STATE_S5); return NOTIFY_DONE; } static struct notifier_block tts_notifier = { .notifier_call = tts_notify_reboot, .next = NULL, .priority = 0, }; static int acpi_sleep_prepare(u32 acpi_state) { #ifdef CONFIG_ACPI_SLEEP /* do we have a wakeup address for S2 and S3? */ if (acpi_state == ACPI_STATE_S3) { if (!acpi_wakeup_address) { return -EFAULT; } acpi_set_firmware_waking_vector( (acpi_physical_address)acpi_wakeup_address); } ACPI_FLUSH_CPU_CACHE(); #endif printk(KERN_INFO PREFIX "Preparing to enter system sleep state S%d\n", acpi_state); acpi_enable_wakeup_devices(acpi_state); acpi_enter_sleep_state_prep(acpi_state); return 0; } #ifdef CONFIG_ACPI_SLEEP static u32 acpi_target_sleep_state = ACPI_STATE_S0; /* * The ACPI specification wants us to save NVS memory regions during hibernation * and to restore them during the subsequent resume. Windows does that also for * suspend to RAM. However, it is known that this mechanism does not work on * all machines, so we allow the user to disable it with the help of the * 'acpi_sleep=nonvs' kernel command line option. */ static bool nvs_nosave; void __init acpi_nvs_nosave(void) { nvs_nosave = true; } /* * ACPI 1.0 wants us to execute _PTS before suspending devices, so we allow the * user to request that behavior by using the 'acpi_old_suspend_ordering' * kernel command line option that causes the following variable to be set. 
*/ static bool old_suspend_ordering; void __init acpi_old_suspend_ordering(void) { old_suspend_ordering = true; } static int __init init_old_suspend_ordering(const struct dmi_system_id *d) { acpi_old_suspend_ordering(); return 0; } static int __init init_nvs_nosave(const struct dmi_system_id *d) { acpi_nvs_nosave(); return 0; } static struct dmi_system_id __initdata acpisleep_dmi_table[] = { { .callback = init_old_suspend_ordering, .ident = "Abit KN9 (nForce4 variant)", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "http://www.abit.com.tw/"), DMI_MATCH(DMI_BOARD_NAME, "KN9 Series(NF-CK804)"), }, }, { .callback = init_old_suspend_ordering, .ident = "HP xw4600 Workstation", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), DMI_MATCH(DMI_PRODUCT_NAME, "HP xw4600 Workstation"), }, }, { .callback = init_old_suspend_ordering, .ident = "Asus Pundit P1-AH2 (M2N8L motherboard)", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTek Computer INC."), DMI_MATCH(DMI_BOARD_NAME, "M2N8L"), }, }, { .callback = init_old_suspend_ordering, .ident = "Panasonic CF51-2L", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "Matsushita Electric Industrial Co.,Ltd."), DMI_MATCH(DMI_BOARD_NAME, "CF51-2L"), }, }, { .callback = init_nvs_nosave, .ident = "Sony Vaio VGN-FW41E_H", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW41E_H"), }, }, { .callback = init_nvs_nosave, .ident = "Sony Vaio VGN-FW21E", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW21E"), }, }, { .callback = init_nvs_nosave, .ident = "Sony Vaio VPCEB17FX", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB17FX"), }, }, { .callback = init_nvs_nosave, .ident = "Sony Vaio VGN-SR11M", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR11M"), }, }, { .callback = init_nvs_nosave, .ident = "Everex StepNote Series", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Everex 
Systems, Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "Everex StepNote Series"), }, }, { .callback = init_nvs_nosave, .ident = "Sony Vaio VPCEB1Z1E", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB1Z1E"), }, }, { .callback = init_nvs_nosave, .ident = "Sony Vaio VGN-NW130D", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), DMI_MATCH(DMI_PRODUCT_NAME, "VGN-NW130D"), }, }, { .callback = init_nvs_nosave, .ident = "Sony Vaio VPCCW29FX", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), DMI_MATCH(DMI_PRODUCT_NAME, "VPCCW29FX"), }, }, { .callback = init_nvs_nosave, .ident = "Averatec AV1020-ED2", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "AVERATEC"), DMI_MATCH(DMI_PRODUCT_NAME, "1000 Series"), }, }, { .callback = init_old_suspend_ordering, .ident = "Asus A8N-SLI DELUXE", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), DMI_MATCH(DMI_BOARD_NAME, "A8N-SLI DELUXE"), }, }, { .callback = init_old_suspend_ordering, .ident = "Asus A8N-SLI Premium", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), DMI_MATCH(DMI_BOARD_NAME, "A8N-SLI Premium"), }, }, { .callback = init_nvs_nosave, .ident = "Sony Vaio VGN-SR26GN_P", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR26GN_P"), }, }, { .callback = init_nvs_nosave, .ident = "Sony Vaio VPCEB1S1E", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB1S1E"), }, }, { .callback = init_nvs_nosave, .ident = "Sony Vaio VGN-FW520F", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW520F"), }, }, { .callback = init_nvs_nosave, .ident = "Asus K54C", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "K54C"), }, }, { .callback = init_nvs_nosave, .ident = "Asus K54HR", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "K54HR"), }, }, {}, }; 
static void acpi_sleep_dmi_check(void) { dmi_check_system(acpisleep_dmi_table); } /** * acpi_pm_freeze - Disable the GPEs and suspend EC transactions. */ static int acpi_pm_freeze(void) { acpi_disable_all_gpes(); acpi_os_wait_events_complete(NULL); acpi_ec_block_transactions(); return 0; } /** * acpi_pre_suspend - Enable wakeup devices, "freeze" EC and save NVS. */ static int acpi_pm_pre_suspend(void) { acpi_pm_freeze(); return suspend_nvs_save(); } /** * __acpi_pm_prepare - Prepare the platform to enter the target state. * * If necessary, set the firmware waking vector and do arch-specific * nastiness to get the wakeup code to the waking vector. */ static int __acpi_pm_prepare(void) { int error = acpi_sleep_prepare(acpi_target_sleep_state); if (error) acpi_target_sleep_state = ACPI_STATE_S0; return error; } /** * acpi_pm_prepare - Prepare the platform to enter the target sleep * state and disable the GPEs. */ static int acpi_pm_prepare(void) { int error = __acpi_pm_prepare(); if (!error) error = acpi_pm_pre_suspend(); return error; } /** * acpi_pm_finish - Instruct the platform to leave a sleep state. * * This is called after we wake back up (or if entering the sleep state * failed). */ static void acpi_pm_finish(void) { u32 acpi_state = acpi_target_sleep_state; acpi_ec_unblock_transactions(); suspend_nvs_free(); if (acpi_state == ACPI_STATE_S0) return; printk(KERN_INFO PREFIX "Waking up from system sleep state S%d\n", acpi_state); acpi_disable_wakeup_devices(acpi_state); acpi_leave_sleep_state(acpi_state); /* reset firmware waking vector */ acpi_set_firmware_waking_vector((acpi_physical_address) 0); acpi_target_sleep_state = ACPI_STATE_S0; } /** * acpi_pm_end - Finish up suspend sequence. */ static void acpi_pm_end(void) { /* * This is necessary in case acpi_pm_finish() is not called during a * failing transition to a sleep state. 
*/ acpi_target_sleep_state = ACPI_STATE_S0; acpi_sleep_tts_switch(acpi_target_sleep_state); } #else /* !CONFIG_ACPI_SLEEP */ #define acpi_target_sleep_state ACPI_STATE_S0 static inline void acpi_sleep_dmi_check(void) {} #endif /* CONFIG_ACPI_SLEEP */ #ifdef CONFIG_SUSPEND static u32 acpi_suspend_states[] = { [PM_SUSPEND_ON] = ACPI_STATE_S0, [PM_SUSPEND_STANDBY] = ACPI_STATE_S1, [PM_SUSPEND_MEM] = ACPI_STATE_S3, [PM_SUSPEND_MAX] = ACPI_STATE_S5 }; /** * acpi_suspend_begin - Set the target system sleep state to the state * associated with given @pm_state, if supported. */ static int acpi_suspend_begin(suspend_state_t pm_state) { u32 acpi_state = acpi_suspend_states[pm_state]; int error = 0; error = nvs_nosave ? 0 : suspend_nvs_alloc(); if (error) return error; if (sleep_states[acpi_state]) { acpi_target_sleep_state = acpi_state; acpi_sleep_tts_switch(acpi_target_sleep_state); } else { printk(KERN_ERR "ACPI does not support this state: %d\n", pm_state); error = -ENOSYS; } return error; } /** * acpi_suspend_enter - Actually enter a sleep state. * @pm_state: ignored * * Flush caches and go to sleep. For STR we have to call arch-specific * assembly, which in turn call acpi_enter_sleep_state(). * It's unfortunate, but it works. Please fix if you're feeling frisky. */ static int acpi_suspend_enter(suspend_state_t pm_state) { acpi_status status = AE_OK; u32 acpi_state = acpi_target_sleep_state; int error; ACPI_FLUSH_CPU_CACHE(); switch (acpi_state) { case ACPI_STATE_S1: barrier(); status = acpi_enter_sleep_state(acpi_state, wake_sleep_flags); break; case ACPI_STATE_S3: error = acpi_suspend_lowlevel(); if (error) return error; pr_info(PREFIX "Low-level resume complete\n"); break; } /* This violates the spec but is required for bug compatibility. 
*/ acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1); /* Reprogram control registers and execute _BFS */ acpi_leave_sleep_state_prep(acpi_state, wake_sleep_flags); /* ACPI 3.0 specs (P62) says that it's the responsibility * of the OSPM to clear the status bit [ implying that the * POWER_BUTTON event should not reach userspace ] */ if (ACPI_SUCCESS(status) && (acpi_state == ACPI_STATE_S3)) acpi_clear_event(ACPI_EVENT_POWER_BUTTON); /* * Disable and clear GPE status before interrupt is enabled. Some GPEs * (like wakeup GPE) haven't handler, this can avoid such GPE misfire. * acpi_leave_sleep_state will reenable specific GPEs later */ acpi_disable_all_gpes(); /* Allow EC transactions to happen. */ acpi_ec_unblock_transactions_early(); suspend_nvs_restore(); return ACPI_SUCCESS(status) ? 0 : -EFAULT; } static int acpi_suspend_state_valid(suspend_state_t pm_state) { u32 acpi_state; switch (pm_state) { case PM_SUSPEND_ON: case PM_SUSPEND_STANDBY: case PM_SUSPEND_MEM: acpi_state = acpi_suspend_states[pm_state]; return sleep_states[acpi_state]; default: return 0; } } static const struct platform_suspend_ops acpi_suspend_ops = { .valid = acpi_suspend_state_valid, .begin = acpi_suspend_begin, .prepare_late = acpi_pm_prepare, .enter = acpi_suspend_enter, .wake = acpi_pm_finish, .end = acpi_pm_end, }; /** * acpi_suspend_begin_old - Set the target system sleep state to the * state associated with given @pm_state, if supported, and * execute the _PTS control method. This function is used if the * pre-ACPI 2.0 suspend ordering has been requested. */ static int acpi_suspend_begin_old(suspend_state_t pm_state) { int error = acpi_suspend_begin(pm_state); if (!error) error = __acpi_pm_prepare(); return error; } /* * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has * been requested. 
*/ static const struct platform_suspend_ops acpi_suspend_ops_old = { .valid = acpi_suspend_state_valid, .begin = acpi_suspend_begin_old, .prepare_late = acpi_pm_pre_suspend, .enter = acpi_suspend_enter, .wake = acpi_pm_finish, .end = acpi_pm_end, .recover = acpi_pm_finish, }; #endif /* CONFIG_SUSPEND */ #ifdef CONFIG_HIBERNATION static unsigned long s4_hardware_signature; static struct acpi_table_facs *facs; static bool nosigcheck; void __init acpi_no_s4_hw_signature(void) { nosigcheck = true; } static int acpi_hibernation_begin(void) { int error; error = nvs_nosave ? 0 : suspend_nvs_alloc(); if (!error) { acpi_target_sleep_state = ACPI_STATE_S4; acpi_sleep_tts_switch(acpi_target_sleep_state); } return error; } static int acpi_hibernation_enter(void) { acpi_status status = AE_OK; ACPI_FLUSH_CPU_CACHE(); /* This shouldn't return. If it returns, we have a problem */ status = acpi_enter_sleep_state(ACPI_STATE_S4, wake_sleep_flags); /* Reprogram control registers and execute _BFS */ acpi_leave_sleep_state_prep(ACPI_STATE_S4, wake_sleep_flags); return ACPI_SUCCESS(status) ? 0 : -EFAULT; } static void acpi_hibernation_leave(void) { /* * If ACPI is not enabled by the BIOS and the boot kernel, we need to * enable it here. */ acpi_enable(); /* Reprogram control registers and execute _BFS */ acpi_leave_sleep_state_prep(ACPI_STATE_S4, wake_sleep_flags); /* Check the hardware signature */ if (facs && s4_hardware_signature != facs->hardware_signature) { printk(KERN_EMERG "ACPI: Hardware changed while hibernated, " "cannot resume!\n"); panic("ACPI S4 hardware signature mismatch"); } /* Restore the NVS memory area */ suspend_nvs_restore(); /* Allow EC transactions to happen. 
*/ acpi_ec_unblock_transactions_early(); } static void acpi_pm_thaw(void) { acpi_ec_unblock_transactions(); acpi_enable_all_runtime_gpes(); } static const struct platform_hibernation_ops acpi_hibernation_ops = { .begin = acpi_hibernation_begin, .end = acpi_pm_end, .pre_snapshot = acpi_pm_prepare, .finish = acpi_pm_finish, .prepare = acpi_pm_prepare, .enter = acpi_hibernation_enter, .leave = acpi_hibernation_leave, .pre_restore = acpi_pm_freeze, .restore_cleanup = acpi_pm_thaw, }; /** * acpi_hibernation_begin_old - Set the target system sleep state to * ACPI_STATE_S4 and execute the _PTS control method. This * function is used if the pre-ACPI 2.0 suspend ordering has been * requested. */ static int acpi_hibernation_begin_old(void) { int error; /* * The _TTS object should always be evaluated before the _PTS object. * When the old_suspended_ordering is true, the _PTS object is * evaluated in the acpi_sleep_prepare. */ acpi_sleep_tts_switch(ACPI_STATE_S4); error = acpi_sleep_prepare(ACPI_STATE_S4); if (!error) { if (!nvs_nosave) error = suspend_nvs_alloc(); if (!error) acpi_target_sleep_state = ACPI_STATE_S4; } return error; } /* * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has * been requested. 
*/ static const struct platform_hibernation_ops acpi_hibernation_ops_old = { .begin = acpi_hibernation_begin_old, .end = acpi_pm_end, .pre_snapshot = acpi_pm_pre_suspend, .prepare = acpi_pm_freeze, .finish = acpi_pm_finish, .enter = acpi_hibernation_enter, .leave = acpi_hibernation_leave, .pre_restore = acpi_pm_freeze, .restore_cleanup = acpi_pm_thaw, .recover = acpi_pm_finish, }; #endif /* CONFIG_HIBERNATION */ int acpi_suspend(u32 acpi_state) { suspend_state_t states[] = { [1] = PM_SUSPEND_STANDBY, [3] = PM_SUSPEND_MEM, [5] = PM_SUSPEND_MAX }; if (acpi_state < 6 && states[acpi_state]) return pm_suspend(states[acpi_state]); if (acpi_state == 4) return hibernate(); return -EINVAL; } #ifdef CONFIG_PM /** * acpi_pm_device_sleep_state - return preferred power state of ACPI device * in the system sleep state given by %acpi_target_sleep_state * @dev: device to examine; its driver model wakeup flags control * whether it should be able to wake up the system * @d_min_p: used to store the upper limit of allowed states range * Return value: preferred power state of the device on success, -ENODEV on * failure (ie. if there's no 'struct acpi_device' for @dev) * * Find the lowest power (highest number) ACPI device power state that * device @dev can be in while the system is in the sleep state represented * by %acpi_target_sleep_state. If @wake is nonzero, the device should be * able to wake up the system from this sleep state. If @d_min_p is set, * the highest power (lowest number) device power state of @dev allowed * in this system sleep state is stored at the location pointed to by it. * * The caller must ensure that @dev is valid before using this function. * The caller is also responsible for figuring out if the device is * supposed to be able to wake up the system and passing this information * via @wake. 
*/ int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p) { acpi_handle handle = DEVICE_ACPI_HANDLE(dev); struct acpi_device *adev; char acpi_method[] = "_SxD"; unsigned long long d_min, d_max; if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &adev))) { printk(KERN_DEBUG "ACPI handle has no context!\n"); return -ENODEV; } acpi_method[2] = '0' + acpi_target_sleep_state; /* * If the sleep state is S0, we will return D3, but if the device has * _S0W, we will use the value from _S0W */ d_min = ACPI_STATE_D0; d_max = ACPI_STATE_D3; /* * If present, _SxD methods return the minimum D-state (highest power * state) we can use for the corresponding S-states. Otherwise, the * minimum D-state is D0 (ACPI 3.x). * * NOTE: We rely on acpi_evaluate_integer() not clobbering the integer * provided -- that's our fault recovery, we ignore retval. */ if (acpi_target_sleep_state > ACPI_STATE_S0) acpi_evaluate_integer(handle, acpi_method, NULL, &d_min); /* * If _PRW says we can wake up the system from the target sleep state, * the D-state returned by _SxD is sufficient for that (we assume a * wakeup-aware driver if wake is set). Still, if _SxW exists * (ACPI 3.x), it should return the maximum (lowest power) D-state that * can wake the system. _S0W may be valid, too. 
*/ if (acpi_target_sleep_state == ACPI_STATE_S0 || (device_may_wakeup(dev) && adev->wakeup.flags.valid && adev->wakeup.sleep_state >= acpi_target_sleep_state)) { acpi_status status; acpi_method[3] = 'W'; status = acpi_evaluate_integer(handle, acpi_method, NULL, &d_max); if (ACPI_FAILURE(status)) { if (acpi_target_sleep_state != ACPI_STATE_S0 || status != AE_NOT_FOUND) d_max = d_min; } else if (d_max < d_min) { /* Warn the user of the broken DSDT */ printk(KERN_WARNING "ACPI: Wrong value from %s\n", acpi_method); /* Sanitize it */ d_min = d_max; } } if (d_min_p) *d_min_p = d_min; return d_max; } #endif /* CONFIG_PM */ #ifdef CONFIG_PM_SLEEP /** * acpi_pm_device_run_wake - Enable/disable wake-up for given device. * @phys_dev: Device to enable/disable the platform to wake-up the system for. * @enable: Whether enable or disable the wake-up functionality. * * Find the ACPI device object corresponding to @pci_dev and try to * enable/disable the GPE associated with it. */ int acpi_pm_device_run_wake(struct device *phys_dev, bool enable) { struct acpi_device *dev; acpi_handle handle; if (!device_run_wake(phys_dev)) return -EINVAL; handle = DEVICE_ACPI_HANDLE(phys_dev); if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &dev))) { dev_dbg(phys_dev, "ACPI handle has no context in %s!\n", __func__); return -ENODEV; } if (enable) { acpi_enable_wakeup_device_power(dev, ACPI_STATE_S0); acpi_enable_gpe(dev->wakeup.gpe_device, dev->wakeup.gpe_number); } else { acpi_disable_gpe(dev->wakeup.gpe_device, dev->wakeup.gpe_number); acpi_disable_wakeup_device_power(dev); } return 0; } /** * acpi_pm_device_sleep_wake - enable or disable the system wake-up * capability of given device * @dev: device to handle * @enable: 'true' - enable, 'false' - disable the wake-up capability */ int acpi_pm_device_sleep_wake(struct device *dev, bool enable) { acpi_handle handle; struct acpi_device *adev; int error; if (!device_can_wakeup(dev)) return -EINVAL; handle = DEVICE_ACPI_HANDLE(dev); if 
(!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &adev))) { dev_dbg(dev, "ACPI handle has no context in %s!\n", __func__); return -ENODEV; } error = enable ? acpi_enable_wakeup_device_power(adev, acpi_target_sleep_state) : acpi_disable_wakeup_device_power(adev); if (!error) dev_info(dev, "wake-up capability %s by ACPI\n", enable ? "enabled" : "disabled"); return error; } #endif /* CONFIG_PM_SLEEP */ static void acpi_power_off_prepare(void) { /* Prepare to power off the system */ acpi_sleep_prepare(ACPI_STATE_S5); acpi_disable_all_gpes(); } static void acpi_power_off(void) { /* acpi_sleep_prepare(ACPI_STATE_S5) should have already been called */ printk(KERN_DEBUG "%s called\n", __func__); local_irq_disable(); acpi_enter_sleep_state(ACPI_STATE_S5, wake_sleep_flags); } /* * ACPI 2.0 created the optional _GTS and _BFS, * but industry adoption has been neither rapid nor broad. * * Linux gets into trouble when it executes poorly validated * paths through the BIOS, so disable _GTS and _BFS by default, * but do speak up and offer the option to enable them. 
*/ static void __init acpi_gts_bfs_check(void) { acpi_handle dummy; if (ACPI_SUCCESS(acpi_get_handle(ACPI_ROOT_OBJECT, METHOD_PATHNAME__GTS, &dummy))) { printk(KERN_NOTICE PREFIX "BIOS offers _GTS\n"); printk(KERN_NOTICE PREFIX "If \"acpi.gts=1\" improves suspend, " "please notify linux-acpi@vger.kernel.org\n"); } if (ACPI_SUCCESS(acpi_get_handle(ACPI_ROOT_OBJECT, METHOD_PATHNAME__BFS, &dummy))) { printk(KERN_NOTICE PREFIX "BIOS offers _BFS\n"); printk(KERN_NOTICE PREFIX "If \"acpi.bfs=1\" improves resume, " "please notify linux-acpi@vger.kernel.org\n"); } } int __init acpi_sleep_init(void) { acpi_status status; u8 type_a, type_b; #ifdef CONFIG_SUSPEND int i = 0; #endif if (acpi_disabled) return 0; acpi_sleep_dmi_check(); sleep_states[ACPI_STATE_S0] = 1; printk(KERN_INFO PREFIX "(supports S0"); #ifdef CONFIG_SUSPEND for (i = ACPI_STATE_S1; i < ACPI_STATE_S4; i++) { status = acpi_get_sleep_type_data(i, &type_a, &type_b); if (ACPI_SUCCESS(status)) { sleep_states[i] = 1; printk(" S%d", i); } } suspend_set_ops(old_suspend_ordering ? &acpi_suspend_ops_old : &acpi_suspend_ops); #endif #ifdef CONFIG_HIBERNATION status = acpi_get_sleep_type_data(ACPI_STATE_S4, &type_a, &type_b); if (ACPI_SUCCESS(status)) { hibernation_set_ops(old_suspend_ordering ? &acpi_hibernation_ops_old : &acpi_hibernation_ops); sleep_states[ACPI_STATE_S4] = 1; printk(" S4"); if (!nosigcheck) { acpi_get_table(ACPI_SIG_FACS, 1, (struct acpi_table_header **)&facs); if (facs) s4_hardware_signature = facs->hardware_signature; } } #endif status = acpi_get_sleep_type_data(ACPI_STATE_S5, &type_a, &type_b); if (ACPI_SUCCESS(status)) { sleep_states[ACPI_STATE_S5] = 1; printk(" S5"); pm_power_off_prepare = acpi_power_off_prepare; pm_power_off = acpi_power_off; } printk(")\n"); /* * Register the tts_notifier to reboot notifier list so that the _TTS * object can also be evaluated when the system enters S5. */ register_reboot_notifier(&tts_notifier); acpi_gts_bfs_check(); return 0; }
gpl-2.0
thenameisnigel/android_kernel_lge_ls840
lib/atomic64_test.c
2775
3562
/* * Testsuite for atomic64_t functions * * Copyright © 2010 Luca Barbieri * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/init.h> #include <linux/kernel.h> #include <asm/atomic.h> #define INIT(c) do { atomic64_set(&v, c); r = c; } while (0) static __init int test_atomic64(void) { long long v0 = 0xaaa31337c001d00dLL; long long v1 = 0xdeadbeefdeafcafeLL; long long v2 = 0xfaceabadf00df001LL; long long onestwos = 0x1111111122222222LL; long long one = 1LL; atomic64_t v = ATOMIC64_INIT(v0); long long r = v0; BUG_ON(v.counter != r); atomic64_set(&v, v1); r = v1; BUG_ON(v.counter != r); BUG_ON(atomic64_read(&v) != r); INIT(v0); atomic64_add(onestwos, &v); r += onestwos; BUG_ON(v.counter != r); INIT(v0); atomic64_add(-one, &v); r += -one; BUG_ON(v.counter != r); INIT(v0); r += onestwos; BUG_ON(atomic64_add_return(onestwos, &v) != r); BUG_ON(v.counter != r); INIT(v0); r += -one; BUG_ON(atomic64_add_return(-one, &v) != r); BUG_ON(v.counter != r); INIT(v0); atomic64_sub(onestwos, &v); r -= onestwos; BUG_ON(v.counter != r); INIT(v0); atomic64_sub(-one, &v); r -= -one; BUG_ON(v.counter != r); INIT(v0); r -= onestwos; BUG_ON(atomic64_sub_return(onestwos, &v) != r); BUG_ON(v.counter != r); INIT(v0); r -= -one; BUG_ON(atomic64_sub_return(-one, &v) != r); BUG_ON(v.counter != r); INIT(v0); atomic64_inc(&v); r += one; BUG_ON(v.counter != r); INIT(v0); r += one; BUG_ON(atomic64_inc_return(&v) != r); BUG_ON(v.counter != r); INIT(v0); atomic64_dec(&v); r -= one; BUG_ON(v.counter != r); INIT(v0); r -= one; BUG_ON(atomic64_dec_return(&v) != r); BUG_ON(v.counter != r); INIT(v0); BUG_ON(atomic64_xchg(&v, v1) != v0); r = v1; BUG_ON(v.counter != r); INIT(v0); BUG_ON(atomic64_cmpxchg(&v, v0, v1) != v0); r = v1; BUG_ON(v.counter != r); INIT(v0); BUG_ON(atomic64_cmpxchg(&v, v2, 
v1) != v0); BUG_ON(v.counter != r); INIT(v0); BUG_ON(atomic64_add_unless(&v, one, v0)); BUG_ON(v.counter != r); INIT(v0); BUG_ON(!atomic64_add_unless(&v, one, v1)); r += one; BUG_ON(v.counter != r); #if defined(CONFIG_X86) || defined(CONFIG_MIPS) || defined(CONFIG_PPC) || \ defined(CONFIG_S390) || defined(_ASM_GENERIC_ATOMIC64_H) || defined(CONFIG_ARM) INIT(onestwos); BUG_ON(atomic64_dec_if_positive(&v) != (onestwos - 1)); r -= one; BUG_ON(v.counter != r); INIT(0); BUG_ON(atomic64_dec_if_positive(&v) != -one); BUG_ON(v.counter != r); INIT(-one); BUG_ON(atomic64_dec_if_positive(&v) != (-one - one)); BUG_ON(v.counter != r); #else #warning Please implement atomic64_dec_if_positive for your architecture, and add it to the IF above #endif INIT(onestwos); BUG_ON(!atomic64_inc_not_zero(&v)); r += one; BUG_ON(v.counter != r); INIT(0); BUG_ON(atomic64_inc_not_zero(&v)); BUG_ON(v.counter != r); INIT(-one); BUG_ON(!atomic64_inc_not_zero(&v)); r += one; BUG_ON(v.counter != r); #ifdef CONFIG_X86 printk(KERN_INFO "atomic64 test passed for %s platform %s CX8 and %s SSE\n", #ifdef CONFIG_X86_64 "x86-64", #elif defined(CONFIG_X86_CMPXCHG64) "i586+", #else "i386+", #endif boot_cpu_has(X86_FEATURE_CX8) ? "with" : "without", boot_cpu_has(X86_FEATURE_XMM) ? "with" : "without"); #else printk(KERN_INFO "atomic64 test passed\n"); #endif return 0; } core_initcall(test_atomic64);
gpl-2.0
tadeas482/android_kernel_u8500
drivers/media/dvb/dvb-usb/a800.c
2775
5958
/* DVB USB framework compliant Linux driver for the AVerMedia AverTV DVB-T * USB2.0 (A800) DVB-T receiver. * * Copyright (C) 2005 Patrick Boettcher (patrick.boettcher@desy.de) * * Thanks to * - AVerMedia who kindly provided information and * - Glen Harris who suffered from my mistakes during development. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation, version 2. * * see Documentation/dvb/README.dvb-usb for more information */ #include "dibusb.h" static int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "set debugging level (rc=1 (or-able))." DVB_USB_DEBUG_STATUS); DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); #define deb_rc(args...) dprintk(debug,0x01,args) static int a800_power_ctrl(struct dvb_usb_device *d, int onoff) { /* do nothing for the AVerMedia */ return 0; } /* assure to put cold to 0 for iManufacturer == 1 */ static int a800_identify_state(struct usb_device *udev, struct dvb_usb_device_properties *props, struct dvb_usb_device_description **desc, int *cold) { *cold = udev->descriptor.iManufacturer != 1; return 0; } static struct rc_map_table rc_map_a800_table[] = { { 0x0201, KEY_MODE }, /* SOURCE */ { 0x0200, KEY_POWER2 }, /* POWER */ { 0x0205, KEY_1 }, /* 1 */ { 0x0206, KEY_2 }, /* 2 */ { 0x0207, KEY_3 }, /* 3 */ { 0x0209, KEY_4 }, /* 4 */ { 0x020a, KEY_5 }, /* 5 */ { 0x020b, KEY_6 }, /* 6 */ { 0x020d, KEY_7 }, /* 7 */ { 0x020e, KEY_8 }, /* 8 */ { 0x020f, KEY_9 }, /* 9 */ { 0x0212, KEY_LEFT }, /* L / DISPLAY */ { 0x0211, KEY_0 }, /* 0 */ { 0x0213, KEY_RIGHT }, /* R / CH RTN */ { 0x0217, KEY_CAMERA }, /* SNAP SHOT */ { 0x0210, KEY_LAST }, /* 16-CH PREV */ { 0x021e, KEY_VOLUMEDOWN }, /* VOL DOWN */ { 0x020c, KEY_ZOOM }, /* FULL SCREEN */ { 0x021f, KEY_VOLUMEUP }, /* VOL UP */ { 0x0214, KEY_MUTE }, /* MUTE */ { 0x0208, KEY_AUDIO }, /* AUDIO */ { 0x0219, KEY_RECORD }, /* RECORD */ { 0x0218, KEY_PLAY }, /* PLAY */ { 
0x021b, KEY_STOP }, /* STOP */ { 0x021a, KEY_PLAYPAUSE }, /* TIMESHIFT / PAUSE */ { 0x021d, KEY_BACK }, /* << / RED */ { 0x021c, KEY_FORWARD }, /* >> / YELLOW */ { 0x0203, KEY_TEXT }, /* TELETEXT */ { 0x0204, KEY_EPG }, /* EPG */ { 0x0215, KEY_MENU }, /* MENU */ { 0x0303, KEY_CHANNELUP }, /* CH UP */ { 0x0302, KEY_CHANNELDOWN }, /* CH DOWN */ { 0x0301, KEY_FIRST }, /* |<< / GREEN */ { 0x0300, KEY_LAST }, /* >>| / BLUE */ }; static int a800_rc_query(struct dvb_usb_device *d, u32 *event, int *state) { int ret; u8 *key = kmalloc(5, GFP_KERNEL); if (!key) return -ENOMEM; if (usb_control_msg(d->udev,usb_rcvctrlpipe(d->udev,0), 0x04, USB_TYPE_VENDOR | USB_DIR_IN, 0, 0, key, 5, 2000) != 5) { ret = -ENODEV; goto out; } /* call the universal NEC remote processor, to find out the key's state and event */ dvb_usb_nec_rc_key_to_event(d,key,event,state); if (key[0] != 0) deb_rc("key: %x %x %x %x %x\n",key[0],key[1],key[2],key[3],key[4]); ret = 0; out: kfree(key); return ret; } /* USB Driver stuff */ static struct dvb_usb_device_properties a800_properties; static int a800_probe(struct usb_interface *intf, const struct usb_device_id *id) { return dvb_usb_device_init(intf, &a800_properties, THIS_MODULE, NULL, adapter_nr); } /* do not change the order of the ID table */ static struct usb_device_id a800_table [] = { /* 00 */ { USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_DVBT_USB2_COLD) }, /* 01 */ { USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_DVBT_USB2_WARM) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE (usb, a800_table); static struct dvb_usb_device_properties a800_properties = { .caps = DVB_USB_IS_AN_I2C_ADAPTER, .usb_ctrl = CYPRESS_FX2, .firmware = "dvb-usb-avertv-a800-02.fw", .num_adapters = 1, .adapter = { { .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, .pid_filter_count = 32, .streaming_ctrl = dibusb2_0_streaming_ctrl, .pid_filter = dibusb_pid_filter, .pid_filter_ctrl = dibusb_pid_filter_ctrl, .frontend_attach = 
dibusb_dib3000mc_frontend_attach, .tuner_attach = dibusb_dib3000mc_tuner_attach, /* parameter for the MPEG2-data transfer */ .stream = { .type = USB_BULK, .count = 7, .endpoint = 0x06, .u = { .bulk = { .buffersize = 4096, } } }, .size_of_priv = sizeof(struct dibusb_state), }, }, .power_ctrl = a800_power_ctrl, .identify_state = a800_identify_state, .rc.legacy = { .rc_interval = DEFAULT_RC_INTERVAL, .rc_map_table = rc_map_a800_table, .rc_map_size = ARRAY_SIZE(rc_map_a800_table), .rc_query = a800_rc_query, }, .i2c_algo = &dibusb_i2c_algo, .generic_bulk_ctrl_endpoint = 0x01, .num_device_descs = 1, .devices = { { "AVerMedia AverTV DVB-T USB 2.0 (A800)", { &a800_table[0], NULL }, { &a800_table[1], NULL }, }, } }; static struct usb_driver a800_driver = { .name = "dvb_usb_a800", .probe = a800_probe, .disconnect = dvb_usb_device_exit, .id_table = a800_table, }; /* module stuff */ static int __init a800_module_init(void) { int result; if ((result = usb_register(&a800_driver))) { err("usb_register failed. Error number %d",result); return result; } return 0; } static void __exit a800_module_exit(void) { /* deregister this driver from the USB subsystem */ usb_deregister(&a800_driver); } module_init (a800_module_init); module_exit (a800_module_exit); MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>"); MODULE_DESCRIPTION("AVerMedia AverTV DVB-T USB 2.0 (A800)"); MODULE_VERSION("1.0"); MODULE_LICENSE("GPL");
gpl-2.0
AzraelsKiss/android_kernel_samsung_smdk4412
drivers/firewire/core-transaction.c
2775
34979
/* * Core IEEE1394 transaction logic * * Copyright (C) 2004-2006 Kristian Hoegsberg <krh@bitplanet.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include <linux/bug.h> #include <linux/completion.h> #include <linux/device.h> #include <linux/errno.h> #include <linux/firewire.h> #include <linux/firewire-constants.h> #include <linux/fs.h> #include <linux/init.h> #include <linux/idr.h> #include <linux/jiffies.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/string.h> #include <linux/timer.h> #include <linux/types.h> #include <linux/workqueue.h> #include <asm/byteorder.h> #include "core.h" #define HEADER_PRI(pri) ((pri) << 0) #define HEADER_TCODE(tcode) ((tcode) << 4) #define HEADER_RETRY(retry) ((retry) << 8) #define HEADER_TLABEL(tlabel) ((tlabel) << 10) #define HEADER_DESTINATION(destination) ((destination) << 16) #define HEADER_SOURCE(source) ((source) << 16) #define HEADER_RCODE(rcode) ((rcode) << 12) #define HEADER_OFFSET_HIGH(offset_high) ((offset_high) << 0) #define HEADER_DATA_LENGTH(length) ((length) << 16) #define HEADER_EXTENDED_TCODE(tcode) ((tcode) << 0) #define HEADER_GET_TCODE(q) (((q) >> 4) & 0x0f) #define HEADER_GET_TLABEL(q) (((q) >> 10) & 0x3f) #define HEADER_GET_RCODE(q) (((q) >> 12) & 0x0f) #define 
HEADER_GET_DESTINATION(q) (((q) >> 16) & 0xffff) #define HEADER_GET_SOURCE(q) (((q) >> 16) & 0xffff) #define HEADER_GET_OFFSET_HIGH(q) (((q) >> 0) & 0xffff) #define HEADER_GET_DATA_LENGTH(q) (((q) >> 16) & 0xffff) #define HEADER_GET_EXTENDED_TCODE(q) (((q) >> 0) & 0xffff) #define HEADER_DESTINATION_IS_BROADCAST(q) \ (((q) & HEADER_DESTINATION(0x3f)) == HEADER_DESTINATION(0x3f)) #define PHY_PACKET_CONFIG 0x0 #define PHY_PACKET_LINK_ON 0x1 #define PHY_PACKET_SELF_ID 0x2 #define PHY_CONFIG_GAP_COUNT(gap_count) (((gap_count) << 16) | (1 << 22)) #define PHY_CONFIG_ROOT_ID(node_id) ((((node_id) & 0x3f) << 24) | (1 << 23)) #define PHY_IDENTIFIER(id) ((id) << 30) /* returns 0 if the split timeout handler is already running */ static int try_cancel_split_timeout(struct fw_transaction *t) { if (t->is_split_transaction) return del_timer(&t->split_timeout_timer); else return 1; } static int close_transaction(struct fw_transaction *transaction, struct fw_card *card, int rcode) { struct fw_transaction *t; unsigned long flags; spin_lock_irqsave(&card->lock, flags); list_for_each_entry(t, &card->transaction_list, link) { if (t == transaction) { if (!try_cancel_split_timeout(t)) { spin_unlock_irqrestore(&card->lock, flags); goto timed_out; } list_del_init(&t->link); card->tlabel_mask &= ~(1ULL << t->tlabel); break; } } spin_unlock_irqrestore(&card->lock, flags); if (&t->link != &card->transaction_list) { t->callback(card, rcode, NULL, 0, t->callback_data); return 0; } timed_out: return -ENOENT; } /* * Only valid for transactions that are potentially pending (ie have * been sent). */ int fw_cancel_transaction(struct fw_card *card, struct fw_transaction *transaction) { /* * Cancel the packet transmission if it's still queued. That * will call the packet transmission callback which cancels * the transaction. 
*/ if (card->driver->cancel_packet(card, &transaction->packet) == 0) return 0; /* * If the request packet has already been sent, we need to see * if the transaction is still pending and remove it in that case. */ return close_transaction(transaction, card, RCODE_CANCELLED); } EXPORT_SYMBOL(fw_cancel_transaction); static void split_transaction_timeout_callback(unsigned long data) { struct fw_transaction *t = (struct fw_transaction *)data; struct fw_card *card = t->card; unsigned long flags; spin_lock_irqsave(&card->lock, flags); if (list_empty(&t->link)) { spin_unlock_irqrestore(&card->lock, flags); return; } list_del(&t->link); card->tlabel_mask &= ~(1ULL << t->tlabel); spin_unlock_irqrestore(&card->lock, flags); t->callback(card, RCODE_CANCELLED, NULL, 0, t->callback_data); } static void start_split_transaction_timeout(struct fw_transaction *t, struct fw_card *card) { unsigned long flags; spin_lock_irqsave(&card->lock, flags); if (list_empty(&t->link) || WARN_ON(t->is_split_transaction)) { spin_unlock_irqrestore(&card->lock, flags); return; } t->is_split_transaction = true; mod_timer(&t->split_timeout_timer, jiffies + card->split_timeout_jiffies); spin_unlock_irqrestore(&card->lock, flags); } static void transmit_complete_callback(struct fw_packet *packet, struct fw_card *card, int status) { struct fw_transaction *t = container_of(packet, struct fw_transaction, packet); switch (status) { case ACK_COMPLETE: close_transaction(t, card, RCODE_COMPLETE); break; case ACK_PENDING: start_split_transaction_timeout(t, card); break; case ACK_BUSY_X: case ACK_BUSY_A: case ACK_BUSY_B: close_transaction(t, card, RCODE_BUSY); break; case ACK_DATA_ERROR: close_transaction(t, card, RCODE_DATA_ERROR); break; case ACK_TYPE_ERROR: close_transaction(t, card, RCODE_TYPE_ERROR); break; default: /* * In this case the ack is really a juju specific * rcode, so just forward that to the callback. 
*/ close_transaction(t, card, status); break; } } static void fw_fill_request(struct fw_packet *packet, int tcode, int tlabel, int destination_id, int source_id, int generation, int speed, unsigned long long offset, void *payload, size_t length) { int ext_tcode; if (tcode == TCODE_STREAM_DATA) { packet->header[0] = HEADER_DATA_LENGTH(length) | destination_id | HEADER_TCODE(TCODE_STREAM_DATA); packet->header_length = 4; packet->payload = payload; packet->payload_length = length; goto common; } if (tcode > 0x10) { ext_tcode = tcode & ~0x10; tcode = TCODE_LOCK_REQUEST; } else ext_tcode = 0; packet->header[0] = HEADER_RETRY(RETRY_X) | HEADER_TLABEL(tlabel) | HEADER_TCODE(tcode) | HEADER_DESTINATION(destination_id); packet->header[1] = HEADER_OFFSET_HIGH(offset >> 32) | HEADER_SOURCE(source_id); packet->header[2] = offset; switch (tcode) { case TCODE_WRITE_QUADLET_REQUEST: packet->header[3] = *(u32 *)payload; packet->header_length = 16; packet->payload_length = 0; break; case TCODE_LOCK_REQUEST: case TCODE_WRITE_BLOCK_REQUEST: packet->header[3] = HEADER_DATA_LENGTH(length) | HEADER_EXTENDED_TCODE(ext_tcode); packet->header_length = 16; packet->payload = payload; packet->payload_length = length; break; case TCODE_READ_QUADLET_REQUEST: packet->header_length = 12; packet->payload_length = 0; break; case TCODE_READ_BLOCK_REQUEST: packet->header[3] = HEADER_DATA_LENGTH(length) | HEADER_EXTENDED_TCODE(ext_tcode); packet->header_length = 16; packet->payload_length = 0; break; default: WARN(1, "wrong tcode %d\n", tcode); } common: packet->speed = speed; packet->generation = generation; packet->ack = 0; packet->payload_mapped = false; } static int allocate_tlabel(struct fw_card *card) { int tlabel; tlabel = card->current_tlabel; while (card->tlabel_mask & (1ULL << tlabel)) { tlabel = (tlabel + 1) & 0x3f; if (tlabel == card->current_tlabel) return -EBUSY; } card->current_tlabel = (tlabel + 1) & 0x3f; card->tlabel_mask |= 1ULL << tlabel; return tlabel; } /** * fw_send_request() - 
submit a request packet for transmission * @card: interface to send the request at * @t: transaction instance to which the request belongs * @tcode: transaction code * @destination_id: destination node ID, consisting of bus_ID and phy_ID * @generation: bus generation in which request and response are valid * @speed: transmission speed * @offset: 48bit wide offset into destination's address space * @payload: data payload for the request subaction * @length: length of the payload, in bytes * @callback: function to be called when the transaction is completed * @callback_data: data to be passed to the transaction completion callback * * Submit a request packet into the asynchronous request transmission queue. * Can be called from atomic context. If you prefer a blocking API, use * fw_run_transaction() in a context that can sleep. * * In case of lock requests, specify one of the firewire-core specific %TCODE_ * constants instead of %TCODE_LOCK_REQUEST in @tcode. * * Make sure that the value in @destination_id is not older than the one in * @generation. Otherwise the request is in danger to be sent to a wrong node. * * In case of asynchronous stream packets i.e. %TCODE_STREAM_DATA, the caller * needs to synthesize @destination_id with fw_stream_packet_destination_id(). * It will contain tag, channel, and sy data instead of a node ID then. * * The payload buffer at @data is going to be DMA-mapped except in case of * @length <= 8 or of local (loopback) requests. Hence make sure that the * buffer complies with the restrictions of the streaming DMA mapping API. * @payload must not be freed before the @callback is called. * * In case of request types without payload, @data is NULL and @length is 0. * * After the transaction is completed successfully or unsuccessfully, the * @callback will be called. Among its parameters is the response code which * is either one of the rcodes per IEEE 1394 or, in case of internal errors, * the firewire-core specific %RCODE_SEND_ERROR. 
The other firewire-core * specific rcodes (%RCODE_CANCELLED, %RCODE_BUSY, %RCODE_GENERATION, * %RCODE_NO_ACK) denote transaction timeout, busy responder, stale request * generation, or missing ACK respectively. * * Note some timing corner cases: fw_send_request() may complete much earlier * than when the request packet actually hits the wire. On the other hand, * transaction completion and hence execution of @callback may happen even * before fw_send_request() returns. */ void fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode, int destination_id, int generation, int speed, unsigned long long offset, void *payload, size_t length, fw_transaction_callback_t callback, void *callback_data) { unsigned long flags; int tlabel; /* * Allocate tlabel from the bitmap and put the transaction on * the list while holding the card spinlock. */ spin_lock_irqsave(&card->lock, flags); tlabel = allocate_tlabel(card); if (tlabel < 0) { spin_unlock_irqrestore(&card->lock, flags); callback(card, RCODE_SEND_ERROR, NULL, 0, callback_data); return; } t->node_id = destination_id; t->tlabel = tlabel; t->card = card; t->is_split_transaction = false; setup_timer(&t->split_timeout_timer, split_transaction_timeout_callback, (unsigned long)t); t->callback = callback; t->callback_data = callback_data; fw_fill_request(&t->packet, tcode, t->tlabel, destination_id, card->node_id, generation, speed, offset, payload, length); t->packet.callback = transmit_complete_callback; list_add_tail(&t->link, &card->transaction_list); spin_unlock_irqrestore(&card->lock, flags); card->driver->send_request(card, &t->packet); } EXPORT_SYMBOL(fw_send_request); struct transaction_callback_data { struct completion done; void *payload; int rcode; }; static void transaction_callback(struct fw_card *card, int rcode, void *payload, size_t length, void *data) { struct transaction_callback_data *d = data; if (rcode == RCODE_COMPLETE) memcpy(d->payload, payload, length); d->rcode = rcode; 
complete(&d->done); } /** * fw_run_transaction() - send request and sleep until transaction is completed * * Returns the RCODE. See fw_send_request() for parameter documentation. * Unlike fw_send_request(), @data points to the payload of the request or/and * to the payload of the response. DMA mapping restrictions apply to outbound * request payloads of >= 8 bytes but not to inbound response payloads. */ int fw_run_transaction(struct fw_card *card, int tcode, int destination_id, int generation, int speed, unsigned long long offset, void *payload, size_t length) { struct transaction_callback_data d; struct fw_transaction t; init_timer_on_stack(&t.split_timeout_timer); init_completion(&d.done); d.payload = payload; fw_send_request(card, &t, tcode, destination_id, generation, speed, offset, payload, length, transaction_callback, &d); wait_for_completion(&d.done); destroy_timer_on_stack(&t.split_timeout_timer); return d.rcode; } EXPORT_SYMBOL(fw_run_transaction); static DEFINE_MUTEX(phy_config_mutex); static DECLARE_COMPLETION(phy_config_done); static void transmit_phy_packet_callback(struct fw_packet *packet, struct fw_card *card, int status) { complete(&phy_config_done); } static struct fw_packet phy_config_packet = { .header_length = 12, .header[0] = TCODE_LINK_INTERNAL << 4, .payload_length = 0, .speed = SCODE_100, .callback = transmit_phy_packet_callback, }; void fw_send_phy_config(struct fw_card *card, int node_id, int generation, int gap_count) { long timeout = DIV_ROUND_UP(HZ, 10); u32 data = PHY_IDENTIFIER(PHY_PACKET_CONFIG); if (node_id != FW_PHY_CONFIG_NO_NODE_ID) data |= PHY_CONFIG_ROOT_ID(node_id); if (gap_count == FW_PHY_CONFIG_CURRENT_GAP_COUNT) { gap_count = card->driver->read_phy_reg(card, 1); if (gap_count < 0) return; gap_count &= 63; if (gap_count == 63) return; } data |= PHY_CONFIG_GAP_COUNT(gap_count); mutex_lock(&phy_config_mutex); phy_config_packet.header[1] = data; phy_config_packet.header[2] = ~data; phy_config_packet.generation = generation; 
INIT_COMPLETION(phy_config_done); card->driver->send_request(card, &phy_config_packet); wait_for_completion_timeout(&phy_config_done, timeout); mutex_unlock(&phy_config_mutex); } static struct fw_address_handler *lookup_overlapping_address_handler( struct list_head *list, unsigned long long offset, size_t length) { struct fw_address_handler *handler; list_for_each_entry(handler, list, link) { if (handler->offset < offset + length && offset < handler->offset + handler->length) return handler; } return NULL; } static bool is_enclosing_handler(struct fw_address_handler *handler, unsigned long long offset, size_t length) { return handler->offset <= offset && offset + length <= handler->offset + handler->length; } static struct fw_address_handler *lookup_enclosing_address_handler( struct list_head *list, unsigned long long offset, size_t length) { struct fw_address_handler *handler; list_for_each_entry(handler, list, link) { if (is_enclosing_handler(handler, offset, length)) return handler; } return NULL; } static DEFINE_SPINLOCK(address_handler_lock); static LIST_HEAD(address_handler_list); const struct fw_address_region fw_high_memory_region = { .start = 0x000100000000ULL, .end = 0xffffe0000000ULL, }; EXPORT_SYMBOL(fw_high_memory_region); #if 0 const struct fw_address_region fw_low_memory_region = { .start = 0x000000000000ULL, .end = 0x000100000000ULL, }; const struct fw_address_region fw_private_region = { .start = 0xffffe0000000ULL, .end = 0xfffff0000000ULL, }; const struct fw_address_region fw_csr_region = { .start = CSR_REGISTER_BASE, .end = CSR_REGISTER_BASE | CSR_CONFIG_ROM_END, }; const struct fw_address_region fw_unit_space_region = { .start = 0xfffff0000900ULL, .end = 0x1000000000000ULL, }; #endif /* 0 */ static bool is_in_fcp_region(u64 offset, size_t length) { return offset >= (CSR_REGISTER_BASE | CSR_FCP_COMMAND) && offset + length <= (CSR_REGISTER_BASE | CSR_FCP_END); } /** * fw_core_add_address_handler() - register for incoming requests * @handler: 
callback * @region: region in the IEEE 1212 node space address range * * region->start, ->end, and handler->length have to be quadlet-aligned. * * When a request is received that falls within the specified address range, * the specified callback is invoked. The parameters passed to the callback * give the details of the particular request. * * Return value: 0 on success, non-zero otherwise. * * The start offset of the handler's address region is determined by * fw_core_add_address_handler() and is returned in handler->offset. * * Address allocations are exclusive, except for the FCP registers. */ int fw_core_add_address_handler(struct fw_address_handler *handler, const struct fw_address_region *region) { struct fw_address_handler *other; unsigned long flags; int ret = -EBUSY; if (region->start & 0xffff000000000003ULL || region->start >= region->end || region->end > 0x0001000000000000ULL || handler->length & 3 || handler->length == 0) return -EINVAL; spin_lock_irqsave(&address_handler_lock, flags); handler->offset = region->start; while (handler->offset + handler->length <= region->end) { if (is_in_fcp_region(handler->offset, handler->length)) other = NULL; else other = lookup_overlapping_address_handler (&address_handler_list, handler->offset, handler->length); if (other != NULL) { handler->offset += other->length; } else { list_add_tail(&handler->link, &address_handler_list); ret = 0; break; } } spin_unlock_irqrestore(&address_handler_lock, flags); return ret; } EXPORT_SYMBOL(fw_core_add_address_handler); /** * fw_core_remove_address_handler() - unregister an address handler */ void fw_core_remove_address_handler(struct fw_address_handler *handler) { unsigned long flags; spin_lock_irqsave(&address_handler_lock, flags); list_del(&handler->link); spin_unlock_irqrestore(&address_handler_lock, flags); } EXPORT_SYMBOL(fw_core_remove_address_handler); struct fw_request { struct fw_packet response; u32 request_header[4]; int ack; u32 length; u32 data[0]; }; static void 
free_response_callback(struct fw_packet *packet, struct fw_card *card, int status) { struct fw_request *request; request = container_of(packet, struct fw_request, response); kfree(request); } int fw_get_response_length(struct fw_request *r) { int tcode, ext_tcode, data_length; tcode = HEADER_GET_TCODE(r->request_header[0]); switch (tcode) { case TCODE_WRITE_QUADLET_REQUEST: case TCODE_WRITE_BLOCK_REQUEST: return 0; case TCODE_READ_QUADLET_REQUEST: return 4; case TCODE_READ_BLOCK_REQUEST: data_length = HEADER_GET_DATA_LENGTH(r->request_header[3]); return data_length; case TCODE_LOCK_REQUEST: ext_tcode = HEADER_GET_EXTENDED_TCODE(r->request_header[3]); data_length = HEADER_GET_DATA_LENGTH(r->request_header[3]); switch (ext_tcode) { case EXTCODE_FETCH_ADD: case EXTCODE_LITTLE_ADD: return data_length; default: return data_length / 2; } default: WARN(1, "wrong tcode %d\n", tcode); return 0; } } void fw_fill_response(struct fw_packet *response, u32 *request_header, int rcode, void *payload, size_t length) { int tcode, tlabel, extended_tcode, source, destination; tcode = HEADER_GET_TCODE(request_header[0]); tlabel = HEADER_GET_TLABEL(request_header[0]); source = HEADER_GET_DESTINATION(request_header[0]); destination = HEADER_GET_SOURCE(request_header[1]); extended_tcode = HEADER_GET_EXTENDED_TCODE(request_header[3]); response->header[0] = HEADER_RETRY(RETRY_1) | HEADER_TLABEL(tlabel) | HEADER_DESTINATION(destination); response->header[1] = HEADER_SOURCE(source) | HEADER_RCODE(rcode); response->header[2] = 0; switch (tcode) { case TCODE_WRITE_QUADLET_REQUEST: case TCODE_WRITE_BLOCK_REQUEST: response->header[0] |= HEADER_TCODE(TCODE_WRITE_RESPONSE); response->header_length = 12; response->payload_length = 0; break; case TCODE_READ_QUADLET_REQUEST: response->header[0] |= HEADER_TCODE(TCODE_READ_QUADLET_RESPONSE); if (payload != NULL) response->header[3] = *(u32 *)payload; else response->header[3] = 0; response->header_length = 16; response->payload_length = 0; break; case 
TCODE_READ_BLOCK_REQUEST: case TCODE_LOCK_REQUEST: response->header[0] |= HEADER_TCODE(tcode + 2); response->header[3] = HEADER_DATA_LENGTH(length) | HEADER_EXTENDED_TCODE(extended_tcode); response->header_length = 16; response->payload = payload; response->payload_length = length; break; default: WARN(1, "wrong tcode %d\n", tcode); } response->payload_mapped = false; } EXPORT_SYMBOL(fw_fill_response); static u32 compute_split_timeout_timestamp(struct fw_card *card, u32 request_timestamp) { unsigned int cycles; u32 timestamp; cycles = card->split_timeout_cycles; cycles += request_timestamp & 0x1fff; timestamp = request_timestamp & ~0x1fff; timestamp += (cycles / 8000) << 13; timestamp |= cycles % 8000; return timestamp; } static struct fw_request *allocate_request(struct fw_card *card, struct fw_packet *p) { struct fw_request *request; u32 *data, length; int request_tcode; request_tcode = HEADER_GET_TCODE(p->header[0]); switch (request_tcode) { case TCODE_WRITE_QUADLET_REQUEST: data = &p->header[3]; length = 4; break; case TCODE_WRITE_BLOCK_REQUEST: case TCODE_LOCK_REQUEST: data = p->payload; length = HEADER_GET_DATA_LENGTH(p->header[3]); break; case TCODE_READ_QUADLET_REQUEST: data = NULL; length = 4; break; case TCODE_READ_BLOCK_REQUEST: data = NULL; length = HEADER_GET_DATA_LENGTH(p->header[3]); break; default: fw_error("ERROR - corrupt request received - %08x %08x %08x\n", p->header[0], p->header[1], p->header[2]); return NULL; } request = kmalloc(sizeof(*request) + length, GFP_ATOMIC); if (request == NULL) return NULL; request->response.speed = p->speed; request->response.timestamp = compute_split_timeout_timestamp(card, p->timestamp); request->response.generation = p->generation; request->response.ack = 0; request->response.callback = free_response_callback; request->ack = p->ack; request->length = length; if (data) memcpy(request->data, data, length); memcpy(request->request_header, p->header, sizeof(p->header)); return request; } void 
fw_send_response(struct fw_card *card, struct fw_request *request, int rcode) { if (WARN_ONCE(!request, "invalid for FCP address handlers")) return; /* unified transaction or broadcast transaction: don't respond */ if (request->ack != ACK_PENDING || HEADER_DESTINATION_IS_BROADCAST(request->request_header[0])) { kfree(request); return; } if (rcode == RCODE_COMPLETE) fw_fill_response(&request->response, request->request_header, rcode, request->data, fw_get_response_length(request)); else fw_fill_response(&request->response, request->request_header, rcode, NULL, 0); card->driver->send_response(card, &request->response); } EXPORT_SYMBOL(fw_send_response); static void handle_exclusive_region_request(struct fw_card *card, struct fw_packet *p, struct fw_request *request, unsigned long long offset) { struct fw_address_handler *handler; unsigned long flags; int tcode, destination, source; destination = HEADER_GET_DESTINATION(p->header[0]); source = HEADER_GET_SOURCE(p->header[1]); tcode = HEADER_GET_TCODE(p->header[0]); if (tcode == TCODE_LOCK_REQUEST) tcode = 0x10 + HEADER_GET_EXTENDED_TCODE(p->header[3]); spin_lock_irqsave(&address_handler_lock, flags); handler = lookup_enclosing_address_handler(&address_handler_list, offset, request->length); spin_unlock_irqrestore(&address_handler_lock, flags); /* * FIXME: lookup the fw_node corresponding to the sender of * this request and pass that to the address handler instead * of the node ID. We may also want to move the address * allocations to fw_node so we only do this callback if the * upper layers registered it for this node. 
*/ if (handler == NULL) fw_send_response(card, request, RCODE_ADDRESS_ERROR); else handler->address_callback(card, request, tcode, destination, source, p->generation, offset, request->data, request->length, handler->callback_data); } static void handle_fcp_region_request(struct fw_card *card, struct fw_packet *p, struct fw_request *request, unsigned long long offset) { struct fw_address_handler *handler; unsigned long flags; int tcode, destination, source; if ((offset != (CSR_REGISTER_BASE | CSR_FCP_COMMAND) && offset != (CSR_REGISTER_BASE | CSR_FCP_RESPONSE)) || request->length > 0x200) { fw_send_response(card, request, RCODE_ADDRESS_ERROR); return; } tcode = HEADER_GET_TCODE(p->header[0]); destination = HEADER_GET_DESTINATION(p->header[0]); source = HEADER_GET_SOURCE(p->header[1]); if (tcode != TCODE_WRITE_QUADLET_REQUEST && tcode != TCODE_WRITE_BLOCK_REQUEST) { fw_send_response(card, request, RCODE_TYPE_ERROR); return; } spin_lock_irqsave(&address_handler_lock, flags); list_for_each_entry(handler, &address_handler_list, link) { if (is_enclosing_handler(handler, offset, request->length)) handler->address_callback(card, NULL, tcode, destination, source, p->generation, offset, request->data, request->length, handler->callback_data); } spin_unlock_irqrestore(&address_handler_lock, flags); fw_send_response(card, request, RCODE_COMPLETE); } void fw_core_handle_request(struct fw_card *card, struct fw_packet *p) { struct fw_request *request; unsigned long long offset; if (p->ack != ACK_PENDING && p->ack != ACK_COMPLETE) return; if (TCODE_IS_LINK_INTERNAL(HEADER_GET_TCODE(p->header[0]))) { fw_cdev_handle_phy_packet(card, p); return; } request = allocate_request(card, p); if (request == NULL) { /* FIXME: send statically allocated busy packet. 
*/ return; } offset = ((u64)HEADER_GET_OFFSET_HIGH(p->header[1]) << 32) | p->header[2]; if (!is_in_fcp_region(offset, request->length)) handle_exclusive_region_request(card, p, request, offset); else handle_fcp_region_request(card, p, request, offset); } EXPORT_SYMBOL(fw_core_handle_request); void fw_core_handle_response(struct fw_card *card, struct fw_packet *p) { struct fw_transaction *t; unsigned long flags; u32 *data; size_t data_length; int tcode, tlabel, source, rcode; tcode = HEADER_GET_TCODE(p->header[0]); tlabel = HEADER_GET_TLABEL(p->header[0]); source = HEADER_GET_SOURCE(p->header[1]); rcode = HEADER_GET_RCODE(p->header[1]); spin_lock_irqsave(&card->lock, flags); list_for_each_entry(t, &card->transaction_list, link) { if (t->node_id == source && t->tlabel == tlabel) { if (!try_cancel_split_timeout(t)) { spin_unlock_irqrestore(&card->lock, flags); goto timed_out; } list_del_init(&t->link); card->tlabel_mask &= ~(1ULL << t->tlabel); break; } } spin_unlock_irqrestore(&card->lock, flags); if (&t->link == &card->transaction_list) { timed_out: fw_notify("Unsolicited response (source %x, tlabel %x)\n", source, tlabel); return; } /* * FIXME: sanity check packet, is length correct, does tcodes * and addresses match. */ switch (tcode) { case TCODE_READ_QUADLET_RESPONSE: data = (u32 *) &p->header[3]; data_length = 4; break; case TCODE_WRITE_RESPONSE: data = NULL; data_length = 0; break; case TCODE_READ_BLOCK_RESPONSE: case TCODE_LOCK_RESPONSE: data = p->payload; data_length = HEADER_GET_DATA_LENGTH(p->header[3]); break; default: /* Should never happen, this is just to shut up gcc. */ data = NULL; data_length = 0; break; } /* * The response handler may be executed while the request handler * is still pending. Cancel the request handler. 
*/ card->driver->cancel_packet(card, &t->packet); t->callback(card, rcode, data, data_length, t->callback_data); } EXPORT_SYMBOL(fw_core_handle_response); static const struct fw_address_region topology_map_region = { .start = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP, .end = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP_END, }; static void handle_topology_map(struct fw_card *card, struct fw_request *request, int tcode, int destination, int source, int generation, unsigned long long offset, void *payload, size_t length, void *callback_data) { int start; if (!TCODE_IS_READ_REQUEST(tcode)) { fw_send_response(card, request, RCODE_TYPE_ERROR); return; } if ((offset & 3) > 0 || (length & 3) > 0) { fw_send_response(card, request, RCODE_ADDRESS_ERROR); return; } start = (offset - topology_map_region.start) / 4; memcpy(payload, &card->topology_map[start], length); fw_send_response(card, request, RCODE_COMPLETE); } static struct fw_address_handler topology_map = { .length = 0x400, .address_callback = handle_topology_map, }; static const struct fw_address_region registers_region = { .start = CSR_REGISTER_BASE, .end = CSR_REGISTER_BASE | CSR_CONFIG_ROM, }; static void update_split_timeout(struct fw_card *card) { unsigned int cycles; cycles = card->split_timeout_hi * 8000 + (card->split_timeout_lo >> 19); cycles = max(cycles, 800u); /* minimum as per the spec */ cycles = min(cycles, 3u * 8000u); /* maximum OHCI timeout */ card->split_timeout_cycles = cycles; card->split_timeout_jiffies = DIV_ROUND_UP(cycles * HZ, 8000); } static void handle_registers(struct fw_card *card, struct fw_request *request, int tcode, int destination, int source, int generation, unsigned long long offset, void *payload, size_t length, void *callback_data) { int reg = offset & ~CSR_REGISTER_BASE; __be32 *data = payload; int rcode = RCODE_COMPLETE; unsigned long flags; switch (reg) { case CSR_PRIORITY_BUDGET: if (!card->priority_budget_implemented) { rcode = RCODE_ADDRESS_ERROR; break; } /* else fall through */ case 
CSR_NODE_IDS: /* * per IEEE 1394-2008 8.3.22.3, not IEEE 1394.1-2004 3.2.8 * and 9.6, but interoperable with IEEE 1394.1-2004 bridges */ /* fall through */ case CSR_STATE_CLEAR: case CSR_STATE_SET: case CSR_CYCLE_TIME: case CSR_BUS_TIME: case CSR_BUSY_TIMEOUT: if (tcode == TCODE_READ_QUADLET_REQUEST) *data = cpu_to_be32(card->driver->read_csr(card, reg)); else if (tcode == TCODE_WRITE_QUADLET_REQUEST) card->driver->write_csr(card, reg, be32_to_cpu(*data)); else rcode = RCODE_TYPE_ERROR; break; case CSR_RESET_START: if (tcode == TCODE_WRITE_QUADLET_REQUEST) card->driver->write_csr(card, CSR_STATE_CLEAR, CSR_STATE_BIT_ABDICATE); else rcode = RCODE_TYPE_ERROR; break; case CSR_SPLIT_TIMEOUT_HI: if (tcode == TCODE_READ_QUADLET_REQUEST) { *data = cpu_to_be32(card->split_timeout_hi); } else if (tcode == TCODE_WRITE_QUADLET_REQUEST) { spin_lock_irqsave(&card->lock, flags); card->split_timeout_hi = be32_to_cpu(*data) & 7; update_split_timeout(card); spin_unlock_irqrestore(&card->lock, flags); } else { rcode = RCODE_TYPE_ERROR; } break; case CSR_SPLIT_TIMEOUT_LO: if (tcode == TCODE_READ_QUADLET_REQUEST) { *data = cpu_to_be32(card->split_timeout_lo); } else if (tcode == TCODE_WRITE_QUADLET_REQUEST) { spin_lock_irqsave(&card->lock, flags); card->split_timeout_lo = be32_to_cpu(*data) & 0xfff80000; update_split_timeout(card); spin_unlock_irqrestore(&card->lock, flags); } else { rcode = RCODE_TYPE_ERROR; } break; case CSR_MAINT_UTILITY: if (tcode == TCODE_READ_QUADLET_REQUEST) *data = card->maint_utility_register; else if (tcode == TCODE_WRITE_QUADLET_REQUEST) card->maint_utility_register = *data; else rcode = RCODE_TYPE_ERROR; break; case CSR_BROADCAST_CHANNEL: if (tcode == TCODE_READ_QUADLET_REQUEST) *data = cpu_to_be32(card->broadcast_channel); else if (tcode == TCODE_WRITE_QUADLET_REQUEST) card->broadcast_channel = (be32_to_cpu(*data) & BROADCAST_CHANNEL_VALID) | BROADCAST_CHANNEL_INITIAL; else rcode = RCODE_TYPE_ERROR; break; case CSR_BUS_MANAGER_ID: case 
CSR_BANDWIDTH_AVAILABLE: case CSR_CHANNELS_AVAILABLE_HI: case CSR_CHANNELS_AVAILABLE_LO: /* * FIXME: these are handled by the OHCI hardware and * the stack never sees these request. If we add * support for a new type of controller that doesn't * handle this in hardware we need to deal with these * transactions. */ BUG(); break; default: rcode = RCODE_ADDRESS_ERROR; break; } fw_send_response(card, request, rcode); } static struct fw_address_handler registers = { .length = 0x400, .address_callback = handle_registers, }; MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>"); MODULE_DESCRIPTION("Core IEEE1394 transaction logic"); MODULE_LICENSE("GPL"); static const u32 vendor_textual_descriptor[] = { /* textual descriptor leaf () */ 0x00060000, 0x00000000, 0x00000000, 0x4c696e75, /* L i n u */ 0x78204669, /* x F i */ 0x72657769, /* r e w i */ 0x72650000, /* r e */ }; static const u32 model_textual_descriptor[] = { /* model descriptor leaf () */ 0x00030000, 0x00000000, 0x00000000, 0x4a756a75, /* J u j u */ }; static struct fw_descriptor vendor_id_descriptor = { .length = ARRAY_SIZE(vendor_textual_descriptor), .immediate = 0x03d00d1e, .key = 0x81000000, .data = vendor_textual_descriptor, }; static struct fw_descriptor model_id_descriptor = { .length = ARRAY_SIZE(model_textual_descriptor), .immediate = 0x17000001, .key = 0x81000000, .data = model_textual_descriptor, }; static int __init fw_core_init(void) { int ret; fw_workqueue = alloc_workqueue("firewire", WQ_NON_REENTRANT | WQ_MEM_RECLAIM, 0); if (!fw_workqueue) return -ENOMEM; ret = bus_register(&fw_bus_type); if (ret < 0) { destroy_workqueue(fw_workqueue); return ret; } fw_cdev_major = register_chrdev(0, "firewire", &fw_device_ops); if (fw_cdev_major < 0) { bus_unregister(&fw_bus_type); destroy_workqueue(fw_workqueue); return fw_cdev_major; } fw_core_add_address_handler(&topology_map, &topology_map_region); fw_core_add_address_handler(&registers, &registers_region); fw_core_add_descriptor(&vendor_id_descriptor); 
fw_core_add_descriptor(&model_id_descriptor); return 0; } static void __exit fw_core_cleanup(void) { unregister_chrdev(fw_cdev_major, "firewire"); bus_unregister(&fw_bus_type); destroy_workqueue(fw_workqueue); idr_destroy(&fw_device_idr); } module_init(fw_core_init); module_exit(fw_core_cleanup);
gpl-2.0
TeamGlade-Devices/android_kernel_htc_pico
sound/soc/codecs/cs42l51.c
3031
18357
/* * cs42l51.c * * ASoC Driver for Cirrus Logic CS42L51 codecs * * Copyright (c) 2010 Arnaud Patard <apatard@mandriva.com> * * Based on cs4270.c - Copyright (c) Freescale Semiconductor * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * For now: * - Only I2C is support. Not SPI * - master mode *NOT* supported */ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <sound/core.h> #include <sound/soc.h> #include <sound/tlv.h> #include <sound/initval.h> #include <sound/pcm_params.h> #include <sound/pcm.h> #include <linux/i2c.h> #include "cs42l51.h" enum master_slave_mode { MODE_SLAVE, MODE_SLAVE_AUTO, MODE_MASTER, }; struct cs42l51_private { enum snd_soc_control_type control_type; void *control_data; unsigned int mclk; unsigned int audio_mode; /* The mode (I2S or left-justified) */ enum master_slave_mode func; }; #define CS42L51_FORMATS ( \ SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S16_BE | \ SNDRV_PCM_FMTBIT_S18_3LE | SNDRV_PCM_FMTBIT_S18_3BE | \ SNDRV_PCM_FMTBIT_S20_3LE | SNDRV_PCM_FMTBIT_S20_3BE | \ SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S24_BE) static int cs42l51_fill_cache(struct snd_soc_codec *codec) { u8 *cache = codec->reg_cache + 1; struct i2c_client *i2c_client = codec->control_data; s32 length; length = i2c_smbus_read_i2c_block_data(i2c_client, CS42L51_FIRSTREG | 0x80, CS42L51_NUMREGS, cache); if (length != CS42L51_NUMREGS) { dev_err(&i2c_client->dev, "I2C read failure, addr=0x%x (ret=%d vs %d)\n", i2c_client->addr, length, CS42L51_NUMREGS); return -EIO; } return 0; } static int cs42l51_get_chan_mix(struct snd_kcontrol *kcontrol, struct 
snd_ctl_elem_value *ucontrol) { struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); unsigned long value = snd_soc_read(codec, CS42L51_PCM_MIXER)&3; switch (value) { default: case 0: ucontrol->value.integer.value[0] = 0; break; /* same value : (L+R)/2 and (R+L)/2 */ case 1: case 2: ucontrol->value.integer.value[0] = 1; break; case 3: ucontrol->value.integer.value[0] = 2; break; } return 0; } #define CHAN_MIX_NORMAL 0x00 #define CHAN_MIX_BOTH 0x55 #define CHAN_MIX_SWAP 0xFF static int cs42l51_set_chan_mix(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); unsigned char val; switch (ucontrol->value.integer.value[0]) { default: case 0: val = CHAN_MIX_NORMAL; break; case 1: val = CHAN_MIX_BOTH; break; case 2: val = CHAN_MIX_SWAP; break; } snd_soc_write(codec, CS42L51_PCM_MIXER, val); return 1; } static const DECLARE_TLV_DB_SCALE(adc_pcm_tlv, -5150, 50, 0); static const DECLARE_TLV_DB_SCALE(tone_tlv, -1050, 150, 0); /* This is a lie. 
after -102 db, it stays at -102 */ /* maybe a range would be better */ static const DECLARE_TLV_DB_SCALE(aout_tlv, -11550, 50, 0); static const DECLARE_TLV_DB_SCALE(boost_tlv, 1600, 1600, 0); static const char *chan_mix[] = { "L R", "L+R", "R L", }; static const struct soc_enum cs42l51_chan_mix = SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(chan_mix), chan_mix); static const struct snd_kcontrol_new cs42l51_snd_controls[] = { SOC_DOUBLE_R_SX_TLV("PCM Playback Volume", CS42L51_PCMA_VOL, CS42L51_PCMB_VOL, 7, 0xffffff99, 0x18, adc_pcm_tlv), SOC_DOUBLE_R("PCM Playback Switch", CS42L51_PCMA_VOL, CS42L51_PCMB_VOL, 7, 1, 1), SOC_DOUBLE_R_SX_TLV("Analog Playback Volume", CS42L51_AOUTA_VOL, CS42L51_AOUTB_VOL, 8, 0xffffff19, 0x18, aout_tlv), SOC_DOUBLE_R_SX_TLV("ADC Mixer Volume", CS42L51_ADCA_VOL, CS42L51_ADCB_VOL, 7, 0xffffff99, 0x18, adc_pcm_tlv), SOC_DOUBLE_R("ADC Mixer Switch", CS42L51_ADCA_VOL, CS42L51_ADCB_VOL, 7, 1, 1), SOC_SINGLE("Playback Deemphasis Switch", CS42L51_DAC_CTL, 3, 1, 0), SOC_SINGLE("Auto-Mute Switch", CS42L51_DAC_CTL, 2, 1, 0), SOC_SINGLE("Soft Ramp Switch", CS42L51_DAC_CTL, 1, 1, 0), SOC_SINGLE("Zero Cross Switch", CS42L51_DAC_CTL, 0, 0, 0), SOC_DOUBLE_TLV("Mic Boost Volume", CS42L51_MIC_CTL, 0, 1, 1, 0, boost_tlv), SOC_SINGLE_TLV("Bass Volume", CS42L51_TONE_CTL, 0, 0xf, 1, tone_tlv), SOC_SINGLE_TLV("Treble Volume", CS42L51_TONE_CTL, 4, 0xf, 1, tone_tlv), SOC_ENUM_EXT("PCM channel mixer", cs42l51_chan_mix, cs42l51_get_chan_mix, cs42l51_set_chan_mix), }; /* * to power down, one must: * 1.) Enable the PDN bit * 2.) enable power-down for the select channels * 3.) disable the PDN bit. 
*/ static int cs42l51_pdn_event(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event) { unsigned long value; value = snd_soc_read(w->codec, CS42L51_POWER_CTL1); value &= ~CS42L51_POWER_CTL1_PDN; switch (event) { case SND_SOC_DAPM_PRE_PMD: value |= CS42L51_POWER_CTL1_PDN; break; default: case SND_SOC_DAPM_POST_PMD: break; } snd_soc_update_bits(w->codec, CS42L51_POWER_CTL1, CS42L51_POWER_CTL1_PDN, value); return 0; } static const char *cs42l51_dac_names[] = {"Direct PCM", "DSP PCM", "ADC"}; static const struct soc_enum cs42l51_dac_mux_enum = SOC_ENUM_SINGLE(CS42L51_DAC_CTL, 6, 3, cs42l51_dac_names); static const struct snd_kcontrol_new cs42l51_dac_mux_controls = SOC_DAPM_ENUM("Route", cs42l51_dac_mux_enum); static const char *cs42l51_adcl_names[] = {"AIN1 Left", "AIN2 Left", "MIC Left", "MIC+preamp Left"}; static const struct soc_enum cs42l51_adcl_mux_enum = SOC_ENUM_SINGLE(CS42L51_ADC_INPUT, 4, 4, cs42l51_adcl_names); static const struct snd_kcontrol_new cs42l51_adcl_mux_controls = SOC_DAPM_ENUM("Route", cs42l51_adcl_mux_enum); static const char *cs42l51_adcr_names[] = {"AIN1 Right", "AIN2 Right", "MIC Right", "MIC+preamp Right"}; static const struct soc_enum cs42l51_adcr_mux_enum = SOC_ENUM_SINGLE(CS42L51_ADC_INPUT, 6, 4, cs42l51_adcr_names); static const struct snd_kcontrol_new cs42l51_adcr_mux_controls = SOC_DAPM_ENUM("Route", cs42l51_adcr_mux_enum); static const struct snd_soc_dapm_widget cs42l51_dapm_widgets[] = { SND_SOC_DAPM_MICBIAS("Mic Bias", CS42L51_MIC_POWER_CTL, 1, 1), SND_SOC_DAPM_PGA_E("Left PGA", CS42L51_POWER_CTL1, 3, 1, NULL, 0, cs42l51_pdn_event, SND_SOC_DAPM_PRE_POST_PMD), SND_SOC_DAPM_PGA_E("Right PGA", CS42L51_POWER_CTL1, 4, 1, NULL, 0, cs42l51_pdn_event, SND_SOC_DAPM_PRE_POST_PMD), SND_SOC_DAPM_ADC_E("Left ADC", "Left HiFi Capture", CS42L51_POWER_CTL1, 1, 1, cs42l51_pdn_event, SND_SOC_DAPM_PRE_POST_PMD), SND_SOC_DAPM_ADC_E("Right ADC", "Right HiFi Capture", CS42L51_POWER_CTL1, 2, 1, cs42l51_pdn_event, 
SND_SOC_DAPM_PRE_POST_PMD), SND_SOC_DAPM_DAC_E("Left DAC", "Left HiFi Playback", CS42L51_POWER_CTL1, 5, 1, cs42l51_pdn_event, SND_SOC_DAPM_PRE_POST_PMD), SND_SOC_DAPM_DAC_E("Right DAC", "Right HiFi Playback", CS42L51_POWER_CTL1, 6, 1, cs42l51_pdn_event, SND_SOC_DAPM_PRE_POST_PMD), /* analog/mic */ SND_SOC_DAPM_INPUT("AIN1L"), SND_SOC_DAPM_INPUT("AIN1R"), SND_SOC_DAPM_INPUT("AIN2L"), SND_SOC_DAPM_INPUT("AIN2R"), SND_SOC_DAPM_INPUT("MICL"), SND_SOC_DAPM_INPUT("MICR"), SND_SOC_DAPM_MIXER("Mic Preamp Left", CS42L51_MIC_POWER_CTL, 2, 1, NULL, 0), SND_SOC_DAPM_MIXER("Mic Preamp Right", CS42L51_MIC_POWER_CTL, 3, 1, NULL, 0), /* HP */ SND_SOC_DAPM_OUTPUT("HPL"), SND_SOC_DAPM_OUTPUT("HPR"), /* mux */ SND_SOC_DAPM_MUX("DAC Mux", SND_SOC_NOPM, 0, 0, &cs42l51_dac_mux_controls), SND_SOC_DAPM_MUX("PGA-ADC Mux Left", SND_SOC_NOPM, 0, 0, &cs42l51_adcl_mux_controls), SND_SOC_DAPM_MUX("PGA-ADC Mux Right", SND_SOC_NOPM, 0, 0, &cs42l51_adcr_mux_controls), }; static const struct snd_soc_dapm_route cs42l51_routes[] = { {"HPL", NULL, "Left DAC"}, {"HPR", NULL, "Right DAC"}, {"Left ADC", NULL, "Left PGA"}, {"Right ADC", NULL, "Right PGA"}, {"Mic Preamp Left", NULL, "MICL"}, {"Mic Preamp Right", NULL, "MICR"}, {"PGA-ADC Mux Left", "AIN1 Left", "AIN1L" }, {"PGA-ADC Mux Left", "AIN2 Left", "AIN2L" }, {"PGA-ADC Mux Left", "MIC Left", "MICL" }, {"PGA-ADC Mux Left", "MIC+preamp Left", "Mic Preamp Left" }, {"PGA-ADC Mux Right", "AIN1 Right", "AIN1R" }, {"PGA-ADC Mux Right", "AIN2 Right", "AIN2R" }, {"PGA-ADC Mux Right", "MIC Right", "MICR" }, {"PGA-ADC Mux Right", "MIC+preamp Right", "Mic Preamp Right" }, {"Left PGA", NULL, "PGA-ADC Mux Left"}, {"Right PGA", NULL, "PGA-ADC Mux Right"}, }; static int cs42l51_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int format) { struct snd_soc_codec *codec = codec_dai->codec; struct cs42l51_private *cs42l51 = snd_soc_codec_get_drvdata(codec); int ret = 0; switch (format & SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_I2S: case 
SND_SOC_DAIFMT_LEFT_J: case SND_SOC_DAIFMT_RIGHT_J: cs42l51->audio_mode = format & SND_SOC_DAIFMT_FORMAT_MASK; break; default: dev_err(codec->dev, "invalid DAI format\n"); ret = -EINVAL; } switch (format & SND_SOC_DAIFMT_MASTER_MASK) { case SND_SOC_DAIFMT_CBM_CFM: cs42l51->func = MODE_MASTER; break; case SND_SOC_DAIFMT_CBS_CFS: cs42l51->func = MODE_SLAVE_AUTO; break; default: ret = -EINVAL; break; } return ret; } struct cs42l51_ratios { unsigned int ratio; unsigned char speed_mode; unsigned char mclk; }; static struct cs42l51_ratios slave_ratios[] = { { 512, CS42L51_QSM_MODE, 0 }, { 768, CS42L51_QSM_MODE, 0 }, { 1024, CS42L51_QSM_MODE, 0 }, { 1536, CS42L51_QSM_MODE, 0 }, { 2048, CS42L51_QSM_MODE, 0 }, { 3072, CS42L51_QSM_MODE, 0 }, { 256, CS42L51_HSM_MODE, 0 }, { 384, CS42L51_HSM_MODE, 0 }, { 512, CS42L51_HSM_MODE, 0 }, { 768, CS42L51_HSM_MODE, 0 }, { 1024, CS42L51_HSM_MODE, 0 }, { 1536, CS42L51_HSM_MODE, 0 }, { 128, CS42L51_SSM_MODE, 0 }, { 192, CS42L51_SSM_MODE, 0 }, { 256, CS42L51_SSM_MODE, 0 }, { 384, CS42L51_SSM_MODE, 0 }, { 512, CS42L51_SSM_MODE, 0 }, { 768, CS42L51_SSM_MODE, 0 }, { 128, CS42L51_DSM_MODE, 0 }, { 192, CS42L51_DSM_MODE, 0 }, { 256, CS42L51_DSM_MODE, 0 }, { 384, CS42L51_DSM_MODE, 0 }, }; static struct cs42l51_ratios slave_auto_ratios[] = { { 1024, CS42L51_QSM_MODE, 0 }, { 1536, CS42L51_QSM_MODE, 0 }, { 2048, CS42L51_QSM_MODE, 1 }, { 3072, CS42L51_QSM_MODE, 1 }, { 512, CS42L51_HSM_MODE, 0 }, { 768, CS42L51_HSM_MODE, 0 }, { 1024, CS42L51_HSM_MODE, 1 }, { 1536, CS42L51_HSM_MODE, 1 }, { 256, CS42L51_SSM_MODE, 0 }, { 384, CS42L51_SSM_MODE, 0 }, { 512, CS42L51_SSM_MODE, 1 }, { 768, CS42L51_SSM_MODE, 1 }, { 128, CS42L51_DSM_MODE, 0 }, { 192, CS42L51_DSM_MODE, 0 }, { 256, CS42L51_DSM_MODE, 1 }, { 384, CS42L51_DSM_MODE, 1 }, }; static int cs42l51_set_dai_sysclk(struct snd_soc_dai *codec_dai, int clk_id, unsigned int freq, int dir) { struct snd_soc_codec *codec = codec_dai->codec; struct cs42l51_private *cs42l51 = snd_soc_codec_get_drvdata(codec); 
cs42l51->mclk = freq; return 0; } static int cs42l51_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_codec *codec = rtd->codec; struct cs42l51_private *cs42l51 = snd_soc_codec_get_drvdata(codec); int ret; unsigned int i; unsigned int rate; unsigned int ratio; struct cs42l51_ratios *ratios = NULL; int nr_ratios = 0; int intf_ctl, power_ctl, fmt; switch (cs42l51->func) { case MODE_MASTER: return -EINVAL; case MODE_SLAVE: ratios = slave_ratios; nr_ratios = ARRAY_SIZE(slave_ratios); break; case MODE_SLAVE_AUTO: ratios = slave_auto_ratios; nr_ratios = ARRAY_SIZE(slave_auto_ratios); break; } /* Figure out which MCLK/LRCK ratio to use */ rate = params_rate(params); /* Sampling rate, in Hz */ ratio = cs42l51->mclk / rate; /* MCLK/LRCK ratio */ for (i = 0; i < nr_ratios; i++) { if (ratios[i].ratio == ratio) break; } if (i == nr_ratios) { /* We did not find a matching ratio */ dev_err(codec->dev, "could not find matching ratio\n"); return -EINVAL; } intf_ctl = snd_soc_read(codec, CS42L51_INTF_CTL); power_ctl = snd_soc_read(codec, CS42L51_MIC_POWER_CTL); intf_ctl &= ~(CS42L51_INTF_CTL_MASTER | CS42L51_INTF_CTL_ADC_I2S | CS42L51_INTF_CTL_DAC_FORMAT(7)); power_ctl &= ~(CS42L51_MIC_POWER_CTL_SPEED(3) | CS42L51_MIC_POWER_CTL_MCLK_DIV2); switch (cs42l51->func) { case MODE_MASTER: intf_ctl |= CS42L51_INTF_CTL_MASTER; power_ctl |= CS42L51_MIC_POWER_CTL_SPEED(ratios[i].speed_mode); break; case MODE_SLAVE: power_ctl |= CS42L51_MIC_POWER_CTL_SPEED(ratios[i].speed_mode); break; case MODE_SLAVE_AUTO: power_ctl |= CS42L51_MIC_POWER_CTL_AUTO; break; } switch (cs42l51->audio_mode) { case SND_SOC_DAIFMT_I2S: intf_ctl |= CS42L51_INTF_CTL_ADC_I2S; intf_ctl |= CS42L51_INTF_CTL_DAC_FORMAT(CS42L51_DAC_DIF_I2S); break; case SND_SOC_DAIFMT_LEFT_J: intf_ctl |= CS42L51_INTF_CTL_DAC_FORMAT(CS42L51_DAC_DIF_LJ24); break; case SND_SOC_DAIFMT_RIGHT_J: switch 
(params_format(params)) { case SNDRV_PCM_FORMAT_S16_LE: case SNDRV_PCM_FORMAT_S16_BE: fmt = CS42L51_DAC_DIF_RJ16; break; case SNDRV_PCM_FORMAT_S18_3LE: case SNDRV_PCM_FORMAT_S18_3BE: fmt = CS42L51_DAC_DIF_RJ18; break; case SNDRV_PCM_FORMAT_S20_3LE: case SNDRV_PCM_FORMAT_S20_3BE: fmt = CS42L51_DAC_DIF_RJ20; break; case SNDRV_PCM_FORMAT_S24_LE: case SNDRV_PCM_FORMAT_S24_BE: fmt = CS42L51_DAC_DIF_RJ24; break; default: dev_err(codec->dev, "unknown format\n"); return -EINVAL; } intf_ctl |= CS42L51_INTF_CTL_DAC_FORMAT(fmt); break; default: dev_err(codec->dev, "unknown format\n"); return -EINVAL; } if (ratios[i].mclk) power_ctl |= CS42L51_MIC_POWER_CTL_MCLK_DIV2; ret = snd_soc_write(codec, CS42L51_INTF_CTL, intf_ctl); if (ret < 0) return ret; ret = snd_soc_write(codec, CS42L51_MIC_POWER_CTL, power_ctl); if (ret < 0) return ret; return 0; } static int cs42l51_dai_mute(struct snd_soc_dai *dai, int mute) { struct snd_soc_codec *codec = dai->codec; int reg; int mask = CS42L51_DAC_OUT_CTL_DACA_MUTE|CS42L51_DAC_OUT_CTL_DACB_MUTE; reg = snd_soc_read(codec, CS42L51_DAC_OUT_CTL); if (mute) reg |= mask; else reg &= ~mask; return snd_soc_write(codec, CS42L51_DAC_OUT_CTL, reg); } static struct snd_soc_dai_ops cs42l51_dai_ops = { .hw_params = cs42l51_hw_params, .set_sysclk = cs42l51_set_dai_sysclk, .set_fmt = cs42l51_set_dai_fmt, .digital_mute = cs42l51_dai_mute, }; static struct snd_soc_dai_driver cs42l51_dai = { .name = "cs42l51-hifi", .playback = { .stream_name = "Playback", .channels_min = 1, .channels_max = 2, .rates = SNDRV_PCM_RATE_8000_96000, .formats = CS42L51_FORMATS, }, .capture = { .stream_name = "Capture", .channels_min = 1, .channels_max = 2, .rates = SNDRV_PCM_RATE_8000_96000, .formats = CS42L51_FORMATS, }, .ops = &cs42l51_dai_ops, }; static int cs42l51_probe(struct snd_soc_codec *codec) { struct cs42l51_private *cs42l51 = snd_soc_codec_get_drvdata(codec); struct snd_soc_dapm_context *dapm = &codec->dapm; int ret, reg; codec->control_data = cs42l51->control_data; ret = 
cs42l51_fill_cache(codec); if (ret < 0) { dev_err(codec->dev, "failed to fill register cache\n"); return ret; } ret = snd_soc_codec_set_cache_io(codec, 8, 8, cs42l51->control_type); if (ret < 0) { dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret); return ret; } /* * DAC configuration * - Use signal processor * - auto mute * - vol changes immediate * - no de-emphasize */ reg = CS42L51_DAC_CTL_DATA_SEL(1) | CS42L51_DAC_CTL_AMUTE | CS42L51_DAC_CTL_DACSZ(0); ret = snd_soc_write(codec, CS42L51_DAC_CTL, reg); if (ret < 0) return ret; snd_soc_add_controls(codec, cs42l51_snd_controls, ARRAY_SIZE(cs42l51_snd_controls)); snd_soc_dapm_new_controls(dapm, cs42l51_dapm_widgets, ARRAY_SIZE(cs42l51_dapm_widgets)); snd_soc_dapm_add_routes(dapm, cs42l51_routes, ARRAY_SIZE(cs42l51_routes)); return 0; } static struct snd_soc_codec_driver soc_codec_device_cs42l51 = { .probe = cs42l51_probe, .reg_cache_size = CS42L51_NUMREGS, .reg_word_size = sizeof(u8), }; static int cs42l51_i2c_probe(struct i2c_client *i2c_client, const struct i2c_device_id *id) { struct cs42l51_private *cs42l51; int ret; /* Verify that we have a CS42L51 */ ret = i2c_smbus_read_byte_data(i2c_client, CS42L51_CHIP_REV_ID); if (ret < 0) { dev_err(&i2c_client->dev, "failed to read I2C\n"); goto error; } if ((ret != CS42L51_MK_CHIP_REV(CS42L51_CHIP_ID, CS42L51_CHIP_REV_A)) && (ret != CS42L51_MK_CHIP_REV(CS42L51_CHIP_ID, CS42L51_CHIP_REV_B))) { dev_err(&i2c_client->dev, "Invalid chip id\n"); ret = -ENODEV; goto error; } dev_info(&i2c_client->dev, "found device cs42l51 rev %d\n", ret & 7); cs42l51 = kzalloc(sizeof(struct cs42l51_private), GFP_KERNEL); if (!cs42l51) { dev_err(&i2c_client->dev, "could not allocate codec\n"); return -ENOMEM; } i2c_set_clientdata(i2c_client, cs42l51); cs42l51->control_data = i2c_client; cs42l51->control_type = SND_SOC_I2C; ret = snd_soc_register_codec(&i2c_client->dev, &soc_codec_device_cs42l51, &cs42l51_dai, 1); if (ret < 0) kfree(cs42l51); error: return ret; } static int 
cs42l51_i2c_remove(struct i2c_client *client) { struct cs42l51_private *cs42l51 = i2c_get_clientdata(client); snd_soc_unregister_codec(&client->dev); kfree(cs42l51); return 0; } static const struct i2c_device_id cs42l51_id[] = { {"cs42l51", 0}, {} }; MODULE_DEVICE_TABLE(i2c, cs42l51_id); static struct i2c_driver cs42l51_i2c_driver = { .driver = { .name = "cs42l51-codec", .owner = THIS_MODULE, }, .id_table = cs42l51_id, .probe = cs42l51_i2c_probe, .remove = cs42l51_i2c_remove, }; static int __init cs42l51_init(void) { int ret; ret = i2c_add_driver(&cs42l51_i2c_driver); if (ret != 0) { printk(KERN_ERR "%s: can't add i2c driver\n", __func__); return ret; } return 0; } module_init(cs42l51_init); static void __exit cs42l51_exit(void) { i2c_del_driver(&cs42l51_i2c_driver); } module_exit(cs42l51_exit); MODULE_AUTHOR("Arnaud Patard <arnaud.patard@rtp-net.org>"); MODULE_DESCRIPTION("Cirrus Logic CS42L51 ALSA SoC Codec Driver"); MODULE_LICENSE("GPL");
gpl-2.0
arshull/GalaTab3_KK_Kernel_T310
drivers/ata/pata_oldpiix.c
3543
7615
/* * pata_oldpiix.c - Intel PATA/SATA controllers * * (C) 2005 Red Hat * * Some parts based on ata_piix.c by Jeff Garzik and others. * * Early PIIX differs significantly from the later PIIX as it lacks * SITRE and the slave timing registers. This means that you have to * set timing per channel, or be clever. Libata tells us whenever it * does drive selection and we use this to reload the timings. * * Because of these behaviour differences PIIX gets its own driver module. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <linux/device.h> #include <scsi/scsi_host.h> #include <linux/libata.h> #include <linux/ata.h> #define DRV_NAME "pata_oldpiix" #define DRV_VERSION "0.5.5" /** * oldpiix_pre_reset - probe begin * @link: ATA link * @deadline: deadline jiffies for the operation * * Set up cable type and use generic probe init */ static int oldpiix_pre_reset(struct ata_link *link, unsigned long deadline) { struct ata_port *ap = link->ap; struct pci_dev *pdev = to_pci_dev(ap->host->dev); static const struct pci_bits oldpiix_enable_bits[] = { { 0x41U, 1U, 0x80UL, 0x80UL }, /* port 0 */ { 0x43U, 1U, 0x80UL, 0x80UL }, /* port 1 */ }; if (!pci_test_config_bits(pdev, &oldpiix_enable_bits[ap->port_no])) return -ENOENT; return ata_sff_prereset(link, deadline); } /** * oldpiix_set_piomode - Initialize host controller PATA PIO timings * @ap: Port whose timings we are configuring * @adev: Device whose timings we are configuring * * Set PIO mode for device, in host controller PCI config space. * * LOCKING: * None (inherited from caller). */ static void oldpiix_set_piomode (struct ata_port *ap, struct ata_device *adev) { unsigned int pio = adev->pio_mode - XFER_PIO_0; struct pci_dev *dev = to_pci_dev(ap->host->dev); unsigned int idetm_port= ap->port_no ? 
0x42 : 0x40; u16 idetm_data; int control = 0; /* * See Intel Document 298600-004 for the timing programing rules * for PIIX/ICH. Note that the early PIIX does not have the slave * timing port at 0x44. */ static const /* ISP RTC */ u8 timings[][2] = { { 0, 0 }, { 0, 0 }, { 1, 0 }, { 2, 1 }, { 2, 3 }, }; if (pio > 1) control |= 1; /* TIME */ if (ata_pio_need_iordy(adev)) control |= 2; /* IE */ /* Intel specifies that the prefetch/posting is for disk only */ if (adev->class == ATA_DEV_ATA) control |= 4; /* PPE */ pci_read_config_word(dev, idetm_port, &idetm_data); /* * Set PPE, IE and TIME as appropriate. * Clear the other drive's timing bits. */ if (adev->devno == 0) { idetm_data &= 0xCCE0; idetm_data |= control; } else { idetm_data &= 0xCC0E; idetm_data |= (control << 4); } idetm_data |= (timings[pio][0] << 12) | (timings[pio][1] << 8); pci_write_config_word(dev, idetm_port, idetm_data); /* Track which port is configured */ ap->private_data = adev; } /** * oldpiix_set_dmamode - Initialize host controller PATA DMA timings * @ap: Port whose timings we are configuring * @adev: Device to program * * Set MWDMA mode for device, in host controller PCI config space. * * LOCKING: * None (inherited from caller). */ static void oldpiix_set_dmamode (struct ata_port *ap, struct ata_device *adev) { struct pci_dev *dev = to_pci_dev(ap->host->dev); u8 idetm_port = ap->port_no ? 0x42 : 0x40; u16 idetm_data; static const /* ISP RTC */ u8 timings[][2] = { { 0, 0 }, { 0, 0 }, { 1, 0 }, { 2, 1 }, { 2, 3 }, }; /* * MWDMA is driven by the PIO timings. We must also enable * IORDY unconditionally along with TIME1. PPE has already * been set when the PIO timing was set. 
*/ unsigned int mwdma = adev->dma_mode - XFER_MW_DMA_0; unsigned int control; const unsigned int needed_pio[3] = { XFER_PIO_0, XFER_PIO_3, XFER_PIO_4 }; int pio = needed_pio[mwdma] - XFER_PIO_0; pci_read_config_word(dev, idetm_port, &idetm_data); control = 3; /* IORDY|TIME0 */ /* Intel specifies that the PPE functionality is for disk only */ if (adev->class == ATA_DEV_ATA) control |= 4; /* PPE enable */ /* If the drive MWDMA is faster than it can do PIO then we must force PIO into PIO0 */ if (adev->pio_mode < needed_pio[mwdma]) /* Enable DMA timing only */ control |= 8; /* PIO cycles in PIO0 */ /* Mask out the relevant control and timing bits we will load. Also clear the other drive TIME register as a precaution */ if (adev->devno == 0) { idetm_data &= 0xCCE0; idetm_data |= control; } else { idetm_data &= 0xCC0E; idetm_data |= (control << 4); } idetm_data |= (timings[pio][0] << 12) | (timings[pio][1] << 8); pci_write_config_word(dev, idetm_port, idetm_data); /* Track which port is configured */ ap->private_data = adev; } /** * oldpiix_qc_issue - command issue * @qc: command pending * * Called when the libata layer is about to issue a command. We wrap * this interface so that we can load the correct ATA timings if * necessary. Our logic also clears TIME0/TIME1 for the other device so * that, even if we get this wrong, cycles to the other device will * be made PIO0. 
*/ static unsigned int oldpiix_qc_issue(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; struct ata_device *adev = qc->dev; if (adev != ap->private_data) { oldpiix_set_piomode(ap, adev); if (ata_dma_enabled(adev)) oldpiix_set_dmamode(ap, adev); } return ata_bmdma_qc_issue(qc); } static struct scsi_host_template oldpiix_sht = { ATA_BMDMA_SHT(DRV_NAME), }; static struct ata_port_operations oldpiix_pata_ops = { .inherits = &ata_bmdma_port_ops, .qc_issue = oldpiix_qc_issue, .cable_detect = ata_cable_40wire, .set_piomode = oldpiix_set_piomode, .set_dmamode = oldpiix_set_dmamode, .prereset = oldpiix_pre_reset, }; /** * oldpiix_init_one - Register PIIX ATA PCI device with kernel services * @pdev: PCI device to register * @ent: Entry in oldpiix_pci_tbl matching with @pdev * * Called from kernel PCI layer. We probe for combined mode (sigh), * and then hand over control to libata, for it to do the rest. * * LOCKING: * Inherited from PCI layer (may sleep). * * RETURNS: * Zero on success, or -ERRNO value. 
*/ static int oldpiix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) { static int printed_version; static const struct ata_port_info info = { .flags = ATA_FLAG_SLAVE_POSS, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA12_ONLY, .port_ops = &oldpiix_pata_ops, }; const struct ata_port_info *ppi[] = { &info, NULL }; if (!printed_version++) dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); return ata_pci_bmdma_init_one(pdev, ppi, &oldpiix_sht, NULL, 0); } static const struct pci_device_id oldpiix_pci_tbl[] = { { PCI_VDEVICE(INTEL, 0x1230), }, { } /* terminate list */ }; static struct pci_driver oldpiix_pci_driver = { .name = DRV_NAME, .id_table = oldpiix_pci_tbl, .probe = oldpiix_init_one, .remove = ata_pci_remove_one, #ifdef CONFIG_PM .suspend = ata_pci_device_suspend, .resume = ata_pci_device_resume, #endif }; static int __init oldpiix_init(void) { return pci_register_driver(&oldpiix_pci_driver); } static void __exit oldpiix_exit(void) { pci_unregister_driver(&oldpiix_pci_driver); } module_init(oldpiix_init); module_exit(oldpiix_exit); MODULE_AUTHOR("Alan Cox"); MODULE_DESCRIPTION("SCSI low-level driver for early PIIX series controllers"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, oldpiix_pci_tbl); MODULE_VERSION(DRV_VERSION);
gpl-2.0
Dr-Shadow/android_kernel_mt6589
arch/arm/mach-at91/board-csb637.c
4823
3540
/* * linux/arch/arm/mach-at91/board-csb637.c * * Copyright (C) 2005 SAN People * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/types.h> #include <linux/init.h> #include <linux/gpio.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/mtd/physmap.h> #include <asm/setup.h> #include <asm/mach-types.h> #include <asm/irq.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <asm/mach/irq.h> #include <mach/hardware.h> #include <mach/board.h> #include "generic.h" static void __init csb637_init_early(void) { /* Initialize processor: 3.6864 MHz crystal */ at91_initialize(3686400); /* DBGU on ttyS0. 
(Rx & Tx only) */ at91_register_uart(0, 0, 0); /* make console=ttyS0 (ie, DBGU) the default */ at91_set_serial_console(0); } static struct macb_platform_data __initdata csb637_eth_data = { .phy_irq_pin = AT91_PIN_PC0, .is_rmii = 0, }; static struct at91_usbh_data __initdata csb637_usbh_data = { .ports = 2, .vbus_pin = {-EINVAL, -EINVAL}, .overcurrent_pin= {-EINVAL, -EINVAL}, }; static struct at91_udc_data __initdata csb637_udc_data = { .vbus_pin = AT91_PIN_PB28, .pullup_pin = AT91_PIN_PB1, }; #define CSB_FLASH_BASE AT91_CHIPSELECT_0 #define CSB_FLASH_SIZE SZ_16M static struct mtd_partition csb_flash_partitions[] = { { .name = "uMON flash", .offset = 0, .size = MTDPART_SIZ_FULL, .mask_flags = MTD_WRITEABLE, /* read only */ } }; static struct physmap_flash_data csb_flash_data = { .width = 2, .parts = csb_flash_partitions, .nr_parts = ARRAY_SIZE(csb_flash_partitions), }; static struct resource csb_flash_resources[] = { { .start = CSB_FLASH_BASE, .end = CSB_FLASH_BASE + CSB_FLASH_SIZE - 1, .flags = IORESOURCE_MEM, } }; static struct platform_device csb_flash = { .name = "physmap-flash", .id = 0, .dev = { .platform_data = &csb_flash_data, }, .resource = csb_flash_resources, .num_resources = ARRAY_SIZE(csb_flash_resources), }; static struct gpio_led csb_leds[] = { { /* "d1", red */ .name = "d1", .gpio = AT91_PIN_PB2, .active_low = 1, .default_trigger = "heartbeat", }, }; static void __init csb637_board_init(void) { /* LED(s) */ at91_gpio_leds(csb_leds, ARRAY_SIZE(csb_leds)); /* Serial */ at91_add_device_serial(); /* Ethernet */ at91_add_device_eth(&csb637_eth_data); /* USB Host */ at91_add_device_usbh(&csb637_usbh_data); /* USB Device */ at91_add_device_udc(&csb637_udc_data); /* I2C */ at91_add_device_i2c(NULL, 0); /* SPI */ at91_add_device_spi(NULL, 0); /* NOR flash */ platform_device_register(&csb_flash); } MACHINE_START(CSB637, "Cogent CSB637") /* Maintainer: Bill Gatliff */ .timer = &at91rm9200_timer, .map_io = at91_map_io, .init_early = csb637_init_early, .init_irq 
= at91_init_irq_default, .init_machine = csb637_board_init, MACHINE_END
gpl-2.0
jrior001/evitaul-3.4.100-HTC
arch/arm/mach-at91/board-foxg20.c
4823
6517
/* * Copyright (C) 2005 SAN People * Copyright (C) 2008 Atmel * Copyright (C) 2010 Lee McLoughlin - lee@lmmrtech.com * Copyright (C) 2010 Sergio Tanzilli - tanzilli@acmesystems.it * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/types.h> #include <linux/init.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/spi/spi.h> #include <linux/spi/at73c213.h> #include <linux/gpio.h> #include <linux/gpio_keys.h> #include <linux/input.h> #include <linux/clk.h> #include <linux/w1-gpio.h> #include <mach/hardware.h> #include <asm/setup.h> #include <asm/mach-types.h> #include <asm/irq.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <asm/mach/irq.h> #include <mach/board.h> #include <mach/at91sam9_smc.h> #include "sam9_smc.h" #include "generic.h" /* * The FOX Board G20 hardware comes as the "Netus G20" board with * just the cpu, ram, dataflash and two header connectors. * This is plugged into the FOX Board which provides the ethernet, * usb, rtc, leds, switch, ... * * For more info visit: http://www.acmesystems.it/foxg20 */ static void __init foxg20_init_early(void) { /* Initialize processor: 18.432 MHz crystal */ at91_initialize(18432000); /* DBGU on ttyS0. (Rx & Tx only) */ at91_register_uart(0, 0, 0); /* USART0 on ttyS1. 
(Rx, Tx, CTS, RTS, DTR, DSR, DCD, RI) */ at91_register_uart(AT91SAM9260_ID_US0, 1, ATMEL_UART_CTS | ATMEL_UART_RTS | ATMEL_UART_DTR | ATMEL_UART_DSR | ATMEL_UART_DCD | ATMEL_UART_RI); /* USART1 on ttyS2. (Rx, Tx, RTS, CTS) */ at91_register_uart(AT91SAM9260_ID_US1, 2, ATMEL_UART_CTS | ATMEL_UART_RTS); /* USART2 on ttyS3. (Rx & Tx only) */ at91_register_uart(AT91SAM9260_ID_US2, 3, 0); /* USART3 on ttyS4. (Rx, Tx, RTS, CTS) */ at91_register_uart(AT91SAM9260_ID_US3, 4, ATMEL_UART_CTS | ATMEL_UART_RTS); /* USART4 on ttyS5. (Rx & Tx only) */ at91_register_uart(AT91SAM9260_ID_US4, 5, 0); /* USART5 on ttyS6. (Rx & Tx only) */ at91_register_uart(AT91SAM9260_ID_US5, 6, 0); /* set serial console to ttyS0 (ie, DBGU) */ at91_set_serial_console(0); /* Set the internal pull-up resistor on DRXD */ at91_set_A_periph(AT91_PIN_PB14, 1); } /* * USB Host port */ static struct at91_usbh_data __initdata foxg20_usbh_data = { .ports = 2, .vbus_pin = {-EINVAL, -EINVAL}, .overcurrent_pin= {-EINVAL, -EINVAL}, }; /* * USB Device port */ static struct at91_udc_data __initdata foxg20_udc_data = { .vbus_pin = AT91_PIN_PC6, .pullup_pin = -EINVAL, /* pull-up driven by UDC */ }; /* * SPI devices. 
*/ static struct spi_board_info foxg20_spi_devices[] = { #if !defined(CONFIG_MMC_AT91) { .modalias = "mtd_dataflash", .chip_select = 1, .max_speed_hz = 15 * 1000 * 1000, .bus_num = 0, }, #endif }; /* * MACB Ethernet device */ static struct macb_platform_data __initdata foxg20_macb_data = { .phy_irq_pin = AT91_PIN_PA7, .is_rmii = 1, }; /* * MCI (SD/MMC) * det_pin, wp_pin and vcc_pin are not connected */ static struct at91_mmc_data __initdata foxg20_mmc_data = { .slot_b = 1, .wire4 = 1, .det_pin = -EINVAL, .wp_pin = -EINVAL, .vcc_pin = -EINVAL, }; /* * LEDs */ static struct gpio_led foxg20_leds[] = { { /* user led, red */ .name = "user_led", .gpio = AT91_PIN_PC7, .active_low = 0, .default_trigger = "heartbeat", }, }; /* * GPIO Buttons */ #if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) static struct gpio_keys_button foxg20_buttons[] = { { .gpio = AT91_PIN_PC4, .code = BTN_1, .desc = "Button 1", .active_low = 1, .wakeup = 1, }, }; static struct gpio_keys_platform_data foxg20_button_data = { .buttons = foxg20_buttons, .nbuttons = ARRAY_SIZE(foxg20_buttons), }; static struct platform_device foxg20_button_device = { .name = "gpio-keys", .id = -1, .num_resources = 0, .dev = { .platform_data = &foxg20_button_data, } }; static void __init foxg20_add_device_buttons(void) { at91_set_gpio_input(AT91_PIN_PC4, 1); /* btn1 */ at91_set_deglitch(AT91_PIN_PC4, 1); platform_device_register(&foxg20_button_device); } #else static void __init foxg20_add_device_buttons(void) {} #endif #if defined(CONFIG_W1_MASTER_GPIO) || defined(CONFIG_W1_MASTER_GPIO_MODULE) static struct w1_gpio_platform_data w1_gpio_pdata = { /* If you choose to use a pin other than PB16 it needs to be 3.3V */ .pin = AT91_PIN_PB16, .is_open_drain = 1, }; static struct platform_device w1_device = { .name = "w1-gpio", .id = -1, .dev.platform_data = &w1_gpio_pdata, }; static void __init at91_add_device_w1(void) { at91_set_GPIO_periph(w1_gpio_pdata.pin, 1); at91_set_multi_drive(w1_gpio_pdata.pin, 
1); platform_device_register(&w1_device); } #endif static struct i2c_board_info __initdata foxg20_i2c_devices[] = { { I2C_BOARD_INFO("24c512", 0x50), }, }; static void __init foxg20_board_init(void) { /* Serial */ at91_add_device_serial(); /* USB Host */ at91_add_device_usbh(&foxg20_usbh_data); /* USB Device */ at91_add_device_udc(&foxg20_udc_data); /* SPI */ at91_add_device_spi(foxg20_spi_devices, ARRAY_SIZE(foxg20_spi_devices)); /* Ethernet */ at91_add_device_eth(&foxg20_macb_data); /* MMC */ at91_add_device_mmc(0, &foxg20_mmc_data); /* I2C */ at91_add_device_i2c(foxg20_i2c_devices, ARRAY_SIZE(foxg20_i2c_devices)); /* LEDs */ at91_gpio_leds(foxg20_leds, ARRAY_SIZE(foxg20_leds)); /* Push Buttons */ foxg20_add_device_buttons(); #if defined(CONFIG_W1_MASTER_GPIO) || defined(CONFIG_W1_MASTER_GPIO_MODULE) at91_add_device_w1(); #endif } MACHINE_START(ACMENETUSFOXG20, "Acme Systems srl FOX Board G20") /* Maintainer: Sergio Tanzilli */ .timer = &at91sam926x_timer, .map_io = at91_map_io, .init_early = foxg20_init_early, .init_irq = at91_init_irq_default, .init_machine = foxg20_board_init, MACHINE_END
gpl-2.0
beats4x/kernel_lge_g3-v10m
drivers/media/video/sh_mobile_csi2.c
5079
10077
/* * Driver for the SH-Mobile MIPI CSI-2 unit * * Copyright (C) 2010, Guennadi Liakhovetski <g.liakhovetski@gmx.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/delay.h> #include <linux/i2c.h> #include <linux/io.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/slab.h> #include <linux/videodev2.h> #include <linux/module.h> #include <media/sh_mobile_ceu.h> #include <media/sh_mobile_csi2.h> #include <media/soc_camera.h> #include <media/soc_mediabus.h> #include <media/v4l2-common.h> #include <media/v4l2-dev.h> #include <media/v4l2-device.h> #include <media/v4l2-mediabus.h> #include <media/v4l2-subdev.h> #define SH_CSI2_TREF 0x00 #define SH_CSI2_SRST 0x04 #define SH_CSI2_PHYCNT 0x08 #define SH_CSI2_CHKSUM 0x0C #define SH_CSI2_VCDT 0x10 struct sh_csi2 { struct v4l2_subdev subdev; struct list_head list; unsigned int irq; unsigned long mipi_flags; void __iomem *base; struct platform_device *pdev; struct sh_csi2_client_config *client; }; static int sh_csi2_try_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf) { struct sh_csi2 *priv = container_of(sd, struct sh_csi2, subdev); struct sh_csi2_pdata *pdata = priv->pdev->dev.platform_data; if (mf->width > 8188) mf->width = 8188; else if (mf->width & 1) mf->width &= ~1; switch (pdata->type) { case SH_CSI2C: switch (mf->code) { case V4L2_MBUS_FMT_UYVY8_2X8: /* YUV422 */ case V4L2_MBUS_FMT_YUYV8_1_5X8: /* YUV420 */ case V4L2_MBUS_FMT_Y8_1X8: /* RAW8 */ case V4L2_MBUS_FMT_SBGGR8_1X8: case V4L2_MBUS_FMT_SGRBG8_1X8: break; default: /* All MIPI CSI-2 devices must support one of primary formats */ mf->code = V4L2_MBUS_FMT_YUYV8_2X8; } break; case SH_CSI2I: switch (mf->code) { case V4L2_MBUS_FMT_Y8_1X8: /* RAW8 */ case V4L2_MBUS_FMT_SBGGR8_1X8: case V4L2_MBUS_FMT_SGRBG8_1X8: case V4L2_MBUS_FMT_SBGGR10_1X10: /* RAW10 */ case 
V4L2_MBUS_FMT_SBGGR12_1X12: /* RAW12 */ break; default: /* All MIPI CSI-2 devices must support one of primary formats */ mf->code = V4L2_MBUS_FMT_SBGGR8_1X8; } break; } return 0; } /* * We have done our best in try_fmt to try and tell the sensor, which formats * we support. If now the configuration is unsuitable for us we can only * error out. */ static int sh_csi2_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf) { struct sh_csi2 *priv = container_of(sd, struct sh_csi2, subdev); u32 tmp = (priv->client->channel & 3) << 8; dev_dbg(sd->v4l2_dev->dev, "%s(%u)\n", __func__, mf->code); if (mf->width > 8188 || mf->width & 1) return -EINVAL; switch (mf->code) { case V4L2_MBUS_FMT_UYVY8_2X8: tmp |= 0x1e; /* YUV422 8 bit */ break; case V4L2_MBUS_FMT_YUYV8_1_5X8: tmp |= 0x18; /* YUV420 8 bit */ break; case V4L2_MBUS_FMT_RGB555_2X8_PADHI_BE: tmp |= 0x21; /* RGB555 */ break; case V4L2_MBUS_FMT_RGB565_2X8_BE: tmp |= 0x22; /* RGB565 */ break; case V4L2_MBUS_FMT_Y8_1X8: case V4L2_MBUS_FMT_SBGGR8_1X8: case V4L2_MBUS_FMT_SGRBG8_1X8: tmp |= 0x2a; /* RAW8 */ break; default: return -EINVAL; } iowrite32(tmp, priv->base + SH_CSI2_VCDT); return 0; } static int sh_csi2_g_mbus_config(struct v4l2_subdev *sd, struct v4l2_mbus_config *cfg) { cfg->flags = V4L2_MBUS_PCLK_SAMPLE_RISING | V4L2_MBUS_HSYNC_ACTIVE_HIGH | V4L2_MBUS_VSYNC_ACTIVE_HIGH | V4L2_MBUS_MASTER | V4L2_MBUS_DATA_ACTIVE_HIGH; cfg->type = V4L2_MBUS_PARALLEL; return 0; } static int sh_csi2_s_mbus_config(struct v4l2_subdev *sd, const struct v4l2_mbus_config *cfg) { struct sh_csi2 *priv = container_of(sd, struct sh_csi2, subdev); struct soc_camera_device *icd = v4l2_get_subdev_hostdata(sd); struct v4l2_subdev *client_sd = soc_camera_to_subdev(icd); struct v4l2_mbus_config client_cfg = {.type = V4L2_MBUS_CSI2, .flags = priv->mipi_flags}; return v4l2_subdev_call(client_sd, video, s_mbus_config, &client_cfg); } static struct v4l2_subdev_video_ops sh_csi2_subdev_video_ops = { .s_mbus_fmt = sh_csi2_s_fmt, .try_mbus_fmt = 
sh_csi2_try_fmt, .g_mbus_config = sh_csi2_g_mbus_config, .s_mbus_config = sh_csi2_s_mbus_config, }; static void sh_csi2_hwinit(struct sh_csi2 *priv) { struct sh_csi2_pdata *pdata = priv->pdev->dev.platform_data; __u32 tmp = 0x10; /* Enable MIPI CSI clock lane */ /* Reflect registers immediately */ iowrite32(0x00000001, priv->base + SH_CSI2_TREF); /* reset CSI2 harware */ iowrite32(0x00000001, priv->base + SH_CSI2_SRST); udelay(5); iowrite32(0x00000000, priv->base + SH_CSI2_SRST); switch (pdata->type) { case SH_CSI2C: if (priv->client->lanes == 1) tmp |= 1; else /* Default - both lanes */ tmp |= 3; break; case SH_CSI2I: if (!priv->client->lanes || priv->client->lanes > 4) /* Default - all 4 lanes */ tmp |= 0xf; else tmp |= (1 << priv->client->lanes) - 1; } if (priv->client->phy == SH_CSI2_PHY_MAIN) tmp |= 0x8000; iowrite32(tmp, priv->base + SH_CSI2_PHYCNT); tmp = 0; if (pdata->flags & SH_CSI2_ECC) tmp |= 2; if (pdata->flags & SH_CSI2_CRC) tmp |= 1; iowrite32(tmp, priv->base + SH_CSI2_CHKSUM); } static int sh_csi2_client_connect(struct sh_csi2 *priv) { struct sh_csi2_pdata *pdata = priv->pdev->dev.platform_data; struct soc_camera_device *icd = v4l2_get_subdev_hostdata(&priv->subdev); struct v4l2_subdev *client_sd = soc_camera_to_subdev(icd); struct device *dev = v4l2_get_subdevdata(&priv->subdev); struct v4l2_mbus_config cfg; unsigned long common_flags, csi2_flags; int i, ret; if (priv->client) return -EBUSY; for (i = 0; i < pdata->num_clients; i++) if (&pdata->clients[i].pdev->dev == icd->pdev) break; dev_dbg(dev, "%s(%p): found #%d\n", __func__, dev, i); if (i == pdata->num_clients) return -ENODEV; /* Check if we can support this camera */ csi2_flags = V4L2_MBUS_CSI2_CONTINUOUS_CLOCK | V4L2_MBUS_CSI2_1_LANE; switch (pdata->type) { case SH_CSI2C: if (pdata->clients[i].lanes != 1) csi2_flags |= V4L2_MBUS_CSI2_2_LANE; break; case SH_CSI2I: switch (pdata->clients[i].lanes) { default: csi2_flags |= V4L2_MBUS_CSI2_4_LANE; case 3: csi2_flags |= V4L2_MBUS_CSI2_3_LANE; case 
2: csi2_flags |= V4L2_MBUS_CSI2_2_LANE; } } cfg.type = V4L2_MBUS_CSI2; ret = v4l2_subdev_call(client_sd, video, g_mbus_config, &cfg); if (ret == -ENOIOCTLCMD) common_flags = csi2_flags; else if (!ret) common_flags = soc_mbus_config_compatible(&cfg, csi2_flags); else common_flags = 0; if (!common_flags) return -EINVAL; /* All good: camera MIPI configuration supported */ priv->mipi_flags = common_flags; priv->client = pdata->clients + i; pm_runtime_get_sync(dev); sh_csi2_hwinit(priv); return 0; } static void sh_csi2_client_disconnect(struct sh_csi2 *priv) { if (!priv->client) return; priv->client = NULL; pm_runtime_put(v4l2_get_subdevdata(&priv->subdev)); } static int sh_csi2_s_power(struct v4l2_subdev *sd, int on) { struct sh_csi2 *priv = container_of(sd, struct sh_csi2, subdev); if (on) return sh_csi2_client_connect(priv); sh_csi2_client_disconnect(priv); return 0; } static struct v4l2_subdev_core_ops sh_csi2_subdev_core_ops = { .s_power = sh_csi2_s_power, }; static struct v4l2_subdev_ops sh_csi2_subdev_ops = { .core = &sh_csi2_subdev_core_ops, .video = &sh_csi2_subdev_video_ops, }; static __devinit int sh_csi2_probe(struct platform_device *pdev) { struct resource *res; unsigned int irq; int ret; struct sh_csi2 *priv; /* Platform data specify the PHY, lanes, ECC, CRC */ struct sh_csi2_pdata *pdata = pdev->dev.platform_data; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); /* Interrupt unused so far */ irq = platform_get_irq(pdev, 0); if (!res || (int)irq <= 0 || !pdata) { dev_err(&pdev->dev, "Not enough CSI2 platform resources.\n"); return -ENODEV; } /* TODO: Add support for CSI2I. Careful: different register layout! 
*/ if (pdata->type != SH_CSI2C) { dev_err(&pdev->dev, "Only CSI2C supported ATM.\n"); return -EINVAL; } priv = kzalloc(sizeof(struct sh_csi2), GFP_KERNEL); if (!priv) return -ENOMEM; priv->irq = irq; if (!request_mem_region(res->start, resource_size(res), pdev->name)) { dev_err(&pdev->dev, "CSI2 register region already claimed\n"); ret = -EBUSY; goto ereqreg; } priv->base = ioremap(res->start, resource_size(res)); if (!priv->base) { ret = -ENXIO; dev_err(&pdev->dev, "Unable to ioremap CSI2 registers.\n"); goto eremap; } priv->pdev = pdev; platform_set_drvdata(pdev, priv); v4l2_subdev_init(&priv->subdev, &sh_csi2_subdev_ops); v4l2_set_subdevdata(&priv->subdev, &pdev->dev); snprintf(priv->subdev.name, V4L2_SUBDEV_NAME_SIZE, "%s.mipi-csi", dev_name(pdata->v4l2_dev->dev)); ret = v4l2_device_register_subdev(pdata->v4l2_dev, &priv->subdev); dev_dbg(&pdev->dev, "%s(%p): ret(register_subdev) = %d\n", __func__, priv, ret); if (ret < 0) goto esdreg; pm_runtime_enable(&pdev->dev); dev_dbg(&pdev->dev, "CSI2 probed.\n"); return 0; esdreg: iounmap(priv->base); eremap: release_mem_region(res->start, resource_size(res)); ereqreg: kfree(priv); return ret; } static __devexit int sh_csi2_remove(struct platform_device *pdev) { struct sh_csi2 *priv = platform_get_drvdata(pdev); struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); v4l2_device_unregister_subdev(&priv->subdev); pm_runtime_disable(&pdev->dev); iounmap(priv->base); release_mem_region(res->start, resource_size(res)); platform_set_drvdata(pdev, NULL); kfree(priv); return 0; } static struct platform_driver __refdata sh_csi2_pdrv = { .remove = __devexit_p(sh_csi2_remove), .probe = sh_csi2_probe, .driver = { .name = "sh-mobile-csi2", .owner = THIS_MODULE, }, }; module_platform_driver(sh_csi2_pdrv); MODULE_DESCRIPTION("SH-Mobile MIPI CSI-2 driver"); MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:sh-mobile-csi2");
gpl-2.0
iHateWEBos/vigor_aosp_kernel
arch/um/sys-ppc/ptrace.c
9431
1432
#include "linux/sched.h" #include "asm/ptrace.h" int putreg(struct task_struct *child, unsigned long regno, unsigned long value) { child->thread.process_regs.regs[regno >> 2] = value; return 0; } int poke_user(struct task_struct *child, long addr, long data) { if ((addr & 3) || addr < 0) return -EIO; if (addr < MAX_REG_OFFSET) return putreg(child, addr, data); else if((addr >= offsetof(struct user, u_debugreg[0])) && (addr <= offsetof(struct user, u_debugreg[7]))){ addr -= offsetof(struct user, u_debugreg[0]); addr = addr >> 2; if((addr == 4) || (addr == 5)) return -EIO; child->thread.arch.debugregs[addr] = data; return 0; } return -EIO; } unsigned long getreg(struct task_struct *child, unsigned long regno) { unsigned long retval = ~0UL; retval &= child->thread.process_regs.regs[regno >> 2]; return retval; } int peek_user(struct task_struct *child, long addr, long data) { /* read the word at location addr in the USER area. */ unsigned long tmp; if ((addr & 3) || addr < 0) return -EIO; tmp = 0; /* Default return condition */ if(addr < MAX_REG_OFFSET){ tmp = getreg(child, addr); } else if((addr >= offsetof(struct user, u_debugreg[0])) && (addr <= offsetof(struct user, u_debugreg[7]))){ addr -= offsetof(struct user, u_debugreg[0]); addr = addr >> 2; tmp = child->thread.arch.debugregs[addr]; } return put_user(tmp, (unsigned long *) data); }
gpl-2.0
bestmjh47/android_kernel_kttech_e100_kk
arch/sh/drivers/pci/fixups-cayman.c
9687
2200
#include <linux/kernel.h> #include <linux/init.h> #include <linux/pci.h> #include <linux/types.h> #include <cpu/irq.h> #include "pci-sh5.h" int __init pcibios_map_platform_irq(const struct pci_dev *dev, u8 slot, u8 pin) { int result = -1; /* The complication here is that the PCI IRQ lines from the Cayman's 2 5V slots get into the CPU via a different path from the IRQ lines from the 3 3.3V slots. Thus, we have to detect whether the card's interrupts go via the 5V or 3.3V path, i.e. the 'bridge swizzling' at the point where we cross from 5V to 3.3V is not the normal case. The added complication is that we don't know that the 5V slots are always bus 2, because a card containing a PCI-PCI bridge may be plugged into a 3.3V slot, and this changes the bus numbering. Also, the Cayman has an intermediate PCI bus that goes a custom expansion board header (and to the secondary bridge). This bus has never been used in practice. The 1ary onboard PCI-PCI bridge is device 3 on bus 0 The 2ary onboard PCI-PCI bridge is device 0 on the 2ary bus of the 1ary bridge. */ struct slot_pin { int slot; int pin; } path[4]; int i=0; while (dev->bus->number > 0) { slot = path[i].slot = PCI_SLOT(dev->devfn); pin = path[i].pin = pci_swizzle_interrupt_pin(dev, pin); dev = dev->bus->self; i++; if (i > 3) panic("PCI path to root bus too long!\n"); } slot = PCI_SLOT(dev->devfn); /* This is the slot on bus 0 through which the device is eventually reachable. */ /* Now work back up. */ if ((slot < 3) || (i == 0)) { /* Bus 0 (incl. PCI-PCI bridge itself) : perform the final swizzle now. */ result = IRQ_INTA + pci_swizzle_interrupt_pin(dev, pin) - 1; } else { i--; slot = path[i].slot; pin = path[i].pin; if (slot > 0) { panic("PCI expansion bus device found - not handled!\n"); } else { if (i > 0) { /* 5V slots */ i--; slot = path[i].slot; pin = path[i].pin; /* 'pin' was swizzled earlier wrt slot, don't do it again. 
*/ result = IRQ_P2INTA + (pin - 1); } else { /* IRQ for 2ary PCI-PCI bridge : unused */ result = -1; } } } return result; }
gpl-2.0
Split-Screen/android_kernel_samsung_trlte
net/l2tp/l2tp_ppp.c
216
48562
/***************************************************************************** * Linux PPP over L2TP (PPPoX/PPPoL2TP) Sockets * * PPPoX --- Generic PPP encapsulation socket family * PPPoL2TP --- PPP over L2TP (RFC 2661) * * Version: 2.0.0 * * Authors: James Chapman (jchapman@katalix.com) * * Based on original work by Martijn van Oosterhout <kleptog@svana.org> * * License: * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * */ /* This driver handles only L2TP data frames; control frames are handled by a * userspace application. * * To send data in an L2TP session, userspace opens a PPPoL2TP socket and * attaches it to a bound UDP socket with local tunnel_id / session_id and * peer tunnel_id / session_id set. Data can then be sent or received using * regular socket sendmsg() / recvmsg() calls. Kernel parameters of the socket * can be read or modified using ioctl() or [gs]etsockopt() calls. * * When a PPPoL2TP socket is connected with local and peer session_id values * zero, the socket is treated as a special tunnel management socket. 
* * Here's example userspace code to create a socket for sending/receiving data * over an L2TP session:- * * struct sockaddr_pppol2tp sax; * int fd; * int session_fd; * * fd = socket(AF_PPPOX, SOCK_DGRAM, PX_PROTO_OL2TP); * * sax.sa_family = AF_PPPOX; * sax.sa_protocol = PX_PROTO_OL2TP; * sax.pppol2tp.fd = tunnel_fd; // bound UDP socket * sax.pppol2tp.addr.sin_addr.s_addr = addr->sin_addr.s_addr; * sax.pppol2tp.addr.sin_port = addr->sin_port; * sax.pppol2tp.addr.sin_family = AF_INET; * sax.pppol2tp.s_tunnel = tunnel_id; * sax.pppol2tp.s_session = session_id; * sax.pppol2tp.d_tunnel = peer_tunnel_id; * sax.pppol2tp.d_session = peer_session_id; * * session_fd = connect(fd, (struct sockaddr *)&sax, sizeof(sax)); * * A pppd plugin that allows PPP traffic to be carried over L2TP using * this driver is available from the OpenL2TP project at * http://openl2tp.sourceforge.net. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/string.h> #include <linux/list.h> #include <linux/uaccess.h> #include <linux/kernel.h> #include <linux/spinlock.h> #include <linux/kthread.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/errno.h> #include <linux/jiffies.h> #include <linux/netdevice.h> #include <linux/net.h> #include <linux/inetdevice.h> #include <linux/skbuff.h> #include <linux/init.h> #include <linux/ip.h> #include <linux/udp.h> #include <linux/if_pppox.h> #include <linux/if_pppol2tp.h> #include <net/sock.h> #include <linux/ppp_channel.h> #include <linux/ppp_defs.h> #include <linux/ppp-ioctl.h> #include <linux/file.h> #include <linux/hash.h> #include <linux/sort.h> #include <linux/proc_fs.h> #include <linux/l2tp.h> #include <linux/nsproxy.h> #include <net/net_namespace.h> #include <net/netns/generic.h> #include <net/dst.h> #include <net/ip.h> #include <net/udp.h> #include <net/xfrm.h> #include <net/inet_common.h> #include <asm/byteorder.h> #include <linux/atomic.h> #include "l2tp_core.h" #define PPPOL2TP_DRV_VERSION "V2.0" /* 
Space for UDP, L2TP and PPP headers */ #define PPPOL2TP_HEADER_OVERHEAD 40 /* Number of bytes to build transmit L2TP headers. * Unfortunately the size is different depending on whether sequence numbers * are enabled. */ #define PPPOL2TP_L2TP_HDR_SIZE_SEQ 10 #define PPPOL2TP_L2TP_HDR_SIZE_NOSEQ 6 /* Private data of each session. This data lives at the end of struct * l2tp_session, referenced via session->priv[]. */ struct pppol2tp_session { int owner; /* pid that opened the socket */ struct sock *sock; /* Pointer to the session * PPPoX socket */ struct sock *tunnel_sock; /* Pointer to the tunnel UDP * socket */ int flags; /* accessed by PPPIOCGFLAGS. * Unused. */ }; static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb); static const struct ppp_channel_ops pppol2tp_chan_ops = { .start_xmit = pppol2tp_xmit, }; static const struct proto_ops pppol2tp_ops; /* Helpers to obtain tunnel/session contexts from sockets. */ static inline struct l2tp_session *pppol2tp_sock_to_session(struct sock *sk) { struct l2tp_session *session; if (sk == NULL) return NULL; sock_hold(sk); session = (struct l2tp_session *)(sk->sk_user_data); if (session == NULL) { sock_put(sk); goto out; } BUG_ON(session->magic != L2TP_SESSION_MAGIC); out: return session; } /***************************************************************************** * Receive data handling *****************************************************************************/ static int pppol2tp_recv_payload_hook(struct sk_buff *skb) { /* Skip PPP header, if present. In testing, Microsoft L2TP clients * don't send the PPP header (PPP header compression enabled), but * other clients can include the header. So we cope with both cases * here. The PPP header is always FF03 when using L2TP. * * Note that skb->data[] isn't dereferenced from a u16 ptr here since * the field may be unaligned. 
*/ if (!pskb_may_pull(skb, 2)) return 1; if ((skb->data[0] == 0xff) && (skb->data[1] == 0x03)) skb_pull(skb, 2); return 0; } /* Receive message. This is the recvmsg for the PPPoL2TP socket. */ static int pppol2tp_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags) { int err; struct sk_buff *skb; struct sock *sk = sock->sk; err = -EIO; if (sk->sk_state & PPPOX_BOUND) goto end; err = 0; skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &err); if (!skb) goto end; if (len > skb->len) len = skb->len; else if (len < skb->len) msg->msg_flags |= MSG_TRUNC; err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, len); if (likely(err == 0)) err = len; kfree_skb(skb); end: return err; } static void pppol2tp_recv(struct l2tp_session *session, struct sk_buff *skb, int data_len) { struct pppol2tp_session *ps = l2tp_session_priv(session); struct sock *sk = NULL; /* If the socket is bound, send it in to PPP's input queue. Otherwise * queue it on the session socket. */ sk = ps->sock; if (sk == NULL) goto no_sock; if (sk->sk_state & PPPOX_BOUND) { struct pppox_sock *po; l2tp_dbg(session, PPPOL2TP_MSG_DATA, "%s: recv %d byte data frame, passing to ppp\n", session->name, data_len); /* We need to forget all info related to the L2TP packet * gathered in the skb as we are going to reuse the same * skb for the inner packet. * Namely we need to: * - reset xfrm (IPSec) information as it applies to * the outer L2TP packet and not to the inner one * - release the dst to force a route lookup on the inner * IP packet since skb->dst currently points to the dst * of the UDP tunnel * - reset netfilter information as it doesn't apply * to the inner packet either */ secpath_reset(skb); skb_dst_drop(skb); nf_reset(skb); po = pppox_sk(sk); ppp_input(&po->chan, skb); } else { l2tp_info(session, PPPOL2TP_MSG_DATA, "%s: socket not bound\n", session->name); /* Not bound. Nothing we can do, so discard. 
*/ atomic_long_inc(&session->stats.rx_errors); kfree_skb(skb); } return; no_sock: l2tp_info(session, PPPOL2TP_MSG_DATA, "%s: no socket\n", session->name); kfree_skb(skb); } static void pppol2tp_session_sock_hold(struct l2tp_session *session) { struct pppol2tp_session *ps = l2tp_session_priv(session); if (ps->sock) sock_hold(ps->sock); } static void pppol2tp_session_sock_put(struct l2tp_session *session) { struct pppol2tp_session *ps = l2tp_session_priv(session); if (ps->sock) sock_put(ps->sock); } /************************************************************************ * Transmit handling ***********************************************************************/ /* This is the sendmsg for the PPPoL2TP pppol2tp_session socket. We come here * when a user application does a sendmsg() on the session socket. L2TP and * PPP headers must be inserted into the user's data. */ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m, size_t total_len) { static const unsigned char ppph[2] = { 0xff, 0x03 }; struct sock *sk = sock->sk; struct sk_buff *skb; int error; struct l2tp_session *session; struct l2tp_tunnel *tunnel; struct pppol2tp_session *ps; int uhlen; error = -ENOTCONN; if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED)) goto error; /* Get session and tunnel contexts */ error = -EBADF; session = pppol2tp_sock_to_session(sk); if (session == NULL) goto error; ps = l2tp_session_priv(session); tunnel = l2tp_sock_to_tunnel(ps->tunnel_sock); if (tunnel == NULL) goto error_put_sess; uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0; /* Allocate a socket buffer */ error = -ENOMEM; skb = sock_wmalloc(sk, NET_SKB_PAD + sizeof(struct iphdr) + uhlen + session->hdr_len + sizeof(ppph) + total_len, 0, GFP_KERNEL); if (!skb) goto error_put_sess_tun; /* Reserve space for headers. 
*/ skb_reserve(skb, NET_SKB_PAD); skb_reset_network_header(skb); skb_reserve(skb, sizeof(struct iphdr)); skb_reset_transport_header(skb); skb_reserve(skb, uhlen); /* Add PPP header */ skb->data[0] = ppph[0]; skb->data[1] = ppph[1]; skb_put(skb, 2); /* Copy user data into skb */ error = memcpy_fromiovec(skb_put(skb, total_len), m->msg_iov, total_len); if (error < 0) { kfree_skb(skb); goto error_put_sess_tun; } local_bh_disable(); l2tp_xmit_skb(session, skb, session->hdr_len); local_bh_enable(); sock_put(ps->tunnel_sock); sock_put(sk); return total_len; error_put_sess_tun: sock_put(ps->tunnel_sock); error_put_sess: sock_put(sk); error: return error; } /* Transmit function called by generic PPP driver. Sends PPP frame * over PPPoL2TP socket. * * This is almost the same as pppol2tp_sendmsg(), but rather than * being called with a msghdr from userspace, it is called with a skb * from the kernel. * * The supplied skb from ppp doesn't have enough headroom for the * insertion of L2TP, UDP and IP headers so we need to allocate more * headroom in the skb. This will create a cloned skb. But we must be * careful in the error case because the caller will expect to free * the skb it supplied, not our cloned skb. So we take care to always * leave the original skb unfreed if we return an error. 
*/ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb) { static const u8 ppph[2] = { 0xff, 0x03 }; struct sock *sk = (struct sock *) chan->private; struct sock *sk_tun; struct l2tp_session *session; struct l2tp_tunnel *tunnel; struct pppol2tp_session *ps; int uhlen, headroom; if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED)) goto abort; /* Get session and tunnel contexts from the socket */ session = pppol2tp_sock_to_session(sk); if (session == NULL) goto abort; ps = l2tp_session_priv(session); sk_tun = ps->tunnel_sock; if (sk_tun == NULL) goto abort_put_sess; tunnel = l2tp_sock_to_tunnel(sk_tun); if (tunnel == NULL) goto abort_put_sess; uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0; headroom = NET_SKB_PAD + sizeof(struct iphdr) + /* IP header */ uhlen + /* UDP header (if L2TP_ENCAPTYPE_UDP) */ session->hdr_len + /* L2TP header */ sizeof(ppph); /* PPP header */ if (skb_cow_head(skb, headroom)) goto abort_put_sess_tun; /* Setup PPP header */ __skb_push(skb, sizeof(ppph)); skb->data[0] = ppph[0]; skb->data[1] = ppph[1]; local_bh_disable(); l2tp_xmit_skb(session, skb, session->hdr_len); local_bh_enable(); sock_put(sk_tun); sock_put(sk); return 1; abort_put_sess_tun: sock_put(sk_tun); abort_put_sess: sock_put(sk); abort: /* Free the original skb */ kfree_skb(skb); return 1; } /***************************************************************************** * Session (and tunnel control) socket create/destroy. *****************************************************************************/ /* Called by l2tp_core when a session socket is being closed. 
*/ static void pppol2tp_session_close(struct l2tp_session *session) { struct pppol2tp_session *ps = l2tp_session_priv(session); struct sock *sk = ps->sock; struct socket *sock = sk->sk_socket; BUG_ON(session->magic != L2TP_SESSION_MAGIC); if (sock) { inet_shutdown(sock, 2); /* Don't let the session go away before our socket does */ l2tp_session_inc_refcount(session); } return; } /* Really kill the session socket. (Called from sock_put() if * refcnt == 0.) */ static void pppol2tp_session_destruct(struct sock *sk) { struct l2tp_session *session = sk->sk_user_data; if (session) { sk->sk_user_data = NULL; BUG_ON(session->magic != L2TP_SESSION_MAGIC); l2tp_session_dec_refcount(session); } return; } /* Called when the PPPoX socket (session) is closed. */ static int pppol2tp_release(struct socket *sock) { struct sock *sk = sock->sk; struct l2tp_session *session; int error; if (!sk) return 0; error = -EBADF; lock_sock(sk); if (sock_flag(sk, SOCK_DEAD) != 0) goto error; pppox_unbind_sock(sk); /* Signal the death of the socket. */ sk->sk_state = PPPOX_DEAD; sock_orphan(sk); sock->sk = NULL; session = pppol2tp_sock_to_session(sk); /* Purge any queued data */ if (session != NULL) { __l2tp_session_unhash(session); l2tp_session_queue_purge(session); sock_put(sk); } skb_queue_purge(&sk->sk_receive_queue); skb_queue_purge(&sk->sk_write_queue); release_sock(sk); /* This will delete the session context via * pppol2tp_session_destruct() if the socket's refcnt drops to * zero. */ sock_put(sk); return 0; error: release_sock(sk); return error; } static struct proto pppol2tp_sk_proto = { .name = "PPPOL2TP", .owner = THIS_MODULE, .obj_size = sizeof(struct pppox_sock), }; static int pppol2tp_backlog_recv(struct sock *sk, struct sk_buff *skb) { int rc; rc = l2tp_udp_encap_recv(sk, skb); if (rc) kfree_skb(skb); return NET_RX_SUCCESS; } /* socket() handler. Initialize a new struct sock. 
*/ static int pppol2tp_create(struct net *net, struct socket *sock) { int error = -ENOMEM; struct sock *sk; sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pppol2tp_sk_proto); if (!sk) goto out; sock_init_data(sock, sk); sock->state = SS_UNCONNECTED; sock->ops = &pppol2tp_ops; sk->sk_backlog_rcv = pppol2tp_backlog_recv; sk->sk_protocol = PX_PROTO_OL2TP; sk->sk_family = PF_PPPOX; sk->sk_state = PPPOX_NONE; sk->sk_type = SOCK_STREAM; sk->sk_destruct = pppol2tp_session_destruct; error = 0; out: return error; } #if defined(CONFIG_L2TP_DEBUGFS) || defined(CONFIG_L2TP_DEBUGFS_MODULE) static void pppol2tp_show(struct seq_file *m, void *arg) { struct l2tp_session *session = arg; struct pppol2tp_session *ps = l2tp_session_priv(session); if (ps) { struct pppox_sock *po = pppox_sk(ps->sock); if (po) seq_printf(m, " interface %s\n", ppp_dev_name(&po->chan)); } } #endif /* connect() handler. Attach a PPPoX socket to a tunnel UDP socket */ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr, int sockaddr_len, int flags) { struct sock *sk = sock->sk; struct sockaddr_pppol2tp *sp = (struct sockaddr_pppol2tp *) uservaddr; struct pppox_sock *po = pppox_sk(sk); struct l2tp_session *session = NULL; struct l2tp_tunnel *tunnel; struct pppol2tp_session *ps; struct dst_entry *dst; struct l2tp_session_cfg cfg = { 0, }; int error = 0; u32 tunnel_id, peer_tunnel_id; u32 session_id, peer_session_id; int ver = 2; int fd; lock_sock(sk); error = -EINVAL; if (sp->sa_protocol != PX_PROTO_OL2TP) goto end; /* Check for already bound sockets */ error = -EBUSY; if (sk->sk_state & PPPOX_CONNECTED) goto end; /* We don't supporting rebinding anyway */ error = -EALREADY; if (sk->sk_user_data) goto end; /* socket is already attached */ /* Get params from socket address. Handle L2TPv2 and L2TPv3. * This is nasty because there are different sockaddr_pppol2tp * structs for L2TPv2, L2TPv3, over IPv4 and IPv6. We use * the sockaddr size to determine which structure the caller * is using. 
*/ peer_tunnel_id = 0; if (sockaddr_len == sizeof(struct sockaddr_pppol2tp)) { fd = sp->pppol2tp.fd; tunnel_id = sp->pppol2tp.s_tunnel; peer_tunnel_id = sp->pppol2tp.d_tunnel; session_id = sp->pppol2tp.s_session; peer_session_id = sp->pppol2tp.d_session; } else if (sockaddr_len == sizeof(struct sockaddr_pppol2tpv3)) { struct sockaddr_pppol2tpv3 *sp3 = (struct sockaddr_pppol2tpv3 *) sp; ver = 3; fd = sp3->pppol2tp.fd; tunnel_id = sp3->pppol2tp.s_tunnel; peer_tunnel_id = sp3->pppol2tp.d_tunnel; session_id = sp3->pppol2tp.s_session; peer_session_id = sp3->pppol2tp.d_session; } else if (sockaddr_len == sizeof(struct sockaddr_pppol2tpin6)) { struct sockaddr_pppol2tpin6 *sp6 = (struct sockaddr_pppol2tpin6 *) sp; fd = sp6->pppol2tp.fd; tunnel_id = sp6->pppol2tp.s_tunnel; peer_tunnel_id = sp6->pppol2tp.d_tunnel; session_id = sp6->pppol2tp.s_session; peer_session_id = sp6->pppol2tp.d_session; } else if (sockaddr_len == sizeof(struct sockaddr_pppol2tpv3in6)) { struct sockaddr_pppol2tpv3in6 *sp6 = (struct sockaddr_pppol2tpv3in6 *) sp; ver = 3; fd = sp6->pppol2tp.fd; tunnel_id = sp6->pppol2tp.s_tunnel; peer_tunnel_id = sp6->pppol2tp.d_tunnel; session_id = sp6->pppol2tp.s_session; peer_session_id = sp6->pppol2tp.d_session; } else { error = -EINVAL; goto end; /* bad socket address */ } /* Don't bind if tunnel_id is 0 */ error = -EINVAL; if (tunnel_id == 0) goto end; tunnel = l2tp_tunnel_find(sock_net(sk), tunnel_id); /* Special case: create tunnel context if session_id and * peer_session_id is 0. Otherwise look up tunnel using supplied * tunnel id. 
*/ if ((session_id == 0) && (peer_session_id == 0)) { if (tunnel == NULL) { struct l2tp_tunnel_cfg tcfg = { .encap = L2TP_ENCAPTYPE_UDP, .debug = 0, }; error = l2tp_tunnel_create(sock_net(sk), fd, ver, tunnel_id, peer_tunnel_id, &tcfg, &tunnel); if (error < 0) goto end; } } else { /* Error if we can't find the tunnel */ error = -ENOENT; if (tunnel == NULL) goto end; /* Error if socket is not prepped */ if (tunnel->sock == NULL) goto end; } if (tunnel->recv_payload_hook == NULL) tunnel->recv_payload_hook = pppol2tp_recv_payload_hook; if (tunnel->peer_tunnel_id == 0) tunnel->peer_tunnel_id = peer_tunnel_id; /* Create session if it doesn't already exist. We handle the * case where a session was previously created by the netlink * interface by checking that the session doesn't already have * a socket and its tunnel socket are what we expect. If any * of those checks fail, return EEXIST to the caller. */ session = l2tp_session_find(sock_net(sk), tunnel, session_id); if (session == NULL) { /* Default MTU must allow space for UDP/L2TP/PPP * headers. */ cfg.mtu = cfg.mru = 1500 - PPPOL2TP_HEADER_OVERHEAD; /* Allocate and initialize a new session context. */ session = l2tp_session_create(sizeof(struct pppol2tp_session), tunnel, session_id, peer_session_id, &cfg); if (session == NULL) { error = -ENOMEM; goto end; } } else { ps = l2tp_session_priv(session); error = -EEXIST; if (ps->sock != NULL) goto end; /* consistency checks */ if (ps->tunnel_sock != tunnel->sock) goto end; } /* Associate session with its PPPoL2TP socket */ ps = l2tp_session_priv(session); ps->owner = current->pid; ps->sock = sk; ps->tunnel_sock = tunnel->sock; session->recv_skb = pppol2tp_recv; session->session_close = pppol2tp_session_close; #if defined(CONFIG_L2TP_DEBUGFS) || defined(CONFIG_L2TP_DEBUGFS_MODULE) session->show = pppol2tp_show; #endif /* We need to know each time a skb is dropped from the reorder * queue. 
*/ session->ref = pppol2tp_session_sock_hold; session->deref = pppol2tp_session_sock_put; /* If PMTU discovery was enabled, use the MTU that was discovered */ dst = sk_dst_get(sk); if (dst != NULL) { u32 pmtu = dst_mtu(__sk_dst_get(sk)); if (pmtu != 0) session->mtu = session->mru = pmtu - PPPOL2TP_HEADER_OVERHEAD; dst_release(dst); } /* Special case: if source & dest session_id == 0x0000, this * socket is being created to manage the tunnel. Just set up * the internal context for use by ioctl() and sockopt() * handlers. */ if ((session->session_id == 0) && (session->peer_session_id == 0)) { error = 0; goto out_no_ppp; } /* The only header we need to worry about is the L2TP * header. This size is different depending on whether * sequence numbers are enabled for the data channel. */ po->chan.hdrlen = PPPOL2TP_L2TP_HDR_SIZE_NOSEQ; po->chan.private = sk; po->chan.ops = &pppol2tp_chan_ops; po->chan.mtu = session->mtu; error = ppp_register_net_channel(sock_net(sk), &po->chan); if (error) goto end; out_no_ppp: /* This is how we get the session context from the socket. */ sk->sk_user_data = session; sk->sk_state = PPPOX_CONNECTED; l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: created\n", session->name); end: release_sock(sk); return error; } #ifdef CONFIG_L2TP_V3 /* Called when creating sessions via the netlink interface. */ static int pppol2tp_session_create(struct net *net, u32 tunnel_id, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg) { int error; struct l2tp_tunnel *tunnel; struct l2tp_session *session; struct pppol2tp_session *ps; tunnel = l2tp_tunnel_find(net, tunnel_id); /* Error if we can't find the tunnel */ error = -ENOENT; if (tunnel == NULL) goto out; /* Error if tunnel socket is not prepped */ if (tunnel->sock == NULL) goto out; /* Check that this session doesn't already exist */ error = -EEXIST; session = l2tp_session_find(net, tunnel, session_id); if (session != NULL) goto out; /* Default MTU values. 
*/ if (cfg->mtu == 0) cfg->mtu = 1500 - PPPOL2TP_HEADER_OVERHEAD; if (cfg->mru == 0) cfg->mru = cfg->mtu; /* Allocate and initialize a new session context. */ error = -ENOMEM; session = l2tp_session_create(sizeof(struct pppol2tp_session), tunnel, session_id, peer_session_id, cfg); if (session == NULL) goto out; ps = l2tp_session_priv(session); ps->tunnel_sock = tunnel->sock; l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: created\n", session->name); error = 0; out: return error; } #endif /* CONFIG_L2TP_V3 */ /* getname() support. */ static int pppol2tp_getname(struct socket *sock, struct sockaddr *uaddr, int *usockaddr_len, int peer) { int len = 0; int error = 0; struct l2tp_session *session; struct l2tp_tunnel *tunnel; struct sock *sk = sock->sk; struct inet_sock *inet; struct pppol2tp_session *pls; error = -ENOTCONN; if (sk == NULL) goto end; if (sk->sk_state != PPPOX_CONNECTED) goto end; error = -EBADF; session = pppol2tp_sock_to_session(sk); if (session == NULL) goto end; pls = l2tp_session_priv(session); tunnel = l2tp_sock_to_tunnel(pls->tunnel_sock); if (tunnel == NULL) { error = -EBADF; goto end_put_sess; } inet = inet_sk(tunnel->sock); if ((tunnel->version == 2) && (tunnel->sock->sk_family == AF_INET)) { struct sockaddr_pppol2tp sp; len = sizeof(sp); memset(&sp, 0, len); sp.sa_family = AF_PPPOX; sp.sa_protocol = PX_PROTO_OL2TP; sp.pppol2tp.fd = tunnel->fd; sp.pppol2tp.pid = pls->owner; sp.pppol2tp.s_tunnel = tunnel->tunnel_id; sp.pppol2tp.d_tunnel = tunnel->peer_tunnel_id; sp.pppol2tp.s_session = session->session_id; sp.pppol2tp.d_session = session->peer_session_id; sp.pppol2tp.addr.sin_family = AF_INET; sp.pppol2tp.addr.sin_port = inet->inet_dport; sp.pppol2tp.addr.sin_addr.s_addr = inet->inet_daddr; memcpy(uaddr, &sp, len); #if IS_ENABLED(CONFIG_IPV6) } else if ((tunnel->version == 2) && (tunnel->sock->sk_family == AF_INET6)) { struct ipv6_pinfo *np = inet6_sk(tunnel->sock); struct sockaddr_pppol2tpin6 sp; len = sizeof(sp); memset(&sp, 0, len); sp.sa_family 
= AF_PPPOX; sp.sa_protocol = PX_PROTO_OL2TP; sp.pppol2tp.fd = tunnel->fd; sp.pppol2tp.pid = pls->owner; sp.pppol2tp.s_tunnel = tunnel->tunnel_id; sp.pppol2tp.d_tunnel = tunnel->peer_tunnel_id; sp.pppol2tp.s_session = session->session_id; sp.pppol2tp.d_session = session->peer_session_id; sp.pppol2tp.addr.sin6_family = AF_INET6; sp.pppol2tp.addr.sin6_port = inet->inet_dport; memcpy(&sp.pppol2tp.addr.sin6_addr, &np->daddr, sizeof(np->daddr)); memcpy(uaddr, &sp, len); } else if ((tunnel->version == 3) && (tunnel->sock->sk_family == AF_INET6)) { struct ipv6_pinfo *np = inet6_sk(tunnel->sock); struct sockaddr_pppol2tpv3in6 sp; len = sizeof(sp); memset(&sp, 0, len); sp.sa_family = AF_PPPOX; sp.sa_protocol = PX_PROTO_OL2TP; sp.pppol2tp.fd = tunnel->fd; sp.pppol2tp.pid = pls->owner; sp.pppol2tp.s_tunnel = tunnel->tunnel_id; sp.pppol2tp.d_tunnel = tunnel->peer_tunnel_id; sp.pppol2tp.s_session = session->session_id; sp.pppol2tp.d_session = session->peer_session_id; sp.pppol2tp.addr.sin6_family = AF_INET6; sp.pppol2tp.addr.sin6_port = inet->inet_dport; memcpy(&sp.pppol2tp.addr.sin6_addr, &np->daddr, sizeof(np->daddr)); memcpy(uaddr, &sp, len); #endif } else if (tunnel->version == 3) { struct sockaddr_pppol2tpv3 sp; len = sizeof(sp); memset(&sp, 0, len); sp.sa_family = AF_PPPOX; sp.sa_protocol = PX_PROTO_OL2TP; sp.pppol2tp.fd = tunnel->fd; sp.pppol2tp.pid = pls->owner; sp.pppol2tp.s_tunnel = tunnel->tunnel_id; sp.pppol2tp.d_tunnel = tunnel->peer_tunnel_id; sp.pppol2tp.s_session = session->session_id; sp.pppol2tp.d_session = session->peer_session_id; sp.pppol2tp.addr.sin_family = AF_INET; sp.pppol2tp.addr.sin_port = inet->inet_dport; sp.pppol2tp.addr.sin_addr.s_addr = inet->inet_daddr; memcpy(uaddr, &sp, len); } *usockaddr_len = len; sock_put(pls->tunnel_sock); end_put_sess: sock_put(sk); error = 0; end: return error; } /**************************************************************************** * ioctl() handlers. 
* * The PPPoX socket is created for L2TP sessions: tunnels have their own UDP * sockets. However, in order to control kernel tunnel features, we allow * userspace to create a special "tunnel" PPPoX socket which is used for * control only. Tunnel PPPoX sockets have session_id == 0 and simply allow * the user application to issue L2TP setsockopt(), getsockopt() and ioctl() * calls. ****************************************************************************/ static void pppol2tp_copy_stats(struct pppol2tp_ioc_stats *dest, struct l2tp_stats *stats) { dest->tx_packets = atomic_long_read(&stats->tx_packets); dest->tx_bytes = atomic_long_read(&stats->tx_bytes); dest->tx_errors = atomic_long_read(&stats->tx_errors); dest->rx_packets = atomic_long_read(&stats->rx_packets); dest->rx_bytes = atomic_long_read(&stats->rx_bytes); dest->rx_seq_discards = atomic_long_read(&stats->rx_seq_discards); dest->rx_oos_packets = atomic_long_read(&stats->rx_oos_packets); dest->rx_errors = atomic_long_read(&stats->rx_errors); } /* Session ioctl helper. 
 */
/* Handle session-level ioctls: MTU/MRU get/set, PPP flags and L2TP
 * statistics. Holds a reference on the session's socket for the
 * duration of the call.
 */
static int pppol2tp_session_ioctl(struct l2tp_session *session,
				  unsigned int cmd, unsigned long arg)
{
	struct ifreq ifr;
	int err = 0;
	struct sock *sk;
	int val = (int) arg;
	struct pppol2tp_session *ps = l2tp_session_priv(session);
	struct l2tp_tunnel *tunnel = session->tunnel;
	struct pppol2tp_ioc_stats stats;

	l2tp_dbg(session, PPPOL2TP_MSG_CONTROL,
		 "%s: pppol2tp_session_ioctl(cmd=%#x, arg=%#lx)\n",
		 session->name, cmd, arg);

	sk = ps->sock;
	sock_hold(sk);

	switch (cmd) {
	case SIOCGIFMTU:
		err = -ENXIO;
		if (!(sk->sk_state & PPPOX_CONNECTED))
			break;

		err = -EFAULT;
		if (copy_from_user(&ifr, (void __user *) arg,
				   sizeof(struct ifreq)))
			break;
		ifr.ifr_mtu = session->mtu;
		if (copy_to_user((void __user *) arg, &ifr,
				 sizeof(struct ifreq)))
			break;

		l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: get mtu=%d\n",
			  session->name, session->mtu);
		err = 0;
		break;

	case SIOCSIFMTU:
		err = -ENXIO;
		if (!(sk->sk_state & PPPOX_CONNECTED))
			break;

		err = -EFAULT;
		if (copy_from_user(&ifr, (void __user *) arg,
				   sizeof(struct ifreq)))
			break;

		session->mtu = ifr.ifr_mtu;

		l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: set mtu=%d\n",
			  session->name, session->mtu);
		err = 0;
		break;

	case PPPIOCGMRU:
		err = -ENXIO;
		if (!(sk->sk_state & PPPOX_CONNECTED))
			break;

		err = -EFAULT;
		if (put_user(session->mru, (int __user *) arg))
			break;

		l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: get mru=%d\n",
			  session->name, session->mru);
		err = 0;
		break;

	case PPPIOCSMRU:
		err = -ENXIO;
		if (!(sk->sk_state & PPPOX_CONNECTED))
			break;

		err = -EFAULT;
		if (get_user(val, (int __user *) arg))
			break;

		session->mru = val;
		l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: set mru=%d\n",
			  session->name, session->mru);
		err = 0;
		break;

	case PPPIOCGFLAGS:
		err = -EFAULT;
		if (put_user(ps->flags, (int __user *) arg))
			break;

		l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: get flags=%d\n",
			  session->name, ps->flags);
		err = 0;
		break;

	case PPPIOCSFLAGS:
		err = -EFAULT;
		if (get_user(val, (int __user *) arg))
			break;
		ps->flags = val;
		l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: set flags=%d\n",
			  session->name, ps->flags);
		err = 0;
		break;

	case PPPIOCGL2TPSTATS:
		err = -ENXIO;
		if (!(sk->sk_state & PPPOX_CONNECTED))
			break;

		memset(&stats, 0, sizeof(stats));
		stats.tunnel_id = tunnel->tunnel_id;
		stats.session_id = session->session_id;
		pppol2tp_copy_stats(&stats, &session->stats);
		if (copy_to_user((void __user *) arg, &stats,
				 sizeof(stats)))
			break;
		l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: get L2TP stats\n",
			  session->name);
		err = 0;
		break;

	default:
		err = -ENOSYS;
		break;
	}

	sock_put(sk);

	return err;
}

/* Tunnel ioctl helper.
 *
 * Note the special handling for PPPIOCGL2TPSTATS below. If the ioctl data
 * specifies a session_id, the session ioctl handler is called. This allows an
 * application to retrieve session stats via a tunnel socket.
 */
static int pppol2tp_tunnel_ioctl(struct l2tp_tunnel *tunnel,
				 unsigned int cmd, unsigned long arg)
{
	int err = 0;
	struct sock *sk;
	struct pppol2tp_ioc_stats stats;

	l2tp_dbg(tunnel, PPPOL2TP_MSG_CONTROL,
		 "%s: pppol2tp_tunnel_ioctl(cmd=%#x, arg=%#lx)\n",
		 tunnel->name, cmd, arg);

	sk = tunnel->sock;
	sock_hold(sk);

	switch (cmd) {
	case PPPIOCGL2TPSTATS:
		err = -ENXIO;
		if (!(sk->sk_state & PPPOX_CONNECTED))
			break;

		if (copy_from_user(&stats, (void __user *) arg,
				   sizeof(stats))) {
			err = -EFAULT;
			break;
		}
		if (stats.session_id != 0) {
			/* resend to session ioctl handler */
			struct l2tp_session *session =
				l2tp_session_find(sock_net(sk), tunnel,
						  stats.session_id);
			if (session != NULL)
				err = pppol2tp_session_ioctl(session, cmd,
							     arg);
			else
				err = -EBADR;
			break;
		}
#ifdef CONFIG_XFRM
		stats.using_ipsec = (sk->sk_policy[0] || sk->sk_policy[1]) ? 1 : 0;
#endif
		pppol2tp_copy_stats(&stats, &tunnel->stats);
		if (copy_to_user((void __user *) arg, &stats,
				 sizeof(stats))) {
			err = -EFAULT;
			break;
		}
		l2tp_info(tunnel, PPPOL2TP_MSG_CONTROL, "%s: get L2TP stats\n",
			  tunnel->name);
		err = 0;
		break;

	default:
		err = -ENOSYS;
		break;
	}

	sock_put(sk);

	return err;
}

/* Main ioctl() handler.
 * Dispatch to tunnel or session helpers depending on the socket.
 */
static int pppol2tp_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	struct sock *sk = sock->sk;
	struct l2tp_session *session;
	struct l2tp_tunnel *tunnel;
	struct pppol2tp_session *ps;
	int err;

	if (!sk)
		return 0;

	err = -EBADF;
	if (sock_flag(sk, SOCK_DEAD) != 0)
		goto end;

	err = -ENOTCONN;
	if ((sk->sk_user_data == NULL) ||
	    (!(sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND))))
		goto end;

	/* Get session context from the socket */
	err = -EBADF;
	session = pppol2tp_sock_to_session(sk);
	if (session == NULL)
		goto end;

	/* Special case: if session's session_id is zero, treat ioctl as a
	 * tunnel ioctl
	 */
	ps = l2tp_session_priv(session);
	if ((session->session_id == 0) &&
	    (session->peer_session_id == 0)) {
		err = -EBADF;
		tunnel = l2tp_sock_to_tunnel(ps->tunnel_sock);
		if (tunnel == NULL)
			goto end_put_sess;

		err = pppol2tp_tunnel_ioctl(tunnel, cmd, arg);
		sock_put(ps->tunnel_sock);
		goto end_put_sess;
	}

	err = pppol2tp_session_ioctl(session, cmd, arg);

end_put_sess:
	/* balances the hold taken by pppol2tp_sock_to_session() */
	sock_put(sk);
end:
	return err;
}

/*****************************************************************************
 * setsockopt() / getsockopt() support.
 *
 * The PPPoX socket is created for L2TP sessions: tunnels have their own UDP
 * sockets. In order to control kernel tunnel features, we allow userspace to
 * create a special "tunnel" PPPoX socket which is used for control only.
 * Tunnel PPPoX sockets have session_id == 0 and simply allow the user
 * application to issue L2TP setsockopt(), getsockopt() and ioctl() calls.
 *****************************************************************************/

/* Tunnel setsockopt() helper.
 */
static int pppol2tp_tunnel_setsockopt(struct sock *sk,
				      struct l2tp_tunnel *tunnel,
				      int optname, int val)
{
	int err = 0;

	switch (optname) {
	case PPPOL2TP_SO_DEBUG:
		tunnel->debug = val;
		l2tp_info(tunnel, PPPOL2TP_MSG_CONTROL, "%s: set debug=%x\n",
			  tunnel->name, tunnel->debug);
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	return err;
}

/* Session setsockopt helper.
 */
static int pppol2tp_session_setsockopt(struct sock *sk,
				       struct l2tp_session *session,
				       int optname, int val)
{
	int err = 0;
	struct pppol2tp_session *ps = l2tp_session_priv(session);

	switch (optname) {
	case PPPOL2TP_SO_RECVSEQ:
		/* boolean option: only 0/1 accepted */
		if ((val != 0) && (val != 1)) {
			err = -EINVAL;
			break;
		}
		session->recv_seq = val ? -1 : 0;
		l2tp_info(session, PPPOL2TP_MSG_CONTROL,
			  "%s: set recv_seq=%d\n",
			  session->name, session->recv_seq);
		break;

	case PPPOL2TP_SO_SENDSEQ:
		if ((val != 0) && (val != 1)) {
			err = -EINVAL;
			break;
		}
		session->send_seq = val ? -1 : 0;
		{
			/* header length depends on whether data-channel
			 * sequence numbers are being sent */
			struct sock *ssk      = ps->sock;
			struct pppox_sock *po = pppox_sk(ssk);
			po->chan.hdrlen = val ? PPPOL2TP_L2TP_HDR_SIZE_SEQ :
				PPPOL2TP_L2TP_HDR_SIZE_NOSEQ;
		}
		l2tp_info(session, PPPOL2TP_MSG_CONTROL,
			  "%s: set send_seq=%d\n",
			  session->name, session->send_seq);
		break;

	case PPPOL2TP_SO_LNSMODE:
		if ((val != 0) && (val != 1)) {
			err = -EINVAL;
			break;
		}
		session->lns_mode = val ? -1 : 0;
		l2tp_info(session, PPPOL2TP_MSG_CONTROL,
			  "%s: set lns_mode=%d\n",
			  session->name, session->lns_mode);
		break;

	case PPPOL2TP_SO_DEBUG:
		session->debug = val;
		l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: set debug=%x\n",
			  session->name, session->debug);
		break;

	case PPPOL2TP_SO_REORDERTO:
		/* value is supplied in milliseconds, stored in jiffies */
		session->reorder_timeout = msecs_to_jiffies(val);
		l2tp_info(session, PPPOL2TP_MSG_CONTROL,
			  "%s: set reorder_timeout=%d\n",
			  session->name, session->reorder_timeout);
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	return err;
}

/* Main setsockopt() entry point.
* Does API checks, then calls either the tunnel or session setsockopt * handler, according to whether the PPPoL2TP socket is a for a regular * session or the special tunnel type. */ static int pppol2tp_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) { struct sock *sk = sock->sk; struct l2tp_session *session; struct l2tp_tunnel *tunnel; struct pppol2tp_session *ps; int val; int err; if (level != SOL_PPPOL2TP) return -EINVAL; if (optlen < sizeof(int)) return -EINVAL; if (get_user(val, (int __user *)optval)) return -EFAULT; err = -ENOTCONN; if (sk->sk_user_data == NULL) goto end; /* Get session context from the socket */ err = -EBADF; session = pppol2tp_sock_to_session(sk); if (session == NULL) goto end; /* Special case: if session_id == 0x0000, treat as operation on tunnel */ ps = l2tp_session_priv(session); if ((session->session_id == 0) && (session->peer_session_id == 0)) { err = -EBADF; tunnel = l2tp_sock_to_tunnel(ps->tunnel_sock); if (tunnel == NULL) goto end_put_sess; err = pppol2tp_tunnel_setsockopt(sk, tunnel, optname, val); sock_put(ps->tunnel_sock); } else err = pppol2tp_session_setsockopt(sk, session, optname, val); err = 0; end_put_sess: sock_put(sk); end: return err; } /* Tunnel getsockopt helper. Called with sock locked. */ static int pppol2tp_tunnel_getsockopt(struct sock *sk, struct l2tp_tunnel *tunnel, int optname, int *val) { int err = 0; switch (optname) { case PPPOL2TP_SO_DEBUG: *val = tunnel->debug; l2tp_info(tunnel, PPPOL2TP_MSG_CONTROL, "%s: get debug=%x\n", tunnel->name, tunnel->debug); break; default: err = -ENOPROTOOPT; break; } return err; } /* Session getsockopt helper. Called with sock locked. 
 */
static int pppol2tp_session_getsockopt(struct sock *sk,
				       struct l2tp_session *session,
				       int optname, int *val)
{
	int err = 0;

	switch (optname) {
	case PPPOL2TP_SO_RECVSEQ:
		*val = session->recv_seq;
		l2tp_info(session, PPPOL2TP_MSG_CONTROL,
			  "%s: get recv_seq=%d\n", session->name, *val);
		break;

	case PPPOL2TP_SO_SENDSEQ:
		*val = session->send_seq;
		l2tp_info(session, PPPOL2TP_MSG_CONTROL,
			  "%s: get send_seq=%d\n", session->name, *val);
		break;

	case PPPOL2TP_SO_LNSMODE:
		*val = session->lns_mode;
		l2tp_info(session, PPPOL2TP_MSG_CONTROL,
			  "%s: get lns_mode=%d\n", session->name, *val);
		break;

	case PPPOL2TP_SO_DEBUG:
		*val = session->debug;
		l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: get debug=%d\n",
			  session->name, *val);
		break;

	case PPPOL2TP_SO_REORDERTO:
		/* stored in jiffies, reported in milliseconds */
		*val = (int) jiffies_to_msecs(session->reorder_timeout);
		l2tp_info(session, PPPOL2TP_MSG_CONTROL,
			  "%s: get reorder_timeout=%d\n", session->name, *val);
		break;

	default:
		err = -ENOPROTOOPT;
	}

	return err;
}

/* Main getsockopt() entry point.
 * Does API checks, then calls either the tunnel or session getsockopt
 * handler, according to whether the PPPoX socket is a for a regular session
 * or the special tunnel type.
*/ static int pppol2tp_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { struct sock *sk = sock->sk; struct l2tp_session *session; struct l2tp_tunnel *tunnel; int val, len; int err; struct pppol2tp_session *ps; if (level != SOL_PPPOL2TP) return -EINVAL; if (get_user(len, optlen)) return -EFAULT; len = min_t(unsigned int, len, sizeof(int)); if (len < 0) return -EINVAL; err = -ENOTCONN; if (sk->sk_user_data == NULL) goto end; /* Get the session context */ err = -EBADF; session = pppol2tp_sock_to_session(sk); if (session == NULL) goto end; /* Special case: if session_id == 0x0000, treat as operation on tunnel */ ps = l2tp_session_priv(session); if ((session->session_id == 0) && (session->peer_session_id == 0)) { err = -EBADF; tunnel = l2tp_sock_to_tunnel(ps->tunnel_sock); if (tunnel == NULL) goto end_put_sess; err = pppol2tp_tunnel_getsockopt(sk, tunnel, optname, &val); sock_put(ps->tunnel_sock); } else err = pppol2tp_session_getsockopt(sk, session, optname, &val); err = -EFAULT; if (put_user(len, optlen)) goto end_put_sess; if (copy_to_user((void __user *) optval, &val, len)) goto end_put_sess; err = 0; end_put_sess: sock_put(sk); end: return err; } /***************************************************************************** * /proc filesystem for debug * Since the original pppol2tp driver provided /proc/net/pppol2tp for * L2TPv2, we dump only L2TPv2 tunnels and sessions here. 
 *****************************************************************************/

static unsigned int pppol2tp_net_id;

#ifdef CONFIG_PROC_FS

/* Iterator state carried in the seq_file private data. */
struct pppol2tp_seq_data {
	struct seq_net_private p;
	int tunnel_idx;			/* current tunnel */
	int session_idx;		/* index of session within current tunnel */
	struct l2tp_tunnel *tunnel;
	struct l2tp_session *session;	/* NULL means get next tunnel */
};

/* Advance pd->tunnel to the next L2TPv2 tunnel (L2TPv3 is skipped);
 * NULL when exhausted.
 */
static void pppol2tp_next_tunnel(struct net *net, struct pppol2tp_seq_data *pd)
{
	for (;;) {
		pd->tunnel = l2tp_tunnel_find_nth(net, pd->tunnel_idx);
		pd->tunnel_idx++;

		if (pd->tunnel == NULL)
			break;

		/* Ignore L2TPv3 tunnels */
		if (pd->tunnel->version < 3)
			break;
	}
}

/* Advance pd->session within the current tunnel; when the tunnel's
 * sessions are exhausted, move on to the next tunnel.
 */
static void pppol2tp_next_session(struct net *net, struct pppol2tp_seq_data *pd)
{
	pd->session = l2tp_session_find_nth(pd->tunnel, pd->session_idx);
	pd->session_idx++;

	if (pd->session == NULL) {
		pd->session_idx = 0;
		pppol2tp_next_tunnel(net, pd);
	}
}

static void *pppol2tp_seq_start(struct seq_file *m, loff_t *offs)
{
	struct pppol2tp_seq_data *pd = SEQ_START_TOKEN;
	loff_t pos = *offs;
	struct net *net;

	if (!pos)
		goto out;

	BUG_ON(m->private == NULL);
	pd = m->private;
	net = seq_file_net(m);

	if (pd->tunnel == NULL)
		pppol2tp_next_tunnel(net, pd);
	else
		pppol2tp_next_session(net, pd);

	/* NULL tunnel and session indicates end of list */
	if ((pd->tunnel == NULL) && (pd->session == NULL))
		pd = NULL;

out:
	return pd;
}

static void *pppol2tp_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return NULL;
}

static void pppol2tp_seq_stop(struct seq_file *p, void *v)
{
	/* nothing to do */
}

/* Print one tunnel line plus its counters. */
static void pppol2tp_seq_tunnel_show(struct seq_file *m, void *v)
{
	struct l2tp_tunnel *tunnel = v;

	seq_printf(m, "\nTUNNEL '%s', %c %d\n",
		   tunnel->name,
		   (tunnel == tunnel->sock->sk_user_data) ? 'Y' : 'N',
		   atomic_read(&tunnel->ref_count) - 1);
	seq_printf(m, " %08x %ld/%ld/%ld %ld/%ld/%ld\n",
		   tunnel->debug,
		   atomic_long_read(&tunnel->stats.tx_packets),
		   atomic_long_read(&tunnel->stats.tx_bytes),
		   atomic_long_read(&tunnel->stats.tx_errors),
		   atomic_long_read(&tunnel->stats.rx_packets),
		   atomic_long_read(&tunnel->stats.rx_bytes),
		   atomic_long_read(&tunnel->stats.rx_errors));
}

/* Print one session block: ids, flags, counters and PPP interface. */
static void pppol2tp_seq_session_show(struct seq_file *m, void *v)
{
	struct l2tp_session *session = v;
	struct l2tp_tunnel *tunnel = session->tunnel;
	struct pppol2tp_session *ps = l2tp_session_priv(session);
	struct pppox_sock *po = pppox_sk(ps->sock);
	u32 ip = 0;
	u16 port = 0;

	if (tunnel->sock) {
		struct inet_sock *inet = inet_sk(tunnel->sock);
		ip = ntohl(inet->inet_saddr);
		port = ntohs(inet->inet_sport);
	}

	seq_printf(m, " SESSION '%s' %08X/%d %04X/%04X -> "
		   "%04X/%04X %d %c\n",
		   session->name, ip, port,
		   tunnel->tunnel_id,
		   session->session_id,
		   tunnel->peer_tunnel_id,
		   session->peer_session_id,
		   ps->sock->sk_state,
		   (session == ps->sock->sk_user_data) ?
		   'Y' : 'N');
	seq_printf(m, " %d/%d/%c/%c/%s %08x %u\n",
		   session->mtu, session->mru,
		   session->recv_seq ? 'R' : '-',
		   session->send_seq ? 'S' : '-',
		   session->lns_mode ? "LNS" : "LAC",
		   session->debug,
		   jiffies_to_msecs(session->reorder_timeout));
	seq_printf(m, " %hu/%hu %ld/%ld/%ld %ld/%ld/%ld\n",
		   session->nr, session->ns,
		   atomic_long_read(&session->stats.tx_packets),
		   atomic_long_read(&session->stats.tx_bytes),
		   atomic_long_read(&session->stats.tx_errors),
		   atomic_long_read(&session->stats.rx_packets),
		   atomic_long_read(&session->stats.rx_bytes),
		   atomic_long_read(&session->stats.rx_errors));

	if (po)
		seq_printf(m, " interface %s\n", ppp_dev_name(&po->chan));
}

static int pppol2tp_seq_show(struct seq_file *m, void *v)
{
	struct pppol2tp_seq_data *pd = v;

	/* display header on line 1 */
	if (v == SEQ_START_TOKEN) {
		seq_puts(m, "PPPoL2TP driver info, " PPPOL2TP_DRV_VERSION "\n");
		seq_puts(m, "TUNNEL name, user-data-ok session-count\n");
		seq_puts(m, " debug tx-pkts/bytes/errs rx-pkts/bytes/errs\n");
		seq_puts(m, " SESSION name, addr/port src-tid/sid "
			 "dest-tid/sid state user-data-ok\n");
		seq_puts(m, " mtu/mru/rcvseq/sendseq/lns debug reorderto\n");
		seq_puts(m, " nr/ns tx-pkts/bytes/errs rx-pkts/bytes/errs\n");
		goto out;
	}

	/* Show the tunnel or session context.
	 */
	if (pd->session == NULL)
		pppol2tp_seq_tunnel_show(m, pd->tunnel);
	else
		pppol2tp_seq_session_show(m, pd->session);

out:
	return 0;
}

static const struct seq_operations pppol2tp_seq_ops = {
	.start		= pppol2tp_seq_start,
	.next		= pppol2tp_seq_next,
	.stop		= pppol2tp_seq_stop,
	.show		= pppol2tp_seq_show,
};

/* Called when our /proc file is opened. We allocate data for use when
 * iterating our tunnel / session contexts and store it in the private
 * data of the seq_file.
 */
static int pppol2tp_proc_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &pppol2tp_seq_ops,
			    sizeof(struct pppol2tp_seq_data));
}

static const struct file_operations pppol2tp_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= pppol2tp_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_net,
};

#endif /* CONFIG_PROC_FS */

/*****************************************************************************
 * Network namespace
 *****************************************************************************/

/* Per-netns init: create /proc/net/pppol2tp. */
static __net_init int pppol2tp_init_net(struct net *net)
{
	struct proc_dir_entry *pde;
	int err = 0;

	pde = proc_create("pppol2tp", S_IRUGO, net->proc_net,
			  &pppol2tp_proc_fops);
	if (!pde) {
		err = -ENOMEM;
		goto out;
	}

out:
	return err;
}

static __net_exit void pppol2tp_exit_net(struct net *net)
{
	remove_proc_entry("pppol2tp", net->proc_net);
}

static struct pernet_operations pppol2tp_net_ops = {
	.init = pppol2tp_init_net,
	.exit = pppol2tp_exit_net,
	.id   = &pppol2tp_net_id,
};

/*****************************************************************************
 * Init and cleanup
 *****************************************************************************/

static const struct proto_ops pppol2tp_ops = {
	.family		= AF_PPPOX,
	.owner		= THIS_MODULE,
	.release	= pppol2tp_release,
	.bind		= sock_no_bind,
	.connect	= pppol2tp_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= pppol2tp_getname,
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= pppol2tp_setsockopt,
	.getsockopt	= pppol2tp_getsockopt,
	.sendmsg	= pppol2tp_sendmsg,
	.recvmsg	= pppol2tp_recvmsg,
	.mmap		= sock_no_mmap,
	.ioctl		= pppox_ioctl,
};

static const struct pppox_proto pppol2tp_proto = {
	.create		= pppol2tp_create,
	.ioctl		= pppol2tp_ioctl,
	.owner		= THIS_MODULE,
};

#ifdef CONFIG_L2TP_V3

static const struct l2tp_nl_cmd_ops pppol2tp_nl_cmd_ops = {
	.session_create	= pppol2tp_session_create,
	.session_delete	= l2tp_session_delete,
};

#endif /* CONFIG_L2TP_V3 */

/* Module init: register pernet ops, the sk proto, the PPPoX protocol
 * and (if built) the L2TPv3 netlink ops; unwind in reverse on error.
 */
static int __init pppol2tp_init(void)
{
	int err;

	err = register_pernet_device(&pppol2tp_net_ops);
	if (err)
		goto out;

	err = proto_register(&pppol2tp_sk_proto, 0);
	if (err)
		goto out_unregister_pppol2tp_pernet;

	err = register_pppox_proto(PX_PROTO_OL2TP, &pppol2tp_proto);
	if (err)
		goto out_unregister_pppol2tp_proto;

#ifdef CONFIG_L2TP_V3
	err = l2tp_nl_register_ops(L2TP_PWTYPE_PPP, &pppol2tp_nl_cmd_ops);
	if (err)
		goto out_unregister_pppox;
#endif

	pr_info("PPPoL2TP kernel driver, %s\n", PPPOL2TP_DRV_VERSION);

out:
	return err;

#ifdef CONFIG_L2TP_V3
out_unregister_pppox:
	unregister_pppox_proto(PX_PROTO_OL2TP);
#endif
out_unregister_pppol2tp_proto:
	proto_unregister(&pppol2tp_sk_proto);
out_unregister_pppol2tp_pernet:
	unregister_pernet_device(&pppol2tp_net_ops);
	goto out;
}

/* Module exit: tear down in strict reverse order of registration. */
static void __exit pppol2tp_exit(void)
{
#ifdef CONFIG_L2TP_V3
	l2tp_nl_unregister_ops(L2TP_PWTYPE_PPP);
#endif
	unregister_pppox_proto(PX_PROTO_OL2TP);
	proto_unregister(&pppol2tp_sk_proto);
	unregister_pernet_device(&pppol2tp_net_ops);
}

module_init(pppol2tp_init);
module_exit(pppol2tp_exit);

MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
MODULE_DESCRIPTION("PPP over L2TP over UDP");
MODULE_LICENSE("GPL");
MODULE_VERSION(PPPOL2TP_DRV_VERSION);
MODULE_ALIAS("pppox-proto-" __stringify(PX_PROTO_OL2TP));
gpl-2.0
ccyrowski/cm-kernel
drivers/gpu/drm/mga/mga_state.c
216
29248
/* mga_state.c -- State support for MGA G200/G400 -*- linux-c -*- * Created: Thu Jan 27 02:53:43 2000 by jhartmann@precisioninsight.com * * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
 *
 * Authors:
 *    Jeff Hartmann <jhartmann@valinux.com>
 *    Keith Whitwell <keith@tungstengraphics.com>
 *
 * Rewritten by:
 *    Gareth Hughes <gareth@valinux.com>
 */

#include "drmP.h"
#include "drm.h"
#include "mga_drm.h"
#include "mga_drv.h"

/* ================================================================
 * DMA hardware state programming functions
 */

/*
 * Emit a scissor/clip rectangle into the DMA stream.  Coordinates are
 * converted to the hardware's inclusive-boundary form (x2-1, y2-1) and
 * Y values are scaled by the front-buffer pitch.
 */
static void mga_emit_clip_rect(drm_mga_private_t * dev_priv,
			       struct drm_clip_rect * box)
{
	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_mga_context_regs_t *ctx = &sarea_priv->context_state;
	unsigned int pitch = dev_priv->front_pitch;
	DMA_LOCALS;

	BEGIN_DMA(2);

	/* Force reset of DWGCTL on G400 (eliminates clip disable bit). */
	if (dev_priv->chipset >= MGA_CARD_TYPE_G400) {
		DMA_BLOCK(MGA_DWGCTL, ctx->dwgctl,
			  MGA_LEN + MGA_EXEC, 0x80000000,
			  MGA_DWGCTL, ctx->dwgctl,
			  MGA_LEN + MGA_EXEC, 0x80000000);
	}
	DMA_BLOCK(MGA_DMAPAD, 0x00000000,
		  MGA_CXBNDRY, ((box->x2 - 1) << 16) | box->x1,
		  MGA_YTOP, box->y1 * pitch,
		  MGA_YBOT, (box->y2 - 1) * pitch);

	ADVANCE_DMA();
}

/* Upload the G200 drawing-context registers from the SAREA shadow state. */
static __inline__ void mga_g200_emit_context(drm_mga_private_t * dev_priv)
{
	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_mga_context_regs_t *ctx = &sarea_priv->context_state;
	DMA_LOCALS;

	BEGIN_DMA(3);

	DMA_BLOCK(MGA_DSTORG, ctx->dstorg,
		  MGA_MACCESS, ctx->maccess,
		  MGA_PLNWT, ctx->plnwt,
		  MGA_DWGCTL, ctx->dwgctl);

	DMA_BLOCK(MGA_ALPHACTRL, ctx->alphactrl,
		  MGA_FOGCOL, ctx->fogcolor,
		  MGA_WFLAG, ctx->wflag,
		  MGA_ZORG, dev_priv->depth_offset);

	DMA_BLOCK(MGA_FCOL, ctx->fcol,
		  MGA_DMAPAD, 0x00000000,
		  MGA_DMAPAD, 0x00000000,
		  MGA_DMAPAD, 0x00000000);

	ADVANCE_DMA();
}

/* Upload the G400 drawing-context registers (adds dual-stage texture
 * combiner and stencil state on top of the G200 set). */
static __inline__ void mga_g400_emit_context(drm_mga_private_t * dev_priv)
{
	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_mga_context_regs_t *ctx = &sarea_priv->context_state;
	DMA_LOCALS;

	BEGIN_DMA(4);

	DMA_BLOCK(MGA_DSTORG, ctx->dstorg,
		  MGA_MACCESS, ctx->maccess,
		  MGA_PLNWT, ctx->plnwt,
		  MGA_DWGCTL, ctx->dwgctl);

	DMA_BLOCK(MGA_ALPHACTRL, ctx->alphactrl,
		  MGA_FOGCOL, ctx->fogcolor,
		  MGA_WFLAG, ctx->wflag,
		  MGA_ZORG, dev_priv->depth_offset);

	DMA_BLOCK(MGA_WFLAG1, ctx->wflag,
		  MGA_TDUALSTAGE0, ctx->tdualstage0,
		  MGA_TDUALSTAGE1, ctx->tdualstage1,
		  MGA_FCOL, ctx->fcol);

	DMA_BLOCK(MGA_STENCIL, ctx->stencil,
		  MGA_STENCILCTL, ctx->stencilctl,
		  MGA_DMAPAD, 0x00000000,
		  MGA_DMAPAD, 0x00000000);

	ADVANCE_DMA();
}

/* Upload texture-unit 0 state for the G200. */
static __inline__ void mga_g200_emit_tex0(drm_mga_private_t * dev_priv)
{
	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[0];
	DMA_LOCALS;

	BEGIN_DMA(4);

	DMA_BLOCK(MGA_TEXCTL2, tex->texctl2,
		  MGA_TEXCTL, tex->texctl,
		  MGA_TEXFILTER, tex->texfilter,
		  MGA_TEXBORDERCOL, tex->texbordercol);

	DMA_BLOCK(MGA_TEXORG, tex->texorg,
		  MGA_TEXORG1, tex->texorg1,
		  MGA_TEXORG2, tex->texorg2,
		  MGA_TEXORG3, tex->texorg3);

	DMA_BLOCK(MGA_TEXORG4, tex->texorg4,
		  MGA_TEXWIDTH, tex->texwidth,
		  MGA_TEXHEIGHT, tex->texheight,
		  MGA_WR24, tex->texwidth);

	DMA_BLOCK(MGA_WR34, tex->texheight,
		  MGA_TEXTRANS, 0x0000ffff,
		  MGA_TEXTRANSHIGH, 0x0000ffff,
		  MGA_DMAPAD, 0x00000000);

	ADVANCE_DMA();
}

/* Upload texture-unit 0 state for the G400.  The WR* writes feed the
 * WARP microcode's setup registers (the MGA_G400_WR_MAGIC values are
 * required by the hardware/microcode). */
static __inline__ void mga_g400_emit_tex0(drm_mga_private_t * dev_priv)
{
	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[0];
	DMA_LOCALS;

/*      printk("mga_g400_emit_tex0 %x %x %x\n", tex->texorg, */
/*             tex->texctl, tex->texctl2); */

	BEGIN_DMA(6);

	DMA_BLOCK(MGA_TEXCTL2, tex->texctl2 | MGA_G400_TC2_MAGIC,
		  MGA_TEXCTL, tex->texctl,
		  MGA_TEXFILTER, tex->texfilter,
		  MGA_TEXBORDERCOL, tex->texbordercol);

	DMA_BLOCK(MGA_TEXORG, tex->texorg,
		  MGA_TEXORG1, tex->texorg1,
		  MGA_TEXORG2, tex->texorg2,
		  MGA_TEXORG3, tex->texorg3);

	DMA_BLOCK(MGA_TEXORG4, tex->texorg4,
		  MGA_TEXWIDTH, tex->texwidth,
		  MGA_TEXHEIGHT, tex->texheight,
		  MGA_WR49, 0x00000000);

	DMA_BLOCK(MGA_WR57, 0x00000000,
		  MGA_WR53, 0x00000000,
		  MGA_WR61, 0x00000000,
		  MGA_WR52, MGA_G400_WR_MAGIC);

	DMA_BLOCK(MGA_WR60, MGA_G400_WR_MAGIC,
		  MGA_WR54, tex->texwidth | MGA_G400_WR_MAGIC,
		  MGA_WR62, tex->texheight | MGA_G400_WR_MAGIC,
		  MGA_DMAPAD, 0x00000000);

	DMA_BLOCK(MGA_DMAPAD, 0x00000000,
		  MGA_DMAPAD, 0x00000000,
		  MGA_TEXTRANS, 0x0000ffff,
		  MGA_TEXTRANSHIGH, 0x0000ffff);

	ADVANCE_DMA();
}

/* Upload texture-unit 1 state (G400 only, second map enabled). */
static __inline__ void mga_g400_emit_tex1(drm_mga_private_t * dev_priv)
{
	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[1];
	DMA_LOCALS;

/*      printk("mga_g400_emit_tex1 %x %x %x\n", tex->texorg, */
/*             tex->texctl, tex->texctl2); */

	BEGIN_DMA(5);

	DMA_BLOCK(MGA_TEXCTL2, (tex->texctl2 |
				MGA_MAP1_ENABLE |
				MGA_G400_TC2_MAGIC),
		  MGA_TEXCTL, tex->texctl,
		  MGA_TEXFILTER, tex->texfilter,
		  MGA_TEXBORDERCOL, tex->texbordercol);

	DMA_BLOCK(MGA_TEXORG, tex->texorg,
		  MGA_TEXORG1, tex->texorg1,
		  MGA_TEXORG2, tex->texorg2,
		  MGA_TEXORG3, tex->texorg3);

	DMA_BLOCK(MGA_TEXORG4, tex->texorg4,
		  MGA_TEXWIDTH, tex->texwidth,
		  MGA_TEXHEIGHT, tex->texheight,
		  MGA_WR49, 0x00000000);

	DMA_BLOCK(MGA_WR57, 0x00000000,
		  MGA_WR53, 0x00000000,
		  MGA_WR61, 0x00000000,
		  MGA_WR52, tex->texwidth | MGA_G400_WR_MAGIC);

	DMA_BLOCK(MGA_WR60, tex->texheight | MGA_G400_WR_MAGIC,
		  MGA_TEXTRANS, 0x0000ffff,
		  MGA_TEXTRANSHIGH, 0x0000ffff,
		  MGA_TEXCTL2, tex->texctl2 | MGA_G400_TC2_MAGIC);

	ADVANCE_DMA();
}

/* Select and start a G200 WARP pipe: suspend the WARP engine, program
 * the vertex layout, then restart at the pipe's microcode address. */
static __inline__ void mga_g200_emit_pipe(drm_mga_private_t * dev_priv)
{
	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
	unsigned int pipe = sarea_priv->warp_pipe;
	DMA_LOCALS;

	BEGIN_DMA(3);

	DMA_BLOCK(MGA_WIADDR, MGA_WMODE_SUSPEND,
		  MGA_WVRTXSZ, 0x00000007,
		  MGA_WFLAG, 0x00000000,
		  MGA_WR24, 0x00000000);

	DMA_BLOCK(MGA_WR25, 0x00000100,
		  MGA_WR34, 0x00000000,
		  MGA_WR42, 0x0000ffff,
		  MGA_WR60, 0x0000ffff);

	/* Padding required due to hardware bug.
	 */
	DMA_BLOCK(MGA_DMAPAD, 0xffffffff,
		  MGA_DMAPAD, 0xffffffff,
		  MGA_DMAPAD, 0xffffffff,
		  MGA_WIADDR, (dev_priv->warp_pipe_phys[pipe] |
			       MGA_WMODE_START | dev_priv->wagp_enable));

	ADVANCE_DMA();
}

/* Select and start a G400 WARP pipe.  Multitexture pipes (MGA_T2) use a
 * larger vertex; when switching away from a T2 pipe the old pipe is
 * flushed first with a dummy draw. */
static __inline__ void mga_g400_emit_pipe(drm_mga_private_t * dev_priv)
{
	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
	unsigned int pipe = sarea_priv->warp_pipe;
	DMA_LOCALS;

/*      printk("mga_g400_emit_pipe %x\n", pipe); */

	BEGIN_DMA(10);

	DMA_BLOCK(MGA_WIADDR2, MGA_WMODE_SUSPEND,
		  MGA_DMAPAD, 0x00000000,
		  MGA_DMAPAD, 0x00000000,
		  MGA_DMAPAD, 0x00000000);

	if (pipe & MGA_T2) {
		DMA_BLOCK(MGA_WVRTXSZ, 0x00001e09,
			  MGA_DMAPAD, 0x00000000,
			  MGA_DMAPAD, 0x00000000,
			  MGA_DMAPAD, 0x00000000);

		DMA_BLOCK(MGA_WACCEPTSEQ, 0x00000000,
			  MGA_WACCEPTSEQ, 0x00000000,
			  MGA_WACCEPTSEQ, 0x00000000,
			  MGA_WACCEPTSEQ, 0x1e000000);
	} else {
		if (dev_priv->warp_pipe & MGA_T2) {
			/* Flush the WARP pipe */
			DMA_BLOCK(MGA_YDST, 0x00000000,
				  MGA_FXLEFT, 0x00000000,
				  MGA_FXRIGHT, 0x00000001,
				  MGA_DWGCTL, MGA_DWGCTL_FLUSH);

			DMA_BLOCK(MGA_LEN + MGA_EXEC, 0x00000001,
				  MGA_DWGSYNC, 0x00007000,
				  MGA_TEXCTL2, MGA_G400_TC2_MAGIC,
				  MGA_LEN + MGA_EXEC, 0x00000000);

			DMA_BLOCK(MGA_TEXCTL2, (MGA_DUALTEX |
						MGA_G400_TC2_MAGIC),
				  MGA_LEN + MGA_EXEC, 0x00000000,
				  MGA_TEXCTL2, MGA_G400_TC2_MAGIC,
				  MGA_DMAPAD, 0x00000000);
		}

		DMA_BLOCK(MGA_WVRTXSZ, 0x00001807,
			  MGA_DMAPAD, 0x00000000,
			  MGA_DMAPAD, 0x00000000,
			  MGA_DMAPAD, 0x00000000);

		DMA_BLOCK(MGA_WACCEPTSEQ, 0x00000000,
			  MGA_WACCEPTSEQ, 0x00000000,
			  MGA_WACCEPTSEQ, 0x00000000,
			  MGA_WACCEPTSEQ, 0x18000000);
	}

	DMA_BLOCK(MGA_WFLAG, 0x00000000,
		  MGA_WFLAG1, 0x00000000,
		  MGA_WR56, MGA_G400_WR56_MAGIC,
		  MGA_DMAPAD, 0x00000000);

	DMA_BLOCK(MGA_WR49, 0x00000000,	/* tex0              */
		  MGA_WR57, 0x00000000,	/* tex0              */
		  MGA_WR53, 0x00000000,	/* tex1              */
		  MGA_WR61, 0x00000000);	/* tex1              */

	DMA_BLOCK(MGA_WR54, MGA_G400_WR_MAGIC,	/* tex0 width        */
		  MGA_WR62, MGA_G400_WR_MAGIC,	/* tex0 height       */
		  MGA_WR52, MGA_G400_WR_MAGIC,	/* tex1 width        */
		  MGA_WR60, MGA_G400_WR_MAGIC);	/* tex1 height       */

	/* Padding required due to hardware bug */
	DMA_BLOCK(MGA_DMAPAD, 0xffffffff,
		  MGA_DMAPAD, 0xffffffff,
		  MGA_DMAPAD, 0xffffffff,
		  MGA_WIADDR2, (dev_priv->warp_pipe_phys[pipe] |
				MGA_WMODE_START | dev_priv->wagp_enable));

	ADVANCE_DMA();
}

/* Re-emit whatever G200 state the SAREA dirty flags say has changed
 * since the last dispatch; clears the flags it services. */
static void mga_g200_emit_state(drm_mga_private_t * dev_priv)
{
	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
	unsigned int dirty = sarea_priv->dirty;

	if (sarea_priv->warp_pipe != dev_priv->warp_pipe) {
		mga_g200_emit_pipe(dev_priv);
		dev_priv->warp_pipe = sarea_priv->warp_pipe;
	}

	if (dirty & MGA_UPLOAD_CONTEXT) {
		mga_g200_emit_context(dev_priv);
		sarea_priv->dirty &= ~MGA_UPLOAD_CONTEXT;
	}

	if (dirty & MGA_UPLOAD_TEX0) {
		mga_g200_emit_tex0(dev_priv);
		sarea_priv->dirty &= ~MGA_UPLOAD_TEX0;
	}
}

/* G400 variant of the dirty-state re-emit; tex1 is only uploaded when
 * the current pipe is a multitexture (MGA_T2) pipe. */
static void mga_g400_emit_state(drm_mga_private_t * dev_priv)
{
	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
	unsigned int dirty = sarea_priv->dirty;
	int multitex = sarea_priv->warp_pipe & MGA_T2;

	if (sarea_priv->warp_pipe != dev_priv->warp_pipe) {
		mga_g400_emit_pipe(dev_priv);
		dev_priv->warp_pipe = sarea_priv->warp_pipe;
	}

	if (dirty & MGA_UPLOAD_CONTEXT) {
		mga_g400_emit_context(dev_priv);
		sarea_priv->dirty &= ~MGA_UPLOAD_CONTEXT;
	}

	if (dirty & MGA_UPLOAD_TEX0) {
		mga_g400_emit_tex0(dev_priv);
		sarea_priv->dirty &= ~MGA_UPLOAD_TEX0;
	}

	if ((dirty & MGA_UPLOAD_TEX1) && multitex) {
		mga_g400_emit_tex1(dev_priv);
		sarea_priv->dirty &= ~MGA_UPLOAD_TEX1;
	}
}

/* ================================================================
 * SAREA state verification
 */

/* Disallow all write destinations except the front and backbuffer.
 */
static int mga_verify_context(drm_mga_private_t * dev_priv)
{
	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_mga_context_regs_t *ctx = &sarea_priv->context_state;

	if (ctx->dstorg != dev_priv->front_offset &&
	    ctx->dstorg != dev_priv->back_offset) {
		DRM_ERROR("*** bad DSTORG: %x (front %x, back %x)\n\n",
			  ctx->dstorg, dev_priv->front_offset,
			  dev_priv->back_offset);
		ctx->dstorg = 0;
		return -EINVAL;
	}

	return 0;
}

/* Disallow texture reads from PCI space.
*/ static int mga_verify_tex(drm_mga_private_t * dev_priv, int unit) { drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[unit]; unsigned int org; org = tex->texorg & (MGA_TEXORGMAP_MASK | MGA_TEXORGACC_MASK); if (org == (MGA_TEXORGMAP_SYSMEM | MGA_TEXORGACC_PCI)) { DRM_ERROR("*** bad TEXORG: 0x%x, unit %d\n", tex->texorg, unit); tex->texorg = 0; return -EINVAL; } return 0; } static int mga_verify_state(drm_mga_private_t * dev_priv) { drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; unsigned int dirty = sarea_priv->dirty; int ret = 0; if (sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS) sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS; if (dirty & MGA_UPLOAD_CONTEXT) ret |= mga_verify_context(dev_priv); if (dirty & MGA_UPLOAD_TEX0) ret |= mga_verify_tex(dev_priv, 0); if (dev_priv->chipset >= MGA_CARD_TYPE_G400) { if (dirty & MGA_UPLOAD_TEX1) ret |= mga_verify_tex(dev_priv, 1); if (dirty & MGA_UPLOAD_PIPE) ret |= (sarea_priv->warp_pipe > MGA_MAX_G400_PIPES); } else { if (dirty & MGA_UPLOAD_PIPE) ret |= (sarea_priv->warp_pipe > MGA_MAX_G200_PIPES); } return (ret == 0); } static int mga_verify_iload(drm_mga_private_t * dev_priv, unsigned int dstorg, unsigned int length) { if (dstorg < dev_priv->texture_offset || dstorg + length > (dev_priv->texture_offset + dev_priv->texture_size)) { DRM_ERROR("*** bad iload DSTORG: 0x%x\n", dstorg); return -EINVAL; } if (length & MGA_ILOAD_MASK) { DRM_ERROR("*** bad iload length: 0x%x\n", length & MGA_ILOAD_MASK); return -EINVAL; } return 0; } static int mga_verify_blit(drm_mga_private_t * dev_priv, unsigned int srcorg, unsigned int dstorg) { if ((srcorg & 0x3) == (MGA_SRCACC_PCI | MGA_SRCMAP_SYSMEM) || (dstorg & 0x3) == (MGA_SRCACC_PCI | MGA_SRCMAP_SYSMEM)) { DRM_ERROR("*** bad blit: src=0x%x dst=0x%x\n", srcorg, dstorg); return -EINVAL; } return 0; } /* ================================================================ * */ static void mga_dma_dispatch_clear(struct drm_device * dev, 
drm_mga_clear_t * clear) { drm_mga_private_t *dev_priv = dev->dev_private; drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; drm_mga_context_regs_t *ctx = &sarea_priv->context_state; struct drm_clip_rect *pbox = sarea_priv->boxes; int nbox = sarea_priv->nbox; int i; DMA_LOCALS; DRM_DEBUG("\n"); BEGIN_DMA(1); DMA_BLOCK(MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000, MGA_DWGSYNC, 0x00007100, MGA_DWGSYNC, 0x00007000); ADVANCE_DMA(); for (i = 0; i < nbox; i++) { struct drm_clip_rect *box = &pbox[i]; u32 height = box->y2 - box->y1; DRM_DEBUG(" from=%d,%d to=%d,%d\n", box->x1, box->y1, box->x2, box->y2); if (clear->flags & MGA_FRONT) { BEGIN_DMA(2); DMA_BLOCK(MGA_DMAPAD, 0x00000000, MGA_PLNWT, clear->color_mask, MGA_YDSTLEN, (box->y1 << 16) | height, MGA_FXBNDRY, (box->x2 << 16) | box->x1); DMA_BLOCK(MGA_DMAPAD, 0x00000000, MGA_FCOL, clear->clear_color, MGA_DSTORG, dev_priv->front_offset, MGA_DWGCTL + MGA_EXEC, dev_priv->clear_cmd); ADVANCE_DMA(); } if (clear->flags & MGA_BACK) { BEGIN_DMA(2); DMA_BLOCK(MGA_DMAPAD, 0x00000000, MGA_PLNWT, clear->color_mask, MGA_YDSTLEN, (box->y1 << 16) | height, MGA_FXBNDRY, (box->x2 << 16) | box->x1); DMA_BLOCK(MGA_DMAPAD, 0x00000000, MGA_FCOL, clear->clear_color, MGA_DSTORG, dev_priv->back_offset, MGA_DWGCTL + MGA_EXEC, dev_priv->clear_cmd); ADVANCE_DMA(); } if (clear->flags & MGA_DEPTH) { BEGIN_DMA(2); DMA_BLOCK(MGA_DMAPAD, 0x00000000, MGA_PLNWT, clear->depth_mask, MGA_YDSTLEN, (box->y1 << 16) | height, MGA_FXBNDRY, (box->x2 << 16) | box->x1); DMA_BLOCK(MGA_DMAPAD, 0x00000000, MGA_FCOL, clear->clear_depth, MGA_DSTORG, dev_priv->depth_offset, MGA_DWGCTL + MGA_EXEC, dev_priv->clear_cmd); ADVANCE_DMA(); } } BEGIN_DMA(1); /* Force reset of DWGCTL */ DMA_BLOCK(MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000, MGA_PLNWT, ctx->plnwt, MGA_DWGCTL, ctx->dwgctl); ADVANCE_DMA(); FLUSH_DMA(); } static void mga_dma_dispatch_swap(struct drm_device * dev) { drm_mga_private_t *dev_priv = dev->dev_private; drm_mga_sarea_t *sarea_priv = 
dev_priv->sarea_priv; drm_mga_context_regs_t *ctx = &sarea_priv->context_state; struct drm_clip_rect *pbox = sarea_priv->boxes; int nbox = sarea_priv->nbox; int i; DMA_LOCALS; DRM_DEBUG("\n"); sarea_priv->last_frame.head = dev_priv->prim.tail; sarea_priv->last_frame.wrap = dev_priv->prim.last_wrap; BEGIN_DMA(4 + nbox); DMA_BLOCK(MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000, MGA_DWGSYNC, 0x00007100, MGA_DWGSYNC, 0x00007000); DMA_BLOCK(MGA_DSTORG, dev_priv->front_offset, MGA_MACCESS, dev_priv->maccess, MGA_SRCORG, dev_priv->back_offset, MGA_AR5, dev_priv->front_pitch); DMA_BLOCK(MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000, MGA_PLNWT, 0xffffffff, MGA_DWGCTL, MGA_DWGCTL_COPY); for (i = 0; i < nbox; i++) { struct drm_clip_rect *box = &pbox[i]; u32 height = box->y2 - box->y1; u32 start = box->y1 * dev_priv->front_pitch; DRM_DEBUG(" from=%d,%d to=%d,%d\n", box->x1, box->y1, box->x2, box->y2); DMA_BLOCK(MGA_AR0, start + box->x2 - 1, MGA_AR3, start + box->x1, MGA_FXBNDRY, ((box->x2 - 1) << 16) | box->x1, MGA_YDSTLEN + MGA_EXEC, (box->y1 << 16) | height); } DMA_BLOCK(MGA_DMAPAD, 0x00000000, MGA_PLNWT, ctx->plnwt, MGA_SRCORG, dev_priv->front_offset, MGA_DWGCTL, ctx->dwgctl); ADVANCE_DMA(); FLUSH_DMA(); DRM_DEBUG("... 
done.\n"); } static void mga_dma_dispatch_vertex(struct drm_device * dev, struct drm_buf * buf) { drm_mga_private_t *dev_priv = dev->dev_private; drm_mga_buf_priv_t *buf_priv = buf->dev_private; drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; u32 address = (u32) buf->bus_address; u32 length = (u32) buf->used; int i = 0; DMA_LOCALS; DRM_DEBUG("buf=%d used=%d\n", buf->idx, buf->used); if (buf->used) { buf_priv->dispatched = 1; MGA_EMIT_STATE(dev_priv, sarea_priv->dirty); do { if (i < sarea_priv->nbox) { mga_emit_clip_rect(dev_priv, &sarea_priv->boxes[i]); } BEGIN_DMA(1); DMA_BLOCK(MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000, MGA_SECADDRESS, (address | MGA_DMA_VERTEX), MGA_SECEND, ((address + length) | dev_priv->dma_access)); ADVANCE_DMA(); } while (++i < sarea_priv->nbox); } if (buf_priv->discard) { AGE_BUFFER(buf_priv); buf->pending = 0; buf->used = 0; buf_priv->dispatched = 0; mga_freelist_put(dev, buf); } FLUSH_DMA(); } static void mga_dma_dispatch_indices(struct drm_device * dev, struct drm_buf * buf, unsigned int start, unsigned int end) { drm_mga_private_t *dev_priv = dev->dev_private; drm_mga_buf_priv_t *buf_priv = buf->dev_private; drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; u32 address = (u32) buf->bus_address; int i = 0; DMA_LOCALS; DRM_DEBUG("buf=%d start=%d end=%d\n", buf->idx, start, end); if (start != end) { buf_priv->dispatched = 1; MGA_EMIT_STATE(dev_priv, sarea_priv->dirty); do { if (i < sarea_priv->nbox) { mga_emit_clip_rect(dev_priv, &sarea_priv->boxes[i]); } BEGIN_DMA(1); DMA_BLOCK(MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000, MGA_SETUPADDRESS, address + start, MGA_SETUPEND, ((address + end) | dev_priv->dma_access)); ADVANCE_DMA(); } while (++i < sarea_priv->nbox); } if (buf_priv->discard) { AGE_BUFFER(buf_priv); buf->pending = 0; buf->used = 0; buf_priv->dispatched = 0; mga_freelist_put(dev, buf); } FLUSH_DMA(); } /* This copies a 64 byte aligned agp region to the frambuffer with a * standard blit, the ioctl needs to do checking. 
*/ static void mga_dma_dispatch_iload(struct drm_device * dev, struct drm_buf * buf, unsigned int dstorg, unsigned int length) { drm_mga_private_t *dev_priv = dev->dev_private; drm_mga_buf_priv_t *buf_priv = buf->dev_private; drm_mga_context_regs_t *ctx = &dev_priv->sarea_priv->context_state; u32 srcorg = buf->bus_address | dev_priv->dma_access | MGA_SRCMAP_SYSMEM; u32 y2; DMA_LOCALS; DRM_DEBUG("buf=%d used=%d\n", buf->idx, buf->used); y2 = length / 64; BEGIN_DMA(5); DMA_BLOCK(MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000, MGA_DWGSYNC, 0x00007100, MGA_DWGSYNC, 0x00007000); DMA_BLOCK(MGA_DSTORG, dstorg, MGA_MACCESS, 0x00000000, MGA_SRCORG, srcorg, MGA_AR5, 64); DMA_BLOCK(MGA_PITCH, 64, MGA_PLNWT, 0xffffffff, MGA_DMAPAD, 0x00000000, MGA_DWGCTL, MGA_DWGCTL_COPY); DMA_BLOCK(MGA_AR0, 63, MGA_AR3, 0, MGA_FXBNDRY, (63 << 16) | 0, MGA_YDSTLEN + MGA_EXEC, y2); DMA_BLOCK(MGA_PLNWT, ctx->plnwt, MGA_SRCORG, dev_priv->front_offset, MGA_PITCH, dev_priv->front_pitch, MGA_DWGSYNC, 0x00007000); ADVANCE_DMA(); AGE_BUFFER(buf_priv); buf->pending = 0; buf->used = 0; buf_priv->dispatched = 0; mga_freelist_put(dev, buf); FLUSH_DMA(); } static void mga_dma_dispatch_blit(struct drm_device * dev, drm_mga_blit_t * blit) { drm_mga_private_t *dev_priv = dev->dev_private; drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; drm_mga_context_regs_t *ctx = &sarea_priv->context_state; struct drm_clip_rect *pbox = sarea_priv->boxes; int nbox = sarea_priv->nbox; u32 scandir = 0, i; DMA_LOCALS; DRM_DEBUG("\n"); BEGIN_DMA(4 + nbox); DMA_BLOCK(MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000, MGA_DWGSYNC, 0x00007100, MGA_DWGSYNC, 0x00007000); DMA_BLOCK(MGA_DWGCTL, MGA_DWGCTL_COPY, MGA_PLNWT, blit->planemask, MGA_SRCORG, blit->srcorg, MGA_DSTORG, blit->dstorg); DMA_BLOCK(MGA_SGN, scandir, MGA_MACCESS, dev_priv->maccess, MGA_AR5, blit->ydir * blit->src_pitch, MGA_PITCH, blit->dst_pitch); for (i = 0; i < nbox; i++) { int srcx = pbox[i].x1 + blit->delta_sx; int srcy = pbox[i].y1 + blit->delta_sy; int dstx = 
pbox[i].x1 + blit->delta_dx; int dsty = pbox[i].y1 + blit->delta_dy; int h = pbox[i].y2 - pbox[i].y1; int w = pbox[i].x2 - pbox[i].x1 - 1; int start; if (blit->ydir == -1) { srcy = blit->height - srcy - 1; } start = srcy * blit->src_pitch + srcx; DMA_BLOCK(MGA_AR0, start + w, MGA_AR3, start, MGA_FXBNDRY, ((dstx + w) << 16) | (dstx & 0xffff), MGA_YDSTLEN + MGA_EXEC, (dsty << 16) | h); } /* Do something to flush AGP? */ /* Force reset of DWGCTL */ DMA_BLOCK(MGA_DMAPAD, 0x00000000, MGA_PLNWT, ctx->plnwt, MGA_PITCH, dev_priv->front_pitch, MGA_DWGCTL, ctx->dwgctl); ADVANCE_DMA(); } /* ================================================================ * */ static int mga_dma_clear(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_mga_private_t *dev_priv = dev->dev_private; drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; drm_mga_clear_t *clear = data; LOCK_TEST_WITH_RETURN(dev, file_priv); if (sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS) sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS; WRAP_TEST_WITH_RETURN(dev_priv); mga_dma_dispatch_clear(dev, clear); /* Make sure we restore the 3D state next time. */ dev_priv->sarea_priv->dirty |= MGA_UPLOAD_CONTEXT; return 0; } static int mga_dma_swap(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_mga_private_t *dev_priv = dev->dev_private; drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; LOCK_TEST_WITH_RETURN(dev, file_priv); if (sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS) sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS; WRAP_TEST_WITH_RETURN(dev_priv); mga_dma_dispatch_swap(dev); /* Make sure we restore the 3D state next time. 
*/ dev_priv->sarea_priv->dirty |= MGA_UPLOAD_CONTEXT; return 0; } static int mga_dma_vertex(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_mga_private_t *dev_priv = dev->dev_private; struct drm_device_dma *dma = dev->dma; struct drm_buf *buf; drm_mga_buf_priv_t *buf_priv; drm_mga_vertex_t *vertex = data; LOCK_TEST_WITH_RETURN(dev, file_priv); if (vertex->idx < 0 || vertex->idx > dma->buf_count) return -EINVAL; buf = dma->buflist[vertex->idx]; buf_priv = buf->dev_private; buf->used = vertex->used; buf_priv->discard = vertex->discard; if (!mga_verify_state(dev_priv)) { if (vertex->discard) { if (buf_priv->dispatched == 1) AGE_BUFFER(buf_priv); buf_priv->dispatched = 0; mga_freelist_put(dev, buf); } return -EINVAL; } WRAP_TEST_WITH_RETURN(dev_priv); mga_dma_dispatch_vertex(dev, buf); return 0; } static int mga_dma_indices(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_mga_private_t *dev_priv = dev->dev_private; struct drm_device_dma *dma = dev->dma; struct drm_buf *buf; drm_mga_buf_priv_t *buf_priv; drm_mga_indices_t *indices = data; LOCK_TEST_WITH_RETURN(dev, file_priv); if (indices->idx < 0 || indices->idx > dma->buf_count) return -EINVAL; buf = dma->buflist[indices->idx]; buf_priv = buf->dev_private; buf_priv->discard = indices->discard; if (!mga_verify_state(dev_priv)) { if (indices->discard) { if (buf_priv->dispatched == 1) AGE_BUFFER(buf_priv); buf_priv->dispatched = 0; mga_freelist_put(dev, buf); } return -EINVAL; } WRAP_TEST_WITH_RETURN(dev_priv); mga_dma_dispatch_indices(dev, buf, indices->start, indices->end); return 0; } static int mga_dma_iload(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_device_dma *dma = dev->dma; drm_mga_private_t *dev_priv = dev->dev_private; struct drm_buf *buf; drm_mga_buf_priv_t *buf_priv; drm_mga_iload_t *iload = data; DRM_DEBUG("\n"); LOCK_TEST_WITH_RETURN(dev, file_priv); #if 0 if (mga_do_wait_for_idle(dev_priv) < 0) { if (MGA_DMA_DEBUG) 
DRM_INFO("-EBUSY\n"); return -EBUSY; } #endif if (iload->idx < 0 || iload->idx > dma->buf_count) return -EINVAL; buf = dma->buflist[iload->idx]; buf_priv = buf->dev_private; if (mga_verify_iload(dev_priv, iload->dstorg, iload->length)) { mga_freelist_put(dev, buf); return -EINVAL; } WRAP_TEST_WITH_RETURN(dev_priv); mga_dma_dispatch_iload(dev, buf, iload->dstorg, iload->length); /* Make sure we restore the 3D state next time. */ dev_priv->sarea_priv->dirty |= MGA_UPLOAD_CONTEXT; return 0; } static int mga_dma_blit(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_mga_private_t *dev_priv = dev->dev_private; drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; drm_mga_blit_t *blit = data; DRM_DEBUG("\n"); LOCK_TEST_WITH_RETURN(dev, file_priv); if (sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS) sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS; if (mga_verify_blit(dev_priv, blit->srcorg, blit->dstorg)) return -EINVAL; WRAP_TEST_WITH_RETURN(dev_priv); mga_dma_dispatch_blit(dev, blit); /* Make sure we restore the 3D state next time. 
*/ dev_priv->sarea_priv->dirty |= MGA_UPLOAD_CONTEXT; return 0; } static int mga_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_mga_private_t *dev_priv = dev->dev_private; drm_mga_getparam_t *param = data; int value; if (!dev_priv) { DRM_ERROR("called with no initialization\n"); return -EINVAL; } DRM_DEBUG("pid=%d\n", DRM_CURRENTPID); switch (param->param) { case MGA_PARAM_IRQ_NR: value = drm_dev_to_irq(dev); break; case MGA_PARAM_CARD_TYPE: value = dev_priv->chipset; break; default: return -EINVAL; } if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) { DRM_ERROR("copy_to_user\n"); return -EFAULT; } return 0; } static int mga_set_fence(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_mga_private_t *dev_priv = dev->dev_private; u32 *fence = data; DMA_LOCALS; if (!dev_priv) { DRM_ERROR("called with no initialization\n"); return -EINVAL; } DRM_DEBUG("pid=%d\n", DRM_CURRENTPID); /* I would normal do this assignment in the declaration of fence, * but dev_priv may be NULL. 
*/ *fence = dev_priv->next_fence_to_post; dev_priv->next_fence_to_post++; BEGIN_DMA(1); DMA_BLOCK(MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000, MGA_SOFTRAP, 0x00000000); ADVANCE_DMA(); return 0; } static int mga_wait_fence(struct drm_device *dev, void *data, struct drm_file * file_priv) { drm_mga_private_t *dev_priv = dev->dev_private; u32 *fence = data; if (!dev_priv) { DRM_ERROR("called with no initialization\n"); return -EINVAL; } DRM_DEBUG("pid=%d\n", DRM_CURRENTPID); mga_driver_fence_wait(dev, fence); return 0; } struct drm_ioctl_desc mga_ioctls[] = { DRM_IOCTL_DEF(DRM_MGA_INIT, mga_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_MGA_FLUSH, mga_dma_flush, DRM_AUTH), DRM_IOCTL_DEF(DRM_MGA_RESET, mga_dma_reset, DRM_AUTH), DRM_IOCTL_DEF(DRM_MGA_SWAP, mga_dma_swap, DRM_AUTH), DRM_IOCTL_DEF(DRM_MGA_CLEAR, mga_dma_clear, DRM_AUTH), DRM_IOCTL_DEF(DRM_MGA_VERTEX, mga_dma_vertex, DRM_AUTH), DRM_IOCTL_DEF(DRM_MGA_INDICES, mga_dma_indices, DRM_AUTH), DRM_IOCTL_DEF(DRM_MGA_ILOAD, mga_dma_iload, DRM_AUTH), DRM_IOCTL_DEF(DRM_MGA_BLIT, mga_dma_blit, DRM_AUTH), DRM_IOCTL_DEF(DRM_MGA_GETPARAM, mga_getparam, DRM_AUTH), DRM_IOCTL_DEF(DRM_MGA_SET_FENCE, mga_set_fence, DRM_AUTH), DRM_IOCTL_DEF(DRM_MGA_WAIT_FENCE, mga_wait_fence, DRM_AUTH), DRM_IOCTL_DEF(DRM_MGA_DMA_BOOTSTRAP, mga_dma_bootstrap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), }; int mga_max_ioctl = DRM_ARRAY_SIZE(mga_ioctls);
gpl-2.0
DarkforestGroup/sony-kernel-msm7x30-ics
arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c
1496
3330
/* * arch/arm/mach-orion5x/rd88f6183-ap-ge-setup.c * * Marvell Orion-1-90 AP GE Reference Design Setup * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/pci.h> #include <linux/irq.h> #include <linux/mtd/physmap.h> #include <linux/mv643xx_eth.h> #include <linux/spi/spi.h> #include <linux/spi/orion_spi.h> #include <linux/spi/flash.h> #include <linux/ethtool.h> #include <net/dsa.h> #include <asm/mach-types.h> #include <asm/gpio.h> #include <asm/leds.h> #include <asm/mach/arch.h> #include <asm/mach/pci.h> #include <mach/orion5x.h> #include "common.h" #include "mpp.h" static struct mv643xx_eth_platform_data rd88f6183ap_ge_eth_data = { .phy_addr = -1, .speed = SPEED_1000, .duplex = DUPLEX_FULL, }; static struct dsa_chip_data rd88f6183ap_ge_switch_chip_data = { .port_names[0] = "lan1", .port_names[1] = "lan2", .port_names[2] = "lan3", .port_names[3] = "lan4", .port_names[4] = "wan", .port_names[5] = "cpu", }; static struct dsa_platform_data rd88f6183ap_ge_switch_plat_data = { .nr_chips = 1, .chip = &rd88f6183ap_ge_switch_chip_data, }; static struct mtd_partition rd88f6183ap_ge_partitions[] = { { .name = "kernel", .offset = 0x00000000, .size = 0x00200000, }, { .name = "rootfs", .offset = 0x00200000, .size = 0x00500000, }, { .name = "nvram", .offset = 0x00700000, .size = 0x00080000, }, }; static struct flash_platform_data rd88f6183ap_ge_spi_slave_data = { .type = "m25p64", .nr_parts = ARRAY_SIZE(rd88f6183ap_ge_partitions), .parts = rd88f6183ap_ge_partitions, }; static struct spi_board_info __initdata rd88f6183ap_ge_spi_slave_info[] = { { .modalias = "m25p80", .platform_data = &rd88f6183ap_ge_spi_slave_data, .irq = NO_IRQ, .max_speed_hz = 20000000, .bus_num = 0, .chip_select = 0, }, }; static void __init rd88f6183ap_ge_init(void) { /* * 
Setup basic Orion functions. Need to be called early. */ orion5x_init(); /* * Configure peripherals. */ orion5x_ehci0_init(); orion5x_eth_init(&rd88f6183ap_ge_eth_data); orion5x_eth_switch_init(&rd88f6183ap_ge_switch_plat_data, gpio_to_irq(3)); spi_register_board_info(rd88f6183ap_ge_spi_slave_info, ARRAY_SIZE(rd88f6183ap_ge_spi_slave_info)); orion5x_spi_init(); orion5x_uart0_init(); } static struct hw_pci rd88f6183ap_ge_pci __initdata = { .nr_controllers = 2, .swizzle = pci_std_swizzle, .setup = orion5x_pci_sys_setup, .scan = orion5x_pci_sys_scan_bus, .map_irq = orion5x_pci_map_irq, }; static int __init rd88f6183ap_ge_pci_init(void) { if (machine_is_rd88f6183ap_ge()) { orion5x_pci_disable(); pci_common_init(&rd88f6183ap_ge_pci); } return 0; } subsys_initcall(rd88f6183ap_ge_pci_init); MACHINE_START(RD88F6183AP_GE, "Marvell Orion-1-90 AP GE Reference Design") /* Maintainer: Lennert Buytenhek <buytenh@marvell.com> */ .phys_io = ORION5X_REGS_PHYS_BASE, .io_pg_offst = ((ORION5X_REGS_VIRT_BASE) >> 18) & 0xFFFC, .boot_params = 0x00000100, .init_machine = rd88f6183ap_ge_init, .map_io = orion5x_map_io, .init_irq = orion5x_init_irq, .timer = &orion5x_timer, .fixup = tag_fixup_mem32, MACHINE_END
gpl-2.0
estiko/android_kernel_cyanogen_msm8916
drivers/media/platform/s5p-g2d/g2d.c
2008
21504
/*
 * Samsung S5P G2D - 2D Graphics Accelerator Driver
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Kamil Debski, <k.debski@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/version.h>
#include <linux/timer.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/platform_device.h>

#include <media/v4l2-mem2mem.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-core.h>
#include <media/videobuf2-dma-contig.h>

#include "g2d.h"
#include "g2d-regs.h"

/* Recover the driver context from a struct v4l2_fh embedded in it. */
#define fh2ctx(__fh) container_of(__fh, struct g2d_ctx, fh)

/* Pixel formats the G2D engine supports, with the matching hardware
 * color-mode register value. */
static struct g2d_fmt formats[] = {
	{
		.name	= "XRGB_8888",
		.fourcc	= V4L2_PIX_FMT_RGB32,
		.depth	= 32,
		.hw	= COLOR_MODE(ORDER_XRGB, MODE_XRGB_8888),
	},
	{
		.name	= "RGB_565",
		.fourcc	= V4L2_PIX_FMT_RGB565X,
		.depth	= 16,
		.hw	= COLOR_MODE(ORDER_XRGB, MODE_RGB_565),
	},
	{
		.name	= "XRGB_1555",
		.fourcc	= V4L2_PIX_FMT_RGB555X,
		.depth	= 16,
		.hw	= COLOR_MODE(ORDER_XRGB, MODE_XRGB_1555),
	},
	{
		.name	= "XRGB_4444",
		.fourcc	= V4L2_PIX_FMT_RGB444,
		.depth	= 16,
		.hw	= COLOR_MODE(ORDER_XRGB, MODE_XRGB_4444),
	},
	{
		.name	= "PACKED_RGB_888",
		.fourcc	= V4L2_PIX_FMT_RGB24,
		.depth	= 24,
		.hw	= COLOR_MODE(ORDER_XRGB, MODE_PACKED_RGB_888),
	},
};
#define NUM_FORMATS ARRAY_SIZE(formats)

/* Default frame used for freshly opened contexts (full-size XRGB8888). */
static struct g2d_frame def_frame = {
	.width		= DEFAULT_WIDTH,
	.height		= DEFAULT_HEIGHT,
	.c_width	= DEFAULT_WIDTH,
	.c_height	= DEFAULT_HEIGHT,
	.o_width	= 0,
	.o_height	= 0,
	.fmt		= &formats[0],
	.right		= DEFAULT_WIDTH,
	.bottom		= DEFAULT_HEIGHT,
};

/* Look up the driver format entry matching a V4L2 pixelformat, or NULL. */
static struct g2d_fmt *find_fmt(struct v4l2_format *f)
{
	unsigned int i;
	for (i = 0; i < NUM_FORMATS; i++) {
		if (formats[i].fourcc == f->fmt.pix.pixelformat)
			return &formats[i];
	}
	return NULL;
}

/* Map a buffer type to the context's input (OUTPUT) or result (CAPTURE)
 * frame state. */
static struct g2d_frame *get_frame(struct g2d_ctx *ctx,
				   enum v4l2_buf_type type)
{
	switch (type) {
	case V4L2_BUF_TYPE_VIDEO_OUTPUT:
		return &ctx->in;
	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
		return &ctx->out;
	default:
		return ERR_PTR(-EINVAL);
	}
}

/* videobuf2 queue_setup: single plane sized from the current frame. */
static int g2d_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
			   unsigned int *nbuffers, unsigned int *nplanes,
			   unsigned int sizes[], void *alloc_ctxs[])
{
	struct g2d_ctx *ctx = vb2_get_drv_priv(vq);
	struct g2d_frame *f = get_frame(ctx, vq->type);

	if (IS_ERR(f))
		return PTR_ERR(f);

	sizes[0] = f->size;
	*nplanes = 1;
	alloc_ctxs[0] = ctx->dev->alloc_ctx;

	if (*nbuffers == 0)
		*nbuffers = 1;

	return 0;
}

/* videobuf2 buf_prepare: declare the full frame as the payload. */
static int g2d_buf_prepare(struct vb2_buffer *vb)
{
	struct g2d_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
	struct g2d_frame *f = get_frame(ctx, vb->vb2_queue->type);

	if (IS_ERR(f))
		return PTR_ERR(f);
	vb2_set_plane_payload(vb, 0, f->size);
	return 0;
}

/* videobuf2 buf_queue: hand the buffer to the mem2mem framework. */
static void g2d_buf_queue(struct vb2_buffer *vb)
{
	struct g2d_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
	v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
}

static struct vb2_ops g2d_qops = {
	.queue_setup	= g2d_queue_setup,
	.buf_prepare	= g2d_buf_prepare,
	.buf_queue	= g2d_buf_queue,
};

/* mem2mem queue init: identical vb2 setup for the source (OUTPUT) and
 * destination (CAPTURE) queues, both DMA-contig backed. */
static int queue_init(void *priv, struct vb2_queue *src_vq,
		      struct vb2_queue *dst_vq)
{
	struct g2d_ctx *ctx = priv;
	int ret;

	src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
	src_vq->io_modes = VB2_MMAP | VB2_USERPTR;
	src_vq->drv_priv = ctx;
	src_vq->ops = &g2d_qops;
	src_vq->mem_ops = &vb2_dma_contig_memops;
	src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
	src_vq->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_COPY;

	ret = vb2_queue_init(src_vq);
	if (ret)
		return ret;

	dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	dst_vq->io_modes = VB2_MMAP | VB2_USERPTR;
	dst_vq->drv_priv = ctx;
	dst_vq->ops = &g2d_qops;
	dst_vq->mem_ops = &vb2_dma_contig_memops;
	dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
	dst_vq->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_COPY;

	return vb2_queue_init(dst_vq);
}

/* Control handler: map COLORFX to the ROP register value and pack the
 * HFLIP/VFLIP cluster into the ctx->flip bitfield. */
static int g2d_s_ctrl(struct v4l2_ctrl *ctrl)
{
	struct g2d_ctx *ctx = container_of(ctrl->handler, struct g2d_ctx,
								ctrl_handler);
	unsigned long flags;

	spin_lock_irqsave(&ctx->dev->ctrl_lock, flags);
	switch (ctrl->id) {
	case V4L2_CID_COLORFX:
		if (ctrl->val == V4L2_COLORFX_NEGATIVE)
			ctx->rop = ROP4_INVERT;
		else
			ctx->rop = ROP4_COPY;
		break;

	case V4L2_CID_HFLIP:
		ctx->flip = ctx->ctrl_hflip->val | (ctx->ctrl_vflip->val << 1);
		break;

	}
	spin_unlock_irqrestore(&ctx->dev->ctrl_lock, flags);
	return 0;
}

static const struct v4l2_ctrl_ops g2d_ctrl_ops = {
	.s_ctrl		= g2d_s_ctrl,
};

/* Register the per-context controls: HFLIP, VFLIP (clustered) and a
 * COLORFX menu restricted to NONE and NEGATIVE. */
static int g2d_setup_ctrls(struct g2d_ctx *ctx)
{
	struct g2d_dev *dev = ctx->dev;

	v4l2_ctrl_handler_init(&ctx->ctrl_handler, 3);

	ctx->ctrl_hflip = v4l2_ctrl_new_std(&ctx->ctrl_handler, &g2d_ctrl_ops,
						V4L2_CID_HFLIP, 0, 1, 1, 0);

	ctx->ctrl_vflip = v4l2_ctrl_new_std(&ctx->ctrl_handler, &g2d_ctrl_ops,
						V4L2_CID_VFLIP, 0, 1, 1, 0);

	v4l2_ctrl_new_std_menu(
		&ctx->ctrl_handler,
		&g2d_ctrl_ops,
		V4L2_CID_COLORFX,
		V4L2_COLORFX_NEGATIVE,
		~((1 << V4L2_COLORFX_NONE) | (1 << V4L2_COLORFX_NEGATIVE)),
		V4L2_COLORFX_NONE);

	if (ctx->ctrl_handler.error) {
		int err = ctx->ctrl_handler.error;
		v4l2_err(&dev->v4l2_dev, "g2d_setup_ctrls failed\n");
		v4l2_ctrl_handler_free(&ctx->ctrl_handler);
		return err;
	}

	v4l2_ctrl_cluster(2, &ctx->ctrl_hflip);

	return 0;
}

/* File open: allocate a context, give it default frames, create the
 * mem2mem context and register the V4L2 file handle and controls. */
static int g2d_open(struct file *file)
{
	struct g2d_dev *dev = video_drvdata(file);
	struct g2d_ctx *ctx = NULL;
	int ret = 0;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	ctx->dev = dev;
	/* Set default formats */
	ctx->in		= def_frame;
	ctx->out	= def_frame;

	if (mutex_lock_interruptible(&dev->mutex)) {
		kfree(ctx);
		return -ERESTARTSYS;
	}
	ctx->m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, &queue_init);
	if (IS_ERR(ctx->m2m_ctx)) {
		ret = PTR_ERR(ctx->m2m_ctx);
		mutex_unlock(&dev->mutex);
		kfree(ctx);
		return ret;
	}
	v4l2_fh_init(&ctx->fh, video_devdata(file));
	file->private_data = &ctx->fh;
	v4l2_fh_add(&ctx->fh);
	g2d_setup_ctrls(ctx);

	/* Write the default values to
the ctx struct */ v4l2_ctrl_handler_setup(&ctx->ctrl_handler); ctx->fh.ctrl_handler = &ctx->ctrl_handler; mutex_unlock(&dev->mutex); v4l2_info(&dev->v4l2_dev, "instance opened\n"); return 0; } static int g2d_release(struct file *file) { struct g2d_dev *dev = video_drvdata(file); struct g2d_ctx *ctx = fh2ctx(file->private_data); v4l2_ctrl_handler_free(&ctx->ctrl_handler); v4l2_fh_del(&ctx->fh); v4l2_fh_exit(&ctx->fh); kfree(ctx); v4l2_info(&dev->v4l2_dev, "instance closed\n"); return 0; } static int vidioc_querycap(struct file *file, void *priv, struct v4l2_capability *cap) { strncpy(cap->driver, G2D_NAME, sizeof(cap->driver) - 1); strncpy(cap->card, G2D_NAME, sizeof(cap->card) - 1); cap->bus_info[0] = 0; cap->version = KERNEL_VERSION(1, 0, 0); /* * This is only a mem-to-mem video device. The capture and output * device capability flags are left only for backward compatibility * and are scheduled for removal. */ cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING; return 0; } static int vidioc_enum_fmt(struct file *file, void *prv, struct v4l2_fmtdesc *f) { struct g2d_fmt *fmt; if (f->index >= NUM_FORMATS) return -EINVAL; fmt = &formats[f->index]; f->pixelformat = fmt->fourcc; strncpy(f->description, fmt->name, sizeof(f->description) - 1); return 0; } static int vidioc_g_fmt(struct file *file, void *prv, struct v4l2_format *f) { struct g2d_ctx *ctx = prv; struct vb2_queue *vq; struct g2d_frame *frm; vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type); if (!vq) return -EINVAL; frm = get_frame(ctx, f->type); if (IS_ERR(frm)) return PTR_ERR(frm); f->fmt.pix.width = frm->width; f->fmt.pix.height = frm->height; f->fmt.pix.field = V4L2_FIELD_NONE; f->fmt.pix.pixelformat = frm->fmt->fourcc; f->fmt.pix.bytesperline = (frm->width * frm->fmt->depth) >> 3; f->fmt.pix.sizeimage = frm->size; return 0; } static int vidioc_try_fmt(struct file *file, void *prv, struct v4l2_format *f) { struct g2d_fmt *fmt; enum v4l2_field *field; fmt 
= find_fmt(f); if (!fmt) return -EINVAL; field = &f->fmt.pix.field; if (*field == V4L2_FIELD_ANY) *field = V4L2_FIELD_NONE; else if (*field != V4L2_FIELD_NONE) return -EINVAL; if (f->fmt.pix.width > MAX_WIDTH) f->fmt.pix.width = MAX_WIDTH; if (f->fmt.pix.height > MAX_HEIGHT) f->fmt.pix.height = MAX_HEIGHT; if (f->fmt.pix.width < 1) f->fmt.pix.width = 1; if (f->fmt.pix.height < 1) f->fmt.pix.height = 1; f->fmt.pix.bytesperline = (f->fmt.pix.width * fmt->depth) >> 3; f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline; return 0; } static int vidioc_s_fmt(struct file *file, void *prv, struct v4l2_format *f) { struct g2d_ctx *ctx = prv; struct g2d_dev *dev = ctx->dev; struct vb2_queue *vq; struct g2d_frame *frm; struct g2d_fmt *fmt; int ret = 0; /* Adjust all values accordingly to the hardware capabilities * and chosen format. */ ret = vidioc_try_fmt(file, prv, f); if (ret) return ret; vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type); if (vb2_is_busy(vq)) { v4l2_err(&dev->v4l2_dev, "queue (%d) bust\n", f->type); return -EBUSY; } frm = get_frame(ctx, f->type); if (IS_ERR(frm)) return PTR_ERR(frm); fmt = find_fmt(f); if (!fmt) return -EINVAL; frm->width = f->fmt.pix.width; frm->height = f->fmt.pix.height; frm->size = f->fmt.pix.sizeimage; /* Reset crop settings */ frm->o_width = 0; frm->o_height = 0; frm->c_width = frm->width; frm->c_height = frm->height; frm->right = frm->width; frm->bottom = frm->height; frm->fmt = fmt; frm->stride = f->fmt.pix.bytesperline; return 0; } static unsigned int g2d_poll(struct file *file, struct poll_table_struct *wait) { struct g2d_ctx *ctx = fh2ctx(file->private_data); struct g2d_dev *dev = ctx->dev; unsigned int res; mutex_lock(&dev->mutex); res = v4l2_m2m_poll(file, ctx->m2m_ctx, wait); mutex_unlock(&dev->mutex); return res; } static int g2d_mmap(struct file *file, struct vm_area_struct *vma) { struct g2d_ctx *ctx = fh2ctx(file->private_data); struct g2d_dev *dev = ctx->dev; int ret; if (mutex_lock_interruptible(&dev->mutex)) 
return -ERESTARTSYS; ret = v4l2_m2m_mmap(file, ctx->m2m_ctx, vma); mutex_unlock(&dev->mutex); return ret; } static int vidioc_reqbufs(struct file *file, void *priv, struct v4l2_requestbuffers *reqbufs) { struct g2d_ctx *ctx = priv; return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs); } static int vidioc_querybuf(struct file *file, void *priv, struct v4l2_buffer *buf) { struct g2d_ctx *ctx = priv; return v4l2_m2m_querybuf(file, ctx->m2m_ctx, buf); } static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf) { struct g2d_ctx *ctx = priv; return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf); } static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf) { struct g2d_ctx *ctx = priv; return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf); } static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type type) { struct g2d_ctx *ctx = priv; return v4l2_m2m_streamon(file, ctx->m2m_ctx, type); } static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type type) { struct g2d_ctx *ctx = priv; return v4l2_m2m_streamoff(file, ctx->m2m_ctx, type); } static int vidioc_cropcap(struct file *file, void *priv, struct v4l2_cropcap *cr) { struct g2d_ctx *ctx = priv; struct g2d_frame *f; f = get_frame(ctx, cr->type); if (IS_ERR(f)) return PTR_ERR(f); cr->bounds.left = 0; cr->bounds.top = 0; cr->bounds.width = f->width; cr->bounds.height = f->height; cr->defrect = cr->bounds; return 0; } static int vidioc_g_crop(struct file *file, void *prv, struct v4l2_crop *cr) { struct g2d_ctx *ctx = prv; struct g2d_frame *f; f = get_frame(ctx, cr->type); if (IS_ERR(f)) return PTR_ERR(f); cr->c.left = f->o_height; cr->c.top = f->o_width; cr->c.width = f->c_width; cr->c.height = f->c_height; return 0; } static int vidioc_try_crop(struct file *file, void *prv, const struct v4l2_crop *cr) { struct g2d_ctx *ctx = prv; struct g2d_dev *dev = ctx->dev; struct g2d_frame *f; f = get_frame(ctx, cr->type); if (IS_ERR(f)) return PTR_ERR(f); if (cr->c.top < 0 
|| cr->c.left < 0) { v4l2_err(&dev->v4l2_dev, "doesn't support negative values for top & left\n"); return -EINVAL; } return 0; } static int vidioc_s_crop(struct file *file, void *prv, const struct v4l2_crop *cr) { struct g2d_ctx *ctx = prv; struct g2d_frame *f; int ret; ret = vidioc_try_crop(file, prv, cr); if (ret) return ret; f = get_frame(ctx, cr->type); if (IS_ERR(f)) return PTR_ERR(f); f->c_width = cr->c.width; f->c_height = cr->c.height; f->o_width = cr->c.left; f->o_height = cr->c.top; f->bottom = f->o_height + f->c_height; f->right = f->o_width + f->c_width; return 0; } static void g2d_lock(void *prv) { struct g2d_ctx *ctx = prv; struct g2d_dev *dev = ctx->dev; mutex_lock(&dev->mutex); } static void g2d_unlock(void *prv) { struct g2d_ctx *ctx = prv; struct g2d_dev *dev = ctx->dev; mutex_unlock(&dev->mutex); } static void job_abort(void *prv) { struct g2d_ctx *ctx = prv; struct g2d_dev *dev = ctx->dev; int ret; if (dev->curr == NULL) /* No job currently running */ return; ret = wait_event_timeout(dev->irq_queue, dev->curr == NULL, msecs_to_jiffies(G2D_TIMEOUT)); } static void device_run(void *prv) { struct g2d_ctx *ctx = prv; struct g2d_dev *dev = ctx->dev; struct vb2_buffer *src, *dst; unsigned long flags; u32 cmd = 0; dev->curr = ctx; src = v4l2_m2m_next_src_buf(ctx->m2m_ctx); dst = v4l2_m2m_next_dst_buf(ctx->m2m_ctx); clk_enable(dev->gate); g2d_reset(dev); spin_lock_irqsave(&dev->ctrl_lock, flags); g2d_set_src_size(dev, &ctx->in); g2d_set_src_addr(dev, vb2_dma_contig_plane_dma_addr(src, 0)); g2d_set_dst_size(dev, &ctx->out); g2d_set_dst_addr(dev, vb2_dma_contig_plane_dma_addr(dst, 0)); g2d_set_rop4(dev, ctx->rop); g2d_set_flip(dev, ctx->flip); if (ctx->in.c_width != ctx->out.c_width || ctx->in.c_height != ctx->out.c_height) { if (dev->variant->hw_rev == TYPE_G2D_3X) cmd |= CMD_V3_ENABLE_STRETCH; else g2d_set_v41_stretch(dev, &ctx->in, &ctx->out); } g2d_set_cmd(dev, cmd); g2d_start(dev); spin_unlock_irqrestore(&dev->ctrl_lock, flags); } static irqreturn_t 
g2d_isr(int irq, void *prv) { struct g2d_dev *dev = prv; struct g2d_ctx *ctx = dev->curr; struct vb2_buffer *src, *dst; g2d_clear_int(dev); clk_disable(dev->gate); BUG_ON(ctx == NULL); src = v4l2_m2m_src_buf_remove(ctx->m2m_ctx); dst = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx); BUG_ON(src == NULL); BUG_ON(dst == NULL); dst->v4l2_buf.timecode = src->v4l2_buf.timecode; dst->v4l2_buf.timestamp = src->v4l2_buf.timestamp; v4l2_m2m_buf_done(src, VB2_BUF_STATE_DONE); v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE); v4l2_m2m_job_finish(dev->m2m_dev, ctx->m2m_ctx); dev->curr = NULL; wake_up(&dev->irq_queue); return IRQ_HANDLED; } static const struct v4l2_file_operations g2d_fops = { .owner = THIS_MODULE, .open = g2d_open, .release = g2d_release, .poll = g2d_poll, .unlocked_ioctl = video_ioctl2, .mmap = g2d_mmap, }; static const struct v4l2_ioctl_ops g2d_ioctl_ops = { .vidioc_querycap = vidioc_querycap, .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt, .vidioc_g_fmt_vid_cap = vidioc_g_fmt, .vidioc_try_fmt_vid_cap = vidioc_try_fmt, .vidioc_s_fmt_vid_cap = vidioc_s_fmt, .vidioc_enum_fmt_vid_out = vidioc_enum_fmt, .vidioc_g_fmt_vid_out = vidioc_g_fmt, .vidioc_try_fmt_vid_out = vidioc_try_fmt, .vidioc_s_fmt_vid_out = vidioc_s_fmt, .vidioc_reqbufs = vidioc_reqbufs, .vidioc_querybuf = vidioc_querybuf, .vidioc_qbuf = vidioc_qbuf, .vidioc_dqbuf = vidioc_dqbuf, .vidioc_streamon = vidioc_streamon, .vidioc_streamoff = vidioc_streamoff, .vidioc_g_crop = vidioc_g_crop, .vidioc_s_crop = vidioc_s_crop, .vidioc_cropcap = vidioc_cropcap, }; static struct video_device g2d_videodev = { .name = G2D_NAME, .fops = &g2d_fops, .ioctl_ops = &g2d_ioctl_ops, .minor = -1, .release = video_device_release, .vfl_dir = VFL_DIR_M2M, }; static struct v4l2_m2m_ops g2d_m2m_ops = { .device_run = device_run, .job_abort = job_abort, .lock = g2d_lock, .unlock = g2d_unlock, }; static const struct of_device_id exynos_g2d_match[]; static int g2d_probe(struct platform_device *pdev) { struct g2d_dev *dev; struct video_device *vfd; 
struct resource *res; const struct of_device_id *of_id; int ret = 0; dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL); if (!dev) return -ENOMEM; spin_lock_init(&dev->ctrl_lock); mutex_init(&dev->mutex); atomic_set(&dev->num_inst, 0); init_waitqueue_head(&dev->irq_queue); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); dev->regs = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(dev->regs)) return PTR_ERR(dev->regs); dev->clk = clk_get(&pdev->dev, "sclk_fimg2d"); if (IS_ERR(dev->clk)) { dev_err(&pdev->dev, "failed to get g2d clock\n"); return -ENXIO; } ret = clk_prepare(dev->clk); if (ret) { dev_err(&pdev->dev, "failed to prepare g2d clock\n"); goto put_clk; } dev->gate = clk_get(&pdev->dev, "fimg2d"); if (IS_ERR(dev->gate)) { dev_err(&pdev->dev, "failed to get g2d clock gate\n"); ret = -ENXIO; goto unprep_clk; } ret = clk_prepare(dev->gate); if (ret) { dev_err(&pdev->dev, "failed to prepare g2d clock gate\n"); goto put_clk_gate; } res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (!res) { dev_err(&pdev->dev, "failed to find IRQ\n"); ret = -ENXIO; goto unprep_clk_gate; } dev->irq = res->start; ret = devm_request_irq(&pdev->dev, dev->irq, g2d_isr, 0, pdev->name, dev); if (ret) { dev_err(&pdev->dev, "failed to install IRQ\n"); goto put_clk_gate; } dev->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev); if (IS_ERR(dev->alloc_ctx)) { ret = PTR_ERR(dev->alloc_ctx); goto unprep_clk_gate; } ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev); if (ret) goto alloc_ctx_cleanup; vfd = video_device_alloc(); if (!vfd) { v4l2_err(&dev->v4l2_dev, "Failed to allocate video device\n"); ret = -ENOMEM; goto unreg_v4l2_dev; } *vfd = g2d_videodev; vfd->lock = &dev->mutex; vfd->v4l2_dev = &dev->v4l2_dev; ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0); if (ret) { v4l2_err(&dev->v4l2_dev, "Failed to register video device\n"); goto rel_vdev; } video_set_drvdata(vfd, dev); snprintf(vfd->name, sizeof(vfd->name), "%s", g2d_videodev.name); dev->vfd = vfd; 
v4l2_info(&dev->v4l2_dev, "device registered as /dev/video%d\n", vfd->num); platform_set_drvdata(pdev, dev); dev->m2m_dev = v4l2_m2m_init(&g2d_m2m_ops); if (IS_ERR(dev->m2m_dev)) { v4l2_err(&dev->v4l2_dev, "Failed to init mem2mem device\n"); ret = PTR_ERR(dev->m2m_dev); goto unreg_video_dev; } def_frame.stride = (def_frame.width * def_frame.fmt->depth) >> 3; if (!pdev->dev.of_node) { dev->variant = g2d_get_drv_data(pdev); } else { of_id = of_match_node(exynos_g2d_match, pdev->dev.of_node); if (!of_id) { ret = -ENODEV; goto unreg_video_dev; } dev->variant = (struct g2d_variant *)of_id->data; } return 0; unreg_video_dev: video_unregister_device(dev->vfd); rel_vdev: video_device_release(vfd); unreg_v4l2_dev: v4l2_device_unregister(&dev->v4l2_dev); alloc_ctx_cleanup: vb2_dma_contig_cleanup_ctx(dev->alloc_ctx); unprep_clk_gate: clk_unprepare(dev->gate); put_clk_gate: clk_put(dev->gate); unprep_clk: clk_unprepare(dev->clk); put_clk: clk_put(dev->clk); return ret; } static int g2d_remove(struct platform_device *pdev) { struct g2d_dev *dev = (struct g2d_dev *)platform_get_drvdata(pdev); v4l2_info(&dev->v4l2_dev, "Removing " G2D_NAME); v4l2_m2m_release(dev->m2m_dev); video_unregister_device(dev->vfd); v4l2_device_unregister(&dev->v4l2_dev); vb2_dma_contig_cleanup_ctx(dev->alloc_ctx); clk_unprepare(dev->gate); clk_put(dev->gate); clk_unprepare(dev->clk); clk_put(dev->clk); return 0; } static struct g2d_variant g2d_drvdata_v3x = { .hw_rev = TYPE_G2D_3X, /* Revision 3.0 for S5PV210 and Exynos4210 */ }; static struct g2d_variant g2d_drvdata_v4x = { .hw_rev = TYPE_G2D_4X, /* Revision 4.1 for Exynos4X12 and Exynos5 */ }; static const struct of_device_id exynos_g2d_match[] = { { .compatible = "samsung,s5pv210-g2d", .data = &g2d_drvdata_v3x, }, { .compatible = "samsung,exynos4212-g2d", .data = &g2d_drvdata_v4x, }, {}, }; MODULE_DEVICE_TABLE(of, exynos_g2d_match); static struct platform_device_id g2d_driver_ids[] = { { .name = "s5p-g2d", .driver_data = (unsigned 
long)&g2d_drvdata_v3x, }, { .name = "s5p-g2d-v4x", .driver_data = (unsigned long)&g2d_drvdata_v4x, }, {}, }; MODULE_DEVICE_TABLE(platform, g2d_driver_ids); static struct platform_driver g2d_pdrv = { .probe = g2d_probe, .remove = g2d_remove, .id_table = g2d_driver_ids, .driver = { .name = G2D_NAME, .owner = THIS_MODULE, .of_match_table = exynos_g2d_match, }, }; module_platform_driver(g2d_pdrv); MODULE_AUTHOR("Kamil Debski <k.debski@samsung.com>"); MODULE_DESCRIPTION("S5P G2D 2d graphics accelerator driver"); MODULE_LICENSE("GPL");
gpl-2.0
Jackeagle/slte_kernel
arch/arm/mach-omap1/timer32k.c
2264
5790
/* * linux/arch/arm/mach-omap1/timer32k.c * * OMAP 32K Timer * * Copyright (C) 2004 - 2005 Nokia Corporation * Partial timer rewrite and additional dynamic tick timer support by * Tony Lindgen <tony@atomide.com> and * Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com> * OMAP Dual-mode timer framework support by Timo Teras * * MPU timer code based on the older MPU timer code for OMAP * Copyright (C) 2000 RidgeRun, Inc. * Author: Greg Lonnon <glonnon@ridgerun.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ #include <linux/kernel.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/sched.h> #include <linux/spinlock.h> #include <linux/err.h> #include <linux/clk.h> #include <linux/clocksource.h> #include <linux/clockchips.h> #include <linux/io.h> #include <asm/irq.h> #include <asm/mach/irq.h> #include <asm/mach/time.h> #include <plat/counter-32k.h> #include <mach/hardware.h> #include "common.h" /* * --------------------------------------------------------------------------- * 32KHz OS timer * * This currently works only on 16xx, as 1510 does not have the continuous * 32KHz synchronous timer. The 32KHz synchronous timer is used to keep track * of time in addition to the 32KHz OS timer. Using only the 32KHz OS timer * on 1510 would be possible, but the timer would not be as accurate as * with the 32KHz synchronized timer. * --------------------------------------------------------------------------- */ /* 16xx specific defines */ #define OMAP1_32K_TIMER_BASE 0xfffb9000 #define OMAP1_32KSYNC_TIMER_BASE 0xfffbc400 #define OMAP1_32K_TIMER_CR 0x08 #define OMAP1_32K_TIMER_TVR 0x00 #define OMAP1_32K_TIMER_TCR 0x04 #define OMAP_32K_TICKS_PER_SEC (32768) /* * TRM says 1 / HZ = ( TVR + 1) / 32768, so TRV = (32768 / HZ) - 1 * so with HZ = 128, TVR = 255. 
*/ #define OMAP_32K_TIMER_TICK_PERIOD ((OMAP_32K_TICKS_PER_SEC / HZ) - 1) #define JIFFIES_TO_HW_TICKS(nr_jiffies, clock_rate) \ (((nr_jiffies) * (clock_rate)) / HZ) static inline void omap_32k_timer_write(int val, int reg) { omap_writew(val, OMAP1_32K_TIMER_BASE + reg); } static inline unsigned long omap_32k_timer_read(int reg) { return omap_readl(OMAP1_32K_TIMER_BASE + reg) & 0xffffff; } static inline void omap_32k_timer_start(unsigned long load_val) { if (!load_val) load_val = 1; omap_32k_timer_write(load_val, OMAP1_32K_TIMER_TVR); omap_32k_timer_write(0x0f, OMAP1_32K_TIMER_CR); } static inline void omap_32k_timer_stop(void) { omap_32k_timer_write(0x0, OMAP1_32K_TIMER_CR); } #define omap_32k_timer_ack_irq() static int omap_32k_timer_set_next_event(unsigned long delta, struct clock_event_device *dev) { omap_32k_timer_start(delta); return 0; } static void omap_32k_timer_set_mode(enum clock_event_mode mode, struct clock_event_device *evt) { omap_32k_timer_stop(); switch (mode) { case CLOCK_EVT_MODE_PERIODIC: omap_32k_timer_start(OMAP_32K_TIMER_TICK_PERIOD); break; case CLOCK_EVT_MODE_ONESHOT: case CLOCK_EVT_MODE_UNUSED: case CLOCK_EVT_MODE_SHUTDOWN: break; case CLOCK_EVT_MODE_RESUME: break; } } static struct clock_event_device clockevent_32k_timer = { .name = "32k-timer", .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, .set_next_event = omap_32k_timer_set_next_event, .set_mode = omap_32k_timer_set_mode, }; static irqreturn_t omap_32k_timer_interrupt(int irq, void *dev_id) { struct clock_event_device *evt = &clockevent_32k_timer; omap_32k_timer_ack_irq(); evt->event_handler(evt); return IRQ_HANDLED; } static struct irqaction omap_32k_timer_irq = { .name = "32KHz timer", .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, .handler = omap_32k_timer_interrupt, }; static __init void omap_init_32k_timer(void) { setup_irq(INT_OS_TIMER, &omap_32k_timer_irq); clockevent_32k_timer.cpumask = cpumask_of(0); clockevents_config_and_register(&clockevent_32k_timer, 
OMAP_32K_TICKS_PER_SEC, 1, 0xfffffffe); } /* * --------------------------------------------------------------------------- * Timer initialization * --------------------------------------------------------------------------- */ int __init omap_32k_timer_init(void) { int ret = -ENODEV; if (cpu_is_omap16xx()) { void __iomem *base; struct clk *sync32k_ick; base = ioremap(OMAP1_32KSYNC_TIMER_BASE, SZ_1K); if (!base) { pr_err("32k_counter: failed to map base addr\n"); return -ENODEV; } sync32k_ick = clk_get(NULL, "omap_32ksync_ick"); if (!IS_ERR(sync32k_ick)) clk_enable(sync32k_ick); ret = omap_init_clocksource_32k(base); } if (!ret) omap_init_32k_timer(); return ret; }
gpl-2.0
AndroidGX/SimpleGX-KK-4.4.4_G901F
arch/arm/mach-pxa/magician.c
2264
18263
/* * Support for HTC Magician PDA phones: * i-mate JAM, O2 Xda mini, Orange SPV M500, Qtek s100, Qtek s110 * and T-Mobile MDA Compact. * * Copyright (c) 2006-2007 Philipp Zabel * * Based on hx4700.c, spitz.c and others. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/delay.h> #include <linux/gpio.h> #include <linux/gpio_keys.h> #include <linux/input.h> #include <linux/mfd/htc-egpio.h> #include <linux/mfd/htc-pasic3.h> #include <linux/mtd/physmap.h> #include <linux/pda_power.h> #include <linux/pwm_backlight.h> #include <linux/regulator/driver.h> #include <linux/regulator/gpio-regulator.h> #include <linux/regulator/machine.h> #include <linux/usb/gpio_vbus.h> #include <linux/i2c/pxa-i2c.h> #include <mach/hardware.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/system_info.h> #include <mach/pxa27x.h> #include <mach/magician.h> #include <linux/platform_data/video-pxafb.h> #include <linux/platform_data/mmc-pxamci.h> #include <linux/platform_data/irda-pxaficp.h> #include <linux/platform_data/usb-ohci-pxa27x.h> #include "devices.h" #include "generic.h" static unsigned long magician_pin_config[] __initdata = { /* SDRAM and Static Memory I/O Signals */ GPIO20_nSDCS_2, GPIO21_nSDCS_3, GPIO15_nCS_1, GPIO78_nCS_2, /* PASIC3 */ GPIO79_nCS_3, /* EGPIO CPLD */ GPIO80_nCS_4, GPIO33_nCS_5, /* I2C */ GPIO117_I2C_SCL, GPIO118_I2C_SDA, /* PWM 0 */ GPIO16_PWM0_OUT, /* I2S */ GPIO28_I2S_BITCLK_OUT, GPIO29_I2S_SDATA_IN, GPIO31_I2S_SYNC, GPIO113_I2S_SYSCLK, /* SSP 1 */ GPIO23_SSP1_SCLK, GPIO24_SSP1_SFRM, GPIO25_SSP1_TXD, /* SSP 2 */ GPIO19_SSP2_SCLK, GPIO14_SSP2_SFRM, GPIO89_SSP2_TXD, GPIO88_SSP2_RXD, /* MMC */ GPIO32_MMC_CLK, GPIO92_MMC_DAT_0, GPIO109_MMC_DAT_1, GPIO110_MMC_DAT_2, GPIO111_MMC_DAT_3, GPIO112_MMC_CMD, /* LCD */ 
GPIOxx_LCD_TFT_16BPP, /* QCI */ GPIO12_CIF_DD_7, GPIO17_CIF_DD_6, GPIO50_CIF_DD_3, GPIO51_CIF_DD_2, GPIO52_CIF_DD_4, GPIO53_CIF_MCLK, GPIO54_CIF_PCLK, GPIO55_CIF_DD_1, GPIO81_CIF_DD_0, GPIO82_CIF_DD_5, GPIO84_CIF_FV, GPIO85_CIF_LV, /* Magician specific input GPIOs */ GPIO9_GPIO, /* unknown */ GPIO10_GPIO, /* GSM_IRQ */ GPIO13_GPIO, /* CPLD_IRQ */ GPIO107_GPIO, /* DS1WM_IRQ */ GPIO108_GPIO, /* GSM_READY */ GPIO115_GPIO, /* nPEN_IRQ */ /* I2C */ GPIO117_I2C_SCL, GPIO118_I2C_SDA, }; /* * IRDA */ static struct pxaficp_platform_data magician_ficp_info = { .gpio_pwdown = GPIO83_MAGICIAN_nIR_EN, .transceiver_cap = IR_SIRMODE | IR_OFF, }; /* * GPIO Keys */ #define INIT_KEY(_code, _gpio, _desc) \ { \ .code = KEY_##_code, \ .gpio = _gpio, \ .desc = _desc, \ .type = EV_KEY, \ .wakeup = 1, \ } static struct gpio_keys_button magician_button_table[] = { INIT_KEY(POWER, GPIO0_MAGICIAN_KEY_POWER, "Power button"), INIT_KEY(ESC, GPIO37_MAGICIAN_KEY_HANGUP, "Hangup button"), INIT_KEY(F10, GPIO38_MAGICIAN_KEY_CONTACTS, "Contacts button"), INIT_KEY(CALENDAR, GPIO90_MAGICIAN_KEY_CALENDAR, "Calendar button"), INIT_KEY(CAMERA, GPIO91_MAGICIAN_KEY_CAMERA, "Camera button"), INIT_KEY(UP, GPIO93_MAGICIAN_KEY_UP, "Up button"), INIT_KEY(DOWN, GPIO94_MAGICIAN_KEY_DOWN, "Down button"), INIT_KEY(LEFT, GPIO95_MAGICIAN_KEY_LEFT, "Left button"), INIT_KEY(RIGHT, GPIO96_MAGICIAN_KEY_RIGHT, "Right button"), INIT_KEY(KPENTER, GPIO97_MAGICIAN_KEY_ENTER, "Action button"), INIT_KEY(RECORD, GPIO98_MAGICIAN_KEY_RECORD, "Record button"), INIT_KEY(VOLUMEUP, GPIO100_MAGICIAN_KEY_VOL_UP, "Volume up"), INIT_KEY(VOLUMEDOWN, GPIO101_MAGICIAN_KEY_VOL_DOWN, "Volume down"), INIT_KEY(PHONE, GPIO102_MAGICIAN_KEY_PHONE, "Phone button"), INIT_KEY(PLAY, GPIO99_MAGICIAN_HEADPHONE_IN, "Headset button"), }; static struct gpio_keys_platform_data gpio_keys_data = { .buttons = magician_button_table, .nbuttons = ARRAY_SIZE(magician_button_table), }; static struct platform_device gpio_keys = { .name = "gpio-keys", .dev = { 
.platform_data = &gpio_keys_data, }, .id = -1, }; /* * EGPIO (Xilinx CPLD) * * 7 32-bit aligned 8-bit registers: 3x output, 1x irq, 3x input */ static struct resource egpio_resources[] = { [0] = { .start = PXA_CS3_PHYS, .end = PXA_CS3_PHYS + 0x20 - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = PXA_GPIO_TO_IRQ(GPIO13_MAGICIAN_CPLD_IRQ), .end = PXA_GPIO_TO_IRQ(GPIO13_MAGICIAN_CPLD_IRQ), .flags = IORESOURCE_IRQ, }, }; static struct htc_egpio_chip egpio_chips[] = { [0] = { .reg_start = 0, .gpio_base = MAGICIAN_EGPIO(0, 0), .num_gpios = 24, .direction = HTC_EGPIO_OUTPUT, .initial_values = 0x40, /* EGPIO_MAGICIAN_GSM_RESET */ }, [1] = { .reg_start = 4, .gpio_base = MAGICIAN_EGPIO(4, 0), .num_gpios = 24, .direction = HTC_EGPIO_INPUT, }, }; static struct htc_egpio_platform_data egpio_info = { .reg_width = 8, .bus_width = 32, .irq_base = IRQ_BOARD_START, .num_irqs = 4, .ack_register = 3, .chip = egpio_chips, .num_chips = ARRAY_SIZE(egpio_chips), }; static struct platform_device egpio = { .name = "htc-egpio", .id = -1, .resource = egpio_resources, .num_resources = ARRAY_SIZE(egpio_resources), .dev = { .platform_data = &egpio_info, }, }; /* * LCD - Toppoly TD028STEB1 or Samsung LTP280QV */ static struct pxafb_mode_info toppoly_modes[] = { { .pixclock = 96153, .bpp = 16, .xres = 240, .yres = 320, .hsync_len = 11, .vsync_len = 3, .left_margin = 19, .upper_margin = 2, .right_margin = 10, .lower_margin = 2, .sync = 0, }, }; static struct pxafb_mode_info samsung_modes[] = { { .pixclock = 96153, .bpp = 16, .xres = 240, .yres = 320, .hsync_len = 8, .vsync_len = 4, .left_margin = 9, .upper_margin = 4, .right_margin = 9, .lower_margin = 4, .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, }, }; static void toppoly_lcd_power(int on, struct fb_var_screeninfo *si) { pr_debug("Toppoly LCD power\n"); if (on) { pr_debug("on\n"); gpio_set_value(EGPIO_MAGICIAN_TOPPOLY_POWER, 1); gpio_set_value(GPIO106_MAGICIAN_LCD_POWER_3, 1); udelay(2000); gpio_set_value(EGPIO_MAGICIAN_LCD_POWER, 1); 
udelay(2000); /* FIXME: enable LCDC here */ udelay(2000); gpio_set_value(GPIO104_MAGICIAN_LCD_POWER_1, 1); udelay(2000); gpio_set_value(GPIO105_MAGICIAN_LCD_POWER_2, 1); } else { pr_debug("off\n"); msleep(15); gpio_set_value(GPIO105_MAGICIAN_LCD_POWER_2, 0); udelay(500); gpio_set_value(GPIO104_MAGICIAN_LCD_POWER_1, 0); udelay(1000); gpio_set_value(GPIO106_MAGICIAN_LCD_POWER_3, 0); gpio_set_value(EGPIO_MAGICIAN_LCD_POWER, 0); } } static void samsung_lcd_power(int on, struct fb_var_screeninfo *si) { pr_debug("Samsung LCD power\n"); if (on) { pr_debug("on\n"); if (system_rev < 3) gpio_set_value(GPIO75_MAGICIAN_SAMSUNG_POWER, 1); else gpio_set_value(EGPIO_MAGICIAN_LCD_POWER, 1); mdelay(10); gpio_set_value(GPIO106_MAGICIAN_LCD_POWER_3, 1); mdelay(10); gpio_set_value(GPIO104_MAGICIAN_LCD_POWER_1, 1); mdelay(30); gpio_set_value(GPIO105_MAGICIAN_LCD_POWER_2, 1); mdelay(10); } else { pr_debug("off\n"); mdelay(10); gpio_set_value(GPIO105_MAGICIAN_LCD_POWER_2, 0); mdelay(30); gpio_set_value(GPIO104_MAGICIAN_LCD_POWER_1, 0); mdelay(10); gpio_set_value(GPIO106_MAGICIAN_LCD_POWER_3, 0); mdelay(10); if (system_rev < 3) gpio_set_value(GPIO75_MAGICIAN_SAMSUNG_POWER, 0); else gpio_set_value(EGPIO_MAGICIAN_LCD_POWER, 0); } } static struct pxafb_mach_info toppoly_info = { .modes = toppoly_modes, .num_modes = 1, .fixed_modes = 1, .lcd_conn = LCD_COLOR_TFT_16BPP, .pxafb_lcd_power = toppoly_lcd_power, }; static struct pxafb_mach_info samsung_info = { .modes = samsung_modes, .num_modes = 1, .fixed_modes = 1, .lcd_conn = LCD_COLOR_TFT_16BPP | LCD_PCLK_EDGE_FALL |\ LCD_ALTERNATE_MAPPING, .pxafb_lcd_power = samsung_lcd_power, }; /* * Backlight */ static struct gpio magician_bl_gpios[] = { { EGPIO_MAGICIAN_BL_POWER, GPIOF_DIR_OUT, "Backlight power" }, { EGPIO_MAGICIAN_BL_POWER2, GPIOF_DIR_OUT, "Backlight power 2" }, }; static int magician_backlight_init(struct device *dev) { return gpio_request_array(ARRAY_AND_SIZE(magician_bl_gpios)); } static int magician_backlight_notify(struct device 
*dev, int brightness) { gpio_set_value(EGPIO_MAGICIAN_BL_POWER, brightness); if (brightness >= 200) { gpio_set_value(EGPIO_MAGICIAN_BL_POWER2, 1); return brightness - 72; } else { gpio_set_value(EGPIO_MAGICIAN_BL_POWER2, 0); return brightness; } } static void magician_backlight_exit(struct device *dev) { gpio_free_array(ARRAY_AND_SIZE(magician_bl_gpios)); } static struct platform_pwm_backlight_data backlight_data = { .pwm_id = 0, .max_brightness = 272, .dft_brightness = 100, .pwm_period_ns = 30923, .init = magician_backlight_init, .notify = magician_backlight_notify, .exit = magician_backlight_exit, }; static struct platform_device backlight = { .name = "pwm-backlight", .id = -1, .dev = { .parent = &pxa27x_device_pwm0.dev, .platform_data = &backlight_data, }, }; /* * LEDs */ static struct gpio_led gpio_leds[] = { { .name = "magician::vibra", .default_trigger = "none", .gpio = GPIO22_MAGICIAN_VIBRA_EN, }, { .name = "magician::phone_bl", .default_trigger = "backlight", .gpio = GPIO103_MAGICIAN_LED_KP, }, }; static struct gpio_led_platform_data gpio_led_info = { .leds = gpio_leds, .num_leds = ARRAY_SIZE(gpio_leds), }; static struct platform_device leds_gpio = { .name = "leds-gpio", .id = -1, .dev = { .platform_data = &gpio_led_info, }, }; static struct pasic3_led pasic3_leds[] = { { .led = { .name = "magician:red", .default_trigger = "ds2760-battery.0-charging", }, .hw_num = 0, .bit2 = PASIC3_BIT2_LED0, .mask = PASIC3_MASK_LED0, }, { .led = { .name = "magician:green", .default_trigger = "ds2760-battery.0-charging-or-full", }, .hw_num = 1, .bit2 = PASIC3_BIT2_LED1, .mask = PASIC3_MASK_LED1, }, { .led = { .name = "magician:blue", .default_trigger = "bluetooth", }, .hw_num = 2, .bit2 = PASIC3_BIT2_LED2, .mask = PASIC3_MASK_LED2, }, }; static struct pasic3_leds_machinfo pasic3_leds_info = { .num_leds = ARRAY_SIZE(pasic3_leds), .power_gpio = EGPIO_MAGICIAN_LED_POWER, .leds = pasic3_leds, }; /* * PASIC3 with DS1WM */ static struct resource pasic3_resources[] = { [0] = { 
.start = PXA_CS2_PHYS, .end = PXA_CS2_PHYS + 0x1b, .flags = IORESOURCE_MEM, }, /* No IRQ handler in the PASIC3, DS1WM needs an external IRQ */ [1] = { .start = PXA_GPIO_TO_IRQ(GPIO107_MAGICIAN_DS1WM_IRQ), .end = PXA_GPIO_TO_IRQ(GPIO107_MAGICIAN_DS1WM_IRQ), .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE, } }; static struct pasic3_platform_data pasic3_platform_data = { .led_pdata = &pasic3_leds_info, .clock_rate = 4000000, }; static struct platform_device pasic3 = { .name = "pasic3", .id = -1, .num_resources = ARRAY_SIZE(pasic3_resources), .resource = pasic3_resources, .dev = { .platform_data = &pasic3_platform_data, }, }; /* * USB "Transceiver" */ static struct resource gpio_vbus_resource = { .flags = IORESOURCE_IRQ, .start = IRQ_MAGICIAN_VBUS, .end = IRQ_MAGICIAN_VBUS, }; static struct gpio_vbus_mach_info gpio_vbus_info = { .gpio_pullup = GPIO27_MAGICIAN_USBC_PUEN, .gpio_vbus = EGPIO_MAGICIAN_CABLE_STATE_USB, }; static struct platform_device gpio_vbus = { .name = "gpio-vbus", .id = -1, .num_resources = 1, .resource = &gpio_vbus_resource, .dev = { .platform_data = &gpio_vbus_info, }, }; /* * External power */ static int power_supply_init(struct device *dev) { return gpio_request(EGPIO_MAGICIAN_CABLE_STATE_AC, "CABLE_STATE_AC"); } static int magician_is_ac_online(void) { return gpio_get_value(EGPIO_MAGICIAN_CABLE_STATE_AC); } static void power_supply_exit(struct device *dev) { gpio_free(EGPIO_MAGICIAN_CABLE_STATE_AC); } static char *magician_supplicants[] = { "ds2760-battery.0", "backup-battery" }; static struct pda_power_pdata power_supply_info = { .init = power_supply_init, .is_ac_online = magician_is_ac_online, .exit = power_supply_exit, .supplied_to = magician_supplicants, .num_supplicants = ARRAY_SIZE(magician_supplicants), }; static struct resource power_supply_resources[] = { [0] = { .name = "ac", .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE | IORESOURCE_IRQ_LOWEDGE, .start = IRQ_MAGICIAN_VBUS, .end = IRQ_MAGICIAN_VBUS, }, [1] = { .name = "usb", .flags 
= IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE | IORESOURCE_IRQ_LOWEDGE, .start = IRQ_MAGICIAN_VBUS, .end = IRQ_MAGICIAN_VBUS, }, }; static struct platform_device power_supply = { .name = "pda-power", .id = -1, .dev = { .platform_data = &power_supply_info, }, .resource = power_supply_resources, .num_resources = ARRAY_SIZE(power_supply_resources), }; /* * Battery charger */ static struct regulator_consumer_supply bq24022_consumers[] = { REGULATOR_SUPPLY("vbus_draw", NULL), REGULATOR_SUPPLY("ac_draw", NULL), }; static struct regulator_init_data bq24022_init_data = { .constraints = { .max_uA = 500000, .valid_ops_mask = REGULATOR_CHANGE_CURRENT | REGULATOR_CHANGE_STATUS, }, .num_consumer_supplies = ARRAY_SIZE(bq24022_consumers), .consumer_supplies = bq24022_consumers, }; static struct gpio bq24022_gpios[] = { { EGPIO_MAGICIAN_BQ24022_ISET2, GPIOF_OUT_INIT_LOW, "bq24022_iset2" }, }; static struct gpio_regulator_state bq24022_states[] = { { .value = 100000, .gpios = (0 << 0) }, { .value = 500000, .gpios = (1 << 0) }, }; static struct gpio_regulator_config bq24022_info = { .supply_name = "bq24022", .enable_gpio = GPIO30_MAGICIAN_BQ24022_nCHARGE_EN, .enable_high = 0, .enabled_at_boot = 0, .gpios = bq24022_gpios, .nr_gpios = ARRAY_SIZE(bq24022_gpios), .states = bq24022_states, .nr_states = ARRAY_SIZE(bq24022_states), .type = REGULATOR_CURRENT, .init_data = &bq24022_init_data, }; static struct platform_device bq24022 = { .name = "gpio-regulator", .id = -1, .dev = { .platform_data = &bq24022_info, }, }; /* * MMC/SD */ static int magician_mci_init(struct device *dev, irq_handler_t detect_irq, void *data) { return request_irq(IRQ_MAGICIAN_SD, detect_irq, IRQF_DISABLED, "mmc card detect", data); } static void magician_mci_exit(struct device *dev, void *data) { free_irq(IRQ_MAGICIAN_SD, data); } static struct pxamci_platform_data magician_mci_info = { .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34, .init = magician_mci_init, .exit = magician_mci_exit, .gpio_card_detect = -1, .gpio_card_ro = 
EGPIO_MAGICIAN_nSD_READONLY, .gpio_card_ro_invert = 1, .gpio_power = EGPIO_MAGICIAN_SD_POWER, }; /* * USB OHCI */ static struct pxaohci_platform_data magician_ohci_info = { .port_mode = PMM_PERPORT_MODE, .flags = ENABLE_PORT1 | ENABLE_PORT3 | POWER_CONTROL_LOW, .power_budget = 0, }; /* * StrataFlash */ static void magician_set_vpp(struct platform_device *pdev, int vpp) { gpio_set_value(EGPIO_MAGICIAN_FLASH_VPP, vpp); } static struct resource strataflash_resource = { .start = PXA_CS0_PHYS, .end = PXA_CS0_PHYS + SZ_64M - 1, .flags = IORESOURCE_MEM, }; static struct physmap_flash_data strataflash_data = { .width = 4, .set_vpp = magician_set_vpp, }; static struct platform_device strataflash = { .name = "physmap-flash", .id = -1, .resource = &strataflash_resource, .num_resources = 1, .dev = { .platform_data = &strataflash_data, }, }; /* * I2C */ static struct i2c_pxa_platform_data i2c_info = { .fast_mode = 1, }; /* * Platform devices */ static struct platform_device *devices[] __initdata = { &gpio_keys, &egpio, &backlight, &pasic3, &bq24022, &gpio_vbus, &power_supply, &strataflash, &leds_gpio, }; static struct gpio magician_global_gpios[] = { { GPIO13_MAGICIAN_CPLD_IRQ, GPIOF_IN, "CPLD_IRQ" }, { GPIO107_MAGICIAN_DS1WM_IRQ, GPIOF_IN, "DS1WM_IRQ" }, { GPIO104_MAGICIAN_LCD_POWER_1, GPIOF_OUT_INIT_LOW, "LCD power 1" }, { GPIO105_MAGICIAN_LCD_POWER_2, GPIOF_OUT_INIT_LOW, "LCD power 2" }, { GPIO106_MAGICIAN_LCD_POWER_3, GPIOF_OUT_INIT_LOW, "LCD power 3" }, { GPIO83_MAGICIAN_nIR_EN, GPIOF_OUT_INIT_HIGH, "nIR_EN" }, }; static void __init magician_init(void) { void __iomem *cpld; int lcd_select; int err; pxa2xx_mfp_config(ARRAY_AND_SIZE(magician_pin_config)); err = gpio_request_array(ARRAY_AND_SIZE(magician_global_gpios)); if (err) pr_err("magician: Failed to request GPIOs: %d\n", err); pxa_set_ffuart_info(NULL); pxa_set_btuart_info(NULL); pxa_set_stuart_info(NULL); platform_add_devices(ARRAY_AND_SIZE(devices)); pxa_set_ficp_info(&magician_ficp_info); 
pxa27x_set_i2c_power_info(NULL); pxa_set_i2c_info(&i2c_info); pxa_set_mci_info(&magician_mci_info); pxa_set_ohci_info(&magician_ohci_info); /* Check LCD type we have */ cpld = ioremap_nocache(PXA_CS3_PHYS, 0x1000); if (cpld) { u8 board_id = __raw_readb(cpld+0x14); iounmap(cpld); system_rev = board_id & 0x7; lcd_select = board_id & 0x8; pr_info("LCD type: %s\n", lcd_select ? "Samsung" : "Toppoly"); if (lcd_select && (system_rev < 3)) gpio_request_one(GPIO75_MAGICIAN_SAMSUNG_POWER, GPIOF_OUT_INIT_LOW, "SAMSUNG_POWER"); pxa_set_fb_info(NULL, lcd_select ? &samsung_info : &toppoly_info); } else pr_err("LCD detection: CPLD mapping failed\n"); } MACHINE_START(MAGICIAN, "HTC Magician") .atag_offset = 0x100, .map_io = pxa27x_map_io, .nr_irqs = MAGICIAN_NR_IRQS, .init_irq = pxa27x_init_irq, .handle_irq = pxa27x_handle_irq, .init_machine = magician_init, .init_time = pxa_timer_init, .restart = pxa_restart, MACHINE_END
gpl-2.0
kannu1994/maguro_kernel
drivers/platform/x86/topstar-laptop.c
2776
5057
/* * ACPI driver for Topstar notebooks (hotkeys support only) * * Copyright (c) 2009 Herton Ronaldo Krzesinski <herton@mandriva.com.br> * * Implementation inspired by existing x86 platform drivers, in special * asus/eepc/fujitsu-laptop, thanks to their authors * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/acpi.h> #include <linux/input.h> #include <linux/input/sparse-keymap.h> #define ACPI_TOPSTAR_CLASS "topstar" struct topstar_hkey { struct input_dev *inputdev; }; static const struct key_entry topstar_keymap[] = { { KE_KEY, 0x80, { KEY_BRIGHTNESSUP } }, { KE_KEY, 0x81, { KEY_BRIGHTNESSDOWN } }, { KE_KEY, 0x83, { KEY_VOLUMEUP } }, { KE_KEY, 0x84, { KEY_VOLUMEDOWN } }, { KE_KEY, 0x85, { KEY_MUTE } }, { KE_KEY, 0x86, { KEY_SWITCHVIDEOMODE } }, { KE_KEY, 0x87, { KEY_F13 } }, /* touchpad enable/disable key */ { KE_KEY, 0x88, { KEY_WLAN } }, { KE_KEY, 0x8a, { KEY_WWW } }, { KE_KEY, 0x8b, { KEY_MAIL } }, { KE_KEY, 0x8c, { KEY_MEDIA } }, /* Known non hotkey events don't handled or that we don't care yet */ { KE_IGNORE, 0x8e, }, { KE_IGNORE, 0x8f, }, { KE_IGNORE, 0x90, }, /* * 'G key' generate two event codes, convert to only * one event/key code for now, consider replacing by * a switch (3G switch - SW_3G?) */ { KE_KEY, 0x96, { KEY_F14 } }, { KE_KEY, 0x97, { KEY_F14 } }, { KE_END, 0 } }; static void acpi_topstar_notify(struct acpi_device *device, u32 event) { static bool dup_evnt[2]; bool *dup; struct topstar_hkey *hkey = acpi_driver_data(device); /* 0x83 and 0x84 key events comes duplicated... 
*/ if (event == 0x83 || event == 0x84) { dup = &dup_evnt[event - 0x83]; if (*dup) { *dup = false; return; } *dup = true; } if (!sparse_keymap_report_event(hkey->inputdev, event, 1, true)) pr_info("unknown event = 0x%02x\n", event); } static int acpi_topstar_fncx_switch(struct acpi_device *device, bool state) { acpi_status status; union acpi_object fncx_params[1] = { { .type = ACPI_TYPE_INTEGER } }; struct acpi_object_list fncx_arg_list = { 1, &fncx_params[0] }; fncx_params[0].integer.value = state ? 0x86 : 0x87; status = acpi_evaluate_object(device->handle, "FNCX", &fncx_arg_list, NULL); if (ACPI_FAILURE(status)) { pr_err("Unable to switch FNCX notifications\n"); return -ENODEV; } return 0; } static int acpi_topstar_init_hkey(struct topstar_hkey *hkey) { struct input_dev *input; int error; input = input_allocate_device(); if (!input) { pr_err("Unable to allocate input device\n"); return -ENOMEM; } input->name = "Topstar Laptop extra buttons"; input->phys = "topstar/input0"; input->id.bustype = BUS_HOST; error = sparse_keymap_setup(input, topstar_keymap, NULL); if (error) { pr_err("Unable to setup input device keymap\n"); goto err_free_dev; } error = input_register_device(input); if (error) { pr_err("Unable to register input device\n"); goto err_free_keymap; } hkey->inputdev = input; return 0; err_free_keymap: sparse_keymap_free(input); err_free_dev: input_free_device(input); return error; } static int acpi_topstar_add(struct acpi_device *device) { struct topstar_hkey *tps_hkey; tps_hkey = kzalloc(sizeof(struct topstar_hkey), GFP_KERNEL); if (!tps_hkey) return -ENOMEM; strcpy(acpi_device_name(device), "Topstar TPSACPI"); strcpy(acpi_device_class(device), ACPI_TOPSTAR_CLASS); if (acpi_topstar_fncx_switch(device, true)) goto add_err; if (acpi_topstar_init_hkey(tps_hkey)) goto add_err; device->driver_data = tps_hkey; return 0; add_err: kfree(tps_hkey); return -ENODEV; } static int acpi_topstar_remove(struct acpi_device *device, int type) { struct topstar_hkey *tps_hkey 
= acpi_driver_data(device); acpi_topstar_fncx_switch(device, false); sparse_keymap_free(tps_hkey->inputdev); input_unregister_device(tps_hkey->inputdev); kfree(tps_hkey); return 0; } static const struct acpi_device_id topstar_device_ids[] = { { "TPSACPI01", 0 }, { "", 0 }, }; MODULE_DEVICE_TABLE(acpi, topstar_device_ids); static struct acpi_driver acpi_topstar_driver = { .name = "Topstar laptop ACPI driver", .class = ACPI_TOPSTAR_CLASS, .ids = topstar_device_ids, .ops = { .add = acpi_topstar_add, .remove = acpi_topstar_remove, .notify = acpi_topstar_notify, }, }; static int __init topstar_laptop_init(void) { int ret; ret = acpi_bus_register_driver(&acpi_topstar_driver); if (ret < 0) return ret; pr_info("ACPI extras driver loaded\n"); return 0; } static void __exit topstar_laptop_exit(void) { acpi_bus_unregister_driver(&acpi_topstar_driver); } module_init(topstar_laptop_init); module_exit(topstar_laptop_exit); MODULE_AUTHOR("Herton Ronaldo Krzesinski"); MODULE_DESCRIPTION("Topstar Laptop ACPI Extras driver"); MODULE_LICENSE("GPL");
gpl-2.0
padovan/bluetooth-next
net/rds/loop.c
3288
5914
/* * Copyright (c) 2006 Oracle. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * */ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/in.h> #include "rds.h" #include "loop.h" static DEFINE_SPINLOCK(loop_conns_lock); static LIST_HEAD(loop_conns); /* * This 'loopback' transport is a special case for flows that originate * and terminate on the same machine. * * Connection build-up notices if the destination address is thought of * as a local address by a transport. At that time it decides to use the * loopback transport instead of the bound transport of the sending socket. * * The loopback transport's sending path just hands the sent rds_message * straight to the receiving path via an embedded rds_incoming. 
*/ /* * Usually a message transits both the sender and receiver's conns as it * flows to the receiver. In the loopback case, though, the receive path * is handed the sending conn so the sense of the addresses is reversed. */ static int rds_loop_xmit(struct rds_connection *conn, struct rds_message *rm, unsigned int hdr_off, unsigned int sg, unsigned int off) { struct scatterlist *sgp = &rm->data.op_sg[sg]; int ret = sizeof(struct rds_header) + be32_to_cpu(rm->m_inc.i_hdr.h_len); /* Do not send cong updates to loopback */ if (rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) { rds_cong_map_updated(conn->c_fcong, ~(u64) 0); ret = min_t(int, ret, sgp->length - conn->c_xmit_data_off); goto out; } BUG_ON(hdr_off || sg || off); rds_inc_init(&rm->m_inc, conn, conn->c_laddr); /* For the embedded inc. Matching put is in loop_inc_free() */ rds_message_addref(rm); rds_recv_incoming(conn, conn->c_laddr, conn->c_faddr, &rm->m_inc, GFP_KERNEL, KM_USER0); rds_send_drop_acked(conn, be64_to_cpu(rm->m_inc.i_hdr.h_sequence), NULL); rds_inc_put(&rm->m_inc); out: return ret; } /* * See rds_loop_xmit(). Since our inc is embedded in the rm, we * make sure the rm lives at least until the inc is done. */ static void rds_loop_inc_free(struct rds_incoming *inc) { struct rds_message *rm = container_of(inc, struct rds_message, m_inc); rds_message_put(rm); } /* we need to at least give the thread something to succeed */ static int rds_loop_recv(struct rds_connection *conn) { return 0; } struct rds_loop_connection { struct list_head loop_node; struct rds_connection *conn; }; /* * Even the loopback transport needs to keep track of its connections, * so it can call rds_conn_destroy() on them on exit. N.B. there are * 1+ loopback addresses (127.*.*.*) so it's not a bug to have * multiple loopback conns allocated, although rather useless. 
*/ static int rds_loop_conn_alloc(struct rds_connection *conn, gfp_t gfp) { struct rds_loop_connection *lc; unsigned long flags; lc = kzalloc(sizeof(struct rds_loop_connection), GFP_KERNEL); if (!lc) return -ENOMEM; INIT_LIST_HEAD(&lc->loop_node); lc->conn = conn; conn->c_transport_data = lc; spin_lock_irqsave(&loop_conns_lock, flags); list_add_tail(&lc->loop_node, &loop_conns); spin_unlock_irqrestore(&loop_conns_lock, flags); return 0; } static void rds_loop_conn_free(void *arg) { struct rds_loop_connection *lc = arg; unsigned long flags; rdsdebug("lc %p\n", lc); spin_lock_irqsave(&loop_conns_lock, flags); list_del(&lc->loop_node); spin_unlock_irqrestore(&loop_conns_lock, flags); kfree(lc); } static int rds_loop_conn_connect(struct rds_connection *conn) { rds_connect_complete(conn); return 0; } static void rds_loop_conn_shutdown(struct rds_connection *conn) { } void rds_loop_exit(void) { struct rds_loop_connection *lc, *_lc; LIST_HEAD(tmp_list); /* avoid calling conn_destroy with irqs off */ spin_lock_irq(&loop_conns_lock); list_splice(&loop_conns, &tmp_list); INIT_LIST_HEAD(&loop_conns); spin_unlock_irq(&loop_conns_lock); list_for_each_entry_safe(lc, _lc, &tmp_list, loop_node) { WARN_ON(lc->conn->c_passive); rds_conn_destroy(lc->conn); } } /* * This is missing .xmit_* because loop doesn't go through generic * rds_send_xmit() and doesn't call rds_recv_incoming(). .listen_stop and * .laddr_check are missing because transport.c doesn't iterate over * rds_loop_transport. */ struct rds_transport rds_loop_transport = { .xmit = rds_loop_xmit, .recv = rds_loop_recv, .conn_alloc = rds_loop_conn_alloc, .conn_free = rds_loop_conn_free, .conn_connect = rds_loop_conn_connect, .conn_shutdown = rds_loop_conn_shutdown, .inc_copy_to_user = rds_message_inc_copy_to_user, .inc_free = rds_loop_inc_free, .t_name = "loopback", };
gpl-2.0
TheSSJ/zf2_mmkernel
drivers/power/goldfish_battery.c
3288
6224
/*
 * Power supply driver for the goldfish emulator
 *
 * Copyright (C) 2008 Google, Inc.
 * Copyright (C) 2012 Intel, Inc.
 * Copyright (C) 2013 Intel, Inc.
 * Author: Mike Lockwood <lockwood@android.com>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/module.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/power_supply.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/io.h>

/* Per-device state; allocated with devm_* so it lives as long as the
 * platform device is bound. */
struct goldfish_battery_data {
	void __iomem *reg_base;		/* mapped MMIO register window */
	int irq;
	spinlock_t lock;		/* serializes MMIO access in the IRQ path */

	struct power_supply battery;
	struct power_supply ac;
};

/* MMIO register accessors; addr is a byte offset into reg_base. */
#define GOLDFISH_BATTERY_READ(data, addr)   (readl(data->reg_base + addr))
#define GOLDFISH_BATTERY_WRITE(data, addr, x)   (writel(x, data->reg_base + addr))

/*
 * Temporary variable used between goldfish_battery_probe() and
 * goldfish_battery_open().
 */
static struct goldfish_battery_data *battery_data;

enum {
	/* status register */
	BATTERY_INT_STATUS	    = 0x00,
	/* set this to enable IRQ */
	BATTERY_INT_ENABLE	    = 0x04,

	BATTERY_AC_ONLINE       = 0x08,
	BATTERY_STATUS          = 0x0C,
	BATTERY_HEALTH          = 0x10,
	BATTERY_PRESENT         = 0x14,
	BATTERY_CAPACITY        = 0x18,

	/* interrupt cause bits reported in BATTERY_INT_STATUS */
	BATTERY_STATUS_CHANGED	= 1U << 0,
	AC_STATUS_CHANGED	= 1U << 1,
	BATTERY_INT_MASK        = BATTERY_STATUS_CHANGED | AC_STATUS_CHANGED,
};

/*
 * power_supply get_property callback for the AC supply: only ONLINE is
 * supported, read straight from the emulator register.
 */
static int goldfish_ac_get_property(struct power_supply *psy,
			enum power_supply_property psp,
			union power_supply_propval *val)
{
	struct goldfish_battery_data *data = container_of(psy,
		struct goldfish_battery_data, ac);
	int ret = 0;

	switch (psp) {
	case POWER_SUPPLY_PROP_ONLINE:
		val->intval = GOLDFISH_BATTERY_READ(data, BATTERY_AC_ONLINE);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}

/*
 * power_supply get_property callback for the battery: most values come
 * directly from emulator registers; technology is hard-coded to Li-ion.
 */
static int goldfish_battery_get_property(struct power_supply *psy,
				 enum power_supply_property psp,
				 union power_supply_propval *val)
{
	struct goldfish_battery_data *data = container_of(psy,
		struct goldfish_battery_data, battery);
	int ret = 0;

	switch (psp) {
	case POWER_SUPPLY_PROP_STATUS:
		val->intval = GOLDFISH_BATTERY_READ(data, BATTERY_STATUS);
		break;
	case POWER_SUPPLY_PROP_HEALTH:
		val->intval = GOLDFISH_BATTERY_READ(data, BATTERY_HEALTH);
		break;
	case POWER_SUPPLY_PROP_PRESENT:
		val->intval = GOLDFISH_BATTERY_READ(data, BATTERY_PRESENT);
		break;
	case POWER_SUPPLY_PROP_TECHNOLOGY:
		val->intval = POWER_SUPPLY_TECHNOLOGY_LION;
		break;
	case POWER_SUPPLY_PROP_CAPACITY:
		val->intval = GOLDFISH_BATTERY_READ(data, BATTERY_CAPACITY);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static enum power_supply_property goldfish_battery_props[] = {
	POWER_SUPPLY_PROP_STATUS,
	POWER_SUPPLY_PROP_HEALTH,
	POWER_SUPPLY_PROP_PRESENT,
	POWER_SUPPLY_PROP_TECHNOLOGY,
	POWER_SUPPLY_PROP_CAPACITY,
};

static enum power_supply_property goldfish_ac_props[] = {
	POWER_SUPPLY_PROP_ONLINE,
};

/*
 * IRQ handler: reading BATTERY_INT_STATUS clears the interrupt; notify
 * whichever supply changed.  Returns IRQ_NONE when no cause bit is set
 * (the line is requested IRQF_SHARED).
 */
static irqreturn_t goldfish_battery_interrupt(int irq, void *dev_id)
{
	unsigned long irq_flags;
	struct goldfish_battery_data *data = dev_id;
	uint32_t status;

	spin_lock_irqsave(&data->lock, irq_flags);

	/* read status flags, which will clear the interrupt */
	status = GOLDFISH_BATTERY_READ(data, BATTERY_INT_STATUS);
	status &= BATTERY_INT_MASK;

	if (status & BATTERY_STATUS_CHANGED)
		power_supply_changed(&data->battery);
	if (status & AC_STATUS_CHANGED)
		power_supply_changed(&data->ac);

	spin_unlock_irqrestore(&data->lock, irq_flags);
	return status ? IRQ_HANDLED : IRQ_NONE;
}

/*
 * Probe: map the MMIO window, hook the (shared) interrupt, register both
 * supplies, then enable interrupt generation in the device as the last
 * step so no IRQ fires before the supplies exist.
 * Most resources are devm-managed; only the power supplies need explicit
 * unregistration (done in remove, and for "ac" on the error path here).
 */
static int goldfish_battery_probe(struct platform_device *pdev)
{
	int ret;
	struct resource *r;
	struct goldfish_battery_data *data;

	data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
	if (data == NULL)
		return -ENOMEM;

	spin_lock_init(&data->lock);

	data->battery.properties = goldfish_battery_props;
	data->battery.num_properties = ARRAY_SIZE(goldfish_battery_props);
	data->battery.get_property = goldfish_battery_get_property;
	data->battery.name = "battery";
	data->battery.type = POWER_SUPPLY_TYPE_BATTERY;

	data->ac.properties = goldfish_ac_props;
	data->ac.num_properties = ARRAY_SIZE(goldfish_ac_props);
	data->ac.get_property = goldfish_ac_get_property;
	data->ac.name = "ac";
	data->ac.type = POWER_SUPPLY_TYPE_MAINS;

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (r == NULL) {
		dev_err(&pdev->dev, "platform_get_resource failed\n");
		return -ENODEV;
	}

	data->reg_base = devm_ioremap(&pdev->dev, r->start, resource_size(r));
	if (data->reg_base == NULL) {
		dev_err(&pdev->dev, "unable to remap MMIO\n");
		return -ENOMEM;
	}

	data->irq = platform_get_irq(pdev, 0);
	if (data->irq < 0) {
		dev_err(&pdev->dev, "platform_get_irq failed\n");
		return -ENODEV;
	}

	ret = devm_request_irq(&pdev->dev, data->irq, goldfish_battery_interrupt,
						IRQF_SHARED, pdev->name, data);
	if (ret)
		return ret;

	ret = power_supply_register(&pdev->dev, &data->ac);
	if (ret)
		return ret;

	ret = power_supply_register(&pdev->dev, &data->battery);
	if (ret) {
		/* roll back the already-registered AC supply */
		power_supply_unregister(&data->ac);
		return ret;
	}

	platform_set_drvdata(pdev, data);
	battery_data = data;

	/* Only now allow the device to raise interrupts. */
	GOLDFISH_BATTERY_WRITE(data, BATTERY_INT_ENABLE, BATTERY_INT_MASK);
	return 0;
}

/* Remove: unregister both supplies; devm handles IRQ/MMIO/memory. */
static int goldfish_battery_remove(struct platform_device *pdev)
{
	struct goldfish_battery_data *data = platform_get_drvdata(pdev);

	power_supply_unregister(&data->battery);
	power_supply_unregister(&data->ac);
	battery_data = NULL;
	return 0;
}

static struct platform_driver goldfish_battery_device = {
	.probe		= goldfish_battery_probe,
	.remove		= goldfish_battery_remove,
	.driver = {
		.name = "goldfish-battery"
	}
};
module_platform_driver(goldfish_battery_device);

MODULE_AUTHOR("Mike Lockwood lockwood@android.com");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Battery driver for the Goldfish emulator");
gpl-2.0
GAXUSXX/GaXusKernel
drivers/infiniband/hw/qib/qib_mad.c
4056
62867
/* * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation. * All rights reserved. * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <rdma/ib_smi.h> #include "qib.h" #include "qib_mad.h" static int reply(struct ib_smp *smp) { /* * The verbs framework will handle the directed/LID route * packet changes. 
*/ smp->method = IB_MGMT_METHOD_GET_RESP; if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) smp->status |= IB_SMP_DIRECTION; return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY; } static void qib_send_trap(struct qib_ibport *ibp, void *data, unsigned len) { struct ib_mad_send_buf *send_buf; struct ib_mad_agent *agent; struct ib_smp *smp; int ret; unsigned long flags; unsigned long timeout; agent = ibp->send_agent; if (!agent) return; /* o14-3.2.1 */ if (!(ppd_from_ibp(ibp)->lflags & QIBL_LINKACTIVE)) return; /* o14-2 */ if (ibp->trap_timeout && time_before(jiffies, ibp->trap_timeout)) return; send_buf = ib_create_send_mad(agent, 0, 0, 0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA, GFP_ATOMIC); if (IS_ERR(send_buf)) return; smp = send_buf->mad; smp->base_version = IB_MGMT_BASE_VERSION; smp->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED; smp->class_version = 1; smp->method = IB_MGMT_METHOD_TRAP; ibp->tid++; smp->tid = cpu_to_be64(ibp->tid); smp->attr_id = IB_SMP_ATTR_NOTICE; /* o14-1: smp->mkey = 0; */ memcpy(smp->data, data, len); spin_lock_irqsave(&ibp->lock, flags); if (!ibp->sm_ah) { if (ibp->sm_lid != be16_to_cpu(IB_LID_PERMISSIVE)) { struct ib_ah *ah; struct ib_ah_attr attr; memset(&attr, 0, sizeof attr); attr.dlid = ibp->sm_lid; attr.port_num = ppd_from_ibp(ibp)->port; ah = ib_create_ah(ibp->qp0->ibqp.pd, &attr); if (IS_ERR(ah)) ret = -EINVAL; else { send_buf->ah = ah; ibp->sm_ah = to_iah(ah); ret = 0; } } else ret = -EINVAL; } else { send_buf->ah = &ibp->sm_ah->ibah; ret = 0; } spin_unlock_irqrestore(&ibp->lock, flags); if (!ret) ret = ib_post_send_mad(send_buf, NULL); if (!ret) { /* 4.096 usec. */ timeout = (4096 * (1UL << ibp->subnet_timeout)) / 1000; ibp->trap_timeout = jiffies + usecs_to_jiffies(timeout); } else { ib_free_send_mad(send_buf); ibp->trap_timeout = 0; } } /* * Send a bad [PQ]_Key trap (ch. 14.3.8). 
*/ void qib_bad_pqkey(struct qib_ibport *ibp, __be16 trap_num, u32 key, u32 sl, u32 qp1, u32 qp2, __be16 lid1, __be16 lid2) { struct ib_mad_notice_attr data; if (trap_num == IB_NOTICE_TRAP_BAD_PKEY) ibp->pkey_violations++; else ibp->qkey_violations++; ibp->n_pkt_drops++; /* Send violation trap */ data.generic_type = IB_NOTICE_TYPE_SECURITY; data.prod_type_msb = 0; data.prod_type_lsb = IB_NOTICE_PROD_CA; data.trap_num = trap_num; data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid); data.toggle_count = 0; memset(&data.details, 0, sizeof data.details); data.details.ntc_257_258.lid1 = lid1; data.details.ntc_257_258.lid2 = lid2; data.details.ntc_257_258.key = cpu_to_be32(key); data.details.ntc_257_258.sl_qp1 = cpu_to_be32((sl << 28) | qp1); data.details.ntc_257_258.qp2 = cpu_to_be32(qp2); qib_send_trap(ibp, &data, sizeof data); } /* * Send a bad M_Key trap (ch. 14.3.9). */ static void qib_bad_mkey(struct qib_ibport *ibp, struct ib_smp *smp) { struct ib_mad_notice_attr data; /* Send violation trap */ data.generic_type = IB_NOTICE_TYPE_SECURITY; data.prod_type_msb = 0; data.prod_type_lsb = IB_NOTICE_PROD_CA; data.trap_num = IB_NOTICE_TRAP_BAD_MKEY; data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid); data.toggle_count = 0; memset(&data.details, 0, sizeof data.details); data.details.ntc_256.lid = data.issuer_lid; data.details.ntc_256.method = smp->method; data.details.ntc_256.attr_id = smp->attr_id; data.details.ntc_256.attr_mod = smp->attr_mod; data.details.ntc_256.mkey = smp->mkey; if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) { u8 hop_cnt; data.details.ntc_256.dr_slid = smp->dr_slid; data.details.ntc_256.dr_trunc_hop = IB_NOTICE_TRAP_DR_NOTICE; hop_cnt = smp->hop_cnt; if (hop_cnt > ARRAY_SIZE(data.details.ntc_256.dr_rtn_path)) { data.details.ntc_256.dr_trunc_hop |= IB_NOTICE_TRAP_DR_TRUNC; hop_cnt = ARRAY_SIZE(data.details.ntc_256.dr_rtn_path); } data.details.ntc_256.dr_trunc_hop |= hop_cnt; memcpy(data.details.ntc_256.dr_rtn_path, smp->return_path, 
hop_cnt); } qib_send_trap(ibp, &data, sizeof data); } /* * Send a Port Capability Mask Changed trap (ch. 14.3.11). */ void qib_cap_mask_chg(struct qib_ibport *ibp) { struct ib_mad_notice_attr data; data.generic_type = IB_NOTICE_TYPE_INFO; data.prod_type_msb = 0; data.prod_type_lsb = IB_NOTICE_PROD_CA; data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG; data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid); data.toggle_count = 0; memset(&data.details, 0, sizeof data.details); data.details.ntc_144.lid = data.issuer_lid; data.details.ntc_144.new_cap_mask = cpu_to_be32(ibp->port_cap_flags); qib_send_trap(ibp, &data, sizeof data); } /* * Send a System Image GUID Changed trap (ch. 14.3.12). */ void qib_sys_guid_chg(struct qib_ibport *ibp) { struct ib_mad_notice_attr data; data.generic_type = IB_NOTICE_TYPE_INFO; data.prod_type_msb = 0; data.prod_type_lsb = IB_NOTICE_PROD_CA; data.trap_num = IB_NOTICE_TRAP_SYS_GUID_CHG; data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid); data.toggle_count = 0; memset(&data.details, 0, sizeof data.details); data.details.ntc_145.lid = data.issuer_lid; data.details.ntc_145.new_sys_guid = ib_qib_sys_image_guid; qib_send_trap(ibp, &data, sizeof data); } /* * Send a Node Description Changed trap (ch. 14.3.13). 
*/ void qib_node_desc_chg(struct qib_ibport *ibp) { struct ib_mad_notice_attr data; data.generic_type = IB_NOTICE_TYPE_INFO; data.prod_type_msb = 0; data.prod_type_lsb = IB_NOTICE_PROD_CA; data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG; data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid); data.toggle_count = 0; memset(&data.details, 0, sizeof data.details); data.details.ntc_144.lid = data.issuer_lid; data.details.ntc_144.local_changes = 1; data.details.ntc_144.change_flags = IB_NOTICE_TRAP_NODE_DESC_CHG; qib_send_trap(ibp, &data, sizeof data); } static int subn_get_nodedescription(struct ib_smp *smp, struct ib_device *ibdev) { if (smp->attr_mod) smp->status |= IB_SMP_INVALID_FIELD; memcpy(smp->data, ibdev->node_desc, sizeof(smp->data)); return reply(smp); } static int subn_get_nodeinfo(struct ib_smp *smp, struct ib_device *ibdev, u8 port) { struct ib_node_info *nip = (struct ib_node_info *)&smp->data; struct qib_devdata *dd = dd_from_ibdev(ibdev); u32 vendor, majrev, minrev; unsigned pidx = port - 1; /* IB number port from 1, hdw from 0 */ /* GUID 0 is illegal */ if (smp->attr_mod || pidx >= dd->num_pports || dd->pport[pidx].guid == 0) smp->status |= IB_SMP_INVALID_FIELD; else nip->port_guid = dd->pport[pidx].guid; nip->base_version = 1; nip->class_version = 1; nip->node_type = 1; /* channel adapter */ nip->num_ports = ibdev->phys_port_cnt; /* This is already in network order */ nip->sys_guid = ib_qib_sys_image_guid; nip->node_guid = dd->pport->guid; /* Use first-port GUID as node */ nip->partition_cap = cpu_to_be16(qib_get_npkeys(dd)); nip->device_id = cpu_to_be16(dd->deviceid); majrev = dd->majrev; minrev = dd->minrev; nip->revision = cpu_to_be32((majrev << 16) | minrev); nip->local_port_num = port; vendor = dd->vendorid; nip->vendor_id[0] = QIB_SRC_OUI_1; nip->vendor_id[1] = QIB_SRC_OUI_2; nip->vendor_id[2] = QIB_SRC_OUI_3; return reply(smp); } static int subn_get_guidinfo(struct ib_smp *smp, struct ib_device *ibdev, u8 port) { struct qib_devdata *dd = 
dd_from_ibdev(ibdev); u32 startgx = 8 * be32_to_cpu(smp->attr_mod); __be64 *p = (__be64 *) smp->data; unsigned pidx = port - 1; /* IB number port from 1, hdw from 0 */ /* 32 blocks of 8 64-bit GUIDs per block */ memset(smp->data, 0, sizeof(smp->data)); if (startgx == 0 && pidx < dd->num_pports) { struct qib_pportdata *ppd = dd->pport + pidx; struct qib_ibport *ibp = &ppd->ibport_data; __be64 g = ppd->guid; unsigned i; /* GUID 0 is illegal */ if (g == 0) smp->status |= IB_SMP_INVALID_FIELD; else { /* The first is a copy of the read-only HW GUID. */ p[0] = g; for (i = 1; i < QIB_GUIDS_PER_PORT; i++) p[i] = ibp->guids[i - 1]; } } else smp->status |= IB_SMP_INVALID_FIELD; return reply(smp); } static void set_link_width_enabled(struct qib_pportdata *ppd, u32 w) { (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LWID_ENB, w); } static void set_link_speed_enabled(struct qib_pportdata *ppd, u32 s) { (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_SPD_ENB, s); } static int get_overrunthreshold(struct qib_pportdata *ppd) { return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_OVERRUN_THRESH); } /** * set_overrunthreshold - set the overrun threshold * @ppd: the physical port data * @n: the new threshold * * Note that this will only take effect when the link state changes. */ static int set_overrunthreshold(struct qib_pportdata *ppd, unsigned n) { (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_OVERRUN_THRESH, (u32)n); return 0; } static int get_phyerrthreshold(struct qib_pportdata *ppd) { return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_PHYERR_THRESH); } /** * set_phyerrthreshold - set the physical error threshold * @ppd: the physical port data * @n: the new threshold * * Note that this will only take effect when the link state changes. 
*/ static int set_phyerrthreshold(struct qib_pportdata *ppd, unsigned n) { (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PHYERR_THRESH, (u32)n); return 0; } /** * get_linkdowndefaultstate - get the default linkdown state * @ppd: the physical port data * * Returns zero if the default is POLL, 1 if the default is SLEEP. */ static int get_linkdowndefaultstate(struct qib_pportdata *ppd) { return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_LINKDEFAULT) == IB_LINKINITCMD_SLEEP; } static int check_mkey(struct qib_ibport *ibp, struct ib_smp *smp, int mad_flags) { int ret = 0; /* Is the mkey in the process of expiring? */ if (ibp->mkey_lease_timeout && time_after_eq(jiffies, ibp->mkey_lease_timeout)) { /* Clear timeout and mkey protection field. */ ibp->mkey_lease_timeout = 0; ibp->mkeyprot = 0; } /* M_Key checking depends on Portinfo:M_Key_protect_bits */ if ((mad_flags & IB_MAD_IGNORE_MKEY) == 0 && ibp->mkey != 0 && ibp->mkey != smp->mkey && (smp->method == IB_MGMT_METHOD_SET || smp->method == IB_MGMT_METHOD_TRAP_REPRESS || (smp->method == IB_MGMT_METHOD_GET && ibp->mkeyprot >= 2))) { if (ibp->mkey_violations != 0xFFFF) ++ibp->mkey_violations; if (!ibp->mkey_lease_timeout && ibp->mkey_lease_period) ibp->mkey_lease_timeout = jiffies + ibp->mkey_lease_period * HZ; /* Generate a trap notice. 
*/ qib_bad_mkey(ibp, smp); ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED; } else if (ibp->mkey_lease_timeout) ibp->mkey_lease_timeout = 0; return ret; } static int subn_get_portinfo(struct ib_smp *smp, struct ib_device *ibdev, u8 port) { struct qib_devdata *dd; struct qib_pportdata *ppd; struct qib_ibport *ibp; struct ib_port_info *pip = (struct ib_port_info *)smp->data; u8 mtu; int ret; u32 state; u32 port_num = be32_to_cpu(smp->attr_mod); if (port_num == 0) port_num = port; else { if (port_num > ibdev->phys_port_cnt) { smp->status |= IB_SMP_INVALID_FIELD; ret = reply(smp); goto bail; } if (port_num != port) { ibp = to_iport(ibdev, port_num); ret = check_mkey(ibp, smp, 0); if (ret) goto bail; } } dd = dd_from_ibdev(ibdev); /* IB numbers ports from 1, hdw from 0 */ ppd = dd->pport + (port_num - 1); ibp = &ppd->ibport_data; /* Clear all fields. Only set the non-zero fields. */ memset(smp->data, 0, sizeof(smp->data)); /* Only return the mkey if the protection field allows it. */ if (!(smp->method == IB_MGMT_METHOD_GET && ibp->mkey != smp->mkey && ibp->mkeyprot == 1)) pip->mkey = ibp->mkey; pip->gid_prefix = ibp->gid_prefix; pip->lid = cpu_to_be16(ppd->lid); pip->sm_lid = cpu_to_be16(ibp->sm_lid); pip->cap_mask = cpu_to_be32(ibp->port_cap_flags); /* pip->diag_code; */ pip->mkey_lease_period = cpu_to_be16(ibp->mkey_lease_period); pip->local_port_num = port; pip->link_width_enabled = ppd->link_width_enabled; pip->link_width_supported = ppd->link_width_supported; pip->link_width_active = ppd->link_width_active; state = dd->f_iblink_state(ppd->lastibcstat); pip->linkspeed_portstate = ppd->link_speed_supported << 4 | state; pip->portphysstate_linkdown = (dd->f_ibphys_portstate(ppd->lastibcstat) << 4) | (get_linkdowndefaultstate(ppd) ? 
1 : 2); pip->mkeyprot_resv_lmc = (ibp->mkeyprot << 6) | ppd->lmc; pip->linkspeedactive_enabled = (ppd->link_speed_active << 4) | ppd->link_speed_enabled; switch (ppd->ibmtu) { default: /* something is wrong; fall through */ case 4096: mtu = IB_MTU_4096; break; case 2048: mtu = IB_MTU_2048; break; case 1024: mtu = IB_MTU_1024; break; case 512: mtu = IB_MTU_512; break; case 256: mtu = IB_MTU_256; break; } pip->neighbormtu_mastersmsl = (mtu << 4) | ibp->sm_sl; pip->vlcap_inittype = ppd->vls_supported << 4; /* InitType = 0 */ pip->vl_high_limit = ibp->vl_high_limit; pip->vl_arb_high_cap = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_VL_HIGH_CAP); pip->vl_arb_low_cap = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_VL_LOW_CAP); /* InitTypeReply = 0 */ pip->inittypereply_mtucap = qib_ibmtu ? qib_ibmtu : IB_MTU_4096; /* HCAs ignore VLStallCount and HOQLife */ /* pip->vlstallcnt_hoqlife; */ pip->operationalvl_pei_peo_fpi_fpo = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_OP_VLS) << 4; pip->mkey_violations = cpu_to_be16(ibp->mkey_violations); /* P_KeyViolations are counted by hardware. */ pip->pkey_violations = cpu_to_be16(ibp->pkey_violations); pip->qkey_violations = cpu_to_be16(ibp->qkey_violations); /* Only the hardware GUID is supported for now */ pip->guid_cap = QIB_GUIDS_PER_PORT; pip->clientrereg_resv_subnetto = ibp->subnet_timeout; /* 32.768 usec. 
response time (guessing) */ pip->resv_resptimevalue = 3; pip->localphyerrors_overrunerrors = (get_phyerrthreshold(ppd) << 4) | get_overrunthreshold(ppd); /* pip->max_credit_hint; */ if (ibp->port_cap_flags & IB_PORT_LINK_LATENCY_SUP) { u32 v; v = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_LINKLATENCY); pip->link_roundtrip_latency[0] = v >> 16; pip->link_roundtrip_latency[1] = v >> 8; pip->link_roundtrip_latency[2] = v; } ret = reply(smp); bail: return ret; } /** * get_pkeys - return the PKEY table * @dd: the qlogic_ib device * @port: the IB port number * @pkeys: the pkey table is placed here */ static int get_pkeys(struct qib_devdata *dd, u8 port, u16 *pkeys) { struct qib_pportdata *ppd = dd->pport + port - 1; /* * always a kernel context, no locking needed. * If we get here with ppd setup, no need to check * that pd is valid. */ struct qib_ctxtdata *rcd = dd->rcd[ppd->hw_pidx]; memcpy(pkeys, rcd->pkeys, sizeof(rcd->pkeys)); return 0; } static int subn_get_pkeytable(struct ib_smp *smp, struct ib_device *ibdev, u8 port) { u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff); u16 *p = (u16 *) smp->data; __be16 *q = (__be16 *) smp->data; /* 64 blocks of 32 16-bit P_Key entries */ memset(smp->data, 0, sizeof(smp->data)); if (startpx == 0) { struct qib_devdata *dd = dd_from_ibdev(ibdev); unsigned i, n = qib_get_npkeys(dd); get_pkeys(dd, port, p); for (i = 0; i < n; i++) q[i] = cpu_to_be16(p[i]); } else smp->status |= IB_SMP_INVALID_FIELD; return reply(smp); } static int subn_set_guidinfo(struct ib_smp *smp, struct ib_device *ibdev, u8 port) { struct qib_devdata *dd = dd_from_ibdev(ibdev); u32 startgx = 8 * be32_to_cpu(smp->attr_mod); __be64 *p = (__be64 *) smp->data; unsigned pidx = port - 1; /* IB number port from 1, hdw from 0 */ /* 32 blocks of 8 64-bit GUIDs per block */ if (startgx == 0 && pidx < dd->num_pports) { struct qib_pportdata *ppd = dd->pport + pidx; struct qib_ibport *ibp = &ppd->ibport_data; unsigned i; /* The first entry is read-only. 
*/ for (i = 1; i < QIB_GUIDS_PER_PORT; i++) ibp->guids[i - 1] = p[i]; } else smp->status |= IB_SMP_INVALID_FIELD; /* The only GUID we support is the first read-only entry. */ return subn_get_guidinfo(smp, ibdev, port); } /** * subn_set_portinfo - set port information * @smp: the incoming SM packet * @ibdev: the infiniband device * @port: the port on the device * * Set Portinfo (see ch. 14.2.5.6). */ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev, u8 port) { struct ib_port_info *pip = (struct ib_port_info *)smp->data; struct ib_event event; struct qib_devdata *dd; struct qib_pportdata *ppd; struct qib_ibport *ibp; char clientrereg = 0; unsigned long flags; u16 lid, smlid; u8 lwe; u8 lse; u8 state; u8 vls; u8 msl; u16 lstate; int ret, ore, mtu; u32 port_num = be32_to_cpu(smp->attr_mod); if (port_num == 0) port_num = port; else { if (port_num > ibdev->phys_port_cnt) goto err; /* Port attributes can only be set on the receiving port */ if (port_num != port) goto get_only; } dd = dd_from_ibdev(ibdev); /* IB numbers ports from 1, hdw from 0 */ ppd = dd->pport + (port_num - 1); ibp = &ppd->ibport_data; event.device = ibdev; event.element.port_num = port; ibp->mkey = pip->mkey; ibp->gid_prefix = pip->gid_prefix; ibp->mkey_lease_period = be16_to_cpu(pip->mkey_lease_period); lid = be16_to_cpu(pip->lid); /* Must be a valid unicast LID address. */ if (lid == 0 || lid >= QIB_MULTICAST_LID_BASE) smp->status |= IB_SMP_INVALID_FIELD; else if (ppd->lid != lid || ppd->lmc != (pip->mkeyprot_resv_lmc & 7)) { if (ppd->lid != lid) qib_set_uevent_bits(ppd, _QIB_EVENT_LID_CHANGE_BIT); if (ppd->lmc != (pip->mkeyprot_resv_lmc & 7)) qib_set_uevent_bits(ppd, _QIB_EVENT_LMC_CHANGE_BIT); qib_set_lid(ppd, lid, pip->mkeyprot_resv_lmc & 7); event.event = IB_EVENT_LID_CHANGE; ib_dispatch_event(&event); } smlid = be16_to_cpu(pip->sm_lid); msl = pip->neighbormtu_mastersmsl & 0xF; /* Must be a valid unicast LID address. 
*/ if (smlid == 0 || smlid >= QIB_MULTICAST_LID_BASE) smp->status |= IB_SMP_INVALID_FIELD; else if (smlid != ibp->sm_lid || msl != ibp->sm_sl) { spin_lock_irqsave(&ibp->lock, flags); if (ibp->sm_ah) { if (smlid != ibp->sm_lid) ibp->sm_ah->attr.dlid = smlid; if (msl != ibp->sm_sl) ibp->sm_ah->attr.sl = msl; } spin_unlock_irqrestore(&ibp->lock, flags); if (smlid != ibp->sm_lid) ibp->sm_lid = smlid; if (msl != ibp->sm_sl) ibp->sm_sl = msl; event.event = IB_EVENT_SM_CHANGE; ib_dispatch_event(&event); } /* Allow 1x or 4x to be set (see 14.2.6.6). */ lwe = pip->link_width_enabled; if (lwe) { if (lwe == 0xFF) set_link_width_enabled(ppd, ppd->link_width_supported); else if (lwe >= 16 || (lwe & ~ppd->link_width_supported)) smp->status |= IB_SMP_INVALID_FIELD; else if (lwe != ppd->link_width_enabled) set_link_width_enabled(ppd, lwe); } lse = pip->linkspeedactive_enabled & 0xF; if (lse) { /* * The IB 1.2 spec. only allows link speed values * 1, 3, 5, 7, 15. 1.2.1 extended to allow specific * speeds. */ if (lse == 15) set_link_speed_enabled(ppd, ppd->link_speed_supported); else if (lse >= 8 || (lse & ~ppd->link_speed_supported)) smp->status |= IB_SMP_INVALID_FIELD; else if (lse != ppd->link_speed_enabled) set_link_speed_enabled(ppd, lse); } /* Set link down default state. 
*/ switch (pip->portphysstate_linkdown & 0xF) { case 0: /* NOP */ break; case 1: /* SLEEP */ (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LINKDEFAULT, IB_LINKINITCMD_SLEEP); break; case 2: /* POLL */ (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LINKDEFAULT, IB_LINKINITCMD_POLL); break; default: smp->status |= IB_SMP_INVALID_FIELD; } ibp->mkeyprot = pip->mkeyprot_resv_lmc >> 6; ibp->vl_high_limit = pip->vl_high_limit; (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_VL_HIGH_LIMIT, ibp->vl_high_limit); mtu = ib_mtu_enum_to_int((pip->neighbormtu_mastersmsl >> 4) & 0xF); if (mtu == -1) smp->status |= IB_SMP_INVALID_FIELD; else qib_set_mtu(ppd, mtu); /* Set operational VLs */ vls = (pip->operationalvl_pei_peo_fpi_fpo >> 4) & 0xF; if (vls) { if (vls > ppd->vls_supported) smp->status |= IB_SMP_INVALID_FIELD; else (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_OP_VLS, vls); } if (pip->mkey_violations == 0) ibp->mkey_violations = 0; if (pip->pkey_violations == 0) ibp->pkey_violations = 0; if (pip->qkey_violations == 0) ibp->qkey_violations = 0; ore = pip->localphyerrors_overrunerrors; if (set_phyerrthreshold(ppd, (ore >> 4) & 0xF)) smp->status |= IB_SMP_INVALID_FIELD; if (set_overrunthreshold(ppd, (ore & 0xF))) smp->status |= IB_SMP_INVALID_FIELD; ibp->subnet_timeout = pip->clientrereg_resv_subnetto & 0x1F; if (pip->clientrereg_resv_subnetto & 0x80) { clientrereg = 1; event.event = IB_EVENT_CLIENT_REREGISTER; ib_dispatch_event(&event); } /* * Do the port state change now that the other link parameters * have been set. * Changing the port physical state only makes sense if the link * is down or is being set to down. */ state = pip->linkspeed_portstate & 0xF; lstate = (pip->portphysstate_linkdown >> 4) & 0xF; if (lstate && !(state == IB_PORT_DOWN || state == IB_PORT_NOP)) smp->status |= IB_SMP_INVALID_FIELD; /* * Only state changes of DOWN, ARM, and ACTIVE are valid * and must be in the correct state to take effect (see 7.2.6). 
*/ switch (state) { case IB_PORT_NOP: if (lstate == 0) break; /* FALLTHROUGH */ case IB_PORT_DOWN: if (lstate == 0) lstate = QIB_IB_LINKDOWN_ONLY; else if (lstate == 1) lstate = QIB_IB_LINKDOWN_SLEEP; else if (lstate == 2) lstate = QIB_IB_LINKDOWN; else if (lstate == 3) lstate = QIB_IB_LINKDOWN_DISABLE; else { smp->status |= IB_SMP_INVALID_FIELD; break; } spin_lock_irqsave(&ppd->lflags_lock, flags); ppd->lflags &= ~QIBL_LINKV; spin_unlock_irqrestore(&ppd->lflags_lock, flags); qib_set_linkstate(ppd, lstate); /* * Don't send a reply if the response would be sent * through the disabled port. */ if (lstate == QIB_IB_LINKDOWN_DISABLE && smp->hop_cnt) { ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED; goto done; } qib_wait_linkstate(ppd, QIBL_LINKV, 10); break; case IB_PORT_ARMED: qib_set_linkstate(ppd, QIB_IB_LINKARM); break; case IB_PORT_ACTIVE: qib_set_linkstate(ppd, QIB_IB_LINKACTIVE); break; default: smp->status |= IB_SMP_INVALID_FIELD; } ret = subn_get_portinfo(smp, ibdev, port); if (clientrereg) pip->clientrereg_resv_subnetto |= 0x80; goto get_only; err: smp->status |= IB_SMP_INVALID_FIELD; get_only: ret = subn_get_portinfo(smp, ibdev, port); done: return ret; } /** * rm_pkey - decrecment the reference count for the given PKEY * @dd: the qlogic_ib device * @key: the PKEY index * * Return true if this was the last reference and the hardware table entry * needs to be changed. */ static int rm_pkey(struct qib_pportdata *ppd, u16 key) { int i; int ret; for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) { if (ppd->pkeys[i] != key) continue; if (atomic_dec_and_test(&ppd->pkeyrefs[i])) { ppd->pkeys[i] = 0; ret = 1; goto bail; } break; } ret = 0; bail: return ret; } /** * add_pkey - add the given PKEY to the hardware table * @dd: the qlogic_ib device * @key: the PKEY * * Return an error code if unable to add the entry, zero if no change, * or 1 if the hardware PKEY register needs to be updated. 
*/
static int add_pkey(struct qib_pportdata *ppd, u16 key)
{
	int i;
	u16 lkey = key & 0x7FFF;	/* key with the full-member bit masked off */
	int any = 0;			/* saw at least one free slot */
	int ret;

	/* The invalid PKEY (all ones in the low 15 bits) is never stored. */
	if (lkey == 0x7FFF) {
		ret = 0;
		goto bail;
	}

	/* Look for an empty slot or a matching PKEY. */
	for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
		if (!ppd->pkeys[i]) {
			any++;
			continue;
		}
		/* If it matches exactly, try to increment the ref count */
		if (ppd->pkeys[i] == key) {
			if (atomic_inc_return(&ppd->pkeyrefs[i]) > 1) {
				ret = 0;
				goto bail;
			}
			/* Lost the race. Look for an empty slot below. */
			atomic_dec(&ppd->pkeyrefs[i]);
			any++;
		}
		/*
		 * It makes no sense to have both the limited and unlimited
		 * PKEY set at the same time since the unlimited one will
		 * disable the limited one.
		 */
		if ((ppd->pkeys[i] & 0x7FFF) == lkey) {
			ret = -EEXIST;
			goto bail;
		}
	}
	if (!any) {
		ret = -EBUSY;
		goto bail;
	}
	/* Second pass: claim the first free slot (refcount 0 -> 1 wins). */
	for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
		if (!ppd->pkeys[i] &&
		    atomic_inc_return(&ppd->pkeyrefs[i]) == 1) {
			/* for qibstats, etc. */
			ppd->pkeys[i] = key;
			ret = 1;
			goto bail;
		}
	}
	ret = -EBUSY;

bail:
	return ret;
}

/**
 * set_pkeys - set the PKEY table for ctxt 0
 * @dd: the qlogic_ib device
 * @port: the IB port number
 * @pkeys: the PKEY table
 */
static int set_pkeys(struct qib_devdata *dd, u8 port, u16 *pkeys)
{
	struct qib_pportdata *ppd;
	struct qib_ctxtdata *rcd;
	int i;
	int changed = 0;	/* nonzero once the HW register must be rewritten */

	/*
	 * IB port one/two always maps to context zero/one,
	 * always a kernel context, no locking needed
	 * If we get here with ppd setup, no need to check
	 * that rcd is valid.
	 */
	ppd = dd->pport + (port - 1);
	rcd = dd->rcd[ppd->hw_pidx];

	for (i = 0; i < ARRAY_SIZE(rcd->pkeys); i++) {
		u16 key = pkeys[i];
		u16 okey = rcd->pkeys[i];

		if (key == okey)
			continue;
		/*
		 * The value of this PKEY table entry is changing.
		 * Remove the old entry in the hardware's array of PKEYs.
*/
		if (okey & 0x7FFF)
			changed |= rm_pkey(ppd, okey);
		if (key & 0x7FFF) {
			int ret = add_pkey(ppd, key);

			/* On failure, record an invalid key in the shadow. */
			if (ret < 0)
				key = 0;
			else
				changed |= ret;
		}
		rcd->pkeys[i] = key;
	}
	if (changed) {
		struct ib_event event;

		/* Push the updated shadow table to the hardware. */
		(void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PKEYS, 0);

		event.event = IB_EVENT_PKEY_CHANGE;
		event.device = &dd->verbs_dev.ibdev;
		event.element.port_num = 1;
		ib_dispatch_event(&event);
	}
	return 0;
}

/*
 * SubnSet(PKeyTable): byte-swap the incoming table in place, apply it
 * via set_pkeys(), then reply with a SubnGet of the resulting table.
 * Only block 0 is supported.
 */
static int subn_set_pkeytable(struct ib_smp *smp, struct ib_device *ibdev,
			      u8 port)
{
	u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff);
	__be16 *p = (__be16 *) smp->data;
	u16 *q = (u16 *) smp->data;	/* same buffer, host-order view */
	struct qib_devdata *dd = dd_from_ibdev(ibdev);
	unsigned i, n = qib_get_npkeys(dd);

	for (i = 0; i < n; i++)
		q[i] = be16_to_cpu(p[i]);

	if (startpx != 0 || set_pkeys(dd, port, q) != 0)
		smp->status |= IB_SMP_INVALID_FIELD;

	return subn_get_pkeytable(smp, ibdev, port);
}

/*
 * SubnGet(SLtoVLMappingTable): pack the 16 SL->VL entries two per byte
 * (even SL in the high nibble).
 */
static int subn_get_sl_to_vl(struct ib_smp *smp, struct ib_device *ibdev,
			     u8 port)
{
	struct qib_ibport *ibp = to_iport(ibdev, port);
	u8 *p = (u8 *) smp->data;
	unsigned i;

	memset(smp->data, 0, sizeof(smp->data));

	if (!(ibp->port_cap_flags & IB_PORT_SL_MAP_SUP))
		smp->status |= IB_SMP_UNSUP_METHOD;
	else
		for (i = 0; i < ARRAY_SIZE(ibp->sl_to_vl); i += 2)
			*p++ = (ibp->sl_to_vl[i] << 4) | ibp->sl_to_vl[i + 1];

	return reply(smp);
}

/*
 * SubnSet(SLtoVLMappingTable): unpack two SL->VL entries per byte,
 * notify userspace of the change, and reply with the new table.
 */
static int subn_set_sl_to_vl(struct ib_smp *smp, struct ib_device *ibdev,
			     u8 port)
{
	struct qib_ibport *ibp = to_iport(ibdev, port);
	u8 *p = (u8 *) smp->data;
	unsigned i;

	if (!(ibp->port_cap_flags & IB_PORT_SL_MAP_SUP)) {
		smp->status |= IB_SMP_UNSUP_METHOD;
		return reply(smp);
	}

	for (i = 0; i < ARRAY_SIZE(ibp->sl_to_vl); i += 2, p++) {
		ibp->sl_to_vl[i] = *p >> 4;
		ibp->sl_to_vl[i + 1] = *p & 0xF;
	}
	qib_set_uevent_bits(ppd_from_ibp(to_iport(ibdev, port)),
			    _QIB_EVENT_SL2VL_CHANGE_BIT);

	return subn_get_sl_to_vl(smp, ibdev, port);
}

/*
 * SubnGet(VLArbitrationTable): return the low- or high-priority VL
 * arbitration table selected by the upper 16 bits of attr_mod.
 */
static int subn_get_vl_arb(struct ib_smp *smp, struct ib_device *ibdev,
			   u8 port)
{
	unsigned which = be32_to_cpu(smp->attr_mod) >> 16;
	struct qib_pportdata *ppd =
ppd_from_ibp(to_iport(ibdev, port));

	memset(smp->data, 0, sizeof(smp->data));

	/* Only ports with more than one VL have arbitration tables. */
	if (ppd->vls_supported == IB_VL_VL0)
		smp->status |= IB_SMP_UNSUP_METHOD;
	else if (which == IB_VLARB_LOWPRI_0_31)
		(void) ppd->dd->f_get_ib_table(ppd, QIB_IB_TBL_VL_LOW_ARB,
					       smp->data);
	else if (which == IB_VLARB_HIGHPRI_0_31)
		(void) ppd->dd->f_get_ib_table(ppd, QIB_IB_TBL_VL_HIGH_ARB,
					       smp->data);
	else
		smp->status |= IB_SMP_INVALID_FIELD;

	return reply(smp);
}

/*
 * SubnSet(VLArbitrationTable): write the selected low/high-priority VL
 * arbitration table, then reply via a SubnGet of the resulting table.
 */
static int subn_set_vl_arb(struct ib_smp *smp, struct ib_device *ibdev,
			   u8 port)
{
	unsigned which = be32_to_cpu(smp->attr_mod) >> 16;
	struct qib_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port));

	if (ppd->vls_supported == IB_VL_VL0)
		smp->status |= IB_SMP_UNSUP_METHOD;
	else if (which == IB_VLARB_LOWPRI_0_31)
		(void) ppd->dd->f_set_ib_table(ppd, QIB_IB_TBL_VL_LOW_ARB,
					       smp->data);
	else if (which == IB_VLARB_HIGHPRI_0_31)
		(void) ppd->dd->f_set_ib_table(ppd, QIB_IB_TBL_VL_HIGH_ARB,
					       smp->data);
	else
		smp->status |= IB_SMP_INVALID_FIELD;

	return subn_get_vl_arb(smp, ibdev, port);
}

static int subn_trap_repress(struct ib_smp *smp, struct ib_device *ibdev,
			     u8 port)
{
	/*
	 * For now, we only send the trap once so no need to process this.
	 * o13-6, o13-7,
	 * o14-3.a4 The SMA shall not send any message in response to a valid
	 * SubnTrapRepress() message.
	 */
	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
}

/*
 * PerfMgtGet(ClassPortInfo): report PMA capabilities (extended-width
 * counters, and congestion statistics when the hardware supports the
 * transmit-wait counter).
 */
static int pma_get_classportinfo(struct ib_pma_mad *pmp,
				 struct ib_device *ibdev)
{
	struct ib_class_port_info *p =
		(struct ib_class_port_info *)pmp->data;
	struct qib_devdata *dd = dd_from_ibdev(ibdev);

	memset(pmp->data, 0, sizeof(pmp->data));

	if (pmp->mad_hdr.attr_mod != 0)
		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;

	/* Note that AllPortSelect is not valid */
	p->base_version = 1;
	p->class_version = 1;
	p->capability_mask = IB_PMA_CLASS_CAP_EXT_WIDTH;
	/*
	 * Set the most significant bit of CM2 to indicate support for
	 * congestion statistics
	 */
	p->reserved[0] = dd->psxmitwait_supported << 7;
	/*
	 * Expected response time is 4.096 usec.
* 2^18 == 1.073741824 sec. */ p->resp_time_value = 18; return reply((struct ib_smp *) pmp); } static int pma_get_portsamplescontrol(struct ib_pma_mad *pmp, struct ib_device *ibdev, u8 port) { struct ib_pma_portsamplescontrol *p = (struct ib_pma_portsamplescontrol *)pmp->data; struct qib_ibdev *dev = to_idev(ibdev); struct qib_devdata *dd = dd_from_dev(dev); struct qib_ibport *ibp = to_iport(ibdev, port); struct qib_pportdata *ppd = ppd_from_ibp(ibp); unsigned long flags; u8 port_select = p->port_select; memset(pmp->data, 0, sizeof(pmp->data)); p->port_select = port_select; if (pmp->mad_hdr.attr_mod != 0 || port_select != port) { pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD; goto bail; } spin_lock_irqsave(&ibp->lock, flags); p->tick = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_PMA_TICKS); p->sample_status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT); p->counter_width = 4; /* 32 bit counters */ p->counter_mask0_9 = COUNTER_MASK0_9; p->sample_start = cpu_to_be32(ibp->pma_sample_start); p->sample_interval = cpu_to_be32(ibp->pma_sample_interval); p->tag = cpu_to_be16(ibp->pma_tag); p->counter_select[0] = ibp->pma_counter_select[0]; p->counter_select[1] = ibp->pma_counter_select[1]; p->counter_select[2] = ibp->pma_counter_select[2]; p->counter_select[3] = ibp->pma_counter_select[3]; p->counter_select[4] = ibp->pma_counter_select[4]; spin_unlock_irqrestore(&ibp->lock, flags); bail: return reply((struct ib_smp *) pmp); } static int pma_set_portsamplescontrol(struct ib_pma_mad *pmp, struct ib_device *ibdev, u8 port) { struct ib_pma_portsamplescontrol *p = (struct ib_pma_portsamplescontrol *)pmp->data; struct qib_ibdev *dev = to_idev(ibdev); struct qib_devdata *dd = dd_from_dev(dev); struct qib_ibport *ibp = to_iport(ibdev, port); struct qib_pportdata *ppd = ppd_from_ibp(ibp); unsigned long flags; u8 status, xmit_flags; int ret; if (pmp->mad_hdr.attr_mod != 0 || p->port_select != port) { pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD; ret = reply((struct ib_smp *) pmp); goto bail; } 
spin_lock_irqsave(&ibp->lock, flags); /* Port Sampling code owns the PS* HW counters */ xmit_flags = ppd->cong_stats.flags; ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_SAMPLE; status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT); if (status == IB_PMA_SAMPLE_STATUS_DONE || (status == IB_PMA_SAMPLE_STATUS_RUNNING && xmit_flags == IB_PMA_CONG_HW_CONTROL_TIMER)) { ibp->pma_sample_start = be32_to_cpu(p->sample_start); ibp->pma_sample_interval = be32_to_cpu(p->sample_interval); ibp->pma_tag = be16_to_cpu(p->tag); ibp->pma_counter_select[0] = p->counter_select[0]; ibp->pma_counter_select[1] = p->counter_select[1]; ibp->pma_counter_select[2] = p->counter_select[2]; ibp->pma_counter_select[3] = p->counter_select[3]; ibp->pma_counter_select[4] = p->counter_select[4]; dd->f_set_cntr_sample(ppd, ibp->pma_sample_interval, ibp->pma_sample_start); } spin_unlock_irqrestore(&ibp->lock, flags); ret = pma_get_portsamplescontrol(pmp, ibdev, port); bail: return ret; } static u64 get_counter(struct qib_ibport *ibp, struct qib_pportdata *ppd, __be16 sel) { u64 ret; switch (sel) { case IB_PMA_PORT_XMIT_DATA: ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSXMITDATA); break; case IB_PMA_PORT_RCV_DATA: ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSRCVDATA); break; case IB_PMA_PORT_XMIT_PKTS: ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSXMITPKTS); break; case IB_PMA_PORT_RCV_PKTS: ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSRCVPKTS); break; case IB_PMA_PORT_XMIT_WAIT: ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSXMITWAIT); break; default: ret = 0; } return ret; } /* This function assumes that the xmit_wait lock is already held */ static u64 xmit_wait_get_value_delta(struct qib_pportdata *ppd) { u32 delta; delta = get_counter(&ppd->ibport_data, ppd, IB_PMA_PORT_XMIT_WAIT); return ppd->cong_stats.counter + delta; } static void cache_hw_sample_counters(struct qib_pportdata *ppd) { struct qib_ibport *ibp = &ppd->ibport_data; ppd->cong_stats.counter_cache.psxmitdata = get_counter(ibp, ppd, 
IB_PMA_PORT_XMIT_DATA); ppd->cong_stats.counter_cache.psrcvdata = get_counter(ibp, ppd, IB_PMA_PORT_RCV_DATA); ppd->cong_stats.counter_cache.psxmitpkts = get_counter(ibp, ppd, IB_PMA_PORT_XMIT_PKTS); ppd->cong_stats.counter_cache.psrcvpkts = get_counter(ibp, ppd, IB_PMA_PORT_RCV_PKTS); ppd->cong_stats.counter_cache.psxmitwait = get_counter(ibp, ppd, IB_PMA_PORT_XMIT_WAIT); } static u64 get_cache_hw_sample_counters(struct qib_pportdata *ppd, __be16 sel) { u64 ret; switch (sel) { case IB_PMA_PORT_XMIT_DATA: ret = ppd->cong_stats.counter_cache.psxmitdata; break; case IB_PMA_PORT_RCV_DATA: ret = ppd->cong_stats.counter_cache.psrcvdata; break; case IB_PMA_PORT_XMIT_PKTS: ret = ppd->cong_stats.counter_cache.psxmitpkts; break; case IB_PMA_PORT_RCV_PKTS: ret = ppd->cong_stats.counter_cache.psrcvpkts; break; case IB_PMA_PORT_XMIT_WAIT: ret = ppd->cong_stats.counter_cache.psxmitwait; break; default: ret = 0; } return ret; } static int pma_get_portsamplesresult(struct ib_pma_mad *pmp, struct ib_device *ibdev, u8 port) { struct ib_pma_portsamplesresult *p = (struct ib_pma_portsamplesresult *)pmp->data; struct qib_ibdev *dev = to_idev(ibdev); struct qib_devdata *dd = dd_from_dev(dev); struct qib_ibport *ibp = to_iport(ibdev, port); struct qib_pportdata *ppd = ppd_from_ibp(ibp); unsigned long flags; u8 status; int i; memset(pmp->data, 0, sizeof(pmp->data)); spin_lock_irqsave(&ibp->lock, flags); p->tag = cpu_to_be16(ibp->pma_tag); if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_TIMER) p->sample_status = IB_PMA_SAMPLE_STATUS_DONE; else { status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT); p->sample_status = cpu_to_be16(status); if (status == IB_PMA_SAMPLE_STATUS_DONE) { cache_hw_sample_counters(ppd); ppd->cong_stats.counter = xmit_wait_get_value_delta(ppd); dd->f_set_cntr_sample(ppd, QIB_CONG_TIMER_PSINTERVAL, 0); ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER; } } for (i = 0; i < ARRAY_SIZE(ibp->pma_counter_select); i++) p->counter[i] = cpu_to_be32( 
get_cache_hw_sample_counters( ppd, ibp->pma_counter_select[i])); spin_unlock_irqrestore(&ibp->lock, flags); return reply((struct ib_smp *) pmp); } static int pma_get_portsamplesresult_ext(struct ib_pma_mad *pmp, struct ib_device *ibdev, u8 port) { struct ib_pma_portsamplesresult_ext *p = (struct ib_pma_portsamplesresult_ext *)pmp->data; struct qib_ibdev *dev = to_idev(ibdev); struct qib_devdata *dd = dd_from_dev(dev); struct qib_ibport *ibp = to_iport(ibdev, port); struct qib_pportdata *ppd = ppd_from_ibp(ibp); unsigned long flags; u8 status; int i; /* Port Sampling code owns the PS* HW counters */ memset(pmp->data, 0, sizeof(pmp->data)); spin_lock_irqsave(&ibp->lock, flags); p->tag = cpu_to_be16(ibp->pma_tag); if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_TIMER) p->sample_status = IB_PMA_SAMPLE_STATUS_DONE; else { status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT); p->sample_status = cpu_to_be16(status); /* 64 bits */ p->extended_width = cpu_to_be32(0x80000000); if (status == IB_PMA_SAMPLE_STATUS_DONE) { cache_hw_sample_counters(ppd); ppd->cong_stats.counter = xmit_wait_get_value_delta(ppd); dd->f_set_cntr_sample(ppd, QIB_CONG_TIMER_PSINTERVAL, 0); ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER; } } for (i = 0; i < ARRAY_SIZE(ibp->pma_counter_select); i++) p->counter[i] = cpu_to_be64( get_cache_hw_sample_counters( ppd, ibp->pma_counter_select[i])); spin_unlock_irqrestore(&ibp->lock, flags); return reply((struct ib_smp *) pmp); } static int pma_get_portcounters(struct ib_pma_mad *pmp, struct ib_device *ibdev, u8 port) { struct ib_pma_portcounters *p = (struct ib_pma_portcounters *) pmp->data; struct qib_ibport *ibp = to_iport(ibdev, port); struct qib_pportdata *ppd = ppd_from_ibp(ibp); struct qib_verbs_counters cntrs; u8 port_select = p->port_select; qib_get_counters(ppd, &cntrs); /* Adjust counters for any resets done. 
*/ cntrs.symbol_error_counter -= ibp->z_symbol_error_counter; cntrs.link_error_recovery_counter -= ibp->z_link_error_recovery_counter; cntrs.link_downed_counter -= ibp->z_link_downed_counter; cntrs.port_rcv_errors -= ibp->z_port_rcv_errors; cntrs.port_rcv_remphys_errors -= ibp->z_port_rcv_remphys_errors; cntrs.port_xmit_discards -= ibp->z_port_xmit_discards; cntrs.port_xmit_data -= ibp->z_port_xmit_data; cntrs.port_rcv_data -= ibp->z_port_rcv_data; cntrs.port_xmit_packets -= ibp->z_port_xmit_packets; cntrs.port_rcv_packets -= ibp->z_port_rcv_packets; cntrs.local_link_integrity_errors -= ibp->z_local_link_integrity_errors; cntrs.excessive_buffer_overrun_errors -= ibp->z_excessive_buffer_overrun_errors; cntrs.vl15_dropped -= ibp->z_vl15_dropped; cntrs.vl15_dropped += ibp->n_vl15_dropped; memset(pmp->data, 0, sizeof(pmp->data)); p->port_select = port_select; if (pmp->mad_hdr.attr_mod != 0 || port_select != port) pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD; if (cntrs.symbol_error_counter > 0xFFFFUL) p->symbol_error_counter = cpu_to_be16(0xFFFF); else p->symbol_error_counter = cpu_to_be16((u16)cntrs.symbol_error_counter); if (cntrs.link_error_recovery_counter > 0xFFUL) p->link_error_recovery_counter = 0xFF; else p->link_error_recovery_counter = (u8)cntrs.link_error_recovery_counter; if (cntrs.link_downed_counter > 0xFFUL) p->link_downed_counter = 0xFF; else p->link_downed_counter = (u8)cntrs.link_downed_counter; if (cntrs.port_rcv_errors > 0xFFFFUL) p->port_rcv_errors = cpu_to_be16(0xFFFF); else p->port_rcv_errors = cpu_to_be16((u16) cntrs.port_rcv_errors); if (cntrs.port_rcv_remphys_errors > 0xFFFFUL) p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF); else p->port_rcv_remphys_errors = cpu_to_be16((u16)cntrs.port_rcv_remphys_errors); if (cntrs.port_xmit_discards > 0xFFFFUL) p->port_xmit_discards = cpu_to_be16(0xFFFF); else p->port_xmit_discards = cpu_to_be16((u16)cntrs.port_xmit_discards); if (cntrs.local_link_integrity_errors > 0xFUL) cntrs.local_link_integrity_errors = 
0xFUL; if (cntrs.excessive_buffer_overrun_errors > 0xFUL) cntrs.excessive_buffer_overrun_errors = 0xFUL; p->link_overrun_errors = (cntrs.local_link_integrity_errors << 4) | cntrs.excessive_buffer_overrun_errors; if (cntrs.vl15_dropped > 0xFFFFUL) p->vl15_dropped = cpu_to_be16(0xFFFF); else p->vl15_dropped = cpu_to_be16((u16)cntrs.vl15_dropped); if (cntrs.port_xmit_data > 0xFFFFFFFFUL) p->port_xmit_data = cpu_to_be32(0xFFFFFFFF); else p->port_xmit_data = cpu_to_be32((u32)cntrs.port_xmit_data); if (cntrs.port_rcv_data > 0xFFFFFFFFUL) p->port_rcv_data = cpu_to_be32(0xFFFFFFFF); else p->port_rcv_data = cpu_to_be32((u32)cntrs.port_rcv_data); if (cntrs.port_xmit_packets > 0xFFFFFFFFUL) p->port_xmit_packets = cpu_to_be32(0xFFFFFFFF); else p->port_xmit_packets = cpu_to_be32((u32)cntrs.port_xmit_packets); if (cntrs.port_rcv_packets > 0xFFFFFFFFUL) p->port_rcv_packets = cpu_to_be32(0xFFFFFFFF); else p->port_rcv_packets = cpu_to_be32((u32) cntrs.port_rcv_packets); return reply((struct ib_smp *) pmp); } static int pma_get_portcounters_cong(struct ib_pma_mad *pmp, struct ib_device *ibdev, u8 port) { /* Congestion PMA packets start at offset 24 not 64 */ struct ib_pma_portcounters_cong *p = (struct ib_pma_portcounters_cong *)pmp->reserved; struct qib_verbs_counters cntrs; struct qib_ibport *ibp = to_iport(ibdev, port); struct qib_pportdata *ppd = ppd_from_ibp(ibp); struct qib_devdata *dd = dd_from_ppd(ppd); u32 port_select = be32_to_cpu(pmp->mad_hdr.attr_mod) & 0xFF; u64 xmit_wait_counter; unsigned long flags; /* * This check is performed only in the GET method because the * SET method ends up calling this anyway. 
*/ if (!dd->psxmitwait_supported) pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR; if (port_select != port) pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD; qib_get_counters(ppd, &cntrs); spin_lock_irqsave(&ppd->ibport_data.lock, flags); xmit_wait_counter = xmit_wait_get_value_delta(ppd); spin_unlock_irqrestore(&ppd->ibport_data.lock, flags); /* Adjust counters for any resets done. */ cntrs.symbol_error_counter -= ibp->z_symbol_error_counter; cntrs.link_error_recovery_counter -= ibp->z_link_error_recovery_counter; cntrs.link_downed_counter -= ibp->z_link_downed_counter; cntrs.port_rcv_errors -= ibp->z_port_rcv_errors; cntrs.port_rcv_remphys_errors -= ibp->z_port_rcv_remphys_errors; cntrs.port_xmit_discards -= ibp->z_port_xmit_discards; cntrs.local_link_integrity_errors -= ibp->z_local_link_integrity_errors; cntrs.excessive_buffer_overrun_errors -= ibp->z_excessive_buffer_overrun_errors; cntrs.vl15_dropped -= ibp->z_vl15_dropped; cntrs.vl15_dropped += ibp->n_vl15_dropped; cntrs.port_xmit_data -= ibp->z_port_xmit_data; cntrs.port_rcv_data -= ibp->z_port_rcv_data; cntrs.port_xmit_packets -= ibp->z_port_xmit_packets; cntrs.port_rcv_packets -= ibp->z_port_rcv_packets; memset(pmp->reserved, 0, sizeof(pmp->reserved) + sizeof(pmp->data)); /* * Set top 3 bits to indicate interval in picoseconds in * remaining bits. 
*/ p->port_check_rate = cpu_to_be16((QIB_XMIT_RATE_PICO << 13) | (dd->psxmitwait_check_rate & ~(QIB_XMIT_RATE_PICO << 13))); p->port_adr_events = cpu_to_be64(0); p->port_xmit_wait = cpu_to_be64(xmit_wait_counter); p->port_xmit_data = cpu_to_be64(cntrs.port_xmit_data); p->port_rcv_data = cpu_to_be64(cntrs.port_rcv_data); p->port_xmit_packets = cpu_to_be64(cntrs.port_xmit_packets); p->port_rcv_packets = cpu_to_be64(cntrs.port_rcv_packets); if (cntrs.symbol_error_counter > 0xFFFFUL) p->symbol_error_counter = cpu_to_be16(0xFFFF); else p->symbol_error_counter = cpu_to_be16( (u16)cntrs.symbol_error_counter); if (cntrs.link_error_recovery_counter > 0xFFUL) p->link_error_recovery_counter = 0xFF; else p->link_error_recovery_counter = (u8)cntrs.link_error_recovery_counter; if (cntrs.link_downed_counter > 0xFFUL) p->link_downed_counter = 0xFF; else p->link_downed_counter = (u8)cntrs.link_downed_counter; if (cntrs.port_rcv_errors > 0xFFFFUL) p->port_rcv_errors = cpu_to_be16(0xFFFF); else p->port_rcv_errors = cpu_to_be16((u16) cntrs.port_rcv_errors); if (cntrs.port_rcv_remphys_errors > 0xFFFFUL) p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF); else p->port_rcv_remphys_errors = cpu_to_be16( (u16)cntrs.port_rcv_remphys_errors); if (cntrs.port_xmit_discards > 0xFFFFUL) p->port_xmit_discards = cpu_to_be16(0xFFFF); else p->port_xmit_discards = cpu_to_be16((u16)cntrs.port_xmit_discards); if (cntrs.local_link_integrity_errors > 0xFUL) cntrs.local_link_integrity_errors = 0xFUL; if (cntrs.excessive_buffer_overrun_errors > 0xFUL) cntrs.excessive_buffer_overrun_errors = 0xFUL; p->link_overrun_errors = (cntrs.local_link_integrity_errors << 4) | cntrs.excessive_buffer_overrun_errors; if (cntrs.vl15_dropped > 0xFFFFUL) p->vl15_dropped = cpu_to_be16(0xFFFF); else p->vl15_dropped = cpu_to_be16((u16)cntrs.vl15_dropped); return reply((struct ib_smp *)pmp); } static int pma_get_portcounters_ext(struct ib_pma_mad *pmp, struct ib_device *ibdev, u8 port) { struct ib_pma_portcounters_ext *p = 
(struct ib_pma_portcounters_ext *)pmp->data; struct qib_ibport *ibp = to_iport(ibdev, port); struct qib_pportdata *ppd = ppd_from_ibp(ibp); u64 swords, rwords, spkts, rpkts, xwait; u8 port_select = p->port_select; memset(pmp->data, 0, sizeof(pmp->data)); p->port_select = port_select; if (pmp->mad_hdr.attr_mod != 0 || port_select != port) { pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD; goto bail; } qib_snapshot_counters(ppd, &swords, &rwords, &spkts, &rpkts, &xwait); /* Adjust counters for any resets done. */ swords -= ibp->z_port_xmit_data; rwords -= ibp->z_port_rcv_data; spkts -= ibp->z_port_xmit_packets; rpkts -= ibp->z_port_rcv_packets; p->port_xmit_data = cpu_to_be64(swords); p->port_rcv_data = cpu_to_be64(rwords); p->port_xmit_packets = cpu_to_be64(spkts); p->port_rcv_packets = cpu_to_be64(rpkts); p->port_unicast_xmit_packets = cpu_to_be64(ibp->n_unicast_xmit); p->port_unicast_rcv_packets = cpu_to_be64(ibp->n_unicast_rcv); p->port_multicast_xmit_packets = cpu_to_be64(ibp->n_multicast_xmit); p->port_multicast_rcv_packets = cpu_to_be64(ibp->n_multicast_rcv); bail: return reply((struct ib_smp *) pmp); } static int pma_set_portcounters(struct ib_pma_mad *pmp, struct ib_device *ibdev, u8 port) { struct ib_pma_portcounters *p = (struct ib_pma_portcounters *) pmp->data; struct qib_ibport *ibp = to_iport(ibdev, port); struct qib_pportdata *ppd = ppd_from_ibp(ibp); struct qib_verbs_counters cntrs; /* * Since the HW doesn't support clearing counters, we save the * current count and subtract it from future responses. 
*/ qib_get_counters(ppd, &cntrs); if (p->counter_select & IB_PMA_SEL_SYMBOL_ERROR) ibp->z_symbol_error_counter = cntrs.symbol_error_counter; if (p->counter_select & IB_PMA_SEL_LINK_ERROR_RECOVERY) ibp->z_link_error_recovery_counter = cntrs.link_error_recovery_counter; if (p->counter_select & IB_PMA_SEL_LINK_DOWNED) ibp->z_link_downed_counter = cntrs.link_downed_counter; if (p->counter_select & IB_PMA_SEL_PORT_RCV_ERRORS) ibp->z_port_rcv_errors = cntrs.port_rcv_errors; if (p->counter_select & IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS) ibp->z_port_rcv_remphys_errors = cntrs.port_rcv_remphys_errors; if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DISCARDS) ibp->z_port_xmit_discards = cntrs.port_xmit_discards; if (p->counter_select & IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS) ibp->z_local_link_integrity_errors = cntrs.local_link_integrity_errors; if (p->counter_select & IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS) ibp->z_excessive_buffer_overrun_errors = cntrs.excessive_buffer_overrun_errors; if (p->counter_select & IB_PMA_SEL_PORT_VL15_DROPPED) { ibp->n_vl15_dropped = 0; ibp->z_vl15_dropped = cntrs.vl15_dropped; } if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DATA) ibp->z_port_xmit_data = cntrs.port_xmit_data; if (p->counter_select & IB_PMA_SEL_PORT_RCV_DATA) ibp->z_port_rcv_data = cntrs.port_rcv_data; if (p->counter_select & IB_PMA_SEL_PORT_XMIT_PACKETS) ibp->z_port_xmit_packets = cntrs.port_xmit_packets; if (p->counter_select & IB_PMA_SEL_PORT_RCV_PACKETS) ibp->z_port_rcv_packets = cntrs.port_rcv_packets; return pma_get_portcounters(pmp, ibdev, port); } static int pma_set_portcounters_cong(struct ib_pma_mad *pmp, struct ib_device *ibdev, u8 port) { struct qib_ibport *ibp = to_iport(ibdev, port); struct qib_pportdata *ppd = ppd_from_ibp(ibp); struct qib_devdata *dd = dd_from_ppd(ppd); struct qib_verbs_counters cntrs; u32 counter_select = (be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24) & 0xFF; int ret = 0; unsigned long flags; qib_get_counters(ppd, &cntrs); /* Get counter values before we save 
them */ ret = pma_get_portcounters_cong(pmp, ibdev, port); if (counter_select & IB_PMA_SEL_CONG_XMIT) { spin_lock_irqsave(&ppd->ibport_data.lock, flags); ppd->cong_stats.counter = 0; dd->f_set_cntr_sample(ppd, QIB_CONG_TIMER_PSINTERVAL, 0x0); spin_unlock_irqrestore(&ppd->ibport_data.lock, flags); } if (counter_select & IB_PMA_SEL_CONG_PORT_DATA) { ibp->z_port_xmit_data = cntrs.port_xmit_data; ibp->z_port_rcv_data = cntrs.port_rcv_data; ibp->z_port_xmit_packets = cntrs.port_xmit_packets; ibp->z_port_rcv_packets = cntrs.port_rcv_packets; } if (counter_select & IB_PMA_SEL_CONG_ALL) { ibp->z_symbol_error_counter = cntrs.symbol_error_counter; ibp->z_link_error_recovery_counter = cntrs.link_error_recovery_counter; ibp->z_link_downed_counter = cntrs.link_downed_counter; ibp->z_port_rcv_errors = cntrs.port_rcv_errors; ibp->z_port_rcv_remphys_errors = cntrs.port_rcv_remphys_errors; ibp->z_port_xmit_discards = cntrs.port_xmit_discards; ibp->z_local_link_integrity_errors = cntrs.local_link_integrity_errors; ibp->z_excessive_buffer_overrun_errors = cntrs.excessive_buffer_overrun_errors; ibp->n_vl15_dropped = 0; ibp->z_vl15_dropped = cntrs.vl15_dropped; } return ret; } static int pma_set_portcounters_ext(struct ib_pma_mad *pmp, struct ib_device *ibdev, u8 port) { struct ib_pma_portcounters *p = (struct ib_pma_portcounters *) pmp->data; struct qib_ibport *ibp = to_iport(ibdev, port); struct qib_pportdata *ppd = ppd_from_ibp(ibp); u64 swords, rwords, spkts, rpkts, xwait; qib_snapshot_counters(ppd, &swords, &rwords, &spkts, &rpkts, &xwait); if (p->counter_select & IB_PMA_SELX_PORT_XMIT_DATA) ibp->z_port_xmit_data = swords; if (p->counter_select & IB_PMA_SELX_PORT_RCV_DATA) ibp->z_port_rcv_data = rwords; if (p->counter_select & IB_PMA_SELX_PORT_XMIT_PACKETS) ibp->z_port_xmit_packets = spkts; if (p->counter_select & IB_PMA_SELX_PORT_RCV_PACKETS) ibp->z_port_rcv_packets = rpkts; if (p->counter_select & IB_PMA_SELX_PORT_UNI_XMIT_PACKETS) ibp->n_unicast_xmit = 0; if (p->counter_select 
& IB_PMA_SELX_PORT_UNI_RCV_PACKETS) ibp->n_unicast_rcv = 0; if (p->counter_select & IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS) ibp->n_multicast_xmit = 0; if (p->counter_select & IB_PMA_SELX_PORT_MULTI_RCV_PACKETS) ibp->n_multicast_rcv = 0; return pma_get_portcounters_ext(pmp, ibdev, port); } static int process_subn(struct ib_device *ibdev, int mad_flags, u8 port, struct ib_mad *in_mad, struct ib_mad *out_mad) { struct ib_smp *smp = (struct ib_smp *)out_mad; struct qib_ibport *ibp = to_iport(ibdev, port); struct qib_pportdata *ppd = ppd_from_ibp(ibp); int ret; *out_mad = *in_mad; if (smp->class_version != 1) { smp->status |= IB_SMP_UNSUP_VERSION; ret = reply(smp); goto bail; } ret = check_mkey(ibp, smp, mad_flags); if (ret) { u32 port_num = be32_to_cpu(smp->attr_mod); /* * If this is a get/set portinfo, we already check the * M_Key if the MAD is for another port and the M_Key * is OK on the receiving port. This check is needed * to increment the error counters when the M_Key * fails to match on *both* ports. 
*/ if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO && (smp->method == IB_MGMT_METHOD_GET || smp->method == IB_MGMT_METHOD_SET) && port_num && port_num <= ibdev->phys_port_cnt && port != port_num) (void) check_mkey(to_iport(ibdev, port_num), smp, 0); goto bail; } switch (smp->method) { case IB_MGMT_METHOD_GET: switch (smp->attr_id) { case IB_SMP_ATTR_NODE_DESC: ret = subn_get_nodedescription(smp, ibdev); goto bail; case IB_SMP_ATTR_NODE_INFO: ret = subn_get_nodeinfo(smp, ibdev, port); goto bail; case IB_SMP_ATTR_GUID_INFO: ret = subn_get_guidinfo(smp, ibdev, port); goto bail; case IB_SMP_ATTR_PORT_INFO: ret = subn_get_portinfo(smp, ibdev, port); goto bail; case IB_SMP_ATTR_PKEY_TABLE: ret = subn_get_pkeytable(smp, ibdev, port); goto bail; case IB_SMP_ATTR_SL_TO_VL_TABLE: ret = subn_get_sl_to_vl(smp, ibdev, port); goto bail; case IB_SMP_ATTR_VL_ARB_TABLE: ret = subn_get_vl_arb(smp, ibdev, port); goto bail; case IB_SMP_ATTR_SM_INFO: if (ibp->port_cap_flags & IB_PORT_SM_DISABLED) { ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED; goto bail; } if (ibp->port_cap_flags & IB_PORT_SM) { ret = IB_MAD_RESULT_SUCCESS; goto bail; } /* FALLTHROUGH */ default: smp->status |= IB_SMP_UNSUP_METH_ATTR; ret = reply(smp); goto bail; } case IB_MGMT_METHOD_SET: switch (smp->attr_id) { case IB_SMP_ATTR_GUID_INFO: ret = subn_set_guidinfo(smp, ibdev, port); goto bail; case IB_SMP_ATTR_PORT_INFO: ret = subn_set_portinfo(smp, ibdev, port); goto bail; case IB_SMP_ATTR_PKEY_TABLE: ret = subn_set_pkeytable(smp, ibdev, port); goto bail; case IB_SMP_ATTR_SL_TO_VL_TABLE: ret = subn_set_sl_to_vl(smp, ibdev, port); goto bail; case IB_SMP_ATTR_VL_ARB_TABLE: ret = subn_set_vl_arb(smp, ibdev, port); goto bail; case IB_SMP_ATTR_SM_INFO: if (ibp->port_cap_flags & IB_PORT_SM_DISABLED) { ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED; goto bail; } if (ibp->port_cap_flags & IB_PORT_SM) { ret = IB_MAD_RESULT_SUCCESS; goto bail; } /* FALLTHROUGH */ default: smp->status |= 
IB_SMP_UNSUP_METH_ATTR; ret = reply(smp); goto bail; } case IB_MGMT_METHOD_TRAP_REPRESS: if (smp->attr_id == IB_SMP_ATTR_NOTICE) ret = subn_trap_repress(smp, ibdev, port); else { smp->status |= IB_SMP_UNSUP_METH_ATTR; ret = reply(smp); } goto bail; case IB_MGMT_METHOD_TRAP: case IB_MGMT_METHOD_REPORT: case IB_MGMT_METHOD_REPORT_RESP: case IB_MGMT_METHOD_GET_RESP: /* * The ib_mad module will call us to process responses * before checking for other consumers. * Just tell the caller to process it normally. */ ret = IB_MAD_RESULT_SUCCESS; goto bail; case IB_MGMT_METHOD_SEND: if (ib_get_smp_direction(smp) && smp->attr_id == QIB_VENDOR_IPG) { ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PORT, smp->data[0]); ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED; } else ret = IB_MAD_RESULT_SUCCESS; goto bail; default: smp->status |= IB_SMP_UNSUP_METHOD; ret = reply(smp); } bail: return ret; } static int process_perf(struct ib_device *ibdev, u8 port, struct ib_mad *in_mad, struct ib_mad *out_mad) { struct ib_pma_mad *pmp = (struct ib_pma_mad *)out_mad; int ret; *out_mad = *in_mad; if (pmp->mad_hdr.class_version != 1) { pmp->mad_hdr.status |= IB_SMP_UNSUP_VERSION; ret = reply((struct ib_smp *) pmp); goto bail; } switch (pmp->mad_hdr.method) { case IB_MGMT_METHOD_GET: switch (pmp->mad_hdr.attr_id) { case IB_PMA_CLASS_PORT_INFO: ret = pma_get_classportinfo(pmp, ibdev); goto bail; case IB_PMA_PORT_SAMPLES_CONTROL: ret = pma_get_portsamplescontrol(pmp, ibdev, port); goto bail; case IB_PMA_PORT_SAMPLES_RESULT: ret = pma_get_portsamplesresult(pmp, ibdev, port); goto bail; case IB_PMA_PORT_SAMPLES_RESULT_EXT: ret = pma_get_portsamplesresult_ext(pmp, ibdev, port); goto bail; case IB_PMA_PORT_COUNTERS: ret = pma_get_portcounters(pmp, ibdev, port); goto bail; case IB_PMA_PORT_COUNTERS_EXT: ret = pma_get_portcounters_ext(pmp, ibdev, port); goto bail; case IB_PMA_PORT_COUNTERS_CONG: ret = pma_get_portcounters_cong(pmp, ibdev, port); goto bail; default: pmp->mad_hdr.status |= 
IB_SMP_UNSUP_METH_ATTR; ret = reply((struct ib_smp *) pmp); goto bail; } case IB_MGMT_METHOD_SET: switch (pmp->mad_hdr.attr_id) { case IB_PMA_PORT_SAMPLES_CONTROL: ret = pma_set_portsamplescontrol(pmp, ibdev, port); goto bail; case IB_PMA_PORT_COUNTERS: ret = pma_set_portcounters(pmp, ibdev, port); goto bail; case IB_PMA_PORT_COUNTERS_EXT: ret = pma_set_portcounters_ext(pmp, ibdev, port); goto bail; case IB_PMA_PORT_COUNTERS_CONG: ret = pma_set_portcounters_cong(pmp, ibdev, port); goto bail; default: pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR; ret = reply((struct ib_smp *) pmp); goto bail; } case IB_MGMT_METHOD_TRAP: case IB_MGMT_METHOD_GET_RESP: /* * The ib_mad module will call us to process responses * before checking for other consumers. * Just tell the caller to process it normally. */ ret = IB_MAD_RESULT_SUCCESS; goto bail; default: pmp->mad_hdr.status |= IB_SMP_UNSUP_METHOD; ret = reply((struct ib_smp *) pmp); } bail: return ret; } /** * qib_process_mad - process an incoming MAD packet * @ibdev: the infiniband device this packet came in on * @mad_flags: MAD flags * @port: the port number this packet came in on * @in_wc: the work completion entry for this packet * @in_grh: the global route header for this packet * @in_mad: the incoming MAD * @out_mad: any outgoing MAD reply * * Returns IB_MAD_RESULT_SUCCESS if this is a MAD that we are not * interested in processing. * * Note that the verbs framework has already done the MAD sanity checks, * and hop count/pointer updating for IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE * MADs. * * This is called by the ib_mad module. 
*/ int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port, struct ib_wc *in_wc, struct ib_grh *in_grh, struct ib_mad *in_mad, struct ib_mad *out_mad) { int ret; switch (in_mad->mad_hdr.mgmt_class) { case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE: case IB_MGMT_CLASS_SUBN_LID_ROUTED: ret = process_subn(ibdev, mad_flags, port, in_mad, out_mad); goto bail; case IB_MGMT_CLASS_PERF_MGMT: ret = process_perf(ibdev, port, in_mad, out_mad); goto bail; default: ret = IB_MAD_RESULT_SUCCESS; } bail: return ret; } static void send_handler(struct ib_mad_agent *agent, struct ib_mad_send_wc *mad_send_wc) { ib_free_send_mad(mad_send_wc->send_buf); } static void xmit_wait_timer_func(unsigned long opaque) { struct qib_pportdata *ppd = (struct qib_pportdata *)opaque; struct qib_devdata *dd = dd_from_ppd(ppd); unsigned long flags; u8 status; spin_lock_irqsave(&ppd->ibport_data.lock, flags); if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_SAMPLE) { status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT); if (status == IB_PMA_SAMPLE_STATUS_DONE) { /* save counter cache */ cache_hw_sample_counters(ppd); ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER; } else goto done; } ppd->cong_stats.counter = xmit_wait_get_value_delta(ppd); dd->f_set_cntr_sample(ppd, QIB_CONG_TIMER_PSINTERVAL, 0x0); done: spin_unlock_irqrestore(&ppd->ibport_data.lock, flags); mod_timer(&ppd->cong_stats.timer, jiffies + HZ); } int qib_create_agents(struct qib_ibdev *dev) { struct qib_devdata *dd = dd_from_dev(dev); struct ib_mad_agent *agent; struct qib_ibport *ibp; int p; int ret; for (p = 0; p < dd->num_pports; p++) { ibp = &dd->pport[p].ibport_data; agent = ib_register_mad_agent(&dev->ibdev, p + 1, IB_QPT_SMI, NULL, 0, send_handler, NULL, NULL); if (IS_ERR(agent)) { ret = PTR_ERR(agent); goto err; } /* Initialize xmit_wait structure */ dd->pport[p].cong_stats.counter = 0; init_timer(&dd->pport[p].cong_stats.timer); dd->pport[p].cong_stats.timer.function = xmit_wait_timer_func; 
dd->pport[p].cong_stats.timer.data = (unsigned long)(&dd->pport[p]); dd->pport[p].cong_stats.timer.expires = 0; add_timer(&dd->pport[p].cong_stats.timer); ibp->send_agent = agent; } return 0; err: for (p = 0; p < dd->num_pports; p++) { ibp = &dd->pport[p].ibport_data; if (ibp->send_agent) { agent = ibp->send_agent; ibp->send_agent = NULL; ib_unregister_mad_agent(agent); } } return ret; } void qib_free_agents(struct qib_ibdev *dev) { struct qib_devdata *dd = dd_from_dev(dev); struct ib_mad_agent *agent; struct qib_ibport *ibp; int p; for (p = 0; p < dd->num_pports; p++) { ibp = &dd->pport[p].ibport_data; if (ibp->send_agent) { agent = ibp->send_agent; ibp->send_agent = NULL; ib_unregister_mad_agent(agent); } if (ibp->sm_ah) { ib_destroy_ah(&ibp->sm_ah->ibah); ibp->sm_ah = NULL; } if (dd->pport[p].cong_stats.timer.data) del_timer_sync(&dd->pport[p].cong_stats.timer); } }
gpl-2.0
jiangbeilengyu/famkernel
arch/mips/pci/fixup-au1000.c
4568
1743
/* * BRIEF MODULE DESCRIPTION * Board specific PCI fixups. * * Copyright 2001-2003, 2008 MontaVista Software Inc. * Author: MontaVista Software, Inc. <source@mvista.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/pci.h> #include <linux/init.h> extern char irq_tab_alchemy[][5]; int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { return irq_tab_alchemy[slot][pin]; } /* Do platform specific device initialization at pci_enable_device() time */ int pcibios_plat_dev_init(struct pci_dev *dev) { return 0; }
gpl-2.0
redglasses/android_kernel_lge_g3-V20f
drivers/media/dvb/frontends/au8522_dig.c
4824
23193
/* Auvitek AU8522 QAM/8VSB demodulator driver Copyright (C) 2008 Steven Toth <stoth@linuxtv.org> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/module.h> #include <linux/string.h> #include <linux/delay.h> #include "dvb_frontend.h" #include "au8522.h" #include "au8522_priv.h" static int debug; /* Despite the name "hybrid_tuner", the framework works just as well for hybrid demodulators as well... */ static LIST_HEAD(hybrid_tuner_instance_list); static DEFINE_MUTEX(au8522_list_mutex); #define dprintk(arg...)\ do { if (debug)\ printk(arg);\ } while (0) /* 16 bit registers, 8 bit values */ int au8522_writereg(struct au8522_state *state, u16 reg, u8 data) { int ret; u8 buf[] = { (reg >> 8) | 0x80, reg & 0xff, data }; struct i2c_msg msg = { .addr = state->config->demod_address, .flags = 0, .buf = buf, .len = 3 }; ret = i2c_transfer(state->i2c, &msg, 1); if (ret != 1) printk("%s: writereg error (reg == 0x%02x, val == 0x%04x, " "ret == %i)\n", __func__, reg, data, ret); return (ret != 1) ? 
-1 : 0; } u8 au8522_readreg(struct au8522_state *state, u16 reg) { int ret; u8 b0[] = { (reg >> 8) | 0x40, reg & 0xff }; u8 b1[] = { 0 }; struct i2c_msg msg[] = { { .addr = state->config->demod_address, .flags = 0, .buf = b0, .len = 2 }, { .addr = state->config->demod_address, .flags = I2C_M_RD, .buf = b1, .len = 1 } }; ret = i2c_transfer(state->i2c, msg, 2); if (ret != 2) printk(KERN_ERR "%s: readreg error (ret == %i)\n", __func__, ret); return b1[0]; } static int au8522_i2c_gate_ctrl(struct dvb_frontend *fe, int enable) { struct au8522_state *state = fe->demodulator_priv; dprintk("%s(%d)\n", __func__, enable); if (state->operational_mode == AU8522_ANALOG_MODE) { /* We're being asked to manage the gate even though we're not in digital mode. This can occur if we get switched over to analog mode before the dvb_frontend kernel thread has completely shutdown */ return 0; } if (enable) return au8522_writereg(state, 0x106, 1); else return au8522_writereg(state, 0x106, 0); } struct mse2snr_tab { u16 val; u16 data; }; /* VSB SNR lookup table */ static struct mse2snr_tab vsb_mse2snr_tab[] = { { 0, 270 }, { 2, 250 }, { 3, 240 }, { 5, 230 }, { 7, 220 }, { 9, 210 }, { 12, 200 }, { 13, 195 }, { 15, 190 }, { 17, 185 }, { 19, 180 }, { 21, 175 }, { 24, 170 }, { 27, 165 }, { 31, 160 }, { 32, 158 }, { 33, 156 }, { 36, 152 }, { 37, 150 }, { 39, 148 }, { 40, 146 }, { 41, 144 }, { 43, 142 }, { 44, 140 }, { 48, 135 }, { 50, 130 }, { 43, 142 }, { 53, 125 }, { 56, 120 }, { 256, 115 }, }; /* QAM64 SNR lookup table */ static struct mse2snr_tab qam64_mse2snr_tab[] = { { 15, 0 }, { 16, 290 }, { 17, 288 }, { 18, 286 }, { 19, 284 }, { 20, 282 }, { 21, 281 }, { 22, 279 }, { 23, 277 }, { 24, 275 }, { 25, 273 }, { 26, 271 }, { 27, 269 }, { 28, 268 }, { 29, 266 }, { 30, 264 }, { 31, 262 }, { 32, 260 }, { 33, 259 }, { 34, 258 }, { 35, 256 }, { 36, 255 }, { 37, 254 }, { 38, 252 }, { 39, 251 }, { 40, 250 }, { 41, 249 }, { 42, 248 }, { 43, 246 }, { 44, 245 }, { 45, 244 }, { 46, 242 }, { 47, 241 }, { 
48, 240 }, { 50, 239 }, { 51, 238 }, { 53, 237 }, { 54, 236 }, { 56, 235 }, { 57, 234 }, { 59, 233 }, { 60, 232 }, { 62, 231 }, { 63, 230 }, { 65, 229 }, { 67, 228 }, { 68, 227 }, { 70, 226 }, { 71, 225 }, { 73, 224 }, { 74, 223 }, { 76, 222 }, { 78, 221 }, { 80, 220 }, { 82, 219 }, { 85, 218 }, { 88, 217 }, { 90, 216 }, { 92, 215 }, { 93, 214 }, { 94, 212 }, { 95, 211 }, { 97, 210 }, { 99, 209 }, { 101, 208 }, { 102, 207 }, { 104, 206 }, { 107, 205 }, { 111, 204 }, { 114, 203 }, { 118, 202 }, { 122, 201 }, { 125, 200 }, { 128, 199 }, { 130, 198 }, { 132, 197 }, { 256, 190 }, }; /* QAM256 SNR lookup table */ static struct mse2snr_tab qam256_mse2snr_tab[] = { { 16, 0 }, { 17, 400 }, { 18, 398 }, { 19, 396 }, { 20, 394 }, { 21, 392 }, { 22, 390 }, { 23, 388 }, { 24, 386 }, { 25, 384 }, { 26, 382 }, { 27, 380 }, { 28, 379 }, { 29, 378 }, { 30, 377 }, { 31, 376 }, { 32, 375 }, { 33, 374 }, { 34, 373 }, { 35, 372 }, { 36, 371 }, { 37, 370 }, { 38, 362 }, { 39, 354 }, { 40, 346 }, { 41, 338 }, { 42, 330 }, { 43, 328 }, { 44, 326 }, { 45, 324 }, { 46, 322 }, { 47, 320 }, { 48, 319 }, { 49, 318 }, { 50, 317 }, { 51, 316 }, { 52, 315 }, { 53, 314 }, { 54, 313 }, { 55, 312 }, { 56, 311 }, { 57, 310 }, { 58, 308 }, { 59, 306 }, { 60, 304 }, { 61, 302 }, { 62, 300 }, { 63, 298 }, { 65, 295 }, { 68, 294 }, { 70, 293 }, { 73, 292 }, { 76, 291 }, { 78, 290 }, { 79, 289 }, { 81, 288 }, { 82, 287 }, { 83, 286 }, { 84, 285 }, { 85, 284 }, { 86, 283 }, { 88, 282 }, { 89, 281 }, { 256, 280 }, }; static int au8522_mse2snr_lookup(struct mse2snr_tab *tab, int sz, int mse, u16 *snr) { int i, ret = -EINVAL; dprintk("%s()\n", __func__); for (i = 0; i < sz; i++) { if (mse < tab[i].val) { *snr = tab[i].data; ret = 0; break; } } dprintk("%s() snr=%d\n", __func__, *snr); return ret; } static int au8522_set_if(struct dvb_frontend *fe, enum au8522_if_freq if_freq) { struct au8522_state *state = fe->demodulator_priv; u8 r0b5, r0b6, r0b7; char *ifmhz; switch (if_freq) { case AU8522_IF_3_25MHZ: 
ifmhz = "3.25"; r0b5 = 0x00; r0b6 = 0x3d; r0b7 = 0xa0; break; case AU8522_IF_4MHZ: ifmhz = "4.00"; r0b5 = 0x00; r0b6 = 0x4b; r0b7 = 0xd9; break; case AU8522_IF_6MHZ: ifmhz = "6.00"; r0b5 = 0xfb; r0b6 = 0x8e; r0b7 = 0x39; break; default: dprintk("%s() IF Frequency not supported\n", __func__); return -EINVAL; } dprintk("%s() %s MHz\n", __func__, ifmhz); au8522_writereg(state, 0x80b5, r0b5); au8522_writereg(state, 0x80b6, r0b6); au8522_writereg(state, 0x80b7, r0b7); return 0; } /* VSB Modulation table */ static struct { u16 reg; u16 data; } VSB_mod_tab[] = { { 0x8090, 0x84 }, { 0x4092, 0x11 }, { 0x2005, 0x00 }, { 0x8091, 0x80 }, { 0x80a3, 0x0c }, { 0x80a4, 0xe8 }, { 0x8081, 0xc4 }, { 0x80a5, 0x40 }, { 0x80a7, 0x40 }, { 0x80a6, 0x67 }, { 0x8262, 0x20 }, { 0x821c, 0x30 }, { 0x80d8, 0x1a }, { 0x8227, 0xa0 }, { 0x8121, 0xff }, { 0x80a8, 0xf0 }, { 0x80a9, 0x05 }, { 0x80aa, 0x77 }, { 0x80ab, 0xf0 }, { 0x80ac, 0x05 }, { 0x80ad, 0x77 }, { 0x80ae, 0x41 }, { 0x80af, 0x66 }, { 0x821b, 0xcc }, { 0x821d, 0x80 }, { 0x80a4, 0xe8 }, { 0x8231, 0x13 }, }; /* QAM64 Modulation table */ static struct { u16 reg; u16 data; } QAM64_mod_tab[] = { { 0x00a3, 0x09 }, { 0x00a4, 0x00 }, { 0x0081, 0xc4 }, { 0x00a5, 0x40 }, { 0x00aa, 0x77 }, { 0x00ad, 0x77 }, { 0x00a6, 0x67 }, { 0x0262, 0x20 }, { 0x021c, 0x30 }, { 0x00b8, 0x3e }, { 0x00b9, 0xf0 }, { 0x00ba, 0x01 }, { 0x00bb, 0x18 }, { 0x00bc, 0x50 }, { 0x00bd, 0x00 }, { 0x00be, 0xea }, { 0x00bf, 0xef }, { 0x00c0, 0xfc }, { 0x00c1, 0xbd }, { 0x00c2, 0x1f }, { 0x00c3, 0xfc }, { 0x00c4, 0xdd }, { 0x00c5, 0xaf }, { 0x00c6, 0x00 }, { 0x00c7, 0x38 }, { 0x00c8, 0x30 }, { 0x00c9, 0x05 }, { 0x00ca, 0x4a }, { 0x00cb, 0xd0 }, { 0x00cc, 0x01 }, { 0x00cd, 0xd9 }, { 0x00ce, 0x6f }, { 0x00cf, 0xf9 }, { 0x00d0, 0x70 }, { 0x00d1, 0xdf }, { 0x00d2, 0xf7 }, { 0x00d3, 0xc2 }, { 0x00d4, 0xdf }, { 0x00d5, 0x02 }, { 0x00d6, 0x9a }, { 0x00d7, 0xd0 }, { 0x0250, 0x0d }, { 0x0251, 0xcd }, { 0x0252, 0xe0 }, { 0x0253, 0x05 }, { 0x0254, 0xa7 }, { 0x0255, 0xff }, { 0x0256, 0xed 
}, { 0x0257, 0x5b }, { 0x0258, 0xae }, { 0x0259, 0xe6 }, { 0x025a, 0x3d }, { 0x025b, 0x0f }, { 0x025c, 0x0d }, { 0x025d, 0xea }, { 0x025e, 0xf2 }, { 0x025f, 0x51 }, { 0x0260, 0xf5 }, { 0x0261, 0x06 }, { 0x021a, 0x00 }, { 0x0546, 0x40 }, { 0x0210, 0xc7 }, { 0x0211, 0xaa }, { 0x0212, 0xab }, { 0x0213, 0x02 }, { 0x0502, 0x00 }, { 0x0121, 0x04 }, { 0x0122, 0x04 }, { 0x052e, 0x10 }, { 0x00a4, 0xca }, { 0x00a7, 0x40 }, { 0x0526, 0x01 }, }; /* QAM256 Modulation table */ static struct { u16 reg; u16 data; } QAM256_mod_tab[] = { { 0x80a3, 0x09 }, { 0x80a4, 0x00 }, { 0x8081, 0xc4 }, { 0x80a5, 0x40 }, { 0x80aa, 0x77 }, { 0x80ad, 0x77 }, { 0x80a6, 0x67 }, { 0x8262, 0x20 }, { 0x821c, 0x30 }, { 0x80b8, 0x3e }, { 0x80b9, 0xf0 }, { 0x80ba, 0x01 }, { 0x80bb, 0x18 }, { 0x80bc, 0x50 }, { 0x80bd, 0x00 }, { 0x80be, 0xea }, { 0x80bf, 0xef }, { 0x80c0, 0xfc }, { 0x80c1, 0xbd }, { 0x80c2, 0x1f }, { 0x80c3, 0xfc }, { 0x80c4, 0xdd }, { 0x80c5, 0xaf }, { 0x80c6, 0x00 }, { 0x80c7, 0x38 }, { 0x80c8, 0x30 }, { 0x80c9, 0x05 }, { 0x80ca, 0x4a }, { 0x80cb, 0xd0 }, { 0x80cc, 0x01 }, { 0x80cd, 0xd9 }, { 0x80ce, 0x6f }, { 0x80cf, 0xf9 }, { 0x80d0, 0x70 }, { 0x80d1, 0xdf }, { 0x80d2, 0xf7 }, { 0x80d3, 0xc2 }, { 0x80d4, 0xdf }, { 0x80d5, 0x02 }, { 0x80d6, 0x9a }, { 0x80d7, 0xd0 }, { 0x8250, 0x0d }, { 0x8251, 0xcd }, { 0x8252, 0xe0 }, { 0x8253, 0x05 }, { 0x8254, 0xa7 }, { 0x8255, 0xff }, { 0x8256, 0xed }, { 0x8257, 0x5b }, { 0x8258, 0xae }, { 0x8259, 0xe6 }, { 0x825a, 0x3d }, { 0x825b, 0x0f }, { 0x825c, 0x0d }, { 0x825d, 0xea }, { 0x825e, 0xf2 }, { 0x825f, 0x51 }, { 0x8260, 0xf5 }, { 0x8261, 0x06 }, { 0x821a, 0x00 }, { 0x8546, 0x40 }, { 0x8210, 0x26 }, { 0x8211, 0xf6 }, { 0x8212, 0x84 }, { 0x8213, 0x02 }, { 0x8502, 0x01 }, { 0x8121, 0x04 }, { 0x8122, 0x04 }, { 0x852e, 0x10 }, { 0x80a4, 0xca }, { 0x80a7, 0x40 }, { 0x8526, 0x01 }, }; static int au8522_enable_modulation(struct dvb_frontend *fe, fe_modulation_t m) { struct au8522_state *state = fe->demodulator_priv; int i; dprintk("%s(0x%08x)\n", __func__, 
m); switch (m) { case VSB_8: dprintk("%s() VSB_8\n", __func__); for (i = 0; i < ARRAY_SIZE(VSB_mod_tab); i++) au8522_writereg(state, VSB_mod_tab[i].reg, VSB_mod_tab[i].data); au8522_set_if(fe, state->config->vsb_if); break; case QAM_64: dprintk("%s() QAM 64\n", __func__); for (i = 0; i < ARRAY_SIZE(QAM64_mod_tab); i++) au8522_writereg(state, QAM64_mod_tab[i].reg, QAM64_mod_tab[i].data); au8522_set_if(fe, state->config->qam_if); break; case QAM_256: dprintk("%s() QAM 256\n", __func__); for (i = 0; i < ARRAY_SIZE(QAM256_mod_tab); i++) au8522_writereg(state, QAM256_mod_tab[i].reg, QAM256_mod_tab[i].data); au8522_set_if(fe, state->config->qam_if); break; default: dprintk("%s() Invalid modulation\n", __func__); return -EINVAL; } state->current_modulation = m; return 0; } /* Talk to the demod, set the FEC, GUARD, QAM settings etc */ static int au8522_set_frontend(struct dvb_frontend *fe) { struct dtv_frontend_properties *c = &fe->dtv_property_cache; struct au8522_state *state = fe->demodulator_priv; int ret = -EINVAL; dprintk("%s(frequency=%d)\n", __func__, c->frequency); if ((state->current_frequency == c->frequency) && (state->current_modulation == c->modulation)) return 0; if (fe->ops.tuner_ops.set_params) { if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); ret = fe->ops.tuner_ops.set_params(fe); if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0); } if (ret < 0) return ret; /* Allow the tuner to settle */ msleep(100); au8522_enable_modulation(fe, c->modulation); state->current_frequency = c->frequency; return 0; } /* Reset the demod hardware and reset all of the configuration registers to a default state. 
*/ int au8522_init(struct dvb_frontend *fe) { struct au8522_state *state = fe->demodulator_priv; dprintk("%s()\n", __func__); state->operational_mode = AU8522_DIGITAL_MODE; /* Clear out any state associated with the digital side of the chip, so that when it gets powered back up it won't think that it is already tuned */ state->current_frequency = 0; au8522_writereg(state, 0xa4, 1 << 5); au8522_i2c_gate_ctrl(fe, 1); return 0; } static int au8522_led_gpio_enable(struct au8522_state *state, int onoff) { struct au8522_led_config *led_config = state->config->led_cfg; u8 val; /* bail out if we can't control an LED */ if (!led_config || !led_config->gpio_output || !led_config->gpio_output_enable || !led_config->gpio_output_disable) return 0; val = au8522_readreg(state, 0x4000 | (led_config->gpio_output & ~0xc000)); if (onoff) { /* enable GPIO output */ val &= ~((led_config->gpio_output_enable >> 8) & 0xff); val |= (led_config->gpio_output_enable & 0xff); } else { /* disable GPIO output */ val &= ~((led_config->gpio_output_disable >> 8) & 0xff); val |= (led_config->gpio_output_disable & 0xff); } return au8522_writereg(state, 0x8000 | (led_config->gpio_output & ~0xc000), val); } /* led = 0 | off * led = 1 | signal ok * led = 2 | signal strong * led < 0 | only light led if leds are currently off */ static int au8522_led_ctrl(struct au8522_state *state, int led) { struct au8522_led_config *led_config = state->config->led_cfg; int i, ret = 0; /* bail out if we can't control an LED */ if (!led_config || !led_config->gpio_leds || !led_config->num_led_states || !led_config->led_states) return 0; if (led < 0) { /* if LED is already lit, then leave it as-is */ if (state->led_state) return 0; else led *= -1; } /* toggle LED if changing state */ if (state->led_state != led) { u8 val; dprintk("%s: %d\n", __func__, led); au8522_led_gpio_enable(state, 1); val = au8522_readreg(state, 0x4000 | (led_config->gpio_leds & ~0xc000)); /* start with all leds off */ for (i = 0; i < 
led_config->num_led_states; i++) val &= ~led_config->led_states[i]; /* set selected LED state */ if (led < led_config->num_led_states) val |= led_config->led_states[led]; else if (led_config->num_led_states) val |= led_config->led_states[led_config->num_led_states - 1]; ret = au8522_writereg(state, 0x8000 | (led_config->gpio_leds & ~0xc000), val); if (ret < 0) return ret; state->led_state = led; if (led == 0) au8522_led_gpio_enable(state, 0); } return 0; } int au8522_sleep(struct dvb_frontend *fe) { struct au8522_state *state = fe->demodulator_priv; dprintk("%s()\n", __func__); /* Only power down if the digital side is currently using the chip */ if (state->operational_mode == AU8522_ANALOG_MODE) { /* We're not in one of the expected power modes, which means that the DVB thread is probably telling us to go to sleep even though the analog frontend has already started using the chip. So ignore the request */ return 0; } /* turn off led */ au8522_led_ctrl(state, 0); /* Power down the chip */ au8522_writereg(state, 0xa4, 1 << 5); state->current_frequency = 0; return 0; } static int au8522_read_status(struct dvb_frontend *fe, fe_status_t *status) { struct au8522_state *state = fe->demodulator_priv; u8 reg; u32 tuner_status = 0; *status = 0; if (state->current_modulation == VSB_8) { dprintk("%s() Checking VSB_8\n", __func__); reg = au8522_readreg(state, 0x4088); if ((reg & 0x03) == 0x03) *status |= FE_HAS_LOCK | FE_HAS_SYNC | FE_HAS_VITERBI; } else { dprintk("%s() Checking QAM\n", __func__); reg = au8522_readreg(state, 0x4541); if (reg & 0x80) *status |= FE_HAS_VITERBI; if (reg & 0x20) *status |= FE_HAS_LOCK | FE_HAS_SYNC; } switch (state->config->status_mode) { case AU8522_DEMODLOCKING: dprintk("%s() DEMODLOCKING\n", __func__); if (*status & FE_HAS_VITERBI) *status |= FE_HAS_CARRIER | FE_HAS_SIGNAL; break; case AU8522_TUNERLOCKING: /* Get the tuner status */ dprintk("%s() TUNERLOCKING\n", __func__); if (fe->ops.tuner_ops.get_status) { if (fe->ops.i2c_gate_ctrl) 
fe->ops.i2c_gate_ctrl(fe, 1); fe->ops.tuner_ops.get_status(fe, &tuner_status); if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0); } if (tuner_status) *status |= FE_HAS_CARRIER | FE_HAS_SIGNAL; break; } state->fe_status = *status; if (*status & FE_HAS_LOCK) /* turn on LED, if it isn't on already */ au8522_led_ctrl(state, -1); else /* turn off LED */ au8522_led_ctrl(state, 0); dprintk("%s() status 0x%08x\n", __func__, *status); return 0; } static int au8522_led_status(struct au8522_state *state, const u16 *snr) { struct au8522_led_config *led_config = state->config->led_cfg; int led; u16 strong; /* bail out if we can't control an LED */ if (!led_config) return 0; if (0 == (state->fe_status & FE_HAS_LOCK)) return au8522_led_ctrl(state, 0); else if (state->current_modulation == QAM_256) strong = led_config->qam256_strong; else if (state->current_modulation == QAM_64) strong = led_config->qam64_strong; else /* (state->current_modulation == VSB_8) */ strong = led_config->vsb8_strong; if (*snr >= strong) led = 2; else led = 1; if ((state->led_state) && (((strong < *snr) ? 
(*snr - strong) : (strong - *snr)) <= 10)) /* snr didn't change enough to bother * changing the color of the led */ return 0; return au8522_led_ctrl(state, led); } static int au8522_read_snr(struct dvb_frontend *fe, u16 *snr) { struct au8522_state *state = fe->demodulator_priv; int ret = -EINVAL; dprintk("%s()\n", __func__); if (state->current_modulation == QAM_256) ret = au8522_mse2snr_lookup(qam256_mse2snr_tab, ARRAY_SIZE(qam256_mse2snr_tab), au8522_readreg(state, 0x4522), snr); else if (state->current_modulation == QAM_64) ret = au8522_mse2snr_lookup(qam64_mse2snr_tab, ARRAY_SIZE(qam64_mse2snr_tab), au8522_readreg(state, 0x4522), snr); else /* VSB_8 */ ret = au8522_mse2snr_lookup(vsb_mse2snr_tab, ARRAY_SIZE(vsb_mse2snr_tab), au8522_readreg(state, 0x4311), snr); if (state->config->led_cfg) au8522_led_status(state, snr); return ret; } static int au8522_read_signal_strength(struct dvb_frontend *fe, u16 *signal_strength) { /* borrowed from lgdt330x.c * * Calculate strength from SNR up to 35dB * Even though the SNR can go higher than 35dB, * there is some comfort factor in having a range of * strong signals that can show at 100% */ u16 snr; u32 tmp; int ret = au8522_read_snr(fe, &snr); *signal_strength = 0; if (0 == ret) { /* The following calculation method was chosen * purely for the sake of code re-use from the * other demod drivers that use this method */ /* Convert from SNR in dB * 10 to 8.24 fixed-point */ tmp = (snr * ((1 << 24) / 10)); /* Convert from 8.24 fixed-point to * scale the range 0 - 35*2^24 into 0 - 65535*/ if (tmp >= 8960 * 0x10000) *signal_strength = 0xffff; else *signal_strength = tmp / 8960; } return ret; } static int au8522_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks) { struct au8522_state *state = fe->demodulator_priv; if (state->current_modulation == VSB_8) *ucblocks = au8522_readreg(state, 0x4087); else *ucblocks = au8522_readreg(state, 0x4543); return 0; } static int au8522_read_ber(struct dvb_frontend *fe, u32 *ber) { return 
au8522_read_ucblocks(fe, ber); } static int au8522_get_frontend(struct dvb_frontend *fe) { struct dtv_frontend_properties *c = &fe->dtv_property_cache; struct au8522_state *state = fe->demodulator_priv; c->frequency = state->current_frequency; c->modulation = state->current_modulation; return 0; } static int au8522_get_tune_settings(struct dvb_frontend *fe, struct dvb_frontend_tune_settings *tune) { tune->min_delay_ms = 1000; return 0; } static struct dvb_frontend_ops au8522_ops; int au8522_get_state(struct au8522_state **state, struct i2c_adapter *i2c, u8 client_address) { int ret; mutex_lock(&au8522_list_mutex); ret = hybrid_tuner_request_state(struct au8522_state, (*state), hybrid_tuner_instance_list, i2c, client_address, "au8522"); mutex_unlock(&au8522_list_mutex); return ret; } void au8522_release_state(struct au8522_state *state) { mutex_lock(&au8522_list_mutex); if (state != NULL) hybrid_tuner_release_state(state); mutex_unlock(&au8522_list_mutex); } static void au8522_release(struct dvb_frontend *fe) { struct au8522_state *state = fe->demodulator_priv; au8522_release_state(state); } struct dvb_frontend *au8522_attach(const struct au8522_config *config, struct i2c_adapter *i2c) { struct au8522_state *state = NULL; int instance; /* allocate memory for the internal state */ instance = au8522_get_state(&state, i2c, config->demod_address); switch (instance) { case 0: dprintk("%s state allocation failed\n", __func__); break; case 1: /* new demod instance */ dprintk("%s using new instance\n", __func__); break; default: /* existing demod instance */ dprintk("%s using existing instance\n", __func__); break; } /* setup the state */ state->config = config; state->i2c = i2c; state->operational_mode = AU8522_DIGITAL_MODE; /* create dvb_frontend */ memcpy(&state->frontend.ops, &au8522_ops, sizeof(struct dvb_frontend_ops)); state->frontend.demodulator_priv = state; if (au8522_init(&state->frontend) != 0) { printk(KERN_ERR "%s: Failed to initialize correctly\n", __func__); 
goto error; } /* Note: Leaving the I2C gate open here. */ au8522_i2c_gate_ctrl(&state->frontend, 1); return &state->frontend; error: au8522_release_state(state); return NULL; } EXPORT_SYMBOL(au8522_attach); static struct dvb_frontend_ops au8522_ops = { .delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B }, .info = { .name = "Auvitek AU8522 QAM/8VSB Frontend", .frequency_min = 54000000, .frequency_max = 858000000, .frequency_stepsize = 62500, .caps = FE_CAN_QAM_64 | FE_CAN_QAM_256 | FE_CAN_8VSB }, .init = au8522_init, .sleep = au8522_sleep, .i2c_gate_ctrl = au8522_i2c_gate_ctrl, .set_frontend = au8522_set_frontend, .get_frontend = au8522_get_frontend, .get_tune_settings = au8522_get_tune_settings, .read_status = au8522_read_status, .read_ber = au8522_read_ber, .read_signal_strength = au8522_read_signal_strength, .read_snr = au8522_read_snr, .read_ucblocks = au8522_read_ucblocks, .release = au8522_release, }; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "Enable verbose debug messages"); MODULE_DESCRIPTION("Auvitek AU8522 QAM-B/ATSC Demodulator driver"); MODULE_AUTHOR("Steven Toth"); MODULE_LICENSE("GPL");
gpl-2.0
shivdasgujare/linux-omap
drivers/watchdog/wdt977.c
7384
12117
/* * Wdt977 0.04: A Watchdog Device for Netwinder W83977AF chip * * (c) Copyright 1998 Rebel.com (Woody Suwalski <woody@netwinder.org>) * * ----------------------- * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * ----------------------- * 14-Dec-2001 Matt Domsch <Matt_Domsch@dell.com> * Added nowayout module option to override CONFIG_WATCHDOG_NOWAYOUT * 19-Dec-2001 Woody Suwalski: Netwinder fixes, ioctl interface * 06-Jan-2002 Woody Suwalski: For compatibility, convert all timeouts * from minutes to seconds. * 07-Jul-2003 Daniele Bellucci: Audit return code of misc_register in * nwwatchdog_init. * 25-Oct-2005 Woody Suwalski: Convert addresses to #defs, add spinlocks * remove limitiation to be used on * Netwinders only */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/fs.h> #include <linux/miscdevice.h> #include <linux/init.h> #include <linux/ioport.h> #include <linux/watchdog.h> #include <linux/notifier.h> #include <linux/reboot.h> #include <linux/io.h> #include <linux/uaccess.h> #include <asm/mach-types.h> #define WATCHDOG_VERSION "0.04" #define WATCHDOG_NAME "Wdt977" #define IO_INDEX_PORT 0x370 /* on some systems it can be 0x3F0 */ #define IO_DATA_PORT (IO_INDEX_PORT + 1) #define UNLOCK_DATA 0x87 #define LOCK_DATA 0xAA #define DEVICE_REGISTER 0x07 #define DEFAULT_TIMEOUT 60 /* default timeout in seconds */ static int timeout = DEFAULT_TIMEOUT; static int timeoutM; /* timeout in minutes */ static unsigned long timer_alive; static int testmode; static char expect_close; static DEFINE_SPINLOCK(spinlock); module_param(timeout, int, 0); MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds (60..15300, default=" __MODULE_STRING(DEFAULT_TIMEOUT) 
")"); module_param(testmode, int, 0); MODULE_PARM_DESC(testmode, "Watchdog testmode (1 = no reboot), default=0"); static bool nowayout = WATCHDOG_NOWAYOUT; module_param(nowayout, bool, 0); MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); /* * Start the watchdog */ static int wdt977_start(void) { unsigned long flags; spin_lock_irqsave(&spinlock, flags); /* unlock the SuperIO chip */ outb_p(UNLOCK_DATA, IO_INDEX_PORT); outb_p(UNLOCK_DATA, IO_INDEX_PORT); /* select device Aux2 (device=8) and set watchdog regs F2, F3 and F4 * F2 has the timeout in minutes * F3 could be set to the POWER LED blink (with GP17 set to PowerLed) * at timeout, and to reset timer on kbd/mouse activity (not impl.) * F4 is used to just clear the TIMEOUT'ed state (bit 0) */ outb_p(DEVICE_REGISTER, IO_INDEX_PORT); outb_p(0x08, IO_DATA_PORT); outb_p(0xF2, IO_INDEX_PORT); outb_p(timeoutM, IO_DATA_PORT); outb_p(0xF3, IO_INDEX_PORT); outb_p(0x00, IO_DATA_PORT); /* another setting is 0E for kbd/mouse/LED */ outb_p(0xF4, IO_INDEX_PORT); outb_p(0x00, IO_DATA_PORT); /* At last select device Aux1 (dev=7) and set GP16 as a * watchdog output. 
In test mode watch the bit 1 on F4 to * indicate "triggered" */ if (!testmode) { outb_p(DEVICE_REGISTER, IO_INDEX_PORT); outb_p(0x07, IO_DATA_PORT); outb_p(0xE6, IO_INDEX_PORT); outb_p(0x08, IO_DATA_PORT); } /* lock the SuperIO chip */ outb_p(LOCK_DATA, IO_INDEX_PORT); spin_unlock_irqrestore(&spinlock, flags); pr_info("activated\n"); return 0; } /* * Stop the watchdog */ static int wdt977_stop(void) { unsigned long flags; spin_lock_irqsave(&spinlock, flags); /* unlock the SuperIO chip */ outb_p(UNLOCK_DATA, IO_INDEX_PORT); outb_p(UNLOCK_DATA, IO_INDEX_PORT); /* select device Aux2 (device=8) and set watchdog regs F2,F3 and F4 * F3 is reset to its default state * F4 can clear the TIMEOUT'ed state (bit 0) - back to default * We can not use GP17 as a PowerLed, as we use its usage as a RedLed */ outb_p(DEVICE_REGISTER, IO_INDEX_PORT); outb_p(0x08, IO_DATA_PORT); outb_p(0xF2, IO_INDEX_PORT); outb_p(0xFF, IO_DATA_PORT); outb_p(0xF3, IO_INDEX_PORT); outb_p(0x00, IO_DATA_PORT); outb_p(0xF4, IO_INDEX_PORT); outb_p(0x00, IO_DATA_PORT); outb_p(0xF2, IO_INDEX_PORT); outb_p(0x00, IO_DATA_PORT); /* at last select device Aux1 (dev=7) and set GP16 as a watchdog output */ outb_p(DEVICE_REGISTER, IO_INDEX_PORT); outb_p(0x07, IO_DATA_PORT); outb_p(0xE6, IO_INDEX_PORT); outb_p(0x08, IO_DATA_PORT); /* lock the SuperIO chip */ outb_p(LOCK_DATA, IO_INDEX_PORT); spin_unlock_irqrestore(&spinlock, flags); pr_info("shutdown\n"); return 0; } /* * Send a keepalive ping to the watchdog * This is done by simply re-writing the timeout to reg. 
0xF2 */ static int wdt977_keepalive(void) { unsigned long flags; spin_lock_irqsave(&spinlock, flags); /* unlock the SuperIO chip */ outb_p(UNLOCK_DATA, IO_INDEX_PORT); outb_p(UNLOCK_DATA, IO_INDEX_PORT); /* select device Aux2 (device=8) and kicks watchdog reg F2 */ /* F2 has the timeout in minutes */ outb_p(DEVICE_REGISTER, IO_INDEX_PORT); outb_p(0x08, IO_DATA_PORT); outb_p(0xF2, IO_INDEX_PORT); outb_p(timeoutM, IO_DATA_PORT); /* lock the SuperIO chip */ outb_p(LOCK_DATA, IO_INDEX_PORT); spin_unlock_irqrestore(&spinlock, flags); return 0; } /* * Set the watchdog timeout value */ static int wdt977_set_timeout(int t) { int tmrval; /* convert seconds to minutes, rounding up */ tmrval = (t + 59) / 60; if (machine_is_netwinder()) { /* we have a hw bug somewhere, so each 977 minute is actually * only 30sec. This limits the max timeout to half of device * max of 255 minutes... */ tmrval += tmrval; } if (tmrval < 1 || tmrval > 255) return -EINVAL; /* timeout is the timeout in seconds, timeoutM is the timeout in minutes) */ timeout = t; timeoutM = tmrval; return 0; } /* * Get the watchdog status */ static int wdt977_get_status(int *status) { int new_status; unsigned long flags; spin_lock_irqsave(&spinlock, flags); /* unlock the SuperIO chip */ outb_p(UNLOCK_DATA, IO_INDEX_PORT); outb_p(UNLOCK_DATA, IO_INDEX_PORT); /* select device Aux2 (device=8) and read watchdog reg F4 */ outb_p(DEVICE_REGISTER, IO_INDEX_PORT); outb_p(0x08, IO_DATA_PORT); outb_p(0xF4, IO_INDEX_PORT); new_status = inb_p(IO_DATA_PORT); /* lock the SuperIO chip */ outb_p(LOCK_DATA, IO_INDEX_PORT); spin_unlock_irqrestore(&spinlock, flags); *status = 0; if (new_status & 1) *status |= WDIOF_CARDRESET; return 0; } /* * /dev/watchdog handling */ static int wdt977_open(struct inode *inode, struct file *file) { /* If the watchdog is alive we don't need to start it again */ if (test_and_set_bit(0, &timer_alive)) return -EBUSY; if (nowayout) __module_get(THIS_MODULE); wdt977_start(); return nonseekable_open(inode, 
file); } static int wdt977_release(struct inode *inode, struct file *file) { /* * Shut off the timer. * Lock it in if it's a module and we set nowayout */ if (expect_close == 42) { wdt977_stop(); clear_bit(0, &timer_alive); } else { wdt977_keepalive(); pr_crit("Unexpected close, not stopping watchdog!\n"); } expect_close = 0; return 0; } /* * wdt977_write: * @file: file handle to the watchdog * @buf: buffer to write (unused as data does not matter here * @count: count of bytes * @ppos: pointer to the position to write. No seeks allowed * * A write to a watchdog device is defined as a keepalive signal. Any * write of data will do, as we we don't define content meaning. */ static ssize_t wdt977_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { if (count) { if (!nowayout) { size_t i; /* In case it was set long ago */ expect_close = 0; for (i = 0; i != count; i++) { char c; if (get_user(c, buf + i)) return -EFAULT; if (c == 'V') expect_close = 42; } } /* someone wrote to us, we should restart timer */ wdt977_keepalive(); } return count; } static const struct watchdog_info ident = { .options = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING, .firmware_version = 1, .identity = WATCHDOG_NAME, }; /* * wdt977_ioctl: * @inode: inode of the device * @file: file handle to the device * @cmd: watchdog command * @arg: argument pointer * * The watchdog API defines a common set of functions for all watchdogs * according to their available features. */ static long wdt977_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int status; int new_options, retval = -EINVAL; int new_timeout; union { struct watchdog_info __user *ident; int __user *i; } uarg; uarg.i = (int __user *)arg; switch (cmd) { case WDIOC_GETSUPPORT: return copy_to_user(uarg.ident, &ident, sizeof(ident)) ? 
-EFAULT : 0; case WDIOC_GETSTATUS: wdt977_get_status(&status); return put_user(status, uarg.i); case WDIOC_GETBOOTSTATUS: return put_user(0, uarg.i); case WDIOC_SETOPTIONS: if (get_user(new_options, uarg.i)) return -EFAULT; if (new_options & WDIOS_DISABLECARD) { wdt977_stop(); retval = 0; } if (new_options & WDIOS_ENABLECARD) { wdt977_start(); retval = 0; } return retval; case WDIOC_KEEPALIVE: wdt977_keepalive(); return 0; case WDIOC_SETTIMEOUT: if (get_user(new_timeout, uarg.i)) return -EFAULT; if (wdt977_set_timeout(new_timeout)) return -EINVAL; wdt977_keepalive(); /* Fall */ case WDIOC_GETTIMEOUT: return put_user(timeout, uarg.i); default: return -ENOTTY; } } static int wdt977_notify_sys(struct notifier_block *this, unsigned long code, void *unused) { if (code == SYS_DOWN || code == SYS_HALT) wdt977_stop(); return NOTIFY_DONE; } static const struct file_operations wdt977_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .write = wdt977_write, .unlocked_ioctl = wdt977_ioctl, .open = wdt977_open, .release = wdt977_release, }; static struct miscdevice wdt977_miscdev = { .minor = WATCHDOG_MINOR, .name = "watchdog", .fops = &wdt977_fops, }; static struct notifier_block wdt977_notifier = { .notifier_call = wdt977_notify_sys, }; static int __init wd977_init(void) { int rc; pr_info("driver v%s\n", WATCHDOG_VERSION); /* Check that the timeout value is within its range; if not reset to the default */ if (wdt977_set_timeout(timeout)) { wdt977_set_timeout(DEFAULT_TIMEOUT); pr_info("timeout value must be 60 < timeout < 15300, using %d\n", DEFAULT_TIMEOUT); } /* on Netwinder the IOports are already reserved by * arch/arm/mach-footbridge/netwinder-hw.c */ if (!machine_is_netwinder()) { if (!request_region(IO_INDEX_PORT, 2, WATCHDOG_NAME)) { pr_err("I/O address 0x%04x already in use\n", IO_INDEX_PORT); rc = -EIO; goto err_out; } } rc = register_reboot_notifier(&wdt977_notifier); if (rc) { pr_err("cannot register reboot notifier (err=%d)\n", rc); goto err_out_region; } rc = 
misc_register(&wdt977_miscdev); if (rc) { pr_err("cannot register miscdev on minor=%d (err=%d)\n", wdt977_miscdev.minor, rc); goto err_out_reboot; } pr_info("initialized. timeout=%d sec (nowayout=%d, testmode=%i)\n", timeout, nowayout, testmode); return 0; err_out_reboot: unregister_reboot_notifier(&wdt977_notifier); err_out_region: if (!machine_is_netwinder()) release_region(IO_INDEX_PORT, 2); err_out: return rc; } static void __exit wd977_exit(void) { wdt977_stop(); misc_deregister(&wdt977_miscdev); unregister_reboot_notifier(&wdt977_notifier); release_region(IO_INDEX_PORT, 2); } module_init(wd977_init); module_exit(wd977_exit); MODULE_AUTHOR("Woody Suwalski <woodys@xandros.com>"); MODULE_DESCRIPTION("W83977AF Watchdog driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
gpl-2.0
bilalliberty/android_kernel_htc_liberty-villec2
net/netfilter/xt_nfacct.c
7896
1966
/* * (C) 2011 Pablo Neira Ayuso <pablo@netfilter.org> * (C) 2011 Intra2net AG <http://www.intra2net.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 (or any * later at your option) as published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/skbuff.h> #include <linux/netfilter/x_tables.h> #include <linux/netfilter/nfnetlink_acct.h> #include <linux/netfilter/xt_nfacct.h> MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>"); MODULE_DESCRIPTION("Xtables: match for the extended accounting infrastructure"); MODULE_LICENSE("GPL"); MODULE_ALIAS("ipt_nfacct"); MODULE_ALIAS("ip6t_nfacct"); static bool nfacct_mt(const struct sk_buff *skb, struct xt_action_param *par) { const struct xt_nfacct_match_info *info = par->targinfo; nfnl_acct_update(skb, info->nfacct); return true; } static int nfacct_mt_checkentry(const struct xt_mtchk_param *par) { struct xt_nfacct_match_info *info = par->matchinfo; struct nf_acct *nfacct; nfacct = nfnl_acct_find_get(info->name); if (nfacct == NULL) { pr_info("xt_nfacct: accounting object with name `%s' " "does not exists\n", info->name); return -ENOENT; } info->nfacct = nfacct; return 0; } static void nfacct_mt_destroy(const struct xt_mtdtor_param *par) { const struct xt_nfacct_match_info *info = par->matchinfo; nfnl_acct_put(info->nfacct); } static struct xt_match nfacct_mt_reg __read_mostly = { .name = "nfacct", .family = NFPROTO_UNSPEC, .checkentry = nfacct_mt_checkentry, .match = nfacct_mt, .destroy = nfacct_mt_destroy, .matchsize = sizeof(struct xt_nfacct_match_info), .me = THIS_MODULE, }; static int __init nfacct_mt_init(void) { return xt_register_match(&nfacct_mt_reg); } static void __exit nfacct_mt_exit(void) { xt_unregister_match(&nfacct_mt_reg); } module_init(nfacct_mt_init); module_exit(nfacct_mt_exit);
gpl-2.0
DirtyUnicorns/android_kernel_asus_grouper
drivers/video/sis/init.c
8152
108147
/* $XFree86$ */ /* $XdotOrg$ */ /* * Mode initializing code (CRT1 section) for * for SiS 300/305/540/630/730, * SiS 315/550/[M]650/651/[M]661[FGM]X/[M]74x[GX]/330/[M]76x[GX], * XGI Volari V3XT/V5/V8, Z7 * (Universal module for Linux kernel framebuffer and X.org/XFree86 4.x) * * Copyright (C) 2001-2005 by Thomas Winischhofer, Vienna, Austria * * If distributed as part of the Linux kernel, the following license terms * apply: * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the named License, * * or any later version. * * * * This program is distributed in the hope that it will be useful, * * but WITHOUT ANY WARRANTY; without even the implied warranty of * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * * * You should have received a copy of the GNU General Public License * * along with this program; if not, write to the Free Software * * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA * * Otherwise, the following license terms apply: * * * Redistribution and use in source and binary forms, with or without * * modification, are permitted provided that the following conditions * * are met: * * 1) Redistributions of source code must retain the above copyright * * notice, this list of conditions and the following disclaimer. * * 2) Redistributions in binary form must reproduce the above copyright * * notice, this list of conditions and the following disclaimer in the * * documentation and/or other materials provided with the distribution. * * 3) The name of the author may not be used to endorse or promote products * * derived from this software without specific prior written permission. 
* * * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Author: Thomas Winischhofer <thomas@winischhofer.net> * * Formerly based on non-functional code-fragements for 300 series by SiS, Inc. * Used by permission. */ #include "init.h" #ifdef CONFIG_FB_SIS_300 #include "300vtbl.h" #endif #ifdef CONFIG_FB_SIS_315 #include "310vtbl.h" #endif #if defined(ALLOC_PRAGMA) #pragma alloc_text(PAGE,SiSSetMode) #endif /*********************************************/ /* POINTER INITIALIZATION */ /*********************************************/ #if defined(CONFIG_FB_SIS_300) || defined(CONFIG_FB_SIS_315) static void InitCommonPointer(struct SiS_Private *SiS_Pr) { SiS_Pr->SiS_SModeIDTable = SiS_SModeIDTable; SiS_Pr->SiS_StResInfo = SiS_StResInfo; SiS_Pr->SiS_ModeResInfo = SiS_ModeResInfo; SiS_Pr->SiS_StandTable = SiS_StandTable; SiS_Pr->SiS_NTSCTiming = SiS_NTSCTiming; SiS_Pr->SiS_PALTiming = SiS_PALTiming; SiS_Pr->SiS_HiTVSt1Timing = SiS_HiTVSt1Timing; SiS_Pr->SiS_HiTVSt2Timing = SiS_HiTVSt2Timing; SiS_Pr->SiS_HiTVExtTiming = SiS_HiTVExtTiming; SiS_Pr->SiS_HiTVGroup3Data = SiS_HiTVGroup3Data; SiS_Pr->SiS_HiTVGroup3Simu = SiS_HiTVGroup3Simu; #if 0 SiS_Pr->SiS_HiTVTextTiming = SiS_HiTVTextTiming; SiS_Pr->SiS_HiTVGroup3Text = SiS_HiTVGroup3Text; #endif SiS_Pr->SiS_StPALData = SiS_StPALData; 
SiS_Pr->SiS_ExtPALData = SiS_ExtPALData; SiS_Pr->SiS_StNTSCData = SiS_StNTSCData; SiS_Pr->SiS_ExtNTSCData = SiS_ExtNTSCData; SiS_Pr->SiS_St1HiTVData = SiS_StHiTVData; SiS_Pr->SiS_St2HiTVData = SiS_St2HiTVData; SiS_Pr->SiS_ExtHiTVData = SiS_ExtHiTVData; SiS_Pr->SiS_St525iData = SiS_StNTSCData; SiS_Pr->SiS_St525pData = SiS_St525pData; SiS_Pr->SiS_St750pData = SiS_St750pData; SiS_Pr->SiS_Ext525iData = SiS_ExtNTSCData; SiS_Pr->SiS_Ext525pData = SiS_ExtNTSCData; SiS_Pr->SiS_Ext750pData = SiS_Ext750pData; SiS_Pr->pSiS_OutputSelect = &SiS_OutputSelect; SiS_Pr->pSiS_SoftSetting = &SiS_SoftSetting; SiS_Pr->SiS_LCD1280x720Data = SiS_LCD1280x720Data; SiS_Pr->SiS_StLCD1280x768_2Data = SiS_StLCD1280x768_2Data; SiS_Pr->SiS_ExtLCD1280x768_2Data = SiS_ExtLCD1280x768_2Data; SiS_Pr->SiS_LCD1280x800Data = SiS_LCD1280x800Data; SiS_Pr->SiS_LCD1280x800_2Data = SiS_LCD1280x800_2Data; SiS_Pr->SiS_LCD1280x854Data = SiS_LCD1280x854Data; SiS_Pr->SiS_LCD1280x960Data = SiS_LCD1280x960Data; SiS_Pr->SiS_StLCD1400x1050Data = SiS_StLCD1400x1050Data; SiS_Pr->SiS_ExtLCD1400x1050Data = SiS_ExtLCD1400x1050Data; SiS_Pr->SiS_LCD1680x1050Data = SiS_LCD1680x1050Data; SiS_Pr->SiS_StLCD1600x1200Data = SiS_StLCD1600x1200Data; SiS_Pr->SiS_ExtLCD1600x1200Data = SiS_ExtLCD1600x1200Data; SiS_Pr->SiS_NoScaleData = SiS_NoScaleData; SiS_Pr->SiS_LVDS320x240Data_1 = SiS_LVDS320x240Data_1; SiS_Pr->SiS_LVDS320x240Data_2 = SiS_LVDS320x240Data_2; SiS_Pr->SiS_LVDS640x480Data_1 = SiS_LVDS640x480Data_1; SiS_Pr->SiS_LVDS800x600Data_1 = SiS_LVDS800x600Data_1; SiS_Pr->SiS_LVDS1024x600Data_1 = SiS_LVDS1024x600Data_1; SiS_Pr->SiS_LVDS1024x768Data_1 = SiS_LVDS1024x768Data_1; SiS_Pr->SiS_LVDSCRT1320x240_1 = SiS_LVDSCRT1320x240_1; SiS_Pr->SiS_LVDSCRT1320x240_2 = SiS_LVDSCRT1320x240_2; SiS_Pr->SiS_LVDSCRT1320x240_2_H = SiS_LVDSCRT1320x240_2_H; SiS_Pr->SiS_LVDSCRT1320x240_3 = SiS_LVDSCRT1320x240_3; SiS_Pr->SiS_LVDSCRT1320x240_3_H = SiS_LVDSCRT1320x240_3_H; SiS_Pr->SiS_LVDSCRT1640x480_1 = SiS_LVDSCRT1640x480_1; 
SiS_Pr->SiS_LVDSCRT1640x480_1_H = SiS_LVDSCRT1640x480_1_H; #if 0 SiS_Pr->SiS_LVDSCRT11024x600_1 = SiS_LVDSCRT11024x600_1; SiS_Pr->SiS_LVDSCRT11024x600_1_H = SiS_LVDSCRT11024x600_1_H; SiS_Pr->SiS_LVDSCRT11024x600_2 = SiS_LVDSCRT11024x600_2; SiS_Pr->SiS_LVDSCRT11024x600_2_H = SiS_LVDSCRT11024x600_2_H; #endif SiS_Pr->SiS_CHTVUNTSCData = SiS_CHTVUNTSCData; SiS_Pr->SiS_CHTVONTSCData = SiS_CHTVONTSCData; SiS_Pr->SiS_PanelMinLVDS = Panel_800x600; /* lowest value LVDS/LCDA */ SiS_Pr->SiS_PanelMin301 = Panel_1024x768; /* lowest value 301 */ } #endif #ifdef CONFIG_FB_SIS_300 static void InitTo300Pointer(struct SiS_Private *SiS_Pr) { InitCommonPointer(SiS_Pr); SiS_Pr->SiS_VBModeIDTable = SiS300_VBModeIDTable; SiS_Pr->SiS_EModeIDTable = SiS300_EModeIDTable; SiS_Pr->SiS_RefIndex = SiS300_RefIndex; SiS_Pr->SiS_CRT1Table = SiS300_CRT1Table; if(SiS_Pr->ChipType == SIS_300) { SiS_Pr->SiS_MCLKData_0 = SiS300_MCLKData_300; /* 300 */ } else { SiS_Pr->SiS_MCLKData_0 = SiS300_MCLKData_630; /* 630, 730 */ } SiS_Pr->SiS_VCLKData = SiS300_VCLKData; SiS_Pr->SiS_VBVCLKData = (struct SiS_VBVCLKData *)SiS300_VCLKData; SiS_Pr->SiS_SR15 = SiS300_SR15; SiS_Pr->SiS_PanelDelayTbl = SiS300_PanelDelayTbl; SiS_Pr->SiS_PanelDelayTblLVDS = SiS300_PanelDelayTbl; SiS_Pr->SiS_ExtLCD1024x768Data = SiS300_ExtLCD1024x768Data; SiS_Pr->SiS_St2LCD1024x768Data = SiS300_St2LCD1024x768Data; SiS_Pr->SiS_ExtLCD1280x1024Data = SiS300_ExtLCD1280x1024Data; SiS_Pr->SiS_St2LCD1280x1024Data = SiS300_St2LCD1280x1024Data; SiS_Pr->SiS_CRT2Part2_1024x768_1 = SiS300_CRT2Part2_1024x768_1; SiS_Pr->SiS_CRT2Part2_1024x768_2 = SiS300_CRT2Part2_1024x768_2; SiS_Pr->SiS_CRT2Part2_1024x768_3 = SiS300_CRT2Part2_1024x768_3; SiS_Pr->SiS_CHTVUPALData = SiS300_CHTVUPALData; SiS_Pr->SiS_CHTVOPALData = SiS300_CHTVOPALData; SiS_Pr->SiS_CHTVUPALMData = SiS_CHTVUNTSCData; /* not supported on 300 series */ SiS_Pr->SiS_CHTVOPALMData = SiS_CHTVONTSCData; /* not supported on 300 series */ SiS_Pr->SiS_CHTVUPALNData = SiS300_CHTVUPALData; /* not 
supported on 300 series */ SiS_Pr->SiS_CHTVOPALNData = SiS300_CHTVOPALData; /* not supported on 300 series */ SiS_Pr->SiS_CHTVSOPALData = SiS300_CHTVSOPALData; SiS_Pr->SiS_LVDS848x480Data_1 = SiS300_LVDS848x480Data_1; SiS_Pr->SiS_LVDS848x480Data_2 = SiS300_LVDS848x480Data_2; SiS_Pr->SiS_LVDSBARCO1024Data_1 = SiS300_LVDSBARCO1024Data_1; SiS_Pr->SiS_LVDSBARCO1366Data_1 = SiS300_LVDSBARCO1366Data_1; SiS_Pr->SiS_LVDSBARCO1366Data_2 = SiS300_LVDSBARCO1366Data_2; SiS_Pr->SiS_PanelType04_1a = SiS300_PanelType04_1a; SiS_Pr->SiS_PanelType04_2a = SiS300_PanelType04_2a; SiS_Pr->SiS_PanelType04_1b = SiS300_PanelType04_1b; SiS_Pr->SiS_PanelType04_2b = SiS300_PanelType04_2b; SiS_Pr->SiS_CHTVCRT1UNTSC = SiS300_CHTVCRT1UNTSC; SiS_Pr->SiS_CHTVCRT1ONTSC = SiS300_CHTVCRT1ONTSC; SiS_Pr->SiS_CHTVCRT1UPAL = SiS300_CHTVCRT1UPAL; SiS_Pr->SiS_CHTVCRT1OPAL = SiS300_CHTVCRT1OPAL; SiS_Pr->SiS_CHTVCRT1SOPAL = SiS300_CHTVCRT1SOPAL; SiS_Pr->SiS_CHTVReg_UNTSC = SiS300_CHTVReg_UNTSC; SiS_Pr->SiS_CHTVReg_ONTSC = SiS300_CHTVReg_ONTSC; SiS_Pr->SiS_CHTVReg_UPAL = SiS300_CHTVReg_UPAL; SiS_Pr->SiS_CHTVReg_OPAL = SiS300_CHTVReg_OPAL; SiS_Pr->SiS_CHTVReg_UPALM = SiS300_CHTVReg_UNTSC; /* not supported on 300 series */ SiS_Pr->SiS_CHTVReg_OPALM = SiS300_CHTVReg_ONTSC; /* not supported on 300 series */ SiS_Pr->SiS_CHTVReg_UPALN = SiS300_CHTVReg_UPAL; /* not supported on 300 series */ SiS_Pr->SiS_CHTVReg_OPALN = SiS300_CHTVReg_OPAL; /* not supported on 300 series */ SiS_Pr->SiS_CHTVReg_SOPAL = SiS300_CHTVReg_SOPAL; SiS_Pr->SiS_CHTVVCLKUNTSC = SiS300_CHTVVCLKUNTSC; SiS_Pr->SiS_CHTVVCLKONTSC = SiS300_CHTVVCLKONTSC; SiS_Pr->SiS_CHTVVCLKUPAL = SiS300_CHTVVCLKUPAL; SiS_Pr->SiS_CHTVVCLKOPAL = SiS300_CHTVVCLKOPAL; SiS_Pr->SiS_CHTVVCLKUPALM = SiS300_CHTVVCLKUNTSC; /* not supported on 300 series */ SiS_Pr->SiS_CHTVVCLKOPALM = SiS300_CHTVVCLKONTSC; /* not supported on 300 series */ SiS_Pr->SiS_CHTVVCLKUPALN = SiS300_CHTVVCLKUPAL; /* not supported on 300 series */ SiS_Pr->SiS_CHTVVCLKOPALN = SiS300_CHTVVCLKOPAL; /* not 
supported on 300 series */ SiS_Pr->SiS_CHTVVCLKSOPAL = SiS300_CHTVVCLKSOPAL; } #endif #ifdef CONFIG_FB_SIS_315 static void InitTo310Pointer(struct SiS_Private *SiS_Pr) { InitCommonPointer(SiS_Pr); SiS_Pr->SiS_EModeIDTable = SiS310_EModeIDTable; SiS_Pr->SiS_RefIndex = SiS310_RefIndex; SiS_Pr->SiS_CRT1Table = SiS310_CRT1Table; if(SiS_Pr->ChipType >= SIS_340) { SiS_Pr->SiS_MCLKData_0 = SiS310_MCLKData_0_340; /* 340 + XGI */ } else if(SiS_Pr->ChipType >= SIS_761) { SiS_Pr->SiS_MCLKData_0 = SiS310_MCLKData_0_761; /* 761 - preliminary */ } else if(SiS_Pr->ChipType >= SIS_760) { SiS_Pr->SiS_MCLKData_0 = SiS310_MCLKData_0_760; /* 760 */ } else if(SiS_Pr->ChipType >= SIS_661) { SiS_Pr->SiS_MCLKData_0 = SiS310_MCLKData_0_660; /* 661/741 */ } else if(SiS_Pr->ChipType == SIS_330) { SiS_Pr->SiS_MCLKData_0 = SiS310_MCLKData_0_330; /* 330 */ } else if(SiS_Pr->ChipType > SIS_315PRO) { SiS_Pr->SiS_MCLKData_0 = SiS310_MCLKData_0_650; /* 550, 650, 740 */ } else { SiS_Pr->SiS_MCLKData_0 = SiS310_MCLKData_0_315; /* 315 */ } if(SiS_Pr->ChipType >= SIS_340) { SiS_Pr->SiS_MCLKData_1 = SiS310_MCLKData_1_340; } else { SiS_Pr->SiS_MCLKData_1 = SiS310_MCLKData_1; } SiS_Pr->SiS_VCLKData = SiS310_VCLKData; SiS_Pr->SiS_VBVCLKData = SiS310_VBVCLKData; SiS_Pr->SiS_SR15 = SiS310_SR15; SiS_Pr->SiS_PanelDelayTbl = SiS310_PanelDelayTbl; SiS_Pr->SiS_PanelDelayTblLVDS = SiS310_PanelDelayTblLVDS; SiS_Pr->SiS_St2LCD1024x768Data = SiS310_St2LCD1024x768Data; SiS_Pr->SiS_ExtLCD1024x768Data = SiS310_ExtLCD1024x768Data; SiS_Pr->SiS_St2LCD1280x1024Data = SiS310_St2LCD1280x1024Data; SiS_Pr->SiS_ExtLCD1280x1024Data = SiS310_ExtLCD1280x1024Data; SiS_Pr->SiS_CRT2Part2_1024x768_1 = SiS310_CRT2Part2_1024x768_1; SiS_Pr->SiS_CHTVUPALData = SiS310_CHTVUPALData; SiS_Pr->SiS_CHTVOPALData = SiS310_CHTVOPALData; SiS_Pr->SiS_CHTVUPALMData = SiS310_CHTVUPALMData; SiS_Pr->SiS_CHTVOPALMData = SiS310_CHTVOPALMData; SiS_Pr->SiS_CHTVUPALNData = SiS310_CHTVUPALNData; SiS_Pr->SiS_CHTVOPALNData = SiS310_CHTVOPALNData; 
SiS_Pr->SiS_CHTVSOPALData = SiS310_CHTVSOPALData; SiS_Pr->SiS_CHTVCRT1UNTSC = SiS310_CHTVCRT1UNTSC; SiS_Pr->SiS_CHTVCRT1ONTSC = SiS310_CHTVCRT1ONTSC; SiS_Pr->SiS_CHTVCRT1UPAL = SiS310_CHTVCRT1UPAL; SiS_Pr->SiS_CHTVCRT1OPAL = SiS310_CHTVCRT1OPAL; SiS_Pr->SiS_CHTVCRT1SOPAL = SiS310_CHTVCRT1OPAL; SiS_Pr->SiS_CHTVReg_UNTSC = SiS310_CHTVReg_UNTSC; SiS_Pr->SiS_CHTVReg_ONTSC = SiS310_CHTVReg_ONTSC; SiS_Pr->SiS_CHTVReg_UPAL = SiS310_CHTVReg_UPAL; SiS_Pr->SiS_CHTVReg_OPAL = SiS310_CHTVReg_OPAL; SiS_Pr->SiS_CHTVReg_UPALM = SiS310_CHTVReg_UPALM; SiS_Pr->SiS_CHTVReg_OPALM = SiS310_CHTVReg_OPALM; SiS_Pr->SiS_CHTVReg_UPALN = SiS310_CHTVReg_UPALN; SiS_Pr->SiS_CHTVReg_OPALN = SiS310_CHTVReg_OPALN; SiS_Pr->SiS_CHTVReg_SOPAL = SiS310_CHTVReg_OPAL; SiS_Pr->SiS_CHTVVCLKUNTSC = SiS310_CHTVVCLKUNTSC; SiS_Pr->SiS_CHTVVCLKONTSC = SiS310_CHTVVCLKONTSC; SiS_Pr->SiS_CHTVVCLKUPAL = SiS310_CHTVVCLKUPAL; SiS_Pr->SiS_CHTVVCLKOPAL = SiS310_CHTVVCLKOPAL; SiS_Pr->SiS_CHTVVCLKUPALM = SiS310_CHTVVCLKUPALM; SiS_Pr->SiS_CHTVVCLKOPALM = SiS310_CHTVVCLKOPALM; SiS_Pr->SiS_CHTVVCLKUPALN = SiS310_CHTVVCLKUPALN; SiS_Pr->SiS_CHTVVCLKOPALN = SiS310_CHTVVCLKOPALN; SiS_Pr->SiS_CHTVVCLKSOPAL = SiS310_CHTVVCLKOPAL; } #endif bool SiSInitPtr(struct SiS_Private *SiS_Pr) { if(SiS_Pr->ChipType < SIS_315H) { #ifdef CONFIG_FB_SIS_300 InitTo300Pointer(SiS_Pr); #else return false; #endif } else { #ifdef CONFIG_FB_SIS_315 InitTo310Pointer(SiS_Pr); #else return false; #endif } return true; } /*********************************************/ /* HELPER: Get ModeID */ /*********************************************/ static unsigned short SiS_GetModeID(int VGAEngine, unsigned int VBFlags, int HDisplay, int VDisplay, int Depth, bool FSTN, int LCDwidth, int LCDheight) { unsigned short ModeIndex = 0; switch(HDisplay) { case 320: if(VDisplay == 200) ModeIndex = ModeIndex_320x200[Depth]; else if(VDisplay == 240) { if((VBFlags & CRT2_LCD) && (FSTN)) ModeIndex = ModeIndex_320x240_FSTN[Depth]; else ModeIndex = ModeIndex_320x240[Depth]; } 
break; case 400: if((!(VBFlags & CRT1_LCDA)) || ((LCDwidth >= 800) && (LCDwidth >= 600))) { if(VDisplay == 300) ModeIndex = ModeIndex_400x300[Depth]; } break; case 512: if((!(VBFlags & CRT1_LCDA)) || ((LCDwidth >= 1024) && (LCDwidth >= 768))) { if(VDisplay == 384) ModeIndex = ModeIndex_512x384[Depth]; } break; case 640: if(VDisplay == 480) ModeIndex = ModeIndex_640x480[Depth]; else if(VDisplay == 400) ModeIndex = ModeIndex_640x400[Depth]; break; case 720: if(VDisplay == 480) ModeIndex = ModeIndex_720x480[Depth]; else if(VDisplay == 576) ModeIndex = ModeIndex_720x576[Depth]; break; case 768: if(VDisplay == 576) ModeIndex = ModeIndex_768x576[Depth]; break; case 800: if(VDisplay == 600) ModeIndex = ModeIndex_800x600[Depth]; else if(VDisplay == 480) ModeIndex = ModeIndex_800x480[Depth]; break; case 848: if(VDisplay == 480) ModeIndex = ModeIndex_848x480[Depth]; break; case 856: if(VDisplay == 480) ModeIndex = ModeIndex_856x480[Depth]; break; case 960: if(VGAEngine == SIS_315_VGA) { if(VDisplay == 540) ModeIndex = ModeIndex_960x540[Depth]; else if(VDisplay == 600) ModeIndex = ModeIndex_960x600[Depth]; } break; case 1024: if(VDisplay == 576) ModeIndex = ModeIndex_1024x576[Depth]; else if(VDisplay == 768) ModeIndex = ModeIndex_1024x768[Depth]; else if(VGAEngine == SIS_300_VGA) { if(VDisplay == 600) ModeIndex = ModeIndex_1024x600[Depth]; } break; case 1152: if(VDisplay == 864) ModeIndex = ModeIndex_1152x864[Depth]; if(VGAEngine == SIS_300_VGA) { if(VDisplay == 768) ModeIndex = ModeIndex_1152x768[Depth]; } break; case 1280: switch(VDisplay) { case 720: ModeIndex = ModeIndex_1280x720[Depth]; break; case 768: if(VGAEngine == SIS_300_VGA) { ModeIndex = ModeIndex_300_1280x768[Depth]; } else { ModeIndex = ModeIndex_310_1280x768[Depth]; } break; case 800: if(VGAEngine == SIS_315_VGA) { ModeIndex = ModeIndex_1280x800[Depth]; } break; case 854: if(VGAEngine == SIS_315_VGA) { ModeIndex = ModeIndex_1280x854[Depth]; } break; case 960: ModeIndex = ModeIndex_1280x960[Depth]; break; case 
1024: ModeIndex = ModeIndex_1280x1024[Depth]; break; } break; case 1360: if(VDisplay == 768) ModeIndex = ModeIndex_1360x768[Depth]; if(VGAEngine == SIS_300_VGA) { if(VDisplay == 1024) ModeIndex = ModeIndex_300_1360x1024[Depth]; } break; case 1400: if(VGAEngine == SIS_315_VGA) { if(VDisplay == 1050) { ModeIndex = ModeIndex_1400x1050[Depth]; } } break; case 1600: if(VDisplay == 1200) ModeIndex = ModeIndex_1600x1200[Depth]; break; case 1680: if(VGAEngine == SIS_315_VGA) { if(VDisplay == 1050) ModeIndex = ModeIndex_1680x1050[Depth]; } break; case 1920: if(VDisplay == 1440) ModeIndex = ModeIndex_1920x1440[Depth]; else if(VGAEngine == SIS_315_VGA) { if(VDisplay == 1080) ModeIndex = ModeIndex_1920x1080[Depth]; } break; case 2048: if(VDisplay == 1536) { if(VGAEngine == SIS_300_VGA) { ModeIndex = ModeIndex_300_2048x1536[Depth]; } else { ModeIndex = ModeIndex_310_2048x1536[Depth]; } } break; } return ModeIndex; } unsigned short SiS_GetModeID_LCD(int VGAEngine, unsigned int VBFlags, int HDisplay, int VDisplay, int Depth, bool FSTN, unsigned short CustomT, int LCDwidth, int LCDheight, unsigned int VBFlags2) { unsigned short ModeIndex = 0; if(VBFlags2 & (VB2_LVDS | VB2_30xBDH)) { switch(HDisplay) { case 320: if((CustomT != CUT_PANEL848) && (CustomT != CUT_PANEL856)) { if(VDisplay == 200) { if(!FSTN) ModeIndex = ModeIndex_320x200[Depth]; } else if(VDisplay == 240) { if(!FSTN) ModeIndex = ModeIndex_320x240[Depth]; else if(VGAEngine == SIS_315_VGA) { ModeIndex = ModeIndex_320x240_FSTN[Depth]; } } } break; case 400: if((CustomT != CUT_PANEL848) && (CustomT != CUT_PANEL856)) { if(!((VGAEngine == SIS_300_VGA) && (VBFlags2 & VB2_TRUMPION))) { if(VDisplay == 300) ModeIndex = ModeIndex_400x300[Depth]; } } break; case 512: if((CustomT != CUT_PANEL848) && (CustomT != CUT_PANEL856)) { if(!((VGAEngine == SIS_300_VGA) && (VBFlags2 & VB2_TRUMPION))) { if(LCDwidth >= 1024 && LCDwidth != 1152 && LCDheight >= 768) { if(VDisplay == 384) { ModeIndex = ModeIndex_512x384[Depth]; } } } } break; case 
640: if(VDisplay == 480) ModeIndex = ModeIndex_640x480[Depth]; else if(VDisplay == 400) { if((CustomT != CUT_PANEL848) && (CustomT != CUT_PANEL856)) ModeIndex = ModeIndex_640x400[Depth]; } break; case 800: if(VDisplay == 600) ModeIndex = ModeIndex_800x600[Depth]; break; case 848: if(CustomT == CUT_PANEL848) { if(VDisplay == 480) ModeIndex = ModeIndex_848x480[Depth]; } break; case 856: if(CustomT == CUT_PANEL856) { if(VDisplay == 480) ModeIndex = ModeIndex_856x480[Depth]; } break; case 1024: if(VDisplay == 768) ModeIndex = ModeIndex_1024x768[Depth]; else if(VGAEngine == SIS_300_VGA) { if((VDisplay == 600) && (LCDheight == 600)) { ModeIndex = ModeIndex_1024x600[Depth]; } } break; case 1152: if(VGAEngine == SIS_300_VGA) { if((VDisplay == 768) && (LCDheight == 768)) { ModeIndex = ModeIndex_1152x768[Depth]; } } break; case 1280: if(VDisplay == 1024) ModeIndex = ModeIndex_1280x1024[Depth]; else if(VGAEngine == SIS_315_VGA) { if((VDisplay == 768) && (LCDheight == 768)) { ModeIndex = ModeIndex_310_1280x768[Depth]; } } break; case 1360: if(VGAEngine == SIS_300_VGA) { if(CustomT == CUT_BARCO1366) { if(VDisplay == 1024) ModeIndex = ModeIndex_300_1360x1024[Depth]; } } if(CustomT == CUT_PANEL848) { if(VDisplay == 768) ModeIndex = ModeIndex_1360x768[Depth]; } break; case 1400: if(VGAEngine == SIS_315_VGA) { if(VDisplay == 1050) ModeIndex = ModeIndex_1400x1050[Depth]; } break; case 1600: if(VGAEngine == SIS_315_VGA) { if(VDisplay == 1200) ModeIndex = ModeIndex_1600x1200[Depth]; } break; } } else if(VBFlags2 & VB2_SISBRIDGE) { switch(HDisplay) { case 320: if(VDisplay == 200) ModeIndex = ModeIndex_320x200[Depth]; else if(VDisplay == 240) ModeIndex = ModeIndex_320x240[Depth]; break; case 400: if(LCDwidth >= 800 && LCDheight >= 600) { if(VDisplay == 300) ModeIndex = ModeIndex_400x300[Depth]; } break; case 512: if(LCDwidth >= 1024 && LCDheight >= 768 && LCDwidth != 1152) { if(VDisplay == 384) ModeIndex = ModeIndex_512x384[Depth]; } break; case 640: if(VDisplay == 480) ModeIndex = 
ModeIndex_640x480[Depth]; else if(VDisplay == 400) ModeIndex = ModeIndex_640x400[Depth]; break; case 720: if(VGAEngine == SIS_315_VGA) { if(VDisplay == 480) ModeIndex = ModeIndex_720x480[Depth]; else if(VDisplay == 576) ModeIndex = ModeIndex_720x576[Depth]; } break; case 768: if(VGAEngine == SIS_315_VGA) { if(VDisplay == 576) ModeIndex = ModeIndex_768x576[Depth]; } break; case 800: if(VDisplay == 600) ModeIndex = ModeIndex_800x600[Depth]; if(VGAEngine == SIS_315_VGA) { if(VDisplay == 480) ModeIndex = ModeIndex_800x480[Depth]; } break; case 848: if(VGAEngine == SIS_315_VGA) { if(VDisplay == 480) ModeIndex = ModeIndex_848x480[Depth]; } break; case 856: if(VGAEngine == SIS_315_VGA) { if(VDisplay == 480) ModeIndex = ModeIndex_856x480[Depth]; } break; case 960: if(VGAEngine == SIS_315_VGA) { if(VDisplay == 540) ModeIndex = ModeIndex_960x540[Depth]; else if(VDisplay == 600) ModeIndex = ModeIndex_960x600[Depth]; } break; case 1024: if(VDisplay == 768) ModeIndex = ModeIndex_1024x768[Depth]; if(VGAEngine == SIS_315_VGA) { if(VDisplay == 576) ModeIndex = ModeIndex_1024x576[Depth]; } break; case 1152: if(VGAEngine == SIS_315_VGA) { if(VDisplay == 864) ModeIndex = ModeIndex_1152x864[Depth]; } break; case 1280: switch(VDisplay) { case 720: ModeIndex = ModeIndex_1280x720[Depth]; case 768: if(VGAEngine == SIS_300_VGA) { ModeIndex = ModeIndex_300_1280x768[Depth]; } else { ModeIndex = ModeIndex_310_1280x768[Depth]; } break; case 800: if(VGAEngine == SIS_315_VGA) { ModeIndex = ModeIndex_1280x800[Depth]; } break; case 854: if(VGAEngine == SIS_315_VGA) { ModeIndex = ModeIndex_1280x854[Depth]; } break; case 960: ModeIndex = ModeIndex_1280x960[Depth]; break; case 1024: ModeIndex = ModeIndex_1280x1024[Depth]; break; } break; case 1360: if(VGAEngine == SIS_315_VGA) { /* OVER1280 only? 
*/ if(VDisplay == 768) ModeIndex = ModeIndex_1360x768[Depth]; } break; case 1400: if(VGAEngine == SIS_315_VGA) { if(VBFlags2 & VB2_LCDOVER1280BRIDGE) { if(VDisplay == 1050) ModeIndex = ModeIndex_1400x1050[Depth]; } } break; case 1600: if(VGAEngine == SIS_315_VGA) { if(VBFlags2 & VB2_LCDOVER1280BRIDGE) { if(VDisplay == 1200) ModeIndex = ModeIndex_1600x1200[Depth]; } } break; #ifndef VB_FORBID_CRT2LCD_OVER_1600 case 1680: if(VGAEngine == SIS_315_VGA) { if(VBFlags2 & VB2_LCDOVER1280BRIDGE) { if(VDisplay == 1050) ModeIndex = ModeIndex_1680x1050[Depth]; } } break; case 1920: if(VGAEngine == SIS_315_VGA) { if(VBFlags2 & VB2_LCDOVER1600BRIDGE) { if(VDisplay == 1440) ModeIndex = ModeIndex_1920x1440[Depth]; } } break; case 2048: if(VGAEngine == SIS_315_VGA) { if(VBFlags2 & VB2_LCDOVER1600BRIDGE) { if(VDisplay == 1536) ModeIndex = ModeIndex_310_2048x1536[Depth]; } } break; #endif } } return ModeIndex; } unsigned short SiS_GetModeID_TV(int VGAEngine, unsigned int VBFlags, int HDisplay, int VDisplay, int Depth, unsigned int VBFlags2) { unsigned short ModeIndex = 0; if(VBFlags2 & VB2_CHRONTEL) { switch(HDisplay) { case 512: if(VGAEngine == SIS_315_VGA) { if(VDisplay == 384) ModeIndex = ModeIndex_512x384[Depth]; } break; case 640: if(VDisplay == 480) ModeIndex = ModeIndex_640x480[Depth]; else if(VDisplay == 400) ModeIndex = ModeIndex_640x400[Depth]; break; case 800: if(VDisplay == 600) ModeIndex = ModeIndex_800x600[Depth]; break; case 1024: if(VGAEngine == SIS_315_VGA) { if(VDisplay == 768) ModeIndex = ModeIndex_1024x768[Depth]; } break; } } else if(VBFlags2 & VB2_SISTVBRIDGE) { switch(HDisplay) { case 320: if(VDisplay == 200) ModeIndex = ModeIndex_320x200[Depth]; else if(VDisplay == 240) ModeIndex = ModeIndex_320x240[Depth]; break; case 400: if(VDisplay == 300) ModeIndex = ModeIndex_400x300[Depth]; break; case 512: if( ((VBFlags & TV_YPBPR) && (VBFlags & (TV_YPBPR750P | TV_YPBPR1080I))) || (VBFlags & TV_HIVISION) || ((!(VBFlags & (TV_YPBPR | TV_PALM))) && (VBFlags & TV_PAL)) ) 
{ if(VDisplay == 384) ModeIndex = ModeIndex_512x384[Depth]; } break; case 640: if(VDisplay == 480) ModeIndex = ModeIndex_640x480[Depth]; else if(VDisplay == 400) ModeIndex = ModeIndex_640x400[Depth]; break; case 720: if((!(VBFlags & TV_HIVISION)) && (!((VBFlags & TV_YPBPR) && (VBFlags & TV_YPBPR1080I)))) { if(VDisplay == 480) { ModeIndex = ModeIndex_720x480[Depth]; } else if(VDisplay == 576) { if( ((VBFlags & TV_YPBPR) && (VBFlags & TV_YPBPR750P)) || ((!(VBFlags & (TV_YPBPR | TV_PALM))) && (VBFlags & TV_PAL)) ) ModeIndex = ModeIndex_720x576[Depth]; } } break; case 768: if((!(VBFlags & TV_HIVISION)) && (!((VBFlags & TV_YPBPR) && (VBFlags & TV_YPBPR1080I)))) { if( ((VBFlags & TV_YPBPR) && (VBFlags & TV_YPBPR750P)) || ((!(VBFlags & (TV_YPBPR | TV_PALM))) && (VBFlags & TV_PAL)) ) { if(VDisplay == 576) ModeIndex = ModeIndex_768x576[Depth]; } } break; case 800: if(VDisplay == 600) ModeIndex = ModeIndex_800x600[Depth]; else if(VDisplay == 480) { if(!((VBFlags & TV_YPBPR) && (VBFlags & TV_YPBPR750P))) { ModeIndex = ModeIndex_800x480[Depth]; } } break; case 960: if(VGAEngine == SIS_315_VGA) { if(VDisplay == 600) { if((VBFlags & TV_HIVISION) || ((VBFlags & TV_YPBPR) && (VBFlags & TV_YPBPR1080I))) { ModeIndex = ModeIndex_960x600[Depth]; } } } break; case 1024: if(VDisplay == 768) { if(VBFlags2 & VB2_30xBLV) { ModeIndex = ModeIndex_1024x768[Depth]; } } else if(VDisplay == 576) { if( (VBFlags & TV_HIVISION) || ((VBFlags & TV_YPBPR) && (VBFlags & TV_YPBPR1080I)) || ((VBFlags2 & VB2_30xBLV) && ((!(VBFlags & (TV_YPBPR | TV_PALM))) && (VBFlags & TV_PAL))) ) { ModeIndex = ModeIndex_1024x576[Depth]; } } break; case 1280: if(VDisplay == 720) { if((VBFlags & TV_HIVISION) || ((VBFlags & TV_YPBPR) && (VBFlags & (TV_YPBPR1080I | TV_YPBPR750P)))) { ModeIndex = ModeIndex_1280x720[Depth]; } } else if(VDisplay == 1024) { if((VBFlags & TV_HIVISION) || ((VBFlags & TV_YPBPR) && (VBFlags & TV_YPBPR1080I))) { ModeIndex = ModeIndex_1280x1024[Depth]; } } break; } } return ModeIndex; } unsigned short 
SiS_GetModeID_VGA2(int VGAEngine, unsigned int VBFlags, int HDisplay, int VDisplay, int Depth, unsigned int VBFlags2) { if(!(VBFlags2 & VB2_SISVGA2BRIDGE)) return 0; if(HDisplay >= 1920) return 0; switch(HDisplay) { case 1600: if(VDisplay == 1200) { if(VGAEngine != SIS_315_VGA) return 0; if(!(VBFlags2 & VB2_30xB)) return 0; } break; case 1680: if(VDisplay == 1050) { if(VGAEngine != SIS_315_VGA) return 0; if(!(VBFlags2 & VB2_30xB)) return 0; } break; } return SiS_GetModeID(VGAEngine, 0, HDisplay, VDisplay, Depth, false, 0, 0); } /*********************************************/ /* HELPER: SetReg, GetReg */ /*********************************************/ void SiS_SetReg(SISIOADDRESS port, u8 index, u8 data) { outb(index, port); outb(data, port + 1); } void SiS_SetRegByte(SISIOADDRESS port, u8 data) { outb(data, port); } void SiS_SetRegShort(SISIOADDRESS port, u16 data) { outw(data, port); } void SiS_SetRegLong(SISIOADDRESS port, u32 data) { outl(data, port); } u8 SiS_GetReg(SISIOADDRESS port, u8 index) { outb(index, port); return inb(port + 1); } u8 SiS_GetRegByte(SISIOADDRESS port) { return inb(port); } u16 SiS_GetRegShort(SISIOADDRESS port) { return inw(port); } u32 SiS_GetRegLong(SISIOADDRESS port) { return inl(port); } void SiS_SetRegANDOR(SISIOADDRESS Port, u8 Index, u8 DataAND, u8 DataOR) { u8 temp; temp = SiS_GetReg(Port, Index); temp = (temp & (DataAND)) | DataOR; SiS_SetReg(Port, Index, temp); } void SiS_SetRegAND(SISIOADDRESS Port, u8 Index, u8 DataAND) { u8 temp; temp = SiS_GetReg(Port, Index); temp &= DataAND; SiS_SetReg(Port, Index, temp); } void SiS_SetRegOR(SISIOADDRESS Port, u8 Index, u8 DataOR) { u8 temp; temp = SiS_GetReg(Port, Index); temp |= DataOR; SiS_SetReg(Port, Index, temp); } /*********************************************/ /* HELPER: DisplayOn, DisplayOff */ /*********************************************/ void SiS_DisplayOn(struct SiS_Private *SiS_Pr) { SiS_SetRegAND(SiS_Pr->SiS_P3c4,0x01,0xDF); } void SiS_DisplayOff(struct SiS_Private 
*SiS_Pr) { SiS_SetRegOR(SiS_Pr->SiS_P3c4,0x01,0x20); } /*********************************************/ /* HELPER: Init Port Addresses */ /*********************************************/ void SiSRegInit(struct SiS_Private *SiS_Pr, SISIOADDRESS BaseAddr) { SiS_Pr->SiS_P3c4 = BaseAddr + 0x14; SiS_Pr->SiS_P3d4 = BaseAddr + 0x24; SiS_Pr->SiS_P3c0 = BaseAddr + 0x10; SiS_Pr->SiS_P3ce = BaseAddr + 0x1e; SiS_Pr->SiS_P3c2 = BaseAddr + 0x12; SiS_Pr->SiS_P3ca = BaseAddr + 0x1a; SiS_Pr->SiS_P3c6 = BaseAddr + 0x16; SiS_Pr->SiS_P3c7 = BaseAddr + 0x17; SiS_Pr->SiS_P3c8 = BaseAddr + 0x18; SiS_Pr->SiS_P3c9 = BaseAddr + 0x19; SiS_Pr->SiS_P3cb = BaseAddr + 0x1b; SiS_Pr->SiS_P3cc = BaseAddr + 0x1c; SiS_Pr->SiS_P3cd = BaseAddr + 0x1d; SiS_Pr->SiS_P3da = BaseAddr + 0x2a; SiS_Pr->SiS_Part1Port = BaseAddr + SIS_CRT2_PORT_04; SiS_Pr->SiS_Part2Port = BaseAddr + SIS_CRT2_PORT_10; SiS_Pr->SiS_Part3Port = BaseAddr + SIS_CRT2_PORT_12; SiS_Pr->SiS_Part4Port = BaseAddr + SIS_CRT2_PORT_14; SiS_Pr->SiS_Part5Port = BaseAddr + SIS_CRT2_PORT_14 + 2; SiS_Pr->SiS_DDC_Port = BaseAddr + 0x14; SiS_Pr->SiS_VidCapt = BaseAddr + SIS_VIDEO_CAPTURE; SiS_Pr->SiS_VidPlay = BaseAddr + SIS_VIDEO_PLAYBACK; } /*********************************************/ /* HELPER: GetSysFlags */ /*********************************************/ static void SiS_GetSysFlags(struct SiS_Private *SiS_Pr) { unsigned char cr5f, temp1, temp2; /* 661 and newer: NEVER write non-zero to SR11[7:4] */ /* (SR11 is used for DDC and in enable/disablebridge) */ SiS_Pr->SiS_SensibleSR11 = false; SiS_Pr->SiS_MyCR63 = 0x63; if(SiS_Pr->ChipType >= SIS_330) { SiS_Pr->SiS_MyCR63 = 0x53; if(SiS_Pr->ChipType >= SIS_661) { SiS_Pr->SiS_SensibleSR11 = true; } } /* You should use the macros, not these flags directly */ SiS_Pr->SiS_SysFlags = 0; if(SiS_Pr->ChipType == SIS_650) { cr5f = SiS_GetReg(SiS_Pr->SiS_P3d4,0x5f) & 0xf0; SiS_SetRegAND(SiS_Pr->SiS_P3d4,0x5c,0x07); temp1 = SiS_GetReg(SiS_Pr->SiS_P3d4,0x5c) & 0xf8; SiS_SetRegOR(SiS_Pr->SiS_P3d4,0x5c,0xf8); 
temp2 = SiS_GetReg(SiS_Pr->SiS_P3d4,0x5c) & 0xf8; if((!temp1) || (temp2)) { switch(cr5f) { case 0x80: case 0x90: case 0xc0: SiS_Pr->SiS_SysFlags |= SF_IsM650; break; case 0xa0: case 0xb0: case 0xe0: SiS_Pr->SiS_SysFlags |= SF_Is651; break; } } else { switch(cr5f) { case 0x90: temp1 = SiS_GetReg(SiS_Pr->SiS_P3d4,0x5c) & 0xf8; switch(temp1) { case 0x00: SiS_Pr->SiS_SysFlags |= SF_IsM652; break; case 0x40: SiS_Pr->SiS_SysFlags |= SF_IsM653; break; default: SiS_Pr->SiS_SysFlags |= SF_IsM650; break; } break; case 0xb0: SiS_Pr->SiS_SysFlags |= SF_Is652; break; default: SiS_Pr->SiS_SysFlags |= SF_IsM650; break; } } } if(SiS_Pr->ChipType >= SIS_760 && SiS_Pr->ChipType <= SIS_761) { if(SiS_GetReg(SiS_Pr->SiS_P3d4,0x78) & 0x30) { SiS_Pr->SiS_SysFlags |= SF_760LFB; } if(SiS_GetReg(SiS_Pr->SiS_P3d4,0x79) & 0xf0) { SiS_Pr->SiS_SysFlags |= SF_760UMA; } } } /*********************************************/ /* HELPER: Init PCI & Engines */ /*********************************************/ static void SiSInitPCIetc(struct SiS_Private *SiS_Pr) { switch(SiS_Pr->ChipType) { #ifdef CONFIG_FB_SIS_300 case SIS_300: case SIS_540: case SIS_630: case SIS_730: /* Set - PCI LINEAR ADDRESSING ENABLE (0x80) * - RELOCATED VGA IO ENABLED (0x20) * - MMIO ENABLED (0x01) * Leave other bits untouched. */ SiS_SetRegOR(SiS_Pr->SiS_P3c4,0x20,0xa1); /* - Enable 2D (0x40) * - Enable 3D (0x02) * - Enable 3D Vertex command fetch (0x10) ? * - Enable 3D command parser (0x08) ? 
*/ SiS_SetRegOR(SiS_Pr->SiS_P3c4,0x1E,0x5A); break; #endif #ifdef CONFIG_FB_SIS_315 case SIS_315H: case SIS_315: case SIS_315PRO: case SIS_650: case SIS_740: case SIS_330: case SIS_661: case SIS_741: case SIS_660: case SIS_760: case SIS_761: case SIS_340: case XGI_40: /* See above */ SiS_SetRegOR(SiS_Pr->SiS_P3c4,0x20,0xa1); /* - Enable 3D G/L transformation engine (0x80) * - Enable 2D (0x40) * - Enable 3D vertex command fetch (0x10) * - Enable 3D command parser (0x08) * - Enable 3D (0x02) */ SiS_SetRegOR(SiS_Pr->SiS_P3c4,0x1E,0xDA); break; case XGI_20: case SIS_550: /* See above */ SiS_SetRegOR(SiS_Pr->SiS_P3c4,0x20,0xa1); /* No 3D engine ! */ /* - Enable 2D (0x40) * - disable 3D */ SiS_SetRegANDOR(SiS_Pr->SiS_P3c4,0x1E,0x60,0x40); break; #endif default: break; } } /*********************************************/ /* HELPER: SetLVDSetc */ /*********************************************/ static void SiSSetLVDSetc(struct SiS_Private *SiS_Pr) { unsigned short temp; SiS_Pr->SiS_IF_DEF_LVDS = 0; SiS_Pr->SiS_IF_DEF_TRUMPION = 0; SiS_Pr->SiS_IF_DEF_CH70xx = 0; SiS_Pr->SiS_IF_DEF_CONEX = 0; SiS_Pr->SiS_ChrontelInit = 0; if(SiS_Pr->ChipType == XGI_20) return; /* Check for SiS30x first */ temp = SiS_GetReg(SiS_Pr->SiS_Part4Port,0x00); if((temp == 1) || (temp == 2)) return; switch(SiS_Pr->ChipType) { #ifdef CONFIG_FB_SIS_300 case SIS_540: case SIS_630: case SIS_730: temp = (SiS_GetReg(SiS_Pr->SiS_P3d4,0x37) & 0x0e) >> 1; if((temp >= 2) && (temp <= 5)) SiS_Pr->SiS_IF_DEF_LVDS = 1; if(temp == 3) SiS_Pr->SiS_IF_DEF_TRUMPION = 1; if((temp == 4) || (temp == 5)) { /* Save power status (and error check) - UNUSED */ SiS_Pr->SiS_Backup70xx = SiS_GetCH700x(SiS_Pr, 0x0e); SiS_Pr->SiS_IF_DEF_CH70xx = 1; } break; #endif #ifdef CONFIG_FB_SIS_315 case SIS_550: case SIS_650: case SIS_740: case SIS_330: temp = (SiS_GetReg(SiS_Pr->SiS_P3d4,0x37) & 0x0e) >> 1; if((temp >= 2) && (temp <= 3)) SiS_Pr->SiS_IF_DEF_LVDS = 1; if(temp == 3) SiS_Pr->SiS_IF_DEF_CH70xx = 2; break; case SIS_661: case 
SIS_741: case SIS_660: case SIS_760: case SIS_761: case SIS_340: case XGI_20: case XGI_40: temp = (SiS_GetReg(SiS_Pr->SiS_P3d4,0x38) & 0xe0) >> 5; if((temp >= 2) && (temp <= 3)) SiS_Pr->SiS_IF_DEF_LVDS = 1; if(temp == 3) SiS_Pr->SiS_IF_DEF_CH70xx = 2; if(temp == 4) SiS_Pr->SiS_IF_DEF_CONEX = 1; /* Not yet supported */ break; #endif default: break; } } /*********************************************/ /* HELPER: Enable DSTN/FSTN */ /*********************************************/ void SiS_SetEnableDstn(struct SiS_Private *SiS_Pr, int enable) { SiS_Pr->SiS_IF_DEF_DSTN = enable ? 1 : 0; } void SiS_SetEnableFstn(struct SiS_Private *SiS_Pr, int enable) { SiS_Pr->SiS_IF_DEF_FSTN = enable ? 1 : 0; } /*********************************************/ /* HELPER: Get modeflag */ /*********************************************/ unsigned short SiS_GetModeFlag(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short ModeIdIndex) { if(SiS_Pr->UseCustomMode) { return SiS_Pr->CModeFlag; } else if(ModeNo <= 0x13) { return SiS_Pr->SiS_SModeIDTable[ModeIdIndex].St_ModeFlag; } else { return SiS_Pr->SiS_EModeIDTable[ModeIdIndex].Ext_ModeFlag; } } /*********************************************/ /* HELPER: Determine ROM usage */ /*********************************************/ bool SiSDetermineROMLayout661(struct SiS_Private *SiS_Pr) { unsigned char *ROMAddr = SiS_Pr->VirtualRomBase; unsigned short romversoffs, romvmaj = 1, romvmin = 0; if(SiS_Pr->ChipType >= XGI_20) { /* XGI ROMs don't qualify */ return false; } else if(SiS_Pr->ChipType >= SIS_761) { /* I very much assume 761, 340 and newer will use new layout */ return true; } else if(SiS_Pr->ChipType >= SIS_661) { if((ROMAddr[0x1a] == 'N') && (ROMAddr[0x1b] == 'e') && (ROMAddr[0x1c] == 'w') && (ROMAddr[0x1d] == 'V')) { return true; } romversoffs = ROMAddr[0x16] | (ROMAddr[0x17] << 8); if(romversoffs) { if((ROMAddr[romversoffs+1] == '.') || (ROMAddr[romversoffs+4] == '.')) { romvmaj = ROMAddr[romversoffs] - '0'; romvmin = 
((ROMAddr[romversoffs+2] -'0') * 10) + (ROMAddr[romversoffs+3] - '0'); } } if((romvmaj != 0) || (romvmin >= 92)) { return true; } } else if(IS_SIS650740) { if((ROMAddr[0x1a] == 'N') && (ROMAddr[0x1b] == 'e') && (ROMAddr[0x1c] == 'w') && (ROMAddr[0x1d] == 'V')) { return true; } } return false; } static void SiSDetermineROMUsage(struct SiS_Private *SiS_Pr) { unsigned char *ROMAddr = SiS_Pr->VirtualRomBase; unsigned short romptr = 0; SiS_Pr->SiS_UseROM = false; SiS_Pr->SiS_ROMNew = false; SiS_Pr->SiS_PWDOffset = 0; if(SiS_Pr->ChipType >= XGI_20) return; if((ROMAddr) && (SiS_Pr->UseROM)) { if(SiS_Pr->ChipType == SIS_300) { /* 300: We check if the code starts below 0x220 by * checking the jmp instruction at the beginning * of the BIOS image. */ if((ROMAddr[3] == 0xe9) && ((ROMAddr[5] << 8) | ROMAddr[4]) > 0x21a) SiS_Pr->SiS_UseROM = true; } else if(SiS_Pr->ChipType < SIS_315H) { /* Sony's VAIO BIOS 1.09 follows the standard, so perhaps * the others do as well */ SiS_Pr->SiS_UseROM = true; } else { /* 315/330 series stick to the standard(s) */ SiS_Pr->SiS_UseROM = true; if((SiS_Pr->SiS_ROMNew = SiSDetermineROMLayout661(SiS_Pr))) { SiS_Pr->SiS_EMIOffset = 14; SiS_Pr->SiS_PWDOffset = 17; SiS_Pr->SiS661LCD2TableSize = 36; /* Find out about LCD data table entry size */ if((romptr = SISGETROMW(0x0102))) { if(ROMAddr[romptr + (32 * 16)] == 0xff) SiS_Pr->SiS661LCD2TableSize = 32; else if(ROMAddr[romptr + (34 * 16)] == 0xff) SiS_Pr->SiS661LCD2TableSize = 34; else if(ROMAddr[romptr + (36 * 16)] == 0xff) /* 0.94, 2.05.00+ */ SiS_Pr->SiS661LCD2TableSize = 36; else if( (ROMAddr[romptr + (38 * 16)] == 0xff) || /* 2.00.00 - 2.02.00 */ (ROMAddr[0x6F] & 0x01) ) { /* 2.03.00 - <2.05.00 */ SiS_Pr->SiS661LCD2TableSize = 38; /* UMC data layout abandoned at 2.05.00 */ SiS_Pr->SiS_EMIOffset = 16; SiS_Pr->SiS_PWDOffset = 19; } } } } } } /*********************************************/ /* HELPER: SET SEGMENT REGISTERS */ /*********************************************/ static void 
SiS_SetSegRegLower(struct SiS_Private *SiS_Pr, unsigned short value) { unsigned short temp; value &= 0x00ff; temp = SiS_GetRegByte(SiS_Pr->SiS_P3cb) & 0xf0; temp |= (value >> 4); SiS_SetRegByte(SiS_Pr->SiS_P3cb, temp); temp = SiS_GetRegByte(SiS_Pr->SiS_P3cd) & 0xf0; temp |= (value & 0x0f); SiS_SetRegByte(SiS_Pr->SiS_P3cd, temp); } static void SiS_SetSegRegUpper(struct SiS_Private *SiS_Pr, unsigned short value) { unsigned short temp; value &= 0x00ff; temp = SiS_GetRegByte(SiS_Pr->SiS_P3cb) & 0x0f; temp |= (value & 0xf0); SiS_SetRegByte(SiS_Pr->SiS_P3cb, temp); temp = SiS_GetRegByte(SiS_Pr->SiS_P3cd) & 0x0f; temp |= (value << 4); SiS_SetRegByte(SiS_Pr->SiS_P3cd, temp); } static void SiS_SetSegmentReg(struct SiS_Private *SiS_Pr, unsigned short value) { SiS_SetSegRegLower(SiS_Pr, value); SiS_SetSegRegUpper(SiS_Pr, value); } static void SiS_ResetSegmentReg(struct SiS_Private *SiS_Pr) { SiS_SetSegmentReg(SiS_Pr, 0); } static void SiS_SetSegmentRegOver(struct SiS_Private *SiS_Pr, unsigned short value) { unsigned short temp = value >> 8; temp &= 0x07; temp |= (temp << 4); SiS_SetReg(SiS_Pr->SiS_P3c4,0x1d,temp); SiS_SetSegmentReg(SiS_Pr, value); } static void SiS_ResetSegmentRegOver(struct SiS_Private *SiS_Pr) { SiS_SetSegmentRegOver(SiS_Pr, 0); } static void SiS_ResetSegmentRegisters(struct SiS_Private *SiS_Pr) { if((IS_SIS65x) || (SiS_Pr->ChipType >= SIS_661)) { SiS_ResetSegmentReg(SiS_Pr); SiS_ResetSegmentRegOver(SiS_Pr); } } /*********************************************/ /* HELPER: GetVBType */ /*********************************************/ static void SiS_GetVBType(struct SiS_Private *SiS_Pr) { unsigned short flag = 0, rev = 0, nolcd = 0; unsigned short p4_0f, p4_25, p4_27; SiS_Pr->SiS_VBType = 0; if((SiS_Pr->SiS_IF_DEF_LVDS) || (SiS_Pr->SiS_IF_DEF_CONEX)) return; if(SiS_Pr->ChipType == XGI_20) return; flag = SiS_GetReg(SiS_Pr->SiS_Part4Port,0x00); if(flag > 3) return; rev = SiS_GetReg(SiS_Pr->SiS_Part4Port,0x01); if(flag >= 2) { SiS_Pr->SiS_VBType = VB_SIS302B; } 
else if(flag == 1) { if(rev >= 0xC0) { SiS_Pr->SiS_VBType = VB_SIS301C; } else if(rev >= 0xB0) { SiS_Pr->SiS_VBType = VB_SIS301B; /* Check if 30xB DH version (no LCD support, use Panel Link instead) */ nolcd = SiS_GetReg(SiS_Pr->SiS_Part4Port,0x23); if(!(nolcd & 0x02)) SiS_Pr->SiS_VBType |= VB_NoLCD; } else { SiS_Pr->SiS_VBType = VB_SIS301; } } if(SiS_Pr->SiS_VBType & (VB_SIS301B | VB_SIS301C | VB_SIS302B)) { if(rev >= 0xE0) { flag = SiS_GetReg(SiS_Pr->SiS_Part4Port,0x39); if(flag == 0xff) SiS_Pr->SiS_VBType = VB_SIS302LV; else SiS_Pr->SiS_VBType = VB_SIS301C; /* VB_SIS302ELV; */ } else if(rev >= 0xD0) { SiS_Pr->SiS_VBType = VB_SIS301LV; } } if(SiS_Pr->SiS_VBType & (VB_SIS301C | VB_SIS301LV | VB_SIS302LV | VB_SIS302ELV)) { p4_0f = SiS_GetReg(SiS_Pr->SiS_Part4Port,0x0f); p4_25 = SiS_GetReg(SiS_Pr->SiS_Part4Port,0x25); p4_27 = SiS_GetReg(SiS_Pr->SiS_Part4Port,0x27); SiS_SetRegAND(SiS_Pr->SiS_Part4Port,0x0f,0x7f); SiS_SetRegOR(SiS_Pr->SiS_Part4Port,0x25,0x08); SiS_SetRegAND(SiS_Pr->SiS_Part4Port,0x27,0xfd); if(SiS_GetReg(SiS_Pr->SiS_Part4Port,0x26) & 0x08) { SiS_Pr->SiS_VBType |= VB_UMC; } SiS_SetReg(SiS_Pr->SiS_Part4Port,0x27,p4_27); SiS_SetReg(SiS_Pr->SiS_Part4Port,0x25,p4_25); SiS_SetReg(SiS_Pr->SiS_Part4Port,0x0f,p4_0f); } } /*********************************************/ /* HELPER: Check RAM size */ /*********************************************/ static bool SiS_CheckMemorySize(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short ModeIdIndex) { unsigned short AdapterMemSize = SiS_Pr->VideoMemorySize / (1024*1024); unsigned short modeflag = SiS_GetModeFlag(SiS_Pr, ModeNo, ModeIdIndex); unsigned short memorysize = ((modeflag & MemoryInfoFlag) >> MemorySizeShift) + 1; if(!AdapterMemSize) return true; if(AdapterMemSize < memorysize) return false; return true; } /*********************************************/ /* HELPER: Get DRAM type */ /*********************************************/ #ifdef CONFIG_FB_SIS_315 static unsigned char SiS_Get310DRAMType(struct 
SiS_Private *SiS_Pr) { unsigned char data; if((*SiS_Pr->pSiS_SoftSetting) & SoftDRAMType) { data = (*SiS_Pr->pSiS_SoftSetting) & 0x03; } else { if(SiS_Pr->ChipType >= XGI_20) { /* Do I need this? SR17 seems to be zero anyway... */ data = 0; } else if(SiS_Pr->ChipType >= SIS_340) { /* TODO */ data = 0; } if(SiS_Pr->ChipType >= SIS_661) { if(SiS_Pr->SiS_ROMNew) { data = ((SiS_GetReg(SiS_Pr->SiS_P3d4,0x78) & 0xc0) >> 6); } else { data = SiS_GetReg(SiS_Pr->SiS_P3d4,0x78) & 0x07; } } else if(IS_SIS550650740) { data = SiS_GetReg(SiS_Pr->SiS_P3c4,0x13) & 0x07; } else { /* 315, 330 */ data = SiS_GetReg(SiS_Pr->SiS_P3c4,0x3a) & 0x03; if(SiS_Pr->ChipType == SIS_330) { if(data > 1) { switch(SiS_GetReg(SiS_Pr->SiS_P3d4,0x5f) & 0x30) { case 0x00: data = 1; break; case 0x10: data = 3; break; case 0x20: data = 3; break; case 0x30: data = 2; break; } } else { data = 0; } } } } return data; } static unsigned short SiS_GetMCLK(struct SiS_Private *SiS_Pr) { unsigned char *ROMAddr = SiS_Pr->VirtualRomBase; unsigned short index; index = SiS_Get310DRAMType(SiS_Pr); if(SiS_Pr->ChipType >= SIS_661) { if(SiS_Pr->SiS_ROMNew) { return((unsigned short)(SISGETROMW((0x90 + (index * 5) + 3)))); } return(SiS_Pr->SiS_MCLKData_0[index].CLOCK); } else if(index >= 4) { return(SiS_Pr->SiS_MCLKData_1[index - 4].CLOCK); } else { return(SiS_Pr->SiS_MCLKData_0[index].CLOCK); } } #endif /*********************************************/ /* HELPER: ClearBuffer */ /*********************************************/ static void SiS_ClearBuffer(struct SiS_Private *SiS_Pr, unsigned short ModeNo) { unsigned char SISIOMEMTYPE *memaddr = SiS_Pr->VideoMemoryAddress; unsigned int memsize = SiS_Pr->VideoMemorySize; unsigned short SISIOMEMTYPE *pBuffer; int i; if(!memaddr || !memsize) return; if(SiS_Pr->SiS_ModeType >= ModeEGA) { if(ModeNo > 0x13) { memset_io(memaddr, 0, memsize); } else { pBuffer = (unsigned short SISIOMEMTYPE *)memaddr; for(i = 0; i < 0x4000; i++) writew(0x0000, &pBuffer[i]); } } else 
if(SiS_Pr->SiS_ModeType < ModeCGA) { pBuffer = (unsigned short SISIOMEMTYPE *)memaddr; for(i = 0; i < 0x4000; i++) writew(0x0720, &pBuffer[i]); } else { memset_io(memaddr, 0, 0x8000); } } /*********************************************/ /* HELPER: SearchModeID */ /*********************************************/ bool SiS_SearchModeID(struct SiS_Private *SiS_Pr, unsigned short *ModeNo, unsigned short *ModeIdIndex) { unsigned char VGAINFO = SiS_Pr->SiS_VGAINFO; if((*ModeNo) <= 0x13) { if((*ModeNo) <= 0x05) (*ModeNo) |= 0x01; for((*ModeIdIndex) = 0; ;(*ModeIdIndex)++) { if(SiS_Pr->SiS_SModeIDTable[(*ModeIdIndex)].St_ModeID == (*ModeNo)) break; if(SiS_Pr->SiS_SModeIDTable[(*ModeIdIndex)].St_ModeID == 0xFF) return false; } if((*ModeNo) == 0x07) { if(VGAINFO & 0x10) (*ModeIdIndex)++; /* 400 lines */ /* else 350 lines */ } if((*ModeNo) <= 0x03) { if(!(VGAINFO & 0x80)) (*ModeIdIndex)++; if(VGAINFO & 0x10) (*ModeIdIndex)++; /* 400 lines */ /* else 350 lines */ } /* else 200 lines */ } else { for((*ModeIdIndex) = 0; ;(*ModeIdIndex)++) { if(SiS_Pr->SiS_EModeIDTable[(*ModeIdIndex)].Ext_ModeID == (*ModeNo)) break; if(SiS_Pr->SiS_EModeIDTable[(*ModeIdIndex)].Ext_ModeID == 0xFF) return false; } } return true; } /*********************************************/ /* HELPER: GetModePtr */ /*********************************************/ unsigned short SiS_GetModePtr(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short ModeIdIndex) { unsigned short index; if(ModeNo <= 0x13) { index = SiS_Pr->SiS_SModeIDTable[ModeIdIndex].St_StTableIndex; } else { if(SiS_Pr->SiS_ModeType <= ModeEGA) index = 0x1B; else index = 0x0F; } return index; } /*********************************************/ /* HELPERS: Get some indices */ /*********************************************/ unsigned short SiS_GetRefCRTVCLK(struct SiS_Private *SiS_Pr, unsigned short Index, int UseWide) { if(SiS_Pr->SiS_RefIndex[Index].Ext_InfoFlag & HaveWideTiming) { if(UseWide == 1) { return 
SiS_Pr->SiS_RefIndex[Index].Ext_CRTVCLK_WIDE;
      } else {
	 return SiS_Pr->SiS_RefIndex[Index].Ext_CRTVCLK_NORM;
      }
   } else {
      return SiS_Pr->SiS_RefIndex[Index].Ext_CRTVCLK;
   }
}

/*
 * Return the CRT1 CRTC table index for the given refresh-rate table
 * entry.  When the entry provides a widescreen timing variant,
 * UseWide == 1 selects it.
 */
unsigned short
SiS_GetRefCRT1CRTC(struct SiS_Private *SiS_Pr, unsigned short Index, int UseWide)
{
   if(SiS_Pr->SiS_RefIndex[Index].Ext_InfoFlag & HaveWideTiming) {
      if(UseWide == 1) {
	 return SiS_Pr->SiS_RefIndex[Index].Ext_CRT1CRTC_WIDE;
      } else {
	 return SiS_Pr->SiS_RefIndex[Index].Ext_CRT1CRTC_NORM;
      }
   } else {
      return SiS_Pr->SiS_RefIndex[Index].Ext_CRT1CRTC;
   }
}

/*********************************************/
/*           HELPER: LowModeTests            */
/*********************************************/

/*
 * Probe test for modes 0x03/0x10/0x12: with CR0-7 write protection
 * enabled (CR11 bit 7), write 0x55 to CR00 and check whether it sticks;
 * the original CR00/CR11 values are restored afterwards.  The result is
 * interpreted differently per chip generation (on pre-315/non-300 chips
 * a sticking write additionally sets CR35 bit 0).  Any other mode
 * returns true without touching the hardware.  The exact purpose of the
 * probe is not evident from this code — NOTE(review): presumably a
 * BIOS/hardware quirk detection; confirm against vendor BIOS sources.
 */
static bool
SiS_DoLowModeTest(struct SiS_Private *SiS_Pr, unsigned short ModeNo)
{
   unsigned short temp, temp1, temp2;

   if((ModeNo != 0x03) && (ModeNo != 0x10) && (ModeNo != 0x12))
      return true;
   temp = SiS_GetReg(SiS_Pr->SiS_P3d4,0x11);
   SiS_SetRegOR(SiS_Pr->SiS_P3d4,0x11,0x80);
   temp1 = SiS_GetReg(SiS_Pr->SiS_P3d4,0x00);
   SiS_SetReg(SiS_Pr->SiS_P3d4,0x00,0x55);
   temp2 = SiS_GetReg(SiS_Pr->SiS_P3d4,0x00);
   SiS_SetReg(SiS_Pr->SiS_P3d4,0x00,temp1);
   SiS_SetReg(SiS_Pr->SiS_P3d4,0x11,temp);
   if((SiS_Pr->ChipType >= SIS_315H) ||
      (SiS_Pr->ChipType == SIS_300)) {
      if(temp2 == 0x55) return false;
      else return true;
   } else {
      if(temp2 != 0x55) return true;
      else {
	 SiS_SetRegOR(SiS_Pr->SiS_P3d4,0x35,0x01);
	 return false;
      }
   }
}

/* Set the LowModeTests flag when the probe above succeeds. */
static void
SiS_SetLowModeTest(struct SiS_Private *SiS_Pr, unsigned short ModeNo)
{
   if(SiS_DoLowModeTest(SiS_Pr, ModeNo)) {
      SiS_Pr->SiS_SetFlag |= LowModeTests;
   }
}

/*********************************************/
/*        HELPER: OPEN/CLOSE CRT1 CRTC       */
/*********************************************/

/* Unlock/open CRT1 CRTC access bits on 650/651/661-class chips. */
static void
SiS_OpenCRTC(struct SiS_Private *SiS_Pr)
{
   if(IS_SIS650) {
      SiS_SetRegAND(SiS_Pr->SiS_P3d4,0x51,0x1f);
      if(IS_SIS651) SiS_SetRegOR(SiS_Pr->SiS_P3d4,0x51,0x20);
      SiS_SetRegAND(SiS_Pr->SiS_P3d4,0x56,0xe7);
   } else if(IS_SIS661741660760) {
      SiS_SetRegAND(SiS_Pr->SiS_P3d4,0x61,0xf7);
      SiS_SetRegAND(SiS_Pr->SiS_P3d4,0x51,0x1f);
      SiS_SetRegAND(SiS_Pr->SiS_P3d4,0x56,0xe7);
      if(!SiS_Pr->SiS_ROMNew) {
	 SiS_SetRegAND(SiS_Pr->SiS_P3d4,0x3a,0xef);
      }
   }
}

/* Counterpart to SiS_OpenCRTC; the locking code is deliberately disabled. */
static void
SiS_CloseCRTC(struct SiS_Private *SiS_Pr)
{
#if 0 /* This locks some CRTC registers. We don't want that. */
   unsigned short temp1 = 0, temp2 = 0;

   if(IS_SIS661741660760) {
      if(SiS_Pr->SiS_VBInfo & SetCRT2ToLCDA) {
	 temp1 = 0xa0; temp2 = 0x08;
      }
      SiS_SetRegANDOR(SiS_Pr->SiS_P3d4,0x51,0x1f,temp1);
      SiS_SetRegANDOR(SiS_Pr->SiS_P3d4,0x56,0xe7,temp2);
   }
#endif
}

static void
SiS_HandleCRT1(struct SiS_Private *SiS_Pr)
{
   /* Enable CRT1 gating */
   SiS_SetRegAND(SiS_Pr->SiS_P3d4,SiS_Pr->SiS_MyCR63,0xbf);
#if 0
   if(!(SiS_GetReg(SiS_Pr->SiS_P3c4,0x15) & 0x01)) {
      if((SiS_GetReg(SiS_Pr->SiS_P3c4,0x15) & 0x0a) ||
	 (SiS_GetReg(SiS_Pr->SiS_P3c4,0x16) & 0x01)) {
	 SiS_SetRegOR(SiS_Pr->SiS_P3d4,SiS_Pr->SiS_MyCR63,0x40);
      }
   }
#endif
}

/*********************************************/
/*           HELPER: GetColorDepth           */
/*********************************************/

/*
 * Return the color depth value (from a 6-entry lookup table, indexed by
 * mode type relative to EGA) for the given mode.  ModeNo 0xfe selects
 * the custom-mode flags.
 */
unsigned short
SiS_GetColorDepth(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
		unsigned short ModeIdIndex)
{
   static const unsigned short ColorDepth[6] = { 1, 2, 4, 4, 6, 8 };
   unsigned short modeflag;
   short index;

   /* Do NOT check UseCustomMode, will skrew up FIFO */
   if(ModeNo == 0xfe) {
      modeflag = SiS_Pr->CModeFlag;
   } else if(ModeNo <= 0x13) {
      modeflag = SiS_Pr->SiS_SModeIDTable[ModeIdIndex].St_ModeFlag;
   } else {
      modeflag = SiS_Pr->SiS_EModeIDTable[ModeIdIndex].Ext_ModeFlag;
   }

   index = (modeflag & ModeTypeMask) - ModeEGA;
   if(index < 0) index = 0;
   return ColorDepth[index];
}

/*********************************************/
/*             HELPER: GetOffset             */
/*********************************************/

/*
 * Compute the CRT1 display offset (pitch in display units) for the given
 * mode from its X resolution, color depth and interlace flag.
 */
unsigned short
SiS_GetOffset(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short ModeIdIndex,
		unsigned short RRTI)
{
   unsigned short xres, temp, colordepth, infoflag;

   if(SiS_Pr->UseCustomMode) {
      infoflag = SiS_Pr->CInfoFlag;
      xres = SiS_Pr->CHDisplay;
   } else {
      infoflag = SiS_Pr->SiS_RefIndex[RRTI].Ext_InfoFlag;
xres = SiS_Pr->SiS_RefIndex[RRTI].XRes;
   }

   colordepth = SiS_GetColorDepth(SiS_Pr, ModeNo, ModeIdIndex);

   temp = xres / 16;
   if(infoflag & InterlaceMode) temp <<= 1;
   temp *= colordepth;
   /* Round up for resolutions that are not a multiple of 16 pixels */
   if(xres % 16) temp += (colordepth >> 1);

   return temp;
}

/*********************************************/
/*                   SEQ                     */
/*********************************************/

/*
 * Program the VGA sequencer (3C4) from the standard register table.
 * SR01 bit 0 (x8 dot clock) is forced for certain CRT2/LCDA slave-mode
 * configurations before being written.
 */
static void
SiS_SetSeqRegs(struct SiS_Private *SiS_Pr, unsigned short StandTableIndex)
{
   unsigned char SRdata;
   int i;

   SiS_SetReg(SiS_Pr->SiS_P3c4,0x00,0x03); /* or "display off" */

   SRdata = SiS_Pr->SiS_StandTable[StandTableIndex].SR[0] | 0x20;

   /* determine whether to force x8 dotclock */
   if((SiS_Pr->SiS_VBType & VB_SISVB) || (SiS_Pr->SiS_IF_DEF_LVDS)) {

      if(SiS_Pr->SiS_VBInfo & (SetCRT2ToLCD | SetCRT2ToTV)) {
	 if(SiS_Pr->SiS_VBInfo & SetInSlaveMode) SRdata |= 0x01;
      } else if(SiS_Pr->SiS_VBInfo & SetCRT2ToLCDA) SRdata |= 0x01;

   }

   SiS_SetReg(SiS_Pr->SiS_P3c4,0x01,SRdata);

   for(i = 2; i <= 4; i++) {
      SRdata = SiS_Pr->SiS_StandTable[StandTableIndex].SR[i - 1];
      SiS_SetReg(SiS_Pr->SiS_P3c4,i,SRdata);
   }
}

/*********************************************/
/*                  MISC                     */
/*********************************************/

/*
 * Program the VGA miscellaneous output register (3C2) from the standard
 * table; on pre-661 chips with a 30xB/LV bridge driving LCDA, bits 2-3
 * are forced on.
 */
static void
SiS_SetMiscRegs(struct SiS_Private *SiS_Pr, unsigned short StandTableIndex)
{
   unsigned char Miscdata;

   Miscdata = SiS_Pr->SiS_StandTable[StandTableIndex].MISC;

   if(SiS_Pr->ChipType < SIS_661) {
      if(SiS_Pr->SiS_VBType & VB_SIS30xBLV) {
	 if(SiS_Pr->SiS_VBInfo & SetCRT2ToLCDA) {
	    Miscdata |= 0x0C;
	 }
      }
   }

   SiS_SetRegByte(SiS_Pr->SiS_P3c2,Miscdata);
}

/*********************************************/
/*                  CRTC                     */
/*********************************************/

/*
 * Program CRTC registers CR00-CR18 from the standard table (after
 * clearing the CR11 write-protect bit).  On >= 661 chips the CRTC is
 * re-opened and CR13/CR14 rewritten; on 630/730 rev >= 0x30 in TV/LCD
 * slave mode CR18 is forced to 0xFE.
 */
static void
SiS_SetCRTCRegs(struct SiS_Private *SiS_Pr, unsigned short StandTableIndex)
{
   unsigned char CRTCdata;
   unsigned short i;

   /* Unlock CRTC */
   SiS_SetRegAND(SiS_Pr->SiS_P3d4,0x11,0x7f);

   for(i = 0; i <= 0x18; i++) {
      CRTCdata = SiS_Pr->SiS_StandTable[StandTableIndex].CRTC[i];
      SiS_SetReg(SiS_Pr->SiS_P3d4,i,CRTCdata);
   }

   if(SiS_Pr->ChipType >= SIS_661) {
      SiS_OpenCRTC(SiS_Pr);
      for(i = 0x13; i <= 0x14; i++) {
	 CRTCdata = SiS_Pr->SiS_StandTable[StandTableIndex].CRTC[i];
	 SiS_SetReg(SiS_Pr->SiS_P3d4,i,CRTCdata);
      }
   } else if( ( (SiS_Pr->ChipType == SIS_630) ||
		(SiS_Pr->ChipType == SIS_730) )  &&
	      (SiS_Pr->ChipRevision >= 0x30) ) {
      if(SiS_Pr->SiS_VBInfo & SetInSlaveMode) {
	 if(SiS_Pr->SiS_VBInfo & (SetCRT2ToLCD | SetCRT2ToTV)) {
	    SiS_SetReg(SiS_Pr->SiS_P3d4,0x18,0xFE);
	 }
      }
   }
}

/*********************************************/
/*                   ATT                     */
/*********************************************/

/*
 * Program the attribute controller (3C0, via the 3DA flip-flop reset)
 * from the standard table.  AR13 (pixel shift) is zeroed in several
 * CRT2/LCD/TV slave-mode configurations to avoid a shifted picture.
 */
static void
SiS_SetATTRegs(struct SiS_Private *SiS_Pr, unsigned short StandTableIndex)
{
   unsigned char ARdata;
   unsigned short i;

   for(i = 0; i <= 0x13; i++) {
      ARdata = SiS_Pr->SiS_StandTable[StandTableIndex].ATTR[i];

      if(i == 0x13) {
	 /* Pixel shift. If screen on LCD or TV is shifted left or right,
	  * this might be the cause.
	  */
	 if(SiS_Pr->SiS_VBType & VB_SIS30xBLV) {
	    if(SiS_Pr->SiS_VBInfo & SetCRT2ToLCDA) ARdata = 0;
	 }
	 if(SiS_Pr->SiS_IF_DEF_LVDS == 1) {
	    if(SiS_Pr->SiS_IF_DEF_CH70xx != 0) {
	       if(SiS_Pr->SiS_VBInfo & SetCRT2ToTV) {
		  if(SiS_Pr->SiS_VBInfo & SetInSlaveMode) ARdata = 0;
	       }
	    }
	 }
	 if(SiS_Pr->ChipType >= SIS_661) {
	    if(SiS_Pr->SiS_VBInfo & (SetCRT2ToTV | SetCRT2ToLCD)) {
	       if(SiS_Pr->SiS_VBInfo & SetInSlaveMode) ARdata = 0;
	    }
	 } else if(SiS_Pr->SiS_VBInfo & SetCRT2ToLCD) {
	    if(SiS_Pr->ChipType >= SIS_315H) {
	       if(IS_SIS550650740660) {
		  /* 315, 330 don't do this */
		  if(SiS_Pr->SiS_VBType & VB_SIS30xB) {
		     if(SiS_Pr->SiS_VBInfo & SetInSlaveMode) ARdata = 0;
		  } else {
		     ARdata = 0;
		  }
	       }
	    } else {
	       if(SiS_Pr->SiS_VBInfo & SetInSlaveMode) ARdata = 0;
	    }
	 }
      }
      SiS_GetRegByte(SiS_Pr->SiS_P3da);		/* reset 3da */
      SiS_SetRegByte(SiS_Pr->SiS_P3c0,i);	/* set index */
      SiS_SetRegByte(SiS_Pr->SiS_P3c0,ARdata);	/* set data  */
   }

   SiS_GetRegByte(SiS_Pr->SiS_P3da);		/* reset 3da */
   SiS_SetRegByte(SiS_Pr->SiS_P3c0,0x14);	/* set index */
   SiS_SetRegByte(SiS_Pr->SiS_P3c0,0x00);	/* set data  */

   SiS_GetRegByte(SiS_Pr->SiS_P3da);
   SiS_SetRegByte(SiS_Pr->SiS_P3c0,0x20);	/* Enable Attribute */
SiS_GetRegByte(SiS_Pr->SiS_P3da);
}

/*********************************************/
/*                   GRC                     */
/*********************************************/

/*
 * Program the graphics controller (3CE) from the standard table; for
 * modes above VGA type, clear GR05 bit 6 (256-color disable).
 */
static void
SiS_SetGRCRegs(struct SiS_Private *SiS_Pr, unsigned short StandTableIndex)
{
   unsigned char GRdata;
   unsigned short i;

   for(i = 0; i <= 0x08; i++) {
      GRdata = SiS_Pr->SiS_StandTable[StandTableIndex].GRC[i];
      SiS_SetReg(SiS_Pr->SiS_P3ce,i,GRdata);
   }

   if(SiS_Pr->SiS_ModeType > ModeVGA) {
      /* 256 color disable */
      SiS_SetRegAND(SiS_Pr->SiS_P3ce,0x05,0xBF);
   }
}

/*********************************************/
/*          CLEAR EXTENDED REGISTERS         */
/*********************************************/

/* Zero extended sequencer registers SR0A-SR0E (plus quirks on 315+). */
static void
SiS_ClearExt1Regs(struct SiS_Private *SiS_Pr, unsigned short ModeNo)
{
   unsigned short i;

   for(i = 0x0A; i <= 0x0E; i++) {
      SiS_SetReg(SiS_Pr->SiS_P3c4,i,0x00);
   }

   if(SiS_Pr->ChipType >= SIS_315H) {
      SiS_SetRegAND(SiS_Pr->SiS_P3c4,0x37,0xFE);
      if(ModeNo <= 0x13) {
	 if(ModeNo == 0x06 || ModeNo >= 0x0e) {
	    SiS_SetReg(SiS_Pr->SiS_P3c4,0x0e,0x20);
	 }
      }
   }
}

/*********************************************/
/*                RESET VCLK                 */
/*********************************************/

/*
 * Reprogram the first two VCLK PLL entries (SR2B/SR2C, latched via
 * SR2D = 0x80) on LVDS and 30xB/LV-bridge configurations; other setups
 * return without touching the clock generator.
 */
static void
SiS_ResetCRT1VCLK(struct SiS_Private *SiS_Pr)
{
   if(SiS_Pr->ChipType >= SIS_315H) {
      if(SiS_Pr->ChipType < SIS_661) {
	 if(SiS_Pr->SiS_IF_DEF_LVDS == 0) return;
      }
   } else {
      if((SiS_Pr->SiS_IF_DEF_LVDS == 0) &&
	 (!(SiS_Pr->SiS_VBType & VB_SIS30xBLV)) ) {
	 return;
      }
   }

   SiS_SetRegANDOR(SiS_Pr->SiS_P3c4,0x31,0xcf,0x20);
   SiS_SetReg(SiS_Pr->SiS_P3c4,0x2B,SiS_Pr->SiS_VCLKData[1].SR2B);
   SiS_SetReg(SiS_Pr->SiS_P3c4,0x2C,SiS_Pr->SiS_VCLKData[1].SR2C);
   SiS_SetReg(SiS_Pr->SiS_P3c4,0x2D,0x80);
   SiS_SetRegANDOR(SiS_Pr->SiS_P3c4,0x31,0xcf,0x10);
   SiS_SetReg(SiS_Pr->SiS_P3c4,0x2B,SiS_Pr->SiS_VCLKData[0].SR2B);
   SiS_SetReg(SiS_Pr->SiS_P3c4,0x2C,SiS_Pr->SiS_VCLKData[0].SR2C);
   SiS_SetReg(SiS_Pr->SiS_P3c4,0x2D,0x80);
}

/*********************************************/
/*                  SYNC                     */
/*********************************************/

/*
 * Set sync polarity: take the polarity bits (top two bits of the info
 * flag's high byte) and write them to the misc output register together
 * with the fixed 0x2f base value.
 */
static void
SiS_SetCRT1Sync(struct SiS_Private *SiS_Pr, unsigned short RRTI)
{
   unsigned short sync;

   if(SiS_Pr->UseCustomMode) {
      sync = SiS_Pr->CInfoFlag >> 8;
   } else {
      sync = SiS_Pr->SiS_RefIndex[RRTI].Ext_InfoFlag >> 8;
   }

   sync &= 0xC0;
   sync |= 0x2f;
   SiS_SetRegByte(SiS_Pr->SiS_P3c2,sync);
}

/*********************************************/
/*                  CRTC/2                   */
/*********************************************/

/*
 * Program the extended CRT1 CRTC timing set from SiS_CRT1Table (or the
 * custom-mode buffer): 17 bytes spread over CR00-07, CR10/11/12,
 * CR15/16, SR0A-0D and SR0E, plus double-scan and overflow bits.
 * XGI Z7 (XGI_20) needs CR04/CR05 and SR0E adjusted afterwards.
 */
static void
SiS_SetCRT1CRTC(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short ModeIdIndex,
		unsigned short RRTI)
{
   unsigned short temp, i, j, modeflag;
   unsigned char *crt1data = NULL;

   modeflag = SiS_GetModeFlag(SiS_Pr, ModeNo, ModeIdIndex);

   if(SiS_Pr->UseCustomMode) {
      crt1data = &SiS_Pr->CCRT1CRTC[0];
   } else {
      temp = SiS_GetRefCRT1CRTC(SiS_Pr, RRTI, SiS_Pr->SiS_UseWide);
      /* Alternate for 1600x1200 LCDA */
      if((temp == 0x20) && (SiS_Pr->Alternate1600x1200)) temp = 0x57;
      crt1data = (unsigned char *)&SiS_Pr->SiS_CRT1Table[temp].CR[0];
   }

   /* unlock cr0-7 */
   SiS_SetRegAND(SiS_Pr->SiS_P3d4,0x11,0x7f);

   for(i = 0, j = 0; i <= 7; i++, j++) {
      SiS_SetReg(SiS_Pr->SiS_P3d4,j,crt1data[i]);
   }
   for(j = 0x10; i <= 10; i++, j++) {
      SiS_SetReg(SiS_Pr->SiS_P3d4,j,crt1data[i]);
   }
   for(j = 0x15; i <= 12; i++, j++) {
      SiS_SetReg(SiS_Pr->SiS_P3d4,j,crt1data[i]);
   }
   for(j = 0x0A; i <= 15; i++, j++) {
      SiS_SetReg(SiS_Pr->SiS_P3c4,j,crt1data[i]);
   }

   SiS_SetReg(SiS_Pr->SiS_P3c4,0x0E,crt1data[16] & 0xE0);

   temp = (crt1data[16] & 0x01) << 5;
   if(modeflag & DoubleScanMode) temp |= 0x80;
   SiS_SetRegANDOR(SiS_Pr->SiS_P3d4,0x09,0x5F,temp);

   if(SiS_Pr->SiS_ModeType > ModeVGA) {
      SiS_SetReg(SiS_Pr->SiS_P3d4,0x14,0x4F);
   }

#ifdef CONFIG_FB_SIS_315
   if(SiS_Pr->ChipType == XGI_20) {
      SiS_SetReg(SiS_Pr->SiS_P3d4,0x04,crt1data[4] - 1);
      if(!(temp = crt1data[5] & 0x1f)) {
	 SiS_SetRegAND(SiS_Pr->SiS_P3c4,0x0c,0xfb);
      }
      SiS_SetRegANDOR(SiS_Pr->SiS_P3d4,0x05,0xe0,((temp - 1) & 0x1f));
      temp = (crt1data[16] >> 5) + 3;
      if(temp > 7) temp -= 7;
      SiS_SetRegANDOR(SiS_Pr->SiS_P3c4,0x0e,0x1f,(temp << 5));
   }
#endif
}

/*********************************************/
/*              OFFSET & PITCH               */
/*********************************************/
/*  (partly overruled by SetPitch() in XF86) */
/*********************************************/

/*
 * Program the CRT1 screen offset (pitch): low byte to CR13, high nibble
 * to SR0E, and a derived threshold value (halved for interlace) to SR10.
 * XGI Z7 applies a quirk for modes 0x49/0x4a.
 */
static void
SiS_SetCRT1Offset(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short ModeIdIndex,
		unsigned short RRTI)
{
   unsigned short temp, DisplayUnit, infoflag;

   if(SiS_Pr->UseCustomMode) {
      infoflag = SiS_Pr->CInfoFlag;
   } else {
      infoflag = SiS_Pr->SiS_RefIndex[RRTI].Ext_InfoFlag;
   }

   DisplayUnit = SiS_GetOffset(SiS_Pr, ModeNo, ModeIdIndex, RRTI);

   temp = (DisplayUnit >> 8) & 0x0f;
   SiS_SetRegANDOR(SiS_Pr->SiS_P3c4,0x0E,0xF0,temp);

   SiS_SetReg(SiS_Pr->SiS_P3d4,0x13,DisplayUnit & 0xFF);

   if(infoflag & InterlaceMode) DisplayUnit >>= 1;

   DisplayUnit <<= 5;
   temp = (DisplayUnit >> 8) + 1;
   if(DisplayUnit & 0xff) temp++;
   if(SiS_Pr->ChipType == XGI_20) {
      if(ModeNo == 0x4a || ModeNo == 0x49) temp--;
   }
   SiS_SetReg(SiS_Pr->SiS_P3c4,0x10,temp);
}

/*********************************************/
/*                  VCLK                     */
/*********************************************/

/*
 * Program the CRT1 pixel clock PLL (SR2B/SR2C, latched via SR2D).  For a
 * 30xB/LV bridge driving LCDA the bridge's Part4 clock values are used
 * instead of the plain VCLK table.  XGI Z7 doubles the divider for
 * HalfDCLK modes.
 */
static void
SiS_SetCRT1VCLK(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
		unsigned short ModeIdIndex, unsigned short RRTI)
{
   unsigned short index = 0, clka, clkb;

   if(SiS_Pr->UseCustomMode) {
      clka = SiS_Pr->CSR2B;
      clkb = SiS_Pr->CSR2C;
   } else {
      index = SiS_GetVCLK2Ptr(SiS_Pr, ModeNo, ModeIdIndex, RRTI);
      if((SiS_Pr->SiS_VBType & VB_SIS30xBLV) &&
	 (SiS_Pr->SiS_VBInfo & SetCRT2ToLCDA)) {
	 /* Alternate for 1600x1200 LCDA */
	 if((index == 0x21) && (SiS_Pr->Alternate1600x1200)) index = 0x72;
	 clka = SiS_Pr->SiS_VBVCLKData[index].Part4_A;
	 clkb = SiS_Pr->SiS_VBVCLKData[index].Part4_B;
      } else {
	 clka = SiS_Pr->SiS_VCLKData[index].SR2B;
	 clkb = SiS_Pr->SiS_VCLKData[index].SR2C;
      }
   }

   SiS_SetRegAND(SiS_Pr->SiS_P3c4,0x31,0xCF);

   SiS_SetReg(SiS_Pr->SiS_P3c4,0x2b,clka);
   SiS_SetReg(SiS_Pr->SiS_P3c4,0x2c,clkb);

   if(SiS_Pr->ChipType >= SIS_315H) {
#ifdef CONFIG_FB_SIS_315
      SiS_SetReg(SiS_Pr->SiS_P3c4,0x2D,0x01);
      if(SiS_Pr->ChipType == XGI_20) {
	 unsigned short mf = SiS_GetModeFlag(SiS_Pr, ModeNo, ModeIdIndex);
	 if(mf & HalfDCLK) {
	    /* NOTE(review): this rewrites SR2B with its own value —
	     * presumably only to re-latch the PLL; confirm.
	     */
	    SiS_SetReg(SiS_Pr->SiS_P3c4,0x2b,SiS_GetReg(SiS_Pr->SiS_P3c4,0x2b));
	    clkb = SiS_GetReg(SiS_Pr->SiS_P3c4,0x2c);
	    clkb = (((clkb & 0x1f) << 1) + 1) | (clkb & 0xe0);
	    SiS_SetReg(SiS_Pr->SiS_P3c4,0x2c,clkb);
	 }
      }
#endif
   } else {
      SiS_SetReg(SiS_Pr->SiS_P3c4,0x2D,0x80);
   }
}

/*********************************************/
/*                  FIFO                     */
/*********************************************/

#ifdef CONFIG_FB_SIS_300
/*
 * Derive the two FIFO-threshold lookup indices for SiS300 from the
 * DRAM-related bits in SR18, SR16 and SR14.
 */
void
SiS_GetFIFOThresholdIndex300(struct SiS_Private *SiS_Pr, unsigned short *idx1,
		unsigned short *idx2)
{
   unsigned short temp1, temp2;
   static const unsigned char ThTiming[8] = { 1, 2, 2, 3, 0, 1, 1, 2 };

   temp1 = temp2 = (SiS_GetReg(SiS_Pr->SiS_P3c4,0x18) & 0x62) >> 1;
   (*idx2) = (unsigned short)(ThTiming[((temp2 >> 3) | temp1) & 0x07]);
   (*idx1) = (unsigned short)(SiS_GetReg(SiS_Pr->SiS_P3c4,0x16) >> 6) & 0x03;
   (*idx1) |= (unsigned short)(((SiS_GetReg(SiS_Pr->SiS_P3c4,0x14) >> 4) & 0x0c));
   (*idx1) <<= 1;
}

/* Threshold "A" value: table base plus per-index multiple of idx2. */
static unsigned short
SiS_GetFIFOThresholdA300(unsigned short idx1, unsigned short idx2)
{
   static const unsigned char ThLowA[8 * 3] = {
	61, 3,52, 5,68, 7,100,11,
	43, 3,42, 5,54, 7, 78,11,
	34, 3,37, 5,47, 7, 67,11
   };

   return (unsigned short)((ThLowA[idx1 + 1] * idx2) + ThLowA[idx1]);
}

/* Threshold "B" value: same scheme as A with a different table. */
unsigned short
SiS_GetFIFOThresholdB300(unsigned short idx1, unsigned short idx2)
{
   static const unsigned char ThLowB[8 * 3] = {
	81, 4,72, 6,88, 8,120,12,
	55, 4,54, 6,66, 8, 90,12,
	42, 4,45, 6,55, 8, 75,12
   };

   return (unsigned short)((ThLowB[idx1 + 1] * idx2) + ThLowB[idx1]);
}

/*
 * Compute a FIFO delay as ceil(VCLK * colordepth * threshold /
 * (MCLK * 16)), using threshold A (key == 0) or B (key != 0).
 */
static unsigned short
SiS_DoCalcDelay(struct SiS_Private *SiS_Pr, unsigned short MCLK, unsigned short VCLK,
		unsigned short colordepth, unsigned short key)
{
   unsigned short idx1, idx2;
   unsigned int   longtemp = VCLK * colordepth;

   SiS_GetFIFOThresholdIndex300(SiS_Pr, &idx1, &idx2);

   if(key == 0) {
      longtemp *= SiS_GetFIFOThresholdA300(idx1, idx2);
   } else {
      longtemp *= SiS_GetFIFOThresholdB300(idx1, idx2);
   }

   idx1 = longtemp % (MCLK * 16);
   longtemp /= (MCLK * 16);
   if(idx1) longtemp++;
   return
(unsigned short)longtemp;
}

/* Combine the two delay variants: max(delayB, delayA - 4, clamped >= 0). */
static unsigned short
SiS_CalcDelay(struct SiS_Private *SiS_Pr, unsigned short VCLK, unsigned short colordepth,
		unsigned short MCLK)
{
   unsigned short temp1, temp2;

   temp2 = SiS_DoCalcDelay(SiS_Pr, MCLK, VCLK, colordepth, 0);
   temp1 = SiS_DoCalcDelay(SiS_Pr, MCLK, VCLK, colordepth, 1);
   if(temp1 < 4) temp1 = 4;
   temp1 -= 4;
   if(temp2 < temp1) temp2 = temp1;
   return temp2;
}

/*
 * Program the CRT1 display FIFO thresholds on SiS300 (SR08/SR09/SR0F,
 * plus SR16 burst bits).  For extended modes the low threshold is
 * iteratively computed from VCLK, MCLK and color depth; standard modes
 * use a fixed value of 2.
 */
static void
SiS_SetCRT1FIFO_300(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
		unsigned short RefreshRateTableIndex)
{
   unsigned short ThresholdLow = 0;
   unsigned short temp, index, VCLK, MCLK, colorth;
   static const unsigned short colortharray[6] = { 1, 1, 2, 2, 3, 4 };

   if(ModeNo > 0x13) {

      /* Get VCLK */
      if(SiS_Pr->UseCustomMode) {
	 VCLK = SiS_Pr->CSRClock;
      } else {
	 index = SiS_GetRefCRTVCLK(SiS_Pr, RefreshRateTableIndex, SiS_Pr->SiS_UseWide);
	 VCLK = SiS_Pr->SiS_VCLKData[index].CLOCK;
      }

      /* Get half colordepth */
      colorth = colortharray[(SiS_Pr->SiS_ModeType - ModeEGA)];

      /* Get MCLK */
      index = SiS_GetReg(SiS_Pr->SiS_P3c4,0x3A) & 0x07;
      MCLK = SiS_Pr->SiS_MCLKData_0[index].CLOCK;

      temp = SiS_GetReg(SiS_Pr->SiS_P3d4,0x35) & 0xc3;
      SiS_SetRegANDOR(SiS_Pr->SiS_P3c4,0x16,0x3c,temp);

      do {
	 ThresholdLow = SiS_CalcDelay(SiS_Pr, VCLK, colorth, MCLK) + 1;
	 if(ThresholdLow < 0x13) break;
	 /* Threshold too high: reduce the burst length in SR16 and clamp */
	 SiS_SetRegAND(SiS_Pr->SiS_P3c4,0x16,0xfc);
	 ThresholdLow = 0x13;
	 temp = SiS_GetReg(SiS_Pr->SiS_P3c4,0x16) >> 6;
	 if(!temp) break;
	 SiS_SetRegANDOR(SiS_Pr->SiS_P3c4,0x16,0x3f,((temp - 1) << 6));
      } while(0);

   } else ThresholdLow = 2;

   /* Write CRT/CPU threshold low, CRT/Engine threshold high */
   temp = (ThresholdLow << 4) | 0x0f;
   SiS_SetReg(SiS_Pr->SiS_P3c4,0x08,temp);

   temp = (ThresholdLow & 0x10) << 1;
   if(ModeNo > 0x13) temp |= 0x40;
   SiS_SetRegANDOR(SiS_Pr->SiS_P3c4,0x0f,0x9f,temp);

   /* What is this? */
   SiS_SetReg(SiS_Pr->SiS_P3c4,0x3B,0x09);

   /* Write CRT/CPU threshold high */
   temp = ThresholdLow + 3;
   if(temp > 0x0f) temp = 0x0f;
   SiS_SetReg(SiS_Pr->SiS_P3c4,0x09,temp);
}

/*
 * Look up the 630/730 memory latency factor for the given index; the
 * table selection (and layout, per the inline comments) depends on bus
 * width and BQ setting.
 */
unsigned short
SiS_GetLatencyFactor630(struct SiS_Private *SiS_Pr, unsigned short index)
{
   static const unsigned char LatencyFactor[] = {
	97, 88, 86, 79, 77,  0,    /* 64  bit  BQ=2 */
	 0, 87, 85, 78, 76, 54,    /* 64  bit  BQ=1 */
	97, 88, 86, 79, 77,  0,    /* 128 bit  BQ=2 */
	 0, 79, 77, 70, 68, 48,    /* 128 bit  BQ=1 */
	80, 72, 69, 63, 61,  0,    /* 64  bit  BQ=2 */
	 0, 70, 68, 61, 59, 37,    /* 64  bit  BQ=1 */
	86, 77, 75, 68, 66,  0,    /* 128 bit  BQ=2 */
	 0, 68, 66, 59, 57, 37     /* 128 bit  BQ=1 */
   };
   static const unsigned char LatencyFactor730[] = {
	 69, 63, 61,
	 86, 79, 77,
	103, 96, 94,
	120,113,111,
	137,130,128
   };

   if(SiS_Pr->ChipType == SIS_730) {
      return (unsigned short)LatencyFactor730[index];
   } else {
      return (unsigned short)LatencyFactor[index];
   }
}

/*
 * Decode a queue-configuration key byte into a latency-table index
 * (different bit layout on 730 vs 630/540) and return the latency.
 */
static unsigned short
SiS_CalcDelay2(struct SiS_Private *SiS_Pr, unsigned char key)
{
   unsigned short index;

   if(SiS_Pr->ChipType == SIS_730) {
      index = ((key & 0x0f) * 3) + ((key & 0xc0) >> 6);
   } else {
      index = (key & 0xe0) >> 5;
      if(key & 0x10) index +=  6;
      if(!(key & 0x01)) index += 24;
      if(SiS_GetReg(SiS_Pr->SiS_P3c4,0x14) & 0x80) index += 12;
   }
   return SiS_GetLatencyFactor630(SiS_Pr, index);
}

/*
 * Program the CRT1 display FIFO thresholds and the foreground/background
 * queue configuration (via northbridge PCI config dwords 0x50/0xA0) on
 * SiS540/630/730.  Walks the FQBQ candidate tables until a threshold
 * below 0x13 is found.
 */
static void
SiS_SetCRT1FIFO_630(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
		unsigned short RefreshRateTableIndex)
{
   unsigned short ThresholdLow = 0;
   unsigned short i, data, VCLK, MCLK16, colorth = 0;
   unsigned int   templ, datal;
   const unsigned char *queuedata = NULL;
   static const unsigned char FQBQData[21] = {
	0x01,0x21,0x41,0x61,0x81,
	0x31,0x51,0x71,0x91,0xb1,
	0x00,0x20,0x40,0x60,0x80,
	0x30,0x50,0x70,0x90,0xb0,
	0xff
   };
   static const unsigned char FQBQData730[16] = {
	0x34,0x74,0xb4,
	0x23,0x63,0xa3,
	0x12,0x52,0x92,
	0x01,0x41,0x81,
	0x00,0x40,0x80,
	0xff
   };
   static const unsigned short colortharray[6] = { 1, 1, 2, 2, 3, 4 };

   i = 0;

   if(ModeNo > 0x13) {

      /* Get VCLK */
      if(SiS_Pr->UseCustomMode) {
VCLK = SiS_Pr->CSRClock;
      } else {
	 data = SiS_GetRefCRTVCLK(SiS_Pr, RefreshRateTableIndex, SiS_Pr->SiS_UseWide);
	 VCLK = SiS_Pr->SiS_VCLKData[data].CLOCK;
      }

      /* Get MCLK * 16 */
      data = SiS_GetReg(SiS_Pr->SiS_P3c4,0x1A) & 0x07;
      MCLK16 = SiS_Pr->SiS_MCLKData_0[data].CLOCK * 16;

      /* Get half colordepth */
      colorth = colortharray[(SiS_Pr->SiS_ModeType - ModeEGA)];

      if(SiS_Pr->ChipType == SIS_730) {
	 queuedata = &FQBQData730[0];
      } else {
	 queuedata = &FQBQData[0];
      }

      /* Try queue configurations until the resulting threshold fits;
       * fall back to the last entry with threshold clamped to 0x13.
       */
      do {
	 templ = SiS_CalcDelay2(SiS_Pr, queuedata[i]) * VCLK * colorth;

	 datal = templ % MCLK16;
	 templ = (templ / MCLK16) + 1;
	 if(datal) templ++;

	 if(templ > 0x13) {
	    if(queuedata[i + 1] == 0xFF) {
	       ThresholdLow = 0x13;
	       break;
	    }
	    i++;
	 } else {
	    ThresholdLow = templ;
	    break;
	 }
      } while(queuedata[i] != 0xFF);

   } else {

      if(SiS_Pr->ChipType != SIS_730) i = 9;
      ThresholdLow = 0x02;

   }

   /* Write CRT/CPU threshold low, CRT/Engine threshold high */
   data = ((ThresholdLow & 0x0f) << 4) | 0x0f;
   SiS_SetReg(SiS_Pr->SiS_P3c4,0x08,data);

   data = (ThresholdLow & 0x10) << 1;
   SiS_SetRegANDOR(SiS_Pr->SiS_P3c4,0x0F,0xDF,data);

   /* What is this? */
   SiS_SetReg(SiS_Pr->SiS_P3c4,0x3B,0x09);

   /* Write CRT/CPU threshold high (gap = 3) */
   data = ThresholdLow + 3;
   if(data > 0x0f) data = 0x0f;
   SiS_SetRegANDOR(SiS_Pr->SiS_P3c4,0x09,0x80,data);

   /* Write foreground and background queue */
   templ = sisfb_read_nbridge_pci_dword(SiS_Pr, 0x50);

   if(SiS_Pr->ChipType == SIS_730) {

      templ &= 0xfffff9ff;
      templ |= ((queuedata[i] & 0xc0) << 3);

   } else {

      templ &= 0xf0ffffff;
      if( (ModeNo <= 0x13) &&
	  (SiS_Pr->ChipType == SIS_630) &&
	  (SiS_Pr->ChipRevision >= 0x30) ) {
	 templ |= 0x0b000000;
      } else {
	 templ |= ((queuedata[i] & 0xf0) << 20);
      }

   }

   sisfb_write_nbridge_pci_dword(SiS_Pr, 0x50, templ);
   templ = sisfb_read_nbridge_pci_dword(SiS_Pr, 0xA0);

   /* GUI grant timer (PCI config 0xA3) */
   if(SiS_Pr->ChipType == SIS_730) {

      templ &= 0x00ffffff;
      datal = queuedata[i] << 8;
      templ |= (((datal & 0x0f00) | ((datal & 0x3000) >> 8)) << 20);

   } else {

      templ &= 0xf0ffffff;
      templ |= ((queuedata[i] & 0x0f) << 24);

   }

   sisfb_write_nbridge_pci_dword(SiS_Pr, 0xA0, templ);
}
#endif /* CONFIG_FB_SIS_300 */

#ifdef CONFIG_FB_SIS_315
/*
 * Program the CRT1 display FIFO on 315-series/XGI: fixed SR08/SR09
 * values plus auto-threshold (SR3D bit 0) enabled for extended modes,
 * gated on HalfDCLK/DoubleScan depending on chip generation.
 */
static void
SiS_SetCRT1FIFO_310(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short ModeIdIndex)
{
   unsigned short modeflag;

   /* disable auto-threshold */
   SiS_SetRegAND(SiS_Pr->SiS_P3c4,0x3D,0xFE);

   modeflag = SiS_GetModeFlag(SiS_Pr, ModeNo, ModeIdIndex);

   SiS_SetReg(SiS_Pr->SiS_P3c4,0x08,0xAE);
   SiS_SetRegAND(SiS_Pr->SiS_P3c4,0x09,0xF0);
   if(ModeNo > 0x13) {
      if(SiS_Pr->ChipType >= XGI_20) {
	 SiS_SetReg(SiS_Pr->SiS_P3c4,0x08,0x34);
	 SiS_SetRegOR(SiS_Pr->SiS_P3c4,0x3D,0x01);
      } else if(SiS_Pr->ChipType >= SIS_661) {
	 if(!(modeflag & HalfDCLK)) {
	    SiS_SetReg(SiS_Pr->SiS_P3c4,0x08,0x34);
	    SiS_SetRegOR(SiS_Pr->SiS_P3c4,0x3D,0x01);
	 }
      } else {
	 if((!(modeflag & DoubleScanMode)) || (!(modeflag & HalfDCLK))) {
	    SiS_SetReg(SiS_Pr->SiS_P3c4,0x08,0x34);
	    SiS_SetRegOR(SiS_Pr->SiS_P3c4,0x3D,0x01);
	 }
      }
   }
}
#endif

/*********************************************/
/*              MODE REGISTERS               */
/*********************************************/

static void
SiS_SetVCLKState(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
		unsigned short RefreshRateTableIndex, unsigned short ModeIdIndex)
{
   /*
    * Set clock-speed dependent register bits (SR07/SR32/SR1F) and the
    * DAC speed field from the mode's pixel clock in MHz.  The threshold
    * values differ per chip generation.
    */
   unsigned short data = 0, VCLK = 0, index = 0;

   if(ModeNo > 0x13) {
      if(SiS_Pr->UseCustomMode) {
	 VCLK = SiS_Pr->CSRClock;
      } else {
	 index = SiS_GetVCLK2Ptr(SiS_Pr, ModeNo, ModeIdIndex, RefreshRateTableIndex);
	 VCLK = SiS_Pr->SiS_VCLKData[index].CLOCK;
      }
   }

   if(SiS_Pr->ChipType < SIS_315H) {
#ifdef CONFIG_FB_SIS_300
      if(VCLK > 150) data |= 0x80;
      SiS_SetRegANDOR(SiS_Pr->SiS_P3c4,0x07,0x7B,data);

      data = 0x00;
      if(VCLK >= 150) data |= 0x08;
      SiS_SetRegANDOR(SiS_Pr->SiS_P3c4,0x32,0xF7,data);
#endif
   } else if(SiS_Pr->ChipType < XGI_20) {
#ifdef CONFIG_FB_SIS_315
      if(VCLK >= 166) data |= 0x0c;
      SiS_SetRegANDOR(SiS_Pr->SiS_P3c4,0x32,0xf3,data);

      if(VCLK >= 166) {
	 SiS_SetRegAND(SiS_Pr->SiS_P3c4,0x1f,0xe7);
      }
#endif
   } else {
#ifdef CONFIG_FB_SIS_315
      if(VCLK >= 200) data |= 0x0c;
      if(SiS_Pr->ChipType == XGI_20) data &= ~0x04;
      SiS_SetRegANDOR(SiS_Pr->SiS_P3c4,0x32,0xf3,data);
      if(SiS_Pr->ChipType != XGI_20) {
	 data = SiS_GetReg(SiS_Pr->SiS_P3c4,0x1f) & 0xe7;
	 if(VCLK < 200) data |= 0x10;
	 SiS_SetReg(SiS_Pr->SiS_P3c4,0x1f,data);
      }
#endif
   }

   /* DAC speed */
   if(SiS_Pr->ChipType >= SIS_661) {

      SiS_SetRegANDOR(SiS_Pr->SiS_P3c4,0x07,0xE8,0x10);

   } else {

      data = 0x03;
      if(VCLK >= 260)      data = 0x00;
      else if(VCLK >= 160) data = 0x01;
      else if(VCLK >= 135) data = 0x02;

      if(SiS_Pr->ChipType == SIS_540) {
	 /* NOTE(review): "(VCLK == 203) || (VCLK < 234)" makes the first
	  * term redundant — looks odd, but kept as-is (matches vendor
	  * code); possibly "VCLK < 234" was meant differently.
	  */
	 if((VCLK == 203) || (VCLK < 234)) data = 0x02;
      }

      if(SiS_Pr->ChipType < SIS_315H) {
	 SiS_SetRegANDOR(SiS_Pr->SiS_P3c4,0x07,0xFC,data);
      } else {
	 if(SiS_Pr->ChipType > SIS_315PRO) {
	    if(ModeNo > 0x13) data &= 0xfc;
	 }
	 SiS_SetRegANDOR(SiS_Pr->SiS_P3c4,0x07,0xF8,data);
      }

   }
}

/*
 * Program miscellaneous CRT1 mode registers: DPMS off, SR06 mode-type
 * and interlace bits, CR19/CR1A interlace start, HalfDCLK (SR01),
 * line-compare (SR0F), and chip-specific DRAM/timing setup (SR17)
 * for 315PRO/330/76x+LFB.
 */
static void
SiS_SetCRT1ModeRegs(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
		unsigned short ModeIdIndex, unsigned short RRTI)
{
   unsigned short data, infoflag = 0, modeflag, resindex;
#ifdef CONFIG_FB_SIS_315
   unsigned char  *ROMAddr = SiS_Pr->VirtualRomBase;
   unsigned short data2, data3;
#endif

   modeflag = SiS_GetModeFlag(SiS_Pr, ModeNo, ModeIdIndex);

   if(SiS_Pr->UseCustomMode) {
      infoflag = SiS_Pr->CInfoFlag;
   } else {
      resindex = SiS_GetResInfo(SiS_Pr, ModeNo, ModeIdIndex);
      if(ModeNo > 0x13) {
	 infoflag = SiS_Pr->SiS_RefIndex[RRTI].Ext_InfoFlag;
      }
   }

   /* Disable DPMS */
   SiS_SetRegAND(SiS_Pr->SiS_P3c4,0x1F,0x3F);

   data = 0;
   if(ModeNo > 0x13) {
      if(SiS_Pr->SiS_ModeType > ModeEGA) {
	 data |= 0x02;
	 data |= ((SiS_Pr->SiS_ModeType - ModeVGA) << 2);
      }
      if(infoflag & InterlaceMode) data |= 0x20;
   }
   SiS_SetRegANDOR(SiS_Pr->SiS_P3c4,0x06,0xC0,data);

   if(SiS_Pr->ChipType != SIS_300) {
      data = 0;
      if(infoflag & InterlaceMode) {
	 /* data = (Hsync / 8) - ((Htotal / 8) / 2) + 3 */
	 int hrs = (SiS_GetReg(SiS_Pr->SiS_P3d4,0x04) |
		    ((SiS_GetReg(SiS_Pr->SiS_P3c4,0x0b) & 0xc0) << 2)) - 3;
	 int hto = (SiS_GetReg(SiS_Pr->SiS_P3d4,0x00) |
		    ((SiS_GetReg(SiS_Pr->SiS_P3c4,0x0b) & 0x03) << 8)) + 5;
	 data = hrs - (hto >> 1) + 3;
      }
      SiS_SetReg(SiS_Pr->SiS_P3d4,0x19,data);
      SiS_SetRegANDOR(SiS_Pr->SiS_P3d4,0x1a,0xFC,((data >> 8) & 0x03));
   }

   if(modeflag & HalfDCLK) {
      SiS_SetRegOR(SiS_Pr->SiS_P3c4,0x01,0x08);
   }

   data = 0;
   if(modeflag & LineCompareOff) data = 0x08;
   if(SiS_Pr->ChipType == SIS_300) {
      SiS_SetRegANDOR(SiS_Pr->SiS_P3c4,0x0F,0xF7,data);
   } else {
      if(SiS_Pr->ChipType >= XGI_20) data |= 0x20;
      if(SiS_Pr->SiS_ModeType == ModeEGA) {
	 if(ModeNo > 0x13) {
	    data |= 0x40;
	 }
      }
      SiS_SetRegANDOR(SiS_Pr->SiS_P3c4,0x0F,0xB7,data);
   }

#ifdef CONFIG_FB_SIS_315
   if(SiS_Pr->ChipType >= SIS_315H) {
      SiS_SetRegAND(SiS_Pr->SiS_P3c4,0x31,0xfb);
   }

   if(SiS_Pr->ChipType == SIS_315PRO) {

      data = SiS_Pr->SiS_SR15[(2 * 4) + SiS_Get310DRAMType(SiS_Pr)];
      if(SiS_Pr->SiS_ModeType == ModeText) {
	 data &= 0xc7;
      } else {
	 data2 = SiS_GetOffset(SiS_Pr, ModeNo, ModeIdIndex, RRTI) >> 1;
	 if(infoflag & InterlaceMode) data2 >>= 1;
	 data3 = SiS_GetColorDepth(SiS_Pr, ModeNo, ModeIdIndex) >> 1;
	 if(data3) data2 /= data3;
	 if(data2 >= 0x50) {
	    data &= 0x0f;
	    data |= 0x50;
	 }
      }
      SiS_SetReg(SiS_Pr->SiS_P3c4,0x17,data);

   } else if((SiS_Pr->ChipType == SIS_330) ||
(SiS_Pr->SiS_SysFlags & SF_760LFB)) {

      /* SR17 DRAM timing for 330 and 76x+LFB, derived from the ratio
       * MCLK*1024 / (VCLK * half-colordepth) via a stepped lookup.
       */
      data = SiS_Get310DRAMType(SiS_Pr);
      if(SiS_Pr->ChipType == SIS_330) {
	 data = SiS_Pr->SiS_SR15[(2 * 4) + data];
      } else {
	 if(SiS_Pr->SiS_ROMNew)	     data = ROMAddr[0xf6];
	 else if(SiS_Pr->SiS_UseROM) data = ROMAddr[0x100 + data];
	 else			     data = 0xba;
      }
      if(SiS_Pr->SiS_ModeType <= ModeEGA) {
	 data &= 0xc7;
      } else {
	 if(SiS_Pr->UseCustomMode) {
	    data2 = SiS_Pr->CSRClock;
	 } else {
	    data2 = SiS_GetVCLK2Ptr(SiS_Pr, ModeNo, ModeIdIndex, RRTI);
	    data2 = SiS_Pr->SiS_VCLKData[data2].CLOCK;
	 }

	 data3 = SiS_GetColorDepth(SiS_Pr, ModeNo, ModeIdIndex) >> 1;
	 if(data3) data2 *= data3;

	 data2 = ((unsigned int)(SiS_GetMCLK(SiS_Pr) * 1024)) / data2;

	 if(SiS_Pr->ChipType == SIS_330) {
	    if(SiS_Pr->SiS_ModeType != Mode16Bpp) {
	       if     (data2 >= 0x19c) data = 0xba;
	       else if(data2 >= 0x140) data = 0x7a;
	       else if(data2 >= 0x101) data = 0x3a;
	       else if(data2 >= 0xf5)  data = 0x32;
	       else if(data2 >= 0xe2)  data = 0x2a;
	       else if(data2 >= 0xc4)  data = 0x22;
	       else if(data2 >= 0xac)  data = 0x1a;
	       else if(data2 >= 0x9e)  data = 0x12;
	       else if(data2 >= 0x8e)  data = 0x0a;
	       else                    data = 0x02;
	    } else {
	       if(data2 >= 0x127)      data = 0xba;
	       else                    data = 0x7a;
	    }
	 } else {  /* 76x+LFB */
	    if     (data2 >= 0x190) data = 0xba;
	    else if(data2 >= 0xff)  data = 0x7a;
	    else if(data2 >= 0xd3)  data = 0x3a;
	    else if(data2 >= 0xa9)  data = 0x1a;
	    else if(data2 >= 0x93)  data = 0x0a;
	    else                    data = 0x02;
	 }
      }
      SiS_SetReg(SiS_Pr->SiS_P3c4,0x17,data);
   }
   /* XGI: Nothing. */
   /* TODO: Check SiS340 */
#endif

   data = 0x60;
   if(SiS_Pr->SiS_ModeType != ModeText) {
      data ^= 0x60;
      if(SiS_Pr->SiS_ModeType != ModeEGA) {
	 data ^= 0xA0;
      }
   }
   SiS_SetRegANDOR(SiS_Pr->SiS_P3c4,0x21,0x1F,data);

   SiS_SetVCLKState(SiS_Pr, ModeNo, RRTI, ModeIdIndex);

#ifdef CONFIG_FB_SIS_315
   /* CR52/CR51 setup, keyed on CR31 bit 6 */
   if(((SiS_Pr->ChipType >= SIS_315H) && (SiS_Pr->ChipType < SIS_661)) ||
       (SiS_Pr->ChipType == XGI_40)) {
      if(SiS_GetReg(SiS_Pr->SiS_P3d4,0x31) & 0x40) {
	 SiS_SetReg(SiS_Pr->SiS_P3d4,0x52,0x2c);
      } else {
	 SiS_SetReg(SiS_Pr->SiS_P3d4,0x52,0x6c);
      }
   } else if(SiS_Pr->ChipType == XGI_20) {
      if(SiS_GetReg(SiS_Pr->SiS_P3d4,0x31) & 0x40) {
	 SiS_SetReg(SiS_Pr->SiS_P3d4,0x52,0x33);
      } else {
	 SiS_SetReg(SiS_Pr->SiS_P3d4,0x52,0x73);
      }
      SiS_SetReg(SiS_Pr->SiS_P3d4,0x51,0x02);
   }
#endif
}

#ifdef CONFIG_FB_SIS_315
/*
 * Mirror CRT1 SR/GR/MISC registers to a second chip; entirely disabled
 * until the second chip's I/O base (IOAddress2) is understood.
 */
static void
SiS_SetupDualChip(struct SiS_Private *SiS_Pr)
{
#if 0
   /* TODO: Find out about IOAddress2 */
   SISIOADDRESS P2_3c2 = SiS_Pr->IOAddress2 + 0x12;
   SISIOADDRESS P2_3c4 = SiS_Pr->IOAddress2 + 0x14;
   SISIOADDRESS P2_3ce = SiS_Pr->IOAddress2 + 0x1e;
   int i;

   if((SiS_Pr->ChipRevision != 0) ||
      (!(SiS_GetReg(SiS_Pr->SiS_P3c4,0x3a) & 0x04)))
      return;

   for(i = 0; i <= 4; i++) {					/* SR00 - SR04 */
      SiS_SetReg(P2_3c4,i,SiS_GetReg(SiS_Pr->SiS_P3c4,i));
   }
   for(i = 0; i <= 8; i++) {					/* GR00 - GR08 */
      SiS_SetReg(P2_3ce,i,SiS_GetReg(SiS_Pr->SiS_P3ce,i));
   }
   SiS_SetReg(P2_3c4,0x05,0x86);
   SiS_SetReg(P2_3c4,0x06,SiS_GetReg(SiS_Pr->SiS_P3c4,0x06));	/* SR06 */
   SiS_SetReg(P2_3c4,0x21,SiS_GetReg(SiS_Pr->SiS_P3c4,0x21));	/* SR21 */
   SiS_SetRegByte(P2_3c2,SiS_GetRegByte(SiS_Pr->SiS_P3cc));	/* MISC */
   SiS_SetReg(P2_3c4,0x05,0x00);
#endif
}
#endif

/*********************************************/
/*                 LOAD DAC                  */
/*********************************************/

/*
 * Write one RGB triplet to the DAC data port, rotating the component
 * order according to dl (0, 1 or other); each component is shifted left
 * by shiftflag before writing.
 */
static void
SiS_WriteDAC(struct SiS_Private *SiS_Pr, SISIOADDRESS DACData, unsigned short shiftflag,
		unsigned short dl, unsigned short ah, unsigned short al, unsigned short dh)
{
   unsigned short d1, d2, d3;

   switch(dl) {
   case  0: d1 = dh; d2 = ah; d3 = al; break;
   case  1: d1 = ah;
d2 = al; d3 = dh; break;
   default: d1 = al; d2 = dh; d3 = ah;
   }
   SiS_SetRegByte(DACData, (d1 << shiftflag));
   SiS_SetRegByte(DACData, (d2 << shiftflag));
   SiS_SetRegByte(DACData, (d3 << shiftflag));
}

/*
 * Load the palette DAC for the given mode from the MDA/CGA/EGA/VGA
 * tables selected by the mode's DACInfoFlag bits.  Depending on the
 * CRT2/LCDA programming state, either the CRT1 DAC (3C8/3C9) or the
 * video bridge's Part5 DAC port is written (the latter with values
 * shifted left by 2).  For the 256-entry VGA palette the gray ramp and
 * the hue circles are generated via SiS_WriteDAC.
 */
void
SiS_LoadDAC(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short ModeIdIndex)
{
   unsigned short data, data2, time, i, j, k, m, n, o;
   unsigned short si, di, bx, sf;
   SISIOADDRESS DACAddr, DACData;
   const unsigned char *table = NULL;

   data = SiS_GetModeFlag(SiS_Pr, ModeNo, ModeIdIndex) & DACInfoFlag;

   j = time = 64;
   if(data == 0x00)      table = SiS_MDA_DAC;
   else if(data == 0x08) table = SiS_CGA_DAC;
   else if(data == 0x10) table = SiS_EGA_DAC;
   else if(data == 0x18) {
      j = 16;
      time = 256;
      table = SiS_VGA_DAC;
   }

   if( ( (SiS_Pr->SiS_VBInfo & SetCRT2ToLCD) &&        /* 301B-DH LCD */
	 (SiS_Pr->SiS_VBType & VB_NoLCD) )        ||
       (SiS_Pr->SiS_VBInfo & SetCRT2ToLCDA)       ||   /* LCDA */
       (!(SiS_Pr->SiS_SetFlag & ProgrammingCRT2)) ) {  /* Programming CRT1 */
      SiS_SetRegByte(SiS_Pr->SiS_P3c6,0xFF);
      DACAddr = SiS_Pr->SiS_P3c8;
      DACData = SiS_Pr->SiS_P3c9;
      sf = 0;
   } else {
      DACAddr = SiS_Pr->SiS_Part5Port;
      DACData = SiS_Pr->SiS_Part5Port + 1;
      sf = 2;
   }

   SiS_SetRegByte(DACAddr,0x00);

   for(i = 0; i < j; i++) {
      data = table[i];
      for(k = 0; k < 3; k++) {
	 data2 = 0;
	 if(data & 0x01) data2 += 0x2A;
	 if(data & 0x02) data2 += 0x15;
	 SiS_SetRegByte(DACData, (data2 << sf));
	 data >>= 2;
      }
   }

   if(time == 256) {
      for(i = 16; i < 32; i++) {
	 data = table[i] << sf;
	 for(k = 0; k < 3; k++) SiS_SetRegByte(DACData, data);
      }
      si = 32;
      for(m = 0; m < 9; m++) {
	 di = si;
	 bx = si + 4;
	 for(n = 0; n < 3; n++) {
	    for(o = 0; o < 5; o++) {
	       SiS_WriteDAC(SiS_Pr, DACData, sf, n, table[di], table[bx], table[si]);
	       si++;
	    }
	    si -= 2;
	    for(o = 0; o < 3; o++) {
	       SiS_WriteDAC(SiS_Pr, DACData, sf, n, table[di], table[si], table[bx]);
	       si--;
	    }
	 } /* for n < 3 */
	 si += 5;
      } /* for m < 9 */
   }
}

/*********************************************/
/*         SET CRT1 REGISTER GROUP           */
/*********************************************/

/*
 * Set a mode on CRT1: run through the full register-group sequence
 * (SEQ, MISC, CRTC, ATT, GRC, extended regs, VCLK, sync, CRTC timing,
 * offset, FIFO, mode registers, DAC), managing the ProgrammingCRT2 flag
 * for simultaneous/LCDA configurations along the way.
 */
static void
SiS_SetCRT1Group(struct
SiS_SetRegByte(SiS_Pr->SiS_P3c2,(SiS_GetRegByte(SiS_Pr->SiS_P3cc) | 0x0c));
	 }
      }
      SiS_SetCRT1FIFO_310(SiS_Pr, ModeNo, ModeIdIndex);
#endif
      break;
   }

   SiS_SetCRT1ModeRegs(SiS_Pr, ModeNo, ModeIdIndex, RefreshRateTableIndex);

#ifdef CONFIG_FB_SIS_315
   if(SiS_Pr->ChipType == XGI_40) {
      SiS_SetupDualChip(SiS_Pr);
   }
#endif

   SiS_LoadDAC(SiS_Pr, ModeNo, ModeIdIndex);

   if(SiS_Pr->SiS_flag_clearbuffer) {
      SiS_ClearBuffer(SiS_Pr, ModeNo);
   }

   if(!(SiS_Pr->SiS_VBInfo & (SetSimuScanMode | SwitchCRT2 | SetCRT2ToLCDA))) {
      SiS_WaitRetrace1(SiS_Pr);
      SiS_DisplayOn(SiS_Pr);
   }
}

/*********************************************/
/*       HELPER: VIDEO BRIDGE PROG CLK       */
/*********************************************/

/*
 * Cache the video bridge Part4 0x0E init value from the ROM (new-style
 * SiS ROM or XGI ROM), defaulting to 0.
 */
static void
SiS_InitVB(struct SiS_Private *SiS_Pr)
{
   unsigned char *ROMAddr = SiS_Pr->VirtualRomBase;

   SiS_Pr->Init_P4_0E = 0;
   if(SiS_Pr->SiS_ROMNew) {
      SiS_Pr->Init_P4_0E = ROMAddr[0x82];
   } else if(SiS_Pr->ChipType >= XGI_40) {
      if(SiS_Pr->SiS_XGIROM) {
	 SiS_Pr->Init_P4_0E = ROMAddr[0x80];
      }
   }
}

/*
 * Restore the video bridge programming clock (Part1 index 0x02) from
 * the ROM image, with the ROM offset depending on chip generation.
 */
static void
SiS_ResetVB(struct SiS_Private *SiS_Pr)
{
#ifdef CONFIG_FB_SIS_315
   unsigned char *ROMAddr = SiS_Pr->VirtualRomBase;
   unsigned short temp;

   /* VB programming clock */
   if(SiS_Pr->SiS_UseROM) {
      if(SiS_Pr->ChipType < SIS_330) {
	 temp = ROMAddr[VB310Data_1_2_Offset] | 0x40;
	 if(SiS_Pr->SiS_ROMNew) temp = ROMAddr[0x80] | 0x40;
	 SiS_SetReg(SiS_Pr->SiS_Part1Port,0x02,temp);
      } else if(SiS_Pr->ChipType >= SIS_661 && SiS_Pr->ChipType < XGI_20) {
	 temp = ROMAddr[0x7e] | 0x40;
	 if(SiS_Pr->SiS_ROMNew) temp = ROMAddr[0x80] | 0x40;
	 SiS_SetReg(SiS_Pr->SiS_Part1Port,0x02,temp);
      }
   } else if(SiS_Pr->ChipType >= XGI_40) {
      temp = 0x40;
      if(SiS_Pr->SiS_XGIROM) temp |= ROMAddr[0x7e];
      /* Can we do this on any chipset? */
      SiS_SetReg(SiS_Pr->SiS_Part1Port,0x02,temp);
   }
#endif
}

/*********************************************/
/*    HELPER: SET VIDEO/CAPTURE REGISTERS    */
/*********************************************/

static void
SiS_StrangeStuff(struct SiS_Private *SiS_Pr)
{
   /* SiS65x and XGI set up some sort of "lock mode" for text
    * which locks CRT2 in some way to CRT1 timing. Disable
    * this here.
    */
#ifdef CONFIG_FB_SIS_315
   if((IS_SIS651) || (IS_SISM650) ||
      SiS_Pr->ChipType == SIS_340 ||
      SiS_Pr->ChipType == XGI_40) {
      SiS_SetReg(SiS_Pr->SiS_VidCapt, 0x3f, 0x00);   /* Fiddle with capture regs */
      SiS_SetReg(SiS_Pr->SiS_VidCapt, 0x00, 0x00);
      SiS_SetReg(SiS_Pr->SiS_VidPlay, 0x00, 0x86);   /* (BIOS does NOT unlock) */
      SiS_SetRegAND(SiS_Pr->SiS_VidPlay, 0x30, 0xfe); /* Fiddle with video regs */
      SiS_SetRegAND(SiS_Pr->SiS_VidPlay, 0x3f, 0xef);
   }
   /* !!! This does not support modes < 0x13 !!! */
#endif
}

/*********************************************/
/*     HELPER: SET AGP TIMING FOR SiS760     */
/*********************************************/

/*
 * SiS760 with both LFB and UMA: tweak AGP timing via northbridge PCI
 * config bytes 0x7e/0x8d and a byte at an MMIO base read from PCI
 * config 0x74, keyed on CR31 bit 6.
 */
static void
SiS_Handle760(struct SiS_Private *SiS_Pr)
{
#ifdef CONFIG_FB_SIS_315
   unsigned int somebase;
   unsigned char temp1, temp2, temp3;

   if( (SiS_Pr->ChipType != SIS_760)                         ||
       ((SiS_GetReg(SiS_Pr->SiS_P3d4, 0x5c) & 0xf8) != 0x80) ||
       (!(SiS_Pr->SiS_SysFlags & SF_760LFB))                 ||
       (!(SiS_Pr->SiS_SysFlags & SF_760UMA)) )
      return;

   somebase = sisfb_read_mio_pci_word(SiS_Pr, 0x74);
   somebase &= 0xffff;

   if(somebase == 0) return;

   temp3 = SiS_GetRegByte((somebase + 0x85)) & 0xb7;

   if(SiS_GetReg(SiS_Pr->SiS_P3d4,0x31) & 0x40) {
      temp1 = 0x21;
      temp2 = 0x03;
      temp3 |= 0x08;
   } else {
      temp1 = 0x25;
      temp2 = 0x0b;
   }

   sisfb_write_nbridge_pci_byte(SiS_Pr, 0x7e, temp1);
   sisfb_write_nbridge_pci_byte(SiS_Pr, 0x8d, temp2);
   SiS_SetRegByte((somebase + 0x85), temp3);
#endif
}

/*********************************************/
/*                SiSSetMode()               */
/*********************************************/

/*
 * Top-level mode-setting entry point: initialise pointers/registers,
 * look up the mode, gather video-bridge and panel information, then
 * program CRT1 and/or CRT2 as appropriate.  Returns false when the mode
 * is unknown or does not fit into video memory.
 */
bool
SiSSetMode(struct SiS_Private *SiS_Pr, unsigned short ModeNo)
{
   SISIOADDRESS BaseAddr = SiS_Pr->IOAddress;
unsigned short RealModeNo, ModeIdIndex; unsigned char backupreg = 0; unsigned short KeepLockReg; SiS_Pr->UseCustomMode = false; SiS_Pr->CRT1UsesCustomMode = false; SiS_Pr->SiS_flag_clearbuffer = 0; if(SiS_Pr->UseCustomMode) { ModeNo = 0xfe; } else { if(!(ModeNo & 0x80)) SiS_Pr->SiS_flag_clearbuffer = 1; ModeNo &= 0x7f; } /* Don't use FSTN mode for CRT1 */ RealModeNo = ModeNo; if(ModeNo == 0x5b) ModeNo = 0x56; SiSInitPtr(SiS_Pr); SiSRegInit(SiS_Pr, BaseAddr); SiS_GetSysFlags(SiS_Pr); SiS_Pr->SiS_VGAINFO = 0x11; KeepLockReg = SiS_GetReg(SiS_Pr->SiS_P3c4,0x05); SiS_SetReg(SiS_Pr->SiS_P3c4,0x05,0x86); SiSInitPCIetc(SiS_Pr); SiSSetLVDSetc(SiS_Pr); SiSDetermineROMUsage(SiS_Pr); SiS_UnLockCRT2(SiS_Pr); if(!SiS_Pr->UseCustomMode) { if(!(SiS_SearchModeID(SiS_Pr, &ModeNo, &ModeIdIndex))) return false; } else { ModeIdIndex = 0; } SiS_GetVBType(SiS_Pr); /* Init/restore some VB registers */ SiS_InitVB(SiS_Pr); if(SiS_Pr->SiS_VBType & VB_SIS30xBLV) { if(SiS_Pr->ChipType >= SIS_315H) { SiS_ResetVB(SiS_Pr); SiS_SetRegOR(SiS_Pr->SiS_P3c4,0x32,0x10); SiS_SetRegOR(SiS_Pr->SiS_Part2Port,0x00,0x0c); backupreg = SiS_GetReg(SiS_Pr->SiS_P3d4,0x38); } else { backupreg = SiS_GetReg(SiS_Pr->SiS_P3d4,0x35); } } /* Get VB information (connectors, connected devices) */ SiS_GetVBInfo(SiS_Pr, ModeNo, ModeIdIndex, (SiS_Pr->UseCustomMode) ? 
0 : 1); SiS_SetYPbPr(SiS_Pr); SiS_SetTVMode(SiS_Pr, ModeNo, ModeIdIndex); SiS_GetLCDResInfo(SiS_Pr, ModeNo, ModeIdIndex); SiS_SetLowModeTest(SiS_Pr, ModeNo); /* Check memory size (kernel framebuffer driver only) */ if(!SiS_CheckMemorySize(SiS_Pr, ModeNo, ModeIdIndex)) { return false; } SiS_OpenCRTC(SiS_Pr); if(SiS_Pr->UseCustomMode) { SiS_Pr->CRT1UsesCustomMode = true; SiS_Pr->CSRClock_CRT1 = SiS_Pr->CSRClock; SiS_Pr->CModeFlag_CRT1 = SiS_Pr->CModeFlag; } else { SiS_Pr->CRT1UsesCustomMode = false; } /* Set mode on CRT1 */ if( (SiS_Pr->SiS_VBInfo & (SetSimuScanMode | SetCRT2ToLCDA)) || (!(SiS_Pr->SiS_VBInfo & SwitchCRT2)) ) { SiS_SetCRT1Group(SiS_Pr, ModeNo, ModeIdIndex); } /* Set mode on CRT2 */ if(SiS_Pr->SiS_VBInfo & (SetSimuScanMode | SwitchCRT2 | SetCRT2ToLCDA)) { if( (SiS_Pr->SiS_VBType & VB_SISVB) || (SiS_Pr->SiS_IF_DEF_LVDS == 1) || (SiS_Pr->SiS_IF_DEF_CH70xx != 0) || (SiS_Pr->SiS_IF_DEF_TRUMPION != 0) ) { SiS_SetCRT2Group(SiS_Pr, RealModeNo); } } SiS_HandleCRT1(SiS_Pr); SiS_StrangeStuff(SiS_Pr); SiS_DisplayOn(SiS_Pr); SiS_SetRegByte(SiS_Pr->SiS_P3c6,0xFF); #ifdef CONFIG_FB_SIS_315 if(SiS_Pr->ChipType >= SIS_315H) { if(SiS_Pr->SiS_IF_DEF_LVDS == 1) { if(!(SiS_IsDualEdge(SiS_Pr))) { SiS_SetRegAND(SiS_Pr->SiS_Part1Port,0x13,0xfb); } } } #endif if(SiS_Pr->SiS_VBType & VB_SIS30xBLV) { if(SiS_Pr->ChipType >= SIS_315H) { #ifdef CONFIG_FB_SIS_315 if(!SiS_Pr->SiS_ROMNew) { if(SiS_IsVAMode(SiS_Pr)) { SiS_SetRegOR(SiS_Pr->SiS_P3d4,0x35,0x01); } else { SiS_SetRegAND(SiS_Pr->SiS_P3d4,0x35,0xFE); } } SiS_SetReg(SiS_Pr->SiS_P3d4,0x38,backupreg); if((IS_SIS650) && (SiS_GetReg(SiS_Pr->SiS_P3d4,0x30) & 0xfc)) { if((ModeNo == 0x03) || (ModeNo == 0x10)) { SiS_SetRegOR(SiS_Pr->SiS_P3d4,0x51,0x80); SiS_SetRegOR(SiS_Pr->SiS_P3d4,0x56,0x08); } } if(SiS_GetReg(SiS_Pr->SiS_P3d4,0x30) & SetCRT2ToLCD) { SiS_SetRegAND(SiS_Pr->SiS_P3d4,0x38,0xfc); } #endif } else if((SiS_Pr->ChipType == SIS_630) || (SiS_Pr->ChipType == SIS_730)) { SiS_SetReg(SiS_Pr->SiS_P3d4,0x35,backupreg); } } 
SiS_CloseCRTC(SiS_Pr); SiS_Handle760(SiS_Pr); /* We never lock registers in XF86 */ if(KeepLockReg != 0xA1) SiS_SetReg(SiS_Pr->SiS_P3c4,0x05,0x00); return true; } #ifndef GETBITSTR #define BITMASK(h,l) (((unsigned)(1U << ((h)-(l)+1))-1)<<(l)) #define GENMASK(mask) BITMASK(1?mask,0?mask) #define GETBITS(var,mask) (((var) & GENMASK(mask)) >> (0?mask)) #define GETBITSTR(val,from,to) ((GETBITS(val,from)) << (0?to)) #endif void SiS_CalcCRRegisters(struct SiS_Private *SiS_Pr, int depth) { int x = 1; /* Fix sync */ SiS_Pr->CCRT1CRTC[0] = ((SiS_Pr->CHTotal >> 3) - 5) & 0xff; /* CR0 */ SiS_Pr->CCRT1CRTC[1] = (SiS_Pr->CHDisplay >> 3) - 1; /* CR1 */ SiS_Pr->CCRT1CRTC[2] = (SiS_Pr->CHBlankStart >> 3) - 1; /* CR2 */ SiS_Pr->CCRT1CRTC[3] = (((SiS_Pr->CHBlankEnd >> 3) - 1) & 0x1F) | 0x80; /* CR3 */ SiS_Pr->CCRT1CRTC[4] = (SiS_Pr->CHSyncStart >> 3) + 3; /* CR4 */ SiS_Pr->CCRT1CRTC[5] = ((((SiS_Pr->CHBlankEnd >> 3) - 1) & 0x20) << 2) | /* CR5 */ (((SiS_Pr->CHSyncEnd >> 3) + 3) & 0x1F); SiS_Pr->CCRT1CRTC[6] = (SiS_Pr->CVTotal - 2) & 0xFF; /* CR6 */ SiS_Pr->CCRT1CRTC[7] = (((SiS_Pr->CVTotal - 2) & 0x100) >> 8) /* CR7 */ | (((SiS_Pr->CVDisplay - 1) & 0x100) >> 7) | (((SiS_Pr->CVSyncStart - x) & 0x100) >> 6) | (((SiS_Pr->CVBlankStart- 1) & 0x100) >> 5) | 0x10 | (((SiS_Pr->CVTotal - 2) & 0x200) >> 4) | (((SiS_Pr->CVDisplay - 1) & 0x200) >> 3) | (((SiS_Pr->CVSyncStart - x) & 0x200) >> 2); SiS_Pr->CCRT1CRTC[16] = ((((SiS_Pr->CVBlankStart - 1) & 0x200) >> 4) >> 5); /* CR9 */ if(depth != 8) { if(SiS_Pr->CHDisplay >= 1600) SiS_Pr->CCRT1CRTC[16] |= 0x60; /* SRE */ else if(SiS_Pr->CHDisplay >= 640) SiS_Pr->CCRT1CRTC[16] |= 0x40; } SiS_Pr->CCRT1CRTC[8] = (SiS_Pr->CVSyncStart - x) & 0xFF; /* CR10 */ SiS_Pr->CCRT1CRTC[9] = ((SiS_Pr->CVSyncEnd - x) & 0x0F) | 0x80; /* CR11 */ SiS_Pr->CCRT1CRTC[10] = (SiS_Pr->CVDisplay - 1) & 0xFF; /* CR12 */ SiS_Pr->CCRT1CRTC[11] = (SiS_Pr->CVBlankStart - 1) & 0xFF; /* CR15 */ SiS_Pr->CCRT1CRTC[12] = (SiS_Pr->CVBlankEnd - 1) & 0xFF; /* CR16 */ SiS_Pr->CCRT1CRTC[13] 
= /* SRA */ GETBITSTR((SiS_Pr->CVTotal -2), 10:10, 0:0) | GETBITSTR((SiS_Pr->CVDisplay -1), 10:10, 1:1) | GETBITSTR((SiS_Pr->CVBlankStart-1), 10:10, 2:2) | GETBITSTR((SiS_Pr->CVSyncStart -x), 10:10, 3:3) | GETBITSTR((SiS_Pr->CVBlankEnd -1), 8:8, 4:4) | GETBITSTR((SiS_Pr->CVSyncEnd ), 4:4, 5:5) ; SiS_Pr->CCRT1CRTC[14] = /* SRB */ GETBITSTR((SiS_Pr->CHTotal >> 3) - 5, 9:8, 1:0) | GETBITSTR((SiS_Pr->CHDisplay >> 3) - 1, 9:8, 3:2) | GETBITSTR((SiS_Pr->CHBlankStart >> 3) - 1, 9:8, 5:4) | GETBITSTR((SiS_Pr->CHSyncStart >> 3) + 3, 9:8, 7:6) ; SiS_Pr->CCRT1CRTC[15] = /* SRC */ GETBITSTR((SiS_Pr->CHBlankEnd >> 3) - 1, 7:6, 1:0) | GETBITSTR((SiS_Pr->CHSyncEnd >> 3) + 3, 5:5, 2:2) ; } void SiS_CalcLCDACRT1Timing(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short ModeIdIndex) { unsigned short modeflag, tempax, tempbx = 0, remaining = 0; unsigned short VGAHDE = SiS_Pr->SiS_VGAHDE; int i, j; /* 1:1 data: use data set by setcrt1crtc() */ if(SiS_Pr->SiS_LCDInfo & LCDPass11) return; modeflag = SiS_GetModeFlag(SiS_Pr, ModeNo, ModeIdIndex); if(modeflag & HalfDCLK) VGAHDE >>= 1; SiS_Pr->CHDisplay = VGAHDE; SiS_Pr->CHBlankStart = VGAHDE; SiS_Pr->CVDisplay = SiS_Pr->SiS_VGAVDE; SiS_Pr->CVBlankStart = SiS_Pr->SiS_VGAVDE; if(SiS_Pr->ChipType < SIS_315H) { #ifdef CONFIG_FB_SIS_300 tempbx = SiS_Pr->SiS_VGAHT; if(SiS_Pr->SiS_LCDInfo & DontExpandLCD) { tempbx = SiS_Pr->PanelHT; } if(modeflag & HalfDCLK) tempbx >>= 1; remaining = tempbx % 8; #endif } else { #ifdef CONFIG_FB_SIS_315 /* OK for LCDA, LVDS */ tempbx = SiS_Pr->PanelHT - SiS_Pr->PanelXRes; tempax = SiS_Pr->SiS_VGAHDE; /* not /2 ! 
*/ if(SiS_Pr->SiS_LCDInfo & DontExpandLCD) { tempax = SiS_Pr->PanelXRes; } tempbx += tempax; if(modeflag & HalfDCLK) tempbx -= VGAHDE; #endif } SiS_Pr->CHTotal = SiS_Pr->CHBlankEnd = tempbx; if(SiS_Pr->ChipType < SIS_315H) { #ifdef CONFIG_FB_SIS_300 if(SiS_Pr->SiS_VGAHDE == SiS_Pr->PanelXRes) { SiS_Pr->CHSyncStart = SiS_Pr->SiS_VGAHDE + ((SiS_Pr->PanelHRS + 1) & ~1); SiS_Pr->CHSyncEnd = SiS_Pr->CHSyncStart + SiS_Pr->PanelHRE; if(modeflag & HalfDCLK) { SiS_Pr->CHSyncStart >>= 1; SiS_Pr->CHSyncEnd >>= 1; } } else if(SiS_Pr->SiS_LCDInfo & DontExpandLCD) { tempax = (SiS_Pr->PanelXRes - SiS_Pr->SiS_VGAHDE) >> 1; tempbx = (SiS_Pr->PanelHRS + 1) & ~1; if(modeflag & HalfDCLK) { tempax >>= 1; tempbx >>= 1; } SiS_Pr->CHSyncStart = (VGAHDE + tempax + tempbx + 7) & ~7; tempax = SiS_Pr->PanelHRE + 7; if(modeflag & HalfDCLK) tempax >>= 1; SiS_Pr->CHSyncEnd = (SiS_Pr->CHSyncStart + tempax) & ~7; } else { SiS_Pr->CHSyncStart = SiS_Pr->SiS_VGAHDE; if(modeflag & HalfDCLK) { SiS_Pr->CHSyncStart >>= 1; tempax = ((SiS_Pr->CHTotal - SiS_Pr->CHSyncStart) / 3) << 1; SiS_Pr->CHSyncEnd = SiS_Pr->CHSyncStart + tempax; } else { SiS_Pr->CHSyncEnd = (SiS_Pr->CHSyncStart + (SiS_Pr->CHTotal / 10) + 7) & ~7; SiS_Pr->CHSyncStart += 8; } } #endif } else { #ifdef CONFIG_FB_SIS_315 tempax = VGAHDE; if(SiS_Pr->SiS_LCDInfo & DontExpandLCD) { tempbx = SiS_Pr->PanelXRes; if(modeflag & HalfDCLK) tempbx >>= 1; tempax += ((tempbx - tempax) >> 1); } tempax += SiS_Pr->PanelHRS; SiS_Pr->CHSyncStart = tempax; tempax += SiS_Pr->PanelHRE; SiS_Pr->CHSyncEnd = tempax; #endif } tempbx = SiS_Pr->PanelVT - SiS_Pr->PanelYRes; tempax = SiS_Pr->SiS_VGAVDE; if(SiS_Pr->SiS_LCDInfo & DontExpandLCD) { tempax = SiS_Pr->PanelYRes; } else if(SiS_Pr->ChipType < SIS_315H) { #ifdef CONFIG_FB_SIS_300 /* Stupid hack for 640x400/320x200 */ if(SiS_Pr->SiS_LCDResInfo == Panel_1024x768) { if((tempax + tempbx) == 438) tempbx += 16; } else if((SiS_Pr->SiS_LCDResInfo == Panel_800x600) || (SiS_Pr->SiS_LCDResInfo == Panel_1024x600)) { tempax 
= 0; tempbx = SiS_Pr->SiS_VGAVT; } #endif } SiS_Pr->CVTotal = SiS_Pr->CVBlankEnd = tempbx + tempax; tempax = SiS_Pr->SiS_VGAVDE; if(SiS_Pr->SiS_LCDInfo & DontExpandLCD) { tempax += (SiS_Pr->PanelYRes - tempax) >> 1; } tempax += SiS_Pr->PanelVRS; SiS_Pr->CVSyncStart = tempax; tempax += SiS_Pr->PanelVRE; SiS_Pr->CVSyncEnd = tempax; if(SiS_Pr->ChipType < SIS_315H) { SiS_Pr->CVSyncStart--; SiS_Pr->CVSyncEnd--; } SiS_CalcCRRegisters(SiS_Pr, 8); SiS_Pr->CCRT1CRTC[15] &= ~0xF8; SiS_Pr->CCRT1CRTC[15] |= (remaining << 4); SiS_Pr->CCRT1CRTC[16] &= ~0xE0; SiS_SetRegAND(SiS_Pr->SiS_P3d4,0x11,0x7f); for(i = 0, j = 0; i <= 7; i++, j++) { SiS_SetReg(SiS_Pr->SiS_P3d4,j,SiS_Pr->CCRT1CRTC[i]); } for(j = 0x10; i <= 10; i++, j++) { SiS_SetReg(SiS_Pr->SiS_P3d4,j,SiS_Pr->CCRT1CRTC[i]); } for(j = 0x15; i <= 12; i++, j++) { SiS_SetReg(SiS_Pr->SiS_P3d4,j,SiS_Pr->CCRT1CRTC[i]); } for(j = 0x0A; i <= 15; i++, j++) { SiS_SetReg(SiS_Pr->SiS_P3c4,j,SiS_Pr->CCRT1CRTC[i]); } tempax = SiS_Pr->CCRT1CRTC[16] & 0xE0; SiS_SetRegANDOR(SiS_Pr->SiS_P3c4,0x0E,0x1F,tempax); tempax = (SiS_Pr->CCRT1CRTC[16] & 0x01) << 5; if(modeflag & DoubleScanMode) tempax |= 0x80; SiS_SetRegANDOR(SiS_Pr->SiS_P3d4,0x09,0x5F,tempax); } void SiS_Generic_ConvertCRData(struct SiS_Private *SiS_Pr, unsigned char *crdata, int xres, int yres, struct fb_var_screeninfo *var, bool writeres ) { unsigned short HRE, HBE, HRS, HBS, HDE, HT; unsigned short VRE, VBE, VRS, VBS, VDE, VT; unsigned char sr_data, cr_data, cr_data2; int A, B, C, D, E, F, temp; sr_data = crdata[14]; /* Horizontal total */ HT = crdata[0] | ((unsigned short)(sr_data & 0x03) << 8); A = HT + 5; /* Horizontal display enable end */ HDE = crdata[1] | ((unsigned short)(sr_data & 0x0C) << 6); E = HDE + 1; /* Horizontal retrace (=sync) start */ HRS = crdata[4] | ((unsigned short)(sr_data & 0xC0) << 2); F = HRS - E - 3; /* Horizontal blank start */ HBS = crdata[2] | ((unsigned short)(sr_data & 0x30) << 4); sr_data = crdata[15]; cr_data = crdata[5]; /* Horizontal blank end */ 
HBE = (crdata[3] & 0x1f) | ((unsigned short)(cr_data & 0x80) >> 2) | ((unsigned short)(sr_data & 0x03) << 6); /* Horizontal retrace (=sync) end */ HRE = (cr_data & 0x1f) | ((sr_data & 0x04) << 3); temp = HBE - ((E - 1) & 255); B = (temp > 0) ? temp : (temp + 256); temp = HRE - ((E + F + 3) & 63); C = (temp > 0) ? temp : (temp + 64); D = B - F - C; if(writeres) var->xres = xres = E * 8; var->left_margin = D * 8; var->right_margin = F * 8; var->hsync_len = C * 8; /* Vertical */ sr_data = crdata[13]; cr_data = crdata[7]; /* Vertical total */ VT = crdata[6] | ((unsigned short)(cr_data & 0x01) << 8) | ((unsigned short)(cr_data & 0x20) << 4) | ((unsigned short)(sr_data & 0x01) << 10); A = VT + 2; /* Vertical display enable end */ VDE = crdata[10] | ((unsigned short)(cr_data & 0x02) << 7) | ((unsigned short)(cr_data & 0x40) << 3) | ((unsigned short)(sr_data & 0x02) << 9); E = VDE + 1; /* Vertical retrace (=sync) start */ VRS = crdata[8] | ((unsigned short)(cr_data & 0x04) << 6) | ((unsigned short)(cr_data & 0x80) << 2) | ((unsigned short)(sr_data & 0x08) << 7); F = VRS + 1 - E; cr_data2 = (crdata[16] & 0x01) << 5; /* Vertical blank start */ VBS = crdata[11] | ((unsigned short)(cr_data & 0x08) << 5) | ((unsigned short)(cr_data2 & 0x20) << 4) | ((unsigned short)(sr_data & 0x04) << 8); /* Vertical blank end */ VBE = crdata[12] | ((unsigned short)(sr_data & 0x10) << 4); temp = VBE - ((E - 1) & 511); B = (temp > 0) ? temp : (temp + 512); /* Vertical retrace (=sync) end */ VRE = (crdata[9] & 0x0f) | ((sr_data & 0x20) >> 1); temp = VRE - ((E + F - 1) & 31); C = (temp > 0) ? temp : (temp + 32); D = B - F - C; if(writeres) var->yres = yres = E; var->upper_margin = D; var->lower_margin = F; var->vsync_len = C; if((xres == 320) && ((yres == 200) || (yres == 240))) { /* Terrible hack, but correct CRTC data for * these modes only produces a black screen... * (HRE is 0, leading into a too large C and * a negative D. 
The CRT controller does not * seem to like correcting HRE to 50) */ var->left_margin = (400 - 376); var->right_margin = (328 - 320); var->hsync_len = (376 - 328); } }
gpl-2.0
invisiblek/android_kernel_htc_msm8960
tools/power/cpupower/bench/system.c
9944
4287
/* cpufreq-bench CPUFreq microbenchmark * * Copyright (C) 2008 Christian Kornacker <ckornacker@suse.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include <stdio.h> #include <time.h> #include <sys/time.h> #include <sys/types.h> #include <unistd.h> #include <sched.h> #include <cpufreq.h> #include "config.h" #include "system.h" /** * returns time since epoch in µs * * @retval time **/ long long int get_time() { struct timeval now; gettimeofday(&now, NULL); return (long long int)(now.tv_sec * 1000000LL + now.tv_usec); } /** * sets the cpufreq governor * * @param governor cpufreq governor name * @param cpu cpu for which the governor should be set * * @retval 0 on success * @retval -1 when failed **/ int set_cpufreq_governor(char *governor, unsigned int cpu) { dprintf("set %s as cpufreq governor\n", governor); if (cpufreq_cpu_exists(cpu) != 0) { perror("cpufreq_cpu_exists"); fprintf(stderr, "error: cpu %u does not exist\n", cpu); return -1; } if (cpufreq_modify_policy_governor(cpu, governor) != 0) { perror("cpufreq_modify_policy_governor"); fprintf(stderr, "error: unable to set %s governor\n", governor); return -1; } return 0; } /** * sets cpu affinity for the process * * @param cpu cpu# to which the affinity should be set * * @retval 0 on success * @retval -1 when setting the affinity failed **/ int 
set_cpu_affinity(unsigned int cpu) { cpu_set_t cpuset; CPU_ZERO(&cpuset); CPU_SET(cpu, &cpuset); dprintf("set affinity to cpu #%u\n", cpu); if (sched_setaffinity(getpid(), sizeof(cpu_set_t), &cpuset) < 0) { perror("sched_setaffinity"); fprintf(stderr, "warning: unable to set cpu affinity\n"); return -1; } return 0; } /** * sets the process priority parameter * * @param priority priority value * * @retval 0 on success * @retval -1 when setting the priority failed **/ int set_process_priority(int priority) { struct sched_param param; dprintf("set scheduler priority to %i\n", priority); param.sched_priority = priority; if (sched_setscheduler(0, SCHEDULER, &param) < 0) { perror("sched_setscheduler"); fprintf(stderr, "warning: unable to set scheduler priority\n"); return -1; } return 0; } /** * notifies the user that the benchmark may run some time * * @param config benchmark config values * **/ void prepare_user(const struct config *config) { unsigned long sleep_time = 0; unsigned long load_time = 0; unsigned int round; for (round = 0; round < config->rounds; round++) { sleep_time += 2 * config->cycles * (config->sleep + config->sleep_step * round); load_time += 2 * config->cycles * (config->load + config->load_step * round) + (config->load + config->load_step * round * 4); } if (config->verbose || config->output != stdout) printf("approx. 
test duration: %im\n", (int)((sleep_time + load_time) / 60000000)); } /** * sets up the cpu affinity and scheduler priority * * @param config benchmark config values * **/ void prepare_system(const struct config *config) { if (config->verbose) printf("set cpu affinity to cpu #%u\n", config->cpu); set_cpu_affinity(config->cpu); switch (config->prio) { case SCHED_HIGH: if (config->verbose) printf("high priority condition requested\n"); set_process_priority(PRIORITY_HIGH); break; case SCHED_LOW: if (config->verbose) printf("low priority condition requested\n"); set_process_priority(PRIORITY_LOW); break; default: if (config->verbose) printf("default priority condition requested\n"); set_process_priority(PRIORITY_DEFAULT); } }
gpl-2.0
UnknownzD/I9103_TW_ICS_Kernel
sound/pcmcia/pdaudiocf/pdaudiocf_irq.c
12760
9233
/* * Driver for Sound Core PDAudioCF soundcard * * Copyright (c) 2003 by Jaroslav Kysela <perex@perex.cz> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <sound/core.h> #include "pdaudiocf.h" #include <sound/initval.h> #include <asm/irq_regs.h> /* * */ irqreturn_t pdacf_interrupt(int irq, void *dev) { struct snd_pdacf *chip = dev; unsigned short stat; if ((chip->chip_status & (PDAUDIOCF_STAT_IS_STALE| PDAUDIOCF_STAT_IS_CONFIGURED| PDAUDIOCF_STAT_IS_SUSPENDED)) != PDAUDIOCF_STAT_IS_CONFIGURED) return IRQ_HANDLED; /* IRQ_NONE here? 
*/ stat = inw(chip->port + PDAUDIOCF_REG_ISR); if (stat & (PDAUDIOCF_IRQLVL|PDAUDIOCF_IRQOVR)) { if (stat & PDAUDIOCF_IRQOVR) /* should never happen */ snd_printk(KERN_ERR "PDAUDIOCF SRAM buffer overrun detected!\n"); if (chip->pcm_substream) tasklet_schedule(&chip->tq); if (!(stat & PDAUDIOCF_IRQAKM)) stat |= PDAUDIOCF_IRQAKM; /* check rate */ } if (get_irq_regs() != NULL) snd_ak4117_check_rate_and_errors(chip->ak4117, 0); return IRQ_HANDLED; } static inline void pdacf_transfer_mono16(u16 *dst, u16 xor, unsigned int size, unsigned long rdp_port) { while (size-- > 0) { *dst++ = inw(rdp_port) ^ xor; inw(rdp_port); } } static inline void pdacf_transfer_mono32(u32 *dst, u32 xor, unsigned int size, unsigned long rdp_port) { register u16 val1, val2; while (size-- > 0) { val1 = inw(rdp_port); val2 = inw(rdp_port); inw(rdp_port); *dst++ = ((((u32)val2 & 0xff) << 24) | ((u32)val1 << 8)) ^ xor; } } static inline void pdacf_transfer_stereo16(u16 *dst, u16 xor, unsigned int size, unsigned long rdp_port) { while (size-- > 0) { *dst++ = inw(rdp_port) ^ xor; *dst++ = inw(rdp_port) ^ xor; } } static inline void pdacf_transfer_stereo32(u32 *dst, u32 xor, unsigned int size, unsigned long rdp_port) { register u16 val1, val2, val3; while (size-- > 0) { val1 = inw(rdp_port); val2 = inw(rdp_port); val3 = inw(rdp_port); *dst++ = ((((u32)val2 & 0xff) << 24) | ((u32)val1 << 8)) ^ xor; *dst++ = (((u32)val3 << 16) | (val2 & 0xff00)) ^ xor; } } static inline void pdacf_transfer_mono16sw(u16 *dst, u16 xor, unsigned int size, unsigned long rdp_port) { while (size-- > 0) { *dst++ = swab16(inw(rdp_port) ^ xor); inw(rdp_port); } } static inline void pdacf_transfer_mono32sw(u32 *dst, u32 xor, unsigned int size, unsigned long rdp_port) { register u16 val1, val2; while (size-- > 0) { val1 = inw(rdp_port); val2 = inw(rdp_port); inw(rdp_port); *dst++ = swab32((((val2 & 0xff) << 24) | ((u32)val1 << 8)) ^ xor); } } static inline void pdacf_transfer_stereo16sw(u16 *dst, u16 xor, unsigned int size, 
unsigned long rdp_port) { while (size-- > 0) { *dst++ = swab16(inw(rdp_port) ^ xor); *dst++ = swab16(inw(rdp_port) ^ xor); } } static inline void pdacf_transfer_stereo32sw(u32 *dst, u32 xor, unsigned int size, unsigned long rdp_port) { register u16 val1, val2, val3; while (size-- > 0) { val1 = inw(rdp_port); val2 = inw(rdp_port); val3 = inw(rdp_port); *dst++ = swab32((((val2 & 0xff) << 24) | ((u32)val1 << 8)) ^ xor); *dst++ = swab32((((u32)val3 << 16) | (val2 & 0xff00)) ^ xor); } } static inline void pdacf_transfer_mono24le(u8 *dst, u16 xor, unsigned int size, unsigned long rdp_port) { register u16 val1, val2; register u32 xval1; while (size-- > 0) { val1 = inw(rdp_port); val2 = inw(rdp_port); inw(rdp_port); xval1 = (((val2 & 0xff) << 8) | (val1 << 16)) ^ xor; *dst++ = (u8)(xval1 >> 8); *dst++ = (u8)(xval1 >> 16); *dst++ = (u8)(xval1 >> 24); } } static inline void pdacf_transfer_mono24be(u8 *dst, u16 xor, unsigned int size, unsigned long rdp_port) { register u16 val1, val2; register u32 xval1; while (size-- > 0) { val1 = inw(rdp_port); val2 = inw(rdp_port); inw(rdp_port); xval1 = (((val2 & 0xff) << 8) | (val1 << 16)) ^ xor; *dst++ = (u8)(xval1 >> 24); *dst++ = (u8)(xval1 >> 16); *dst++ = (u8)(xval1 >> 8); } } static inline void pdacf_transfer_stereo24le(u8 *dst, u32 xor, unsigned int size, unsigned long rdp_port) { register u16 val1, val2, val3; register u32 xval1, xval2; while (size-- > 0) { val1 = inw(rdp_port); val2 = inw(rdp_port); val3 = inw(rdp_port); xval1 = ((((u32)val2 & 0xff) << 24) | ((u32)val1 << 8)) ^ xor; xval2 = (((u32)val3 << 16) | (val2 & 0xff00)) ^ xor; *dst++ = (u8)(xval1 >> 8); *dst++ = (u8)(xval1 >> 16); *dst++ = (u8)(xval1 >> 24); *dst++ = (u8)(xval2 >> 8); *dst++ = (u8)(xval2 >> 16); *dst++ = (u8)(xval2 >> 24); } } static inline void pdacf_transfer_stereo24be(u8 *dst, u32 xor, unsigned int size, unsigned long rdp_port) { register u16 val1, val2, val3; register u32 xval1, xval2; while (size-- > 0) { val1 = inw(rdp_port); val2 = inw(rdp_port); 
val3 = inw(rdp_port); xval1 = ((((u32)val2 & 0xff) << 24) | ((u32)val1 << 8)) ^ xor; xval2 = (((u32)val3 << 16) | (val2 & 0xff00)) ^ xor; *dst++ = (u8)(xval1 >> 24); *dst++ = (u8)(xval1 >> 16); *dst++ = (u8)(xval1 >> 8); *dst++ = (u8)(xval2 >> 24); *dst++ = (u8)(xval2 >> 16); *dst++ = (u8)(xval2 >> 8); } } static void pdacf_transfer(struct snd_pdacf *chip, unsigned int size, unsigned int off) { unsigned long rdp_port = chip->port + PDAUDIOCF_REG_MD; unsigned int xor = chip->pcm_xor; if (chip->pcm_sample == 3) { if (chip->pcm_little) { if (chip->pcm_channels == 1) { pdacf_transfer_mono24le((char *)chip->pcm_area + (off * 3), xor, size, rdp_port); } else { pdacf_transfer_stereo24le((char *)chip->pcm_area + (off * 6), xor, size, rdp_port); } } else { if (chip->pcm_channels == 1) { pdacf_transfer_mono24be((char *)chip->pcm_area + (off * 3), xor, size, rdp_port); } else { pdacf_transfer_stereo24be((char *)chip->pcm_area + (off * 6), xor, size, rdp_port); } } return; } if (chip->pcm_swab == 0) { if (chip->pcm_channels == 1) { if (chip->pcm_frame == 2) { pdacf_transfer_mono16((u16 *)chip->pcm_area + off, xor, size, rdp_port); } else { pdacf_transfer_mono32((u32 *)chip->pcm_area + off, xor, size, rdp_port); } } else { if (chip->pcm_frame == 2) { pdacf_transfer_stereo16((u16 *)chip->pcm_area + (off * 2), xor, size, rdp_port); } else { pdacf_transfer_stereo32((u32 *)chip->pcm_area + (off * 2), xor, size, rdp_port); } } } else { if (chip->pcm_channels == 1) { if (chip->pcm_frame == 2) { pdacf_transfer_mono16sw((u16 *)chip->pcm_area + off, xor, size, rdp_port); } else { pdacf_transfer_mono32sw((u32 *)chip->pcm_area + off, xor, size, rdp_port); } } else { if (chip->pcm_frame == 2) { pdacf_transfer_stereo16sw((u16 *)chip->pcm_area + (off * 2), xor, size, rdp_port); } else { pdacf_transfer_stereo32sw((u32 *)chip->pcm_area + (off * 2), xor, size, rdp_port); } } } } void pdacf_tasklet(unsigned long private_data) { struct snd_pdacf *chip = (struct snd_pdacf *) private_data; int 
size, off, cont, rdp, wdp; if ((chip->chip_status & (PDAUDIOCF_STAT_IS_STALE|PDAUDIOCF_STAT_IS_CONFIGURED)) != PDAUDIOCF_STAT_IS_CONFIGURED) return; if (chip->pcm_substream == NULL || chip->pcm_substream->runtime == NULL || !snd_pcm_running(chip->pcm_substream)) return; rdp = inw(chip->port + PDAUDIOCF_REG_RDP); wdp = inw(chip->port + PDAUDIOCF_REG_WDP); /* printk(KERN_DEBUG "TASKLET: rdp = %x, wdp = %x\n", rdp, wdp); */ size = wdp - rdp; if (size < 0) size += 0x10000; if (size == 0) size = 0x10000; size /= chip->pcm_frame; if (size > 64) size -= 32; #if 0 chip->pcm_hwptr += size; chip->pcm_hwptr %= chip->pcm_size; chip->pcm_tdone += size; if (chip->pcm_frame == 2) { unsigned long rdp_port = chip->port + PDAUDIOCF_REG_MD; while (size-- > 0) { inw(rdp_port); inw(rdp_port); } } else { unsigned long rdp_port = chip->port + PDAUDIOCF_REG_MD; while (size-- > 0) { inw(rdp_port); inw(rdp_port); inw(rdp_port); } } #else off = chip->pcm_hwptr + chip->pcm_tdone; off %= chip->pcm_size; chip->pcm_tdone += size; while (size > 0) { cont = chip->pcm_size - off; if (cont > size) cont = size; pdacf_transfer(chip, cont, off); off += cont; off %= chip->pcm_size; size -= cont; } #endif spin_lock(&chip->reg_lock); while (chip->pcm_tdone >= chip->pcm_period) { chip->pcm_hwptr += chip->pcm_period; chip->pcm_hwptr %= chip->pcm_size; chip->pcm_tdone -= chip->pcm_period; spin_unlock(&chip->reg_lock); snd_pcm_period_elapsed(chip->pcm_substream); spin_lock(&chip->reg_lock); } spin_unlock(&chip->reg_lock); /* printk(KERN_DEBUG "TASKLET: end\n"); */ }
gpl-2.0
LegacyHuawei/android_kernel_huawei_msm7x30
net/irda/wrapper.c
13272
13361
/********************************************************************* * * Filename: wrapper.c * Version: 1.2 * Description: IrDA SIR async wrapper layer * Status: Stable * Author: Dag Brattli <dagb@cs.uit.no> * Created at: Mon Aug 4 20:40:53 1997 * Modified at: Fri Jan 28 13:21:09 2000 * Modified by: Dag Brattli <dagb@cs.uit.no> * Modified at: Fri May 28 3:11 CST 1999 * Modified by: Horst von Brand <vonbrand@sleipnir.valparaiso.cl> * * Copyright (c) 1998-2000 Dag Brattli <dagb@cs.uit.no>, * All Rights Reserved. * Copyright (c) 2000-2002 Jean Tourrilhes <jt@hpl.hp.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * * Neither Dag Brattli nor University of Tromsø admit liability nor * provide warranty for any of this software. This material is * provided "AS-IS" and at no charge. * ********************************************************************/ #include <linux/skbuff.h> #include <linux/string.h> #include <linux/module.h> #include <asm/byteorder.h> #include <net/irda/irda.h> #include <net/irda/wrapper.h> #include <net/irda/crc.h> #include <net/irda/irlap.h> #include <net/irda/irlap_frame.h> #include <net/irda/irda_device.h> /************************** FRAME WRAPPING **************************/ /* * Unwrap and unstuff SIR frames * * Note : at FIR and MIR, HDLC framing is used and usually handled * by the controller, so we come here only for SIR... Jean II */ /* * Function stuff_byte (byte, buf) * * Byte stuff one single byte and put the result in buffer pointed to by * buf. The buffer must at all times be able to have two bytes inserted. * * This is in a tight loop, better inline it, so need to be prior to callers. 
* (2000 bytes on P6 200MHz, non-inlined ~370us, inline ~170us) - Jean II */ static inline int stuff_byte(__u8 byte, __u8 *buf) { switch (byte) { case BOF: /* FALLTHROUGH */ case EOF: /* FALLTHROUGH */ case CE: /* Insert transparently coded */ buf[0] = CE; /* Send link escape */ buf[1] = byte^IRDA_TRANS; /* Complement bit 5 */ return 2; /* break; */ default: /* Non-special value, no transparency required */ buf[0] = byte; return 1; /* break; */ } } /* * Function async_wrap (skb, *tx_buff, buffsize) * * Makes a new buffer with wrapping and stuffing, should check that * we don't get tx buffer overflow. */ int async_wrap_skb(struct sk_buff *skb, __u8 *tx_buff, int buffsize) { struct irda_skb_cb *cb = (struct irda_skb_cb *) skb->cb; int xbofs; int i; int n; union { __u16 value; __u8 bytes[2]; } fcs; /* Initialize variables */ fcs.value = INIT_FCS; n = 0; /* * Send XBOF's for required min. turn time and for the negotiated * additional XBOFS */ if (cb->magic != LAP_MAGIC) { /* * This will happen for all frames sent from user-space. * Nothing to worry about, but we set the default number of * BOF's */ IRDA_DEBUG(1, "%s(), wrong magic in skb!\n", __func__); xbofs = 10; } else xbofs = cb->xbofs + cb->xbofs_delay; IRDA_DEBUG(4, "%s(), xbofs=%d\n", __func__, xbofs); /* Check that we never use more than 115 + 48 xbofs */ if (xbofs > 163) { IRDA_DEBUG(0, "%s(), too many xbofs (%d)\n", __func__, xbofs); xbofs = 163; } memset(tx_buff + n, XBOF, xbofs); n += xbofs; /* Start of packet character BOF */ tx_buff[n++] = BOF; /* Insert frame and calc CRC */ for (i=0; i < skb->len; i++) { /* * Check for the possibility of tx buffer overflow. We use * bufsize-5 since the maximum number of bytes that can be * transmitted after this point is 5. 
*/ if(n >= (buffsize-5)) { IRDA_ERROR("%s(), tx buffer overflow (n=%d)\n", __func__, n); return n; } n += stuff_byte(skb->data[i], tx_buff+n); fcs.value = irda_fcs(fcs.value, skb->data[i]); } /* Insert CRC in little endian format (LSB first) */ fcs.value = ~fcs.value; #ifdef __LITTLE_ENDIAN n += stuff_byte(fcs.bytes[0], tx_buff+n); n += stuff_byte(fcs.bytes[1], tx_buff+n); #else /* ifdef __BIG_ENDIAN */ n += stuff_byte(fcs.bytes[1], tx_buff+n); n += stuff_byte(fcs.bytes[0], tx_buff+n); #endif tx_buff[n++] = EOF; return n; } EXPORT_SYMBOL(async_wrap_skb); /************************* FRAME UNWRAPPING *************************/ /* * Unwrap and unstuff SIR frames * * Complete rewrite by Jean II : * More inline, faster, more compact, more logical. Jean II * (16 bytes on P6 200MHz, old 5 to 7 us, new 4 to 6 us) * (24 bytes on P6 200MHz, old 9 to 10 us, new 7 to 8 us) * (for reference, 115200 b/s is 1 byte every 69 us) * And reduce wrapper.o by ~900B in the process ;-) * * Then, we have the addition of ZeroCopy, which is optional * (i.e. the driver must initiate it) and improve final processing. * (2005 B frame + EOF on P6 200MHz, without 30 to 50 us, with 10 to 25 us) * * Note : at FIR and MIR, HDLC framing is used and usually handled * by the controller, so we come here only for SIR... Jean II */ /* * We can also choose where we want to do the CRC calculation. We can * do it "inline", as we receive the bytes, or "postponed", when * receiving the End-Of-Frame. * (16 bytes on P6 200MHz, inlined 4 to 6 us, postponed 4 to 5 us) * (24 bytes on P6 200MHz, inlined 7 to 8 us, postponed 5 to 7 us) * With ZeroCopy : * (2005 B frame on P6 200MHz, inlined 10 to 25 us, postponed 140 to 180 us) * Without ZeroCopy : * (2005 B frame on P6 200MHz, inlined 30 to 50 us, postponed 150 to 180 us) * (Note : numbers taken with irq disabled) * * From those numbers, it's not clear which is the best strategy, because * we end up running through a lot of data one way or another (i.e. 
cache * misses). I personally prefer to avoid the huge latency spike of the * "postponed" solution, because it come just at the time when we have * lot's of protocol processing to do and it will hurt our ability to * reach low link turnaround times... Jean II */ //#define POSTPONE_RX_CRC /* * Function async_bump (buf, len, stats) * * Got a frame, make a copy of it, and pass it up the stack! We can try * to inline it since it's only called from state_inside_frame */ static inline void async_bump(struct net_device *dev, struct net_device_stats *stats, iobuff_t *rx_buff) { struct sk_buff *newskb; struct sk_buff *dataskb; int docopy; /* Check if we need to copy the data to a new skb or not. * If the driver doesn't use ZeroCopy Rx, we have to do it. * With ZeroCopy Rx, the rx_buff already point to a valid * skb. But, if the frame is small, it is more efficient to * copy it to save memory (copy will be fast anyway - that's * called Rx-copy-break). Jean II */ docopy = ((rx_buff->skb == NULL) || (rx_buff->len < IRDA_RX_COPY_THRESHOLD)); /* Allocate a new skb */ newskb = dev_alloc_skb(docopy ? rx_buff->len + 1 : rx_buff->truesize); if (!newskb) { stats->rx_dropped++; /* We could deliver the current skb if doing ZeroCopy Rx, * but this would stall the Rx path. Better drop the * packet... Jean II */ return; } /* Align IP header to 20 bytes (i.e. increase skb->data) * Note this is only useful with IrLAN, as PPP has a variable * header size (2 or 1 bytes) - Jean II */ skb_reserve(newskb, 1); if(docopy) { /* Copy data without CRC (length already checked) */ skb_copy_to_linear_data(newskb, rx_buff->data, rx_buff->len - 2); /* Deliver this skb */ dataskb = newskb; } else { /* We are using ZeroCopy. 
Deliver old skb */ dataskb = rx_buff->skb; /* And hook the new skb to the rx_buff */ rx_buff->skb = newskb; rx_buff->head = newskb->data; /* NOT newskb->head */ //printk(KERN_DEBUG "ZeroCopy : len = %d, dataskb = %p, newskb = %p\n", rx_buff->len, dataskb, newskb); } /* Set proper length on skb (without CRC) */ skb_put(dataskb, rx_buff->len - 2); /* Feed it to IrLAP layer */ dataskb->dev = dev; skb_reset_mac_header(dataskb); dataskb->protocol = htons(ETH_P_IRDA); netif_rx(dataskb); stats->rx_packets++; stats->rx_bytes += rx_buff->len; /* Clean up rx_buff (redundant with async_unwrap_bof() ???) */ rx_buff->data = rx_buff->head; rx_buff->len = 0; } /* * Function async_unwrap_bof(dev, byte) * * Handle Beginning Of Frame character received within a frame * */ static inline void async_unwrap_bof(struct net_device *dev, struct net_device_stats *stats, iobuff_t *rx_buff, __u8 byte) { switch(rx_buff->state) { case LINK_ESCAPE: case INSIDE_FRAME: /* Not supposed to happen, the previous frame is not * finished - Jean II */ IRDA_DEBUG(1, "%s(), Discarding incomplete frame\n", __func__); stats->rx_errors++; stats->rx_missed_errors++; irda_device_set_media_busy(dev, TRUE); break; case OUTSIDE_FRAME: case BEGIN_FRAME: default: /* We may receive multiple BOF at the start of frame */ break; } /* Now receiving frame */ rx_buff->state = BEGIN_FRAME; rx_buff->in_frame = TRUE; /* Time to initialize receive buffer */ rx_buff->data = rx_buff->head; rx_buff->len = 0; rx_buff->fcs = INIT_FCS; } /* * Function async_unwrap_eof(dev, byte) * * Handle End Of Frame character received within a frame * */ static inline void async_unwrap_eof(struct net_device *dev, struct net_device_stats *stats, iobuff_t *rx_buff, __u8 byte) { #ifdef POSTPONE_RX_CRC int i; #endif switch(rx_buff->state) { case OUTSIDE_FRAME: /* Probably missed the BOF */ stats->rx_errors++; stats->rx_missed_errors++; irda_device_set_media_busy(dev, TRUE); break; case BEGIN_FRAME: case LINK_ESCAPE: case INSIDE_FRAME: default: /* 
Note : in the case of BEGIN_FRAME and LINK_ESCAPE, * the fcs will most likely not match and generate an * error, as expected - Jean II */ rx_buff->state = OUTSIDE_FRAME; rx_buff->in_frame = FALSE; #ifdef POSTPONE_RX_CRC /* If we haven't done the CRC as we receive bytes, we * must do it now... Jean II */ for(i = 0; i < rx_buff->len; i++) rx_buff->fcs = irda_fcs(rx_buff->fcs, rx_buff->data[i]); #endif /* Test FCS and signal success if the frame is good */ if (rx_buff->fcs == GOOD_FCS) { /* Deliver frame */ async_bump(dev, stats, rx_buff); break; } else { /* Wrong CRC, discard frame! */ irda_device_set_media_busy(dev, TRUE); IRDA_DEBUG(1, "%s(), crc error\n", __func__); stats->rx_errors++; stats->rx_crc_errors++; } break; } } /* * Function async_unwrap_ce(dev, byte) * * Handle Character Escape character received within a frame * */ static inline void async_unwrap_ce(struct net_device *dev, struct net_device_stats *stats, iobuff_t *rx_buff, __u8 byte) { switch(rx_buff->state) { case OUTSIDE_FRAME: /* Activate carrier sense */ irda_device_set_media_busy(dev, TRUE); break; case LINK_ESCAPE: IRDA_WARNING("%s: state not defined\n", __func__); break; case BEGIN_FRAME: case INSIDE_FRAME: default: /* Stuffed byte coming */ rx_buff->state = LINK_ESCAPE; break; } } /* * Function async_unwrap_other(dev, byte) * * Handle other characters received within a frame * */ static inline void async_unwrap_other(struct net_device *dev, struct net_device_stats *stats, iobuff_t *rx_buff, __u8 byte) { switch(rx_buff->state) { /* This is on the critical path, case are ordered by * probability (most frequent first) - Jean II */ case INSIDE_FRAME: /* Must be the next byte of the frame */ if (rx_buff->len < rx_buff->truesize) { rx_buff->data[rx_buff->len++] = byte; #ifndef POSTPONE_RX_CRC rx_buff->fcs = irda_fcs(rx_buff->fcs, byte); #endif } else { IRDA_DEBUG(1, "%s(), Rx buffer overflow, aborting\n", __func__); rx_buff->state = OUTSIDE_FRAME; } break; case LINK_ESCAPE: /* * Stuffed char, 
complement bit 5 of byte * following CE, IrLAP p.114 */ byte ^= IRDA_TRANS; if (rx_buff->len < rx_buff->truesize) { rx_buff->data[rx_buff->len++] = byte; #ifndef POSTPONE_RX_CRC rx_buff->fcs = irda_fcs(rx_buff->fcs, byte); #endif rx_buff->state = INSIDE_FRAME; } else { IRDA_DEBUG(1, "%s(), Rx buffer overflow, aborting\n", __func__); rx_buff->state = OUTSIDE_FRAME; } break; case OUTSIDE_FRAME: /* Activate carrier sense */ if(byte != XBOF) irda_device_set_media_busy(dev, TRUE); break; case BEGIN_FRAME: default: rx_buff->data[rx_buff->len++] = byte; #ifndef POSTPONE_RX_CRC rx_buff->fcs = irda_fcs(rx_buff->fcs, byte); #endif rx_buff->state = INSIDE_FRAME; break; } } /* * Function async_unwrap_char (dev, rx_buff, byte) * * Parse and de-stuff frame received from the IrDA-port * * This is the main entry point for SIR drivers. */ void async_unwrap_char(struct net_device *dev, struct net_device_stats *stats, iobuff_t *rx_buff, __u8 byte) { switch(byte) { case CE: async_unwrap_ce(dev, stats, rx_buff, byte); break; case BOF: async_unwrap_bof(dev, stats, rx_buff, byte); break; case EOF: async_unwrap_eof(dev, stats, rx_buff, byte); break; default: async_unwrap_other(dev, stats, rx_buff, byte); break; } } EXPORT_SYMBOL(async_unwrap_char);
gpl-2.0
ayushtyagi28/android_kernel_cyanogen_msm8994
drivers/usb/gadget/f_rmnet.c
217
40545
/* * Copyright (c) 2011-2015, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/slab.h> #include <linux/kernel.h> #include <linux/device.h> #include <linux/spinlock.h> #include <linux/usb_bam.h> #include "usb_gadget_xport.h" #include "u_ether.h" #include "u_rmnet.h" #include "gadget_chips.h" static unsigned int rmnet_dl_max_pkt_per_xfer = 7; module_param(rmnet_dl_max_pkt_per_xfer, uint, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(rmnet_dl_max_pkt_per_xfer, "Maximum packets per transfer for DL aggregation"); #define RMNET_NOTIFY_INTERVAL 5 #define RMNET_MAX_NOTIFY_SIZE sizeof(struct usb_cdc_notification) #define ACM_CTRL_DTR (1 << 0) /* TODO: use separate structures for data and * control paths */ struct f_rmnet { struct gether gether_port; struct grmnet port; int ifc_id; u8 port_num; atomic_t online; atomic_t ctrl_online; struct usb_composite_dev *cdev; spinlock_t lock; /* usb eps*/ struct usb_ep *notify; struct usb_request *notify_req; /* control info */ struct list_head cpkt_resp_q; unsigned long notify_count; unsigned long cpkts_len; const struct usb_endpoint_descriptor *in_ep_desc_backup; const struct usb_endpoint_descriptor *out_ep_desc_backup; }; static unsigned int nr_rmnet_ports; static unsigned int no_ctrl_smd_ports; static unsigned int no_ctrl_qti_ports; static unsigned int no_ctrl_hsic_ports; static unsigned int no_ctrl_hsuart_ports; static unsigned int no_rmnet_data_bam_ports; static unsigned int no_data_bam2bam_ports; static unsigned int no_data_hsic_ports; static unsigned int no_data_hsuart_ports; static 
struct rmnet_ports { enum transport_type data_xport; enum transport_type ctrl_xport; unsigned data_xport_num; unsigned ctrl_xport_num; unsigned port_num; struct f_rmnet *port; } rmnet_ports[NR_RMNET_PORTS]; static struct usb_interface_descriptor rmnet_interface_desc = { .bLength = USB_DT_INTERFACE_SIZE, .bDescriptorType = USB_DT_INTERFACE, .bNumEndpoints = 3, .bInterfaceClass = USB_CLASS_VENDOR_SPEC, .bInterfaceSubClass = USB_CLASS_VENDOR_SPEC, .bInterfaceProtocol = USB_CLASS_VENDOR_SPEC, /* .iInterface = DYNAMIC */ }; /* Full speed support */ static struct usb_endpoint_descriptor rmnet_fs_notify_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_IN, .bmAttributes = USB_ENDPOINT_XFER_INT, .wMaxPacketSize = __constant_cpu_to_le16(RMNET_MAX_NOTIFY_SIZE), .bInterval = 1 << RMNET_NOTIFY_INTERVAL, }; static struct usb_endpoint_descriptor rmnet_fs_in_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_IN, .bmAttributes = USB_ENDPOINT_XFER_BULK, .wMaxPacketSize = __constant_cpu_to_le16(64), }; static struct usb_endpoint_descriptor rmnet_fs_out_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_OUT, .bmAttributes = USB_ENDPOINT_XFER_BULK, .wMaxPacketSize = __constant_cpu_to_le16(64), }; static struct usb_descriptor_header *rmnet_fs_function[] = { (struct usb_descriptor_header *) &rmnet_interface_desc, (struct usb_descriptor_header *) &rmnet_fs_notify_desc, (struct usb_descriptor_header *) &rmnet_fs_in_desc, (struct usb_descriptor_header *) &rmnet_fs_out_desc, NULL, }; /* High speed support */ static struct usb_endpoint_descriptor rmnet_hs_notify_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_IN, .bmAttributes = USB_ENDPOINT_XFER_INT, .wMaxPacketSize = __constant_cpu_to_le16(RMNET_MAX_NOTIFY_SIZE), .bInterval = RMNET_NOTIFY_INTERVAL + 4, }; static struct 
usb_endpoint_descriptor rmnet_hs_in_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_IN, .bmAttributes = USB_ENDPOINT_XFER_BULK, .wMaxPacketSize = __constant_cpu_to_le16(512), }; static struct usb_endpoint_descriptor rmnet_hs_out_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_OUT, .bmAttributes = USB_ENDPOINT_XFER_BULK, .wMaxPacketSize = __constant_cpu_to_le16(512), }; static struct usb_descriptor_header *rmnet_hs_function[] = { (struct usb_descriptor_header *) &rmnet_interface_desc, (struct usb_descriptor_header *) &rmnet_hs_notify_desc, (struct usb_descriptor_header *) &rmnet_hs_in_desc, (struct usb_descriptor_header *) &rmnet_hs_out_desc, NULL, }; /* Super speed support */ static struct usb_endpoint_descriptor rmnet_ss_notify_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_IN, .bmAttributes = USB_ENDPOINT_XFER_INT, .wMaxPacketSize = __constant_cpu_to_le16(RMNET_MAX_NOTIFY_SIZE), .bInterval = RMNET_NOTIFY_INTERVAL + 4, }; static struct usb_ss_ep_comp_descriptor rmnet_ss_notify_comp_desc = { .bLength = sizeof rmnet_ss_notify_comp_desc, .bDescriptorType = USB_DT_SS_ENDPOINT_COMP, /* the following 3 values can be tweaked if necessary */ /* .bMaxBurst = 0, */ /* .bmAttributes = 0, */ .wBytesPerInterval = cpu_to_le16(RMNET_MAX_NOTIFY_SIZE), }; static struct usb_endpoint_descriptor rmnet_ss_in_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_IN, .bmAttributes = USB_ENDPOINT_XFER_BULK, .wMaxPacketSize = __constant_cpu_to_le16(1024), }; static struct usb_ss_ep_comp_descriptor rmnet_ss_in_comp_desc = { .bLength = sizeof rmnet_ss_in_comp_desc, .bDescriptorType = USB_DT_SS_ENDPOINT_COMP, /* the following 2 values can be tweaked if necessary */ /* .bMaxBurst = 0, */ /* .bmAttributes = 0, */ }; static struct usb_endpoint_descriptor rmnet_ss_out_desc 
= { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_OUT, .bmAttributes = USB_ENDPOINT_XFER_BULK, .wMaxPacketSize = __constant_cpu_to_le16(1024), }; static struct usb_ss_ep_comp_descriptor rmnet_ss_out_comp_desc = { .bLength = sizeof rmnet_ss_out_comp_desc, .bDescriptorType = USB_DT_SS_ENDPOINT_COMP, /* the following 2 values can be tweaked if necessary */ /* .bMaxBurst = 0, */ /* .bmAttributes = 0, */ }; static struct usb_descriptor_header *rmnet_ss_function[] = { (struct usb_descriptor_header *) &rmnet_interface_desc, (struct usb_descriptor_header *) &rmnet_ss_notify_desc, (struct usb_descriptor_header *) &rmnet_ss_notify_comp_desc, (struct usb_descriptor_header *) &rmnet_ss_in_desc, (struct usb_descriptor_header *) &rmnet_ss_in_comp_desc, (struct usb_descriptor_header *) &rmnet_ss_out_desc, (struct usb_descriptor_header *) &rmnet_ss_out_comp_desc, NULL, }; /* String descriptors */ static struct usb_string rmnet_string_defs[] = { [0].s = "RmNet", { } /* end of list */ }; static struct usb_gadget_strings rmnet_string_table = { .language = 0x0409, /* en-us */ .strings = rmnet_string_defs, }; static struct usb_gadget_strings *rmnet_strings[] = { &rmnet_string_table, NULL, }; static void frmnet_ctrl_response_available(struct f_rmnet *dev); /* ------- misc functions --------------------*/ static inline struct f_rmnet *func_to_rmnet(struct usb_function *f) { return container_of(f, struct f_rmnet, gether_port.func); } static inline struct f_rmnet *port_to_rmnet(struct grmnet *r) { return container_of(r, struct f_rmnet, port); } static struct usb_request * frmnet_alloc_req(struct usb_ep *ep, unsigned len, gfp_t flags) { struct usb_request *req; req = usb_ep_alloc_request(ep, flags); if (!req) return ERR_PTR(-ENOMEM); req->buf = kmalloc(len, flags); if (!req->buf) { usb_ep_free_request(ep, req); return ERR_PTR(-ENOMEM); } req->length = len; return req; } void frmnet_free_req(struct usb_ep *ep, struct usb_request *req) { 
kfree(req->buf); usb_ep_free_request(ep, req); } static struct rmnet_ctrl_pkt *rmnet_alloc_ctrl_pkt(unsigned len, gfp_t flags) { struct rmnet_ctrl_pkt *pkt; pkt = kzalloc(sizeof(struct rmnet_ctrl_pkt), flags); if (!pkt) return ERR_PTR(-ENOMEM); pkt->buf = kmalloc(len, flags); if (!pkt->buf) { kfree(pkt); return ERR_PTR(-ENOMEM); } pkt->len = len; return pkt; } static void rmnet_free_ctrl_pkt(struct rmnet_ctrl_pkt *pkt) { kfree(pkt->buf); kfree(pkt); } /* -------------------------------------------*/ static int rmnet_gport_setup(void) { int ret; int port_idx; int i; u8 base; pr_debug("%s: bam ports: %u bam2bam ports: %u data hsic ports: %u data hsuart ports: %u" " smd ports: %u ctrl hsic ports: %u ctrl hsuart ports: %u" " nr_rmnet_ports: %u\n", __func__, no_rmnet_data_bam_ports, no_data_bam2bam_ports, no_data_hsic_ports, no_data_hsuart_ports, no_ctrl_smd_ports, no_ctrl_hsic_ports, no_ctrl_hsuart_ports, nr_rmnet_ports); if (no_rmnet_data_bam_ports) { ret = gbam_setup(no_rmnet_data_bam_ports); if (ret < 0) return ret; } if (no_data_bam2bam_ports) { ret = gbam2bam_setup(no_data_bam2bam_ports); if (ret < 0) return ret; } if (no_ctrl_smd_ports) { ret = gsmd_ctrl_setup(FRMNET_CTRL_CLIENT, no_ctrl_smd_ports, &base); if (ret) return ret; for (i = 0; i < nr_rmnet_ports; i++) if (rmnet_ports[i].port) rmnet_ports[i].port->port_num += base; } if (no_data_hsic_ports) { port_idx = ghsic_data_setup(no_data_hsic_ports, USB_GADGET_RMNET); if (port_idx < 0) return port_idx; for (i = 0; i < nr_rmnet_ports; i++) { if (rmnet_ports[i].data_xport == USB_GADGET_XPORT_HSIC) { rmnet_ports[i].data_xport_num = port_idx; port_idx++; } } } if (no_ctrl_hsic_ports) { port_idx = ghsic_ctrl_setup(no_ctrl_hsic_ports, USB_GADGET_RMNET); if (port_idx < 0) return port_idx; for (i = 0; i < nr_rmnet_ports; i++) { if (rmnet_ports[i].ctrl_xport == USB_GADGET_XPORT_HSIC) { rmnet_ports[i].ctrl_xport_num = port_idx; port_idx++; } } } if (no_data_hsuart_ports) { port_idx = 
ghsuart_data_setup(no_data_hsuart_ports, USB_GADGET_RMNET); if (port_idx < 0) return port_idx; for (i = 0; i < nr_rmnet_ports; i++) { if (rmnet_ports[i].data_xport == USB_GADGET_XPORT_HSUART) { rmnet_ports[i].data_xport_num = port_idx; port_idx++; } } } if (no_ctrl_hsuart_ports) { port_idx = ghsuart_ctrl_setup(no_ctrl_hsuart_ports, USB_GADGET_RMNET); if (port_idx < 0) return port_idx; for (i = 0; i < nr_rmnet_ports; i++) { if (rmnet_ports[i].ctrl_xport == USB_GADGET_XPORT_HSUART) { rmnet_ports[i].ctrl_xport_num = port_idx; port_idx++; } } } return 0; } static int gport_rmnet_connect(struct f_rmnet *dev, unsigned intf) { int ret; unsigned port_num; enum transport_type cxport = rmnet_ports[dev->port_num].ctrl_xport; enum transport_type dxport = rmnet_ports[dev->port_num].data_xport; int src_connection_idx = 0, dst_connection_idx = 0; struct usb_gadget *gadget = dev->cdev->gadget; void *net; pr_debug("%s: ctrl xport: %s data xport: %s dev: %p portno: %d\n", __func__, xport_to_str(cxport), xport_to_str(dxport), dev, dev->port_num); port_num = rmnet_ports[dev->port_num].ctrl_xport_num; switch (cxport) { case USB_GADGET_XPORT_SMD: ret = gsmd_ctrl_connect(&dev->port, port_num); if (ret) { pr_err("%s: gsmd_ctrl_connect failed: err:%d\n", __func__, ret); return ret; } break; case USB_GADGET_XPORT_QTI: ret = gqti_ctrl_connect(&dev->port, port_num, dev->ifc_id, dxport, USB_GADGET_RMNET); if (ret) { pr_err("%s: gqti_ctrl_connect failed: err:%d\n", __func__, ret); return ret; } break; case USB_GADGET_XPORT_HSIC: ret = ghsic_ctrl_connect(&dev->port, port_num); if (ret) { pr_err("%s: ghsic_ctrl_connect failed: err:%d\n", __func__, ret); return ret; } break; case USB_GADGET_XPORT_HSUART: ret = ghsuart_ctrl_connect(&dev->port, port_num); if (ret) { pr_err("%s: ghsuart_ctrl_connect failed: err:%d\n", __func__, ret); return ret; } break; case USB_GADGET_XPORT_NONE: break; default: pr_err("%s: Un-supported transport: %s\n", __func__, xport_to_str(cxport)); return -ENODEV; } port_num = 
rmnet_ports[dev->port_num].data_xport_num; switch (dxport) { case USB_GADGET_XPORT_BAM2BAM: src_connection_idx = usb_bam_get_connection_idx(gadget->name, A2_P_BAM, USB_TO_PEER_PERIPHERAL, USB_BAM_DEVICE, port_num); dst_connection_idx = usb_bam_get_connection_idx(gadget->name, A2_P_BAM, PEER_PERIPHERAL_TO_USB, USB_BAM_DEVICE, port_num); if (dst_connection_idx < 0 || src_connection_idx < 0) { pr_err("%s: usb_bam_get_connection_idx failed\n", __func__); gsmd_ctrl_disconnect(&dev->port, port_num); return -EINVAL; } case USB_GADGET_XPORT_BAM: ret = gbam_connect(&dev->port, port_num, dxport, src_connection_idx, dst_connection_idx); if (ret) { pr_err("%s: gbam_connect failed: err:%d\n", __func__, ret); gsmd_ctrl_disconnect(&dev->port, port_num); return ret; } break; case USB_GADGET_XPORT_BAM2BAM_IPA: src_connection_idx = usb_bam_get_connection_idx(gadget->name, IPA_P_BAM, USB_TO_PEER_PERIPHERAL, USB_BAM_DEVICE, port_num); dst_connection_idx = usb_bam_get_connection_idx(gadget->name, IPA_P_BAM, PEER_PERIPHERAL_TO_USB, USB_BAM_DEVICE, port_num); if (dst_connection_idx < 0 || src_connection_idx < 0) { pr_err("%s: usb_bam_get_connection_idx failed\n", __func__); gsmd_ctrl_disconnect(&dev->port, port_num); return -EINVAL; } ret = gbam_connect(&dev->port, port_num, dxport, src_connection_idx, dst_connection_idx); if (ret) { pr_err("%s: gbam_connect failed: err:%d\n", __func__, ret); if (cxport == USB_GADGET_XPORT_QTI) gqti_ctrl_disconnect(&dev->port, port_num); else gsmd_ctrl_disconnect(&dev->port, port_num); return ret; } break; case USB_GADGET_XPORT_HSIC: ret = ghsic_data_connect(&dev->port, port_num); if (ret) { pr_err("%s: ghsic_data_connect failed: err:%d\n", __func__, ret); ghsic_ctrl_disconnect(&dev->port, port_num); return ret; } break; case USB_GADGET_XPORT_HSUART: ret = ghsuart_data_connect(&dev->port, port_num); if (ret) { pr_err("%s: ghsuart_data_connect failed: err:%d\n", __func__, ret); ghsuart_ctrl_disconnect(&dev->port, port_num); return ret; } break; case 
USB_GADGET_XPORT_ETHER: gether_enable_sg(&dev->gether_port, true); net = gether_connect(&dev->gether_port); if (IS_ERR(net)) { pr_err("%s: gether_connect failed: err:%ld\n", __func__, PTR_ERR(net)); if (cxport == USB_GADGET_XPORT_QTI) gqti_ctrl_disconnect(&dev->port, port_num); else gsmd_ctrl_disconnect(&dev->port, port_num); return PTR_ERR(net); } gether_update_dl_max_pkts_per_xfer(&dev->gether_port, rmnet_dl_max_pkt_per_xfer); gether_update_dl_max_xfer_size(&dev->gether_port, 16384); break; case USB_GADGET_XPORT_NONE: break; default: pr_err("%s: Un-supported transport: %s\n", __func__, xport_to_str(dxport)); return -ENODEV; } return 0; } static int gport_rmnet_disconnect(struct f_rmnet *dev) { unsigned port_num; enum transport_type cxport = rmnet_ports[dev->port_num].ctrl_xport; enum transport_type dxport = rmnet_ports[dev->port_num].data_xport; pr_debug("%s: ctrl xport: %s data xport: %s dev: %p portno: %d\n", __func__, xport_to_str(cxport), xport_to_str(dxport), dev, dev->port_num); port_num = rmnet_ports[dev->port_num].ctrl_xport_num; switch (cxport) { case USB_GADGET_XPORT_SMD: gsmd_ctrl_disconnect(&dev->port, port_num); break; case USB_GADGET_XPORT_QTI: gqti_ctrl_disconnect(&dev->port, port_num); break; case USB_GADGET_XPORT_HSIC: ghsic_ctrl_disconnect(&dev->port, port_num); break; case USB_GADGET_XPORT_HSUART: ghsuart_ctrl_disconnect(&dev->port, port_num); break; case USB_GADGET_XPORT_NONE: break; default: pr_err("%s: Un-supported transport: %s\n", __func__, xport_to_str(cxport)); return -ENODEV; } port_num = rmnet_ports[dev->port_num].data_xport_num; switch (dxport) { case USB_GADGET_XPORT_BAM: case USB_GADGET_XPORT_BAM2BAM: case USB_GADGET_XPORT_BAM2BAM_IPA: gbam_disconnect(&dev->port, port_num, dxport); break; case USB_GADGET_XPORT_HSIC: ghsic_data_disconnect(&dev->port, port_num); break; case USB_GADGET_XPORT_HSUART: ghsuart_data_disconnect(&dev->port, port_num); break; case USB_GADGET_XPORT_ETHER: gether_disconnect(&dev->gether_port); break; case 
USB_GADGET_XPORT_NONE: break; default: pr_err("%s: Un-supported transport: %s\n", __func__, xport_to_str(dxport)); return -ENODEV; } return 0; } static void frmnet_unbind(struct usb_configuration *c, struct usb_function *f) { struct f_rmnet *dev = func_to_rmnet(f); pr_debug("%s: portno:%d\n", __func__, dev->port_num); if (gadget_is_superspeed(c->cdev->gadget)) usb_free_descriptors(f->ss_descriptors); if (gadget_is_dualspeed(c->cdev->gadget)) usb_free_descriptors(f->hs_descriptors); usb_free_descriptors(f->fs_descriptors); frmnet_free_req(dev->notify, dev->notify_req); kfree(f->name); } static void frmnet_purge_responses(struct f_rmnet *dev) { unsigned long flags; struct rmnet_ctrl_pkt *cpkt; pr_debug("%s: port#%d\n", __func__, dev->port_num); spin_lock_irqsave(&dev->lock, flags); while (!list_empty(&dev->cpkt_resp_q)) { cpkt = list_first_entry(&dev->cpkt_resp_q, struct rmnet_ctrl_pkt, list); list_del(&cpkt->list); rmnet_free_ctrl_pkt(cpkt); } dev->notify_count = 0; spin_unlock_irqrestore(&dev->lock, flags); } static void frmnet_suspend(struct usb_function *f) { struct f_rmnet *dev = func_to_rmnet(f); unsigned port_num; enum transport_type dxport = rmnet_ports[dev->port_num].data_xport; bool remote_wakeup_allowed; if (f->config->cdev->gadget->speed == USB_SPEED_SUPER) remote_wakeup_allowed = f->func_wakeup_allowed; else remote_wakeup_allowed = f->config->cdev->gadget->remote_wakeup; pr_debug("%s: data xport: %s dev: %p portno: %d remote_wakeup: %d\n", __func__, xport_to_str(dxport), dev, dev->port_num, remote_wakeup_allowed); usb_ep_fifo_flush(dev->notify); frmnet_purge_responses(dev); port_num = rmnet_ports[dev->port_num].data_xport_num; switch (dxport) { case USB_GADGET_XPORT_BAM: break; case USB_GADGET_XPORT_BAM2BAM: case USB_GADGET_XPORT_BAM2BAM_IPA: if (remote_wakeup_allowed) { gbam_suspend(&dev->port, port_num, dxport); } else { /* * When remote wakeup is disabled, IPA is disconnected * because it cannot send new data until the USB bus is * resumed. 
Endpoint descriptors info is saved before it * gets reset by the BAM disconnect API. This lets us * restore this info when the USB bus is resumed. */ dev->in_ep_desc_backup = dev->port.in->desc; dev->out_ep_desc_backup = dev->port.out->desc; pr_debug("in_ep_desc_bkup = %p, out_ep_desc_bkup = %p", dev->in_ep_desc_backup, dev->out_ep_desc_backup); pr_debug("%s(): Disconnecting\n", __func__); gport_rmnet_disconnect(dev); } break; case USB_GADGET_XPORT_HSIC: break; case USB_GADGET_XPORT_HSUART: break; case USB_GADGET_XPORT_ETHER: break; case USB_GADGET_XPORT_NONE: break; default: pr_err("%s: Un-supported transport: %s\n", __func__, xport_to_str(dxport)); } } static void frmnet_resume(struct usb_function *f) { struct f_rmnet *dev = func_to_rmnet(f); unsigned port_num; enum transport_type dxport = rmnet_ports[dev->port_num].data_xport; int ret; bool remote_wakeup_allowed; if (f->config->cdev->gadget->speed == USB_SPEED_SUPER) remote_wakeup_allowed = f->func_wakeup_allowed; else remote_wakeup_allowed = f->config->cdev->gadget->remote_wakeup; pr_debug("%s: data xport: %s dev: %p portno: %d remote_wakeup: %d\n", __func__, xport_to_str(dxport), dev, dev->port_num, remote_wakeup_allowed); port_num = rmnet_ports[dev->port_num].data_xport_num; switch (dxport) { case USB_GADGET_XPORT_BAM: break; case USB_GADGET_XPORT_BAM2BAM: case USB_GADGET_XPORT_BAM2BAM_IPA: if (remote_wakeup_allowed) { gbam_resume(&dev->port, port_num, dxport); } else { dev->port.in->desc = dev->in_ep_desc_backup; dev->port.out->desc = dev->out_ep_desc_backup; pr_debug("%s(): Connecting\n", __func__); ret = gport_rmnet_connect(dev, dev->ifc_id); if (ret) { pr_err("%s: gport_rmnet_connect failed: err:%d\n", __func__, ret); } } break; case USB_GADGET_XPORT_HSIC: break; case USB_GADGET_XPORT_HSUART: break; case USB_GADGET_XPORT_ETHER: break; case USB_GADGET_XPORT_NONE: break; default: pr_err("%s: Un-supported transport: %s\n", __func__, xport_to_str(dxport)); } } static void frmnet_disable(struct usb_function 
*f) { struct f_rmnet *dev = func_to_rmnet(f); enum transport_type dxport = rmnet_ports[dev->port_num].data_xport; struct usb_composite_dev *cdev = dev->cdev; pr_debug("%s: port#%d\n", __func__, dev->port_num); usb_ep_disable(dev->notify); dev->notify->driver_data = NULL; atomic_set(&dev->online, 0); frmnet_purge_responses(dev); if (dxport == USB_GADGET_XPORT_BAM2BAM_IPA && gadget_is_dwc3(cdev->gadget)) { msm_ep_unconfig(dev->port.out); msm_ep_unconfig(dev->port.in); } gport_rmnet_disconnect(dev); } static int frmnet_set_alt(struct usb_function *f, unsigned intf, unsigned alt) { struct f_rmnet *dev = func_to_rmnet(f); enum transport_type dxport = rmnet_ports[dev->port_num].data_xport; struct usb_composite_dev *cdev = dev->cdev; int ret; struct list_head *cpkt; pr_debug("%s:dev:%p port#%d\n", __func__, dev, dev->port_num); if (dev->notify->driver_data) { pr_debug("%s: reset port:%d\n", __func__, dev->port_num); usb_ep_disable(dev->notify); } ret = config_ep_by_speed(cdev->gadget, f, dev->notify); if (ret) { dev->notify->desc = NULL; ERROR(cdev, "config_ep_by_speed failes for ep %s, result %d\n", dev->notify->name, ret); return ret; } ret = usb_ep_enable(dev->notify); if (ret) { pr_err("%s: usb ep#%s enable failed, err#%d\n", __func__, dev->notify->name, ret); return ret; } dev->notify->driver_data = dev; if (!dev->port.in->desc || !dev->port.out->desc) { if (config_ep_by_speed(cdev->gadget, f, dev->port.in) || config_ep_by_speed(cdev->gadget, f, dev->port.out)) { dev->port.in->desc = NULL; dev->port.out->desc = NULL; return -EINVAL; } dev->port.gadget = dev->cdev->gadget; ret = gport_rmnet_connect(dev, intf); } if (dxport == USB_GADGET_XPORT_BAM2BAM_IPA && gadget_is_dwc3(cdev->gadget)) { if (msm_ep_config(dev->port.in) || msm_ep_config(dev->port.out)) { pr_err("%s: msm_ep_config failed\n", __func__); return -EINVAL; } } else pr_debug("Rmnet is being used with non DWC3 core\n"); atomic_set(&dev->online, 1); /* In case notifications were aborted, but there are pending 
control packets in the response queue, re-add the notifications */ list_for_each(cpkt, &dev->cpkt_resp_q) frmnet_ctrl_response_available(dev); return ret; } static void frmnet_ctrl_response_available(struct f_rmnet *dev) { struct usb_request *req = dev->notify_req; struct usb_cdc_notification *event; unsigned long flags; int ret; struct rmnet_ctrl_pkt *cpkt; pr_debug("%s:dev:%p portno#%d\n", __func__, dev, dev->port_num); spin_lock_irqsave(&dev->lock, flags); if (!atomic_read(&dev->online) || !req || !req->buf) { spin_unlock_irqrestore(&dev->lock, flags); return; } if (++dev->notify_count != 1) { spin_unlock_irqrestore(&dev->lock, flags); return; } event = req->buf; event->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE; event->bNotificationType = USB_CDC_NOTIFY_RESPONSE_AVAILABLE; event->wValue = cpu_to_le16(0); event->wIndex = cpu_to_le16(dev->ifc_id); event->wLength = cpu_to_le16(0); spin_unlock_irqrestore(&dev->lock, flags); ret = usb_ep_queue(dev->notify, dev->notify_req, GFP_ATOMIC); if (ret) { spin_lock_irqsave(&dev->lock, flags); if (!list_empty(&dev->cpkt_resp_q)) { if (dev->notify_count > 0) dev->notify_count--; else { pr_debug("%s: Invalid notify_count=%lu to decrement\n", __func__, dev->notify_count); spin_unlock_irqrestore(&dev->lock, flags); return; } cpkt = list_first_entry(&dev->cpkt_resp_q, struct rmnet_ctrl_pkt, list); list_del(&cpkt->list); rmnet_free_ctrl_pkt(cpkt); } spin_unlock_irqrestore(&dev->lock, flags); pr_debug("ep enqueue error %d\n", ret); } } static void frmnet_connect(struct grmnet *gr) { struct f_rmnet *dev; if (!gr) { pr_err("%s: Invalid grmnet:%p\n", __func__, gr); return; } dev = port_to_rmnet(gr); atomic_set(&dev->ctrl_online, 1); } static void frmnet_disconnect(struct grmnet *gr) { struct f_rmnet *dev; struct usb_cdc_notification *event; int status; if (!gr) { pr_err("%s: Invalid grmnet:%p\n", __func__, gr); return; } dev = port_to_rmnet(gr); atomic_set(&dev->ctrl_online, 0); if (!atomic_read(&dev->online)) { 
pr_debug("%s: nothing to do\n", __func__); return; } usb_ep_fifo_flush(dev->notify); event = dev->notify_req->buf; event->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE; event->bNotificationType = USB_CDC_NOTIFY_NETWORK_CONNECTION; event->wValue = cpu_to_le16(0); event->wIndex = cpu_to_le16(dev->ifc_id); event->wLength = cpu_to_le16(0); status = usb_ep_queue(dev->notify, dev->notify_req, GFP_ATOMIC); if (status < 0) { if (!atomic_read(&dev->online)) return; pr_err("%s: rmnet notify ep enqueue error %d\n", __func__, status); } frmnet_purge_responses(dev); } static int frmnet_send_cpkt_response(void *gr, void *buf, size_t len) { struct f_rmnet *dev; struct rmnet_ctrl_pkt *cpkt; unsigned long flags; if (!gr || !buf) { pr_err("%s: Invalid grmnet/buf, grmnet:%p buf:%p\n", __func__, gr, buf); return -ENODEV; } cpkt = rmnet_alloc_ctrl_pkt(len, GFP_ATOMIC); if (IS_ERR(cpkt)) { pr_err("%s: Unable to allocate ctrl pkt\n", __func__); return -ENOMEM; } memcpy(cpkt->buf, buf, len); cpkt->len = len; dev = port_to_rmnet(gr); pr_debug("%s: dev:%p port#%d\n", __func__, dev, dev->port_num); if (!atomic_read(&dev->online) || !atomic_read(&dev->ctrl_online)) { rmnet_free_ctrl_pkt(cpkt); return 0; } spin_lock_irqsave(&dev->lock, flags); list_add_tail(&cpkt->list, &dev->cpkt_resp_q); spin_unlock_irqrestore(&dev->lock, flags); frmnet_ctrl_response_available(dev); return 0; } static void frmnet_cmd_complete(struct usb_ep *ep, struct usb_request *req) { struct f_rmnet *dev = req->context; struct usb_composite_dev *cdev; unsigned port_num; if (!dev) { pr_err("%s: rmnet dev is null\n", __func__); return; } pr_debug("%s: dev:%p port#%d\n", __func__, dev, dev->port_num); cdev = dev->cdev; if (dev->port.send_encap_cmd) { port_num = rmnet_ports[dev->port_num].ctrl_xport_num; dev->port.send_encap_cmd(port_num, req->buf, req->actual); } } static void frmnet_notify_complete(struct usb_ep *ep, struct usb_request *req) { struct f_rmnet *dev = req->context; int status = req->status; 
unsigned long flags; struct rmnet_ctrl_pkt *cpkt; pr_debug("%s: dev:%p port#%d\n", __func__, dev, dev->port_num); switch (status) { case -ECONNRESET: case -ESHUTDOWN: /* connection gone */ spin_lock_irqsave(&dev->lock, flags); dev->notify_count = 0; spin_unlock_irqrestore(&dev->lock, flags); break; default: pr_err("rmnet notify ep error %d\n", status); /* FALLTHROUGH */ case 0: if (!atomic_read(&dev->ctrl_online)) break; spin_lock_irqsave(&dev->lock, flags); if (dev->notify_count > 0) { dev->notify_count--; if (dev->notify_count == 0) { spin_unlock_irqrestore(&dev->lock, flags); break; } } else { pr_debug("%s: Invalid notify_count=%lu to decrement\n", __func__, dev->notify_count); spin_unlock_irqrestore(&dev->lock, flags); break; } spin_unlock_irqrestore(&dev->lock, flags); status = usb_ep_queue(dev->notify, req, GFP_ATOMIC); if (status) { spin_lock_irqsave(&dev->lock, flags); if (!list_empty(&dev->cpkt_resp_q)) { if (dev->notify_count > 0) dev->notify_count--; else { pr_err("%s: Invalid notify_count=%lu to decrement\n", __func__, dev->notify_count); spin_unlock_irqrestore(&dev->lock, flags); break; } cpkt = list_first_entry(&dev->cpkt_resp_q, struct rmnet_ctrl_pkt, list); list_del(&cpkt->list); rmnet_free_ctrl_pkt(cpkt); } spin_unlock_irqrestore(&dev->lock, flags); pr_debug("ep enqueue error %d\n", status); } break; } } static int frmnet_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl) { struct f_rmnet *dev = func_to_rmnet(f); struct usb_composite_dev *cdev = dev->cdev; struct usb_request *req = cdev->req; unsigned port_num; u16 w_index = le16_to_cpu(ctrl->wIndex); u16 w_value = le16_to_cpu(ctrl->wValue); u16 w_length = le16_to_cpu(ctrl->wLength); int ret = -EOPNOTSUPP; pr_debug("%s:dev:%p port#%d\n", __func__, dev, dev->port_num); if (!atomic_read(&dev->online)) { pr_warning("%s: usb cable is not connected\n", __func__); return -ENOTCONN; } switch ((ctrl->bRequestType << 8) | ctrl->bRequest) { case ((USB_DIR_OUT | USB_TYPE_CLASS | 
USB_RECIP_INTERFACE) << 8) | USB_CDC_SEND_ENCAPSULATED_COMMAND: pr_debug("%s: USB_CDC_SEND_ENCAPSULATED_COMMAND\n" , __func__); ret = w_length; req->complete = frmnet_cmd_complete; req->context = dev; break; case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8) | USB_CDC_GET_ENCAPSULATED_RESPONSE: pr_debug("%s: USB_CDC_GET_ENCAPSULATED_RESPONSE\n", __func__); if (w_value) { pr_err("%s: invalid w_value = %04x\n", __func__ , w_value); goto invalid; } else { unsigned len; struct rmnet_ctrl_pkt *cpkt; spin_lock(&dev->lock); if (list_empty(&dev->cpkt_resp_q)) { pr_err("ctrl resp queue empty " " req%02x.%02x v%04x i%04x l%d\n", ctrl->bRequestType, ctrl->bRequest, w_value, w_index, w_length); ret = 0; spin_unlock(&dev->lock); goto invalid; } cpkt = list_first_entry(&dev->cpkt_resp_q, struct rmnet_ctrl_pkt, list); list_del(&cpkt->list); spin_unlock(&dev->lock); len = min_t(unsigned, w_length, cpkt->len); memcpy(req->buf, cpkt->buf, len); ret = len; rmnet_free_ctrl_pkt(cpkt); } break; case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8) | USB_CDC_REQ_SET_CONTROL_LINE_STATE: pr_debug("%s: USB_CDC_REQ_SET_CONTROL_LINE_STATE: DTR:%d\n", __func__, w_value & ACM_CTRL_DTR ? 1 : 0); if (dev->port.notify_modem) { port_num = rmnet_ports[dev->port_num].ctrl_xport_num; dev->port.notify_modem(&dev->port, port_num, w_value); } ret = 0; break; default: invalid: DBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n", ctrl->bRequestType, ctrl->bRequest, w_value, w_index, w_length); } /* respond with data transfer or status phase? 
*/ if (ret >= 0) { VDBG(cdev, "rmnet req%02x.%02x v%04x i%04x l%d\n", ctrl->bRequestType, ctrl->bRequest, w_value, w_index, w_length); req->zero = (ret < w_length); req->length = ret; ret = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC); if (ret < 0) ERROR(cdev, "rmnet ep0 enqueue err %d\n", ret); } return ret; } static int frmnet_bind(struct usb_configuration *c, struct usb_function *f) { struct f_rmnet *dev = func_to_rmnet(f); struct usb_ep *ep; struct usb_composite_dev *cdev = c->cdev; int ret = -ENODEV; pr_debug("%s: start binding\n", __func__); dev->ifc_id = usb_interface_id(c, f); if (dev->ifc_id < 0) { pr_err("%s: unable to allocate ifc id, err:%d\n", __func__, dev->ifc_id); return dev->ifc_id; } rmnet_interface_desc.bInterfaceNumber = dev->ifc_id; ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_in_desc); if (!ep) { pr_err("%s: usb epin autoconfig failed\n", __func__); return -ENODEV; } dev->port.in = ep; /* Update same for u_ether which uses gether port struct */ dev->gether_port.in_ep = ep; ep->driver_data = cdev; ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_out_desc); if (!ep) { pr_err("%s: usb epout autoconfig failed\n", __func__); ret = -ENODEV; goto ep_auto_out_fail; } dev->port.out = ep; /* Update same for u_ether which uses gether port struct */ dev->gether_port.out_ep = ep; ep->driver_data = cdev; ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_notify_desc); if (!ep) { pr_err("%s: usb epnotify autoconfig failed\n", __func__); ret = -ENODEV; goto ep_auto_notify_fail; } dev->notify = ep; ep->driver_data = cdev; dev->notify_req = frmnet_alloc_req(ep, sizeof(struct usb_cdc_notification), GFP_KERNEL); if (IS_ERR(dev->notify_req)) { pr_err("%s: unable to allocate memory for notify req\n", __func__); ret = -ENOMEM; goto ep_notify_alloc_fail; } dev->notify_req->complete = frmnet_notify_complete; dev->notify_req->context = dev; ret = -ENOMEM; f->fs_descriptors = usb_copy_descriptors(rmnet_fs_function); if (!f->fs_descriptors) { pr_err("%s: no 
descriptors,usb_copy descriptors(fs)failed\n", __func__); goto fail; } if (gadget_is_dualspeed(cdev->gadget)) { rmnet_hs_in_desc.bEndpointAddress = rmnet_fs_in_desc.bEndpointAddress; rmnet_hs_out_desc.bEndpointAddress = rmnet_fs_out_desc.bEndpointAddress; rmnet_hs_notify_desc.bEndpointAddress = rmnet_fs_notify_desc.bEndpointAddress; /* copy descriptors, and track endpoint copies */ f->hs_descriptors = usb_copy_descriptors(rmnet_hs_function); if (!f->hs_descriptors) { pr_err("%s: no hs_descriptors,usb_copy descriptors(hs)failed\n", __func__); goto fail; } } if (gadget_is_superspeed(cdev->gadget)) { rmnet_ss_in_desc.bEndpointAddress = rmnet_fs_in_desc.bEndpointAddress; rmnet_ss_out_desc.bEndpointAddress = rmnet_fs_out_desc.bEndpointAddress; rmnet_ss_notify_desc.bEndpointAddress = rmnet_fs_notify_desc.bEndpointAddress; /* copy descriptors, and track endpoint copies */ f->ss_descriptors = usb_copy_descriptors(rmnet_ss_function); if (!f->ss_descriptors) { pr_err("%s: no ss_descriptors,usb_copy descriptors(ss)failed\n", __func__); goto fail; } } pr_debug("%s: RmNet(%d) %s Speed, IN:%s OUT:%s\n", __func__, dev->port_num, gadget_is_dualspeed(cdev->gadget) ? 
"dual" : "full", dev->port.in->name, dev->port.out->name); return 0; fail: if (f->ss_descriptors) usb_free_descriptors(f->ss_descriptors); if (f->hs_descriptors) usb_free_descriptors(f->hs_descriptors); if (f->fs_descriptors) usb_free_descriptors(f->fs_descriptors); if (dev->notify_req) frmnet_free_req(dev->notify, dev->notify_req); ep_notify_alloc_fail: dev->notify->driver_data = NULL; dev->notify = NULL; ep_auto_notify_fail: dev->port.out->driver_data = NULL; dev->port.out = NULL; ep_auto_out_fail: dev->port.in->driver_data = NULL; dev->port.in = NULL; return ret; } static int frmnet_bind_config(struct usb_configuration *c, unsigned portno) { int status; struct f_rmnet *dev; struct usb_function *f; unsigned long flags; pr_debug("%s: usb config:%p\n", __func__, c); if (portno >= nr_rmnet_ports) { pr_err("%s: supporting ports#%u port_id:%u\n", __func__, nr_rmnet_ports, portno); return -ENODEV; } dev = rmnet_ports[portno].port; if (rmnet_ports[portno].data_xport == USB_GADGET_XPORT_ETHER) { struct eth_dev *edev = gether_setup_name(c->cdev->gadget, NULL, "usb_rmnet"); if (IS_ERR(edev)) { pr_err("%s: gether_setup failed\n", __func__); return PTR_ERR(edev); } dev->gether_port.ioport = edev; } if (rmnet_string_defs[0].id == 0) { status = usb_string_id(c->cdev); if (status < 0) { pr_err("%s: failed to get string id, err:%d\n", __func__, status); return status; } rmnet_string_defs[0].id = status; } spin_lock_irqsave(&dev->lock, flags); dev->cdev = c->cdev; f = &dev->gether_port.func; f->name = kasprintf(GFP_ATOMIC, "rmnet%d", portno); spin_unlock_irqrestore(&dev->lock, flags); if (!f->name) { pr_err("%s: cannot allocate memory for name\n", __func__); return -ENOMEM; } f->strings = rmnet_strings; f->bind = frmnet_bind; f->unbind = frmnet_unbind; f->disable = frmnet_disable; f->set_alt = frmnet_set_alt; f->setup = frmnet_setup; f->suspend = frmnet_suspend; f->resume = frmnet_resume; dev->port.send_cpkt_response = frmnet_send_cpkt_response; dev->port.disconnect = 
frmnet_disconnect; dev->port.connect = frmnet_connect; dev->gether_port.cdc_filter = 0; status = usb_add_function(c, f); if (status) { pr_err("%s: usb add function failed: %d\n", __func__, status); kfree(f->name); return status; } pr_debug("%s: complete\n", __func__); return status; } static void frmnet_unbind_config(void) { int i; for (i = 0; i < nr_rmnet_ports; i++) if (rmnet_ports[i].data_xport == USB_GADGET_XPORT_ETHER) { gether_cleanup(rmnet_ports[i].port->gether_port.ioport); rmnet_ports[i].port->gether_port.ioport = NULL; } } static void frmnet_cleanup(void) { int i; for (i = 0; i < nr_rmnet_ports; i++) kfree(rmnet_ports[i].port); gbam_cleanup(); nr_rmnet_ports = 0; no_ctrl_smd_ports = 0; no_ctrl_qti_ports = 0; no_rmnet_data_bam_ports = 0; no_data_bam2bam_ports = 0; no_ctrl_hsic_ports = 0; no_data_hsic_ports = 0; no_ctrl_hsuart_ports = 0; no_data_hsuart_ports = 0; } static int frmnet_init_port(const char *ctrl_name, const char *data_name, const char *port_name) { struct f_rmnet *dev; struct rmnet_ports *rmnet_port; int ret; int i; if (nr_rmnet_ports >= NR_RMNET_PORTS) { pr_err("%s: Max-%d instances supported\n", __func__, NR_RMNET_PORTS); return -EINVAL; } pr_debug("%s: port#:%d, ctrl port: %s data port: %s\n", __func__, nr_rmnet_ports, ctrl_name, data_name); dev = kzalloc(sizeof(struct f_rmnet), GFP_KERNEL); if (!dev) { pr_err("%s: Unable to allocate rmnet device\n", __func__); return -ENOMEM; } dev->port_num = nr_rmnet_ports; spin_lock_init(&dev->lock); INIT_LIST_HEAD(&dev->cpkt_resp_q); rmnet_port = &rmnet_ports[nr_rmnet_ports]; rmnet_port->port = dev; rmnet_port->port_num = nr_rmnet_ports; rmnet_port->ctrl_xport = str_to_xport(ctrl_name); rmnet_port->data_xport = str_to_xport(data_name); switch (rmnet_port->ctrl_xport) { case USB_GADGET_XPORT_SMD: rmnet_port->ctrl_xport_num = no_ctrl_smd_ports; no_ctrl_smd_ports++; break; case USB_GADGET_XPORT_QTI: rmnet_port->ctrl_xport_num = no_ctrl_qti_ports; no_ctrl_qti_ports++; break; case USB_GADGET_XPORT_HSIC: 
ghsic_ctrl_set_port_name(port_name, ctrl_name); rmnet_port->ctrl_xport_num = no_ctrl_hsic_ports; no_ctrl_hsic_ports++; break; case USB_GADGET_XPORT_HSUART: rmnet_port->ctrl_xport_num = no_ctrl_hsuart_ports; no_ctrl_hsuart_ports++; break; case USB_GADGET_XPORT_NONE: break; default: pr_err("%s: Un-supported transport: %u\n", __func__, rmnet_port->ctrl_xport); ret = -ENODEV; goto fail_probe; } switch (rmnet_port->data_xport) { case USB_GADGET_XPORT_BAM: rmnet_port->data_xport_num = no_rmnet_data_bam_ports; no_rmnet_data_bam_ports++; break; case USB_GADGET_XPORT_BAM2BAM: case USB_GADGET_XPORT_BAM2BAM_IPA: rmnet_port->data_xport_num = no_data_bam2bam_ports; no_data_bam2bam_ports++; break; case USB_GADGET_XPORT_HSIC: ghsic_data_set_port_name(port_name, data_name); rmnet_port->data_xport_num = no_data_hsic_ports; no_data_hsic_ports++; break; case USB_GADGET_XPORT_HSUART: rmnet_port->data_xport_num = no_data_hsuart_ports; no_data_hsuart_ports++; break; case USB_GADGET_XPORT_ETHER: case USB_GADGET_XPORT_NONE: break; default: pr_err("%s: Un-supported transport: %u\n", __func__, rmnet_port->data_xport); ret = -ENODEV; goto fail_probe; } nr_rmnet_ports++; return 0; fail_probe: for (i = 0; i < nr_rmnet_ports; i++) kfree(rmnet_ports[i].port); nr_rmnet_ports = 0; no_ctrl_smd_ports = 0; no_ctrl_qti_ports = 0; no_rmnet_data_bam_ports = 0; no_ctrl_hsic_ports = 0; no_data_hsic_ports = 0; no_ctrl_hsuart_ports = 0; no_data_hsuart_ports = 0; return ret; }
gpl-2.0
progmanos/WX_435_Kernel-Tickerguy
drivers/xen/sys-hypervisor.c
473
9301
/* * copyright (c) 2006 IBM Corporation * Authored by: Mike D. Day <ncmike@us.ibm.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/kobject.h> #include <asm/xen/hypervisor.h> #include <asm/xen/hypercall.h> #include <xen/xenbus.h> #include <xen/interface/xen.h> #include <xen/interface/version.h> #define HYPERVISOR_ATTR_RO(_name) \ static struct hyp_sysfs_attr _name##_attr = __ATTR_RO(_name) #define HYPERVISOR_ATTR_RW(_name) \ static struct hyp_sysfs_attr _name##_attr = \ __ATTR(_name, 0644, _name##_show, _name##_store) struct hyp_sysfs_attr { struct attribute attr; ssize_t (*show)(struct hyp_sysfs_attr *, char *); ssize_t (*store)(struct hyp_sysfs_attr *, const char *, size_t); void *hyp_attr_data; }; static ssize_t type_show(struct hyp_sysfs_attr *attr, char *buffer) { return sprintf(buffer, "xen\n"); } HYPERVISOR_ATTR_RO(type); static int __init xen_sysfs_type_init(void) { return sysfs_create_file(hypervisor_kobj, &type_attr.attr); } static void xen_sysfs_type_destroy(void) { sysfs_remove_file(hypervisor_kobj, &type_attr.attr); } /* xen version attributes */ static ssize_t major_show(struct hyp_sysfs_attr *attr, char *buffer) { int version = HYPERVISOR_xen_version(XENVER_version, NULL); if (version) return sprintf(buffer, "%d\n", version >> 16); return -ENODEV; } HYPERVISOR_ATTR_RO(major); static ssize_t minor_show(struct hyp_sysfs_attr *attr, char *buffer) { int version = HYPERVISOR_xen_version(XENVER_version, NULL); if (version) return sprintf(buffer, "%d\n", version & 0xff); return -ENODEV; } HYPERVISOR_ATTR_RO(minor); static ssize_t extra_show(struct hyp_sysfs_attr *attr, char *buffer) { int ret = -ENOMEM; char *extra; extra = kmalloc(XEN_EXTRAVERSION_LEN, GFP_KERNEL); if (extra) { ret = HYPERVISOR_xen_version(XENVER_extraversion, extra); if (!ret) ret 
= sprintf(buffer, "%s\n", extra); kfree(extra); } return ret; } HYPERVISOR_ATTR_RO(extra); static struct attribute *version_attrs[] = { &major_attr.attr, &minor_attr.attr, &extra_attr.attr, NULL }; static struct attribute_group version_group = { .name = "version", .attrs = version_attrs, }; static int __init xen_sysfs_version_init(void) { return sysfs_create_group(hypervisor_kobj, &version_group); } static void xen_sysfs_version_destroy(void) { sysfs_remove_group(hypervisor_kobj, &version_group); } /* UUID */ static ssize_t uuid_show(struct hyp_sysfs_attr *attr, char *buffer) { char *vm, *val; int ret; extern int xenstored_ready; if (!xenstored_ready) return -EBUSY; vm = xenbus_read(XBT_NIL, "vm", "", NULL); if (IS_ERR(vm)) return PTR_ERR(vm); val = xenbus_read(XBT_NIL, vm, "uuid", NULL); kfree(vm); if (IS_ERR(val)) return PTR_ERR(val); ret = sprintf(buffer, "%s\n", val); kfree(val); return ret; } HYPERVISOR_ATTR_RO(uuid); static int __init xen_sysfs_uuid_init(void) { return sysfs_create_file(hypervisor_kobj, &uuid_attr.attr); } static void xen_sysfs_uuid_destroy(void) { sysfs_remove_file(hypervisor_kobj, &uuid_attr.attr); } /* xen compilation attributes */ static ssize_t compiler_show(struct hyp_sysfs_attr *attr, char *buffer) { int ret = -ENOMEM; struct xen_compile_info *info; info = kmalloc(sizeof(struct xen_compile_info), GFP_KERNEL); if (info) { ret = HYPERVISOR_xen_version(XENVER_compile_info, info); if (!ret) ret = sprintf(buffer, "%s\n", info->compiler); kfree(info); } return ret; } HYPERVISOR_ATTR_RO(compiler); static ssize_t compiled_by_show(struct hyp_sysfs_attr *attr, char *buffer) { int ret = -ENOMEM; struct xen_compile_info *info; info = kmalloc(sizeof(struct xen_compile_info), GFP_KERNEL); if (info) { ret = HYPERVISOR_xen_version(XENVER_compile_info, info); if (!ret) ret = sprintf(buffer, "%s\n", info->compile_by); kfree(info); } return ret; } HYPERVISOR_ATTR_RO(compiled_by); static ssize_t compile_date_show(struct hyp_sysfs_attr *attr, char *buffer) 
{ int ret = -ENOMEM; struct xen_compile_info *info; info = kmalloc(sizeof(struct xen_compile_info), GFP_KERNEL); if (info) { ret = HYPERVISOR_xen_version(XENVER_compile_info, info); if (!ret) ret = sprintf(buffer, "%s\n", info->compile_date); kfree(info); } return ret; } HYPERVISOR_ATTR_RO(compile_date); static struct attribute *xen_compile_attrs[] = { &compiler_attr.attr, &compiled_by_attr.attr, &compile_date_attr.attr, NULL }; static struct attribute_group xen_compilation_group = { .name = "compilation", .attrs = xen_compile_attrs, }; int __init static xen_compilation_init(void) { return sysfs_create_group(hypervisor_kobj, &xen_compilation_group); } static void xen_compilation_destroy(void) { sysfs_remove_group(hypervisor_kobj, &xen_compilation_group); } /* xen properties info */ static ssize_t capabilities_show(struct hyp_sysfs_attr *attr, char *buffer) { int ret = -ENOMEM; char *caps; caps = kmalloc(XEN_CAPABILITIES_INFO_LEN, GFP_KERNEL); if (caps) { ret = HYPERVISOR_xen_version(XENVER_capabilities, caps); if (!ret) ret = sprintf(buffer, "%s\n", caps); kfree(caps); } return ret; } HYPERVISOR_ATTR_RO(capabilities); static ssize_t changeset_show(struct hyp_sysfs_attr *attr, char *buffer) { int ret = -ENOMEM; char *cset; cset = kmalloc(XEN_CHANGESET_INFO_LEN, GFP_KERNEL); if (cset) { ret = HYPERVISOR_xen_version(XENVER_changeset, cset); if (!ret) ret = sprintf(buffer, "%s\n", cset); kfree(cset); } return ret; } HYPERVISOR_ATTR_RO(changeset); static ssize_t virtual_start_show(struct hyp_sysfs_attr *attr, char *buffer) { int ret = -ENOMEM; struct xen_platform_parameters *parms; parms = kmalloc(sizeof(struct xen_platform_parameters), GFP_KERNEL); if (parms) { ret = HYPERVISOR_xen_version(XENVER_platform_parameters, parms); if (!ret) ret = sprintf(buffer, "%lx\n", parms->virt_start); kfree(parms); } return ret; } HYPERVISOR_ATTR_RO(virtual_start); static ssize_t pagesize_show(struct hyp_sysfs_attr *attr, char *buffer) { int ret; ret = 
HYPERVISOR_xen_version(XENVER_pagesize, NULL); if (ret > 0) ret = sprintf(buffer, "%x\n", ret); return ret; } HYPERVISOR_ATTR_RO(pagesize); static ssize_t xen_feature_show(int index, char *buffer) { ssize_t ret; struct xen_feature_info info; info.submap_idx = index; ret = HYPERVISOR_xen_version(XENVER_get_features, &info); if (!ret) ret = sprintf(buffer, "%08x", info.submap); return ret; } static ssize_t features_show(struct hyp_sysfs_attr *attr, char *buffer) { ssize_t len; int i; len = 0; for (i = XENFEAT_NR_SUBMAPS-1; i >= 0; i--) { int ret = xen_feature_show(i, buffer + len); if (ret < 0) { if (len == 0) len = ret; break; } len += ret; } if (len > 0) buffer[len++] = '\n'; return len; } HYPERVISOR_ATTR_RO(features); static struct attribute *xen_properties_attrs[] = { &capabilities_attr.attr, &changeset_attr.attr, &virtual_start_attr.attr, &pagesize_attr.attr, &features_attr.attr, NULL }; static struct attribute_group xen_properties_group = { .name = "properties", .attrs = xen_properties_attrs, }; static int __init xen_properties_init(void) { return sysfs_create_group(hypervisor_kobj, &xen_properties_group); } static void xen_properties_destroy(void) { sysfs_remove_group(hypervisor_kobj, &xen_properties_group); } static int __init hyper_sysfs_init(void) { int ret; if (!xen_domain()) return -ENODEV; ret = xen_sysfs_type_init(); if (ret) goto out; ret = xen_sysfs_version_init(); if (ret) goto version_out; ret = xen_compilation_init(); if (ret) goto comp_out; ret = xen_sysfs_uuid_init(); if (ret) goto uuid_out; ret = xen_properties_init(); if (ret) goto prop_out; goto out; prop_out: xen_sysfs_uuid_destroy(); uuid_out: xen_compilation_destroy(); comp_out: xen_sysfs_version_destroy(); version_out: xen_sysfs_type_destroy(); out: return ret; } static void __exit hyper_sysfs_exit(void) { xen_properties_destroy(); xen_compilation_destroy(); xen_sysfs_uuid_destroy(); xen_sysfs_version_destroy(); xen_sysfs_type_destroy(); } module_init(hyper_sysfs_init); 
module_exit(hyper_sysfs_exit); static ssize_t hyp_sysfs_show(struct kobject *kobj, struct attribute *attr, char *buffer) { struct hyp_sysfs_attr *hyp_attr; hyp_attr = container_of(attr, struct hyp_sysfs_attr, attr); if (hyp_attr->show) return hyp_attr->show(hyp_attr, buffer); return 0; } static ssize_t hyp_sysfs_store(struct kobject *kobj, struct attribute *attr, const char *buffer, size_t len) { struct hyp_sysfs_attr *hyp_attr; hyp_attr = container_of(attr, struct hyp_sysfs_attr, attr); if (hyp_attr->store) return hyp_attr->store(hyp_attr, buffer, len); return 0; } static struct sysfs_ops hyp_sysfs_ops = { .show = hyp_sysfs_show, .store = hyp_sysfs_store, }; static struct kobj_type hyp_sysfs_kobj_type = { .sysfs_ops = &hyp_sysfs_ops, }; static int __init hypervisor_subsys_init(void) { if (!xen_domain()) return -ENODEV; hypervisor_kobj->ktype = &hyp_sysfs_kobj_type; return 0; } device_initcall(hypervisor_subsys_init);
gpl-2.0
alexax66/kernel_samsung_a3xelte
crypto/crypto_null.c
729
4493
/* * Cryptographic API. * * Null algorithms, aka Much Ado About Nothing. * * These are needed for IPsec, and may be useful in general for * testing & debugging. * * The null cipher is compliant with RFC2410. * * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * */ #include <crypto/internal/hash.h> #include <crypto/internal/skcipher.h> #include <linux/init.h> #include <linux/module.h> #include <linux/mm.h> #include <linux/string.h> #define NULL_KEY_SIZE 0 #define NULL_BLOCK_SIZE 1 #define NULL_DIGEST_SIZE 0 #define NULL_IV_SIZE 0 static int null_compress(struct crypto_tfm *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen) { if (slen > *dlen) return -EINVAL; memcpy(dst, src, slen); *dlen = slen; return 0; } static int null_init(struct shash_desc *desc) { return 0; } static int null_update(struct shash_desc *desc, const u8 *data, unsigned int len) { return 0; } static int null_final(struct shash_desc *desc, u8 *out) { return 0; } static int null_digest(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out) { return 0; } static int null_hash_setkey(struct crypto_shash *tfm, const u8 *key, unsigned int keylen) { return 0; } static int null_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) { return 0; } static void null_crypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { memcpy(dst, src, NULL_BLOCK_SIZE); } static int skcipher_null_crypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { struct blkcipher_walk walk; int err; blkcipher_walk_init(&walk, dst, src, nbytes); err = blkcipher_walk_virt(desc, &walk); while (walk.nbytes) { if (walk.src.virt.addr != walk.dst.virt.addr) memcpy(walk.dst.virt.addr, 
walk.src.virt.addr, walk.nbytes); err = blkcipher_walk_done(desc, &walk, 0); } return err; } static struct shash_alg digest_null = { .digestsize = NULL_DIGEST_SIZE, .setkey = null_hash_setkey, .init = null_init, .update = null_update, .finup = null_digest, .digest = null_digest, .final = null_final, .base = { .cra_name = "digest_null", .cra_flags = CRYPTO_ALG_TYPE_SHASH, .cra_blocksize = NULL_BLOCK_SIZE, .cra_module = THIS_MODULE, } }; static struct crypto_alg null_algs[3] = { { .cra_name = "cipher_null", .cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_blocksize = NULL_BLOCK_SIZE, .cra_ctxsize = 0, .cra_module = THIS_MODULE, .cra_u = { .cipher = { .cia_min_keysize = NULL_KEY_SIZE, .cia_max_keysize = NULL_KEY_SIZE, .cia_setkey = null_setkey, .cia_encrypt = null_crypt, .cia_decrypt = null_crypt } } }, { .cra_name = "ecb(cipher_null)", .cra_driver_name = "ecb-cipher_null", .cra_priority = 100, .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, .cra_blocksize = NULL_BLOCK_SIZE, .cra_type = &crypto_blkcipher_type, .cra_ctxsize = 0, .cra_module = THIS_MODULE, .cra_u = { .blkcipher = { .min_keysize = NULL_KEY_SIZE, .max_keysize = NULL_KEY_SIZE, .ivsize = NULL_IV_SIZE, .setkey = null_setkey, .encrypt = skcipher_null_crypt, .decrypt = skcipher_null_crypt } } }, { .cra_name = "compress_null", .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, .cra_blocksize = NULL_BLOCK_SIZE, .cra_ctxsize = 0, .cra_module = THIS_MODULE, .cra_u = { .compress = { .coa_compress = null_compress, .coa_decompress = null_compress } } } }; MODULE_ALIAS_CRYPTO("compress_null"); MODULE_ALIAS_CRYPTO("digest_null"); MODULE_ALIAS_CRYPTO("cipher_null"); static int __init crypto_null_mod_init(void) { int ret = 0; ret = crypto_register_algs(null_algs, ARRAY_SIZE(null_algs)); if (ret < 0) goto out; ret = crypto_register_shash(&digest_null); if (ret < 0) goto out_unregister_algs; return 0; out_unregister_algs: crypto_unregister_algs(null_algs, ARRAY_SIZE(null_algs)); out: return ret; } static void __exit crypto_null_mod_fini(void) { 
crypto_unregister_shash(&digest_null); crypto_unregister_algs(null_algs, ARRAY_SIZE(null_algs)); } module_init(crypto_null_mod_init); module_exit(crypto_null_mod_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Null Cryptographic Algorithms");
gpl-2.0
vmayoral/ubuntu-utopic
arch/cris/arch-v32/kernel/irq.c
985
12698
/* * Copyright (C) 2003, Axis Communications AB. */ #include <asm/irq.h> #include <linux/irq.h> #include <linux/interrupt.h> #include <linux/smp.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/profile.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/threads.h> #include <linux/spinlock.h> #include <linux/kernel_stat.h> #include <hwregs/reg_map.h> #include <hwregs/reg_rdwr.h> #include <hwregs/intr_vect.h> #include <hwregs/intr_vect_defs.h> #define CPU_FIXED -1 /* IRQ masks (refer to comment for crisv32_do_multiple) */ #if TIMER0_INTR_VECT - FIRST_IRQ < 32 #define TIMER_MASK (1 << (TIMER0_INTR_VECT - FIRST_IRQ)) #undef TIMER_VECT1 #else #define TIMER_MASK (1 << (TIMER0_INTR_VECT - FIRST_IRQ - 32)) #define TIMER_VECT1 #endif #ifdef CONFIG_ETRAX_KGDB #if defined(CONFIG_ETRAX_KGDB_PORT0) #define IGNOREMASK (1 << (SER0_INTR_VECT - FIRST_IRQ)) #elif defined(CONFIG_ETRAX_KGDB_PORT1) #define IGNOREMASK (1 << (SER1_INTR_VECT - FIRST_IRQ)) #elif defined(CONFIG_ETRAX_KGB_PORT2) #define IGNOREMASK (1 << (SER2_INTR_VECT - FIRST_IRQ)) #elif defined(CONFIG_ETRAX_KGDB_PORT3) #define IGNOREMASK (1 << (SER3_INTR_VECT - FIRST_IRQ)) #endif #endif DEFINE_SPINLOCK(irq_lock); struct cris_irq_allocation { int cpu; /* The CPU to which the IRQ is currently allocated. */ cpumask_t mask; /* The CPUs to which the IRQ may be allocated. */ }; struct cris_irq_allocation irq_allocations[NR_REAL_IRQS] = { [0 ... NR_REAL_IRQS - 1] = {0, CPU_MASK_ALL} }; static unsigned long irq_regs[NR_CPUS] = { regi_irq, #ifdef CONFIG_SMP regi_irq2, #endif }; #if NR_REAL_IRQS > 32 #define NBR_REGS 2 #else #define NBR_REGS 1 #endif unsigned long cpu_irq_counters[NR_CPUS]; unsigned long irq_counters[NR_REAL_IRQS]; /* From irq.c. */ extern void weird_irq(void); /* From entry.S. 
*/ extern void system_call(void); extern void nmi_interrupt(void); extern void multiple_interrupt(void); extern void gdb_handle_exception(void); extern void i_mmu_refill(void); extern void i_mmu_invalid(void); extern void i_mmu_access(void); extern void i_mmu_execute(void); extern void d_mmu_refill(void); extern void d_mmu_invalid(void); extern void d_mmu_access(void); extern void d_mmu_write(void); /* From kgdb.c. */ extern void kgdb_init(void); extern void breakpoint(void); /* From traps.c. */ extern void breakh_BUG(void); /* * Build the IRQ handler stubs using macros from irq.h. */ #ifdef CONFIG_CRIS_MACH_ARTPEC3 BUILD_TIMER_IRQ(0x31, 0) #else BUILD_IRQ(0x31) #endif BUILD_IRQ(0x32) BUILD_IRQ(0x33) BUILD_IRQ(0x34) BUILD_IRQ(0x35) BUILD_IRQ(0x36) BUILD_IRQ(0x37) BUILD_IRQ(0x38) BUILD_IRQ(0x39) BUILD_IRQ(0x3a) BUILD_IRQ(0x3b) BUILD_IRQ(0x3c) BUILD_IRQ(0x3d) BUILD_IRQ(0x3e) BUILD_IRQ(0x3f) BUILD_IRQ(0x40) BUILD_IRQ(0x41) BUILD_IRQ(0x42) BUILD_IRQ(0x43) BUILD_IRQ(0x44) BUILD_IRQ(0x45) BUILD_IRQ(0x46) BUILD_IRQ(0x47) BUILD_IRQ(0x48) BUILD_IRQ(0x49) BUILD_IRQ(0x4a) #ifdef CONFIG_ETRAXFS BUILD_TIMER_IRQ(0x4b, 0) #else BUILD_IRQ(0x4b) #endif BUILD_IRQ(0x4c) BUILD_IRQ(0x4d) BUILD_IRQ(0x4e) BUILD_IRQ(0x4f) BUILD_IRQ(0x50) #if MACH_IRQS > 32 BUILD_IRQ(0x51) BUILD_IRQ(0x52) BUILD_IRQ(0x53) BUILD_IRQ(0x54) BUILD_IRQ(0x55) BUILD_IRQ(0x56) BUILD_IRQ(0x57) BUILD_IRQ(0x58) BUILD_IRQ(0x59) BUILD_IRQ(0x5a) BUILD_IRQ(0x5b) BUILD_IRQ(0x5c) BUILD_IRQ(0x5d) BUILD_IRQ(0x5e) BUILD_IRQ(0x5f) BUILD_IRQ(0x60) BUILD_IRQ(0x61) BUILD_IRQ(0x62) BUILD_IRQ(0x63) BUILD_IRQ(0x64) BUILD_IRQ(0x65) BUILD_IRQ(0x66) BUILD_IRQ(0x67) BUILD_IRQ(0x68) BUILD_IRQ(0x69) BUILD_IRQ(0x6a) BUILD_IRQ(0x6b) BUILD_IRQ(0x6c) BUILD_IRQ(0x6d) BUILD_IRQ(0x6e) BUILD_IRQ(0x6f) BUILD_IRQ(0x70) #endif /* Pointers to the low-level handlers. 
*/ static void (*interrupt[MACH_IRQS])(void) = { IRQ0x31_interrupt, IRQ0x32_interrupt, IRQ0x33_interrupt, IRQ0x34_interrupt, IRQ0x35_interrupt, IRQ0x36_interrupt, IRQ0x37_interrupt, IRQ0x38_interrupt, IRQ0x39_interrupt, IRQ0x3a_interrupt, IRQ0x3b_interrupt, IRQ0x3c_interrupt, IRQ0x3d_interrupt, IRQ0x3e_interrupt, IRQ0x3f_interrupt, IRQ0x40_interrupt, IRQ0x41_interrupt, IRQ0x42_interrupt, IRQ0x43_interrupt, IRQ0x44_interrupt, IRQ0x45_interrupt, IRQ0x46_interrupt, IRQ0x47_interrupt, IRQ0x48_interrupt, IRQ0x49_interrupt, IRQ0x4a_interrupt, IRQ0x4b_interrupt, IRQ0x4c_interrupt, IRQ0x4d_interrupt, IRQ0x4e_interrupt, IRQ0x4f_interrupt, IRQ0x50_interrupt, #if MACH_IRQS > 32 IRQ0x51_interrupt, IRQ0x52_interrupt, IRQ0x53_interrupt, IRQ0x54_interrupt, IRQ0x55_interrupt, IRQ0x56_interrupt, IRQ0x57_interrupt, IRQ0x58_interrupt, IRQ0x59_interrupt, IRQ0x5a_interrupt, IRQ0x5b_interrupt, IRQ0x5c_interrupt, IRQ0x5d_interrupt, IRQ0x5e_interrupt, IRQ0x5f_interrupt, IRQ0x60_interrupt, IRQ0x61_interrupt, IRQ0x62_interrupt, IRQ0x63_interrupt, IRQ0x64_interrupt, IRQ0x65_interrupt, IRQ0x66_interrupt, IRQ0x67_interrupt, IRQ0x68_interrupt, IRQ0x69_interrupt, IRQ0x6a_interrupt, IRQ0x6b_interrupt, IRQ0x6c_interrupt, IRQ0x6d_interrupt, IRQ0x6e_interrupt, IRQ0x6f_interrupt, IRQ0x70_interrupt, #endif }; void block_irq(int irq, int cpu) { int intr_mask; unsigned long flags; spin_lock_irqsave(&irq_lock, flags); /* Remember, 1 let thru, 0 block. 
*/ if (irq - FIRST_IRQ < 32) { intr_mask = REG_RD_INT_VECT(intr_vect, irq_regs[cpu], rw_mask, 0); intr_mask &= ~(1 << (irq - FIRST_IRQ)); REG_WR_INT_VECT(intr_vect, irq_regs[cpu], rw_mask, 0, intr_mask); } else { intr_mask = REG_RD_INT_VECT(intr_vect, irq_regs[cpu], rw_mask, 1); intr_mask &= ~(1 << (irq - FIRST_IRQ - 32)); REG_WR_INT_VECT(intr_vect, irq_regs[cpu], rw_mask, 1, intr_mask); } spin_unlock_irqrestore(&irq_lock, flags); } void unblock_irq(int irq, int cpu) { int intr_mask; unsigned long flags; spin_lock_irqsave(&irq_lock, flags); /* Remember, 1 let thru, 0 block. */ if (irq - FIRST_IRQ < 32) { intr_mask = REG_RD_INT_VECT(intr_vect, irq_regs[cpu], rw_mask, 0); intr_mask |= (1 << (irq - FIRST_IRQ)); REG_WR_INT_VECT(intr_vect, irq_regs[cpu], rw_mask, 0, intr_mask); } else { intr_mask = REG_RD_INT_VECT(intr_vect, irq_regs[cpu], rw_mask, 1); intr_mask |= (1 << (irq - FIRST_IRQ - 32)); REG_WR_INT_VECT(intr_vect, irq_regs[cpu], rw_mask, 1, intr_mask); } spin_unlock_irqrestore(&irq_lock, flags); } /* Find out which CPU the irq should be allocated to. */ static int irq_cpu(int irq) { int cpu; unsigned long flags; spin_lock_irqsave(&irq_lock, flags); cpu = irq_allocations[irq - FIRST_IRQ].cpu; /* Fixed interrupts stay on the local CPU. */ if (cpu == CPU_FIXED) { spin_unlock_irqrestore(&irq_lock, flags); return smp_processor_id(); } /* Let the interrupt stay if possible */ if (cpumask_test_cpu(cpu, &irq_allocations[irq - FIRST_IRQ].mask)) goto out; /* IRQ must be moved to another CPU. 
*/ cpu = cpumask_first(&irq_allocations[irq - FIRST_IRQ].mask); irq_allocations[irq - FIRST_IRQ].cpu = cpu; out: spin_unlock_irqrestore(&irq_lock, flags); return cpu; } void crisv32_mask_irq(int irq) { int cpu; for (cpu = 0; cpu < NR_CPUS; cpu++) block_irq(irq, cpu); } void crisv32_unmask_irq(int irq) { unblock_irq(irq, irq_cpu(irq)); } static void enable_crisv32_irq(struct irq_data *data) { crisv32_unmask_irq(data->irq); } static void disable_crisv32_irq(struct irq_data *data) { crisv32_mask_irq(data->irq); } static int set_affinity_crisv32_irq(struct irq_data *data, const struct cpumask *dest, bool force) { unsigned long flags; spin_lock_irqsave(&irq_lock, flags); irq_allocations[data->irq - FIRST_IRQ].mask = *dest; spin_unlock_irqrestore(&irq_lock, flags); return 0; } static struct irq_chip crisv32_irq_type = { .name = "CRISv32", .irq_shutdown = disable_crisv32_irq, .irq_enable = enable_crisv32_irq, .irq_disable = disable_crisv32_irq, .irq_set_affinity = set_affinity_crisv32_irq, }; void set_exception_vector(int n, irqvectptr addr) { etrax_irv->v[n] = (irqvectptr) addr; } extern void do_IRQ(int irq, struct pt_regs * regs); void crisv32_do_IRQ(int irq, int block, struct pt_regs* regs) { /* Interrupts that may not be moved to another CPU may * skip blocking. This is currently only valid for the * timer IRQ and the IPI and is used for the timer * interrupt to avoid watchdog starvation. */ if (!block) { do_IRQ(irq, regs); return; } block_irq(irq, smp_processor_id()); do_IRQ(irq, regs); unblock_irq(irq, irq_cpu(irq)); } /* If multiple interrupts occur simultaneously we get a multiple * interrupt from the CPU and software has to sort out which * interrupts that happened. There are two special cases here: * * 1. Timer interrupts may never be blocked because of the * watchdog (refer to comment in include/asr/arch/irq.h) * 2. 
GDB serial port IRQs are unhandled here and will be handled * as a single IRQ when it strikes again because the GDB * stubb wants to save the registers in its own fashion. */ void crisv32_do_multiple(struct pt_regs* regs) { int cpu; int mask; int masked[NBR_REGS]; int bit; int i; cpu = smp_processor_id(); /* An extra irq_enter here to prevent softIRQs to run after * each do_IRQ. This will decrease the interrupt latency. */ irq_enter(); for (i = 0; i < NBR_REGS; i++) { /* Get which IRQs that happened. */ masked[i] = REG_RD_INT_VECT(intr_vect, irq_regs[cpu], r_masked_vect, i); /* Calculate new IRQ mask with these IRQs disabled. */ mask = REG_RD_INT_VECT(intr_vect, irq_regs[cpu], rw_mask, i); mask &= ~masked[i]; /* Timer IRQ is never masked */ #ifdef TIMER_VECT1 if ((i == 1) && (masked[0] & TIMER_MASK)) mask |= TIMER_MASK; #else if ((i == 0) && (masked[0] & TIMER_MASK)) mask |= TIMER_MASK; #endif /* Block all the IRQs */ REG_WR_INT_VECT(intr_vect, irq_regs[cpu], rw_mask, i, mask); /* Check for timer IRQ and handle it special. */ #ifdef TIMER_VECT1 if ((i == 1) && (masked[i] & TIMER_MASK)) { masked[i] &= ~TIMER_MASK; do_IRQ(TIMER0_INTR_VECT, regs); } #else if ((i == 0) && (masked[i] & TIMER_MASK)) { masked[i] &= ~TIMER_MASK; do_IRQ(TIMER0_INTR_VECT, regs); } #endif } #ifdef IGNORE_MASK /* Remove IRQs that can't be handled as multiple. */ masked[0] &= ~IGNORE_MASK; #endif /* Handle the rest of the IRQs. */ for (i = 0; i < NBR_REGS; i++) { for (bit = 0; bit < 32; bit++) { if (masked[i] & (1 << bit)) do_IRQ(bit + FIRST_IRQ + i*32, regs); } } /* Unblock all the IRQs. */ for (i = 0; i < NBR_REGS; i++) { mask = REG_RD_INT_VECT(intr_vect, irq_regs[cpu], rw_mask, i); mask |= masked[i]; REG_WR_INT_VECT(intr_vect, irq_regs[cpu], rw_mask, i, mask); } /* This irq_exit() will trigger the soft IRQs. */ irq_exit(); } /* * This is called by start_kernel. It fixes the IRQ masks and setup the * interrupt vector table to point to bad_interrupt pointers. 
*/ void __init init_IRQ(void) { int i; int j; reg_intr_vect_rw_mask vect_mask = {0}; /* Clear all interrupts masks. */ for (i = 0; i < NBR_REGS; i++) REG_WR_VECT(intr_vect, regi_irq, rw_mask, i, vect_mask); for (i = 0; i < 256; i++) etrax_irv->v[i] = weird_irq; /* Point all IRQ's to bad handlers. */ for (i = FIRST_IRQ, j = 0; j < NR_IRQS; i++, j++) { irq_set_chip_and_handler(j, &crisv32_irq_type, handle_simple_irq); set_exception_vector(i, interrupt[j]); } /* Mark Timer and IPI IRQs as CPU local */ irq_allocations[TIMER0_INTR_VECT - FIRST_IRQ].cpu = CPU_FIXED; irq_set_status_flags(TIMER0_INTR_VECT, IRQ_PER_CPU); irq_allocations[IPI_INTR_VECT - FIRST_IRQ].cpu = CPU_FIXED; irq_set_status_flags(IPI_INTR_VECT, IRQ_PER_CPU); set_exception_vector(0x00, nmi_interrupt); set_exception_vector(0x30, multiple_interrupt); /* Set up handler for various MMU bus faults. */ set_exception_vector(0x04, i_mmu_refill); set_exception_vector(0x05, i_mmu_invalid); set_exception_vector(0x06, i_mmu_access); set_exception_vector(0x07, i_mmu_execute); set_exception_vector(0x08, d_mmu_refill); set_exception_vector(0x09, d_mmu_invalid); set_exception_vector(0x0a, d_mmu_access); set_exception_vector(0x0b, d_mmu_write); #ifdef CONFIG_BUG /* Break 14 handler, used to implement cheap BUG(). */ set_exception_vector(0x1e, breakh_BUG); #endif /* The system-call trap is reached by "break 13". */ set_exception_vector(0x1d, system_call); /* Exception handlers for debugging, both user-mode and kernel-mode. */ /* Break 8. */ set_exception_vector(0x18, gdb_handle_exception); /* Hardware single step. */ set_exception_vector(0x3, gdb_handle_exception); /* Hardware breakpoint. */ set_exception_vector(0xc, gdb_handle_exception); #ifdef CONFIG_ETRAX_KGDB kgdb_init(); /* Everything is set up; now trap the kernel. */ breakpoint(); #endif }
gpl-2.0
koquantam/android_kernel_oc_vivalto3gvn
arch/arm/mach-omap2/cm33xx.c
2265
10440
/* * AM33XX CM functions * * Copyright (C) 2011-2012 Texas Instruments Incorporated - http://www.ti.com/ * Vaibhav Hiremath <hvaibhav@ti.com> * * Reference taken from from OMAP4 cminst44xx.c * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation version 2. * * This program is distributed "as is" WITHOUT ANY WARRANTY of any * kind, whether express or implied; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/err.h> #include <linux/io.h> #include "clockdomain.h" #include "cm.h" #include "cm33xx.h" #include "cm-regbits-34xx.h" #include "cm-regbits-33xx.h" #include "prm33xx.h" /* * CLKCTRL_IDLEST_*: possible values for the CM_*_CLKCTRL.IDLEST bitfield: * * 0x0 func: Module is fully functional, including OCP * 0x1 trans: Module is performing transition: wakeup, or sleep, or sleep * abortion * 0x2 idle: Module is in Idle mode (only OCP part). 
It is functional if * using separate functional clock * 0x3 disabled: Module is disabled and cannot be accessed * */ #define CLKCTRL_IDLEST_FUNCTIONAL 0x0 #define CLKCTRL_IDLEST_INTRANSITION 0x1 #define CLKCTRL_IDLEST_INTERFACE_IDLE 0x2 #define CLKCTRL_IDLEST_DISABLED 0x3 /* Private functions */ /* Read a register in a CM instance */ static inline u32 am33xx_cm_read_reg(s16 inst, u16 idx) { return __raw_readl(cm_base + inst + idx); } /* Write into a register in a CM */ static inline void am33xx_cm_write_reg(u32 val, s16 inst, u16 idx) { __raw_writel(val, cm_base + inst + idx); } /* Read-modify-write a register in CM */ static inline u32 am33xx_cm_rmw_reg_bits(u32 mask, u32 bits, s16 inst, s16 idx) { u32 v; v = am33xx_cm_read_reg(inst, idx); v &= ~mask; v |= bits; am33xx_cm_write_reg(v, inst, idx); return v; } static inline u32 am33xx_cm_set_reg_bits(u32 bits, s16 inst, s16 idx) { return am33xx_cm_rmw_reg_bits(bits, bits, inst, idx); } static inline u32 am33xx_cm_clear_reg_bits(u32 bits, s16 inst, s16 idx) { return am33xx_cm_rmw_reg_bits(bits, 0x0, inst, idx); } static inline u32 am33xx_cm_read_reg_bits(u16 inst, s16 idx, u32 mask) { u32 v; v = am33xx_cm_read_reg(inst, idx); v &= mask; v >>= __ffs(mask); return v; } /** * _clkctrl_idlest - read a CM_*_CLKCTRL register; mask & shift IDLEST bitfield * @inst: CM instance register offset (*_INST macro) * @cdoffs: Clockdomain register offset (*_CDOFFS macro) * @clkctrl_offs: Module clock control register offset (*_CLKCTRL macro) * * Return the IDLEST bitfield of a CM_*_CLKCTRL register, shifted down to * bit 0. */ static u32 _clkctrl_idlest(u16 inst, s16 cdoffs, u16 clkctrl_offs) { u32 v = am33xx_cm_read_reg(inst, clkctrl_offs); v &= AM33XX_IDLEST_MASK; v >>= AM33XX_IDLEST_SHIFT; return v; } /** * _is_module_ready - can module registers be accessed without causing an abort? 
* @inst: CM instance register offset (*_INST macro) * @cdoffs: Clockdomain register offset (*_CDOFFS macro) * @clkctrl_offs: Module clock control register offset (*_CLKCTRL macro) * * Returns true if the module's CM_*_CLKCTRL.IDLEST bitfield is either * *FUNCTIONAL or *INTERFACE_IDLE; false otherwise. */ static bool _is_module_ready(u16 inst, s16 cdoffs, u16 clkctrl_offs) { u32 v; v = _clkctrl_idlest(inst, cdoffs, clkctrl_offs); return (v == CLKCTRL_IDLEST_FUNCTIONAL || v == CLKCTRL_IDLEST_INTERFACE_IDLE) ? true : false; } /** * _clktrctrl_write - write @c to a CM_CLKSTCTRL.CLKTRCTRL register bitfield * @c: CLKTRCTRL register bitfield (LSB = bit 0, i.e., unshifted) * @inst: CM instance register offset (*_INST macro) * @cdoffs: Clockdomain register offset (*_CDOFFS macro) * * @c must be the unshifted value for CLKTRCTRL - i.e., this function * will handle the shift itself. */ static void _clktrctrl_write(u8 c, s16 inst, u16 cdoffs) { u32 v; v = am33xx_cm_read_reg(inst, cdoffs); v &= ~AM33XX_CLKTRCTRL_MASK; v |= c << AM33XX_CLKTRCTRL_SHIFT; am33xx_cm_write_reg(v, inst, cdoffs); } /* Public functions */ /** * am33xx_cm_is_clkdm_in_hwsup - is a clockdomain in hwsup idle mode? * @inst: CM instance register offset (*_INST macro) * @cdoffs: Clockdomain register offset (*_CDOFFS macro) * * Returns true if the clockdomain referred to by (@inst, @cdoffs) * is in hardware-supervised idle mode, or 0 otherwise. */ bool am33xx_cm_is_clkdm_in_hwsup(s16 inst, u16 cdoffs) { u32 v; v = am33xx_cm_read_reg(inst, cdoffs); v &= AM33XX_CLKTRCTRL_MASK; v >>= AM33XX_CLKTRCTRL_SHIFT; return (v == OMAP34XX_CLKSTCTRL_ENABLE_AUTO) ? true : false; } /** * am33xx_cm_clkdm_enable_hwsup - put a clockdomain in hwsup-idle mode * @inst: CM instance register offset (*_INST macro) * @cdoffs: Clockdomain register offset (*_CDOFFS macro) * * Put a clockdomain referred to by (@inst, @cdoffs) into * hardware-supervised idle mode. No return value. 
*/ void am33xx_cm_clkdm_enable_hwsup(s16 inst, u16 cdoffs) { _clktrctrl_write(OMAP34XX_CLKSTCTRL_ENABLE_AUTO, inst, cdoffs); } /** * am33xx_cm_clkdm_disable_hwsup - put a clockdomain in swsup-idle mode * @inst: CM instance register offset (*_INST macro) * @cdoffs: Clockdomain register offset (*_CDOFFS macro) * * Put a clockdomain referred to by (@inst, @cdoffs) into * software-supervised idle mode, i.e., controlled manually by the * Linux OMAP clockdomain code. No return value. */ void am33xx_cm_clkdm_disable_hwsup(s16 inst, u16 cdoffs) { _clktrctrl_write(OMAP34XX_CLKSTCTRL_DISABLE_AUTO, inst, cdoffs); } /** * am33xx_cm_clkdm_force_sleep - try to put a clockdomain into idle * @inst: CM instance register offset (*_INST macro) * @cdoffs: Clockdomain register offset (*_CDOFFS macro) * * Put a clockdomain referred to by (@inst, @cdoffs) into idle * No return value. */ void am33xx_cm_clkdm_force_sleep(s16 inst, u16 cdoffs) { _clktrctrl_write(OMAP34XX_CLKSTCTRL_FORCE_SLEEP, inst, cdoffs); } /** * am33xx_cm_clkdm_force_wakeup - try to take a clockdomain out of idle * @inst: CM instance register offset (*_INST macro) * @cdoffs: Clockdomain register offset (*_CDOFFS macro) * * Take a clockdomain referred to by (@inst, @cdoffs) out of idle, * waking it up. No return value. */ void am33xx_cm_clkdm_force_wakeup(s16 inst, u16 cdoffs) { _clktrctrl_write(OMAP34XX_CLKSTCTRL_FORCE_WAKEUP, inst, cdoffs); } /* * */ /** * am33xx_cm_wait_module_ready - wait for a module to be in 'func' state * @inst: CM instance register offset (*_INST macro) * @cdoffs: Clockdomain register offset (*_CDOFFS macro) * @clkctrl_offs: Module clock control register offset (*_CLKCTRL macro) * * Wait for the module IDLEST to be functional. 
If the idle state is in any * the non functional state (trans, idle or disabled), module and thus the * sysconfig cannot be accessed and will probably lead to an "imprecise * external abort" */ int am33xx_cm_wait_module_ready(u16 inst, s16 cdoffs, u16 clkctrl_offs) { int i = 0; omap_test_timeout(_is_module_ready(inst, cdoffs, clkctrl_offs), MAX_MODULE_READY_TIME, i); return (i < MAX_MODULE_READY_TIME) ? 0 : -EBUSY; } /** * am33xx_cm_wait_module_idle - wait for a module to be in 'disabled' * state * @inst: CM instance register offset (*_INST macro) * @cdoffs: Clockdomain register offset (*_CDOFFS macro) * @clkctrl_offs: Module clock control register offset (*_CLKCTRL macro) * * Wait for the module IDLEST to be disabled. Some PRCM transition, * like reset assertion or parent clock de-activation must wait the * module to be fully disabled. */ int am33xx_cm_wait_module_idle(u16 inst, s16 cdoffs, u16 clkctrl_offs) { int i = 0; if (!clkctrl_offs) return 0; omap_test_timeout((_clkctrl_idlest(inst, cdoffs, clkctrl_offs) == CLKCTRL_IDLEST_DISABLED), MAX_MODULE_READY_TIME, i); return (i < MAX_MODULE_READY_TIME) ? 0 : -EBUSY; } /** * am33xx_cm_module_enable - Enable the modulemode inside CLKCTRL * @mode: Module mode (SW or HW) * @inst: CM instance register offset (*_INST macro) * @cdoffs: Clockdomain register offset (*_CDOFFS macro) * @clkctrl_offs: Module clock control register offset (*_CLKCTRL macro) * * No return value. */ void am33xx_cm_module_enable(u8 mode, u16 inst, s16 cdoffs, u16 clkctrl_offs) { u32 v; v = am33xx_cm_read_reg(inst, clkctrl_offs); v &= ~AM33XX_MODULEMODE_MASK; v |= mode << AM33XX_MODULEMODE_SHIFT; am33xx_cm_write_reg(v, inst, clkctrl_offs); } /** * am33xx_cm_module_disable - Disable the module inside CLKCTRL * @inst: CM instance register offset (*_INST macro) * @cdoffs: Clockdomain register offset (*_CDOFFS macro) * @clkctrl_offs: Module clock control register offset (*_CLKCTRL macro) * * No return value. 
*/ void am33xx_cm_module_disable(u16 inst, s16 cdoffs, u16 clkctrl_offs) { u32 v; v = am33xx_cm_read_reg(inst, clkctrl_offs); v &= ~AM33XX_MODULEMODE_MASK; am33xx_cm_write_reg(v, inst, clkctrl_offs); } /* * Clockdomain low-level functions */ static int am33xx_clkdm_sleep(struct clockdomain *clkdm) { am33xx_cm_clkdm_force_sleep(clkdm->cm_inst, clkdm->clkdm_offs); return 0; } static int am33xx_clkdm_wakeup(struct clockdomain *clkdm) { am33xx_cm_clkdm_force_wakeup(clkdm->cm_inst, clkdm->clkdm_offs); return 0; } static void am33xx_clkdm_allow_idle(struct clockdomain *clkdm) { am33xx_cm_clkdm_enable_hwsup(clkdm->cm_inst, clkdm->clkdm_offs); } static void am33xx_clkdm_deny_idle(struct clockdomain *clkdm) { am33xx_cm_clkdm_disable_hwsup(clkdm->cm_inst, clkdm->clkdm_offs); } static int am33xx_clkdm_clk_enable(struct clockdomain *clkdm) { if (clkdm->flags & CLKDM_CAN_FORCE_WAKEUP) return am33xx_clkdm_wakeup(clkdm); return 0; } static int am33xx_clkdm_clk_disable(struct clockdomain *clkdm) { bool hwsup = false; hwsup = am33xx_cm_is_clkdm_in_hwsup(clkdm->cm_inst, clkdm->clkdm_offs); if (!hwsup && (clkdm->flags & CLKDM_CAN_FORCE_SLEEP)) am33xx_clkdm_sleep(clkdm); return 0; } struct clkdm_ops am33xx_clkdm_operations = { .clkdm_sleep = am33xx_clkdm_sleep, .clkdm_wakeup = am33xx_clkdm_wakeup, .clkdm_allow_idle = am33xx_clkdm_allow_idle, .clkdm_deny_idle = am33xx_clkdm_deny_idle, .clkdm_clk_enable = am33xx_clkdm_clk_enable, .clkdm_clk_disable = am33xx_clkdm_clk_disable, };
gpl-2.0
HtcLegacy/android_kernel_htc_golfu
drivers/media/video/cx88/cx88-alsa.c
2521
25739
/* * * Support for audio capture * PCI function #1 of the cx2388x. * * (c) 2007 Trent Piepho <xyzzy@speakeasy.org> * (c) 2005,2006 Ricardo Cerqueira <v4l@cerqueira.org> * (c) 2005 Mauro Carvalho Chehab <mchehab@infradead.org> * Based on a dummy cx88 module by Gerd Knorr <kraxel@bytesex.org> * Based on dummy.c by Jaroslav Kysela <perex@perex.cz> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <linux/init.h> #include <linux/device.h> #include <linux/interrupt.h> #include <linux/vmalloc.h> #include <linux/dma-mapping.h> #include <linux/pci.h> #include <linux/slab.h> #include <asm/delay.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/control.h> #include <sound/initval.h> #include <sound/tlv.h> #include <media/wm8775.h> #include "cx88.h" #include "cx88-reg.h" #define dprintk(level,fmt, arg...) if (debug >= level) \ printk(KERN_INFO "%s/1: " fmt, chip->core->name , ## arg) #define dprintk_core(level,fmt, arg...) 
if (debug >= level) \ printk(KERN_DEBUG "%s/1: " fmt, chip->core->name , ## arg) /**************************************************************************** Data type declarations - Can be moded to a header file later ****************************************************************************/ struct cx88_audio_buffer { unsigned int bpl; struct btcx_riscmem risc; struct videobuf_dmabuf dma; }; struct cx88_audio_dev { struct cx88_core *core; struct cx88_dmaqueue q; /* pci i/o */ struct pci_dev *pci; /* audio controls */ int irq; struct snd_card *card; spinlock_t reg_lock; atomic_t count; unsigned int dma_size; unsigned int period_size; unsigned int num_periods; struct videobuf_dmabuf *dma_risc; struct cx88_audio_buffer *buf; struct snd_pcm_substream *substream; }; typedef struct cx88_audio_dev snd_cx88_card_t; /**************************************************************************** Module global static vars ****************************************************************************/ static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */ static const char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */ static int enable[SNDRV_CARDS] = {1, [1 ... (SNDRV_CARDS - 1)] = 1}; module_param_array(enable, bool, NULL, 0444); MODULE_PARM_DESC(enable, "Enable cx88x soundcard. 
default enabled."); module_param_array(index, int, NULL, 0444); MODULE_PARM_DESC(index, "Index value for cx88x capture interface(s)."); /**************************************************************************** Module macros ****************************************************************************/ MODULE_DESCRIPTION("ALSA driver module for cx2388x based TV cards"); MODULE_AUTHOR("Ricardo Cerqueira"); MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>"); MODULE_LICENSE("GPL"); MODULE_SUPPORTED_DEVICE("{{Conexant,23881}," "{{Conexant,23882}," "{{Conexant,23883}"); static unsigned int debug; module_param(debug,int,0644); MODULE_PARM_DESC(debug,"enable debug messages"); /**************************************************************************** Module specific funtions ****************************************************************************/ /* * BOARD Specific: Sets audio DMA */ static int _cx88_start_audio_dma(snd_cx88_card_t *chip) { struct cx88_audio_buffer *buf = chip->buf; struct cx88_core *core=chip->core; const struct sram_channel *audio_ch = &cx88_sram_channels[SRAM_CH25]; /* Make sure RISC/FIFO are off before changing FIFO/RISC settings */ cx_clear(MO_AUD_DMACNTRL, 0x11); /* setup fifo + format - out channel */ cx88_sram_channel_setup(chip->core, audio_ch, buf->bpl, buf->risc.dma); /* sets bpl size */ cx_write(MO_AUDD_LNGTH, buf->bpl); /* reset counter */ cx_write(MO_AUDD_GPCNTRL, GP_COUNT_CONTROL_RESET); atomic_set(&chip->count, 0); dprintk(1, "Start audio DMA, %d B/line, %d lines/FIFO, %d periods, %d " "byte buffer\n", buf->bpl, cx_read(audio_ch->cmds_start + 8)>>1, chip->num_periods, buf->bpl * chip->num_periods); /* Enables corresponding bits at AUD_INT_STAT */ cx_write(MO_AUD_INTMSK, AUD_INT_OPC_ERR | AUD_INT_DN_SYNC | AUD_INT_DN_RISCI2 | AUD_INT_DN_RISCI1); /* Clean any pending interrupt bits already set */ cx_write(MO_AUD_INTSTAT, ~0); /* enable audio irqs */ cx_set(MO_PCI_INTMSK, chip->core->pci_irqmask | PCI_INT_AUDINT); /* 
start dma */ cx_set(MO_DEV_CNTRL2, (1<<5)); /* Enables Risc Processor */ cx_set(MO_AUD_DMACNTRL, 0x11); /* audio downstream FIFO and RISC enable */ if (debug) cx88_sram_channel_dump(chip->core, audio_ch); return 0; } /* * BOARD Specific: Resets audio DMA */ static int _cx88_stop_audio_dma(snd_cx88_card_t *chip) { struct cx88_core *core=chip->core; dprintk(1, "Stopping audio DMA\n"); /* stop dma */ cx_clear(MO_AUD_DMACNTRL, 0x11); /* disable irqs */ cx_clear(MO_PCI_INTMSK, PCI_INT_AUDINT); cx_clear(MO_AUD_INTMSK, AUD_INT_OPC_ERR | AUD_INT_DN_SYNC | AUD_INT_DN_RISCI2 | AUD_INT_DN_RISCI1); if (debug) cx88_sram_channel_dump(chip->core, &cx88_sram_channels[SRAM_CH25]); return 0; } #define MAX_IRQ_LOOP 50 /* * BOARD Specific: IRQ dma bits */ static const char *cx88_aud_irqs[32] = { "dn_risci1", "up_risci1", "rds_dn_risc1", /* 0-2 */ NULL, /* reserved */ "dn_risci2", "up_risci2", "rds_dn_risc2", /* 4-6 */ NULL, /* reserved */ "dnf_of", "upf_uf", "rds_dnf_uf", /* 8-10 */ NULL, /* reserved */ "dn_sync", "up_sync", "rds_dn_sync", /* 12-14 */ NULL, /* reserved */ "opc_err", "par_err", "rip_err", /* 16-18 */ "pci_abort", "ber_irq", "mchg_irq" /* 19-21 */ }; /* * BOARD Specific: Threats IRQ audio specific calls */ static void cx8801_aud_irq(snd_cx88_card_t *chip) { struct cx88_core *core = chip->core; u32 status, mask; status = cx_read(MO_AUD_INTSTAT); mask = cx_read(MO_AUD_INTMSK); if (0 == (status & mask)) return; cx_write(MO_AUD_INTSTAT, status); if (debug > 1 || (status & mask & ~0xff)) cx88_print_irqbits(core->name, "irq aud", cx88_aud_irqs, ARRAY_SIZE(cx88_aud_irqs), status, mask); /* risc op code error */ if (status & AUD_INT_OPC_ERR) { printk(KERN_WARNING "%s/1: Audio risc op code error\n",core->name); cx_clear(MO_AUD_DMACNTRL, 0x11); cx88_sram_channel_dump(core, &cx88_sram_channels[SRAM_CH25]); } if (status & AUD_INT_DN_SYNC) { dprintk(1, "Downstream sync error\n"); cx_write(MO_AUDD_GPCNTRL, GP_COUNT_CONTROL_RESET); return; } /* risc1 downstream */ if (status & 
AUD_INT_DN_RISCI1) { atomic_set(&chip->count, cx_read(MO_AUDD_GPCNT)); snd_pcm_period_elapsed(chip->substream); } /* FIXME: Any other status should deserve a special handling? */ } /* * BOARD Specific: Handles IRQ calls */ static irqreturn_t cx8801_irq(int irq, void *dev_id) { snd_cx88_card_t *chip = dev_id; struct cx88_core *core = chip->core; u32 status; int loop, handled = 0; for (loop = 0; loop < MAX_IRQ_LOOP; loop++) { status = cx_read(MO_PCI_INTSTAT) & (core->pci_irqmask | PCI_INT_AUDINT); if (0 == status) goto out; dprintk(3, "cx8801_irq loop %d/%d, status %x\n", loop, MAX_IRQ_LOOP, status); handled = 1; cx_write(MO_PCI_INTSTAT, status); if (status & core->pci_irqmask) cx88_core_irq(core, status); if (status & PCI_INT_AUDINT) cx8801_aud_irq(chip); } if (MAX_IRQ_LOOP == loop) { printk(KERN_ERR "%s/1: IRQ loop detected, disabling interrupts\n", core->name); cx_clear(MO_PCI_INTMSK, PCI_INT_AUDINT); } out: return IRQ_RETVAL(handled); } static int dsp_buffer_free(snd_cx88_card_t *chip) { BUG_ON(!chip->dma_size); dprintk(2,"Freeing buffer\n"); videobuf_dma_unmap(&chip->pci->dev, chip->dma_risc); videobuf_dma_free(chip->dma_risc); btcx_riscmem_free(chip->pci,&chip->buf->risc); kfree(chip->buf); chip->dma_risc = NULL; chip->dma_size = 0; return 0; } /**************************************************************************** ALSA PCM Interface ****************************************************************************/ /* * Digital hardware definition */ #define DEFAULT_FIFO_SIZE 4096 static const struct snd_pcm_hardware snd_cx88_digital_hw = { .info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_MMAP_VALID, .formats = SNDRV_PCM_FMTBIT_S16_LE, .rates = SNDRV_PCM_RATE_48000, .rate_min = 48000, .rate_max = 48000, .channels_min = 2, .channels_max = 2, /* Analog audio output will be full of clicks and pops if there are not exactly four lines in the SRAM FIFO buffer. 
*/ .period_bytes_min = DEFAULT_FIFO_SIZE/4, .period_bytes_max = DEFAULT_FIFO_SIZE/4, .periods_min = 1, .periods_max = 1024, .buffer_bytes_max = (1024*1024), }; /* * audio pcm capture open callback */ static int snd_cx88_pcm_open(struct snd_pcm_substream *substream) { snd_cx88_card_t *chip = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; int err; if (!chip) { printk(KERN_ERR "BUG: cx88 can't find device struct." " Can't proceed with open\n"); return -ENODEV; } err = snd_pcm_hw_constraint_pow2(runtime, 0, SNDRV_PCM_HW_PARAM_PERIODS); if (err < 0) goto _error; chip->substream = substream; runtime->hw = snd_cx88_digital_hw; if (cx88_sram_channels[SRAM_CH25].fifo_size != DEFAULT_FIFO_SIZE) { unsigned int bpl = cx88_sram_channels[SRAM_CH25].fifo_size / 4; bpl &= ~7; /* must be multiple of 8 */ runtime->hw.period_bytes_min = bpl; runtime->hw.period_bytes_max = bpl; } return 0; _error: dprintk(1,"Error opening PCM!\n"); return err; } /* * audio close callback */ static int snd_cx88_close(struct snd_pcm_substream *substream) { return 0; } /* * hw_params callback */ static int snd_cx88_hw_params(struct snd_pcm_substream * substream, struct snd_pcm_hw_params * hw_params) { snd_cx88_card_t *chip = snd_pcm_substream_chip(substream); struct videobuf_dmabuf *dma; struct cx88_audio_buffer *buf; int ret; if (substream->runtime->dma_area) { dsp_buffer_free(chip); substream->runtime->dma_area = NULL; } chip->period_size = params_period_bytes(hw_params); chip->num_periods = params_periods(hw_params); chip->dma_size = chip->period_size * params_periods(hw_params); BUG_ON(!chip->dma_size); BUG_ON(chip->num_periods & (chip->num_periods-1)); buf = kzalloc(sizeof(*buf), GFP_KERNEL); if (NULL == buf) return -ENOMEM; buf->bpl = chip->period_size; dma = &buf->dma; videobuf_dma_init(dma); ret = videobuf_dma_init_kernel(dma, PCI_DMA_FROMDEVICE, (PAGE_ALIGN(chip->dma_size) >> PAGE_SHIFT)); if (ret < 0) goto error; ret = videobuf_dma_map(&chip->pci->dev, 
dma); if (ret < 0) goto error; ret = cx88_risc_databuffer(chip->pci, &buf->risc, dma->sglist, chip->period_size, chip->num_periods, 1); if (ret < 0) goto error; /* Loop back to start of program */ buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP|RISC_IRQ1|RISC_CNT_INC); buf->risc.jmp[1] = cpu_to_le32(buf->risc.dma); chip->buf = buf; chip->dma_risc = dma; substream->runtime->dma_area = chip->dma_risc->vaddr; substream->runtime->dma_bytes = chip->dma_size; substream->runtime->dma_addr = 0; return 0; error: kfree(buf); return ret; } /* * hw free callback */ static int snd_cx88_hw_free(struct snd_pcm_substream * substream) { snd_cx88_card_t *chip = snd_pcm_substream_chip(substream); if (substream->runtime->dma_area) { dsp_buffer_free(chip); substream->runtime->dma_area = NULL; } return 0; } /* * prepare callback */ static int snd_cx88_prepare(struct snd_pcm_substream *substream) { return 0; } /* * trigger callback */ static int snd_cx88_card_trigger(struct snd_pcm_substream *substream, int cmd) { snd_cx88_card_t *chip = snd_pcm_substream_chip(substream); int err; /* Local interrupts are already disabled by ALSA */ spin_lock(&chip->reg_lock); switch (cmd) { case SNDRV_PCM_TRIGGER_START: err=_cx88_start_audio_dma(chip); break; case SNDRV_PCM_TRIGGER_STOP: err=_cx88_stop_audio_dma(chip); break; default: err=-EINVAL; break; } spin_unlock(&chip->reg_lock); return err; } /* * pointer callback */ static snd_pcm_uframes_t snd_cx88_pointer(struct snd_pcm_substream *substream) { snd_cx88_card_t *chip = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; u16 count; count = atomic_read(&chip->count); // dprintk(2, "%s - count %d (+%u), period %d, frame %lu\n", __func__, // count, new, count & (runtime->periods-1), // runtime->period_size * (count & (runtime->periods-1))); return runtime->period_size * (count & (runtime->periods-1)); } /* * page callback (needed for mmap) */ static struct page *snd_cx88_page(struct snd_pcm_substream *substream, unsigned 
long offset) { void *pageptr = substream->runtime->dma_area + offset; return vmalloc_to_page(pageptr); } /* * operators */ static struct snd_pcm_ops snd_cx88_pcm_ops = { .open = snd_cx88_pcm_open, .close = snd_cx88_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_cx88_hw_params, .hw_free = snd_cx88_hw_free, .prepare = snd_cx88_prepare, .trigger = snd_cx88_card_trigger, .pointer = snd_cx88_pointer, .page = snd_cx88_page, }; /* * create a PCM device */ static int __devinit snd_cx88_pcm(snd_cx88_card_t *chip, int device, const char *name) { int err; struct snd_pcm *pcm; err = snd_pcm_new(chip->card, name, device, 0, 1, &pcm); if (err < 0) return err; pcm->private_data = chip; strcpy(pcm->name, name); snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_cx88_pcm_ops); return 0; } /**************************************************************************** CONTROL INTERFACE ****************************************************************************/ static int snd_cx88_volume_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *info) { info->type = SNDRV_CTL_ELEM_TYPE_INTEGER; info->count = 2; info->value.integer.min = 0; info->value.integer.max = 0x3f; return 0; } static int snd_cx88_volume_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *value) { snd_cx88_card_t *chip = snd_kcontrol_chip(kcontrol); struct cx88_core *core=chip->core; int vol = 0x3f - (cx_read(AUD_VOL_CTL) & 0x3f), bal = cx_read(AUD_BAL_CTL); value->value.integer.value[(bal & 0x40) ? 0 : 1] = vol; vol -= (bal & 0x3f); value->value.integer.value[(bal & 0x40) ? 1 : 0] = vol < 0 ? 
0 : vol; return 0; } static void snd_cx88_wm8775_volume_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *value) { snd_cx88_card_t *chip = snd_kcontrol_chip(kcontrol); struct cx88_core *core = chip->core; struct v4l2_control client_ctl; int left = value->value.integer.value[0]; int right = value->value.integer.value[1]; int v, b; memset(&client_ctl, 0, sizeof(client_ctl)); /* Pass volume & balance onto any WM8775 */ if (left >= right) { v = left << 10; b = left ? (0x8000 * right) / left : 0x8000; } else { v = right << 10; b = right ? 0xffff - (0x8000 * left) / right : 0x8000; } client_ctl.value = v; client_ctl.id = V4L2_CID_AUDIO_VOLUME; call_hw(core, WM8775_GID, core, s_ctrl, &client_ctl); client_ctl.value = b; client_ctl.id = V4L2_CID_AUDIO_BALANCE; call_hw(core, WM8775_GID, core, s_ctrl, &client_ctl); } /* OK - TODO: test it */ static int snd_cx88_volume_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *value) { snd_cx88_card_t *chip = snd_kcontrol_chip(kcontrol); struct cx88_core *core=chip->core; int left, right, v, b; int changed = 0; u32 old; if (core->board.audio_chip == V4L2_IDENT_WM8775) snd_cx88_wm8775_volume_put(kcontrol, value); left = value->value.integer.value[0] & 0x3f; right = value->value.integer.value[1] & 0x3f; b = right - left; if (b < 0) { v = 0x3f - left; b = (-b) | 0x40; } else { v = 0x3f - right; } /* Do we really know this will always be called with IRQs on? 
*/ spin_lock_irq(&chip->reg_lock); old = cx_read(AUD_VOL_CTL); if (v != (old & 0x3f)) { cx_swrite(SHADOW_AUD_VOL_CTL, AUD_VOL_CTL, (old & ~0x3f) | v); changed = 1; } if ((cx_read(AUD_BAL_CTL) & 0x7f) != b) { cx_write(AUD_BAL_CTL, b); changed = 1; } spin_unlock_irq(&chip->reg_lock); return changed; } static const DECLARE_TLV_DB_SCALE(snd_cx88_db_scale, -6300, 100, 0); static const struct snd_kcontrol_new snd_cx88_volume = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .access = SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_TLV_READ, .name = "Analog-TV Volume", .info = snd_cx88_volume_info, .get = snd_cx88_volume_get, .put = snd_cx88_volume_put, .tlv.p = snd_cx88_db_scale, }; static int snd_cx88_switch_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *value) { snd_cx88_card_t *chip = snd_kcontrol_chip(kcontrol); struct cx88_core *core = chip->core; u32 bit = kcontrol->private_value; value->value.integer.value[0] = !(cx_read(AUD_VOL_CTL) & bit); return 0; } static int snd_cx88_switch_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *value) { snd_cx88_card_t *chip = snd_kcontrol_chip(kcontrol); struct cx88_core *core = chip->core; u32 bit = kcontrol->private_value; int ret = 0; u32 vol; spin_lock_irq(&chip->reg_lock); vol = cx_read(AUD_VOL_CTL); if (value->value.integer.value[0] != !(vol & bit)) { vol ^= bit; cx_swrite(SHADOW_AUD_VOL_CTL, AUD_VOL_CTL, vol); /* Pass mute onto any WM8775 */ if ((core->board.audio_chip == V4L2_IDENT_WM8775) && ((1<<6) == bit)) { struct v4l2_control client_ctl; memset(&client_ctl, 0, sizeof(client_ctl)); client_ctl.value = 0 != (vol & bit); client_ctl.id = V4L2_CID_AUDIO_MUTE; call_hw(core, WM8775_GID, core, s_ctrl, &client_ctl); } ret = 1; } spin_unlock_irq(&chip->reg_lock); return ret; } static const struct snd_kcontrol_new snd_cx88_dac_switch = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Audio-Out Switch", .info = snd_ctl_boolean_mono_info, .get = snd_cx88_switch_get, .put = snd_cx88_switch_put, 
.private_value = (1<<8), }; static const struct snd_kcontrol_new snd_cx88_source_switch = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Analog-TV Switch", .info = snd_ctl_boolean_mono_info, .get = snd_cx88_switch_get, .put = snd_cx88_switch_put, .private_value = (1<<6), }; static int snd_cx88_alc_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *value) { snd_cx88_card_t *chip = snd_kcontrol_chip(kcontrol); struct cx88_core *core = chip->core; struct v4l2_control client_ctl; memset(&client_ctl, 0, sizeof(client_ctl)); client_ctl.id = V4L2_CID_AUDIO_LOUDNESS; call_hw(core, WM8775_GID, core, g_ctrl, &client_ctl); value->value.integer.value[0] = client_ctl.value ? 1 : 0; return 0; } static int snd_cx88_alc_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *value) { snd_cx88_card_t *chip = snd_kcontrol_chip(kcontrol); struct cx88_core *core = chip->core; struct v4l2_control client_ctl; memset(&client_ctl, 0, sizeof(client_ctl)); client_ctl.value = 0 != value->value.integer.value[0]; client_ctl.id = V4L2_CID_AUDIO_LOUDNESS; call_hw(core, WM8775_GID, core, s_ctrl, &client_ctl); return 0; } static struct snd_kcontrol_new snd_cx88_alc_switch = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Line-In ALC Switch", .info = snd_ctl_boolean_mono_info, .get = snd_cx88_alc_get, .put = snd_cx88_alc_put, }; /**************************************************************************** Basic Flow for Sound Devices ****************************************************************************/ /* * PCI ID Table - 14f1:8801 and 14f1:8811 means function 1: Audio * Only boards with eeprom and byte 1 at eeprom=1 have it */ static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitdata = { {0x14f1,0x8801,PCI_ANY_ID,PCI_ANY_ID,0,0,0}, {0x14f1,0x8811,PCI_ANY_ID,PCI_ANY_ID,0,0,0}, {0, } }; MODULE_DEVICE_TABLE(pci, cx88_audio_pci_tbl); /* * Chip-specific destructor */ static int snd_cx88_free(snd_cx88_card_t *chip) { if (chip->irq >= 0) free_irq(chip->irq, 
chip); cx88_core_put(chip->core,chip->pci); pci_disable_device(chip->pci); return 0; } /* * Component Destructor */ static void snd_cx88_dev_free(struct snd_card * card) { snd_cx88_card_t *chip = card->private_data; snd_cx88_free(chip); } /* * Alsa Constructor - Component probe */ static int devno; static int __devinit snd_cx88_create(struct snd_card *card, struct pci_dev *pci, snd_cx88_card_t **rchip, struct cx88_core **core_ptr) { snd_cx88_card_t *chip; struct cx88_core *core; int err; unsigned char pci_lat; *rchip = NULL; err = pci_enable_device(pci); if (err < 0) return err; pci_set_master(pci); chip = card->private_data; core = cx88_core_get(pci); if (NULL == core) { err = -EINVAL; return err; } if (!pci_dma_supported(pci,DMA_BIT_MASK(32))) { dprintk(0, "%s/1: Oops: no 32bit PCI DMA ???\n",core->name); err = -EIO; cx88_core_put(core, pci); return err; } /* pci init */ chip->card = card; chip->pci = pci; chip->irq = -1; spin_lock_init(&chip->reg_lock); chip->core = core; /* get irq */ err = request_irq(chip->pci->irq, cx8801_irq, IRQF_SHARED | IRQF_DISABLED, chip->core->name, chip); if (err < 0) { dprintk(0, "%s: can't get IRQ %d\n", chip->core->name, chip->pci->irq); return err; } /* print pci info */ pci_read_config_byte(pci, PCI_LATENCY_TIMER, &pci_lat); dprintk(1,"ALSA %s/%i: found at %s, rev: %d, irq: %d, " "latency: %d, mmio: 0x%llx\n", core->name, devno, pci_name(pci), pci->revision, pci->irq, pci_lat, (unsigned long long)pci_resource_start(pci,0)); chip->irq = pci->irq; synchronize_irq(chip->irq); snd_card_set_dev(card, &pci->dev); *rchip = chip; *core_ptr = core; return 0; } static int __devinit cx88_audio_initdev(struct pci_dev *pci, const struct pci_device_id *pci_id) { struct snd_card *card; snd_cx88_card_t *chip; struct cx88_core *core = NULL; int err; if (devno >= SNDRV_CARDS) return (-ENODEV); if (!enable[devno]) { ++devno; return (-ENOENT); } err = snd_card_create(index[devno], id[devno], THIS_MODULE, sizeof(snd_cx88_card_t), &card); if (err < 
0) return err; card->private_free = snd_cx88_dev_free; err = snd_cx88_create(card, pci, &chip, &core); if (err < 0) goto error; err = snd_cx88_pcm(chip, 0, "CX88 Digital"); if (err < 0) goto error; err = snd_ctl_add(card, snd_ctl_new1(&snd_cx88_volume, chip)); if (err < 0) goto error; err = snd_ctl_add(card, snd_ctl_new1(&snd_cx88_dac_switch, chip)); if (err < 0) goto error; err = snd_ctl_add(card, snd_ctl_new1(&snd_cx88_source_switch, chip)); if (err < 0) goto error; /* If there's a wm8775 then add a Line-In ALC switch */ if (core->board.audio_chip == V4L2_IDENT_WM8775) snd_ctl_add(card, snd_ctl_new1(&snd_cx88_alc_switch, chip)); strcpy (card->driver, "CX88x"); sprintf(card->shortname, "Conexant CX%x", pci->device); sprintf(card->longname, "%s at %#llx", card->shortname,(unsigned long long)pci_resource_start(pci, 0)); strcpy (card->mixername, "CX88"); dprintk (0, "%s/%i: ALSA support for cx2388x boards\n", card->driver,devno); err = snd_card_register(card); if (err < 0) goto error; pci_set_drvdata(pci,card); devno++; return 0; error: snd_card_free(card); return err; } /* * ALSA destructor */ static void __devexit cx88_audio_finidev(struct pci_dev *pci) { struct cx88_audio_dev *card = pci_get_drvdata(pci); snd_card_free((void *)card); pci_set_drvdata(pci, NULL); devno--; } /* * PCI driver definition */ static struct pci_driver cx88_audio_pci_driver = { .name = "cx88_audio", .id_table = cx88_audio_pci_tbl, .probe = cx88_audio_initdev, .remove = __devexit_p(cx88_audio_finidev), }; /**************************************************************************** LINUX MODULE INIT ****************************************************************************/ /* * module init */ static int __init cx88_audio_init(void) { printk(KERN_INFO "cx2388x alsa driver version %d.%d.%d loaded\n", (CX88_VERSION_CODE >> 16) & 0xff, (CX88_VERSION_CODE >> 8) & 0xff, CX88_VERSION_CODE & 0xff); #ifdef SNAPSHOT printk(KERN_INFO "cx2388x: snapshot date %04d-%02d-%02d\n", SNAPSHOT/10000, 
(SNAPSHOT/100)%100, SNAPSHOT%100); #endif return pci_register_driver(&cx88_audio_pci_driver); } /* * module remove */ static void __exit cx88_audio_fini(void) { pci_unregister_driver(&cx88_audio_pci_driver); } module_init(cx88_audio_init); module_exit(cx88_audio_fini); /* ----------------------------------------------------------- */ /* * Local variables: * c-basic-offset: 8 * End: */
gpl-2.0
DevSwift/Kernel-3.4-U8500
drivers/net/wireless/mwifiex/11n_rxreorder.c
2777
17857
/* * Marvell Wireless LAN device driver: 802.11n RX Re-ordering * * Copyright (C) 2011, Marvell International Ltd. * * This software file (the "File") is distributed by Marvell International * Ltd. under the terms of the GNU General Public License Version 2, June 1991 * (the "License"). You may use, redistribute and/or modify this File in * accordance with the terms and conditions of the License, a copy of which * is available by writing to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt. * * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE * ARE EXPRESSLY DISCLAIMED. The License provides additional details about * this warranty disclaimer. */ #include "decl.h" #include "ioctl.h" #include "util.h" #include "fw.h" #include "main.h" #include "wmm.h" #include "11n.h" #include "11n_rxreorder.h" /* * This function dispatches all packets in the Rx reorder table until the * start window. * * There could be holes in the buffer, which are skipped by the function. * Since the buffer is linear, the function uses rotation to simulate * circular buffer. */ static void mwifiex_11n_dispatch_pkt(struct mwifiex_private *priv, struct mwifiex_rx_reorder_tbl *tbl, int start_win) { int pkt_to_send, i; void *rx_tmp_ptr; unsigned long flags; pkt_to_send = (start_win > tbl->start_win) ? 
min((start_win - tbl->start_win), tbl->win_size) : tbl->win_size; for (i = 0; i < pkt_to_send; ++i) { spin_lock_irqsave(&priv->rx_pkt_lock, flags); rx_tmp_ptr = NULL; if (tbl->rx_reorder_ptr[i]) { rx_tmp_ptr = tbl->rx_reorder_ptr[i]; tbl->rx_reorder_ptr[i] = NULL; } spin_unlock_irqrestore(&priv->rx_pkt_lock, flags); if (rx_tmp_ptr) mwifiex_process_rx_packet(priv->adapter, rx_tmp_ptr); } spin_lock_irqsave(&priv->rx_pkt_lock, flags); /* * We don't have a circular buffer, hence use rotation to simulate * circular buffer */ for (i = 0; i < tbl->win_size - pkt_to_send; ++i) { tbl->rx_reorder_ptr[i] = tbl->rx_reorder_ptr[pkt_to_send + i]; tbl->rx_reorder_ptr[pkt_to_send + i] = NULL; } tbl->start_win = start_win; spin_unlock_irqrestore(&priv->rx_pkt_lock, flags); } /* * This function dispatches all packets in the Rx reorder table until * a hole is found. * * The start window is adjusted automatically when a hole is located. * Since the buffer is linear, the function uses rotation to simulate * circular buffer. 
*/ static void mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv, struct mwifiex_rx_reorder_tbl *tbl) { int i, j, xchg; void *rx_tmp_ptr; unsigned long flags; for (i = 0; i < tbl->win_size; ++i) { spin_lock_irqsave(&priv->rx_pkt_lock, flags); if (!tbl->rx_reorder_ptr[i]) { spin_unlock_irqrestore(&priv->rx_pkt_lock, flags); break; } rx_tmp_ptr = tbl->rx_reorder_ptr[i]; tbl->rx_reorder_ptr[i] = NULL; spin_unlock_irqrestore(&priv->rx_pkt_lock, flags); mwifiex_process_rx_packet(priv->adapter, rx_tmp_ptr); } spin_lock_irqsave(&priv->rx_pkt_lock, flags); /* * We don't have a circular buffer, hence use rotation to simulate * circular buffer */ if (i > 0) { xchg = tbl->win_size - i; for (j = 0; j < xchg; ++j) { tbl->rx_reorder_ptr[j] = tbl->rx_reorder_ptr[i + j]; tbl->rx_reorder_ptr[i + j] = NULL; } } tbl->start_win = (tbl->start_win + i) & (MAX_TID_VALUE - 1); spin_unlock_irqrestore(&priv->rx_pkt_lock, flags); } /* * This function deletes the Rx reorder table and frees the memory. * * The function stops the associated timer and dispatches all the * pending packets in the Rx reorder table before deletion. */ static void mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv, struct mwifiex_rx_reorder_tbl *tbl) { unsigned long flags; if (!tbl) return; mwifiex_11n_dispatch_pkt(priv, tbl, (tbl->start_win + tbl->win_size) & (MAX_TID_VALUE - 1)); del_timer(&tbl->timer_context.timer); spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags); list_del(&tbl->list); spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); kfree(tbl->rx_reorder_ptr); kfree(tbl); } /* * This function returns the pointer to an entry in Rx reordering * table which matches the given TA/TID pair. 
*/ static struct mwifiex_rx_reorder_tbl * mwifiex_11n_get_rx_reorder_tbl(struct mwifiex_private *priv, int tid, u8 *ta) { struct mwifiex_rx_reorder_tbl *tbl; unsigned long flags; spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags); list_for_each_entry(tbl, &priv->rx_reorder_tbl_ptr, list) { if (!memcmp(tbl->ta, ta, ETH_ALEN) && tbl->tid == tid) { spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); return tbl; } } spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); return NULL; } /* * This function finds the last sequence number used in the packets * buffered in Rx reordering table. */ static int mwifiex_11n_find_last_seq_num(struct mwifiex_rx_reorder_tbl *rx_reorder_tbl_ptr) { int i; for (i = (rx_reorder_tbl_ptr->win_size - 1); i >= 0; --i) if (rx_reorder_tbl_ptr->rx_reorder_ptr[i]) return i; return -1; } /* * This function flushes all the packets in Rx reordering table. * * The function checks if any packets are currently buffered in the * table or not. In case there are packets available, it dispatches * them and then dumps the Rx reordering table. */ static void mwifiex_flush_data(unsigned long context) { struct reorder_tmr_cnxt *ctx = (struct reorder_tmr_cnxt *) context; int start_win; start_win = mwifiex_11n_find_last_seq_num(ctx->ptr); if (start_win < 0) return; dev_dbg(ctx->priv->adapter->dev, "info: flush data %d\n", start_win); mwifiex_11n_dispatch_pkt(ctx->priv, ctx->ptr, (ctx->ptr->start_win + start_win + 1) & (MAX_TID_VALUE - 1)); } /* * This function creates an entry in Rx reordering table for the * given TA/TID. * * The function also initializes the entry with sequence number, window * size as well as initializes the timer. * * If the received TA/TID pair is already present, all the packets are * dispatched and the window size is moved until the SSN. 
*/ static void mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta, int tid, int win_size, int seq_num) { int i; struct mwifiex_rx_reorder_tbl *tbl, *new_node; u16 last_seq = 0; unsigned long flags; /* * If we get a TID, ta pair which is already present dispatch all the * the packets and move the window size until the ssn */ tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta); if (tbl) { mwifiex_11n_dispatch_pkt(priv, tbl, seq_num); return; } /* if !tbl then create one */ new_node = kzalloc(sizeof(struct mwifiex_rx_reorder_tbl), GFP_KERNEL); if (!new_node) { dev_err(priv->adapter->dev, "%s: failed to alloc new_node\n", __func__); return; } INIT_LIST_HEAD(&new_node->list); new_node->tid = tid; memcpy(new_node->ta, ta, ETH_ALEN); new_node->start_win = seq_num; if (mwifiex_queuing_ra_based(priv)) /* TODO for adhoc */ dev_dbg(priv->adapter->dev, "info: ADHOC:last_seq=%d start_win=%d\n", last_seq, new_node->start_win); else last_seq = priv->rx_seq[tid]; if (last_seq >= new_node->start_win) new_node->start_win = last_seq + 1; new_node->win_size = win_size; new_node->rx_reorder_ptr = kzalloc(sizeof(void *) * win_size, GFP_KERNEL); if (!new_node->rx_reorder_ptr) { kfree((u8 *) new_node); dev_err(priv->adapter->dev, "%s: failed to alloc reorder_ptr\n", __func__); return; } new_node->timer_context.ptr = new_node; new_node->timer_context.priv = priv; init_timer(&new_node->timer_context.timer); new_node->timer_context.timer.function = mwifiex_flush_data; new_node->timer_context.timer.data = (unsigned long) &new_node->timer_context; for (i = 0; i < win_size; ++i) new_node->rx_reorder_ptr[i] = NULL; spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags); list_add_tail(&new_node->list, &priv->rx_reorder_tbl_ptr); spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); } /* * This function prepares command for adding a BA request. 
* * Preparation includes - * - Setting command ID and proper size * - Setting add BA request buffer * - Ensuring correct endian-ness */ int mwifiex_cmd_11n_addba_req(struct host_cmd_ds_command *cmd, void *data_buf) { struct host_cmd_ds_11n_addba_req *add_ba_req = (struct host_cmd_ds_11n_addba_req *) &cmd->params.add_ba_req; cmd->command = cpu_to_le16(HostCmd_CMD_11N_ADDBA_REQ); cmd->size = cpu_to_le16(sizeof(*add_ba_req) + S_DS_GEN); memcpy(add_ba_req, data_buf, sizeof(*add_ba_req)); return 0; } /* * This function prepares command for adding a BA response. * * Preparation includes - * - Setting command ID and proper size * - Setting add BA response buffer * - Ensuring correct endian-ness */ int mwifiex_cmd_11n_addba_rsp_gen(struct mwifiex_private *priv, struct host_cmd_ds_command *cmd, struct host_cmd_ds_11n_addba_req *cmd_addba_req) { struct host_cmd_ds_11n_addba_rsp *add_ba_rsp = (struct host_cmd_ds_11n_addba_rsp *) &cmd->params.add_ba_rsp; u8 tid; int win_size; uint16_t block_ack_param_set; cmd->command = cpu_to_le16(HostCmd_CMD_11N_ADDBA_RSP); cmd->size = cpu_to_le16(sizeof(*add_ba_rsp) + S_DS_GEN); memcpy(add_ba_rsp->peer_mac_addr, cmd_addba_req->peer_mac_addr, ETH_ALEN); add_ba_rsp->dialog_token = cmd_addba_req->dialog_token; add_ba_rsp->block_ack_tmo = cmd_addba_req->block_ack_tmo; add_ba_rsp->ssn = cmd_addba_req->ssn; block_ack_param_set = le16_to_cpu(cmd_addba_req->block_ack_param_set); tid = (block_ack_param_set & IEEE80211_ADDBA_PARAM_TID_MASK) >> BLOCKACKPARAM_TID_POS; add_ba_rsp->status_code = cpu_to_le16(ADDBA_RSP_STATUS_ACCEPT); block_ack_param_set &= ~IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK; /* We donot support AMSDU inside AMPDU, hence reset the bit */ block_ack_param_set &= ~BLOCKACKPARAM_AMSDU_SUPP_MASK; block_ack_param_set |= (priv->add_ba_param.rx_win_size << BLOCKACKPARAM_WINSIZE_POS); add_ba_rsp->block_ack_param_set = cpu_to_le16(block_ack_param_set); win_size = (le16_to_cpu(add_ba_rsp->block_ack_param_set) & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK) 
>> BLOCKACKPARAM_WINSIZE_POS; cmd_addba_req->block_ack_param_set = cpu_to_le16(block_ack_param_set); mwifiex_11n_create_rx_reorder_tbl(priv, cmd_addba_req->peer_mac_addr, tid, win_size, le16_to_cpu(cmd_addba_req->ssn)); return 0; } /* * This function prepares command for deleting a BA request. * * Preparation includes - * - Setting command ID and proper size * - Setting del BA request buffer * - Ensuring correct endian-ness */ int mwifiex_cmd_11n_delba(struct host_cmd_ds_command *cmd, void *data_buf) { struct host_cmd_ds_11n_delba *del_ba = (struct host_cmd_ds_11n_delba *) &cmd->params.del_ba; cmd->command = cpu_to_le16(HostCmd_CMD_11N_DELBA); cmd->size = cpu_to_le16(sizeof(*del_ba) + S_DS_GEN); memcpy(del_ba, data_buf, sizeof(*del_ba)); return 0; } /* * This function identifies if Rx reordering is needed for a received packet. * * In case reordering is required, the function will do the reordering * before sending it to kernel. * * The Rx reorder table is checked first with the received TID/TA pair. If * not found, the received packet is dispatched immediately. But if found, * the packet is reordered and all the packets in the updated Rx reordering * table is dispatched until a hole is found. * * For sequence number less than the starting window, the packet is dropped. 
*/ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv, u16 seq_num, u16 tid, u8 *ta, u8 pkt_type, void *payload) { struct mwifiex_rx_reorder_tbl *tbl; int start_win, end_win, win_size; u16 pkt_index; tbl = mwifiex_11n_get_rx_reorder_tbl((struct mwifiex_private *) priv, tid, ta); if (!tbl) { if (pkt_type != PKT_TYPE_BAR) mwifiex_process_rx_packet(priv->adapter, payload); return 0; } start_win = tbl->start_win; win_size = tbl->win_size; end_win = ((start_win + win_size) - 1) & (MAX_TID_VALUE - 1); del_timer(&tbl->timer_context.timer); mod_timer(&tbl->timer_context.timer, jiffies + (MIN_FLUSH_TIMER_MS * win_size * HZ) / 1000); /* * If seq_num is less then starting win then ignore and drop the * packet */ if ((start_win + TWOPOW11) > (MAX_TID_VALUE - 1)) {/* Wrap */ if (seq_num >= ((start_win + TWOPOW11) & (MAX_TID_VALUE - 1)) && (seq_num < start_win)) return -1; } else if ((seq_num < start_win) || (seq_num > (start_win + TWOPOW11))) { return -1; } /* * If this packet is a BAR we adjust seq_num as * WinStart = seq_num */ if (pkt_type == PKT_TYPE_BAR) seq_num = ((seq_num + win_size) - 1) & (MAX_TID_VALUE - 1); if (((end_win < start_win) && (seq_num < (TWOPOW11 - (MAX_TID_VALUE - start_win))) && (seq_num > end_win)) || ((end_win > start_win) && ((seq_num > end_win) || (seq_num < start_win)))) { end_win = seq_num; if (((seq_num - win_size) + 1) >= 0) start_win = (end_win - win_size) + 1; else start_win = (MAX_TID_VALUE - (win_size - seq_num)) + 1; mwifiex_11n_dispatch_pkt(priv, tbl, start_win); } if (pkt_type != PKT_TYPE_BAR) { if (seq_num >= start_win) pkt_index = seq_num - start_win; else pkt_index = (seq_num+MAX_TID_VALUE) - start_win; if (tbl->rx_reorder_ptr[pkt_index]) return -1; tbl->rx_reorder_ptr[pkt_index] = payload; } /* * Dispatch all packets sequentially from start_win until a * hole is found and adjust the start_win appropriately */ mwifiex_11n_scan_and_dispatch(priv, tbl); return 0; } /* * This function deletes an entry for a given TID/TA pair. 
* * The TID/TA are taken from del BA event body. */ void mwifiex_del_ba_tbl(struct mwifiex_private *priv, int tid, u8 *peer_mac, u8 type, int initiator) { struct mwifiex_rx_reorder_tbl *tbl; struct mwifiex_tx_ba_stream_tbl *ptx_tbl; u8 cleanup_rx_reorder_tbl; unsigned long flags; if (type == TYPE_DELBA_RECEIVE) cleanup_rx_reorder_tbl = (initiator) ? true : false; else cleanup_rx_reorder_tbl = (initiator) ? false : true; dev_dbg(priv->adapter->dev, "event: DELBA: %pM tid=%d initiator=%d\n", peer_mac, tid, initiator); if (cleanup_rx_reorder_tbl) { tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, peer_mac); if (!tbl) { dev_dbg(priv->adapter->dev, "event: TID, TA not found in table\n"); return; } mwifiex_del_rx_reorder_entry(priv, tbl); } else { ptx_tbl = mwifiex_get_ba_tbl(priv, tid, peer_mac); if (!ptx_tbl) { dev_dbg(priv->adapter->dev, "event: TID, RA not found in table\n"); return; } spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags); mwifiex_11n_delete_tx_ba_stream_tbl_entry(priv, ptx_tbl); spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock, flags); } } /* * This function handles the command response of an add BA response. * * Handling includes changing the header fields into CPU format and * creating the stream, provided the add BA is accepted. 
*/ int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv, struct host_cmd_ds_command *resp) { struct host_cmd_ds_11n_addba_rsp *add_ba_rsp = (struct host_cmd_ds_11n_addba_rsp *) &resp->params.add_ba_rsp; int tid, win_size; struct mwifiex_rx_reorder_tbl *tbl; uint16_t block_ack_param_set; block_ack_param_set = le16_to_cpu(add_ba_rsp->block_ack_param_set); tid = (block_ack_param_set & IEEE80211_ADDBA_PARAM_TID_MASK) >> BLOCKACKPARAM_TID_POS; /* * Check if we had rejected the ADDBA, if yes then do not create * the stream */ if (le16_to_cpu(add_ba_rsp->status_code) == BA_RESULT_SUCCESS) { win_size = (block_ack_param_set & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK) >> BLOCKACKPARAM_WINSIZE_POS; dev_dbg(priv->adapter->dev, "cmd: ADDBA RSP: %pM tid=%d ssn=%d win_size=%d\n", add_ba_rsp->peer_mac_addr, tid, add_ba_rsp->ssn, win_size); } else { dev_err(priv->adapter->dev, "ADDBA RSP: failed %pM tid=%d)\n", add_ba_rsp->peer_mac_addr, tid); tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, add_ba_rsp->peer_mac_addr); if (tbl) mwifiex_del_rx_reorder_entry(priv, tbl); } return 0; } /* * This function handles BA stream timeout event by preparing and sending * a command to the firmware. */ void mwifiex_11n_ba_stream_timeout(struct mwifiex_private *priv, struct host_cmd_ds_11n_batimeout *event) { struct host_cmd_ds_11n_delba delba; memset(&delba, 0, sizeof(struct host_cmd_ds_11n_delba)); memcpy(delba.peer_mac_addr, event->peer_mac_addr, ETH_ALEN); delba.del_ba_param_set |= cpu_to_le16((u16) event->tid << DELBA_TID_POS); delba.del_ba_param_set |= cpu_to_le16( (u16) event->origninator << DELBA_INITIATOR_POS); delba.reason_code = cpu_to_le16(WLAN_REASON_QSTA_TIMEOUT); mwifiex_send_cmd_async(priv, HostCmd_CMD_11N_DELBA, 0, 0, &delba); } /* * This function cleans up the Rx reorder table by deleting all the entries * and re-initializing. 
*/ void mwifiex_11n_cleanup_reorder_tbl(struct mwifiex_private *priv) { struct mwifiex_rx_reorder_tbl *del_tbl_ptr, *tmp_node; unsigned long flags; spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags); list_for_each_entry_safe(del_tbl_ptr, tmp_node, &priv->rx_reorder_tbl_ptr, list) { spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); mwifiex_del_rx_reorder_entry(priv, del_tbl_ptr); spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags); } spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); INIT_LIST_HEAD(&priv->rx_reorder_tbl_ptr); memset(priv->rx_seq, 0, sizeof(priv->rx_seq)); }
gpl-2.0
jeffegg/beaglebonepsp
drivers/s390/char/tape_3590.c
2777
47905
/* * drivers/s390/char/tape_3590.c * tape device discipline for 3590 tapes. * * Copyright IBM Corp. 2001, 2009 * Author(s): Stefan Bader <shbader@de.ibm.com> * Michael Holzheu <holzheu@de.ibm.com> * Martin Schwidefsky <schwidefsky@de.ibm.com> */ #define KMSG_COMPONENT "tape_3590" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #include <linux/module.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/bio.h> #include <asm/ebcdic.h> #define TAPE_DBF_AREA tape_3590_dbf #define BUFSIZE 512 /* size of buffers for dynamic generated messages */ #include "tape.h" #include "tape_std.h" #include "tape_3590.h" static struct workqueue_struct *tape_3590_wq; /* * Pointer to debug area. */ debug_info_t *TAPE_DBF_AREA = NULL; EXPORT_SYMBOL(TAPE_DBF_AREA); /******************************************************************* * Error Recovery functions: * - Read Opposite: implemented * - Read Device (buffered) log: BRA * - Read Library log: BRA * - Swap Devices: BRA * - Long Busy: implemented * - Special Intercept: BRA * - Read Alternate: implemented *******************************************************************/ static const char *tape_3590_msg[TAPE_3590_MAX_MSG] = { [0x00] = "", [0x10] = "Lost Sense", [0x11] = "Assigned Elsewhere", [0x12] = "Allegiance Reset", [0x13] = "Shared Access Violation", [0x20] = "Command Reject", [0x21] = "Configuration Error", [0x22] = "Protection Exception", [0x23] = "Write Protect", [0x24] = "Write Length", [0x25] = "Read-Only Format", [0x31] = "Beginning of Partition", [0x33] = "End of Partition", [0x34] = "End of Data", [0x35] = "Block not found", [0x40] = "Device Intervention", [0x41] = "Loader Intervention", [0x42] = "Library Intervention", [0x50] = "Write Error", [0x51] = "Erase Error", [0x52] = "Formatting Error", [0x53] = "Read Error", [0x54] = "Unsupported Format", [0x55] = "No Formatting", [0x56] = "Positioning lost", [0x57] = "Read Length", [0x60] = "Unsupported Medium", [0x61] = "Medium Length Error", [0x62] = "Medium 
removed", [0x64] = "Load Check", [0x65] = "Unload Check", [0x70] = "Equipment Check", [0x71] = "Bus out Check", [0x72] = "Protocol Error", [0x73] = "Interface Error", [0x74] = "Overrun", [0x75] = "Halt Signal", [0x90] = "Device fenced", [0x91] = "Device Path fenced", [0xa0] = "Volume misplaced", [0xa1] = "Volume inaccessible", [0xa2] = "Volume in input", [0xa3] = "Volume ejected", [0xa4] = "All categories reserved", [0xa5] = "Duplicate Volume", [0xa6] = "Library Manager Offline", [0xa7] = "Library Output Station full", [0xa8] = "Vision System non-operational", [0xa9] = "Library Manager Equipment Check", [0xaa] = "Library Equipment Check", [0xab] = "All Library Cells full", [0xac] = "No Cleaner Volumes in Library", [0xad] = "I/O Station door open", [0xae] = "Subsystem environmental alert", }; static int crypt_supported(struct tape_device *device) { return TAPE390_CRYPT_SUPPORTED(TAPE_3590_CRYPT_INFO(device)); } static int crypt_enabled(struct tape_device *device) { return TAPE390_CRYPT_ON(TAPE_3590_CRYPT_INFO(device)); } static void ext_to_int_kekl(struct tape390_kekl *in, struct tape3592_kekl *out) { int i; memset(out, 0, sizeof(*out)); if (in->type == TAPE390_KEKL_TYPE_HASH) out->flags |= 0x40; if (in->type_on_tape == TAPE390_KEKL_TYPE_HASH) out->flags |= 0x80; strncpy(out->label, in->label, 64); for (i = strlen(in->label); i < sizeof(out->label); i++) out->label[i] = ' '; ASCEBC(out->label, sizeof(out->label)); } static void int_to_ext_kekl(struct tape3592_kekl *in, struct tape390_kekl *out) { memset(out, 0, sizeof(*out)); if(in->flags & 0x40) out->type = TAPE390_KEKL_TYPE_HASH; else out->type = TAPE390_KEKL_TYPE_LABEL; if(in->flags & 0x80) out->type_on_tape = TAPE390_KEKL_TYPE_HASH; else out->type_on_tape = TAPE390_KEKL_TYPE_LABEL; memcpy(out->label, in->label, sizeof(in->label)); EBCASC(out->label, sizeof(in->label)); strim(out->label); } static void int_to_ext_kekl_pair(struct tape3592_kekl_pair *in, struct tape390_kekl_pair *out) { if (in->count == 0) { 
out->kekl[0].type = TAPE390_KEKL_TYPE_NONE; out->kekl[0].type_on_tape = TAPE390_KEKL_TYPE_NONE; out->kekl[1].type = TAPE390_KEKL_TYPE_NONE; out->kekl[1].type_on_tape = TAPE390_KEKL_TYPE_NONE; } else if (in->count == 1) { int_to_ext_kekl(&in->kekl[0], &out->kekl[0]); out->kekl[1].type = TAPE390_KEKL_TYPE_NONE; out->kekl[1].type_on_tape = TAPE390_KEKL_TYPE_NONE; } else if (in->count == 2) { int_to_ext_kekl(&in->kekl[0], &out->kekl[0]); int_to_ext_kekl(&in->kekl[1], &out->kekl[1]); } else { printk("Invalid KEKL number: %d\n", in->count); BUG(); } } static int check_ext_kekl(struct tape390_kekl *kekl) { if (kekl->type == TAPE390_KEKL_TYPE_NONE) goto invalid; if (kekl->type > TAPE390_KEKL_TYPE_HASH) goto invalid; if (kekl->type_on_tape == TAPE390_KEKL_TYPE_NONE) goto invalid; if (kekl->type_on_tape > TAPE390_KEKL_TYPE_HASH) goto invalid; if ((kekl->type == TAPE390_KEKL_TYPE_HASH) && (kekl->type_on_tape == TAPE390_KEKL_TYPE_LABEL)) goto invalid; return 0; invalid: return -EINVAL; } static int check_ext_kekl_pair(struct tape390_kekl_pair *kekls) { if (check_ext_kekl(&kekls->kekl[0])) goto invalid; if (check_ext_kekl(&kekls->kekl[1])) goto invalid; return 0; invalid: return -EINVAL; } /* * Query KEKLs */ static int tape_3592_kekl_query(struct tape_device *device, struct tape390_kekl_pair *ext_kekls) { struct tape_request *request; struct tape3592_kekl_query_order *order; struct tape3592_kekl_query_data *int_kekls; int rc; DBF_EVENT(6, "tape3592_kekl_query\n"); int_kekls = kmalloc(sizeof(*int_kekls), GFP_KERNEL|GFP_DMA); if (!int_kekls) return -ENOMEM; request = tape_alloc_request(2, sizeof(*order)); if (IS_ERR(request)) { rc = PTR_ERR(request); goto fail_malloc; } order = request->cpdata; memset(order,0,sizeof(*order)); order->code = 0xe2; order->max_count = 2; request->op = TO_KEKL_QUERY; tape_ccw_cc(request->cpaddr, PERF_SUBSYS_FUNC, sizeof(*order), order); tape_ccw_end(request->cpaddr + 1, READ_SS_DATA, sizeof(*int_kekls), int_kekls); rc = tape_do_io(device, request); 
if (rc) goto fail_request; int_to_ext_kekl_pair(&int_kekls->kekls, ext_kekls); rc = 0; fail_request: tape_free_request(request); fail_malloc: kfree(int_kekls); return rc; } /* * IOCTL: Query KEKLs */ static int tape_3592_ioctl_kekl_query(struct tape_device *device, unsigned long arg) { int rc; struct tape390_kekl_pair *ext_kekls; DBF_EVENT(6, "tape_3592_ioctl_kekl_query\n"); if (!crypt_supported(device)) return -ENOSYS; if (!crypt_enabled(device)) return -EUNATCH; ext_kekls = kmalloc(sizeof(*ext_kekls), GFP_KERNEL); if (!ext_kekls) return -ENOMEM; rc = tape_3592_kekl_query(device, ext_kekls); if (rc != 0) goto fail; if (copy_to_user((char __user *) arg, ext_kekls, sizeof(*ext_kekls))) { rc = -EFAULT; goto fail; } rc = 0; fail: kfree(ext_kekls); return rc; } static int tape_3590_mttell(struct tape_device *device, int mt_count); /* * Set KEKLs */ static int tape_3592_kekl_set(struct tape_device *device, struct tape390_kekl_pair *ext_kekls) { struct tape_request *request; struct tape3592_kekl_set_order *order; DBF_EVENT(6, "tape3592_kekl_set\n"); if (check_ext_kekl_pair(ext_kekls)) { DBF_EVENT(6, "invalid kekls\n"); return -EINVAL; } if (tape_3590_mttell(device, 0) != 0) return -EBADSLT; request = tape_alloc_request(1, sizeof(*order)); if (IS_ERR(request)) return PTR_ERR(request); order = request->cpdata; memset(order, 0, sizeof(*order)); order->code = 0xe3; order->kekls.count = 2; ext_to_int_kekl(&ext_kekls->kekl[0], &order->kekls.kekl[0]); ext_to_int_kekl(&ext_kekls->kekl[1], &order->kekls.kekl[1]); request->op = TO_KEKL_SET; tape_ccw_end(request->cpaddr, PERF_SUBSYS_FUNC, sizeof(*order), order); return tape_do_io_free(device, request); } /* * IOCTL: Set KEKLs */ static int tape_3592_ioctl_kekl_set(struct tape_device *device, unsigned long arg) { int rc; struct tape390_kekl_pair *ext_kekls; DBF_EVENT(6, "tape_3592_ioctl_kekl_set\n"); if (!crypt_supported(device)) return -ENOSYS; if (!crypt_enabled(device)) return -EUNATCH; ext_kekls = kmalloc(sizeof(*ext_kekls), 
GFP_KERNEL); if (!ext_kekls) return -ENOMEM; if (copy_from_user(ext_kekls, (char __user *)arg, sizeof(*ext_kekls))) { rc = -EFAULT; goto out; } rc = tape_3592_kekl_set(device, ext_kekls); out: kfree(ext_kekls); return rc; } /* * Enable encryption */ static struct tape_request *__tape_3592_enable_crypt(struct tape_device *device) { struct tape_request *request; char *data; DBF_EVENT(6, "tape_3592_enable_crypt\n"); if (!crypt_supported(device)) return ERR_PTR(-ENOSYS); request = tape_alloc_request(2, 72); if (IS_ERR(request)) return request; data = request->cpdata; memset(data,0,72); data[0] = 0x05; data[36 + 0] = 0x03; data[36 + 1] = 0x03; data[36 + 4] = 0x40; data[36 + 6] = 0x01; data[36 + 14] = 0x2f; data[36 + 18] = 0xc3; data[36 + 35] = 0x72; request->op = TO_CRYPT_ON; tape_ccw_cc(request->cpaddr, MODE_SET_CB, 36, data); tape_ccw_end(request->cpaddr + 1, MODE_SET_CB, 36, data + 36); return request; } static int tape_3592_enable_crypt(struct tape_device *device) { struct tape_request *request; request = __tape_3592_enable_crypt(device); if (IS_ERR(request)) return PTR_ERR(request); return tape_do_io_free(device, request); } static void tape_3592_enable_crypt_async(struct tape_device *device) { struct tape_request *request; request = __tape_3592_enable_crypt(device); if (!IS_ERR(request)) tape_do_io_async_free(device, request); } /* * Disable encryption */ static struct tape_request *__tape_3592_disable_crypt(struct tape_device *device) { struct tape_request *request; char *data; DBF_EVENT(6, "tape_3592_disable_crypt\n"); if (!crypt_supported(device)) return ERR_PTR(-ENOSYS); request = tape_alloc_request(2, 72); if (IS_ERR(request)) return request; data = request->cpdata; memset(data,0,72); data[0] = 0x05; data[36 + 0] = 0x03; data[36 + 1] = 0x03; data[36 + 35] = 0x32; request->op = TO_CRYPT_OFF; tape_ccw_cc(request->cpaddr, MODE_SET_CB, 36, data); tape_ccw_end(request->cpaddr + 1, MODE_SET_CB, 36, data + 36); return request; } static int 
tape_3592_disable_crypt(struct tape_device *device) { struct tape_request *request; request = __tape_3592_disable_crypt(device); if (IS_ERR(request)) return PTR_ERR(request); return tape_do_io_free(device, request); } static void tape_3592_disable_crypt_async(struct tape_device *device) { struct tape_request *request; request = __tape_3592_disable_crypt(device); if (!IS_ERR(request)) tape_do_io_async_free(device, request); } /* * IOCTL: Set encryption status */ static int tape_3592_ioctl_crypt_set(struct tape_device *device, unsigned long arg) { struct tape390_crypt_info info; DBF_EVENT(6, "tape_3592_ioctl_crypt_set\n"); if (!crypt_supported(device)) return -ENOSYS; if (copy_from_user(&info, (char __user *)arg, sizeof(info))) return -EFAULT; if (info.status & ~TAPE390_CRYPT_ON_MASK) return -EINVAL; if (info.status & TAPE390_CRYPT_ON_MASK) return tape_3592_enable_crypt(device); else return tape_3592_disable_crypt(device); } static int tape_3590_sense_medium(struct tape_device *device); /* * IOCTL: Query enryption status */ static int tape_3592_ioctl_crypt_query(struct tape_device *device, unsigned long arg) { DBF_EVENT(6, "tape_3592_ioctl_crypt_query\n"); if (!crypt_supported(device)) return -ENOSYS; tape_3590_sense_medium(device); if (copy_to_user((char __user *) arg, &TAPE_3590_CRYPT_INFO(device), sizeof(TAPE_3590_CRYPT_INFO(device)))) return -EFAULT; else return 0; } /* * 3590 IOCTL Overload */ static int tape_3590_ioctl(struct tape_device *device, unsigned int cmd, unsigned long arg) { switch (cmd) { case TAPE390_DISPLAY: { struct display_struct disp; if (copy_from_user(&disp, (char __user *) arg, sizeof(disp))) return -EFAULT; return tape_std_display(device, &disp); } case TAPE390_KEKL_SET: return tape_3592_ioctl_kekl_set(device, arg); case TAPE390_KEKL_QUERY: return tape_3592_ioctl_kekl_query(device, arg); case TAPE390_CRYPT_SET: return tape_3592_ioctl_crypt_set(device, arg); case TAPE390_CRYPT_QUERY: return tape_3592_ioctl_crypt_query(device, arg); default: 
return -EINVAL; /* no additional ioctls */ } } /* * SENSE Medium: Get Sense data about medium state */ static int tape_3590_sense_medium(struct tape_device *device) { struct tape_request *request; request = tape_alloc_request(1, 128); if (IS_ERR(request)) return PTR_ERR(request); request->op = TO_MSEN; tape_ccw_end(request->cpaddr, MEDIUM_SENSE, 128, request->cpdata); return tape_do_io_free(device, request); } static void tape_3590_sense_medium_async(struct tape_device *device) { struct tape_request *request; request = tape_alloc_request(1, 128); if (IS_ERR(request)) return; request->op = TO_MSEN; tape_ccw_end(request->cpaddr, MEDIUM_SENSE, 128, request->cpdata); tape_do_io_async_free(device, request); } /* * MTTELL: Tell block. Return the number of block relative to current file. */ static int tape_3590_mttell(struct tape_device *device, int mt_count) { __u64 block_id; int rc; rc = tape_std_read_block_id(device, &block_id); if (rc) return rc; return block_id >> 32; } /* * MTSEEK: seek to the specified block. */ static int tape_3590_mtseek(struct tape_device *device, int count) { struct tape_request *request; DBF_EVENT(6, "xsee id: %x\n", count); request = tape_alloc_request(3, 4); if (IS_ERR(request)) return PTR_ERR(request); request->op = TO_LBL; tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte); *(__u32 *) request->cpdata = count; tape_ccw_cc(request->cpaddr + 1, LOCATE, 4, request->cpdata); tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL); return tape_do_io_free(device, request); } /* * Read Opposite Error Recovery Function: * Used, when Read Forward does not work */ static void tape_3590_read_opposite(struct tape_device *device, struct tape_request *request) { struct tape_3590_disc_data *data; /* * We have allocated 4 ccws in tape_std_read, so we can now * transform the request to a read backward, followed by a * forward space block. 
*/ request->op = TO_RBA; tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte); data = device->discdata; tape_ccw_cc_idal(request->cpaddr + 1, data->read_back_op, device->char_data.idal_buf); tape_ccw_cc(request->cpaddr + 2, FORSPACEBLOCK, 0, NULL); tape_ccw_end(request->cpaddr + 3, NOP, 0, NULL); DBF_EVENT(6, "xrop ccwg\n"); } /* * Read Attention Msg * This should be done after an interrupt with attention bit (0x80) * in device state. * * After a "read attention message" request there are two possible * results: * * 1. A unit check is presented, when attention sense is present (e.g. when * a medium has been unloaded). The attention sense comes then * together with the unit check. The recovery action is either "retry" * (in case there is an attention message pending) or "permanent error". * * 2. The attention msg is written to the "read subsystem data" buffer. * In this case we probably should print it to the console. */ static void tape_3590_read_attmsg_async(struct tape_device *device) { struct tape_request *request; char *buf; request = tape_alloc_request(3, 4096); if (IS_ERR(request)) return; request->op = TO_READ_ATTMSG; buf = request->cpdata; buf[0] = PREP_RD_SS_DATA; buf[6] = RD_ATTMSG; /* read att msg */ tape_ccw_cc(request->cpaddr, PERFORM_SS_FUNC, 12, buf); tape_ccw_cc(request->cpaddr + 1, READ_SS_DATA, 4096 - 12, buf + 12); tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL); tape_do_io_async_free(device, request); } /* * These functions are used to schedule follow-up actions from within an * interrupt context (like unsolicited interrupts). * Note: the work handler is called by the system work queue. The tape * commands started by the handler need to be asynchrounous, otherwise * a deadlock can occur e.g. in case of a deferred cc=1 (see __tape_do_irq). 
*/ struct work_handler_data { struct tape_device *device; enum tape_op op; struct work_struct work; }; static void tape_3590_work_handler(struct work_struct *work) { struct work_handler_data *p = container_of(work, struct work_handler_data, work); switch (p->op) { case TO_MSEN: tape_3590_sense_medium_async(p->device); break; case TO_READ_ATTMSG: tape_3590_read_attmsg_async(p->device); break; case TO_CRYPT_ON: tape_3592_enable_crypt_async(p->device); break; case TO_CRYPT_OFF: tape_3592_disable_crypt_async(p->device); break; default: DBF_EVENT(3, "T3590: work handler undefined for " "operation 0x%02x\n", p->op); } tape_put_device(p->device); kfree(p); } static int tape_3590_schedule_work(struct tape_device *device, enum tape_op op) { struct work_handler_data *p; if ((p = kzalloc(sizeof(*p), GFP_ATOMIC)) == NULL) return -ENOMEM; INIT_WORK(&p->work, tape_3590_work_handler); p->device = tape_get_device(device); p->op = op; queue_work(tape_3590_wq, &p->work); return 0; } #ifdef CONFIG_S390_TAPE_BLOCK /* * Tape Block READ */ static struct tape_request * tape_3590_bread(struct tape_device *device, struct request *req) { struct tape_request *request; struct ccw1 *ccw; int count = 0, start_block; unsigned off; char *dst; struct bio_vec *bv; struct req_iterator iter; DBF_EVENT(6, "xBREDid:"); start_block = blk_rq_pos(req) >> TAPEBLOCK_HSEC_S2B; DBF_EVENT(6, "start_block = %i\n", start_block); rq_for_each_segment(bv, req, iter) count += bv->bv_len >> (TAPEBLOCK_HSEC_S2B + 9); request = tape_alloc_request(2 + count + 1, 4); if (IS_ERR(request)) return request; request->op = TO_BLOCK; *(__u32 *) request->cpdata = start_block; ccw = request->cpaddr; ccw = tape_ccw_cc(ccw, MODE_SET_DB, 1, device->modeset_byte); /* * We always setup a nop after the mode set ccw. This slot is * used in tape_std_check_locate to insert a locate ccw if the * current tape position doesn't match the start block to be read. 
*/ ccw = tape_ccw_cc(ccw, NOP, 0, NULL); rq_for_each_segment(bv, req, iter) { dst = page_address(bv->bv_page) + bv->bv_offset; for (off = 0; off < bv->bv_len; off += TAPEBLOCK_HSEC_SIZE) { ccw->flags = CCW_FLAG_CC; ccw->cmd_code = READ_FORWARD; ccw->count = TAPEBLOCK_HSEC_SIZE; set_normalized_cda(ccw, (void *) __pa(dst)); ccw++; dst += TAPEBLOCK_HSEC_SIZE; } BUG_ON(off > bv->bv_len); } ccw = tape_ccw_end(ccw, NOP, 0, NULL); DBF_EVENT(6, "xBREDccwg\n"); return request; } static void tape_3590_free_bread(struct tape_request *request) { struct ccw1 *ccw; /* Last ccw is a nop and doesn't need clear_normalized_cda */ for (ccw = request->cpaddr; ccw->flags & CCW_FLAG_CC; ccw++) if (ccw->cmd_code == READ_FORWARD) clear_normalized_cda(ccw); tape_free_request(request); } /* * check_locate is called just before the tape request is passed to * the common io layer for execution. It has to check the current * tape position and insert a locate ccw if it doesn't match the * start block for the request. */ static void tape_3590_check_locate(struct tape_device *device, struct tape_request *request) { __u32 *start_block; start_block = (__u32 *) request->cpdata; if (*start_block != device->blk_data.block_position) { /* Add the start offset of the file to get the real block. 
*/ *start_block += device->bof; tape_ccw_cc(request->cpaddr + 1, LOCATE, 4, request->cpdata); } } #endif static void tape_3590_med_state_set(struct tape_device *device, struct tape_3590_med_sense *sense) { struct tape390_crypt_info *c_info; c_info = &TAPE_3590_CRYPT_INFO(device); DBF_EVENT(6, "medium state: %x:%x\n", sense->macst, sense->masst); switch (sense->macst) { case 0x04: case 0x05: case 0x06: tape_med_state_set(device, MS_UNLOADED); TAPE_3590_CRYPT_INFO(device).medium_status = 0; return; case 0x08: case 0x09: tape_med_state_set(device, MS_LOADED); break; default: tape_med_state_set(device, MS_UNKNOWN); return; } c_info->medium_status |= TAPE390_MEDIUM_LOADED_MASK; if (sense->flags & MSENSE_CRYPT_MASK) { DBF_EVENT(6, "Medium is encrypted (%04x)\n", sense->flags); c_info->medium_status |= TAPE390_MEDIUM_ENCRYPTED_MASK; } else { DBF_EVENT(6, "Medium is not encrypted %04x\n", sense->flags); c_info->medium_status &= ~TAPE390_MEDIUM_ENCRYPTED_MASK; } } /* * The done handler is called at device/channel end and wakes up the sleeping * process */ static int tape_3590_done(struct tape_device *device, struct tape_request *request) { DBF_EVENT(6, "%s done\n", tape_op_verbose[request->op]); switch (request->op) { case TO_BSB: case TO_BSF: case TO_DSE: case TO_FSB: case TO_FSF: case TO_LBL: case TO_RFO: case TO_RBA: case TO_REW: case TO_WRI: case TO_WTM: case TO_BLOCK: case TO_LOAD: tape_med_state_set(device, MS_LOADED); break; case TO_RUN: tape_med_state_set(device, MS_UNLOADED); tape_3590_schedule_work(device, TO_CRYPT_OFF); break; case TO_MSEN: tape_3590_med_state_set(device, request->cpdata); break; case TO_CRYPT_ON: TAPE_3590_CRYPT_INFO(device).status |= TAPE390_CRYPT_ON_MASK; *(device->modeset_byte) |= 0x03; break; case TO_CRYPT_OFF: TAPE_3590_CRYPT_INFO(device).status &= ~TAPE390_CRYPT_ON_MASK; *(device->modeset_byte) &= ~0x03; break; case TO_RBI: /* RBI seems to succeed even without medium loaded. */ case TO_NOP: /* Same to NOP. 
*/ case TO_READ_CONFIG: case TO_READ_ATTMSG: case TO_DIS: case TO_ASSIGN: case TO_UNASSIGN: case TO_SIZE: case TO_KEKL_SET: case TO_KEKL_QUERY: case TO_RDC: break; } return TAPE_IO_SUCCESS; } /* * This function is called, when error recovery was successful */ static inline int tape_3590_erp_succeded(struct tape_device *device, struct tape_request *request) { DBF_EVENT(3, "Error Recovery successful for %s\n", tape_op_verbose[request->op]); return tape_3590_done(device, request); } /* * This function is called, when error recovery was not successful */ static inline int tape_3590_erp_failed(struct tape_device *device, struct tape_request *request, struct irb *irb, int rc) { DBF_EVENT(3, "Error Recovery failed for %s\n", tape_op_verbose[request->op]); tape_dump_sense_dbf(device, request, irb); return rc; } /* * Error Recovery do retry */ static inline int tape_3590_erp_retry(struct tape_device *device, struct tape_request *request, struct irb *irb) { DBF_EVENT(2, "Retry: %s\n", tape_op_verbose[request->op]); tape_dump_sense_dbf(device, request, irb); return TAPE_IO_RETRY; } /* * Handle unsolicited interrupts */ static int tape_3590_unsolicited_irq(struct tape_device *device, struct irb *irb) { if (irb->scsw.cmd.dstat == DEV_STAT_CHN_END) /* Probably result of halt ssch */ return TAPE_IO_PENDING; else if (irb->scsw.cmd.dstat == 0x85) /* Device Ready */ DBF_EVENT(3, "unsol.irq! tape ready: %08x\n", device->cdev_id); else if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) { tape_3590_schedule_work(device, TO_READ_ATTMSG); } else { DBF_EVENT(3, "unsol.irq! 
dev end: %08x\n", device->cdev_id); tape_dump_sense_dbf(device, NULL, irb); } /* check medium state */ tape_3590_schedule_work(device, TO_MSEN); return TAPE_IO_SUCCESS; } /* * Basic Recovery routine */ static int tape_3590_erp_basic(struct tape_device *device, struct tape_request *request, struct irb *irb, int rc) { struct tape_3590_sense *sense; sense = (struct tape_3590_sense *) irb->ecw; switch (sense->bra) { case SENSE_BRA_PER: return tape_3590_erp_failed(device, request, irb, rc); case SENSE_BRA_CONT: return tape_3590_erp_succeded(device, request); case SENSE_BRA_RE: return tape_3590_erp_retry(device, request, irb); case SENSE_BRA_DRE: return tape_3590_erp_failed(device, request, irb, rc); default: BUG(); return TAPE_IO_STOP; } } /* * RDL: Read Device (buffered) log */ static int tape_3590_erp_read_buf_log(struct tape_device *device, struct tape_request *request, struct irb *irb) { /* * We just do the basic error recovery at the moment (retry). * Perhaps in the future, we read the log and dump it somewhere... */ return tape_3590_erp_basic(device, request, irb, -EIO); } /* * SWAP: Swap Devices */ static int tape_3590_erp_swap(struct tape_device *device, struct tape_request *request, struct irb *irb) { /* * This error recovery should swap the tapes * if the original has a problem. The operation * should proceed with the new tape... this * should probably be done in user space! 
*/ dev_warn (&device->cdev->dev, "The tape medium must be loaded into a " "different tape unit\n"); return tape_3590_erp_basic(device, request, irb, -EIO); } /* * LBY: Long Busy */ static int tape_3590_erp_long_busy(struct tape_device *device, struct tape_request *request, struct irb *irb) { DBF_EVENT(6, "Device is busy\n"); return TAPE_IO_LONG_BUSY; } /* * SPI: Special Intercept */ static int tape_3590_erp_special_interrupt(struct tape_device *device, struct tape_request *request, struct irb *irb) { return tape_3590_erp_basic(device, request, irb, -EIO); } /* * RDA: Read Alternate */ static int tape_3590_erp_read_alternate(struct tape_device *device, struct tape_request *request, struct irb *irb) { struct tape_3590_disc_data *data; /* * The issued Read Backward or Read Previous command is not * supported by the device * The recovery action should be to issue another command: * Read Revious: if Read Backward is not supported * Read Backward: if Read Previous is not supported */ data = device->discdata; if (data->read_back_op == READ_PREVIOUS) { DBF_EVENT(2, "(%08x): No support for READ_PREVIOUS command\n", device->cdev_id); data->read_back_op = READ_BACKWARD; } else { DBF_EVENT(2, "(%08x): No support for READ_BACKWARD command\n", device->cdev_id); data->read_back_op = READ_PREVIOUS; } tape_3590_read_opposite(device, request); return tape_3590_erp_retry(device, request, irb); } /* * Error Recovery read opposite */ static int tape_3590_erp_read_opposite(struct tape_device *device, struct tape_request *request, struct irb *irb) { switch (request->op) { case TO_RFO: /* * We did read forward, but the data could not be read. * We will read backward and then skip forward again. 
*/ tape_3590_read_opposite(device, request); return tape_3590_erp_retry(device, request, irb); case TO_RBA: /* We tried to read forward and backward, but hat no success */ return tape_3590_erp_failed(device, request, irb, -EIO); break; default: return tape_3590_erp_failed(device, request, irb, -EIO); } } /* * Print an MIM (Media Information Message) (message code f0) */ static void tape_3590_print_mim_msg_f0(struct tape_device *device, struct irb *irb) { struct tape_3590_sense *sense; char *exception, *service; exception = kmalloc(BUFSIZE, GFP_ATOMIC); service = kmalloc(BUFSIZE, GFP_ATOMIC); if (!exception || !service) goto out_nomem; sense = (struct tape_3590_sense *) irb->ecw; /* Exception Message */ switch (sense->fmt.f70.emc) { case 0x02: snprintf(exception, BUFSIZE, "Data degraded"); break; case 0x03: snprintf(exception, BUFSIZE, "Data degraded in partion %i", sense->fmt.f70.mp); break; case 0x04: snprintf(exception, BUFSIZE, "Medium degraded"); break; case 0x05: snprintf(exception, BUFSIZE, "Medium degraded in partition %i", sense->fmt.f70.mp); break; case 0x06: snprintf(exception, BUFSIZE, "Block 0 Error"); break; case 0x07: snprintf(exception, BUFSIZE, "Medium Exception 0x%02x", sense->fmt.f70.md); break; default: snprintf(exception, BUFSIZE, "0x%02x", sense->fmt.f70.emc); break; } /* Service Message */ switch (sense->fmt.f70.smc) { case 0x02: snprintf(service, BUFSIZE, "Reference Media maintenance " "procedure %i", sense->fmt.f70.md); break; default: snprintf(service, BUFSIZE, "0x%02x", sense->fmt.f70.smc); break; } dev_warn (&device->cdev->dev, "Tape media information: exception %s, " "service %s\n", exception, service); out_nomem: kfree(exception); kfree(service); } /* * Print an I/O Subsystem Service Information Message (message code f1) */ static void tape_3590_print_io_sim_msg_f1(struct tape_device *device, struct irb *irb) { struct tape_3590_sense *sense; char *exception, *service; exception = kmalloc(BUFSIZE, GFP_ATOMIC); service = kmalloc(BUFSIZE, 
GFP_ATOMIC); if (!exception || !service) goto out_nomem; sense = (struct tape_3590_sense *) irb->ecw; /* Exception Message */ switch (sense->fmt.f71.emc) { case 0x01: snprintf(exception, BUFSIZE, "Effect of failure is unknown"); break; case 0x02: snprintf(exception, BUFSIZE, "CU Exception - no performance " "impact"); break; case 0x03: snprintf(exception, BUFSIZE, "CU Exception on channel " "interface 0x%02x", sense->fmt.f71.md[0]); break; case 0x04: snprintf(exception, BUFSIZE, "CU Exception on device path " "0x%02x", sense->fmt.f71.md[0]); break; case 0x05: snprintf(exception, BUFSIZE, "CU Exception on library path " "0x%02x", sense->fmt.f71.md[0]); break; case 0x06: snprintf(exception, BUFSIZE, "CU Exception on node 0x%02x", sense->fmt.f71.md[0]); break; case 0x07: snprintf(exception, BUFSIZE, "CU Exception on partition " "0x%02x", sense->fmt.f71.md[0]); break; default: snprintf(exception, BUFSIZE, "0x%02x", sense->fmt.f71.emc); } /* Service Message */ switch (sense->fmt.f71.smc) { case 0x01: snprintf(service, BUFSIZE, "Repair impact is unknown"); break; case 0x02: snprintf(service, BUFSIZE, "Repair will not impact cu " "performance"); break; case 0x03: if (sense->fmt.f71.mdf == 0) snprintf(service, BUFSIZE, "Repair will disable node " "0x%x on CU", sense->fmt.f71.md[1]); else snprintf(service, BUFSIZE, "Repair will disable " "nodes (0x%x-0x%x) on CU", sense->fmt.f71.md[1], sense->fmt.f71.md[2]); break; case 0x04: if (sense->fmt.f71.mdf == 0) snprintf(service, BUFSIZE, "Repair will disable " "channel path 0x%x on CU", sense->fmt.f71.md[1]); else snprintf(service, BUFSIZE, "Repair will disable cannel" " paths (0x%x-0x%x) on CU", sense->fmt.f71.md[1], sense->fmt.f71.md[2]); break; case 0x05: if (sense->fmt.f71.mdf == 0) snprintf(service, BUFSIZE, "Repair will disable device" " path 0x%x on CU", sense->fmt.f71.md[1]); else snprintf(service, BUFSIZE, "Repair will disable device" " paths (0x%x-0x%x) on CU", sense->fmt.f71.md[1], sense->fmt.f71.md[2]); break; case 
0x06: if (sense->fmt.f71.mdf == 0) snprintf(service, BUFSIZE, "Repair will disable " "library path 0x%x on CU", sense->fmt.f71.md[1]); else snprintf(service, BUFSIZE, "Repair will disable " "library paths (0x%x-0x%x) on CU", sense->fmt.f71.md[1], sense->fmt.f71.md[2]); break; case 0x07: snprintf(service, BUFSIZE, "Repair will disable access to CU"); break; default: snprintf(service, BUFSIZE, "0x%02x", sense->fmt.f71.smc); } dev_warn (&device->cdev->dev, "I/O subsystem information: exception" " %s, service %s\n", exception, service); out_nomem: kfree(exception); kfree(service); } /* * Print an Device Subsystem Service Information Message (message code f2) */ static void tape_3590_print_dev_sim_msg_f2(struct tape_device *device, struct irb *irb) { struct tape_3590_sense *sense; char *exception, *service; exception = kmalloc(BUFSIZE, GFP_ATOMIC); service = kmalloc(BUFSIZE, GFP_ATOMIC); if (!exception || !service) goto out_nomem; sense = (struct tape_3590_sense *) irb->ecw; /* Exception Message */ switch (sense->fmt.f71.emc) { case 0x01: snprintf(exception, BUFSIZE, "Effect of failure is unknown"); break; case 0x02: snprintf(exception, BUFSIZE, "DV Exception - no performance" " impact"); break; case 0x03: snprintf(exception, BUFSIZE, "DV Exception on channel " "interface 0x%02x", sense->fmt.f71.md[0]); break; case 0x04: snprintf(exception, BUFSIZE, "DV Exception on loader 0x%02x", sense->fmt.f71.md[0]); break; case 0x05: snprintf(exception, BUFSIZE, "DV Exception on message display" " 0x%02x", sense->fmt.f71.md[0]); break; case 0x06: snprintf(exception, BUFSIZE, "DV Exception in tape path"); break; case 0x07: snprintf(exception, BUFSIZE, "DV Exception in drive"); break; default: snprintf(exception, BUFSIZE, "0x%02x", sense->fmt.f71.emc); } /* Service Message */ switch (sense->fmt.f71.smc) { case 0x01: snprintf(service, BUFSIZE, "Repair impact is unknown"); break; case 0x02: snprintf(service, BUFSIZE, "Repair will not impact device " "performance"); break; case 0x03: if 
(sense->fmt.f71.mdf == 0) snprintf(service, BUFSIZE, "Repair will disable " "channel path 0x%x on DV", sense->fmt.f71.md[1]); else snprintf(service, BUFSIZE, "Repair will disable " "channel path (0x%x-0x%x) on DV", sense->fmt.f71.md[1], sense->fmt.f71.md[2]); break; case 0x04: if (sense->fmt.f71.mdf == 0) snprintf(service, BUFSIZE, "Repair will disable " "interface 0x%x on DV", sense->fmt.f71.md[1]); else snprintf(service, BUFSIZE, "Repair will disable " "interfaces (0x%x-0x%x) on DV", sense->fmt.f71.md[1], sense->fmt.f71.md[2]); break; case 0x05: if (sense->fmt.f71.mdf == 0) snprintf(service, BUFSIZE, "Repair will disable loader" " 0x%x on DV", sense->fmt.f71.md[1]); else snprintf(service, BUFSIZE, "Repair will disable loader" " (0x%x-0x%x) on DV", sense->fmt.f71.md[1], sense->fmt.f71.md[2]); break; case 0x07: snprintf(service, BUFSIZE, "Repair will disable access to DV"); break; case 0x08: if (sense->fmt.f71.mdf == 0) snprintf(service, BUFSIZE, "Repair will disable " "message display 0x%x on DV", sense->fmt.f71.md[1]); else snprintf(service, BUFSIZE, "Repair will disable " "message displays (0x%x-0x%x) on DV", sense->fmt.f71.md[1], sense->fmt.f71.md[2]); break; case 0x09: snprintf(service, BUFSIZE, "Clean DV"); break; default: snprintf(service, BUFSIZE, "0x%02x", sense->fmt.f71.smc); } dev_warn (&device->cdev->dev, "Device subsystem information: exception" " %s, service %s\n", exception, service); out_nomem: kfree(exception); kfree(service); } /* * Print standard ERA Message */ static void tape_3590_print_era_msg(struct tape_device *device, struct irb *irb) { struct tape_3590_sense *sense; sense = (struct tape_3590_sense *) irb->ecw; if (sense->mc == 0) return; if ((sense->mc > 0) && (sense->mc < TAPE_3590_MAX_MSG)) { if (tape_3590_msg[sense->mc] != NULL) dev_warn (&device->cdev->dev, "The tape unit has " "issued sense message %s\n", tape_3590_msg[sense->mc]); else dev_warn (&device->cdev->dev, "The tape unit has " "issued an unknown sense message code 0x%x\n", 
sense->mc); return; } if (sense->mc == 0xf0) { /* Standard Media Information Message */ dev_warn (&device->cdev->dev, "MIM SEV=%i, MC=%02x, ES=%x/%x, " "RC=%02x-%04x-%02x\n", sense->fmt.f70.sev, sense->mc, sense->fmt.f70.emc, sense->fmt.f70.smc, sense->fmt.f70.refcode, sense->fmt.f70.mid, sense->fmt.f70.fid); tape_3590_print_mim_msg_f0(device, irb); return; } if (sense->mc == 0xf1) { /* Standard I/O Subsystem Service Information Message */ dev_warn (&device->cdev->dev, "IOSIM SEV=%i, DEVTYPE=3590/%02x," " MC=%02x, ES=%x/%x, REF=0x%04x-0x%04x-0x%04x\n", sense->fmt.f71.sev, device->cdev->id.dev_model, sense->mc, sense->fmt.f71.emc, sense->fmt.f71.smc, sense->fmt.f71.refcode1, sense->fmt.f71.refcode2, sense->fmt.f71.refcode3); tape_3590_print_io_sim_msg_f1(device, irb); return; } if (sense->mc == 0xf2) { /* Standard Device Service Information Message */ dev_warn (&device->cdev->dev, "DEVSIM SEV=%i, DEVTYPE=3590/%02x" ", MC=%02x, ES=%x/%x, REF=0x%04x-0x%04x-0x%04x\n", sense->fmt.f71.sev, device->cdev->id.dev_model, sense->mc, sense->fmt.f71.emc, sense->fmt.f71.smc, sense->fmt.f71.refcode1, sense->fmt.f71.refcode2, sense->fmt.f71.refcode3); tape_3590_print_dev_sim_msg_f2(device, irb); return; } if (sense->mc == 0xf3) { /* Standard Library Service Information Message */ return; } dev_warn (&device->cdev->dev, "The tape unit has issued an unknown " "sense message code %x\n", sense->mc); } static int tape_3590_crypt_error(struct tape_device *device, struct tape_request *request, struct irb *irb) { u8 cu_rc; u16 ekm_rc2; char *sense; sense = ((struct tape_3590_sense *) irb->ecw)->fmt.data; cu_rc = sense[0]; ekm_rc2 = *((u16*) &sense[10]); if ((cu_rc == 0) && (ekm_rc2 == 0xee31)) /* key not defined on EKM */ return tape_3590_erp_basic(device, request, irb, -EKEYREJECTED); if ((cu_rc == 1) || (cu_rc == 2)) /* No connection to EKM */ return tape_3590_erp_basic(device, request, irb, -ENOTCONN); dev_err (&device->cdev->dev, "The tape unit failed to obtain the " "encryption key 
from EKM\n"); return tape_3590_erp_basic(device, request, irb, -ENOKEY); } /* * 3590 error Recovery routine: * If possible, it tries to recover from the error. If this is not possible, * inform the user about the problem. */ static int tape_3590_unit_check(struct tape_device *device, struct tape_request *request, struct irb *irb) { struct tape_3590_sense *sense; #ifdef CONFIG_S390_TAPE_BLOCK if (request->op == TO_BLOCK) { /* * Recovery for block device requests. Set the block_position * to something invalid and retry. */ device->blk_data.block_position = -1; if (request->retries-- <= 0) return tape_3590_erp_failed(device, request, irb, -EIO); else return tape_3590_erp_retry(device, request, irb); } #endif sense = (struct tape_3590_sense *) irb->ecw; DBF_EVENT(6, "Unit Check: RQC = %x\n", sense->rc_rqc); /* * First check all RC-QRCs where we want to do something special * - "break": basic error recovery is done * - "goto out:": just print error message if available */ switch (sense->rc_rqc) { case 0x1110: tape_3590_print_era_msg(device, irb); return tape_3590_erp_read_buf_log(device, request, irb); case 0x2011: tape_3590_print_era_msg(device, irb); return tape_3590_erp_read_alternate(device, request, irb); case 0x2230: case 0x2231: tape_3590_print_era_msg(device, irb); return tape_3590_erp_special_interrupt(device, request, irb); case 0x2240: return tape_3590_crypt_error(device, request, irb); case 0x3010: DBF_EVENT(2, "(%08x): Backward at Beginning of Partition\n", device->cdev_id); return tape_3590_erp_basic(device, request, irb, -ENOSPC); case 0x3012: DBF_EVENT(2, "(%08x): Forward at End of Partition\n", device->cdev_id); return tape_3590_erp_basic(device, request, irb, -ENOSPC); case 0x3020: DBF_EVENT(2, "(%08x): End of Data Mark\n", device->cdev_id); return tape_3590_erp_basic(device, request, irb, -ENOSPC); case 0x3122: DBF_EVENT(2, "(%08x): Rewind Unload initiated\n", device->cdev_id); return tape_3590_erp_basic(device, request, irb, -EIO); case 0x3123: 
DBF_EVENT(2, "(%08x): Rewind Unload complete\n", device->cdev_id); tape_med_state_set(device, MS_UNLOADED); tape_3590_schedule_work(device, TO_CRYPT_OFF); return tape_3590_erp_basic(device, request, irb, 0); case 0x4010: /* * print additional msg since default msg * "device intervention" is not very meaningfull */ tape_med_state_set(device, MS_UNLOADED); tape_3590_schedule_work(device, TO_CRYPT_OFF); return tape_3590_erp_basic(device, request, irb, -ENOMEDIUM); case 0x4012: /* Device Long Busy */ /* XXX: Also use long busy handling here? */ DBF_EVENT(6, "(%08x): LONG BUSY\n", device->cdev_id); tape_3590_print_era_msg(device, irb); return tape_3590_erp_basic(device, request, irb, -EBUSY); case 0x4014: DBF_EVENT(6, "(%08x): Crypto LONG BUSY\n", device->cdev_id); return tape_3590_erp_long_busy(device, request, irb); case 0x5010: if (sense->rac == 0xd0) { /* Swap */ tape_3590_print_era_msg(device, irb); return tape_3590_erp_swap(device, request, irb); } if (sense->rac == 0x26) { /* Read Opposite */ tape_3590_print_era_msg(device, irb); return tape_3590_erp_read_opposite(device, request, irb); } return tape_3590_erp_basic(device, request, irb, -EIO); case 0x5020: case 0x5021: case 0x5022: case 0x5040: case 0x5041: case 0x5042: tape_3590_print_era_msg(device, irb); return tape_3590_erp_swap(device, request, irb); case 0x5110: case 0x5111: return tape_3590_erp_basic(device, request, irb, -EMEDIUMTYPE); case 0x5120: case 0x1120: tape_med_state_set(device, MS_UNLOADED); tape_3590_schedule_work(device, TO_CRYPT_OFF); return tape_3590_erp_basic(device, request, irb, -ENOMEDIUM); case 0x6020: return tape_3590_erp_basic(device, request, irb, -EMEDIUMTYPE); case 0x8011: return tape_3590_erp_basic(device, request, irb, -EPERM); case 0x8013: dev_warn (&device->cdev->dev, "A different host has privileged" " access to the tape unit\n"); return tape_3590_erp_basic(device, request, irb, -EPERM); default: return tape_3590_erp_basic(device, request, irb, -EIO); } } /* * 3590 interrupt 
handler: */ static int tape_3590_irq(struct tape_device *device, struct tape_request *request, struct irb *irb) { if (request == NULL) return tape_3590_unsolicited_irq(device, irb); if ((irb->scsw.cmd.dstat & DEV_STAT_UNIT_EXCEP) && (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) && (request->op == TO_WRI)) { /* Write at end of volume */ DBF_EVENT(2, "End of volume\n"); return tape_3590_erp_failed(device, request, irb, -ENOSPC); } if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) return tape_3590_unit_check(device, request, irb); if (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) { if (irb->scsw.cmd.dstat == DEV_STAT_UNIT_EXCEP) { if (request->op == TO_FSB || request->op == TO_BSB) request->rescnt++; else DBF_EVENT(5, "Unit Exception!\n"); } return tape_3590_done(device, request); } if (irb->scsw.cmd.dstat & DEV_STAT_CHN_END) { DBF_EVENT(2, "cannel end\n"); return TAPE_IO_PENDING; } if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) { DBF_EVENT(2, "Unit Attention when busy..\n"); return TAPE_IO_PENDING; } DBF_EVENT(6, "xunknownirq\n"); tape_dump_sense_dbf(device, request, irb); return TAPE_IO_STOP; } static int tape_3590_read_dev_chars(struct tape_device *device, struct tape_3590_rdc_data *rdc_data) { int rc; struct tape_request *request; request = tape_alloc_request(1, sizeof(*rdc_data)); if (IS_ERR(request)) return PTR_ERR(request); request->op = TO_RDC; tape_ccw_end(request->cpaddr, CCW_CMD_RDC, sizeof(*rdc_data), request->cpdata); rc = tape_do_io(device, request); if (rc == 0) memcpy(rdc_data, request->cpdata, sizeof(*rdc_data)); tape_free_request(request); return rc; } /* * Setup device function */ static int tape_3590_setup_device(struct tape_device *device) { int rc; struct tape_3590_disc_data *data; struct tape_3590_rdc_data *rdc_data; DBF_EVENT(6, "3590 device setup\n"); data = kzalloc(sizeof(struct tape_3590_disc_data), GFP_KERNEL | GFP_DMA); if (data == NULL) return -ENOMEM; data->read_back_op = READ_PREVIOUS; device->discdata = data; rdc_data = kmalloc(sizeof(*rdc_data), 
GFP_KERNEL | GFP_DMA); if (!rdc_data) { rc = -ENOMEM; goto fail_kmalloc; } rc = tape_3590_read_dev_chars(device, rdc_data); if (rc) { DBF_LH(3, "Read device characteristics failed!\n"); goto fail_rdc_data; } rc = tape_std_assign(device); if (rc) goto fail_rdc_data; if (rdc_data->data[31] == 0x13) { data->crypt_info.capability |= TAPE390_CRYPT_SUPPORTED_MASK; tape_3592_disable_crypt(device); } else { DBF_EVENT(6, "Device has NO crypto support\n"); } /* Try to find out if medium is loaded */ rc = tape_3590_sense_medium(device); if (rc) { DBF_LH(3, "3590 medium sense returned %d\n", rc); goto fail_rdc_data; } return 0; fail_rdc_data: kfree(rdc_data); fail_kmalloc: kfree(data); return rc; } /* * Cleanup device function */ static void tape_3590_cleanup_device(struct tape_device *device) { flush_workqueue(tape_3590_wq); tape_std_unassign(device); kfree(device->discdata); device->discdata = NULL; } /* * List of 3590 magnetic tape commands. */ static tape_mtop_fn tape_3590_mtop[TAPE_NR_MTOPS] = { [MTRESET] = tape_std_mtreset, [MTFSF] = tape_std_mtfsf, [MTBSF] = tape_std_mtbsf, [MTFSR] = tape_std_mtfsr, [MTBSR] = tape_std_mtbsr, [MTWEOF] = tape_std_mtweof, [MTREW] = tape_std_mtrew, [MTOFFL] = tape_std_mtoffl, [MTNOP] = tape_std_mtnop, [MTRETEN] = tape_std_mtreten, [MTBSFM] = tape_std_mtbsfm, [MTFSFM] = tape_std_mtfsfm, [MTEOM] = tape_std_mteom, [MTERASE] = tape_std_mterase, [MTRAS1] = NULL, [MTRAS2] = NULL, [MTRAS3] = NULL, [MTSETBLK] = tape_std_mtsetblk, [MTSETDENSITY] = NULL, [MTSEEK] = tape_3590_mtseek, [MTTELL] = tape_3590_mttell, [MTSETDRVBUFFER] = NULL, [MTFSS] = NULL, [MTBSS] = NULL, [MTWSM] = NULL, [MTLOCK] = NULL, [MTUNLOCK] = NULL, [MTLOAD] = tape_std_mtload, [MTUNLOAD] = tape_std_mtunload, [MTCOMPRESSION] = tape_std_mtcompression, [MTSETPART] = NULL, [MTMKPART] = NULL }; /* * Tape discipline structure for 3590. 
*/ static struct tape_discipline tape_discipline_3590 = { .owner = THIS_MODULE, .setup_device = tape_3590_setup_device, .cleanup_device = tape_3590_cleanup_device, .process_eov = tape_std_process_eov, .irq = tape_3590_irq, .read_block = tape_std_read_block, .write_block = tape_std_write_block, #ifdef CONFIG_S390_TAPE_BLOCK .bread = tape_3590_bread, .free_bread = tape_3590_free_bread, .check_locate = tape_3590_check_locate, #endif .ioctl_fn = tape_3590_ioctl, .mtop_array = tape_3590_mtop }; static struct ccw_device_id tape_3590_ids[] = { {CCW_DEVICE_DEVTYPE(0x3590, 0, 0x3590, 0), .driver_info = tape_3590}, {CCW_DEVICE_DEVTYPE(0x3592, 0, 0x3592, 0), .driver_info = tape_3592}, { /* end of list */ } }; static int tape_3590_online(struct ccw_device *cdev) { return tape_generic_online(dev_get_drvdata(&cdev->dev), &tape_discipline_3590); } static struct ccw_driver tape_3590_driver = { .driver = { .name = "tape_3590", .owner = THIS_MODULE, }, .ids = tape_3590_ids, .probe = tape_generic_probe, .remove = tape_generic_remove, .set_offline = tape_generic_offline, .set_online = tape_3590_online, .freeze = tape_generic_pm_suspend, }; /* * Setup discipline structure. */ static int tape_3590_init(void) { int rc; TAPE_DBF_AREA = debug_register("tape_3590", 2, 2, 4 * sizeof(long)); debug_register_view(TAPE_DBF_AREA, &debug_sprintf_view); #ifdef DBF_LIKE_HELL debug_set_level(TAPE_DBF_AREA, 6); #endif DBF_EVENT(3, "3590 init\n"); tape_3590_wq = alloc_workqueue("tape_3590", 0, 0); if (!tape_3590_wq) return -ENOMEM; /* Register driver for 3590 tapes. 
*/ rc = ccw_driver_register(&tape_3590_driver); if (rc) { destroy_workqueue(tape_3590_wq); DBF_EVENT(3, "3590 init failed\n"); } else DBF_EVENT(3, "3590 registered\n"); return rc; } static void tape_3590_exit(void) { ccw_driver_unregister(&tape_3590_driver); destroy_workqueue(tape_3590_wq); debug_unregister(TAPE_DBF_AREA); } MODULE_DEVICE_TABLE(ccw, tape_3590_ids); MODULE_AUTHOR("(C) 2001,2006 IBM Corporation"); MODULE_DESCRIPTION("Linux on zSeries channel attached 3590 tape device driver"); MODULE_LICENSE("GPL"); module_init(tape_3590_init); module_exit(tape_3590_exit);
gpl-2.0
Mustaavalkosta/htc7x30-3.0
drivers/infiniband/hw/cxgb4/device.c
2777
15138
/* * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/debugfs.h> #include <rdma/ib_verbs.h> #include "iw_cxgb4.h" #define DRV_VERSION "0.1" MODULE_AUTHOR("Steve Wise"); MODULE_DESCRIPTION("Chelsio T4 RDMA Driver"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_VERSION(DRV_VERSION); static LIST_HEAD(uld_ctx_list); static DEFINE_MUTEX(dev_mutex); static struct dentry *c4iw_debugfs_root; struct c4iw_debugfs_data { struct c4iw_dev *devp; char *buf; int bufsize; int pos; }; static int count_idrs(int id, void *p, void *data) { int *countp = data; *countp = *countp + 1; return 0; } static ssize_t debugfs_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { struct c4iw_debugfs_data *d = file->private_data; return simple_read_from_buffer(buf, count, ppos, d->buf, d->pos); } static int dump_qp(int id, void *p, void *data) { struct c4iw_qp *qp = p; struct c4iw_debugfs_data *qpd = data; int space; int cc; if (id != qp->wq.sq.qid) return 0; space = qpd->bufsize - qpd->pos - 1; if (space == 0) return 1; if (qp->ep) cc = snprintf(qpd->buf + qpd->pos, space, "qp sq id %u rq id %u state %u onchip %u " "ep tid %u state %u %pI4:%u->%pI4:%u\n", qp->wq.sq.qid, qp->wq.rq.qid, (int)qp->attr.state, qp->wq.sq.flags & T4_SQ_ONCHIP, qp->ep->hwtid, (int)qp->ep->com.state, &qp->ep->com.local_addr.sin_addr.s_addr, ntohs(qp->ep->com.local_addr.sin_port), &qp->ep->com.remote_addr.sin_addr.s_addr, ntohs(qp->ep->com.remote_addr.sin_port)); else cc = snprintf(qpd->buf + qpd->pos, space, "qp sq id %u rq id %u state %u onchip %u\n", qp->wq.sq.qid, qp->wq.rq.qid, (int)qp->attr.state, qp->wq.sq.flags & T4_SQ_ONCHIP); if (cc < space) qpd->pos += cc; return 0; } static int qp_release(struct inode *inode, struct file *file) { struct c4iw_debugfs_data *qpd = file->private_data; if (!qpd) { printk(KERN_INFO "%s null qpd?\n", __func__); return 0; } kfree(qpd->buf); kfree(qpd); return 0; } static int qp_open(struct inode *inode, struct file *file) { struct c4iw_debugfs_data 
*qpd; int ret = 0; int count = 1; qpd = kmalloc(sizeof *qpd, GFP_KERNEL); if (!qpd) { ret = -ENOMEM; goto out; } qpd->devp = inode->i_private; qpd->pos = 0; spin_lock_irq(&qpd->devp->lock); idr_for_each(&qpd->devp->qpidr, count_idrs, &count); spin_unlock_irq(&qpd->devp->lock); qpd->bufsize = count * 128; qpd->buf = kmalloc(qpd->bufsize, GFP_KERNEL); if (!qpd->buf) { ret = -ENOMEM; goto err1; } spin_lock_irq(&qpd->devp->lock); idr_for_each(&qpd->devp->qpidr, dump_qp, qpd); spin_unlock_irq(&qpd->devp->lock); qpd->buf[qpd->pos++] = 0; file->private_data = qpd; goto out; err1: kfree(qpd); out: return ret; } static const struct file_operations qp_debugfs_fops = { .owner = THIS_MODULE, .open = qp_open, .release = qp_release, .read = debugfs_read, .llseek = default_llseek, }; static int dump_stag(int id, void *p, void *data) { struct c4iw_debugfs_data *stagd = data; int space; int cc; space = stagd->bufsize - stagd->pos - 1; if (space == 0) return 1; cc = snprintf(stagd->buf + stagd->pos, space, "0x%x\n", id<<8); if (cc < space) stagd->pos += cc; return 0; } static int stag_release(struct inode *inode, struct file *file) { struct c4iw_debugfs_data *stagd = file->private_data; if (!stagd) { printk(KERN_INFO "%s null stagd?\n", __func__); return 0; } kfree(stagd->buf); kfree(stagd); return 0; } static int stag_open(struct inode *inode, struct file *file) { struct c4iw_debugfs_data *stagd; int ret = 0; int count = 1; stagd = kmalloc(sizeof *stagd, GFP_KERNEL); if (!stagd) { ret = -ENOMEM; goto out; } stagd->devp = inode->i_private; stagd->pos = 0; spin_lock_irq(&stagd->devp->lock); idr_for_each(&stagd->devp->mmidr, count_idrs, &count); spin_unlock_irq(&stagd->devp->lock); stagd->bufsize = count * sizeof("0x12345678\n"); stagd->buf = kmalloc(stagd->bufsize, GFP_KERNEL); if (!stagd->buf) { ret = -ENOMEM; goto err1; } spin_lock_irq(&stagd->devp->lock); idr_for_each(&stagd->devp->mmidr, dump_stag, stagd); spin_unlock_irq(&stagd->devp->lock); stagd->buf[stagd->pos++] = 0; 
file->private_data = stagd; goto out; err1: kfree(stagd); out: return ret; } static const struct file_operations stag_debugfs_fops = { .owner = THIS_MODULE, .open = stag_open, .release = stag_release, .read = debugfs_read, .llseek = default_llseek, }; static int setup_debugfs(struct c4iw_dev *devp) { struct dentry *de; if (!devp->debugfs_root) return -1; de = debugfs_create_file("qps", S_IWUSR, devp->debugfs_root, (void *)devp, &qp_debugfs_fops); if (de && de->d_inode) de->d_inode->i_size = 4096; de = debugfs_create_file("stags", S_IWUSR, devp->debugfs_root, (void *)devp, &stag_debugfs_fops); if (de && de->d_inode) de->d_inode->i_size = 4096; return 0; } void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx) { struct list_head *pos, *nxt; struct c4iw_qid_list *entry; mutex_lock(&uctx->lock); list_for_each_safe(pos, nxt, &uctx->qpids) { entry = list_entry(pos, struct c4iw_qid_list, entry); list_del_init(&entry->entry); if (!(entry->qid & rdev->qpmask)) c4iw_put_resource(&rdev->resource.qid_fifo, entry->qid, &rdev->resource.qid_fifo_lock); kfree(entry); } list_for_each_safe(pos, nxt, &uctx->qpids) { entry = list_entry(pos, struct c4iw_qid_list, entry); list_del_init(&entry->entry); kfree(entry); } mutex_unlock(&uctx->lock); } void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx) { INIT_LIST_HEAD(&uctx->qpids); INIT_LIST_HEAD(&uctx->cqids); mutex_init(&uctx->lock); } /* Caller takes care of locking if needed */ static int c4iw_rdev_open(struct c4iw_rdev *rdev) { int err; c4iw_init_dev_ucontext(rdev, &rdev->uctx); /* * qpshift is the number of bits to shift the qpid left in order * to get the correct address of the doorbell for that qp. 
*/ rdev->qpshift = PAGE_SHIFT - ilog2(rdev->lldi.udb_density); rdev->qpmask = rdev->lldi.udb_density - 1; rdev->cqshift = PAGE_SHIFT - ilog2(rdev->lldi.ucq_density); rdev->cqmask = rdev->lldi.ucq_density - 1; PDBG("%s dev %s stag start 0x%0x size 0x%0x num stags %d " "pbl start 0x%0x size 0x%0x rq start 0x%0x size 0x%0x " "qp qid start %u size %u cq qid start %u size %u\n", __func__, pci_name(rdev->lldi.pdev), rdev->lldi.vr->stag.start, rdev->lldi.vr->stag.size, c4iw_num_stags(rdev), rdev->lldi.vr->pbl.start, rdev->lldi.vr->pbl.size, rdev->lldi.vr->rq.start, rdev->lldi.vr->rq.size, rdev->lldi.vr->qp.start, rdev->lldi.vr->qp.size, rdev->lldi.vr->cq.start, rdev->lldi.vr->cq.size); PDBG("udb len 0x%x udb base %p db_reg %p gts_reg %p qpshift %lu " "qpmask 0x%x cqshift %lu cqmask 0x%x\n", (unsigned)pci_resource_len(rdev->lldi.pdev, 2), (void *)pci_resource_start(rdev->lldi.pdev, 2), rdev->lldi.db_reg, rdev->lldi.gts_reg, rdev->qpshift, rdev->qpmask, rdev->cqshift, rdev->cqmask); if (c4iw_num_stags(rdev) == 0) { err = -EINVAL; goto err1; } err = c4iw_init_resource(rdev, c4iw_num_stags(rdev), T4_MAX_NUM_PD); if (err) { printk(KERN_ERR MOD "error %d initializing resources\n", err); goto err1; } err = c4iw_pblpool_create(rdev); if (err) { printk(KERN_ERR MOD "error %d initializing pbl pool\n", err); goto err2; } err = c4iw_rqtpool_create(rdev); if (err) { printk(KERN_ERR MOD "error %d initializing rqt pool\n", err); goto err3; } err = c4iw_ocqp_pool_create(rdev); if (err) { printk(KERN_ERR MOD "error %d initializing ocqp pool\n", err); goto err4; } return 0; err4: c4iw_rqtpool_destroy(rdev); err3: c4iw_pblpool_destroy(rdev); err2: c4iw_destroy_resource(&rdev->resource); err1: return err; } static void c4iw_rdev_close(struct c4iw_rdev *rdev) { c4iw_pblpool_destroy(rdev); c4iw_rqtpool_destroy(rdev); c4iw_destroy_resource(&rdev->resource); } struct uld_ctx { struct list_head entry; struct cxgb4_lld_info lldi; struct c4iw_dev *dev; }; static void c4iw_remove(struct uld_ctx 
*ctx) { PDBG("%s c4iw_dev %p\n", __func__, ctx->dev); c4iw_unregister_device(ctx->dev); c4iw_rdev_close(&ctx->dev->rdev); idr_destroy(&ctx->dev->cqidr); idr_destroy(&ctx->dev->qpidr); idr_destroy(&ctx->dev->mmidr); iounmap(ctx->dev->rdev.oc_mw_kva); ib_dealloc_device(&ctx->dev->ibdev); ctx->dev = NULL; } static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop) { struct c4iw_dev *devp; int ret; devp = (struct c4iw_dev *)ib_alloc_device(sizeof(*devp)); if (!devp) { printk(KERN_ERR MOD "Cannot allocate ib device\n"); return ERR_PTR(-ENOMEM); } devp->rdev.lldi = *infop; devp->rdev.oc_mw_pa = pci_resource_start(devp->rdev.lldi.pdev, 2) + (pci_resource_len(devp->rdev.lldi.pdev, 2) - roundup_pow_of_two(devp->rdev.lldi.vr->ocq.size)); devp->rdev.oc_mw_kva = ioremap_wc(devp->rdev.oc_mw_pa, devp->rdev.lldi.vr->ocq.size); PDBG(KERN_INFO MOD "ocq memory: " "hw_start 0x%x size %u mw_pa 0x%lx mw_kva %p\n", devp->rdev.lldi.vr->ocq.start, devp->rdev.lldi.vr->ocq.size, devp->rdev.oc_mw_pa, devp->rdev.oc_mw_kva); ret = c4iw_rdev_open(&devp->rdev); if (ret) { mutex_unlock(&dev_mutex); printk(KERN_ERR MOD "Unable to open CXIO rdev err %d\n", ret); ib_dealloc_device(&devp->ibdev); return ERR_PTR(ret); } idr_init(&devp->cqidr); idr_init(&devp->qpidr); idr_init(&devp->mmidr); spin_lock_init(&devp->lock); if (c4iw_debugfs_root) { devp->debugfs_root = debugfs_create_dir( pci_name(devp->rdev.lldi.pdev), c4iw_debugfs_root); setup_debugfs(devp); } return devp; } static void *c4iw_uld_add(const struct cxgb4_lld_info *infop) { struct uld_ctx *ctx; static int vers_printed; int i; if (!vers_printed++) printk(KERN_INFO MOD "Chelsio T4 RDMA Driver - version %s\n", DRV_VERSION); ctx = kzalloc(sizeof *ctx, GFP_KERNEL); if (!ctx) { ctx = ERR_PTR(-ENOMEM); goto out; } ctx->lldi = *infop; PDBG("%s found device %s nchan %u nrxq %u ntxq %u nports %u\n", __func__, pci_name(ctx->lldi.pdev), ctx->lldi.nchan, ctx->lldi.nrxq, ctx->lldi.ntxq, ctx->lldi.nports); mutex_lock(&dev_mutex); 
list_add_tail(&ctx->entry, &uld_ctx_list); mutex_unlock(&dev_mutex); for (i = 0; i < ctx->lldi.nrxq; i++) PDBG("rxqid[%u] %u\n", i, ctx->lldi.rxq_ids[i]); out: return ctx; } static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp, const struct pkt_gl *gl) { struct uld_ctx *ctx = handle; struct c4iw_dev *dev = ctx->dev; struct sk_buff *skb; const struct cpl_act_establish *rpl; unsigned int opcode; if (gl == NULL) { /* omit RSS and rsp_ctrl at end of descriptor */ unsigned int len = 64 - sizeof(struct rsp_ctrl) - 8; skb = alloc_skb(256, GFP_ATOMIC); if (!skb) goto nomem; __skb_put(skb, len); skb_copy_to_linear_data(skb, &rsp[1], len); } else if (gl == CXGB4_MSG_AN) { const struct rsp_ctrl *rc = (void *)rsp; u32 qid = be32_to_cpu(rc->pldbuflen_qid); c4iw_ev_handler(dev, qid); return 0; } else { skb = cxgb4_pktgl_to_skb(gl, 128, 128); if (unlikely(!skb)) goto nomem; } rpl = cplhdr(skb); opcode = rpl->ot.opcode; if (c4iw_handlers[opcode]) c4iw_handlers[opcode](dev, skb); else printk(KERN_INFO "%s no handler opcode 0x%x...\n", __func__, opcode); return 0; nomem: return -1; } static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state) { struct uld_ctx *ctx = handle; PDBG("%s new_state %u\n", __func__, new_state); switch (new_state) { case CXGB4_STATE_UP: printk(KERN_INFO MOD "%s: Up\n", pci_name(ctx->lldi.pdev)); if (!ctx->dev) { int ret = 0; ctx->dev = c4iw_alloc(&ctx->lldi); if (!IS_ERR(ctx->dev)) ret = c4iw_register_device(ctx->dev); if (IS_ERR(ctx->dev) || ret) printk(KERN_ERR MOD "%s: RDMA registration failed: %d\n", pci_name(ctx->lldi.pdev), ret); } break; case CXGB4_STATE_DOWN: printk(KERN_INFO MOD "%s: Down\n", pci_name(ctx->lldi.pdev)); if (ctx->dev) c4iw_remove(ctx); break; case CXGB4_STATE_START_RECOVERY: printk(KERN_INFO MOD "%s: Fatal Error\n", pci_name(ctx->lldi.pdev)); if (ctx->dev) { struct ib_event event; ctx->dev->rdev.flags |= T4_FATAL_ERROR; memset(&event, 0, sizeof event); event.event = IB_EVENT_DEVICE_FATAL; event.device = 
&ctx->dev->ibdev; ib_dispatch_event(&event); c4iw_remove(ctx); } break; case CXGB4_STATE_DETACH: printk(KERN_INFO MOD "%s: Detach\n", pci_name(ctx->lldi.pdev)); if (ctx->dev) c4iw_remove(ctx); break; } return 0; } static struct cxgb4_uld_info c4iw_uld_info = { .name = DRV_NAME, .add = c4iw_uld_add, .rx_handler = c4iw_uld_rx_handler, .state_change = c4iw_uld_state_change, }; static int __init c4iw_init_module(void) { int err; err = c4iw_cm_init(); if (err) return err; c4iw_debugfs_root = debugfs_create_dir(DRV_NAME, NULL); if (!c4iw_debugfs_root) printk(KERN_WARNING MOD "could not create debugfs entry, continuing\n"); cxgb4_register_uld(CXGB4_ULD_RDMA, &c4iw_uld_info); return 0; } static void __exit c4iw_exit_module(void) { struct uld_ctx *ctx, *tmp; mutex_lock(&dev_mutex); list_for_each_entry_safe(ctx, tmp, &uld_ctx_list, entry) { if (ctx->dev) c4iw_remove(ctx); kfree(ctx); } mutex_unlock(&dev_mutex); cxgb4_unregister_uld(CXGB4_ULD_RDMA); c4iw_cm_term(); debugfs_remove_recursive(c4iw_debugfs_root); } module_init(c4iw_init_module); module_exit(c4iw_exit_module);
gpl-2.0
ArchiDroid/android_kernel_samsung_smdk4412-old
drivers/media/video/cx23885/cx23885-i2c.c
3033
9822
/* * Driver for the Conexant CX23885 PCIe bridge * * Copyright (c) 2006 Steven Toth <stoth@linuxtv.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/delay.h> #include <asm/io.h> #include "cx23885.h" #include <media/v4l2-common.h> static unsigned int i2c_debug; module_param(i2c_debug, int, 0644); MODULE_PARM_DESC(i2c_debug, "enable debug messages [i2c]"); static unsigned int i2c_scan; module_param(i2c_scan, int, 0444); MODULE_PARM_DESC(i2c_scan, "scan i2c bus at insmod time"); #define dprintk(level, fmt, arg...)\ do { if (i2c_debug >= level)\ printk(KERN_DEBUG "%s/0: " fmt, dev->name, ## arg);\ } while (0) #define I2C_WAIT_DELAY 32 #define I2C_WAIT_RETRY 64 #define I2C_EXTEND (1 << 3) #define I2C_NOSTOP (1 << 4) static inline int i2c_slave_did_ack(struct i2c_adapter *i2c_adap) { struct cx23885_i2c *bus = i2c_adap->algo_data; struct cx23885_dev *dev = bus->dev; return cx_read(bus->reg_stat) & 0x01; } static inline int i2c_is_busy(struct i2c_adapter *i2c_adap) { struct cx23885_i2c *bus = i2c_adap->algo_data; struct cx23885_dev *dev = bus->dev; return cx_read(bus->reg_stat) & 0x02 ? 
1 : 0; } static int i2c_wait_done(struct i2c_adapter *i2c_adap) { int count; for (count = 0; count < I2C_WAIT_RETRY; count++) { if (!i2c_is_busy(i2c_adap)) break; udelay(I2C_WAIT_DELAY); } if (I2C_WAIT_RETRY == count) return 0; return 1; } static int i2c_sendbytes(struct i2c_adapter *i2c_adap, const struct i2c_msg *msg, int joined_rlen) { struct cx23885_i2c *bus = i2c_adap->algo_data; struct cx23885_dev *dev = bus->dev; u32 wdata, addr, ctrl; int retval, cnt; if (joined_rlen) dprintk(1, "%s(msg->wlen=%d, nextmsg->rlen=%d)\n", __func__, msg->len, joined_rlen); else dprintk(1, "%s(msg->len=%d)\n", __func__, msg->len); /* Deal with i2c probe functions with zero payload */ if (msg->len == 0) { cx_write(bus->reg_addr, msg->addr << 25); cx_write(bus->reg_ctrl, bus->i2c_period | (1 << 2)); if (!i2c_wait_done(i2c_adap)) return -EIO; if (!i2c_slave_did_ack(i2c_adap)) return -ENXIO; dprintk(1, "%s() returns 0\n", __func__); return 0; } /* dev, reg + first byte */ addr = (msg->addr << 25) | msg->buf[0]; wdata = msg->buf[0]; ctrl = bus->i2c_period | (1 << 12) | (1 << 2); if (msg->len > 1) ctrl |= I2C_NOSTOP | I2C_EXTEND; else if (joined_rlen) ctrl |= I2C_NOSTOP; cx_write(bus->reg_addr, addr); cx_write(bus->reg_wdata, wdata); cx_write(bus->reg_ctrl, ctrl); if (!i2c_wait_done(i2c_adap)) goto eio; if (i2c_debug) { printk(" <W %02x %02x", msg->addr << 1, msg->buf[0]); if (!(ctrl & I2C_NOSTOP)) printk(" >\n"); } for (cnt = 1; cnt < msg->len; cnt++) { /* following bytes */ wdata = msg->buf[cnt]; ctrl = bus->i2c_period | (1 << 12) | (1 << 2); if (cnt < msg->len - 1) ctrl |= I2C_NOSTOP | I2C_EXTEND; else if (joined_rlen) ctrl |= I2C_NOSTOP; cx_write(bus->reg_addr, addr); cx_write(bus->reg_wdata, wdata); cx_write(bus->reg_ctrl, ctrl); if (!i2c_wait_done(i2c_adap)) goto eio; if (i2c_debug) { dprintk(1, " %02x", msg->buf[cnt]); if (!(ctrl & I2C_NOSTOP)) dprintk(1, " >\n"); } } return msg->len; eio: retval = -EIO; if (i2c_debug) printk(KERN_ERR " ERR: %d\n", retval); return retval; } 
static int i2c_readbytes(struct i2c_adapter *i2c_adap, const struct i2c_msg *msg, int joined) { struct cx23885_i2c *bus = i2c_adap->algo_data; struct cx23885_dev *dev = bus->dev; u32 ctrl, cnt; int retval; if (i2c_debug && !joined) dprintk(1, "%s(msg->len=%d)\n", __func__, msg->len); /* Deal with i2c probe functions with zero payload */ if (msg->len == 0) { cx_write(bus->reg_addr, msg->addr << 25); cx_write(bus->reg_ctrl, bus->i2c_period | (1 << 2) | 1); if (!i2c_wait_done(i2c_adap)) return -EIO; if (!i2c_slave_did_ack(i2c_adap)) return -ENXIO; dprintk(1, "%s() returns 0\n", __func__); return 0; } if (i2c_debug) { if (joined) dprintk(1, " R"); else dprintk(1, " <R %02x", (msg->addr << 1) + 1); } for (cnt = 0; cnt < msg->len; cnt++) { ctrl = bus->i2c_period | (1 << 12) | (1 << 2) | 1; if (cnt < msg->len - 1) ctrl |= I2C_NOSTOP | I2C_EXTEND; cx_write(bus->reg_addr, msg->addr << 25); cx_write(bus->reg_ctrl, ctrl); if (!i2c_wait_done(i2c_adap)) goto eio; msg->buf[cnt] = cx_read(bus->reg_rdata) & 0xff; if (i2c_debug) { dprintk(1, " %02x", msg->buf[cnt]); if (!(ctrl & I2C_NOSTOP)) dprintk(1, " >\n"); } } return msg->len; eio: retval = -EIO; if (i2c_debug) printk(KERN_ERR " ERR: %d\n", retval); return retval; } static int i2c_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs, int num) { struct cx23885_i2c *bus = i2c_adap->algo_data; struct cx23885_dev *dev = bus->dev; int i, retval = 0; dprintk(1, "%s(num = %d)\n", __func__, num); for (i = 0 ; i < num; i++) { dprintk(1, "%s(num = %d) addr = 0x%02x len = 0x%x\n", __func__, num, msgs[i].addr, msgs[i].len); if (msgs[i].flags & I2C_M_RD) { /* read */ retval = i2c_readbytes(i2c_adap, &msgs[i], 0); } else if (i + 1 < num && (msgs[i + 1].flags & I2C_M_RD) && msgs[i].addr == msgs[i + 1].addr) { /* write then read from same address */ retval = i2c_sendbytes(i2c_adap, &msgs[i], msgs[i + 1].len); if (retval < 0) goto err; i++; retval = i2c_readbytes(i2c_adap, &msgs[i], 1); } else { /* write */ retval = i2c_sendbytes(i2c_adap, 
&msgs[i], 0); } if (retval < 0) goto err; } return num; err: return retval; } static u32 cx23885_functionality(struct i2c_adapter *adap) { return I2C_FUNC_SMBUS_EMUL | I2C_FUNC_I2C; } static struct i2c_algorithm cx23885_i2c_algo_template = { .master_xfer = i2c_xfer, .functionality = cx23885_functionality, }; /* ----------------------------------------------------------------------- */ static struct i2c_adapter cx23885_i2c_adap_template = { .name = "cx23885", .owner = THIS_MODULE, .algo = &cx23885_i2c_algo_template, }; static struct i2c_client cx23885_i2c_client_template = { .name = "cx23885 internal", }; static char *i2c_devs[128] = { [0x10 >> 1] = "tda10048", [0x12 >> 1] = "dib7000pc", [0x1c >> 1] = "lgdt3303", [0x86 >> 1] = "tda9887", [0x32 >> 1] = "cx24227", [0x88 >> 1] = "cx25837", [0x84 >> 1] = "tda8295", [0xa0 >> 1] = "eeprom", [0xc0 >> 1] = "tuner/mt2131/tda8275", [0xc2 >> 1] = "tuner/mt2131/tda8275/xc5000/xc3028", [0xc8 >> 1] = "tuner/xc3028L", }; static void do_i2c_scan(char *name, struct i2c_client *c) { unsigned char buf; int i, rc; for (i = 0; i < 128; i++) { c->addr = i; rc = i2c_master_recv(c, &buf, 0); if (rc < 0) continue; printk(KERN_INFO "%s: i2c scan: found device @ 0x%x [%s]\n", name, i << 1, i2c_devs[i] ? 
i2c_devs[i] : "???"); } } /* init + register i2c algo-bit adapter */ int cx23885_i2c_register(struct cx23885_i2c *bus) { struct cx23885_dev *dev = bus->dev; dprintk(1, "%s(bus = %d)\n", __func__, bus->nr); memcpy(&bus->i2c_adap, &cx23885_i2c_adap_template, sizeof(bus->i2c_adap)); memcpy(&bus->i2c_algo, &cx23885_i2c_algo_template, sizeof(bus->i2c_algo)); memcpy(&bus->i2c_client, &cx23885_i2c_client_template, sizeof(bus->i2c_client)); bus->i2c_adap.dev.parent = &dev->pci->dev; strlcpy(bus->i2c_adap.name, bus->dev->name, sizeof(bus->i2c_adap.name)); bus->i2c_algo.data = bus; bus->i2c_adap.algo_data = bus; i2c_set_adapdata(&bus->i2c_adap, &dev->v4l2_dev); i2c_add_adapter(&bus->i2c_adap); bus->i2c_client.adapter = &bus->i2c_adap; if (0 == bus->i2c_rc) { dprintk(1, "%s: i2c bus %d registered\n", dev->name, bus->nr); if (i2c_scan) { printk(KERN_INFO "%s: scan bus %d:\n", dev->name, bus->nr); do_i2c_scan(dev->name, &bus->i2c_client); } } else printk(KERN_WARNING "%s: i2c bus %d register FAILED\n", dev->name, bus->nr); /* Instantiate the IR receiver device, if present */ if (0 == bus->i2c_rc) { struct i2c_board_info info; const unsigned short addr_list[] = { 0x6b, I2C_CLIENT_END }; memset(&info, 0, sizeof(struct i2c_board_info)); strlcpy(info.type, "ir_video", I2C_NAME_SIZE); /* Use quick read command for probe, some IR chips don't * support writes */ i2c_new_probed_device(&bus->i2c_adap, &info, addr_list, i2c_probe_func_quick_read); } return bus->i2c_rc; } int cx23885_i2c_unregister(struct cx23885_i2c *bus) { i2c_del_adapter(&bus->i2c_adap); return 0; } void cx23885_av_clk(struct cx23885_dev *dev, int enable) { /* write 0 to bus 2 addr 0x144 via i2x_xfer() */ char buffer[3]; struct i2c_msg msg; dprintk(1, "%s(enabled = %d)\n", __func__, enable); /* Register 0x144 */ buffer[0] = 0x01; buffer[1] = 0x44; if (enable == 1) buffer[2] = 0x05; else buffer[2] = 0x00; msg.addr = 0x44; msg.flags = I2C_M_TEN; msg.len = 3; msg.buf = buffer; i2c_xfer(&dev->i2c_bus[2].i2c_adap, &msg, 1); 
} /* ----------------------------------------------------------------------- */ /* * Local variables: * c-basic-offset: 8 * End: */
gpl-2.0
system1357/Zenfone-Kernel
arch/arm/mach-msm/devices-msm8960.c
3801
2255
/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <mach/irqs-8960.h>
#include <mach/board.h>
#include "devices.h"

/* Each GSBI core's UARTDM register window sits at a fixed 0x40000
 * offset inside the GSBI block. */
#define MSM_GSBI2_PHYS		0x16100000
#define MSM_UART2DM_PHYS	(MSM_GSBI2_PHYS + 0x40000)

#define MSM_GSBI5_PHYS		0x16400000
#define MSM_UART5DM_PHYS	(MSM_GSBI5_PHYS + 0x40000)

/* IRQ + one page of UARTDM registers + one page of GSBI control
 * registers, matching the "uart_resource"/"gsbi_resource" names the
 * msm_serial driver looks up. */
static struct resource resources_uart_gsbi2[] = {
	{
		.start	= GSBI2_UARTDM_IRQ,
		.end	= GSBI2_UARTDM_IRQ,
		.flags	= IORESOURCE_IRQ,
	},
	{
		.start	= MSM_UART2DM_PHYS,
		.end	= MSM_UART2DM_PHYS + PAGE_SIZE - 1,
		.name	= "uart_resource",
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= MSM_GSBI2_PHYS,
		.end	= MSM_GSBI2_PHYS + PAGE_SIZE - 1,
		.name	= "gsbi_resource",
		.flags	= IORESOURCE_MEM,
	},
};

struct platform_device msm8960_device_uart_gsbi2 = {
	.name	= "msm_serial",
	.id	= 0,
	.num_resources	= ARRAY_SIZE(resources_uart_gsbi2),
	.resource	= resources_uart_gsbi2,
};

static struct resource resources_uart_gsbi5[] = {
	{
		.start	= GSBI5_UARTDM_IRQ,
		.end	= GSBI5_UARTDM_IRQ,
		.flags	= IORESOURCE_IRQ,
	},
	{
		.start	= MSM_UART5DM_PHYS,
		.end	= MSM_UART5DM_PHYS + PAGE_SIZE - 1,
		.name	= "uart_resource",
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= MSM_GSBI5_PHYS,
		.end	= MSM_GSBI5_PHYS + PAGE_SIZE - 1,
		.name	= "gsbi_resource",
		.flags	= IORESOURCE_MEM,
	},
};

struct platform_device msm8960_device_uart_gsbi5 = {
	.name	= "msm_serial",
	/* NOTE(review): same name + id as msm8960_device_uart_gsbi2 above;
	 * two platform devices with identical "msm_serial".0 cannot both be
	 * registered. Presumably a board file registers only one of them as
	 * the console — confirm, or give this one a distinct id. */
	.id	= 0,
	.num_resources	= ARRAY_SIZE(resources_uart_gsbi5),
	.resource	= resources_uart_gsbi5,
};
gpl-2.0
CyanideL/android_kernel_samsung_d2
arch/arm/mach-s3c64xx/mach-smartq7.c
4825
3809
/*
 * linux/arch/arm/mach-s3c64xx/mach-smartq7.c
 *
 * Copyright (C) 2010 Maurus Cuelenaere
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#include <linux/fb.h>
#include <linux/gpio.h>
#include <linux/gpio_keys.h>
#include <linux/init.h>
#include <linux/input.h>
#include <linux/leds.h>
#include <linux/platform_device.h>

#include <asm/hardware/vic.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>

#include <mach/map.h>
#include <mach/regs-gpio.h>

#include <plat/cpu.h>
#include <plat/devs.h>
#include <plat/fb.h>
#include <plat/gpio-cfg.h>
#include <plat/regs-fb-v4.h>

#include "common.h"
#include "mach-smartq.h"

/* Front-panel LEDs; both are wired active-low on GPN8/GPN9. */
static struct gpio_led smartq7_leds[] = {
	{
		.name			= "smartq7:red",
		.active_low		= 1,
		.gpio			= S3C64XX_GPN(8),
	},
	{
		.name			= "smartq7:green",
		.active_low		= 1,
		.gpio			= S3C64XX_GPN(9),
	},
};

static struct gpio_led_platform_data smartq7_led_data = {
	.num_leds = ARRAY_SIZE(smartq7_leds),
	.leds = smartq7_leds,
};

static struct platform_device smartq7_leds_device = {
	.name = "leds-gpio",
	.id = -1,
	.dev.platform_data = &smartq7_led_data,
};

/* Labels according to the SmartQ manual */
/* All buttons are active-low with a 5 ms debounce. */
static struct gpio_keys_button smartq7_buttons[] = {
	{
		.gpio			= S3C64XX_GPL(14),
		.code			= KEY_POWER,
		.desc			= "Power",
		.active_low		= 1,
		.debounce_interval	= 5,
		.type                   = EV_KEY,
	},
	{
		.gpio			= S3C64XX_GPN(2),
		.code			= KEY_FN,
		.desc			= "Function",
		.active_low		= 1,
		.debounce_interval	= 5,
		.type                   = EV_KEY,
	},
	{
		.gpio			= S3C64XX_GPN(3),
		.code			= KEY_KPMINUS,
		.desc			= "Minus",
		.active_low		= 1,
		.debounce_interval	= 5,
		.type                   = EV_KEY,
	},
	{
		.gpio			= S3C64XX_GPN(4),
		.code			= KEY_KPPLUS,
		.desc			= "Plus",
		.active_low		= 1,
		.debounce_interval	= 5,
		.type                   = EV_KEY,
	},
	{
		.gpio			= S3C64XX_GPN(12),
		.code			= KEY_ENTER,
		.desc			= "Enter",
		.active_low		= 1,
		.debounce_interval	= 5,
		.type                   = EV_KEY,
	},
	{
		.gpio			= S3C64XX_GPN(15),
		.code			= KEY_ESC,
		.desc			= "Cancel",
		.active_low		= 1,
		.debounce_interval	= 5,
		.type                   = EV_KEY,
	},
};

static struct gpio_keys_platform_data smartq7_buttons_data  = {
	.buttons	= smartq7_buttons,
	.nbuttons	= ARRAY_SIZE(smartq7_buttons),
};

static struct platform_device smartq7_buttons_device  = {
	.name		= "gpio-keys",
	.id		= 0,
	.num_resources	= 0,
	.dev		= {
		.platform_data	= &smartq7_buttons_data,
	}
};

/* 800x480 LCD timings for the SmartQ 7 panel. */
static struct s3c_fb_pd_win smartq7_fb_win0 = {
	.win_mode	= {
		.left_margin	= 3,
		.right_margin	= 5,
		.upper_margin	= 1,
		.lower_margin	= 20,
		.hsync_len	= 10,
		.vsync_len	= 3,
		.xres		= 800,
		.yres		= 480,
		.refresh	= 80,
	},
	.max_bpp	= 32,
	.default_bpp	= 16,
};

static struct s3c_fb_platdata smartq7_lcd_pdata __initdata = {
	.setup_gpio	= s3c64xx_fb_gpio_setup_24bpp,
	.win[0]		= &smartq7_fb_win0,
	.vidcon0	= VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
	.vidcon1	= VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC |
			  VIDCON1_INV_VCLK,
};

static struct platform_device *smartq7_devices[] __initdata = {
	&smartq7_leds_device,
	&smartq7_buttons_device,
};

/* Board init: set up the LCD, run the shared SmartQ init, then add the
 * SmartQ-7-specific LED and button devices. */
static void __init smartq7_machine_init(void)
{
	s3c_fb_set_platdata(&smartq7_lcd_pdata);

	smartq_machine_init();

	platform_add_devices(smartq7_devices, ARRAY_SIZE(smartq7_devices));
}

MACHINE_START(SMARTQ7, "SmartQ 7")
	/* Maintainer: Maurus Cuelenaere <mcuelenaere AT gmail DOT com> */
	.atag_offset	= 0x100,
	.init_irq	= s3c6410_init_irq,
	.handle_irq	= vic_handle_irq,
	.map_io		= smartq_map_io,
	.init_machine	= smartq7_machine_init,
	.timer		= &s3c24xx_timer,
	.restart	= s3c64xx_restart,
MACHINE_END
gpl-2.0
LeroViten/LerNex-Bacon
arch/arm/mm/copypage-v3.c
4825
2078
/* * linux/arch/arm/mm/copypage-v3.c * * Copyright (C) 1995-1999 Russell King * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/init.h> #include <linux/highmem.h> /* * ARMv3 optimised copy_user_highpage * * FIXME: do we need to handle cache stuff... */ static void __naked v3_copy_user_page(void *kto, const void *kfrom) { asm("\n\ stmfd sp!, {r4, lr} @ 2\n\ mov r2, %2 @ 1\n\ ldmia %0!, {r3, r4, ip, lr} @ 4+1\n\ 1: stmia %1!, {r3, r4, ip, lr} @ 4\n\ ldmia %0!, {r3, r4, ip, lr} @ 4+1\n\ stmia %1!, {r3, r4, ip, lr} @ 4\n\ ldmia %0!, {r3, r4, ip, lr} @ 4+1\n\ stmia %1!, {r3, r4, ip, lr} @ 4\n\ ldmia %0!, {r3, r4, ip, lr} @ 4\n\ subs r2, r2, #1 @ 1\n\ stmia %1!, {r3, r4, ip, lr} @ 4\n\ ldmneia %0!, {r3, r4, ip, lr} @ 4\n\ bne 1b @ 1\n\ ldmfd sp!, {r4, pc} @ 3" : : "r" (kfrom), "r" (kto), "I" (PAGE_SIZE / 64)); } void v3_copy_user_highpage(struct page *to, struct page *from, unsigned long vaddr, struct vm_area_struct *vma) { void *kto, *kfrom; kto = kmap_atomic(to); kfrom = kmap_atomic(from); v3_copy_user_page(kto, kfrom); kunmap_atomic(kfrom); kunmap_atomic(kto); } /* * ARMv3 optimised clear_user_page * * FIXME: do we need to handle cache stuff... */ void v3_clear_user_highpage(struct page *page, unsigned long vaddr) { void *ptr, *kaddr = kmap_atomic(page); asm volatile("\n\ mov r1, %2 @ 1\n\ mov r2, #0 @ 1\n\ mov r3, #0 @ 1\n\ mov ip, #0 @ 1\n\ mov lr, #0 @ 1\n\ 1: stmia %0!, {r2, r3, ip, lr} @ 4\n\ stmia %0!, {r2, r3, ip, lr} @ 4\n\ stmia %0!, {r2, r3, ip, lr} @ 4\n\ stmia %0!, {r2, r3, ip, lr} @ 4\n\ subs r1, r1, #1 @ 1\n\ bne 1b @ 1" : "=r" (ptr) : "0" (kaddr), "I" (PAGE_SIZE / 64) : "r1", "r2", "r3", "ip", "lr"); kunmap_atomic(kaddr); } struct cpu_user_fns v3_user_fns __initdata = { .cpu_clear_user_highpage = v3_clear_user_highpage, .cpu_copy_user_highpage = v3_copy_user_highpage, };
gpl-2.0
mdeejay/android_kernel_mako
fs/jbd2/checkpoint.c
4825
20401
/* * linux/fs/jbd2/checkpoint.c * * Written by Stephen C. Tweedie <sct@redhat.com>, 1999 * * Copyright 1999 Red Hat Software --- All Rights Reserved * * This file is part of the Linux kernel and is made available under * the terms of the GNU General Public License, version 2, or at your * option, any later version, incorporated herein by reference. * * Checkpoint routines for the generic filesystem journaling code. * Part of the ext2fs journaling system. * * Checkpointing is the process of ensuring that a section of the log is * committed fully to disk, so that that portion of the log can be * reused. */ #include <linux/time.h> #include <linux/fs.h> #include <linux/jbd2.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/blkdev.h> #include <trace/events/jbd2.h> /* * Unlink a buffer from a transaction checkpoint list. * * Called with j_list_lock held. */ static inline void __buffer_unlink_first(struct journal_head *jh) { transaction_t *transaction = jh->b_cp_transaction; jh->b_cpnext->b_cpprev = jh->b_cpprev; jh->b_cpprev->b_cpnext = jh->b_cpnext; if (transaction->t_checkpoint_list == jh) { transaction->t_checkpoint_list = jh->b_cpnext; if (transaction->t_checkpoint_list == jh) transaction->t_checkpoint_list = NULL; } } /* * Unlink a buffer from a transaction checkpoint(io) list. * * Called with j_list_lock held. 
*/ static inline void __buffer_unlink(struct journal_head *jh) { transaction_t *transaction = jh->b_cp_transaction; __buffer_unlink_first(jh); if (transaction->t_checkpoint_io_list == jh) { transaction->t_checkpoint_io_list = jh->b_cpnext; if (transaction->t_checkpoint_io_list == jh) transaction->t_checkpoint_io_list = NULL; } } /* * Move a buffer from the checkpoint list to the checkpoint io list * * Called with j_list_lock held */ static inline void __buffer_relink_io(struct journal_head *jh) { transaction_t *transaction = jh->b_cp_transaction; __buffer_unlink_first(jh); if (!transaction->t_checkpoint_io_list) { jh->b_cpnext = jh->b_cpprev = jh; } else { jh->b_cpnext = transaction->t_checkpoint_io_list; jh->b_cpprev = transaction->t_checkpoint_io_list->b_cpprev; jh->b_cpprev->b_cpnext = jh; jh->b_cpnext->b_cpprev = jh; } transaction->t_checkpoint_io_list = jh; } /* * Try to release a checkpointed buffer from its transaction. * Returns 1 if we released it and 2 if we also released the * whole transaction. * * Requires j_list_lock */ static int __try_to_free_cp_buf(struct journal_head *jh) { int ret = 0; struct buffer_head *bh = jh2bh(jh); if (jh->b_transaction == NULL && !buffer_locked(bh) && !buffer_dirty(bh) && !buffer_write_io_error(bh)) { /* * Get our reference so that bh cannot be freed before * we unlock it */ get_bh(bh); JBUFFER_TRACE(jh, "remove from checkpoint list"); ret = __jbd2_journal_remove_checkpoint(jh) + 1; BUFFER_TRACE(bh, "release"); __brelse(bh); } return ret; } /* * __jbd2_log_wait_for_space: wait until there is space in the journal. * * Called under j-state_lock *only*. It will be unlocked if we have to wait * for a checkpoint to free up some space in the log. 
*/ void __jbd2_log_wait_for_space(journal_t *journal) { int nblocks, space_left; /* assert_spin_locked(&journal->j_state_lock); */ nblocks = jbd_space_needed(journal); while (__jbd2_log_space_left(journal) < nblocks) { if (journal->j_flags & JBD2_ABORT) return; write_unlock(&journal->j_state_lock); mutex_lock(&journal->j_checkpoint_mutex); /* * Test again, another process may have checkpointed while we * were waiting for the checkpoint lock. If there are no * transactions ready to be checkpointed, try to recover * journal space by calling cleanup_journal_tail(), and if * that doesn't work, by waiting for the currently committing * transaction to complete. If there is absolutely no way * to make progress, this is either a BUG or corrupted * filesystem, so abort the journal and leave a stack * trace for forensic evidence. */ write_lock(&journal->j_state_lock); spin_lock(&journal->j_list_lock); nblocks = jbd_space_needed(journal); space_left = __jbd2_log_space_left(journal); if (space_left < nblocks) { int chkpt = journal->j_checkpoint_transactions != NULL; tid_t tid = 0; if (journal->j_committing_transaction) tid = journal->j_committing_transaction->t_tid; spin_unlock(&journal->j_list_lock); write_unlock(&journal->j_state_lock); if (chkpt) { jbd2_log_do_checkpoint(journal); } else if (jbd2_cleanup_journal_tail(journal) == 0) { /* We were able to recover space; yay! */ ; } else if (tid) { jbd2_log_wait_commit(journal, tid); } else { printk(KERN_ERR "%s: needed %d blocks and " "only had %d space available\n", __func__, nblocks, space_left); printk(KERN_ERR "%s: no way to get more " "journal space in %s\n", __func__, journal->j_devname); WARN_ON(1); jbd2_journal_abort(journal, 0); } write_lock(&journal->j_state_lock); } else { spin_unlock(&journal->j_list_lock); } mutex_unlock(&journal->j_checkpoint_mutex); } } /* * Clean up transaction's list of buffers submitted for io. * We wait for any pending IO to complete and remove any clean * buffers. 
Note that we take the buffers in the opposite ordering * from the one in which they were submitted for IO. * * Return 0 on success, and return <0 if some buffers have failed * to be written out. * * Called with j_list_lock held. */ static int __wait_cp_io(journal_t *journal, transaction_t *transaction) { struct journal_head *jh; struct buffer_head *bh; tid_t this_tid; int released = 0; int ret = 0; this_tid = transaction->t_tid; restart: /* Did somebody clean up the transaction in the meanwhile? */ if (journal->j_checkpoint_transactions != transaction || transaction->t_tid != this_tid) return ret; while (!released && transaction->t_checkpoint_io_list) { jh = transaction->t_checkpoint_io_list; bh = jh2bh(jh); get_bh(bh); if (buffer_locked(bh)) { spin_unlock(&journal->j_list_lock); wait_on_buffer(bh); /* the journal_head may have gone by now */ BUFFER_TRACE(bh, "brelse"); __brelse(bh); spin_lock(&journal->j_list_lock); goto restart; } if (unlikely(buffer_write_io_error(bh))) ret = -EIO; /* * Now in whatever state the buffer currently is, we know that * it has been written out and so we can drop it from the list */ released = __jbd2_journal_remove_checkpoint(jh); __brelse(bh); } return ret; } static void __flush_batch(journal_t *journal, int *batch_count) { int i; struct blk_plug plug; blk_start_plug(&plug); for (i = 0; i < *batch_count; i++) write_dirty_buffer(journal->j_chkpt_bhs[i], WRITE_SYNC); blk_finish_plug(&plug); for (i = 0; i < *batch_count; i++) { struct buffer_head *bh = journal->j_chkpt_bhs[i]; BUFFER_TRACE(bh, "brelse"); __brelse(bh); } *batch_count = 0; } /* * Try to flush one buffer from the checkpoint list to disk. * * Return 1 if something happened which requires us to abort the current * scan of the checkpoint list. Return <0 if the buffer has failed to * be written out. 
* * Called with j_list_lock held and drops it if 1 is returned */ static int __process_buffer(journal_t *journal, struct journal_head *jh, int *batch_count, transaction_t *transaction) { struct buffer_head *bh = jh2bh(jh); int ret = 0; if (buffer_locked(bh)) { get_bh(bh); spin_unlock(&journal->j_list_lock); wait_on_buffer(bh); /* the journal_head may have gone by now */ BUFFER_TRACE(bh, "brelse"); __brelse(bh); ret = 1; } else if (jh->b_transaction != NULL) { transaction_t *t = jh->b_transaction; tid_t tid = t->t_tid; transaction->t_chp_stats.cs_forced_to_close++; spin_unlock(&journal->j_list_lock); if (unlikely(journal->j_flags & JBD2_UNMOUNT)) /* * The journal thread is dead; so starting and * waiting for a commit to finish will cause * us to wait for a _very_ long time. */ printk(KERN_ERR "JBD2: %s: " "Waiting for Godot: block %llu\n", journal->j_devname, (unsigned long long) bh->b_blocknr); jbd2_log_start_commit(journal, tid); jbd2_log_wait_commit(journal, tid); ret = 1; } else if (!buffer_dirty(bh)) { ret = 1; if (unlikely(buffer_write_io_error(bh))) ret = -EIO; get_bh(bh); BUFFER_TRACE(bh, "remove from checkpoint"); __jbd2_journal_remove_checkpoint(jh); spin_unlock(&journal->j_list_lock); __brelse(bh); } else { /* * Important: we are about to write the buffer, and * possibly block, while still holding the journal lock. * We cannot afford to let the transaction logic start * messing around with this buffer before we write it to * disk, as that would break recoverability. */ BUFFER_TRACE(bh, "queue"); get_bh(bh); J_ASSERT_BH(bh, !buffer_jwrite(bh)); journal->j_chkpt_bhs[*batch_count] = bh; __buffer_relink_io(jh); transaction->t_chp_stats.cs_written++; (*batch_count)++; if (*batch_count == JBD2_NR_BATCH) { spin_unlock(&journal->j_list_lock); __flush_batch(journal, batch_count); ret = 1; } } return ret; } /* * Perform an actual checkpoint. We take the first transaction on the * list of transactions to be checkpointed and send all its buffers * to disk. 
We submit larger chunks of data at once. * * The journal should be locked before calling this function. * Called with j_checkpoint_mutex held. */ int jbd2_log_do_checkpoint(journal_t *journal) { transaction_t *transaction; tid_t this_tid; int result; jbd_debug(1, "Start checkpoint\n"); /* * First thing: if there are any transactions in the log which * don't need checkpointing, just eliminate them from the * journal straight away. */ result = jbd2_cleanup_journal_tail(journal); trace_jbd2_checkpoint(journal, result); jbd_debug(1, "cleanup_journal_tail returned %d\n", result); if (result <= 0) return result; /* * OK, we need to start writing disk blocks. Take one transaction * and write it. */ result = 0; spin_lock(&journal->j_list_lock); if (!journal->j_checkpoint_transactions) goto out; transaction = journal->j_checkpoint_transactions; if (transaction->t_chp_stats.cs_chp_time == 0) transaction->t_chp_stats.cs_chp_time = jiffies; this_tid = transaction->t_tid; restart: /* * If someone cleaned up this transaction while we slept, we're * done (maybe it's a new transaction, but it fell at the same * address). */ if (journal->j_checkpoint_transactions == transaction && transaction->t_tid == this_tid) { int batch_count = 0; struct journal_head *jh; int retry = 0, err; while (!retry && transaction->t_checkpoint_list) { jh = transaction->t_checkpoint_list; retry = __process_buffer(journal, jh, &batch_count, transaction); if (retry < 0 && !result) result = retry; if (!retry && (need_resched() || spin_needbreak(&journal->j_list_lock))) { spin_unlock(&journal->j_list_lock); retry = 1; break; } } if (batch_count) { if (!retry) { spin_unlock(&journal->j_list_lock); retry = 1; } __flush_batch(journal, &batch_count); } if (retry) { spin_lock(&journal->j_list_lock); goto restart; } /* * Now we have cleaned up the first transaction's checkpoint * list. 
Let's clean up the second one */ err = __wait_cp_io(journal, transaction); if (!result) result = err; } out: spin_unlock(&journal->j_list_lock); if (result < 0) jbd2_journal_abort(journal, result); else result = jbd2_cleanup_journal_tail(journal); return (result < 0) ? result : 0; } /* * Check the list of checkpoint transactions for the journal to see if * we have already got rid of any since the last update of the log tail * in the journal superblock. If so, we can instantly roll the * superblock forward to remove those transactions from the log. * * Return <0 on error, 0 on success, 1 if there was nothing to clean up. * * Called with the journal lock held. * * This is the only part of the journaling code which really needs to be * aware of transaction aborts. Checkpointing involves writing to the * main filesystem area rather than to the journal, so it can proceed * even in abort state, but we must not update the super block if * checkpointing may have failed. Otherwise, we would lose some metadata * buffers which should be written-back to the filesystem. */ int jbd2_cleanup_journal_tail(journal_t *journal) { tid_t first_tid; unsigned long blocknr; if (is_journal_aborted(journal)) return 1; if (!jbd2_journal_get_log_tail(journal, &first_tid, &blocknr)) return 1; J_ASSERT(blocknr != 0); /* * We need to make sure that any blocks that were recently written out * --- perhaps by jbd2_log_do_checkpoint() --- are flushed out before * we drop the transactions from the journal. It's unlikely this will * be necessary, especially with an appropriately sized journal, but we * need this to guarantee correctness. Fortunately * jbd2_cleanup_journal_tail() doesn't get called all that often. 
*/ if (journal->j_flags & JBD2_BARRIER) blkdev_issue_flush(journal->j_fs_dev, GFP_KERNEL, NULL); __jbd2_update_log_tail(journal, first_tid, blocknr); return 0; } /* Checkpoint list management */ /* * journal_clean_one_cp_list * * Find all the written-back checkpoint buffers in the given list and * release them. * * Called with the journal locked. * Called with j_list_lock held. * Returns number of buffers reaped (for debug) */ static int journal_clean_one_cp_list(struct journal_head *jh, int *released) { struct journal_head *last_jh; struct journal_head *next_jh = jh; int ret, freed = 0; *released = 0; if (!jh) return 0; last_jh = jh->b_cpprev; do { jh = next_jh; next_jh = jh->b_cpnext; ret = __try_to_free_cp_buf(jh); if (ret) { freed++; if (ret == 2) { *released = 1; return freed; } } /* * This function only frees up some memory * if possible so we dont have an obligation * to finish processing. Bail out if preemption * requested: */ if (need_resched()) return freed; } while (jh != last_jh); return freed; } /* * journal_clean_checkpoint_list * * Find all the written-back checkpoint buffers in the journal and release them. * * Called with the journal locked. * Called with j_list_lock held. * Returns number of buffers reaped (for debug) */ int __jbd2_journal_clean_checkpoint_list(journal_t *journal) { transaction_t *transaction, *last_transaction, *next_transaction; int ret = 0; int released; transaction = journal->j_checkpoint_transactions; if (!transaction) goto out; last_transaction = transaction->t_cpprev; next_transaction = transaction; do { transaction = next_transaction; next_transaction = transaction->t_cpnext; ret += journal_clean_one_cp_list(transaction-> t_checkpoint_list, &released); /* * This function only frees up some memory if possible so we * dont have an obligation to finish processing. 
Bail out if * preemption requested: */ if (need_resched()) goto out; if (released) continue; /* * It is essential that we are as careful as in the case of * t_checkpoint_list with removing the buffer from the list as * we can possibly see not yet submitted buffers on io_list */ ret += journal_clean_one_cp_list(transaction-> t_checkpoint_io_list, &released); if (need_resched()) goto out; } while (transaction != last_transaction); out: return ret; } /* * journal_remove_checkpoint: called after a buffer has been committed * to disk (either by being write-back flushed to disk, or being * committed to the log). * * We cannot safely clean a transaction out of the log until all of the * buffer updates committed in that transaction have safely been stored * elsewhere on disk. To achieve this, all of the buffers in a * transaction need to be maintained on the transaction's checkpoint * lists until they have been rewritten, at which point this function is * called to remove the buffer from the existing transaction's * checkpoint lists. * * The function returns 1 if it frees the transaction, 0 otherwise. * The function can free jh and bh. * * This function is called with j_list_lock held. */ int __jbd2_journal_remove_checkpoint(struct journal_head *jh) { struct transaction_chp_stats_s *stats; transaction_t *transaction; journal_t *journal; int ret = 0; JBUFFER_TRACE(jh, "entry"); if ((transaction = jh->b_cp_transaction) == NULL) { JBUFFER_TRACE(jh, "not on transaction"); goto out; } journal = transaction->t_journal; JBUFFER_TRACE(jh, "removing from transaction"); __buffer_unlink(jh); jh->b_cp_transaction = NULL; jbd2_journal_put_journal_head(jh); if (transaction->t_checkpoint_list != NULL || transaction->t_checkpoint_io_list != NULL) goto out; /* * There is one special case to worry about: if we have just pulled the * buffer off a running or committing transaction's checkpoing list, * then even if the checkpoint list is empty, the transaction obviously * cannot be dropped! 
* * The locking here around t_state is a bit sleazy. * See the comment at the end of jbd2_journal_commit_transaction(). */ if (transaction->t_state != T_FINISHED) goto out; /* OK, that was the last buffer for the transaction: we can now safely remove this transaction from the log */ stats = &transaction->t_chp_stats; if (stats->cs_chp_time) stats->cs_chp_time = jbd2_time_diff(stats->cs_chp_time, jiffies); trace_jbd2_checkpoint_stats(journal->j_fs_dev->bd_dev, transaction->t_tid, stats); __jbd2_journal_drop_transaction(journal, transaction); jbd2_journal_free_transaction(transaction); /* Just in case anybody was waiting for more transactions to be checkpointed... */ wake_up(&journal->j_wait_logspace); ret = 1; out: return ret; } /* * journal_insert_checkpoint: put a committed buffer onto a checkpoint * list so that we know when it is safe to clean the transaction out of * the log. * * Called with the journal locked. * Called with j_list_lock held. */ void __jbd2_journal_insert_checkpoint(struct journal_head *jh, transaction_t *transaction) { JBUFFER_TRACE(jh, "entry"); J_ASSERT_JH(jh, buffer_dirty(jh2bh(jh)) || buffer_jbddirty(jh2bh(jh))); J_ASSERT_JH(jh, jh->b_cp_transaction == NULL); /* Get reference for checkpointing transaction */ jbd2_journal_grab_journal_head(jh2bh(jh)); jh->b_cp_transaction = transaction; if (!transaction->t_checkpoint_list) { jh->b_cpnext = jh->b_cpprev = jh; } else { jh->b_cpnext = transaction->t_checkpoint_list; jh->b_cpprev = transaction->t_checkpoint_list->b_cpprev; jh->b_cpprev->b_cpnext = jh; jh->b_cpnext->b_cpprev = jh; } transaction->t_checkpoint_list = jh; } /* * We've finished with this transaction structure: adios... * * The transaction must have no links except for the checkpoint by this * point. * * Called with the journal locked. * Called with j_list_lock held. 
*/ void __jbd2_journal_drop_transaction(journal_t *journal, transaction_t *transaction) { assert_spin_locked(&journal->j_list_lock); if (transaction->t_cpnext) { transaction->t_cpnext->t_cpprev = transaction->t_cpprev; transaction->t_cpprev->t_cpnext = transaction->t_cpnext; if (journal->j_checkpoint_transactions == transaction) journal->j_checkpoint_transactions = transaction->t_cpnext; if (journal->j_checkpoint_transactions == transaction) journal->j_checkpoint_transactions = NULL; } J_ASSERT(transaction->t_state == T_FINISHED); J_ASSERT(transaction->t_buffers == NULL); J_ASSERT(transaction->t_forget == NULL); J_ASSERT(transaction->t_iobuf_list == NULL); J_ASSERT(transaction->t_shadow_list == NULL); J_ASSERT(transaction->t_log_list == NULL); J_ASSERT(transaction->t_checkpoint_list == NULL); J_ASSERT(transaction->t_checkpoint_io_list == NULL); J_ASSERT(atomic_read(&transaction->t_updates) == 0); J_ASSERT(journal->j_committing_transaction != transaction); J_ASSERT(journal->j_running_transaction != transaction); trace_jbd2_drop_transaction(journal, transaction); jbd_debug(1, "Dropping transaction %d, all done\n", transaction->t_tid); }
gpl-2.0
moonlightly/AK-OnePone
arch/arm/mach-s5p64x0/mach-smdk6440.c
4825
6779
/* linux/arch/arm/mach-s5p64x0/mach-smdk6440.c * * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd. * http://www.samsung.com * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/interrupt.h> #include <linux/list.h> #include <linux/timer.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/i2c.h> #include <linux/serial_core.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/module.h> #include <linux/clk.h> #include <linux/gpio.h> #include <linux/pwm_backlight.h> #include <linux/fb.h> #include <linux/mmc/host.h> #include <video/platform_lcd.h> #include <asm/hardware/vic.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <asm/irq.h> #include <asm/mach-types.h> #include <mach/hardware.h> #include <mach/map.h> #include <mach/regs-clock.h> #include <mach/i2c.h> #include <mach/regs-gpio.h> #include <plat/regs-serial.h> #include <plat/gpio-cfg.h> #include <plat/clock.h> #include <plat/devs.h> #include <plat/cpu.h> #include <plat/iic.h> #include <plat/pll.h> #include <plat/adc.h> #include <plat/ts.h> #include <plat/s5p-time.h> #include <plat/backlight.h> #include <plat/fb.h> #include <plat/regs-fb.h> #include <plat/sdhci.h> #include "common.h" #define SMDK6440_UCON_DEFAULT (S3C2410_UCON_TXILEVEL | \ S3C2410_UCON_RXILEVEL | \ S3C2410_UCON_TXIRQMODE | \ S3C2410_UCON_RXIRQMODE | \ S3C2410_UCON_RXFIFO_TOI | \ S3C2443_UCON_RXERR_IRQEN) #define SMDK6440_ULCON_DEFAULT S3C2410_LCON_CS8 #define SMDK6440_UFCON_DEFAULT (S3C2410_UFCON_FIFOMODE | \ S3C2440_UFCON_TXTRIG16 | \ S3C2410_UFCON_RXTRIG8) static struct s3c2410_uartcfg smdk6440_uartcfgs[] __initdata = { [0] = { .hwport = 0, .flags = 0, .ucon = SMDK6440_UCON_DEFAULT, .ulcon = SMDK6440_ULCON_DEFAULT, .ufcon = SMDK6440_UFCON_DEFAULT, }, [1] = { .hwport = 1, .flags = 0, .ucon 
= SMDK6440_UCON_DEFAULT, .ulcon = SMDK6440_ULCON_DEFAULT, .ufcon = SMDK6440_UFCON_DEFAULT, }, [2] = { .hwport = 2, .flags = 0, .ucon = SMDK6440_UCON_DEFAULT, .ulcon = SMDK6440_ULCON_DEFAULT, .ufcon = SMDK6440_UFCON_DEFAULT, }, [3] = { .hwport = 3, .flags = 0, .ucon = SMDK6440_UCON_DEFAULT, .ulcon = SMDK6440_ULCON_DEFAULT, .ufcon = SMDK6440_UFCON_DEFAULT, }, }; /* Frame Buffer */ static struct s3c_fb_pd_win smdk6440_fb_win0 = { .win_mode = { .left_margin = 8, .right_margin = 13, .upper_margin = 7, .lower_margin = 5, .hsync_len = 3, .vsync_len = 1, .xres = 800, .yres = 480, }, .max_bpp = 32, .default_bpp = 24, }; static struct s3c_fb_platdata smdk6440_lcd_pdata __initdata = { .win[0] = &smdk6440_fb_win0, .vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB, .vidcon1 = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC, .setup_gpio = s5p64x0_fb_gpio_setup_24bpp, }; /* LCD power controller */ static void smdk6440_lte480_reset_power(struct plat_lcd_data *pd, unsigned int power) { int err; if (power) { err = gpio_request(S5P6440_GPN(5), "GPN"); if (err) { printk(KERN_ERR "failed to request GPN for lcd reset\n"); return; } gpio_direction_output(S5P6440_GPN(5), 1); gpio_set_value(S5P6440_GPN(5), 0); gpio_set_value(S5P6440_GPN(5), 1); gpio_free(S5P6440_GPN(5)); } } static struct plat_lcd_data smdk6440_lcd_power_data = { .set_power = smdk6440_lte480_reset_power, }; static struct platform_device smdk6440_lcd_lte480wv = { .name = "platform-lcd", .dev.parent = &s3c_device_fb.dev, .dev.platform_data = &smdk6440_lcd_power_data, }; static struct platform_device *smdk6440_devices[] __initdata = { &s3c_device_adc, &s3c_device_rtc, &s3c_device_i2c0, &s3c_device_i2c1, &s3c_device_ts, &s3c_device_wdt, &samsung_asoc_dma, &s5p6440_device_iis, &s3c_device_fb, &smdk6440_lcd_lte480wv, &s3c_device_hsmmc0, &s3c_device_hsmmc1, &s3c_device_hsmmc2, }; static struct s3c_sdhci_platdata smdk6440_hsmmc0_pdata __initdata = { .cd_type = S3C_SDHCI_CD_NONE, }; static struct s3c_sdhci_platdata smdk6440_hsmmc1_pdata 
__initdata = { .cd_type = S3C_SDHCI_CD_INTERNAL, #if defined(CONFIG_S5P64X0_SD_CH1_8BIT) .max_width = 8, .host_caps = MMC_CAP_8_BIT_DATA, #endif }; static struct s3c_sdhci_platdata smdk6440_hsmmc2_pdata __initdata = { .cd_type = S3C_SDHCI_CD_NONE, }; static struct s3c2410_platform_i2c s5p6440_i2c0_data __initdata = { .flags = 0, .slave_addr = 0x10, .frequency = 100*1000, .sda_delay = 100, .cfg_gpio = s5p6440_i2c0_cfg_gpio, }; static struct s3c2410_platform_i2c s5p6440_i2c1_data __initdata = { .flags = 0, .bus_num = 1, .slave_addr = 0x10, .frequency = 100*1000, .sda_delay = 100, .cfg_gpio = s5p6440_i2c1_cfg_gpio, }; static struct i2c_board_info smdk6440_i2c_devs0[] __initdata = { { I2C_BOARD_INFO("24c08", 0x50), }, { I2C_BOARD_INFO("wm8580", 0x1b), }, }; static struct i2c_board_info smdk6440_i2c_devs1[] __initdata = { /* To be populated */ }; /* LCD Backlight data */ static struct samsung_bl_gpio_info smdk6440_bl_gpio_info = { .no = S5P6440_GPF(15), .func = S3C_GPIO_SFN(2), }; static struct platform_pwm_backlight_data smdk6440_bl_data = { .pwm_id = 1, }; static void __init smdk6440_map_io(void) { s5p64x0_init_io(NULL, 0); s3c24xx_init_clocks(12000000); s3c24xx_init_uarts(smdk6440_uartcfgs, ARRAY_SIZE(smdk6440_uartcfgs)); s5p_set_timer_source(S5P_PWM3, S5P_PWM4); } static void s5p6440_set_lcd_interface(void) { unsigned int cfg; /* select TFT LCD type (RGB I/F) */ cfg = __raw_readl(S5P64X0_SPCON0); cfg &= ~S5P64X0_SPCON0_LCD_SEL_MASK; cfg |= S5P64X0_SPCON0_LCD_SEL_RGB; __raw_writel(cfg, S5P64X0_SPCON0); } static void __init smdk6440_machine_init(void) { s3c24xx_ts_set_platdata(NULL); s3c_i2c0_set_platdata(&s5p6440_i2c0_data); s3c_i2c1_set_platdata(&s5p6440_i2c1_data); i2c_register_board_info(0, smdk6440_i2c_devs0, ARRAY_SIZE(smdk6440_i2c_devs0)); i2c_register_board_info(1, smdk6440_i2c_devs1, ARRAY_SIZE(smdk6440_i2c_devs1)); samsung_bl_set(&smdk6440_bl_gpio_info, &smdk6440_bl_data); s5p6440_set_lcd_interface(); s3c_fb_set_platdata(&smdk6440_lcd_pdata); 
s3c_sdhci0_set_platdata(&smdk6440_hsmmc0_pdata); s3c_sdhci1_set_platdata(&smdk6440_hsmmc1_pdata); s3c_sdhci2_set_platdata(&smdk6440_hsmmc2_pdata); platform_add_devices(smdk6440_devices, ARRAY_SIZE(smdk6440_devices)); } MACHINE_START(SMDK6440, "SMDK6440") /* Maintainer: Kukjin Kim <kgene.kim@samsung.com> */ .atag_offset = 0x100, .init_irq = s5p6440_init_irq, .handle_irq = vic_handle_irq, .map_io = smdk6440_map_io, .init_machine = smdk6440_machine_init, .timer = &s5p_timer, .restart = s5p64x0_restart, MACHINE_END
gpl-2.0
nimon/GPSense_1
arch/x86/um/ptrace_32.c
5081
6160
/* * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) * Licensed under the GPL */ #include "linux/mm.h" #include "linux/sched.h" #include "asm/uaccess.h" #include "skas.h" extern int arch_switch_tls(struct task_struct *to); void arch_switch_to(struct task_struct *to) { int err = arch_switch_tls(to); if (!err) return; if (err != -EINVAL) printk(KERN_WARNING "arch_switch_tls failed, errno %d, " "not EINVAL\n", -err); else printk(KERN_WARNING "arch_switch_tls failed, errno = EINVAL\n"); } int is_syscall(unsigned long addr) { unsigned short instr; int n; n = copy_from_user(&instr, (void __user *) addr, sizeof(instr)); if (n) { /* access_process_vm() grants access to vsyscall and stub, * while copy_from_user doesn't. Maybe access_process_vm is * slow, but that doesn't matter, since it will be called only * in case of singlestepping, if copy_from_user failed. */ n = access_process_vm(current, addr, &instr, sizeof(instr), 0); if (n != sizeof(instr)) { printk(KERN_ERR "is_syscall : failed to read " "instruction from 0x%lx\n", addr); return 1; } } /* int 0x80 or sysenter */ return (instr == 0x80cd) || (instr == 0x340f); } /* determines which flags the user has access to. 
*/ /* 1 = access 0 = no access */ #define FLAG_MASK 0x00044dd5 static const int reg_offsets[] = { [EBX] = HOST_BX, [ECX] = HOST_CX, [EDX] = HOST_DX, [ESI] = HOST_SI, [EDI] = HOST_DI, [EBP] = HOST_BP, [EAX] = HOST_AX, [DS] = HOST_DS, [ES] = HOST_ES, [FS] = HOST_FS, [GS] = HOST_GS, [EIP] = HOST_IP, [CS] = HOST_CS, [EFL] = HOST_EFLAGS, [UESP] = HOST_SP, [SS] = HOST_SS, }; int putreg(struct task_struct *child, int regno, unsigned long value) { regno >>= 2; switch (regno) { case EBX: case ECX: case EDX: case ESI: case EDI: case EBP: case EAX: case EIP: case UESP: break; case FS: if (value && (value & 3) != 3) return -EIO; break; case GS: if (value && (value & 3) != 3) return -EIO; break; case DS: case ES: if (value && (value & 3) != 3) return -EIO; value &= 0xffff; break; case SS: case CS: if ((value & 3) != 3) return -EIO; value &= 0xffff; break; case EFL: value &= FLAG_MASK; child->thread.regs.regs.gp[HOST_EFLAGS] |= value; return 0; case ORIG_EAX: child->thread.regs.regs.syscall = value; return 0; default : panic("Bad register in putreg() : %d\n", regno); } child->thread.regs.regs.gp[reg_offsets[regno]] = value; return 0; } int poke_user(struct task_struct *child, long addr, long data) { if ((addr & 3) || addr < 0) return -EIO; if (addr < MAX_REG_OFFSET) return putreg(child, addr, data); else if ((addr >= offsetof(struct user, u_debugreg[0])) && (addr <= offsetof(struct user, u_debugreg[7]))) { addr -= offsetof(struct user, u_debugreg[0]); addr = addr >> 2; if ((addr == 4) || (addr == 5)) return -EIO; child->thread.arch.debugregs[addr] = data; return 0; } return -EIO; } unsigned long getreg(struct task_struct *child, int regno) { unsigned long mask = ~0UL; regno >>= 2; switch (regno) { case ORIG_EAX: return child->thread.regs.regs.syscall; case FS: case GS: case DS: case ES: case SS: case CS: mask = 0xffff; break; case EIP: case UESP: case EAX: case EBX: case ECX: case EDX: case ESI: case EDI: case EBP: case EFL: break; default: panic("Bad register in getreg() : 
%d\n", regno); } return mask & child->thread.regs.regs.gp[reg_offsets[regno]]; } /* read the word at location addr in the USER area. */ int peek_user(struct task_struct *child, long addr, long data) { unsigned long tmp; if ((addr & 3) || addr < 0) return -EIO; tmp = 0; /* Default return condition */ if (addr < MAX_REG_OFFSET) { tmp = getreg(child, addr); } else if ((addr >= offsetof(struct user, u_debugreg[0])) && (addr <= offsetof(struct user, u_debugreg[7]))) { addr -= offsetof(struct user, u_debugreg[0]); addr = addr >> 2; tmp = child->thread.arch.debugregs[addr]; } return put_user(tmp, (unsigned long __user *) data); } static int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *child) { int err, n, cpu = ((struct thread_info *) child->stack)->cpu; struct user_i387_struct fpregs; err = save_fp_registers(userspace_pid[cpu], (unsigned long *) &fpregs); if (err) return err; n = copy_to_user(buf, &fpregs, sizeof(fpregs)); if(n > 0) return -EFAULT; return n; } static int set_fpregs(struct user_i387_struct __user *buf, struct task_struct *child) { int n, cpu = ((struct thread_info *) child->stack)->cpu; struct user_i387_struct fpregs; n = copy_from_user(&fpregs, buf, sizeof(fpregs)); if (n > 0) return -EFAULT; return restore_fp_registers(userspace_pid[cpu], (unsigned long *) &fpregs); } static int get_fpxregs(struct user_fxsr_struct __user *buf, struct task_struct *child) { int err, n, cpu = ((struct thread_info *) child->stack)->cpu; struct user_fxsr_struct fpregs; err = save_fpx_registers(userspace_pid[cpu], (unsigned long *) &fpregs); if (err) return err; n = copy_to_user(buf, &fpregs, sizeof(fpregs)); if(n > 0) return -EFAULT; return n; } static int set_fpxregs(struct user_fxsr_struct __user *buf, struct task_struct *child) { int n, cpu = ((struct thread_info *) child->stack)->cpu; struct user_fxsr_struct fpregs; n = copy_from_user(&fpregs, buf, sizeof(fpregs)); if (n > 0) return -EFAULT; return restore_fpx_registers(userspace_pid[cpu], 
(unsigned long *) &fpregs); } long subarch_ptrace(struct task_struct *child, long request, unsigned long addr, unsigned long data) { int ret = -EIO; void __user *datap = (void __user *) data; switch (request) { case PTRACE_GETFPREGS: /* Get the child FPU state. */ ret = get_fpregs(datap, child); break; case PTRACE_SETFPREGS: /* Set the child FPU state. */ ret = set_fpregs(datap, child); break; case PTRACE_GETFPXREGS: /* Get the child FPU state. */ ret = get_fpxregs(datap, child); break; case PTRACE_SETFPXREGS: /* Set the child FPU state. */ ret = set_fpxregs(datap, child); break; default: ret = -EIO; } return ret; }
gpl-2.0
Quarx2k/android_kernel_lge_msm8226
drivers/ide/tx4939ide.c
5593
17667
/* * TX4939 internal IDE driver * Based on RBTX49xx patch from CELF patch archive. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * (C) Copyright TOSHIBA CORPORATION 2005-2007 */ #include <linux/module.h> #include <linux/types.h> #include <linux/ide.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/scatterlist.h> #include <asm/ide.h> #define MODNAME "tx4939ide" /* ATA Shadow Registers (8-bit except for Data which is 16-bit) */ #define TX4939IDE_Data 0x000 #define TX4939IDE_Error_Feature 0x001 #define TX4939IDE_Sec 0x002 #define TX4939IDE_LBA0 0x003 #define TX4939IDE_LBA1 0x004 #define TX4939IDE_LBA2 0x005 #define TX4939IDE_DevHead 0x006 #define TX4939IDE_Stat_Cmd 0x007 #define TX4939IDE_AltStat_DevCtl 0x402 /* H/W DMA Registers */ #define TX4939IDE_DMA_Cmd 0x800 /* 8-bit */ #define TX4939IDE_DMA_Stat 0x802 /* 8-bit */ #define TX4939IDE_PRD_Ptr 0x804 /* 32-bit */ /* ATA100 CORE Registers (16-bit) */ #define TX4939IDE_Sys_Ctl 0xc00 #define TX4939IDE_Xfer_Cnt_1 0xc08 #define TX4939IDE_Xfer_Cnt_2 0xc0a #define TX4939IDE_Sec_Cnt 0xc10 #define TX4939IDE_Start_Lo_Addr 0xc18 #define TX4939IDE_Start_Up_Addr 0xc20 #define TX4939IDE_Add_Ctl 0xc28 #define TX4939IDE_Lo_Burst_Cnt 0xc30 #define TX4939IDE_Up_Burst_Cnt 0xc38 #define TX4939IDE_PIO_Addr 0xc88 #define TX4939IDE_H_Rst_Tim 0xc90 #define TX4939IDE_Int_Ctl 0xc98 #define TX4939IDE_Pkt_Cmd 0xcb8 #define TX4939IDE_Bxfer_Cnt_Hi 0xcc0 #define TX4939IDE_Bxfer_Cnt_Lo 0xcc8 #define TX4939IDE_Dev_TErr 0xcd0 #define TX4939IDE_Pkt_Xfer_Ctl 0xcd8 #define TX4939IDE_Start_TAddr 0xce0 /* bits for Int_Ctl */ #define TX4939IDE_INT_ADDRERR 0x80 #define TX4939IDE_INT_REACHMUL 0x40 #define TX4939IDE_INT_DEVTIMING 0x20 #define TX4939IDE_INT_UDMATERM 0x10 #define TX4939IDE_INT_TIMER 0x08 #define TX4939IDE_INT_BUSERR 0x04 #define 
TX4939IDE_INT_XFEREND 0x02 #define TX4939IDE_INT_HOST 0x01 #define TX4939IDE_IGNORE_INTS \ (TX4939IDE_INT_ADDRERR | TX4939IDE_INT_REACHMUL | \ TX4939IDE_INT_DEVTIMING | TX4939IDE_INT_UDMATERM | \ TX4939IDE_INT_TIMER | TX4939IDE_INT_XFEREND) #ifdef __BIG_ENDIAN #define tx4939ide_swizzlel(a) ((a) ^ 4) #define tx4939ide_swizzlew(a) ((a) ^ 6) #define tx4939ide_swizzleb(a) ((a) ^ 7) #else #define tx4939ide_swizzlel(a) (a) #define tx4939ide_swizzlew(a) (a) #define tx4939ide_swizzleb(a) (a) #endif static u16 tx4939ide_readw(void __iomem *base, u32 reg) { return __raw_readw(base + tx4939ide_swizzlew(reg)); } static u8 tx4939ide_readb(void __iomem *base, u32 reg) { return __raw_readb(base + tx4939ide_swizzleb(reg)); } static void tx4939ide_writel(u32 val, void __iomem *base, u32 reg) { __raw_writel(val, base + tx4939ide_swizzlel(reg)); } static void tx4939ide_writew(u16 val, void __iomem *base, u32 reg) { __raw_writew(val, base + tx4939ide_swizzlew(reg)); } static void tx4939ide_writeb(u8 val, void __iomem *base, u32 reg) { __raw_writeb(val, base + tx4939ide_swizzleb(reg)); } #define TX4939IDE_BASE(hwif) ((void __iomem *)(hwif)->extra_base) static void tx4939ide_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive) { int is_slave = drive->dn; u32 mask, val; const u8 pio = drive->pio_mode - XFER_PIO_0; u8 safe = pio; ide_drive_t *pair; pair = ide_get_pair_dev(drive); if (pair) safe = min_t(u8, safe, pair->pio_mode - XFER_PIO_0); /* * Update Command Transfer Mode for master/slave and Data * Transfer Mode for this drive. */ mask = is_slave ? 0x07f00000 : 0x000007f0; val = ((safe << 8) | (pio << 4)) << (is_slave ? 16 : 0); hwif->select_data = (hwif->select_data & ~mask) | val; /* tx4939ide_tf_load_fixup() will set the Sys_Ctl register */ } static void tx4939ide_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive) { u32 mask, val; const u8 mode = drive->dma_mode; /* Update Data Transfer Mode for this drive. 
*/ if (mode >= XFER_UDMA_0) val = mode - XFER_UDMA_0 + 8; else val = mode - XFER_MW_DMA_0 + 5; if (drive->dn) { mask = 0x00f00000; val <<= 20; } else { mask = 0x000000f0; val <<= 4; } hwif->select_data = (hwif->select_data & ~mask) | val; /* tx4939ide_tf_load_fixup() will set the Sys_Ctl register */ } static u16 tx4939ide_check_error_ints(ide_hwif_t *hwif) { void __iomem *base = TX4939IDE_BASE(hwif); u16 ctl = tx4939ide_readw(base, TX4939IDE_Int_Ctl); if (ctl & TX4939IDE_INT_BUSERR) { /* reset FIFO */ u16 sysctl = tx4939ide_readw(base, TX4939IDE_Sys_Ctl); tx4939ide_writew(sysctl | 0x4000, base, TX4939IDE_Sys_Ctl); mmiowb(); /* wait 12GBUSCLK (typ. 60ns @ GBUS200MHz, max 270ns) */ ndelay(270); tx4939ide_writew(sysctl, base, TX4939IDE_Sys_Ctl); } if (ctl & (TX4939IDE_INT_ADDRERR | TX4939IDE_INT_DEVTIMING | TX4939IDE_INT_BUSERR)) pr_err("%s: Error interrupt %#x (%s%s%s )\n", hwif->name, ctl, ctl & TX4939IDE_INT_ADDRERR ? " Address-Error" : "", ctl & TX4939IDE_INT_DEVTIMING ? " DEV-Timing" : "", ctl & TX4939IDE_INT_BUSERR ? " Bus-Error" : ""); return ctl; } static void tx4939ide_clear_irq(ide_drive_t *drive) { ide_hwif_t *hwif; void __iomem *base; u16 ctl; /* * tx4939ide_dma_test_irq() and tx4939ide_dma_end() do all job * for DMA case. */ if (drive->waiting_for_dma) return; hwif = drive->hwif; base = TX4939IDE_BASE(hwif); ctl = tx4939ide_check_error_ints(hwif); tx4939ide_writew(ctl, base, TX4939IDE_Int_Ctl); } static u8 tx4939ide_cable_detect(ide_hwif_t *hwif) { void __iomem *base = TX4939IDE_BASE(hwif); return tx4939ide_readw(base, TX4939IDE_Sys_Ctl) & 0x2000 ? 
ATA_CBL_PATA40 : ATA_CBL_PATA80; } #ifdef __BIG_ENDIAN static void tx4939ide_dma_host_set(ide_drive_t *drive, int on) { ide_hwif_t *hwif = drive->hwif; u8 unit = drive->dn; void __iomem *base = TX4939IDE_BASE(hwif); u8 dma_stat = tx4939ide_readb(base, TX4939IDE_DMA_Stat); if (on) dma_stat |= (1 << (5 + unit)); else dma_stat &= ~(1 << (5 + unit)); tx4939ide_writeb(dma_stat, base, TX4939IDE_DMA_Stat); } #else #define tx4939ide_dma_host_set ide_dma_host_set #endif static u8 tx4939ide_clear_dma_status(void __iomem *base) { u8 dma_stat; /* read DMA status for INTR & ERROR flags */ dma_stat = tx4939ide_readb(base, TX4939IDE_DMA_Stat); /* clear INTR & ERROR flags */ tx4939ide_writeb(dma_stat | ATA_DMA_INTR | ATA_DMA_ERR, base, TX4939IDE_DMA_Stat); /* recover intmask cleared by writing to bit2 of DMA_Stat */ tx4939ide_writew(TX4939IDE_IGNORE_INTS << 8, base, TX4939IDE_Int_Ctl); return dma_stat; } #ifdef __BIG_ENDIAN /* custom ide_build_dmatable to handle swapped layout */ static int tx4939ide_build_dmatable(ide_drive_t *drive, struct ide_cmd *cmd) { ide_hwif_t *hwif = drive->hwif; u32 *table = (u32 *)hwif->dmatable_cpu; unsigned int count = 0; int i; struct scatterlist *sg; for_each_sg(hwif->sg_table, sg, cmd->sg_nents, i) { u32 cur_addr, cur_len, bcount; cur_addr = sg_dma_address(sg); cur_len = sg_dma_len(sg); /* * Fill in the DMA table, without crossing any 64kB boundaries. */ while (cur_len) { if (count++ >= PRD_ENTRIES) goto use_pio_instead; bcount = 0x10000 - (cur_addr & 0xffff); if (bcount > cur_len) bcount = cur_len; /* * This workaround for zero count seems required. * (standard ide_build_dmatable does it too) */ if (bcount == 0x10000) bcount = 0x8000; *table++ = bcount & 0xffff; *table++ = cur_addr; cur_addr += bcount; cur_len -= bcount; } } if (count) { *(table - 2) |= 0x80000000; return count; } use_pio_instead: printk(KERN_ERR "%s: %s\n", drive->name, count ? 
"DMA table too small" : "empty DMA table?"); return 0; /* revert to PIO for this request */ } #else #define tx4939ide_build_dmatable ide_build_dmatable #endif static int tx4939ide_dma_setup(ide_drive_t *drive, struct ide_cmd *cmd) { ide_hwif_t *hwif = drive->hwif; void __iomem *base = TX4939IDE_BASE(hwif); u8 rw = (cmd->tf_flags & IDE_TFLAG_WRITE) ? 0 : ATA_DMA_WR; /* fall back to PIO! */ if (tx4939ide_build_dmatable(drive, cmd) == 0) return 1; /* PRD table */ tx4939ide_writel(hwif->dmatable_dma, base, TX4939IDE_PRD_Ptr); /* specify r/w */ tx4939ide_writeb(rw, base, TX4939IDE_DMA_Cmd); /* clear INTR & ERROR flags */ tx4939ide_clear_dma_status(base); tx4939ide_writew(SECTOR_SIZE / 2, base, drive->dn ? TX4939IDE_Xfer_Cnt_2 : TX4939IDE_Xfer_Cnt_1); tx4939ide_writew(blk_rq_sectors(cmd->rq), base, TX4939IDE_Sec_Cnt); return 0; } static int tx4939ide_dma_end(ide_drive_t *drive) { ide_hwif_t *hwif = drive->hwif; u8 dma_stat, dma_cmd; void __iomem *base = TX4939IDE_BASE(hwif); u16 ctl = tx4939ide_readw(base, TX4939IDE_Int_Ctl); /* get DMA command mode */ dma_cmd = tx4939ide_readb(base, TX4939IDE_DMA_Cmd); /* stop DMA */ tx4939ide_writeb(dma_cmd & ~ATA_DMA_START, base, TX4939IDE_DMA_Cmd); /* read and clear the INTR & ERROR bits */ dma_stat = tx4939ide_clear_dma_status(base); #define CHECK_DMA_MASK (ATA_DMA_ACTIVE | ATA_DMA_ERR | ATA_DMA_INTR) /* verify good DMA status */ if ((dma_stat & CHECK_DMA_MASK) == 0 && (ctl & (TX4939IDE_INT_XFEREND | TX4939IDE_INT_HOST)) == (TX4939IDE_INT_XFEREND | TX4939IDE_INT_HOST)) /* INT_IDE lost... bug? */ return 0; return ((dma_stat & CHECK_DMA_MASK) != ATA_DMA_INTR) ? 
0x10 | dma_stat : 0; } /* returns 1 if DMA IRQ issued, 0 otherwise */ static int tx4939ide_dma_test_irq(ide_drive_t *drive) { ide_hwif_t *hwif = drive->hwif; void __iomem *base = TX4939IDE_BASE(hwif); u16 ctl, ide_int; u8 dma_stat, stat; int found = 0; ctl = tx4939ide_check_error_ints(hwif); ide_int = ctl & (TX4939IDE_INT_XFEREND | TX4939IDE_INT_HOST); switch (ide_int) { case TX4939IDE_INT_HOST: /* On error, XFEREND might not be asserted. */ stat = tx4939ide_readb(base, TX4939IDE_AltStat_DevCtl); if ((stat & (ATA_BUSY | ATA_DRQ | ATA_ERR)) == ATA_ERR) found = 1; else /* Wait for XFEREND (Mask HOST and unmask XFEREND) */ ctl &= ~TX4939IDE_INT_XFEREND << 8; ctl |= ide_int << 8; break; case TX4939IDE_INT_HOST | TX4939IDE_INT_XFEREND: dma_stat = tx4939ide_readb(base, TX4939IDE_DMA_Stat); if (!(dma_stat & ATA_DMA_INTR)) pr_warning("%s: weird interrupt status. " "DMA_Stat %#02x int_ctl %#04x\n", hwif->name, dma_stat, ctl); found = 1; break; } /* * Do not clear XFEREND, HOST now. They will be cleared by * clearing bit2 of DMA_Stat. */ ctl &= ~ide_int; tx4939ide_writew(ctl, base, TX4939IDE_Int_Ctl); return found; } #ifdef __BIG_ENDIAN static u8 tx4939ide_dma_sff_read_status(ide_hwif_t *hwif) { void __iomem *base = TX4939IDE_BASE(hwif); return tx4939ide_readb(base, TX4939IDE_DMA_Stat); } #else #define tx4939ide_dma_sff_read_status ide_dma_sff_read_status #endif static void tx4939ide_init_hwif(ide_hwif_t *hwif) { void __iomem *base = TX4939IDE_BASE(hwif); /* Soft Reset */ tx4939ide_writew(0x8000, base, TX4939IDE_Sys_Ctl); mmiowb(); /* at least 20 GBUSCLK (typ. 
100ns @ GBUS200MHz, max 450ns) */ ndelay(450); tx4939ide_writew(0x0000, base, TX4939IDE_Sys_Ctl); /* mask some interrupts and clear all interrupts */ tx4939ide_writew((TX4939IDE_IGNORE_INTS << 8) | 0xff, base, TX4939IDE_Int_Ctl); tx4939ide_writew(0x0008, base, TX4939IDE_Lo_Burst_Cnt); tx4939ide_writew(0, base, TX4939IDE_Up_Burst_Cnt); } static int tx4939ide_init_dma(ide_hwif_t *hwif, const struct ide_port_info *d) { hwif->dma_base = hwif->extra_base + tx4939ide_swizzleb(TX4939IDE_DMA_Cmd); /* * Note that we cannot use ATA_DMA_TABLE_OFS, ATA_DMA_STATUS * for big endian. */ return ide_allocate_dma_engine(hwif); } static void tx4939ide_tf_load_fixup(ide_drive_t *drive) { ide_hwif_t *hwif = drive->hwif; void __iomem *base = TX4939IDE_BASE(hwif); u16 sysctl = hwif->select_data >> (drive->dn ? 16 : 0); /* * Fix ATA100 CORE System Control Register. (The write to the * Device/Head register may write wrong data to the System * Control Register) * While Sys_Ctl is written here, dev_select() is not needed. 
*/ tx4939ide_writew(sysctl, base, TX4939IDE_Sys_Ctl); } static void tx4939ide_tf_load(ide_drive_t *drive, struct ide_taskfile *tf, u8 valid) { ide_tf_load(drive, tf, valid); if (valid & IDE_VALID_DEVICE) tx4939ide_tf_load_fixup(drive); } #ifdef __BIG_ENDIAN /* custom iops (independent from SWAP_IO_SPACE) */ static void tx4939ide_input_data_swap(ide_drive_t *drive, struct ide_cmd *cmd, void *buf, unsigned int len) { unsigned long port = drive->hwif->io_ports.data_addr; unsigned short *ptr = buf; unsigned int count = (len + 1) / 2; while (count--) *ptr++ = cpu_to_le16(__raw_readw((void __iomem *)port)); __ide_flush_dcache_range((unsigned long)buf, roundup(len, 2)); } static void tx4939ide_output_data_swap(ide_drive_t *drive, struct ide_cmd *cmd, void *buf, unsigned int len) { unsigned long port = drive->hwif->io_ports.data_addr; unsigned short *ptr = buf; unsigned int count = (len + 1) / 2; while (count--) { __raw_writew(le16_to_cpu(*ptr), (void __iomem *)port); ptr++; } __ide_flush_dcache_range((unsigned long)buf, roundup(len, 2)); } static const struct ide_tp_ops tx4939ide_tp_ops = { .exec_command = ide_exec_command, .read_status = ide_read_status, .read_altstatus = ide_read_altstatus, .write_devctl = ide_write_devctl, .dev_select = ide_dev_select, .tf_load = tx4939ide_tf_load, .tf_read = ide_tf_read, .input_data = tx4939ide_input_data_swap, .output_data = tx4939ide_output_data_swap, }; #else /* __LITTLE_ENDIAN */ static const struct ide_tp_ops tx4939ide_tp_ops = { .exec_command = ide_exec_command, .read_status = ide_read_status, .read_altstatus = ide_read_altstatus, .write_devctl = ide_write_devctl, .dev_select = ide_dev_select, .tf_load = tx4939ide_tf_load, .tf_read = ide_tf_read, .input_data = ide_input_data, .output_data = ide_output_data, }; #endif /* __LITTLE_ENDIAN */ static const struct ide_port_ops tx4939ide_port_ops = { .set_pio_mode = tx4939ide_set_pio_mode, .set_dma_mode = tx4939ide_set_dma_mode, .clear_irq = tx4939ide_clear_irq, .cable_detect = 
tx4939ide_cable_detect, }; static const struct ide_dma_ops tx4939ide_dma_ops = { .dma_host_set = tx4939ide_dma_host_set, .dma_setup = tx4939ide_dma_setup, .dma_start = ide_dma_start, .dma_end = tx4939ide_dma_end, .dma_test_irq = tx4939ide_dma_test_irq, .dma_lost_irq = ide_dma_lost_irq, .dma_timer_expiry = ide_dma_sff_timer_expiry, .dma_sff_read_status = tx4939ide_dma_sff_read_status, }; static const struct ide_port_info tx4939ide_port_info __initdata = { .init_hwif = tx4939ide_init_hwif, .init_dma = tx4939ide_init_dma, .port_ops = &tx4939ide_port_ops, .dma_ops = &tx4939ide_dma_ops, .tp_ops = &tx4939ide_tp_ops, .host_flags = IDE_HFLAG_MMIO, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA5, .chipset = ide_generic, }; static int __init tx4939ide_probe(struct platform_device *pdev) { struct ide_hw hw, *hws[] = { &hw }; struct ide_host *host; struct resource *res; int irq, ret; unsigned long mapbase; irq = platform_get_irq(pdev, 0); if (irq < 0) return -ENODEV; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) return -ENODEV; if (!devm_request_mem_region(&pdev->dev, res->start, resource_size(res), "tx4938ide")) return -EBUSY; mapbase = (unsigned long)devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (!mapbase) return -EBUSY; memset(&hw, 0, sizeof(hw)); hw.io_ports.data_addr = mapbase + tx4939ide_swizzlew(TX4939IDE_Data); hw.io_ports.error_addr = mapbase + tx4939ide_swizzleb(TX4939IDE_Error_Feature); hw.io_ports.nsect_addr = mapbase + tx4939ide_swizzleb(TX4939IDE_Sec); hw.io_ports.lbal_addr = mapbase + tx4939ide_swizzleb(TX4939IDE_LBA0); hw.io_ports.lbam_addr = mapbase + tx4939ide_swizzleb(TX4939IDE_LBA1); hw.io_ports.lbah_addr = mapbase + tx4939ide_swizzleb(TX4939IDE_LBA2); hw.io_ports.device_addr = mapbase + tx4939ide_swizzleb(TX4939IDE_DevHead); hw.io_ports.command_addr = mapbase + tx4939ide_swizzleb(TX4939IDE_Stat_Cmd); hw.io_ports.ctl_addr = mapbase + tx4939ide_swizzleb(TX4939IDE_AltStat_DevCtl); hw.irq = irq; hw.dev = 
&pdev->dev; pr_info("TX4939 IDE interface (base %#lx, irq %d)\n", mapbase, irq); host = ide_host_alloc(&tx4939ide_port_info, hws, 1); if (!host) return -ENOMEM; /* use extra_base for base address of the all registers */ host->ports[0]->extra_base = mapbase; ret = ide_host_register(host, &tx4939ide_port_info, hws); if (ret) { ide_host_free(host); return ret; } platform_set_drvdata(pdev, host); return 0; } static int __exit tx4939ide_remove(struct platform_device *pdev) { struct ide_host *host = platform_get_drvdata(pdev); ide_host_remove(host); return 0; } #ifdef CONFIG_PM static int tx4939ide_resume(struct platform_device *dev) { struct ide_host *host = platform_get_drvdata(dev); ide_hwif_t *hwif = host->ports[0]; tx4939ide_init_hwif(hwif); return 0; } #else #define tx4939ide_resume NULL #endif static struct platform_driver tx4939ide_driver = { .driver = { .name = MODNAME, .owner = THIS_MODULE, }, .remove = __exit_p(tx4939ide_remove), .resume = tx4939ide_resume, }; static int __init tx4939ide_init(void) { return platform_driver_probe(&tx4939ide_driver, tx4939ide_probe); } static void __exit tx4939ide_exit(void) { platform_driver_unregister(&tx4939ide_driver); } module_init(tx4939ide_init); module_exit(tx4939ide_exit); MODULE_DESCRIPTION("TX4939 internal IDE driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:tx4939ide");
gpl-2.0
ztemt/NX512J_kernel
crypto/async_tx/async_raid6_recov.c
7641
14730
/* * Asynchronous RAID-6 recovery calculations ASYNC_TX API. * Copyright(c) 2009 Intel Corporation * * based on raid6recov.c: * Copyright 2002 H. Peter Anvin * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. * */ #include <linux/kernel.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/dma-mapping.h> #include <linux/raid/pq.h> #include <linux/async_tx.h> static struct dma_async_tx_descriptor * async_sum_product(struct page *dest, struct page **srcs, unsigned char *coef, size_t len, struct async_submit_ctl *submit) { struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ, &dest, 1, srcs, 2, len); struct dma_device *dma = chan ? 
chan->device : NULL; const u8 *amul, *bmul; u8 ax, bx; u8 *a, *b, *c; if (dma) { dma_addr_t dma_dest[2]; dma_addr_t dma_src[2]; struct device *dev = dma->dev; struct dma_async_tx_descriptor *tx; enum dma_ctrl_flags dma_flags = DMA_PREP_PQ_DISABLE_P; if (submit->flags & ASYNC_TX_FENCE) dma_flags |= DMA_PREP_FENCE; dma_dest[1] = dma_map_page(dev, dest, 0, len, DMA_BIDIRECTIONAL); dma_src[0] = dma_map_page(dev, srcs[0], 0, len, DMA_TO_DEVICE); dma_src[1] = dma_map_page(dev, srcs[1], 0, len, DMA_TO_DEVICE); tx = dma->device_prep_dma_pq(chan, dma_dest, dma_src, 2, coef, len, dma_flags); if (tx) { async_tx_submit(chan, tx, submit); return tx; } /* could not get a descriptor, unmap and fall through to * the synchronous path */ dma_unmap_page(dev, dma_dest[1], len, DMA_BIDIRECTIONAL); dma_unmap_page(dev, dma_src[0], len, DMA_TO_DEVICE); dma_unmap_page(dev, dma_src[1], len, DMA_TO_DEVICE); } /* run the operation synchronously */ async_tx_quiesce(&submit->depend_tx); amul = raid6_gfmul[coef[0]]; bmul = raid6_gfmul[coef[1]]; a = page_address(srcs[0]); b = page_address(srcs[1]); c = page_address(dest); while (len--) { ax = amul[*a++]; bx = bmul[*b++]; *c++ = ax ^ bx; } return NULL; } static struct dma_async_tx_descriptor * async_mult(struct page *dest, struct page *src, u8 coef, size_t len, struct async_submit_ctl *submit) { struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ, &dest, 1, &src, 1, len); struct dma_device *dma = chan ? 
chan->device : NULL; const u8 *qmul; /* Q multiplier table */ u8 *d, *s; if (dma) { dma_addr_t dma_dest[2]; dma_addr_t dma_src[1]; struct device *dev = dma->dev; struct dma_async_tx_descriptor *tx; enum dma_ctrl_flags dma_flags = DMA_PREP_PQ_DISABLE_P; if (submit->flags & ASYNC_TX_FENCE) dma_flags |= DMA_PREP_FENCE; dma_dest[1] = dma_map_page(dev, dest, 0, len, DMA_BIDIRECTIONAL); dma_src[0] = dma_map_page(dev, src, 0, len, DMA_TO_DEVICE); tx = dma->device_prep_dma_pq(chan, dma_dest, dma_src, 1, &coef, len, dma_flags); if (tx) { async_tx_submit(chan, tx, submit); return tx; } /* could not get a descriptor, unmap and fall through to * the synchronous path */ dma_unmap_page(dev, dma_dest[1], len, DMA_BIDIRECTIONAL); dma_unmap_page(dev, dma_src[0], len, DMA_TO_DEVICE); } /* no channel available, or failed to allocate a descriptor, so * perform the operation synchronously */ async_tx_quiesce(&submit->depend_tx); qmul = raid6_gfmul[coef]; d = page_address(dest); s = page_address(src); while (len--) *d++ = qmul[*s++]; return NULL; } static struct dma_async_tx_descriptor * __2data_recov_4(int disks, size_t bytes, int faila, int failb, struct page **blocks, struct async_submit_ctl *submit) { struct dma_async_tx_descriptor *tx = NULL; struct page *p, *q, *a, *b; struct page *srcs[2]; unsigned char coef[2]; enum async_tx_flags flags = submit->flags; dma_async_tx_callback cb_fn = submit->cb_fn; void *cb_param = submit->cb_param; void *scribble = submit->scribble; p = blocks[disks-2]; q = blocks[disks-1]; a = blocks[faila]; b = blocks[failb]; /* in the 4 disk case P + Pxy == P and Q + Qxy == Q */ /* Dx = A*(P+Pxy) + B*(Q+Qxy) */ srcs[0] = p; srcs[1] = q; coef[0] = raid6_gfexi[failb-faila]; coef[1] = raid6_gfinv[raid6_gfexp[faila]^raid6_gfexp[failb]]; init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble); tx = async_sum_product(b, srcs, coef, bytes, submit); /* Dy = P+Pxy+Dx */ srcs[0] = p; srcs[1] = b; init_async_submit(submit, flags | ASYNC_TX_XOR_ZERO_DST, tx, 
cb_fn, cb_param, scribble); tx = async_xor(a, srcs, 0, 2, bytes, submit); return tx; } static struct dma_async_tx_descriptor * __2data_recov_5(int disks, size_t bytes, int faila, int failb, struct page **blocks, struct async_submit_ctl *submit) { struct dma_async_tx_descriptor *tx = NULL; struct page *p, *q, *g, *dp, *dq; struct page *srcs[2]; unsigned char coef[2]; enum async_tx_flags flags = submit->flags; dma_async_tx_callback cb_fn = submit->cb_fn; void *cb_param = submit->cb_param; void *scribble = submit->scribble; int good_srcs, good, i; good_srcs = 0; good = -1; for (i = 0; i < disks-2; i++) { if (blocks[i] == NULL) continue; if (i == faila || i == failb) continue; good = i; good_srcs++; } BUG_ON(good_srcs > 1); p = blocks[disks-2]; q = blocks[disks-1]; g = blocks[good]; /* Compute syndrome with zero for the missing data pages * Use the dead data pages as temporary storage for delta p and * delta q */ dp = blocks[faila]; dq = blocks[failb]; init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble); tx = async_memcpy(dp, g, 0, 0, bytes, submit); init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble); tx = async_mult(dq, g, raid6_gfexp[good], bytes, submit); /* compute P + Pxy */ srcs[0] = dp; srcs[1] = p; init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx, NULL, NULL, scribble); tx = async_xor(dp, srcs, 0, 2, bytes, submit); /* compute Q + Qxy */ srcs[0] = dq; srcs[1] = q; init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx, NULL, NULL, scribble); tx = async_xor(dq, srcs, 0, 2, bytes, submit); /* Dx = A*(P+Pxy) + B*(Q+Qxy) */ srcs[0] = dp; srcs[1] = dq; coef[0] = raid6_gfexi[failb-faila]; coef[1] = raid6_gfinv[raid6_gfexp[faila]^raid6_gfexp[failb]]; init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble); tx = async_sum_product(dq, srcs, coef, bytes, submit); /* Dy = P+Pxy+Dx */ srcs[0] = dp; srcs[1] = dq; init_async_submit(submit, flags | ASYNC_TX_XOR_DROP_DST, tx, cb_fn, cb_param, scribble); 
tx = async_xor(dp, srcs, 0, 2, bytes, submit); return tx; } static struct dma_async_tx_descriptor * __2data_recov_n(int disks, size_t bytes, int faila, int failb, struct page **blocks, struct async_submit_ctl *submit) { struct dma_async_tx_descriptor *tx = NULL; struct page *p, *q, *dp, *dq; struct page *srcs[2]; unsigned char coef[2]; enum async_tx_flags flags = submit->flags; dma_async_tx_callback cb_fn = submit->cb_fn; void *cb_param = submit->cb_param; void *scribble = submit->scribble; p = blocks[disks-2]; q = blocks[disks-1]; /* Compute syndrome with zero for the missing data pages * Use the dead data pages as temporary storage for * delta p and delta q */ dp = blocks[faila]; blocks[faila] = NULL; blocks[disks-2] = dp; dq = blocks[failb]; blocks[failb] = NULL; blocks[disks-1] = dq; init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble); tx = async_gen_syndrome(blocks, 0, disks, bytes, submit); /* Restore pointer table */ blocks[faila] = dp; blocks[failb] = dq; blocks[disks-2] = p; blocks[disks-1] = q; /* compute P + Pxy */ srcs[0] = dp; srcs[1] = p; init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx, NULL, NULL, scribble); tx = async_xor(dp, srcs, 0, 2, bytes, submit); /* compute Q + Qxy */ srcs[0] = dq; srcs[1] = q; init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx, NULL, NULL, scribble); tx = async_xor(dq, srcs, 0, 2, bytes, submit); /* Dx = A*(P+Pxy) + B*(Q+Qxy) */ srcs[0] = dp; srcs[1] = dq; coef[0] = raid6_gfexi[failb-faila]; coef[1] = raid6_gfinv[raid6_gfexp[faila]^raid6_gfexp[failb]]; init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble); tx = async_sum_product(dq, srcs, coef, bytes, submit); /* Dy = P+Pxy+Dx */ srcs[0] = dp; srcs[1] = dq; init_async_submit(submit, flags | ASYNC_TX_XOR_DROP_DST, tx, cb_fn, cb_param, scribble); tx = async_xor(dp, srcs, 0, 2, bytes, submit); return tx; } /** * async_raid6_2data_recov - asynchronously calculate two missing data blocks * @disks: number of disks in 
the RAID-6 array * @bytes: block size * @faila: first failed drive index * @failb: second failed drive index * @blocks: array of source pointers where the last two entries are p and q * @submit: submission/completion modifiers */ struct dma_async_tx_descriptor * async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb, struct page **blocks, struct async_submit_ctl *submit) { void *scribble = submit->scribble; int non_zero_srcs, i; BUG_ON(faila == failb); if (failb < faila) swap(faila, failb); pr_debug("%s: disks: %d len: %zu\n", __func__, disks, bytes); /* if a dma resource is not available or a scribble buffer is not * available punt to the synchronous path. In the 'dma not * available' case be sure to use the scribble buffer to * preserve the content of 'blocks' as the caller intended. */ if (!async_dma_find_channel(DMA_PQ) || !scribble) { void **ptrs = scribble ? scribble : (void **) blocks; async_tx_quiesce(&submit->depend_tx); for (i = 0; i < disks; i++) if (blocks[i] == NULL) ptrs[i] = (void *) raid6_empty_zero_page; else ptrs[i] = page_address(blocks[i]); raid6_2data_recov(disks, bytes, faila, failb, ptrs); async_tx_sync_epilog(submit); return NULL; } non_zero_srcs = 0; for (i = 0; i < disks-2 && non_zero_srcs < 4; i++) if (blocks[i]) non_zero_srcs++; switch (non_zero_srcs) { case 0: case 1: /* There must be at least 2 sources - the failed devices. */ BUG(); case 2: /* dma devices do not uniformly understand a zero source pq * operation (in contrast to the synchronous case), so * explicitly handle the special case of a 4 disk array with * both data disks missing. */ return __2data_recov_4(disks, bytes, faila, failb, blocks, submit); case 3: /* dma devices do not uniformly understand a single * source pq operation (in contrast to the synchronous * case), so explicitly handle the special case of a 5 disk * array with 2 of 3 data disks missing. 
*/ return __2data_recov_5(disks, bytes, faila, failb, blocks, submit); default: return __2data_recov_n(disks, bytes, faila, failb, blocks, submit); } } EXPORT_SYMBOL_GPL(async_raid6_2data_recov); /** * async_raid6_datap_recov - asynchronously calculate a data and the 'p' block * @disks: number of disks in the RAID-6 array * @bytes: block size * @faila: failed drive index * @blocks: array of source pointers where the last two entries are p and q * @submit: submission/completion modifiers */ struct dma_async_tx_descriptor * async_raid6_datap_recov(int disks, size_t bytes, int faila, struct page **blocks, struct async_submit_ctl *submit) { struct dma_async_tx_descriptor *tx = NULL; struct page *p, *q, *dq; u8 coef; enum async_tx_flags flags = submit->flags; dma_async_tx_callback cb_fn = submit->cb_fn; void *cb_param = submit->cb_param; void *scribble = submit->scribble; int good_srcs, good, i; struct page *srcs[2]; pr_debug("%s: disks: %d len: %zu\n", __func__, disks, bytes); /* if a dma resource is not available or a scribble buffer is not * available punt to the synchronous path. In the 'dma not * available' case be sure to use the scribble buffer to * preserve the content of 'blocks' as the caller intended. */ if (!async_dma_find_channel(DMA_PQ) || !scribble) { void **ptrs = scribble ? 
scribble : (void **) blocks; async_tx_quiesce(&submit->depend_tx); for (i = 0; i < disks; i++) if (blocks[i] == NULL) ptrs[i] = (void*)raid6_empty_zero_page; else ptrs[i] = page_address(blocks[i]); raid6_datap_recov(disks, bytes, faila, ptrs); async_tx_sync_epilog(submit); return NULL; } good_srcs = 0; good = -1; for (i = 0; i < disks-2; i++) { if (i == faila) continue; if (blocks[i]) { good = i; good_srcs++; if (good_srcs > 1) break; } } BUG_ON(good_srcs == 0); p = blocks[disks-2]; q = blocks[disks-1]; /* Compute syndrome with zero for the missing data page * Use the dead data page as temporary storage for delta q */ dq = blocks[faila]; blocks[faila] = NULL; blocks[disks-1] = dq; /* in the 4-disk case we only need to perform a single source * multiplication with the one good data block. */ if (good_srcs == 1) { struct page *g = blocks[good]; init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble); tx = async_memcpy(p, g, 0, 0, bytes, submit); init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble); tx = async_mult(dq, g, raid6_gfexp[good], bytes, submit); } else { init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble); tx = async_gen_syndrome(blocks, 0, disks, bytes, submit); } /* Restore pointer table */ blocks[faila] = dq; blocks[disks-1] = q; /* calculate g^{-faila} */ coef = raid6_gfinv[raid6_gfexp[faila]]; srcs[0] = dq; srcs[1] = q; init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx, NULL, NULL, scribble); tx = async_xor(dq, srcs, 0, 2, bytes, submit); init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble); tx = async_mult(dq, dq, coef, bytes, submit); srcs[0] = p; srcs[1] = dq; init_async_submit(submit, flags | ASYNC_TX_XOR_DROP_DST, tx, cb_fn, cb_param, scribble); tx = async_xor(p, srcs, 0, 2, bytes, submit); return tx; } EXPORT_SYMBOL_GPL(async_raid6_datap_recov); MODULE_AUTHOR("Dan Williams <dan.j.williams@intel.com>"); MODULE_DESCRIPTION("asynchronous RAID-6 recovery api"); 
MODULE_LICENSE("GPL");
gpl-2.0
flar2/bulletproof-m7-5.0
arch/avr32/mach-at32ap/pm.c
9945
5664
/* * AVR32 AP Power Management * * Copyright (C) 2008 Atmel Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. */ #include <linux/io.h> #include <linux/suspend.h> #include <linux/vmalloc.h> #include <asm/cacheflush.h> #include <asm/sysreg.h> #include <mach/chip.h> #include <mach/pm.h> #include <mach/sram.h> #include "sdramc.h" #define SRAM_PAGE_FLAGS (SYSREG_BIT(TLBELO_D) | SYSREG_BF(SZ, 1) \ | SYSREG_BF(AP, 3) | SYSREG_BIT(G)) static unsigned long pm_sram_start; static size_t pm_sram_size; static struct vm_struct *pm_sram_area; static void (*avr32_pm_enter_standby)(unsigned long sdramc_base); static void (*avr32_pm_enter_str)(unsigned long sdramc_base); /* * Must be called with interrupts disabled. Exceptions will be masked * on return (i.e. all exceptions will be "unrecoverable".) */ static void *avr32_pm_map_sram(void) { unsigned long vaddr; unsigned long page_addr; u32 tlbehi; u32 mmucr; vaddr = (unsigned long)pm_sram_area->addr; page_addr = pm_sram_start & PAGE_MASK; /* * Mask exceptions and grab the first TLB entry. We won't be * needing it while sleeping. */ asm volatile("ssrf %0" : : "i"(SYSREG_EM_OFFSET) : "memory"); mmucr = sysreg_read(MMUCR); tlbehi = sysreg_read(TLBEHI); sysreg_write(MMUCR, SYSREG_BFINS(DRP, 0, mmucr)); tlbehi = SYSREG_BF(ASID, SYSREG_BFEXT(ASID, tlbehi)); tlbehi |= vaddr & PAGE_MASK; tlbehi |= SYSREG_BIT(TLBEHI_V); sysreg_write(TLBELO, page_addr | SRAM_PAGE_FLAGS); sysreg_write(TLBEHI, tlbehi); __builtin_tlbw(); return (void *)(vaddr + pm_sram_start - page_addr); } /* * Must be called with interrupts disabled. Exceptions will be * unmasked on return. 
*/ static void avr32_pm_unmap_sram(void) { u32 mmucr; u32 tlbehi; u32 tlbarlo; /* Going to update TLB entry at index 0 */ mmucr = sysreg_read(MMUCR); tlbehi = sysreg_read(TLBEHI); sysreg_write(MMUCR, SYSREG_BFINS(DRP, 0, mmucr)); /* Clear the "valid" bit */ tlbehi = SYSREG_BF(ASID, SYSREG_BFEXT(ASID, tlbehi)); sysreg_write(TLBEHI, tlbehi); /* Mark it as "not accessed" */ tlbarlo = sysreg_read(TLBARLO); sysreg_write(TLBARLO, tlbarlo | 0x80000000U); /* Update the TLB */ __builtin_tlbw(); /* Unmask exceptions */ asm volatile("csrf %0" : : "i"(SYSREG_EM_OFFSET) : "memory"); } static int avr32_pm_valid_state(suspend_state_t state) { switch (state) { case PM_SUSPEND_ON: case PM_SUSPEND_STANDBY: case PM_SUSPEND_MEM: return 1; default: return 0; } } static int avr32_pm_enter(suspend_state_t state) { u32 lpr_saved; u32 evba_saved; void *sram; switch (state) { case PM_SUSPEND_STANDBY: sram = avr32_pm_map_sram(); /* Switch to in-sram exception handlers */ evba_saved = sysreg_read(EVBA); sysreg_write(EVBA, (unsigned long)sram); /* * Save the LPR register so that we can re-enable * SDRAM Low Power mode on resume. */ lpr_saved = sdramc_readl(LPR); pr_debug("%s: Entering standby...\n", __func__); avr32_pm_enter_standby(SDRAMC_BASE); sdramc_writel(LPR, lpr_saved); /* Switch back to regular exception handlers */ sysreg_write(EVBA, evba_saved); avr32_pm_unmap_sram(); break; case PM_SUSPEND_MEM: sram = avr32_pm_map_sram(); /* Switch to in-sram exception handlers */ evba_saved = sysreg_read(EVBA); sysreg_write(EVBA, (unsigned long)sram); /* * Save the LPR register so that we can re-enable * SDRAM Low Power mode on resume. 
*/ lpr_saved = sdramc_readl(LPR); pr_debug("%s: Entering suspend-to-ram...\n", __func__); avr32_pm_enter_str(SDRAMC_BASE); sdramc_writel(LPR, lpr_saved); /* Switch back to regular exception handlers */ sysreg_write(EVBA, evba_saved); avr32_pm_unmap_sram(); break; case PM_SUSPEND_ON: pr_debug("%s: Entering idle...\n", __func__); cpu_enter_idle(); break; default: pr_debug("%s: Invalid suspend state %d\n", __func__, state); goto out; } pr_debug("%s: wakeup\n", __func__); out: return 0; } static const struct platform_suspend_ops avr32_pm_ops = { .valid = avr32_pm_valid_state, .enter = avr32_pm_enter, }; static unsigned long avr32_pm_offset(void *symbol) { extern u8 pm_exception[]; return (unsigned long)symbol - (unsigned long)pm_exception; } static int __init avr32_pm_init(void) { extern u8 pm_exception[]; extern u8 pm_irq0[]; extern u8 pm_standby[]; extern u8 pm_suspend_to_ram[]; extern u8 pm_sram_end[]; void *dst; /* * To keep things simple, we depend on not needing more than a * single page. */ pm_sram_size = avr32_pm_offset(pm_sram_end); if (pm_sram_size > PAGE_SIZE) goto err; pm_sram_start = sram_alloc(pm_sram_size); if (!pm_sram_start) goto err_alloc_sram; /* Grab a virtual area we can use later on. 
*/ pm_sram_area = get_vm_area(pm_sram_size, VM_IOREMAP); if (!pm_sram_area) goto err_vm_area; pm_sram_area->phys_addr = pm_sram_start; local_irq_disable(); dst = avr32_pm_map_sram(); memcpy(dst, pm_exception, pm_sram_size); flush_dcache_region(dst, pm_sram_size); invalidate_icache_region(dst, pm_sram_size); avr32_pm_unmap_sram(); local_irq_enable(); avr32_pm_enter_standby = dst + avr32_pm_offset(pm_standby); avr32_pm_enter_str = dst + avr32_pm_offset(pm_suspend_to_ram); intc_set_suspend_handler(avr32_pm_offset(pm_irq0)); suspend_set_ops(&avr32_pm_ops); printk("AVR32 AP Power Management enabled\n"); return 0; err_vm_area: sram_free(pm_sram_start, pm_sram_size); err_alloc_sram: err: pr_err("AVR32 Power Management initialization failed\n"); return -ENOMEM; } arch_initcall(avr32_pm_init);
gpl-2.0
JoseDuque/linux.4.1.7
fs/ext4/readpage.c
218
8381
/* * linux/fs/ext4/readpage.c * * Copyright (C) 2002, Linus Torvalds. * Copyright (C) 2015, Google, Inc. * * This was originally taken from fs/mpage.c * * The intent is the ext4_mpage_readpages() function here is intended * to replace mpage_readpages() in the general case, not just for * encrypted files. It has some limitations (see below), where it * will fall back to read_block_full_page(), but these limitations * should only be hit when page_size != block_size. * * This will allow us to attach a callback function to support ext4 * encryption. * * If anything unusual happens, such as: * * - encountering a page which has buffers * - encountering a page which has a non-hole after a hole * - encountering a page with non-contiguous blocks * * then this code just gives up and calls the buffer_head-based read function. * It does handle a page which has holes at the end - that is a common case: * the end-of-file on blocksize < PAGE_CACHE_SIZE setups. * */ #include <linux/kernel.h> #include <linux/export.h> #include <linux/mm.h> #include <linux/kdev_t.h> #include <linux/gfp.h> #include <linux/bio.h> #include <linux/fs.h> #include <linux/buffer_head.h> #include <linux/blkdev.h> #include <linux/highmem.h> #include <linux/prefetch.h> #include <linux/mpage.h> #include <linux/writeback.h> #include <linux/backing-dev.h> #include <linux/pagevec.h> #include <linux/cleancache.h> #include "ext4.h" /* * Call ext4_decrypt on every single page, reusing the encryption * context. 
*/ static void completion_pages(struct work_struct *work) { #ifdef CONFIG_EXT4_FS_ENCRYPTION struct ext4_crypto_ctx *ctx = container_of(work, struct ext4_crypto_ctx, work); struct bio *bio = ctx->bio; struct bio_vec *bv; int i; bio_for_each_segment_all(bv, bio, i) { struct page *page = bv->bv_page; int ret = ext4_decrypt(ctx, page); if (ret) { WARN_ON_ONCE(1); SetPageError(page); } else SetPageUptodate(page); unlock_page(page); } ext4_release_crypto_ctx(ctx); bio_put(bio); #else BUG(); #endif } static inline bool ext4_bio_encrypted(struct bio *bio) { #ifdef CONFIG_EXT4_FS_ENCRYPTION return unlikely(bio->bi_private != NULL); #else return false; #endif } /* * I/O completion handler for multipage BIOs. * * The mpage code never puts partial pages into a BIO (except for end-of-file). * If a page does not map to a contiguous run of blocks then it simply falls * back to block_read_full_page(). * * Why is this? If a page's completion depends on a number of different BIOs * which can complete in any order (or at the same time) then determining the * status of that page is hard. See end_buffer_async_read() for the details. * There is no point in duplicating all that complexity. 
*/ static void mpage_end_io(struct bio *bio, int err) { struct bio_vec *bv; int i; if (ext4_bio_encrypted(bio)) { struct ext4_crypto_ctx *ctx = bio->bi_private; if (err) { ext4_release_crypto_ctx(ctx); } else { INIT_WORK(&ctx->work, completion_pages); ctx->bio = bio; queue_work(ext4_read_workqueue, &ctx->work); return; } } bio_for_each_segment_all(bv, bio, i) { struct page *page = bv->bv_page; if (!err) { SetPageUptodate(page); } else { ClearPageUptodate(page); SetPageError(page); } unlock_page(page); } bio_put(bio); } int ext4_mpage_readpages(struct address_space *mapping, struct list_head *pages, struct page *page, unsigned nr_pages) { struct bio *bio = NULL; unsigned page_idx; sector_t last_block_in_bio = 0; struct inode *inode = mapping->host; const unsigned blkbits = inode->i_blkbits; const unsigned blocks_per_page = PAGE_CACHE_SIZE >> blkbits; const unsigned blocksize = 1 << blkbits; sector_t block_in_file; sector_t last_block; sector_t last_block_in_file; sector_t blocks[MAX_BUF_PER_PAGE]; unsigned page_block; struct block_device *bdev = inode->i_sb->s_bdev; int length; unsigned relative_block = 0; struct ext4_map_blocks map; map.m_pblk = 0; map.m_lblk = 0; map.m_len = 0; map.m_flags = 0; for (page_idx = 0; nr_pages; page_idx++, nr_pages--) { int fully_mapped = 1; unsigned first_hole = blocks_per_page; prefetchw(&page->flags); if (pages) { page = list_entry(pages->prev, struct page, lru); list_del(&page->lru); if (add_to_page_cache_lru(page, mapping, page->index, GFP_KERNEL)) goto next_page; } if (page_has_buffers(page)) goto confused; block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits); last_block = block_in_file + nr_pages * blocks_per_page; last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits; if (last_block > last_block_in_file) last_block = last_block_in_file; page_block = 0; /* * Map blocks using the previous result first. 
*/ if ((map.m_flags & EXT4_MAP_MAPPED) && block_in_file > map.m_lblk && block_in_file < (map.m_lblk + map.m_len)) { unsigned map_offset = block_in_file - map.m_lblk; unsigned last = map.m_len - map_offset; for (relative_block = 0; ; relative_block++) { if (relative_block == last) { /* needed? */ map.m_flags &= ~EXT4_MAP_MAPPED; break; } if (page_block == blocks_per_page) break; blocks[page_block] = map.m_pblk + map_offset + relative_block; page_block++; block_in_file++; } } /* * Then do more ext4_map_blocks() calls until we are * done with this page. */ while (page_block < blocks_per_page) { if (block_in_file < last_block) { map.m_lblk = block_in_file; map.m_len = last_block - block_in_file; if (ext4_map_blocks(NULL, inode, &map, 0) < 0) { set_error_page: SetPageError(page); zero_user_segment(page, 0, PAGE_CACHE_SIZE); unlock_page(page); goto next_page; } } if ((map.m_flags & EXT4_MAP_MAPPED) == 0) { fully_mapped = 0; if (first_hole == blocks_per_page) first_hole = page_block; page_block++; block_in_file++; continue; } if (first_hole != blocks_per_page) goto confused; /* hole -> non-hole */ /* Contiguous blocks? */ if (page_block && blocks[page_block-1] != map.m_pblk-1) goto confused; for (relative_block = 0; ; relative_block++) { if (relative_block == map.m_len) { /* needed? */ map.m_flags &= ~EXT4_MAP_MAPPED; break; } else if (page_block == blocks_per_page) break; blocks[page_block] = map.m_pblk+relative_block; page_block++; block_in_file++; } } if (first_hole != blocks_per_page) { zero_user_segment(page, first_hole << blkbits, PAGE_CACHE_SIZE); if (first_hole == 0) { SetPageUptodate(page); unlock_page(page); goto next_page; } } else if (fully_mapped) { SetPageMappedToDisk(page); } if (fully_mapped && blocks_per_page == 1 && !PageUptodate(page) && cleancache_get_page(page) == 0) { SetPageUptodate(page); goto confused; } /* * This page will go to BIO. Do we need to send this * BIO off first? 
*/ if (bio && (last_block_in_bio != blocks[0] - 1)) { submit_and_realloc: submit_bio(READ, bio); bio = NULL; } if (bio == NULL) { struct ext4_crypto_ctx *ctx = NULL; if (ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode)) { ctx = ext4_get_crypto_ctx(inode); if (IS_ERR(ctx)) goto set_error_page; } bio = bio_alloc(GFP_KERNEL, min_t(int, nr_pages, bio_get_nr_vecs(bdev))); if (!bio) { if (ctx) ext4_release_crypto_ctx(ctx); goto set_error_page; } bio->bi_bdev = bdev; bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9); bio->bi_end_io = mpage_end_io; bio->bi_private = ctx; } length = first_hole << blkbits; if (bio_add_page(bio, page, length, 0) < length) goto submit_and_realloc; if (((map.m_flags & EXT4_MAP_BOUNDARY) && (relative_block == map.m_len)) || (first_hole != blocks_per_page)) { submit_bio(READ, bio); bio = NULL; } else last_block_in_bio = blocks[blocks_per_page - 1]; goto next_page; confused: if (bio) { submit_bio(READ, bio); bio = NULL; } if (!PageUptodate(page)) block_read_full_page(page, ext4_get_block); else unlock_page(page); next_page: if (pages) page_cache_release(page); } BUG_ON(pages && !list_empty(pages)); if (bio) submit_bio(READ, bio); return 0; }
gpl-2.0
sbu-fsl/fuse-kernel-instrumentation
arch/mips/cavium-octeon/crypto/octeon-md5.c
218
5127
/* * Cryptographic API. * * MD5 Message Digest Algorithm (RFC1321). * * Adapted for OCTEON by Aaro Koskinen <aaro.koskinen@iki.fi>. * * Based on crypto/md5.c, which is: * * Derived from cryptoapi implementation, originally based on the * public domain implementation written by Colin Plumb in 1993. * * Copyright (c) Cryptoapi developers. * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. */ #include <crypto/md5.h> #include <linux/init.h> #include <linux/types.h> #include <linux/module.h> #include <linux/string.h> #include <asm/byteorder.h> #include <linux/cryptohash.h> #include <asm/octeon/octeon.h> #include <crypto/internal/hash.h> #include "octeon-crypto.h" /* * We pass everything as 64-bit. OCTEON can handle misaligned data. */ static void octeon_md5_store_hash(struct md5_state *ctx) { u64 *hash = (u64 *)ctx->hash; write_octeon_64bit_hash_dword(hash[0], 0); write_octeon_64bit_hash_dword(hash[1], 1); } static void octeon_md5_read_hash(struct md5_state *ctx) { u64 *hash = (u64 *)ctx->hash; hash[0] = read_octeon_64bit_hash_dword(0); hash[1] = read_octeon_64bit_hash_dword(1); } static void octeon_md5_transform(const void *_block) { const u64 *block = _block; write_octeon_64bit_block_dword(block[0], 0); write_octeon_64bit_block_dword(block[1], 1); write_octeon_64bit_block_dword(block[2], 2); write_octeon_64bit_block_dword(block[3], 3); write_octeon_64bit_block_dword(block[4], 4); write_octeon_64bit_block_dword(block[5], 5); write_octeon_64bit_block_dword(block[6], 6); octeon_md5_start(block[7]); } static int octeon_md5_init(struct shash_desc *desc) { struct md5_state *mctx = shash_desc_ctx(desc); mctx->hash[0] = cpu_to_le32(0x67452301); mctx->hash[1] = cpu_to_le32(0xefcdab89); mctx->hash[2] = 
cpu_to_le32(0x98badcfe); mctx->hash[3] = cpu_to_le32(0x10325476); mctx->byte_count = 0; return 0; } static int octeon_md5_update(struct shash_desc *desc, const u8 *data, unsigned int len) { struct md5_state *mctx = shash_desc_ctx(desc); const u32 avail = sizeof(mctx->block) - (mctx->byte_count & 0x3f); struct octeon_cop2_state state; unsigned long flags; mctx->byte_count += len; if (avail > len) { memcpy((char *)mctx->block + (sizeof(mctx->block) - avail), data, len); return 0; } memcpy((char *)mctx->block + (sizeof(mctx->block) - avail), data, avail); flags = octeon_crypto_enable(&state); octeon_md5_store_hash(mctx); octeon_md5_transform(mctx->block); data += avail; len -= avail; while (len >= sizeof(mctx->block)) { octeon_md5_transform(data); data += sizeof(mctx->block); len -= sizeof(mctx->block); } octeon_md5_read_hash(mctx); octeon_crypto_disable(&state, flags); memcpy(mctx->block, data, len); return 0; } static int octeon_md5_final(struct shash_desc *desc, u8 *out) { struct md5_state *mctx = shash_desc_ctx(desc); const unsigned int offset = mctx->byte_count & 0x3f; char *p = (char *)mctx->block + offset; int padding = 56 - (offset + 1); struct octeon_cop2_state state; unsigned long flags; *p++ = 0x80; flags = octeon_crypto_enable(&state); octeon_md5_store_hash(mctx); if (padding < 0) { memset(p, 0x00, padding + sizeof(u64)); octeon_md5_transform(mctx->block); p = (char *)mctx->block; padding = 56; } memset(p, 0, padding); mctx->block[14] = cpu_to_le32(mctx->byte_count << 3); mctx->block[15] = cpu_to_le32(mctx->byte_count >> 29); octeon_md5_transform(mctx->block); octeon_md5_read_hash(mctx); octeon_crypto_disable(&state, flags); memcpy(out, mctx->hash, sizeof(mctx->hash)); memset(mctx, 0, sizeof(*mctx)); return 0; } static int octeon_md5_export(struct shash_desc *desc, void *out) { struct md5_state *ctx = shash_desc_ctx(desc); memcpy(out, ctx, sizeof(*ctx)); return 0; } static int octeon_md5_import(struct shash_desc *desc, const void *in) { struct md5_state 
*ctx = shash_desc_ctx(desc); memcpy(ctx, in, sizeof(*ctx)); return 0; } static struct shash_alg alg = { .digestsize = MD5_DIGEST_SIZE, .init = octeon_md5_init, .update = octeon_md5_update, .final = octeon_md5_final, .export = octeon_md5_export, .import = octeon_md5_import, .descsize = sizeof(struct md5_state), .statesize = sizeof(struct md5_state), .base = { .cra_name = "md5", .cra_driver_name= "octeon-md5", .cra_priority = OCTEON_CR_OPCODE_PRIORITY, .cra_flags = CRYPTO_ALG_TYPE_SHASH, .cra_blocksize = MD5_HMAC_BLOCK_SIZE, .cra_module = THIS_MODULE, } }; static int __init md5_mod_init(void) { if (!octeon_has_crypto()) return -ENOTSUPP; return crypto_register_shash(&alg); } static void __exit md5_mod_fini(void) { crypto_unregister_shash(&alg); } module_init(md5_mod_init); module_exit(md5_mod_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("MD5 Message Digest Algorithm (OCTEON)"); MODULE_AUTHOR("Aaro Koskinen <aaro.koskinen@iki.fi>");
gpl-2.0
atilag/flatfish-kernel
drivers/sbus/char/uctrl.c
218
10993
/* uctrl.c: TS102 Microcontroller interface on Tadpole Sparcbook 3 * * Copyright 1999 Derrick J Brashear (shadow@dementia.org) * Copyright 2008 David S. Miller (davem@davemloft.net) */ #include <linux/module.h> #include <linux/errno.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/slab.h> #include <linux/mutex.h> #include <linux/ioport.h> #include <linux/init.h> #include <linux/miscdevice.h> #include <linux/mm.h> #include <linux/of.h> #include <linux/of_device.h> #include <asm/openprom.h> #include <asm/oplib.h> #include <asm/system.h> #include <asm/irq.h> #include <asm/io.h> #include <asm/pgtable.h> #define UCTRL_MINOR 174 #define DEBUG 1 #ifdef DEBUG #define dprintk(x) printk x #else #define dprintk(x) #endif struct uctrl_regs { u32 uctrl_intr; u32 uctrl_data; u32 uctrl_stat; u32 uctrl_xxx[5]; }; struct ts102_regs { u32 card_a_intr; u32 card_a_stat; u32 card_a_ctrl; u32 card_a_xxx; u32 card_b_intr; u32 card_b_stat; u32 card_b_ctrl; u32 card_b_xxx; u32 uctrl_intr; u32 uctrl_data; u32 uctrl_stat; u32 uctrl_xxx; u32 ts102_xxx[4]; }; /* Bits for uctrl_intr register */ #define UCTRL_INTR_TXE_REQ 0x01 /* transmit FIFO empty int req */ #define UCTRL_INTR_TXNF_REQ 0x02 /* transmit FIFO not full int req */ #define UCTRL_INTR_RXNE_REQ 0x04 /* receive FIFO not empty int req */ #define UCTRL_INTR_RXO_REQ 0x08 /* receive FIFO overflow int req */ #define UCTRL_INTR_TXE_MSK 0x10 /* transmit FIFO empty mask */ #define UCTRL_INTR_TXNF_MSK 0x20 /* transmit FIFO not full mask */ #define UCTRL_INTR_RXNE_MSK 0x40 /* receive FIFO not empty mask */ #define UCTRL_INTR_RXO_MSK 0x80 /* receive FIFO overflow mask */ /* Bits for uctrl_stat register */ #define UCTRL_STAT_TXE_STA 0x01 /* transmit FIFO empty status */ #define UCTRL_STAT_TXNF_STA 0x02 /* transmit FIFO not full status */ #define UCTRL_STAT_RXNE_STA 0x04 /* receive FIFO not empty status */ #define UCTRL_STAT_RXO_STA 0x08 /* receive FIFO overflow status */ static DEFINE_MUTEX(uctrl_mutex); static const char 
*uctrl_extstatus[16] = { "main power available", "internal battery attached", "external battery attached", "external VGA attached", "external keyboard attached", "external mouse attached", "lid down", "internal battery currently charging", "external battery currently charging", "internal battery currently discharging", "external battery currently discharging", }; /* Everything required for one transaction with the uctrl */ struct uctrl_txn { u8 opcode; u8 inbits; u8 outbits; u8 *inbuf; u8 *outbuf; }; struct uctrl_status { u8 current_temp; /* 0x07 */ u8 reset_status; /* 0x0b */ u16 event_status; /* 0x0c */ u16 error_status; /* 0x10 */ u16 external_status; /* 0x11, 0x1b */ u8 internal_charge; /* 0x18 */ u8 external_charge; /* 0x19 */ u16 control_lcd; /* 0x20 */ u8 control_bitport; /* 0x21 */ u8 speaker_volume; /* 0x23 */ u8 control_tft_brightness; /* 0x24 */ u8 control_kbd_repeat_delay; /* 0x28 */ u8 control_kbd_repeat_period; /* 0x29 */ u8 control_screen_contrast; /* 0x2F */ }; enum uctrl_opcode { READ_SERIAL_NUMBER=0x1, READ_ETHERNET_ADDRESS=0x2, READ_HARDWARE_VERSION=0x3, READ_MICROCONTROLLER_VERSION=0x4, READ_MAX_TEMPERATURE=0x5, READ_MIN_TEMPERATURE=0x6, READ_CURRENT_TEMPERATURE=0x7, READ_SYSTEM_VARIANT=0x8, READ_POWERON_CYCLES=0x9, READ_POWERON_SECONDS=0xA, READ_RESET_STATUS=0xB, READ_EVENT_STATUS=0xC, READ_REAL_TIME_CLOCK=0xD, READ_EXTERNAL_VGA_PORT=0xE, READ_MICROCONTROLLER_ROM_CHECKSUM=0xF, READ_ERROR_STATUS=0x10, READ_EXTERNAL_STATUS=0x11, READ_USER_CONFIGURATION_AREA=0x12, READ_MICROCONTROLLER_VOLTAGE=0x13, READ_INTERNAL_BATTERY_VOLTAGE=0x14, READ_DCIN_VOLTAGE=0x15, READ_HORIZONTAL_POINTER_VOLTAGE=0x16, READ_VERTICAL_POINTER_VOLTAGE=0x17, READ_INTERNAL_BATTERY_CHARGE_LEVEL=0x18, READ_EXTERNAL_BATTERY_CHARGE_LEVEL=0x19, READ_REAL_TIME_CLOCK_ALARM=0x1A, READ_EVENT_STATUS_NO_RESET=0x1B, READ_INTERNAL_KEYBOARD_LAYOUT=0x1C, READ_EXTERNAL_KEYBOARD_LAYOUT=0x1D, READ_EEPROM_STATUS=0x1E, CONTROL_LCD=0x20, CONTROL_BITPORT=0x21, SPEAKER_VOLUME=0x23, 
	CONTROL_TFT_BRIGHTNESS=0x24,
	CONTROL_WATCHDOG=0x25,
	CONTROL_FACTORY_EEPROM_AREA=0x26,
	CONTROL_KBD_TIME_UNTIL_REPEAT=0x28,
	CONTROL_KBD_TIME_BETWEEN_REPEATS=0x29,
	CONTROL_TIMEZONE=0x2A,
	CONTROL_MARK_SPACE_RATIO=0x2B,
	CONTROL_DIAGNOSTIC_MODE=0x2E,
	CONTROL_SCREEN_CONTRAST=0x2F,
	RING_BELL=0x30,
	SET_DIAGNOSTIC_STATUS=0x32,
	CLEAR_KEY_COMBINATION_TABLE=0x33,
	PERFORM_SOFTWARE_RESET=0x34,
	SET_REAL_TIME_CLOCK=0x35,
	RECALIBRATE_POINTING_STICK=0x36,
	SET_BELL_FREQUENCY=0x37,
	SET_INTERNAL_BATTERY_CHARGE_RATE=0x39,
	SET_EXTERNAL_BATTERY_CHARGE_RATE=0x3A,
	SET_REAL_TIME_CLOCK_ALARM=0x3B,
	READ_EEPROM=0x40,
	WRITE_EEPROM=0x41,
	WRITE_TO_STATUS_DISPLAY=0x42,
	DEFINE_SPECIAL_CHARACTER=0x43,
	DEFINE_KEY_COMBINATION_ENTRY=0x50,
	DEFINE_STRING_TABLE_ENTRY=0x51,
	DEFINE_STATUS_SCREEN_DISPLAY=0x52,
	PERFORM_EMU_COMMANDS=0x64,
	READ_EMU_REGISTER=0x65,
	WRITE_EMU_REGISTER=0x66,
	READ_EMU_RAM=0x67,
	WRITE_EMU_RAM=0x68,
	READ_BQ_REGISTER=0x69,
	WRITE_BQ_REGISTER=0x6A,
	SET_USER_PASSWORD=0x70,
	VERIFY_USER_PASSWORD=0x71,
	GET_SYSTEM_PASSWORD_KEY=0x72,
	VERIFY_SYSTEM_PASSWORD=0x73,
	POWER_OFF=0x82,
	POWER_RESTART=0x83,
};

/* Per-device state: the mapped microcontroller registers, its IRQ and
 * the cached status block.  A single global instance is kept because
 * the open() path has no other way to find the device. */
static struct uctrl_driver {
	struct uctrl_regs __iomem *regs;
	int irq;
	int pending;
	struct uctrl_status status;
} *global_driver;

static void uctrl_get_event_status(struct uctrl_driver *);
static void uctrl_get_external_status(struct uctrl_driver *);

/*
 * ioctl entry point for /dev/uctrl.  No commands are implemented; every
 * cmd falls through to -EINVAL.  (The trailing return 0 is unreachable
 * as long as the switch has no break-ing cases.)
 */
static long uctrl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	default:
		return -EINVAL;
	}
	return 0;
}

/*
 * open() refreshes the cached event and external status from the
 * microcontroller, serialized by uctrl_mutex.
 */
static int uctrl_open(struct inode *inode, struct file *file)
{
	mutex_lock(&uctrl_mutex);
	uctrl_get_event_status(global_driver);
	uctrl_get_external_status(global_driver);
	mutex_unlock(&uctrl_mutex);
	return 0;
}

/*
 * IRQ handler for the microcontroller interrupt.
 * NOTE(review): it acknowledges nothing and reads no registers -- it
 * only claims the interrupt.  Whether the hardware deasserts on its own
 * cannot be determined from this file; confirm against TS102 docs.
 */
static irqreturn_t uctrl_interrupt(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static const struct file_operations uctrl_fops = {
	.owner =	THIS_MODULE,
	.llseek =	no_llseek,
	.unlocked_ioctl =	uctrl_ioctl,
	.open =		uctrl_open,
};

/* Misc device node (major 10, minor UCTRL_MINOR), positional
 * initializers: minor, name, fops. */
static struct miscdevice uctrl_dev = {
	UCTRL_MINOR,
	"uctrl",
	&uctrl_fops
};

/* Wait
for space to write, then write to it */
/* Busy-wait (bounded at 10000 polls, no delay between polls) until the
 * transmit FIFO reports not-full, then push one 32-bit word to the data
 * port.  Expands in a scope where a local `driver` pointer is visible.
 * Per uctrl_do_txn, the payload byte travels in bits 15:8 of the word. */
#define WRITEUCTLDATA(value) \
{ \
	unsigned int i; \
	for (i = 0; i < 10000; i++) { \
		if (UCTRL_STAT_TXNF_STA & sbus_readl(&driver->regs->uctrl_stat)) \
			break; \
	} \
	dprintk(("write data 0x%02x\n", value)); \
	sbus_writel(value, &driver->regs->uctrl_data); \
}

/* Wait for something to read, read it, then clear the bit.
 * NOTE(review): the loop breaks when RXNE is *clear* (receive FIFO
 * empty), which looks inverted relative to the stated intent of waiting
 * for data; the unconditional read below then proceeds regardless.
 * This matches the historical upstream code -- confirm against the
 * TS102 documentation before changing it. */
#define READUCTLDATA(value) \
{ \
	unsigned int i; \
	value = 0; \
	for (i = 0; i < 10000; i++) { \
		if ((UCTRL_STAT_RXNE_STA & sbus_readl(&driver->regs->uctrl_stat)) == 0) \
			break; \
		udelay(1); \
	} \
	value = sbus_readl(&driver->regs->uctrl_data); \
	dprintk(("read data 0x%02x\n", value)); \
	sbus_writel(UCTRL_STAT_RXNE_STA, &driver->regs->uctrl_stat); \
}

/*
 * Run one request/response transaction with the microcontroller:
 * clear pending status (write-back of the status word), send the opcode
 * followed by txn->inbits payload bytes, consume the one-word ack, then
 * read txn->outbits response bytes into txn->outbuf.  All bytes are
 * carried in the high byte (bits 15:8) of each 32-bit FIFO word.
 */
static void uctrl_do_txn(struct uctrl_driver *driver, struct uctrl_txn *txn)
{
	int stat, incnt, outcnt, bytecnt, intr;
	u32 byte;

	stat = sbus_readl(&driver->regs->uctrl_stat);
	intr = sbus_readl(&driver->regs->uctrl_intr);
	/* Writing the status value back clears the latched status bits. */
	sbus_writel(stat, &driver->regs->uctrl_stat);
	dprintk(("interrupt stat 0x%x int 0x%x\n", stat, intr));
	incnt = txn->inbits;
	outcnt = txn->outbits;
	byte = (txn->opcode << 8);
	WRITEUCTLDATA(byte);
	bytecnt = 0;
	while (incnt > 0) {
		byte = (txn->inbuf[bytecnt] << 8);
		WRITEUCTLDATA(byte);
		incnt--;
		bytecnt++;
	}
	/* Get the ack */
	READUCTLDATA(byte);
	dprintk(("ack was %x\n", (byte >> 8)));
	bytecnt = 0;
	while (outcnt > 0) {
		READUCTLDATA(byte);
		txn->outbuf[bytecnt] = (byte >> 8);
		dprintk(("set byte to %02x\n", byte));
		outcnt--;
		bytecnt++;
	}
}

/*
 * Query the two event-status bytes and cache them (big-endian combine)
 * in driver->status.event_status.
 * NOTE(review): READ_EVENT_STATUS presumably clears the event latch on
 * the microcontroller (a separate READ_EVENT_STATUS_NO_RESET opcode
 * exists) -- confirm before calling this in new paths.
 */
static void uctrl_get_event_status(struct uctrl_driver *driver)
{
	struct uctrl_txn txn;
	u8 outbits[2];

	txn.opcode = READ_EVENT_STATUS;
	txn.inbits = 0;
	txn.outbits = 2;
	txn.inbuf = NULL;
	txn.outbuf = outbits;

	uctrl_do_txn(driver, &txn);

	dprintk(("bytes %x %x\n", (outbits[0] & 0xff), (outbits[1] & 0xff)));
	driver->status.event_status =
		((outbits[0] & 0xff) << 8) | (outbits[1] & 0xff);
	dprintk(("ev is %x\n", driver->status.event_status));
}

/*
 * Query the two external-status bytes, cache them in
 * driver->status.external_status, and (DEBUG only) print the name of
 * each set bit via uctrl_extstatus[].  See the NOTE on that table:
 * bits 11-15 have no name string.
 */
static void uctrl_get_external_status(struct uctrl_driver *driver)
{
	struct uctrl_txn txn;
	u8 outbits[2];
	int i, v;

	txn.opcode = READ_EXTERNAL_STATUS;
	txn.inbits = 0;
	txn.outbits = 2;
	txn.inbuf = NULL;
	txn.outbuf = outbits;

	uctrl_do_txn(driver, &txn);

	dprintk(("bytes %x %x\n", (outbits[0] & 0xff), (outbits[1] & 0xff)));
	driver->status.external_status =
		((outbits[0] * 256) + (outbits[1]));
	dprintk(("ex is %x\n", driver->status.external_status));
	v = driver->status.external_status;
	for (i = 0; v != 0; i++, v >>= 1) {
		if (v & 1) {
			dprintk(("%s%s", " ", uctrl_extstatus[i]));
		}
	}
	dprintk(("\n"));
}

/*
 * Probe: allocate state, map the microcontroller registers, hook the
 * IRQ, register /dev/uctrl, enable the receive-not-empty interrupt and
 * prime the cached status.  Returns 0 or a negative errno.
 *
 * Error unwinding runs bottom-up through the labels; note the unusual
 * shape where the success path falls into `out:` and the failure paths
 * jump back to it after cleanup.
 */
static int __devinit uctrl_probe(struct platform_device *op)
{
	struct uctrl_driver *p;
	int err = -ENOMEM;

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p) {
		printk(KERN_ERR "uctrl: Unable to allocate device struct.\n");
		goto out;
	}

	p->regs = of_ioremap(&op->resource[0], 0,
			     resource_size(&op->resource[0]),
			     "uctrl");
	if (!p->regs) {
		printk(KERN_ERR "uctrl: Unable to map registers.\n");
		goto out_free;
	}

	p->irq = op->archdata.irqs[0];
	err = request_irq(p->irq, uctrl_interrupt, 0, "uctrl", p);
	if (err) {
		printk(KERN_ERR "uctrl: Unable to register irq.\n");
		goto out_iounmap;
	}

	err = misc_register(&uctrl_dev);
	if (err) {
		printk(KERN_ERR "uctrl: Unable to register misc device.\n");
		goto out_free_irq;
	}

	/* Enable + unmask the receive-FIFO-not-empty interrupt. */
	sbus_writel(UCTRL_INTR_RXNE_REQ|UCTRL_INTR_RXNE_MSK,
		    &p->regs->uctrl_intr);
	printk(KERN_INFO "%s: uctrl regs[0x%p] (irq %d)\n",
	       op->dev.of_node->full_name, p->regs, p->irq);
	uctrl_get_event_status(p);
	uctrl_get_external_status(p);

	dev_set_drvdata(&op->dev, p);
	global_driver = p;

out:
	return err;

out_free_irq:
	free_irq(p->irq, p);

out_iounmap:
	of_iounmap(&op->resource[0], p->regs,
		   resource_size(&op->resource[0]));

out_free:
	kfree(p);
	goto out;
}

/*
 * Remove: tear down in reverse probe order (misc node, IRQ, mapping,
 * state).  global_driver is intentionally left dangling only in the
 * sense that the module is going away with it.
 */
static int __devexit uctrl_remove(struct platform_device *op)
{
	struct uctrl_driver *p = dev_get_drvdata(&op->dev);

	if (p) {
		misc_deregister(&uctrl_dev);
		free_irq(p->irq, p);
		of_iounmap(&op->resource[0], p->regs,
			   resource_size(&op->resource[0]));
		kfree(p);
	}
	return 0;
}

/* Match the OF node by name only ("uctrl"). */
static const struct of_device_id uctrl_match[] = {
	{
		.name = "uctrl",
	},
	{},
};
MODULE_DEVICE_TABLE(of, uctrl_match);

static struct platform_driver uctrl_driver = {
	.driver = {
		.name = "uctrl",
		.owner = THIS_MODULE,
		.of_match_table = uctrl_match,
	},
	.probe		= uctrl_probe,
	.remove		= __devexit_p(uctrl_remove),
};

module_platform_driver(uctrl_driver);

MODULE_LICENSE("GPL");
gpl-2.0
tyler6389/android_kernel_samsung_ls02
drivers/net/wireless/ipsecdrvtl/bo.c
218
37785
/* 'src_nf_nf.c' Obfuscated by COBF (Version 1.06 2006-01-07 by BB) at Fri Oct 12 22:15:16 2012 */ #include"cobf.h" #ifdef _WIN32 #if defined( UNDER_CE) && defined( bb337) || ! defined( bb329) #define bb355 1 #define bb332 1 #else #define bb351 bb343 #define bb333 1 #define bb331 1 #endif #define bb348 1 #include"uncobf.h" #include<ndis.h> #include"cobf.h" #ifdef UNDER_CE #include"uncobf.h" #include<ndiswan.h> #include"cobf.h" #endif #include"uncobf.h" #include<stdio.h> #include<basetsd.h> #include"cobf.h" bba bbs bbl bbf, *bb1;bba bbs bbe bbq, *bb93;bba bb135 bb124, *bb334; bba bbs bbl bb40, *bb72;bba bbs bb135 bbk, *bb59;bba bbe bbu, *bb133; bba bbh bbf*bb89; #ifdef bb311 bba bbd bb60, *bb122; #endif #else #include"uncobf.h" #include<linux/module.h> #include<linux/ctype.h> #include<linux/time.h> #include<linux/slab.h> #include"cobf.h" #ifndef bb116 #define bb116 #ifdef _WIN32 #include"uncobf.h" #include<wtypes.h> #include"cobf.h" #else #ifdef bb120 #include"uncobf.h" #include<linux/types.h> #include"cobf.h" #else #include"uncobf.h" #include<stddef.h> #include<sys/types.h> #include"cobf.h" #endif #endif #ifdef _WIN32 bba bb119 bb215; #else bba bbe bbu, *bb133, *bb246; #define bb201 1 #define bb202 0 bba bb251 bb205, *bb240, *bb208;bba bbe bb285, *bb283, *bb262;bba bbs bbq, *bb93, *bb270;bba bb6 bb238, *bb216;bba bbs bb6 bb263, *bb250; bba bb6 bb111, *bb222;bba bbs bb6 bb63, *bb289;bba bb63 bb264, *bb207 ;bba bb63 bb219, *bb254;bba bb111 bb119, *bb226;bba bb243 bb247;bba bb279 bb124;bba bb230 bb83;bba bb118 bb112;bba bb118 bb253; #ifdef bb211 bba bb282 bb40, *bb72;bba bb258 bbk, *bb59;bba bb232 bbd, *bb28;bba bb256 bb56, *bb113; #else bba bb271 bb40, *bb72;bba bb229 bbk, *bb59;bba bb233 bbd, *bb28;bba bb277 bb56, *bb113; #endif bba bb40 bbf, *bb1, *bb214;bba bbk bb237, *bb245, *bb224;bba bbk bb255 , *bb220, *bb248;bba bbd bb60, *bb122, *bb206;bba bb83 bb37, *bb274, * bb252;bba bbd bb290, *bb275, *bb210;bba bb112 bb265, *bb291, *bb269; bba bb56 bb227, *bb261, 
*bb223; #define bb140 bbb bba bbb*bb221, *bb77;bba bbh bbb*bb225;bba bbl bb287;bba bbl*bb276; bba bbh bbl*bb82; #if defined( bb120) bba bbe bb115; #endif bba bb115 bb20;bba bb20*bb218;bba bbh bb20*bb187; #if defined( bb213) || defined( bb266) bba bb20 bb36;bba bb20 bb114; #else bba bbl bb36;bba bbs bbl bb114; #endif bba bbh bb36*bb257;bba bb36*bb244;bba bb60 bb212, *bb239;bba bbb* bb106;bba bb106*bb241; #define bb281( bb34) bbi bb34##__ { bbe bb228; }; bba bbi bb34##__ * \ bb34 bba bbi{bb37 bb188,bb242,bb231,bb260;}bb286, *bb234, *bb278;bba bbi{ bb37 bb8,bb193;}bb280, *bb235, *bb259;bba bbi{bb37 bb267,bb249;}bb236 , *bb217, *bb284; #endif bba bbh bbf*bb89; #endif bba bbf bb100; #define IN #define OUT #ifdef _DEBUG #define bb139( bbc) bb31( bbc) #else #define bb139( bbc) ( bbb)( bbc) #endif bba bbe bb161, *bb173; #define bb209 0 #define bb314 1 #define bb298 2 #define bb324 3 #define bb346 4 bba bbe bb349;bba bbb*bb121; #endif #ifdef _WIN32 #ifndef UNDER_CE #define bb30 bb344 #define bb43 bb335 bba bbs bb6 bb30;bba bb6 bb43; #endif #else #endif #ifdef _WIN32 bbb*bb128(bb30 bb47);bbb bb105(bbb* );bbb*bb137(bb30 bb159,bb30 bb47); #else #define bb128( bbc) bb146(1, bbc, bb142) #define bb105( bbc) bb342( bbc) #define bb137( bbc, bbn) bb146( bbc, bbn, bb142) #endif #ifdef _WIN32 #define bb31( bbc) bb358( bbc) #else #ifdef _DEBUG bbe bb145(bbh bbl*bb95,bbh bbl*bb25,bbs bb272); #define bb31( bbc) ( bbb)(( bbc) || ( bb145(# bbc, __FILE__, __LINE__ \ ))) #else #define bb31( bbc) (( bbb)0) #endif #endif bb43 bb301(bb43*bb320); #ifndef _WIN32 bbe bb328(bbh bbl*bbg);bbe bb322(bbh bbl*bb19,...); #endif #ifdef _WIN32 bba bb353 bb96; #define bb141( bbc) bb356( bbc) #define bb144( bbc) bb345( bbc) #define bb134( bbc) bb350( bbc) #define bb132( bbc) bb339( bbc) #else bba bb347 bb96; #define bb141( bbc) ( bbb)( * bbc = bb330( bbc)) #define bb144( bbc) (( bbb)0) #define bb134( bbc) bb352( bbc) #define bb132( bbc) bb354( bbc) #endif #include"uncobf.h" #include<linux/netdevice.h> 
#include<linux/netfilter.h> #include<linux/netfilter_ipv4.h> #include<linux/skbuff.h> #include<linux/ip.h> #include<net/ip.h> #include<linux/udp.h> #include<linux/miscdevice.h> #include<linux/fs.h> #include<asm/uaccess.h> #include"cobf.h" bba bb9{bb407,bb1497,}bb296;bbk bb1214(bb296 bb758,bbh bbf*bb447);bbd bb544(bb296 bb758,bbh bbf*bb447);bbb bb1158(bbk bb159,bb296 bb555,bbf bb439[2 ]);bbb bb972(bbd bb159,bb296 bb555,bbf bb439[4 ]);bba bb83 bb4; bb9{bb98=0 ,bb364=-12000 ,bb357=-11999 ,bb375=-11998 ,bb671=-11997 ,bb797= -11996 ,bb724=-11995 ,bb871=-11994 ,bb788=-11992 ,bb806=-11991 ,bb668=- 11990 ,bb712=-11989 ,bb833=-11988 ,bb637=-11987 ,bb672=-11986 ,bb773=- 11985 ,bb851=-11984 ,bb623=-11983 ,bb617=-11982 ,bb764=-11981 ,bb903=- 11980 ,bb791=-11979 ,bb722=-11978 ,bb843=-11977 ,bb583=-11976 ,bb844=- 11975 ,bb767=-11960 ,bb678=-11959 ,bb689=-11500 ,bb732=-11499 ,bb856=- 11498 ,bb798=-11497 ,bb879=-11496 ,bb869=-11495 ,bb828=-11494 ,bb774=- 11493 ,bb858=-11492 ,bb885=-11491 ,bb705=-11490 ,bb745=-11489 ,bb702=- 11488 ,bb892=-11487 ,bb872=-11486 ,bb706=-11485 ,bb645=-11484 ,bb902=- 11483 ,bb769=-11482 ,bb905=-11481 ,bb846=-11480 ,bb759=-11479 ,bb644=- 11478 ,bb717=-11477 ,bb657=-11476 ,bb631=-11475 ,bb864=-11474 ,bb789=- 11473 ,bb707=-11472 ,bb809=-11460 ,bb652=-11450 ,bb740=-11449 ,bb710=- 11448 ,bb733=-11447 ,bb790=-11446 ,bb635=-11445 ,bb887=-11444 ,bb824=- 11443 ,bb842=-11440 ,bb865=-11439 ,bb801=-11438 ,bb800=-11437 ,bb673=- 11436 ,bb688=-11435 ,bb620=-11420 ,bb531=-11419 ,bb571=-11418 ,bb685=- 11417 ,bb835=-11416 ,bb667=-11415 ,bb795=-11414 ,bb731=-11413 ,bb633=- 11412 ,bb823=-11411 ,bb674=-11410 ,bb636=-11409 ,bb708=-11408 ,bb900=- 11407 ,bb898=-11406 ,bb803=-11405 ,bb720=-11404 ,bb658=-11403 ,bb761=- 11402 ,bb634=-11401 ,bb679=-11400 ,bb878=-11399 ,bb754=-11398 ,bb762=- 11397 ,bb683=-11396 ,bb866=-11395 ,bb889=-11394 ,bb615=-11393 ,bb894=- 11392 ,bb692=-11391 ,bb784=-11390 ,bb727=-11389 ,bb711=-11388 ,bb749=- 11387 ,bb904=-11386 ,bb627=-11385 ,bb700=-11384 
,bb786=-11383 ,bb648=- 11382 ,bb814=-11381 ,bb736=-11380 ,bb785=-11379 ,bb669=-11378 ,bb752=- 11377 ,bb808=-11376 ,bb709=-11375 ,bb763=-11374 ,bb699=-11373 ,bb897=- 11372 ,bb862=-11371 ,bb802=-11370 ,bb777=-11369 ,bb841=-11368 ,bb756=- 11367 ,bb794=-11366 ,bb719=-11365 ,bb860=-11364 ,bb845=-11363 ,bb388=- 11350 ,bb883=bb388,bb714=-11349 ,bb834=-11348 ,bb836=-11347 ,bb643=-11346 ,bb649=-11345 ,bb906=-11344 ,bb822=-11343 ,bb779=-11342 ,bb680=-11341 , bb770=-11340 ,bb901=-11339 ,bb398=-11338 ,bb663=-11337 ,bb687=bb398,bb799 =-11330 ,bb817=-11329 ,bb781=-11328 ,bb632=-11327 ,bb718=-11326 ,bb650=- 11325 ,bb821=-11324 ,bb698=-11320 ,bb819=-11319 ,bb859=-11318 ,bb690=- 11317 ,bb626=-11316 ,bb681=-11315 ,bb825=-11314 ,bb723=-11313 ,bb641=- 11312 ,bb642=-11300 ,bb741=-11299 ,bb796=-11298 ,bb703=-11297 ,bb852=- 11296 ,bb811=-11295 ,bb832=-11294 ,bb654=-11293 ,bb847=-11292 ,bb882=- 11291 ,bb618=-11290 ,bb804=-11289 ,bb857=-11288 ,bb849=-11287 ,bb734=- 11286 ,bb653=-11285 ,bb646=-11284 ,bb812=-11283 ,bb738=-11282 ,bb704=- 11281 ,bb661=-11280 ,bb713=-11279 ,bb701=-11250 ,bb850=-11249 ,bb848=- 11248 ,bb748=-11247 ,bb737=-11246 ,bb805=-11245 ,bb778=-11244 ,bb755=- 11243 ,bb621=-11242 ,bb839=-11240 ,bb651=-11239 ,bb729=-11238 ,bb792=- 11237 ,bb677=-11150 ,bb855=-11100 ,bb820=-11099 ,bb655=-11098 ,bb744=- 11097 ,bb782=-11096 ,bb793=-11095 ,bb768=-11094 ,bb628=-11093 ,bb830=- 11092 ,bb899=-11091 ,bb666=-11090 ,bb877=-11089 ,bb884=-11088 ,bb853=- 11087 ,bb638=-11086 ,bb780=-11085 ,bb783=-11050 ,bb751=-11049 ,bb691=- 10999 ,bb639=-10998 ,bb656=-10997 ,bb753=-10996 ,bb893=-10995 ,bb682=- 10994 ,bb694=-10993 ,bb840=-10992 ,bb771=-10991 ,bb735=-10990 ,bb630=- 10989 ,bb907=-10988 ,bb728=-10979 ,bb660=-10978 ,bb765=-10977 ,bb873=- 10976 ,bb695=-10975 ,bb826=-10974 ,};bba bbi bb455{bb1 bb74;bbd bb127; bbd bb181;bbi bb455*bb94;}bbx;bb4 bb465(bbx*bb670,bbd bb909,bbx*bb696 ,bbd bb895,bbd bb538);bb4 bb532(bbx*bbj,bbd bb92,bbh bbb*bb95,bbd bb47 );bb4 bb577(bbx*bbj,bbd bb92,bbb*bb131,bbd 
bb47);bbu bb827(bbx*bbj, bbd bb92,bbh bbb*bb95,bbd bb47); #define bb951 bb53(0x0800) #define bb1138 bb53(0x0806) #define bb937 bb53(0x01f4) #define bb945 bb53(0x1194) #define bb1131 bb53(0x4000) #define bb1137 bb53(0x2000) #define bb1107 bb53(0x1FFF) #define bb1064( bb8) (( bb8) & bb53(0x2000 | 0x1FFF)) #define bb1022( bb8) ((( bb196( bb8)) & 0x1FFF) << 3) #define bb979( bb8) ((( bb8) & bb53(0x1FFF)) == 0) #define bb494( bb8) (( bb8) & bb53(0x2000)) #define bb987( bb8) (!( bb494( bb8))) #pragma pack(push, 1) bba bbi{bbf bb371[6 ];bbf bb1006[6 ];bbk bb373;}bb366, *bb376;bba bbi{ bbf bb446[6 ];bbk bb373;}bb1082, *bb1091;bba bbi{bbf bb938:4 ;bbf bb1088 :4 ;bbf bb1048;bbk bb361;bbk bb881;bbk bb567;bbf bb1002;bbf bb292;bbk bb610;bbd bb310;bbd bb203;}bb326, *bb312;bba bbi{bbk bb1085;bbk bb1092 ;bbf bb1047;bbf bb1039;bbk bb1057;bbf bb1074[6 ];bbd bb1035;bbf bb1090 [6 ];bbd bb1061;}bb1071, *bb1077; #pragma pack(pop) bba bbi{bbk bb288;bbk bb427;bbk bb1005;bbk bb319;}bb416, *bb341;bba bbi{bbk bb288;bbk bb592;bbd bb549;bbd bb918;bbf bb92;bbf bb172;bbk bb158;bbk bb319;bbk bb1019;}bb487, *bb318;bba bbi{bbf bb1075;bbf bb1067;bbf bb1058;bbf bb1036;bbd bb1060;bbk bb1072;bbk bb372;bbd bb1032;bbd bb1078;bbd bb1063;bbd bb1056;bbf bb1076[16 ];bbf bb1046[64 ] ;bbf bb25[128 ];bbf bb1033[64 ];}bb1080, *bb1086;bba bbi{bbd bb310;bbd bb203;bbf bb910;bbf bb292;bbk bb919;}bb612, *bb569; #if defined( _WIN32) #define bb53( bbc) (((( bbc) & 0XFF00) >> 8) | ((( bbc) & 0X00FF) << \ 8)) #define bb196( bbc) ( bb53( bbc)) #define bb445( bbc) (((( bbc) & 0XFF000000) >> 24) | ((( bbc) & \ 0X00FF0000) >> 8) | ((( bbc) & 0X0000FF00) << 8) | ((( bbc) & \ 0X000000FF) << 24)) #define bb499( bbc) ( bb445( bbc)) #endif bbk bb921(bbh bbb*bb295);bbk bb886(bbh bbb*bb510,bbe bb22);bb4 bb595( bbx*bb85,bbf bb102,bbx*bb58);bb4 bb686(bbx*bb85,bbu bb177,bbf*bb408); bb4 bb955(bbx*bb58,bbf*bb389);bb4 bb941(bbh bbf*bb389,bbx*bb58);bb4 bb541(bbx*bb51,bbf bb102,bbd*bb939);bb4 bb929(bbx*bb85,bbf bb102,bbf 
bb408,bbx*bb58);bbd bb517(bbx*bb51);bbk bb534(bbx*bb51);bbb bb527(bbk bb151,bbx*bb51);bbb bb536(bbx*bb51);bbb bb968(bbx*bb51,bbd*bb26);bbb bb999(bbx*bb51,bbd*bb26);bbb bb1020(bbx*bb51,bbd bb26);bbb bb931(bbx* bb51,bbd bb26);bbb bb983(bbx*bb51);bbu bb1013(bbf*bb51);bba bbi bb991 *bb989;bba bbi bb1026*bb1027;bba bbi bb993*bb1021;bba bbi bb1000* bb1010;bba bbi bb1012*bb1023;bba bbi bb990*bb986;bba bb9{bb552=0 , bb581=1 ,bb593=2 ,bb813=3 ,bb585=4 ,bb570=5 ,bb575=6 ,bb562=7 ,bb579=9 ,} bb422;bba bb9{bb606=0 ,bb992,bb598,bb1009,bb930,bb923,bb926,bb916, bb924,bb922,bb915,}bb519;bb4 bb2056(bbx*bb304,bbd*bb103);bb4 bb2115( bbx*bb85,bbu bb177,bbd bb489,bb519 bb154,bbh bbf*bb1312,bbf*bb130, bb422 bb414,bbf*bb557,bbd bb103,bbd bb491,bbx*bb58);bb4 bb2043(bbx* bb85,bbu bb177,bb519 bb154,bbh bbf*bb1312,bb422 bb414,bbf*bb557,bbd* bb484,bbd*bb463,bbd*bb535,bbx*bb58);bb4 bb2072(bbx*bb304,bbd*bb103); bb4 bb2087(bbx*bb85,bbu bb177,bbd bb489,bb422 bb414,bbf*bb557,bbd bb103,bbd bb491,bbx*bb58);bb4 bb2062(bbx*bb85,bbu bb177,bb422 bb414, bbf*bb557,bbd*bb484,bbd*bb463,bbd*bb535,bbx*bb58);bb9{bb550=1 ,};bbb* bb496(bbd bb1213,bbd bb372);bb4 bb458(bbb*bb970);bba bbi bb1985 bb1971 , *bb383;bba bb9{bb1994=0 ,bb1754=1 ,bb1772=2 }bb624;bb4 bb1814(bb624 bb1871,bb383*bb360);bb4 bb1949(bb383 bb360,bbf*bb434,bbd bb418,bbf* bb309,bbd bb293,bbd*bb437,bbd*bb306);bb4 bb1942(bb383 bb360,bbf*bb309 ,bbd bb293,bbd*bb306,bbu*bb973);bb4 bb1950(bb383 bb360,bbf*bb434,bbd bb418,bbf*bb309,bbd bb293,bbd*bb437,bbd*bb306,bbu*bb963);bb4 bb1823( bb383 bb360);bb4 bb2184(bbx*bb85,bbu bb177,bbd bb489,bb624 bb1358,bbx *bb58,bbu*bb2086);bb4 bb2099(bbx*bb85,bbu bb177,bb624 bb1358,bbx* bb58);bbu bb2132(bbx*bb304);bbu bb2189(bbx*bb304);bb4 bb2060(bbx* bb304,bbd*bb103);bb4 bb2009(bbx*bb304,bbd*bb103);bb4 bb1846(bbx*bb85, bbx*bb58,bbu bb1065,bbk bb2118,bbk bb1813);bb4 bb1861(bbx*bb85,bbx* bb58,bbu bb1065); #pragma pack(push, 8) #ifdef _MSC_VER #pragma warning (disable:4200) #endif bba bbf bb178[4 ];bba bb9{bb1651=0 ,bb1466=1 
,}bb1390;bba bb9{bb1527=0 , bb1718=1 ,bb1559=2 ,bb1437=3 ,bb1657=4 ,bb1495=5 ,bb1634=6 ,bb1514=7 ,bb1605 =8 ,bb1518=9 ,bb1677=10 ,bb1506=11 ,bb1694=12 ,bb1710=13 ,bb1716=14 ,bb1422= 15 ,bb1452=16 ,bb1393=17 ,bb1598=18 ,bb1688=19 ,bb1642=20 ,bb1491=21 ,bb1504 =22 ,bb1473=23 ,bb1601=24 ,bb1604=25 ,bb1449=26 ,bb1579=27 ,bb1373=28 , bb1612=29 ,bb1686=30 ,bb1630=16300 ,bb1429=16301 ,bb1727=16384 ,bb1535= 24576 ,bb1462=24577 ,bb1436=24578 ,bb1477=34793 ,bb1380=40500 ,}bb629;bba bb9{bb1460=0 ,bb1523=1 ,bb1456=2 ,bb1424=3 ,bb1397=4 ,bb1386=5 ,bb1668=6 , bb1474=7 ,bb1528=8 ,bb1399=9 ,bb1443=21 ,bb1487=22 ,bb1488=23 ,bb1445=24 , bb1540=25 ,bb1508=26 ,bb1461=27 ,bb1382=28 ,bb1479=29 ,bb1489=80 ,}bb775; bba bb9{bb1562=0 ,bb1697=1 ,bb1693=2 ,bb1483=3 ,bb1520=4 ,}bb1646;bba bb9{ bb1685=0 ,bb1347=1 ,bb1165=2 ,bb1225=3 ,bb1289=4 ,bb1049=61440 ,bb1329= 61441 ,bb1113=61443 ,bb1301=61444 ,}bb486;bba bb9{bb1700=0 ,bb1494=1 , bb1560=2 ,}bb1679;bba bb9{bb1388=0 ,bb1726,bb1438,bb1453,bb1565,bb1496, bb1635,bb1464,bb1603,bb1492,bb1396,bb1728,}bb743;bba bb9{bb1507=0 , bb1361=2 ,bb1328=3 ,bb1375=4 ,bb1325=9 ,bb1297=12 ,bb1359=13 ,bb1310=14 , bb1348=249 ,}bb715;bba bb9{bb1357=0 ,bb1300=1 ,bb1285=2 ,bb1421=3 ,bb1633= 4 ,bb1356=5 ,bb1342=12 ,bb1320=13 ,bb1368=14 ,bb1286=61440 ,}bb481;bba bb9{ bb1293=1 ,bb1369=2 ,bb1338=3 ,bb1539=4 ,bb1600=5 ,bb1448=6 ,bb1427=7 ,bb1469 =8 ,bb1454=9 ,bb1538=10 ,bb1305=11 ,bb391=12 ,bb1339=13 ,bb390=240 ,bb1344=( 128 <<16 )|bb390,bb1343=(192 <<16 )|bb390,bb1331=(256 <<16 )|bb390,bb1303=( 128 <<16 )|bb391,bb1294=(192 <<16 )|bb391,bb1346=(256 <<16 )|bb391,}bb616; bba bb9{bb1296=0 ,bb1501=1 ,bb1365=2 ,bb1330=3 ,bb1458=4 ,}bb890;bba bb9{ bb1434=0 ,bb1573=1 ,bb1222=2 ,bb604=3 ,bb1236=4 ,}bb716;bba bb9{bb1576=0 , bb1526=1 ,bb1406=2 ,bb1669=5 ,bb1706=7 ,}bb483;bba bb9{bb1425=0 ,bb1513=1 , bb1618=2 ,bb1711=3 ,bb1482=4 ,bb1690=5 ,bb1647=6 ,bb386=7 ,bb1543=65001 , bb396=240 ,bb1484=(128 <<16 )|bb396,bb1502=(192 <<16 )|bb396,bb1511=(256 << 16 )|bb396,bb1542=(128 <<16 
)|bb386,bb1556=(192 <<16 )|bb386,bb1614=(256 << 16 )|bb386,}bb810;bba bb9{bb1717=0 ,bb1457=1 ,bb1662=2 ,bb1572=3 ,bb1472=4 ,bb1529=5 ,bb1566=6 ,bb1644=65001 ,}bb625;bba bb9{bb1684=0 ,bb1524=1 , bb1661=2 ,bb1551=3 ,bb1654=4 ,bb1611=5 ,bb1553=64221 ,bb1617=64222 ,bb1658= 64223 ,bb1672=64224 ,bb1709=65001 ,bb1680=65002 ,bb1549=65003 ,bb1440= 65004 ,bb1722=65005 ,bb1486=65006 ,bb1510=65007 ,bb1476=65008 ,bb1708= 65009 ,bb1475=65010 ,}bb896;bba bb9{bb1698=0 ,bb1416=1 ,bb1431=2 ,}bb891; bba bb9{bb1409=0 ,bb1638=1 ,bb1478=2 ,bb1682=3 ,}bb739;bba bb9{bb1590=0 , bb1418=1 ,bb1433=2 ,bb1648=3 ,bb1660=4 ,bb1641=5 ,bb1500=21 ,bb1569=6 , bb1615=7 ,bb1536=8 ,bb1378=1000 ,}bb490;bba bb9{bb1410=0 ,bb1561=1 ,bb1666 =2 ,}bb730;bba bb9{bb1665=0 ,bb1629=1 ,bb1715=2 ,bb1435=3 ,bb1471=4 ,}bb676 ;bba bb9{bb1530=0 ,bb1673=1 ,bb1392=1001 ,bb1713=1002 ,}bb837;bba bb9{ bb1558=0 ,bb1134=1 ,bb1040=2 ,bb1051=3 ,bb1112=4 ,bb1126=5 ,bb1094=6 ,bb1695 =100 ,bb1581=101 ,}bb480;bba bbi bb394{bb616 bb154;bb481 bb580;bb486 bb57;}bb394;bba bbi bb400{bb715 bb1349;bb481 bb580;bb486 bb57;}bb400; bba bbi bb392{bb890 bb1003;}bb392;bba bbi bb476{bb896 bb1606;bb625 bb414;bb810 bb154;bbu bb1485;bb483 bb647;}bb476;bba bbi bb485{bbu bb599;bb394 bb308;bbu bb640;bb400 bb560;bbu bb772;bb392 bb607;bb483 bb647;}bb485;bba bbi bb450{bb178 bb957;bb178 bb1211;bb716 bb102;bb556 {bbi{bb400 bb45;bbf bb558[64 ];bbf bb551[64 ];}bb560;bbi{bb394 bb45;bbf bb1218[32 ];bbf bb1229[32 ];bbf bb558[64 ];bbf bb551[64 ];bbf bb1199[16 ]; }bb308;bbi{bb392 bb45;}bb607;}bb317;}bb450;bba bbi{bbd bb818,bb589; bbf bb1132:1 ;bbf bb1168:1 ;bbf bb102;bbk bb440;}bb185;bba bbi bb507{ bbd bb11;bb185 bbc[64 *2 ];}bb507; #ifdef UNDER_CE bba bb43 bb378; #else bba bb83 bb378; #endif bba bbi bb199{bbi bb199*bb1467, *bb1385;bbd bb26;bbd bb1114;bb185 bb914[64 ];bb480 bb504;bbd bb1299;bbk bb1062;bbd bb554;bbd bb675;bbd bb816;bbf bb488;bbf bb1335;bbf bb1103;bbd bb1029;bbd bb1381;bb378 bb568;bbk bb1280;bb450 bb409[3 ];bb378 bb1568;bbf bb1503[40 ];bbd bb590 ;bbd 
bb1577;}bb199;bba bbi bb399{bbi bb399*bb1723;bb185 bb475;}bb399; bba bbi bb746{bbu bb473;bbu bb488;bbd bb26;bbd bb590;bbf bb1512;bbk bb1592;bbf*bb1544;bbd bb1423;bbf*bb1583;bbd bb1714;bbf*bb1376;bbd bb1412;bbu bb1645;bbu bb1570;bb399*bb131;bbu bb1470;bb676 bb1519;bbd bb1593;bb891 bb1707;bb480 bb504;bbk bb1371;bbd bb1533;bb837 bb1403; bbd bb1650;bbd bb1720;bb743 bb1417;bbf*bb1405;bbd bb1413;bb490 bb867; bbd bb1656;bbd bb1623;bbd bb1408;bbd bb1699;bbd bb1493;bb476*bb1537; bbd bb1610;bb485*bb1505;bbd bb1395;bbd bb1531;bbd bb1441;}bb746;bba bbi bb684{bbu bb473;bbd bb26;bb185 bb475;}bb684;bba bbi bb665{bb199* bb316;bbu bb1571;bbf*bb1701;bbd bb1667;}bb665;bba bbi bb876{bbd bb26; bb185 bb475;bbf bb1432;bbf bb1447;}bb876;bba bbi bb829{bbu bb473;bbu bb1119;bbd bb26;bbf*bb1625;bbd bb1541;}bb829;bba bbi bb662{bbd bb26; bbk bb1702;bbk bb1729;bbd bb151;bbf*bb48;}bb662;bba bbi bb831{bbu bb1586;bbd bb26;bbd bb554;bbd bb675;bbd bb816;}bb831;bba bbi bb868{ bb629 bb1490;bbd bb26;bb775 bb1323;bbu bb1555;}bb868;bba bbi bb622{ bbf bb1674;bbf bb1389;bbf bb1687;bbf bb1394;bbf bb1575;bbf bb1602;bbf bb1584;bbf bb1459;bbf bb1374;bbf bb1521;bbf bb1411;bbf bb1627;bbf bb1725;bbf bb1407;bbf bb1676;bbf bb1439;bbf bb1620;bbf bb1383;bbf bb1450;bbf bb512;bbf bb1548;bbf bb1663;bbf bb1532;bbf bb1689;bbf bb1415;bbf bb1430;bbf bb1414;}bb622;bba bbi bb742{bbu bb1637;bbd bb489 ;bbd bb1621;bb739 bb1426;bbk bb1632;bbu bb1516;bbu bb1563;bbu bb1652; bbu bb1451;bbu bb1631;bbu bb1659;bbu bb1398;bbl bb1624[128 ];bbl bb1670 [128 ];bbl bb1595[128 ];bbl bb1419[256 ];bbl bb1636[128 ];bbl bb1444[128 ] ;bbd bb1589;bbf bb1564[8 ];bbf bb1404[8 ];}bb742;bba bbi bb659{bbd bb26 ;bbd bb1696;}bb659;bba bbi bb861{bbd bb26;bbu bb488;}bb861;bba bbi bb747{bbu bb1522;bbd bb510;bbd bb1171;}bb747;bba bbi bb757{bbd bb26; bb490 bb867;bb730 bb1599;bbf*bb1578;bbd bb1587;}bb757;bba bb9{bb1400= 0 ,bb1552,bb1671,bb1384,bb1613,bb1534,bb1594,bb1391,bb1525,bb1580, bb1582,bb1692,bb1705,bb1653,bb1401,bb1585,bb1463,bb1402,bb1622,bb1640 
,}bb664;bba bbi bb1649 bb815;bba bb4( *bb1550)(bb815*bb1567,bbb* bb1588,bb664 bb321,bbb*bb74); #pragma pack(pop) #ifdef _WIN32 #ifdef UNDER_CE #define bb468 bb1703 bb603("1:") #else #define bb468 bb603("\\\\.\\IPSecTL") #endif #else #define bb614 "ipsecdrvtl" #define bb468 "/dev/" bb614 #ifndef bb116 #define bb116 #ifdef _WIN32 #include"uncobf.h" #include<wtypes.h> #include"cobf.h" #else #ifdef bb120 #include"uncobf.h" #include<linux/types.h> #include"cobf.h" #else #include"uncobf.h" #include<stddef.h> #include<sys/types.h> #include"cobf.h" #endif #endif #ifdef _WIN32 bba bb119 bb215; #else bba bbe bbu, *bb133, *bb246; #define bb201 1 #define bb202 0 bba bb251 bb205, *bb240, *bb208;bba bbe bb285, *bb283, *bb262;bba bbs bbq, *bb93, *bb270;bba bb6 bb238, *bb216;bba bbs bb6 bb263, *bb250; bba bb6 bb111, *bb222;bba bbs bb6 bb63, *bb289;bba bb63 bb264, *bb207 ;bba bb63 bb219, *bb254;bba bb111 bb119, *bb226;bba bb243 bb247;bba bb279 bb124;bba bb230 bb83;bba bb118 bb112;bba bb118 bb253; #ifdef bb211 bba bb282 bb40, *bb72;bba bb258 bbk, *bb59;bba bb232 bbd, *bb28;bba bb256 bb56, *bb113; #else bba bb271 bb40, *bb72;bba bb229 bbk, *bb59;bba bb233 bbd, *bb28;bba bb277 bb56, *bb113; #endif bba bb40 bbf, *bb1, *bb214;bba bbk bb237, *bb245, *bb224;bba bbk bb255 , *bb220, *bb248;bba bbd bb60, *bb122, *bb206;bba bb83 bb37, *bb274, * bb252;bba bbd bb290, *bb275, *bb210;bba bb112 bb265, *bb291, *bb269; bba bb56 bb227, *bb261, *bb223; #define bb140 bbb bba bbb*bb221, *bb77;bba bbh bbb*bb225;bba bbl bb287;bba bbl*bb276; bba bbh bbl*bb82; #if defined( bb120) bba bbe bb115; #endif bba bb115 bb20;bba bb20*bb218;bba bbh bb20*bb187; #if defined( bb213) || defined( bb266) bba bb20 bb36;bba bb20 bb114; #else bba bbl bb36;bba bbs bbl bb114; #endif bba bbh bb36*bb257;bba bb36*bb244;bba bb60 bb212, *bb239;bba bbb* bb106;bba bb106*bb241; #define bb281( bb34) bbi bb34##__ { bbe bb228; }; bba bbi bb34##__ * \ bb34 bba bbi{bb37 bb188,bb242,bb231,bb260;}bb286, *bb234, *bb278;bba bbi{ bb37 
bb8,bb193;}bb280, *bb235, *bb259;bba bbi{bb37 bb267,bb249;}bb236 , *bb217, *bb284; #endif bba bbh bbf*bb89; #endif #include"uncobf.h" #include<linux/ioctl.h> #include"cobf.h" bba bbi{bb1 bb1367;bbd bb1319;bb1 bb1240;bbd bb1144;bbd bb448;}bb1198 ; #define bb1364 1 #endif #pragma pack(push, 8) bb9{bb1355=3 ,bb1353,bb1354,bb1420,};bba bbi{bbf bb103[4 ];}bb1268;bba bbi{bbf bb103[4 ];}bb1235;bba bbi{bbd bb948;bbd bb26;}bb1265;bba bbi{ bbd bb129;bbf bb1224[8 ];}bb402;bba bb9{bb1220=0 ,bb1232,bb1250,bb1262, bb1731}bb1233;bba bbi{bbf bb1120;bbd bb1070;bbf bb1360;}bb474; #pragma pack(pop) #pragma pack(push, 8) bb9{bb1135=-5000 ,bb1104=-4000 ,bb997=-4999 ,bb988=-4998 ,bb1011=-4997 , bb981=-4996 ,bb1141=-4995 ,bb1087=-4994 ,bb1096=-4993 ,bb1024=-4992 , bb1031=-4991 };bb4 bb1129(bb4 bb1133,bbd bb1116,bbl*bb1100);bba bbi{ bb199 bb180;bbd bb1194;bbd bb1084;bbd bb1370;bbd bb1081;bbd bb1239; bbd bb1279;bbd bb1256;bbd bb1238;bbd bb1248;bbd bb1281;bbd bb1249;bbu bb1221;bb43 bb568,bb1172,bb1170;bbf bb371[6 ];}bb160;bba bbi bb478{bbi bb478*bb94;bbf bb102;bbk bb1275;bbk bb1276;bbk bb1271;bbk bb1274;} bb430;bba bbi bb787{bbi bb787*bb94;bbi bb478*bb1095;bbd bb26;bbf bb371 [6 ];}bb411;bba bb9{bb1146=0 ,bb1730,bb1037,bb1017,bb1007}bb204;bba bbi {bbd bb382;bbd bb448;bbd bb513;bb402*bb912;bb96 bb980;}bb302;bba bbi{ bb474*bb457;bb411*bb1142;bbd bb584;bb430*bb542;bb96 bb601;bbq bb1118; bbq bb548;bb160*bb508;bbu bb1269;bbk bb1157;bbk bb1106;bb302 bb1041;} bb32, *bb1607; #pragma pack(pop) bba bbi bb967 bb1334, *bb78;bba bbi bb838{bbi bb838*bb323;bb1 bb466; bbq bb565;bbd bb26;bbk bb440;bbq bb92;bb1 bb315;bbq bb442;bb1 bb546; bbq bb543;bb1 bb1498;bb100 bb1362;bbf bb1307[6 ];bb100 bb964;bb100 bb1140;bb100 bb521;bb100 bb533;}bb174, *bb87;bba bbi bb870{bbi bb870* bb94;bb174*bb323;bbd bb26;bbk bb537;bbk bb1468;bbq bb1442;bbq bb1517; bbk bb1428;}bb1455, *bb459;bbu bb1284(bb32* *bb1215);bbb bb1283(bb32* bbj);bb204 bb1267(bb32*bb109,bb376 bb451,bb312 bb138,bb341 bb413, bb318 bb200);bb204 
bb1246(bb32*bb109,bb376 bb451,bb312 bb138,bb341 bb413,bb318 bb200);bb204 bb1254(bb32*bb109,bb174*bb48,bb78 bb76); bb204 bb1234(bb32*bb109,bb174*bb48,bb78 bb76);bb4 bb1244(bb32*bb109, bb174*bb48,bbd*bb103);bb4 bb1153(bb78 bb76,bb32*bb109,bb174*bb48, bb160*bb316,bbu bb594,bbu bb947);bbu bb1867(bbd bb294);bb160*bb1791( bb32*bbj,bbd bb294,bbu bb594);bb160*bb1844(bb32*bbj,bbd bb294,bbd bb103);bb160*bb1918(bb32*bbj,bb178 bb103);bbb bb1938(bb507*bb39); bb160*bb1935(bb32*bbj,bb199*bb180);bbb bb1880(bb32*bbj,bb178 bb103); bbb bb1859(bb32*bbj,bb178 bb103);bbb bb1979(bb32*bbj);bbb bb1797(bb32 *bbj);bbb bb1909(bb32*bbj,bbd bb294,bbh bbf bb1185[6 ]);bbu bb1923( bb32*bbj,bbd bb294,bb411*bb428);bbb bb2035(bb32*bbj);bbb bb2003(bb32* bbj,bbd bb294,bbh bbf bb1185[6 ],bbf bb102,bbk bb410,bbk bb412);bbu bb2011(bb32*bbj,bbd bb294,bbf bb102,bbk bb410,bbk bb412);bbu bb1853( bb32*bbj,bbf bb102,bbk bb410,bbk bb412);bbb bb1986(bb32*bbj,bb430* bb542,bbq bb584);bbu bb1870(bb302*bbj,bbq bb513);bbb bb1848(bb302*bbj );bbb bb1960(bb302*bbj);bbu bb1792(bb302*bbj,bb402*bb619);bbu bb1931( bb302*bbj,bb402*bb619);bbb bb1933(bb32*bbj,bb178 bb103);bbb bb1868( bb32*bbj,bb178 bb103);bbb bb1818(bb32*bbj,bbd bb26,bbd bb948);bb4 bb1780(bb32*bbj,bb474*bb457);bbb bb2029(bb32*bbj); #ifdef UNDER_CE #define bb1915 64 #endif bba bbi bb1886{bb121 bb1908;bb121 bb1919;bb32*bb971;}bb1044, *bb1889; bbr bb1044 bb949;bbi bb967{bb121 bb1881;bbq bb1891;bbd bb1952;bb87 bb998;bb87 bb1934;bb87 bb1858;bb87 bb1890;bb87 bb1937;bb459 bb1857; bb459 bb1951;bb459 bb1901;bb96 bb1124;bb100 bb1902;bb100 bb1946;bb100 bb1925;bb121 bb1948;bb121 bb1869;};bbr bb78 bb1956;bbr bb96 bb1912; bbd bb1862(bbb*bb518,bbb*bb1878,bb161*bb1117);bb161 bb1944(bb121 bb1955,bb121 bb1906,bb77 bb546,bbq bb543,bb77 bb1111,bbq bb1102,bbq bb1145); #ifdef UNDER_CE #define bb591 16 #define bb1125 32 #else #define bb591 128 #define bb1125 256 #endif #define bb1105 bb591 *2 #define bb563 ( bb1105 * 2) #define bb1900 bb563 * 2 #define bb1860 bb563 * 2 bbr bbq 
bb958;bb161 bb1784(bb60 bb959,bbb*bb39,bbq bb1089,bb122 bb1681 );bb140 bb1921(IN bb78 bb76,IN bb121 bb1916,IN bb1 bb546,IN bbq bb543 ,IN bb77 bb1111,IN bbq bb1102,IN bbq bb1145);bb140 bb1903(IN bb78 bb76 );bbd bb1898(bb77 bb518,bb121 bb1917,bb77 bb1907,bbq bb1958,bb77 bb1854,bbq bb1850,bbq bb1910,bb161*bb1117);bbb bb1230(bb78 bb76,bb87* bb540,bb87 bb48);bb87 bb1264(bb78 bb76,bb87*bb540);bbu bb1783(bb78 bb76);bbb bb1794(bb78 bb76);bb87 bb1465(bb173 bb359,bb78 bb76);bb87 bb1828(bb173 bb359,bb78 bb76);bb87 bb1775(bb173 bb359,bb78 bb76); bb140 bb1664(bb78 bb76,bb87 bb48);bb140 bb1799(bb78 bb76,bb87 bb48); bb140 bb1845(bb78 bb76,bb87 bb48);bbb bb1913();bbb bb1877();bbb bb157 (bbh bbl*bb19,...);bbb bb1999(bb187 bbg);bbb bb2034(bbb*bb27,bbq bb11 );bbb bb1379(bbb*bb5,bbq bb10);bbb bb1810(bbb*bb5,bbq bb10);bbb bb1332 (bbb*bb5,bbq bb10);bbb bb1896();bbb bb1747();bbb bb1976(bb366*bb1920); bbb bb1574(bb326*bb27);bbb bb1263(bb326*bb880,bb487*bb150);bbb bb1481 (bb326*bb880,bb416*bb1609);bbi bb2359{bbd bb1287;bbd bb92;bbd bb469; bbe bb372;bbe bb537;bbf*bb39;bbi bb2359*bb94;};bbi bb2529{bbd bb414; bbd bb469;bbd bb2617;bbi bb2529*bb2616;bbi bb2359*bb2615;bbf*bb39;}; #ifdef _WIN32 bbe bb2547(bbi bb1895*bb295); #else bbe bb2547(bbi bb2065*bb471); #endif bb2570("");bb2620("\x61\x68\x6f\x70\x65");bb1044 bb949={0 };bb41 bb1334 bb518={0 };bbl bb2396[20 ];bb41 bb6 bb2478(bbi bb25*bb19,bbs bbe bbo, bbs bb6 bb2515){bbd bb959;bb1198 bb97, *bb2384=(bb1198* )bb2515;bbf* bb39;bb139(bb2532(&bb97,bb2384,bb12(bb1198))==0 );bbm(bb97.bb1144>2048 ){bb157("\x6d\x2e\x6f\x75\x74\x53\x69\x7a\x65\x20\x3e\x20\x32\x30\x34" "\x38\n");bb157("\x6d\x2e\x6f\x75\x74\x53\x69\x7a\x65\x3a\x20\x25\x64" "\n",bb97.bb1144);bb2-1 ;}bbm(bb97.bb1319!=4 ){bb157("\x6d\x2e\x69\x6e" "\x53\x69\x7a\x65\x20\x21\x3d\x20\x34\n");bb2-2 ;}bbm(bbo!=bb1364){ bb157("\x63\x20\x21\x3d\x20\x49\x4f\x43\x54\x4c\x5f\x56\x50\x4e\n"); bb2-3 ;}bb39=bb128(bb97.bb1144);bbm(bb39==bb91){bb157("\x62\x75\x66" 
"\x20\x3d\x3d\x20\x4e\x55\x4c\x4c\n");bb2-4 ;}bb139(bb2633(bb959,(bb28 )bb97.bb1367)==0 );bb139(bb2532(bb39,bb97.bb1240,bb97.bb1144)==0 ); bb134(&bb518.bb1124); #ifdef _DEBUG bbm(bb959==0xFF010212 ){bb157("\x62\x65\x66\x6f\x72\x65\x2c\x20\x4f" "\x49\x44\x5f\x50\x47\x50\x5f\x53\x48\x55\x54\x44\x4f\x57\x4e"); bb1747();} #endif bbm(bb959==0xFF010216 ){bb2589(bb2396,bb39);}bbm(bb1784(bb959,bb39, bb97.bb1144,&bb97.bb448)!=0 ){bb105(bb39);bb132(&bb518.bb1124);bb2-4 ;} #ifdef _DEBUG bbm(bb959==0xFF010205 )bb1896();bbm(bb959==0xFF010207 )bb1747();bbm( bb959==0xFF010212 ){bb157("\x61\x66\x74\x65\x72\x2c\x20\x4f\x49\x44" "\x5f\x50\x47\x50\x5f\x53\x48\x55\x54\x44\x4f\x57\x4e");bb1747();} #endif bb132(&bb518.bb1124);bb139(bb2626(bb97.bb1240,bb39,bb97.bb448)==0 ); bb139(bb2632(bb97.bb448,&bb2384->bb448)==0 );bb105(bb39);bb2 0 ;}bb41 bbe bb2520(bbi bb2230*bb2230,bbi bb25*bb19){bb2 0 ;}bb41 bbe bb2521( bbi bb2230*bb2230,bbi bb25*bb19){bb2 0 ;}bb41 bbi bb2594 bb2436={. bb2591=bb2478,.bb2631=bb2520,.bb2608=bb2521};bb41 bbi bb2585 bb2333={ .bb2618=bb2630,.bb34=bb614,.bb2639=&bb2436,};bbs bbl*bb2398(bbi bb2065 *bb471,bbs bbe bb22){bbs bbl*bb2545=bb2574(bb471);bb471->bb2463+= bb22;bb471->bb22+=bb22;bbm(bb2575(bb471->bb2463>bb471->bb444))bb157("" "\x73\x6b\x62\x5f\x70\x75\x74\x2c\x20\x74\x61\x69\x6c\x20\x3e\x20\x65" "\x6e\x64\n");bb2 bb2545;}bba bbi{bb60 bb2272;bb60 bb1813;bb60 bb2303 ;}bb2549;bb41 bb2549 bb1829;bb41 bbq bb2447(bbq bb2258,bbi bb2065* bb471,bbh bbi bb2122*bb2538,bbh bbi bb2122*bb439,bbe( *bb2475)(bbi bb2065* )){bbq bb515=bb2346;bb204 bb1160;bbi bb1895*bb295;bb366 bb553 ={{0 },{0 },0 };bb326 bb423;bb341 bb1255=bb91;bb318 bb975=bb91;bb416 bb1243;bb487 bb1217;bb174*bb403=bb91;bb553.bb373=bb951;bb295=(bbi bb1895* )bb2068(bb471);bbm(!bb295){bb515=bb1270;bb107 bb164;}bb81(& bb423,bb295,bb12(bb423));bbm(bb423.bb292==2 ){bb515=bb1270;bb107 bb164 ;}bbm(bb423.bb292==6 ){bbm(!(bb295+bb295->bb1287*4 )){bb515=bb1270; bb107 bb164;}bb81(&bb1217,(bb1)bb295+bb295->bb1287*4 
,bb12(bb1217)); bb975=&bb1217; #ifdef _DEBUG bb157("\x69\x6e\x63\x6f\x6d\x69\x6e\x67\x2c\x20");bb1263(&bb423,bb975 ); #endif }bb54 bbm(bb423.bb292==17 ){bbm(!(bb295+bb295->bb1287*4 )){bb515=bb1270 ;bb107 bb164;}bb81(&bb1243,(bb1)bb295+bb295->bb1287*4 ,bb12(bb1243)); bb1255=&bb1243; #ifdef _DEBUG bbm(1 ){bb157("\x69\x6e\x63\x6f\x6d\x69\x6e\x67\x2c\x20");bb1481(& bb423,bb1255);}bb54{bbm(bb1243.bb288!=4500 &&bb1243.bb288!=bb53(4500 )){ bb157("\x6c\x6f\x63\x61\x6c\x20\x69\x6e\x2c\x20\x75\x64\x70\n");}} #endif }bb1160=bb1246(bb949.bb971,&bb553,&bb423,bb1255,bb975); #ifdef _DEBUG bb157("\x70\x6d\x73\x3a\x25\x64\x2c\x20\x69\x6e\x63\x6f\x6d\x69\x6e" "\x67\x20\x69\x70\x2c\x20",bb1160);bb1574(&bb423); #endif bbm(bb1160==bb1007){bb515=bb1270;bb107 bb164;}bbm(bb1160!=bb1017)bb107 bb164;{bbq bb361;bb161 bbg;bbe bb1178;bb403=bb1465(&bbg,&bb518);bbm(! bb403&&bb403->bb466&&bb403->bb315){bb157("\x63\x61\x6e\x27\x74\x20" "\x61\x6c\x6c\x6f\x63\x20\x70\x61\x63\x6b\x65\x74\n");bb107 bb164;} bb361=bb196(bb295->bb2514);bb81(bb403->bb466,&bb553,bb12(bb553));bb81 (bb403->bb466+bb12(bb553),bb295,bb361);bb81(bb403->bb315,&bb553,bb12( bb553));bb403->bb565=bb12(bb553)+bb361;bb403->bb964=1 ;bb403->bb26= bb499(bb295->bb2579);bb403->bb440=bb1255?bb1255->bb427:0 ;bb1160= bb1234(bb949.bb971,bb403,&bb518);bbm(bb1160!=bb1146)bb107 bb164; #ifdef _DEBUG {bbi bb1895*bb1704;bb326 bb1957;bb341 bb1243=bb91;bb318 bb1217=bb91; bb416 bb2255;bb487 bb2194;bb1704=(bbi bb1895* )(bb403->bb315+bb12( bb553));bb81(&bb1957,bb1704,bb12(bb1957));bb157("\x69\x6e\x63\x6f\x6d" "\x69\x6e\x67\x20\x78\x66\x6f\x72\x6d\x42\x6c\x6f\x63\x6b\x2c\x20"); bb1574(&bb1957);bbm(bb1957.bb292==6 ){bb81(&bb2194,(bb1)bb1704+bb1704 ->bb1287*4 ,bb12(bb2194));bb1217=&bb2194;bb157("\x69\x6e\x63\x6f\x6d" "\x69\x6e\x67\x20\x78\x66\x6f\x72\x6d\x42\x6c\x6f\x63\x6b\x20\x74\x63" "\x70\x2c\x20");bb1263(&bb1957,bb1217);bb1379(bb1704,bb403->bb442- bb12(bb553));}bb54 bbm(bb1957.bb292==17 ){bbm(!(bb295+bb295->bb1287*4 )){ bb515=bb1270;bb107 
bb164;}bb81(&bb2255,(bb1)bb1704+bb1704->bb1287*4 , bb12(bb2255));bb1243=&bb2255;bb157("\x69\x6e\x63\x6f\x6d\x69\x6e\x67" "\x20\x78\x66\x6f\x72\x6d\x42\x6c\x6f\x63\x6b\x20\x75\x64\x70\x2c\x20" );bb1481(&bb1957,bb1243);bb1379(bb1704,bb403->bb442-bb12(bb553));}} #endif bb1178=bb403->bb442-bb403->bb565;bb361=bb403->bb442-bb12(bb553);bbm( bb1178>0 ){bb31(bb2537(bb471)==0 );bbm(bb2541(bb471,0 ,bb1178,bb142)!=0 ){ bb157("\x63\x61\x6e\x27\x74\x20\x65\x78\x70\x61\x6e\x64\x20\x73\x6b" "\x62\n");bb107 bb164;}bb2398(bb471,bb1178);}bb54{bbe bb2053=bb471-> bb22+bb1178;bbm(bb1178<0 )bb2445(bb471,bb2053);bbm(!bb2498(bb471, bb2053)){bb157("\x63\x61\x6e\x27\x74\x20\x6d\x61\x6b\x65\x20\x73\x6b" "\x62\x20\x77\x72\x69\x74\x61\x62\x6c\x65\n");bb107 bb164;}}bb81( bb2068(bb471),bb403->bb315+bb12(bb553),bb361);bb295=(bbi bb1895* )bb2068 (bb471);bb81(&bb423,bb295,bb12(bb423));bbm(bb423.bb292==6 ){bb81(& bb1217,(bb1)bb295+bb295->bb1287*4 ,bb12(bb1217));bb975=&bb1217;bb1829. bb2272=bb423.bb310;bb1829.bb1813=bb975->bb592;bb1829.bb2303=bb975-> bb918;}bbm(bb1064(bb2068(bb471)->bb2592)){bb157("\x66\x72\x61\x67\x6d" "\x65\x6e\x74\n");bb2600(bb471);bb2581(bb471);bb2576(bb471);bb515= bb2577;bb107 bb164;}}bb515=bb1270;bb164:bbm(bb403)bb1664(&bb518,bb403 );bb2 bb515;}bb41 bbq bb2525(bbq bb2258,bbi bb2065*bb471,bbh bbi bb2122*bb2538,bbh bbi bb2122*bb439,bbe( *bb2475)(bbi bb2065* )){bbq bb515=bb2346;bb204 bb1160;bbi bb2122*bb2146;bbi bb1895*bb295;bb366 bb553={{0 },{0 },0 };bb326 bb423;bb341 bb1255=bb91;bb318 bb975=bb91; bb416 bb1243;bb487 bb1217;bb174*bb403=bb91;bb553.bb373=bb951;bb295=( bbi bb1895* )bb2068(bb471);bbm(!bb295){bb515=bb1270;bb107 bb164;}bb81 (&bb423,bb295,bb12(bb423));bb2146=bb2621(bb471)->bb2146;bbm(!bb2146|| bb2593(bb2146->bb34,bb2396)!=0 ){bb515=bb1270;bb107 bb164;}bbm(bb1867( bb295->bb2211))bb2 bb2346;bbm(bb423.bb292==2 ){bb515=bb1270;bb107 bb164 ;}bbm(bb423.bb292==6 ){bbe bb2352=bb295->bb1287*4 ;bb318 bb2175=(bb318)( (bb1)bb295+bb2352);bb81(&bb1217,bb2175,bb12(bb1217));bb975=&bb1217; 
#ifdef _DEBUG bb157("\x6f\x75\x74\x67\x6f\x69\x6e\x67\x2c\x20");bb1263(&bb423,bb975 ); #endif {bb32*bb109=bb949.bb971;bbm(bb109->bb548>0 &&bb295->bb2211==bb109-> bb508[0 ].bb180.bb26&&bb975->bb549==bb1829.bb2303&&bb975->bb288== bb1829.bb1813){bbq bb2139=bb196(bb423.bb361)-bb2352;bbf*bb39=bb128( bb12(bb612)+bb2139);bb569 bb559=(bb569)bb39;bb1829.bb2303=0 ;bb559-> bb310=bb423.bb310;bb559->bb203=bb1829.bb2272;bb559->bb910=0 ;bb559-> bb292=6 ;bb559->bb919=bb53(bb2139);bb295->bb2211=bb1829.bb2272;bb975-> bb319=0 ;bb81(&bb2175->bb319,&bb975->bb319,2 );bb81(bb39+bb12(bb612), bb2175,bb2139);bb975->bb319=bb886(bb39,bb12(bb612)+bb2139);bb81(& bb2175->bb319,&bb975->bb319,2 ); #ifdef _DEBUG bb157("\x6e\x65\x77\x20\x63\x68\x65\x63\x6b\x73\x75\x6d\x3a\x25\x30" "\x34\x78\n",bb53(bb975->bb319)); #endif bb105(bb39);bb2572(bb295);bb81(&bb423,bb295,bb12(bb423));}}}bb54 bbm( bb423.bb292==17 ){bbm(!(bb295+bb295->bb1287*4 )){bb515=bb1270;bb107 bb164;}bb81(&bb1243,(bb1)bb295+bb295->bb1287*4 ,bb12(bb1243));bb1255=& bb1243; #ifdef _DEBUG bb157("\x6f\x75\x74\x67\x6f\x69\x6e\x67\x2c\x20\n");bb1481(&bb423, bb1255); #endif }bb1160=bb1267(bb949.bb971,&bb553,&bb423,bb1255,bb975); #ifdef _DEBUG bb157("\x70\x6d\x73\x3a\x25\x64\x2c\x20\x6f\x75\x74\x67\x6f\x69\x6e" "\x67\x20\x69\x70\x2c\x20",bb1160);bb1574(&bb423); #endif bbm(bb1160==bb1007){bb515=bb1270;bb107 bb164;}bbm(bb1160!=bb1017)bb107 bb164;{bbq bb361;bb161 bbg;bbe bb1178;bb403=bb1465(&bbg,&bb518);bbm(! 
bb403&&bb403->bb466&&bb403->bb315){bb157("\x63\x61\x6e\x27\x74\x20" "\x61\x6c\x6c\x6f\x63\x20\x70\x61\x63\x6b\x65\x74\n");bb107 bb164;} bb361=bb196(bb295->bb2514);bb81(bb403->bb466,&bb553,bb12(bb553));bb81 (bb403->bb466+bb12(bb553),bb295,bb361);bb81(bb403->bb315,&bb553,bb12( bb553)); #ifdef _DEBUG bbm(bb423.bb292==6 ){bb157("\x6f\x75\x74\x67\x6f\x69\x6e\x67\x20\x73" "\x72\x63\x42\x6c\x6f\x63\x6b\x20\x74\x63\x70\x2c\x20");bb1263(&bb423 ,bb975);bb1379(bb295,bb361);}bb54 bbm(bb423.bb292==17 ){bb157("\x6f" "\x75\x74\x67\x6f\x69\x6e\x67\x20\x73\x72\x63\x42\x6c\x6f\x63\x6b\x20" "\x75\x64\x70\x2c\x20");bb1481(&bb423,bb1255);bb1379(bb295,bb361);} #endif bb403->bb565=bb12(bb553)+bb361;bb403->bb964=1 ;bb403->bb26=bb499(bb295 ->bb2211);bb403->bb440=bb1255?bb1255->bb427:0 ;bb1160=bb1254(bb949. bb971,bb403,&bb518);bb157("\x50\x4d\x44\x6f\x54\x72\x61\x6e\x73\x66" "\x6f\x72\x6d\x4f\x75\x74\x67\x6f\x69\x6e\x67\x2c\x20\x70\x6d\x73\x3a" "\x25\x64\n",bb1160);bbm(bb1160!=bb1146)bb107 bb164; #ifdef _DEBUG {bb326 bb423;bb81(&bb423,bb403->bb315+bb12(bb553),bb12(bb423));bb157("" "\x6f\x75\x74\x67\x6f\x69\x6e\x67\x20\x78\x66\x6f\x72\x6d\x42\x6c\x6f" "\x63\x6b\x2c\x20");bb1574(&bb423);} #endif bb1178=bb403->bb442-bb403->bb565;bb361=bb403->bb442-bb12(bb553);bbm( bb1178>0 ){bb31(bb2537(bb471)==0 );bbm(bb2541(bb471,0 ,bb1178,bb142)!=0 ){ bb157("\x63\x61\x6e\x27\x74\x20\x65\x78\x70\x61\x6e\x64\x20\x73\x6b" "\x62\n");bb107 bb164;}bb2398(bb471,bb1178);}bb54{bbe bb2053=bb471-> bb22+bb1178;bbm(bb1178<0 )bb2445(bb471,bb2053);bbm(!bb2498(bb471, bb2053)){bb157("\x63\x61\x6e\x27\x74\x20\x6d\x61\x6b\x65\x20\x73\x6b" "\x62\x20\x77\x72\x69\x74\x61\x62\x6c\x65\n");bb107 bb164;}}bb81( bb2068(bb471),bb403->bb315+bb12(bb553),bb361); #ifdef _DEBUG bb157("\x73\x6b\x62\x2d\x3e\x69\x70\x5f\x73\x75\x6d\x6d\x65\x64\x3a" "\x25\x64\n",bb471->bb2559); #endif bb471->bb2559=bb2571;bb2623(bb471,bb2613);}bb515=bb1270;bb164:bbm( bb403)bb1664(&bb518,bb403);bb2 bb515;}bb41 bbi bb2627 bb2434={.bb2455 
=bb2447,.bb2519=bb2453,.bb2258=bb2587,.bb2551=bb2524},bb2409={.bb2455 =bb2525,.bb2519=bb2453,.bb2258=bb2568,.bb2551=bb2524};bb41 bbe bb2452 (bbl*bb1069,bbl* *bb2237,bbl* *bb2205,bbe bb2351){bbi bb2583*bb2210; bb2625 bb2503=(bb2351==bb2602)?bb142:bb2622;bb1768("\x63\x61\x6c\x6c" "\x5f\x75\x73\x65\x72\x6d\x6f\x64\x65\x68\x65\x6c\x70\x65\x72\x5f\x2c" "\x20\x70\x61\x74\x68\x3a\x20\x25\x73\n",bb1069);bb2210=bb2635(bb1069 ,bb2237,bb2205,bb2503);bbm(bb2210==bb91)bb2-bb2614;bb2 bb2647(bb2210, bb2351);}bbe bb2641(){bbe bb35; #ifdef _DEBUG bb1913(); #endif bb141(&bb518.bb1124);bbm(!bb1284(&bb949.bb971)){bb157("\x63\x61\x6e" "\x27\x74\x20\x69\x6e\x69\x74\x20\x70\x6d\n");bb2-1 ;}{bb474 bbo={0 }; bbo.bb1120=1 ;bb1780(bb949.bb971,&bbo);}bb958=1480 ;bbm(!bb1783(&bb518)){ bb1768("\x63\x61\x6e\x27\x74\x20\x61\x6c\x6c\x6f\x63\x20\x70\x6b\x74" "\x20\x70\x6f\x6f\x6c\n");bb2-2 ;}bb35=bb2590(&bb2333);bbm(bb35!=0 ){ bb1768("\x63\x61\x6e\x27\x74\x20\x72\x65\x67\x20\x6d\x69\x73\x63\x20" "\x64\x65\x76\x2c\x20\x20\x25\x64\n",bb35);bb2-3 ;}bb157("\x73\x75\x63" "\x63\x65\x65\x64\x20\x69\x6e\x20\x72\x65\x67\x69\x73\x74\x65\x72\x69" "\x6e\x67\x20\x6d\x69\x73\x63\n");bb35=bb2450(&bb2434);bbm(bb35!=0 ){ bb1768("\x63\x61\x6e\x27\x74\x20\x72\x65\x67\x20\x68\x6f\x6f\x6b\x20" "\x69\x6e\x2c\x20\x25\x64\n",bb35);bb2-4 ;}bb157("\x73\x75\x63\x63\x65" "\x65\x64\x20\x69\x6e\x20\x72\x65\x67\x69\x73\x74\x65\x72\x69\x6e\x67" "\x20\x74\x68\x65\x20\x68\x6f\x6f\x6b\x5f\x69\x6e\n");bb35=bb2450(& bb2409);bbm(bb35!=0 ){bb1768("\x63\x61\x6e\x27\x74\x20\x72\x65\x67\x20" "\x68\x6f\x6f\x6b\x20\x6f\x75\x74\x2c\x20\x25\x64\n",bb35);bb2-5 ;} bb157("\x73\x75\x63\x63\x65\x65\x64\x20\x69\x6e\x20\x72\x65\x67\x69" "\x73\x74\x65\x72\x69\x6e\x67\x20\x74\x68\x65\x20\x68\x6f\x6f\x6b\x5f" "\x6f\x75\x74\n");{bbl*bb2237[]={"\x2f\x73\x79\x73\x74\x65\x6d\x2f" "\x62\x69\x6e\x2f\x63\x68\x6f\x77\x6e","\x73\x79\x73\x74\x65\x6d\x2e" "\x73\x79\x73\x74\x65\x6d",bb468,bb91};bb41 bbl*bb2205[]={"\x48\x4f" 
"\x4d\x45\x3d\x2f","\x54\x45\x52\x4d\x3d\x6c\x69\x6e\x75\x78","\x50" "\x41\x54\x48\x3d\x2f\x73\x79\x73\x74\x65\x6d\x2f\x62\x69\x6e",bb91}; bb1768("\x69\x6e\x69\x74\x5f\x6d\x6f\x64\x75\x6c\x65\x2c\x20\x63\x61" "\x6c\x6c\x5f\x75\x73\x65\x72\x6d\x6f\x64\x65\x68\x65\x6c\x70\x65\x72" "\x5f\x3a\x20\x25\x64\n",bb2452("\x2f\x73\x79\x73\x74\x65\x6d\x2f\x62" "\x69\x6e\x2f\x74\x6f\x6f\x6c\x62\x6f\x78",bb2237,bb2205,1 ));}bb2 0 ;} bbb bb2642(){bb2481(&bb2434);bb2481(&bb2409);bb2611(&bb2333);bb1794(& bb518);bb1283(bb949.bb971); #ifdef _DEBUG bb1877(); #endif }
gpl-2.0
Satius/pia-linux-kernel
drivers/sbus/char/uctrl.c
218
10993
/* uctrl.c: TS102 Microcontroller interface on Tadpole Sparcbook 3 * * Copyright 1999 Derrick J Brashear (shadow@dementia.org) * Copyright 2008 David S. Miller (davem@davemloft.net) */ #include <linux/module.h> #include <linux/errno.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/slab.h> #include <linux/mutex.h> #include <linux/ioport.h> #include <linux/init.h> #include <linux/miscdevice.h> #include <linux/mm.h> #include <linux/of.h> #include <linux/of_device.h> #include <asm/openprom.h> #include <asm/oplib.h> #include <asm/system.h> #include <asm/irq.h> #include <asm/io.h> #include <asm/pgtable.h> #define UCTRL_MINOR 174 #define DEBUG 1 #ifdef DEBUG #define dprintk(x) printk x #else #define dprintk(x) #endif struct uctrl_regs { u32 uctrl_intr; u32 uctrl_data; u32 uctrl_stat; u32 uctrl_xxx[5]; }; struct ts102_regs { u32 card_a_intr; u32 card_a_stat; u32 card_a_ctrl; u32 card_a_xxx; u32 card_b_intr; u32 card_b_stat; u32 card_b_ctrl; u32 card_b_xxx; u32 uctrl_intr; u32 uctrl_data; u32 uctrl_stat; u32 uctrl_xxx; u32 ts102_xxx[4]; }; /* Bits for uctrl_intr register */ #define UCTRL_INTR_TXE_REQ 0x01 /* transmit FIFO empty int req */ #define UCTRL_INTR_TXNF_REQ 0x02 /* transmit FIFO not full int req */ #define UCTRL_INTR_RXNE_REQ 0x04 /* receive FIFO not empty int req */ #define UCTRL_INTR_RXO_REQ 0x08 /* receive FIFO overflow int req */ #define UCTRL_INTR_TXE_MSK 0x10 /* transmit FIFO empty mask */ #define UCTRL_INTR_TXNF_MSK 0x20 /* transmit FIFO not full mask */ #define UCTRL_INTR_RXNE_MSK 0x40 /* receive FIFO not empty mask */ #define UCTRL_INTR_RXO_MSK 0x80 /* receive FIFO overflow mask */ /* Bits for uctrl_stat register */ #define UCTRL_STAT_TXE_STA 0x01 /* transmit FIFO empty status */ #define UCTRL_STAT_TXNF_STA 0x02 /* transmit FIFO not full status */ #define UCTRL_STAT_RXNE_STA 0x04 /* receive FIFO not empty status */ #define UCTRL_STAT_RXO_STA 0x08 /* receive FIFO overflow status */ static DEFINE_MUTEX(uctrl_mutex); static const char 
*uctrl_extstatus[16] = { "main power available", "internal battery attached", "external battery attached", "external VGA attached", "external keyboard attached", "external mouse attached", "lid down", "internal battery currently charging", "external battery currently charging", "internal battery currently discharging", "external battery currently discharging", }; /* Everything required for one transaction with the uctrl */ struct uctrl_txn { u8 opcode; u8 inbits; u8 outbits; u8 *inbuf; u8 *outbuf; }; struct uctrl_status { u8 current_temp; /* 0x07 */ u8 reset_status; /* 0x0b */ u16 event_status; /* 0x0c */ u16 error_status; /* 0x10 */ u16 external_status; /* 0x11, 0x1b */ u8 internal_charge; /* 0x18 */ u8 external_charge; /* 0x19 */ u16 control_lcd; /* 0x20 */ u8 control_bitport; /* 0x21 */ u8 speaker_volume; /* 0x23 */ u8 control_tft_brightness; /* 0x24 */ u8 control_kbd_repeat_delay; /* 0x28 */ u8 control_kbd_repeat_period; /* 0x29 */ u8 control_screen_contrast; /* 0x2F */ }; enum uctrl_opcode { READ_SERIAL_NUMBER=0x1, READ_ETHERNET_ADDRESS=0x2, READ_HARDWARE_VERSION=0x3, READ_MICROCONTROLLER_VERSION=0x4, READ_MAX_TEMPERATURE=0x5, READ_MIN_TEMPERATURE=0x6, READ_CURRENT_TEMPERATURE=0x7, READ_SYSTEM_VARIANT=0x8, READ_POWERON_CYCLES=0x9, READ_POWERON_SECONDS=0xA, READ_RESET_STATUS=0xB, READ_EVENT_STATUS=0xC, READ_REAL_TIME_CLOCK=0xD, READ_EXTERNAL_VGA_PORT=0xE, READ_MICROCONTROLLER_ROM_CHECKSUM=0xF, READ_ERROR_STATUS=0x10, READ_EXTERNAL_STATUS=0x11, READ_USER_CONFIGURATION_AREA=0x12, READ_MICROCONTROLLER_VOLTAGE=0x13, READ_INTERNAL_BATTERY_VOLTAGE=0x14, READ_DCIN_VOLTAGE=0x15, READ_HORIZONTAL_POINTER_VOLTAGE=0x16, READ_VERTICAL_POINTER_VOLTAGE=0x17, READ_INTERNAL_BATTERY_CHARGE_LEVEL=0x18, READ_EXTERNAL_BATTERY_CHARGE_LEVEL=0x19, READ_REAL_TIME_CLOCK_ALARM=0x1A, READ_EVENT_STATUS_NO_RESET=0x1B, READ_INTERNAL_KEYBOARD_LAYOUT=0x1C, READ_EXTERNAL_KEYBOARD_LAYOUT=0x1D, READ_EEPROM_STATUS=0x1E, CONTROL_LCD=0x20, CONTROL_BITPORT=0x21, SPEAKER_VOLUME=0x23, 
CONTROL_TFT_BRIGHTNESS=0x24, CONTROL_WATCHDOG=0x25, CONTROL_FACTORY_EEPROM_AREA=0x26, CONTROL_KBD_TIME_UNTIL_REPEAT=0x28, CONTROL_KBD_TIME_BETWEEN_REPEATS=0x29, CONTROL_TIMEZONE=0x2A, CONTROL_MARK_SPACE_RATIO=0x2B, CONTROL_DIAGNOSTIC_MODE=0x2E, CONTROL_SCREEN_CONTRAST=0x2F, RING_BELL=0x30, SET_DIAGNOSTIC_STATUS=0x32, CLEAR_KEY_COMBINATION_TABLE=0x33, PERFORM_SOFTWARE_RESET=0x34, SET_REAL_TIME_CLOCK=0x35, RECALIBRATE_POINTING_STICK=0x36, SET_BELL_FREQUENCY=0x37, SET_INTERNAL_BATTERY_CHARGE_RATE=0x39, SET_EXTERNAL_BATTERY_CHARGE_RATE=0x3A, SET_REAL_TIME_CLOCK_ALARM=0x3B, READ_EEPROM=0x40, WRITE_EEPROM=0x41, WRITE_TO_STATUS_DISPLAY=0x42, DEFINE_SPECIAL_CHARACTER=0x43, DEFINE_KEY_COMBINATION_ENTRY=0x50, DEFINE_STRING_TABLE_ENTRY=0x51, DEFINE_STATUS_SCREEN_DISPLAY=0x52, PERFORM_EMU_COMMANDS=0x64, READ_EMU_REGISTER=0x65, WRITE_EMU_REGISTER=0x66, READ_EMU_RAM=0x67, WRITE_EMU_RAM=0x68, READ_BQ_REGISTER=0x69, WRITE_BQ_REGISTER=0x6A, SET_USER_PASSWORD=0x70, VERIFY_USER_PASSWORD=0x71, GET_SYSTEM_PASSWORD_KEY=0x72, VERIFY_SYSTEM_PASSWORD=0x73, POWER_OFF=0x82, POWER_RESTART=0x83, }; static struct uctrl_driver { struct uctrl_regs __iomem *regs; int irq; int pending; struct uctrl_status status; } *global_driver; static void uctrl_get_event_status(struct uctrl_driver *); static void uctrl_get_external_status(struct uctrl_driver *); static long uctrl_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { switch (cmd) { default: return -EINVAL; } return 0; } static int uctrl_open(struct inode *inode, struct file *file) { mutex_lock(&uctrl_mutex); uctrl_get_event_status(global_driver); uctrl_get_external_status(global_driver); mutex_unlock(&uctrl_mutex); return 0; } static irqreturn_t uctrl_interrupt(int irq, void *dev_id) { return IRQ_HANDLED; } static const struct file_operations uctrl_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .unlocked_ioctl = uctrl_ioctl, .open = uctrl_open, }; static struct miscdevice uctrl_dev = { UCTRL_MINOR, "uctrl", &uctrl_fops }; /* Wait 
for space to write, then write to it */ #define WRITEUCTLDATA(value) \ { \ unsigned int i; \ for (i = 0; i < 10000; i++) { \ if (UCTRL_STAT_TXNF_STA & sbus_readl(&driver->regs->uctrl_stat)) \ break; \ } \ dprintk(("write data 0x%02x\n", value)); \ sbus_writel(value, &driver->regs->uctrl_data); \ } /* Wait for something to read, read it, then clear the bit */ #define READUCTLDATA(value) \ { \ unsigned int i; \ value = 0; \ for (i = 0; i < 10000; i++) { \ if ((UCTRL_STAT_RXNE_STA & sbus_readl(&driver->regs->uctrl_stat)) == 0) \ break; \ udelay(1); \ } \ value = sbus_readl(&driver->regs->uctrl_data); \ dprintk(("read data 0x%02x\n", value)); \ sbus_writel(UCTRL_STAT_RXNE_STA, &driver->regs->uctrl_stat); \ } static void uctrl_do_txn(struct uctrl_driver *driver, struct uctrl_txn *txn) { int stat, incnt, outcnt, bytecnt, intr; u32 byte; stat = sbus_readl(&driver->regs->uctrl_stat); intr = sbus_readl(&driver->regs->uctrl_intr); sbus_writel(stat, &driver->regs->uctrl_stat); dprintk(("interrupt stat 0x%x int 0x%x\n", stat, intr)); incnt = txn->inbits; outcnt = txn->outbits; byte = (txn->opcode << 8); WRITEUCTLDATA(byte); bytecnt = 0; while (incnt > 0) { byte = (txn->inbuf[bytecnt] << 8); WRITEUCTLDATA(byte); incnt--; bytecnt++; } /* Get the ack */ READUCTLDATA(byte); dprintk(("ack was %x\n", (byte >> 8))); bytecnt = 0; while (outcnt > 0) { READUCTLDATA(byte); txn->outbuf[bytecnt] = (byte >> 8); dprintk(("set byte to %02x\n", byte)); outcnt--; bytecnt++; } } static void uctrl_get_event_status(struct uctrl_driver *driver) { struct uctrl_txn txn; u8 outbits[2]; txn.opcode = READ_EVENT_STATUS; txn.inbits = 0; txn.outbits = 2; txn.inbuf = NULL; txn.outbuf = outbits; uctrl_do_txn(driver, &txn); dprintk(("bytes %x %x\n", (outbits[0] & 0xff), (outbits[1] & 0xff))); driver->status.event_status = ((outbits[0] & 0xff) << 8) | (outbits[1] & 0xff); dprintk(("ev is %x\n", driver->status.event_status)); } static void uctrl_get_external_status(struct uctrl_driver *driver) { struct 
uctrl_txn txn; u8 outbits[2]; int i, v; txn.opcode = READ_EXTERNAL_STATUS; txn.inbits = 0; txn.outbits = 2; txn.inbuf = NULL; txn.outbuf = outbits; uctrl_do_txn(driver, &txn); dprintk(("bytes %x %x\n", (outbits[0] & 0xff), (outbits[1] & 0xff))); driver->status.external_status = ((outbits[0] * 256) + (outbits[1])); dprintk(("ex is %x\n", driver->status.external_status)); v = driver->status.external_status; for (i = 0; v != 0; i++, v >>= 1) { if (v & 1) { dprintk(("%s%s", " ", uctrl_extstatus[i])); } } dprintk(("\n")); } static int __devinit uctrl_probe(struct platform_device *op) { struct uctrl_driver *p; int err = -ENOMEM; p = kzalloc(sizeof(*p), GFP_KERNEL); if (!p) { printk(KERN_ERR "uctrl: Unable to allocate device struct.\n"); goto out; } p->regs = of_ioremap(&op->resource[0], 0, resource_size(&op->resource[0]), "uctrl"); if (!p->regs) { printk(KERN_ERR "uctrl: Unable to map registers.\n"); goto out_free; } p->irq = op->archdata.irqs[0]; err = request_irq(p->irq, uctrl_interrupt, 0, "uctrl", p); if (err) { printk(KERN_ERR "uctrl: Unable to register irq.\n"); goto out_iounmap; } err = misc_register(&uctrl_dev); if (err) { printk(KERN_ERR "uctrl: Unable to register misc device.\n"); goto out_free_irq; } sbus_writel(UCTRL_INTR_RXNE_REQ|UCTRL_INTR_RXNE_MSK, &p->regs->uctrl_intr); printk(KERN_INFO "%s: uctrl regs[0x%p] (irq %d)\n", op->dev.of_node->full_name, p->regs, p->irq); uctrl_get_event_status(p); uctrl_get_external_status(p); dev_set_drvdata(&op->dev, p); global_driver = p; out: return err; out_free_irq: free_irq(p->irq, p); out_iounmap: of_iounmap(&op->resource[0], p->regs, resource_size(&op->resource[0])); out_free: kfree(p); goto out; } static int __devexit uctrl_remove(struct platform_device *op) { struct uctrl_driver *p = dev_get_drvdata(&op->dev); if (p) { misc_deregister(&uctrl_dev); free_irq(p->irq, p); of_iounmap(&op->resource[0], p->regs, resource_size(&op->resource[0])); kfree(p); } return 0; } static const struct of_device_id uctrl_match[] = { { 
.name = "uctrl", }, {}, }; MODULE_DEVICE_TABLE(of, uctrl_match); static struct platform_driver uctrl_driver = { .driver = { .name = "uctrl", .owner = THIS_MODULE, .of_match_table = uctrl_match, }, .probe = uctrl_probe, .remove = __devexit_p(uctrl_remove), }; module_platform_driver(uctrl_driver); MODULE_LICENSE("GPL");
gpl-2.0