repo_name
string
path
string
copies
string
size
string
content
string
license
string
str90/RK3188_tablet_kernel_sources
sound/isa/gus/gus_uart.c
13164
8087
/*
 *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
 *  Routines for the GF1 MIDI interface - like UART 6850
 *
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/time.h>
#include <sound/core.h>
#include <sound/gus.h>

/*
 * Rx interrupt handler: drain bytes from the UART receive FIFO (while
 * holding uart_cmd_lock) and hand each one to the rawmidi core via
 * snd_rawmidi_receive().  Framing errors and overruns are counted in
 * gus->gf1.uart_framing / uart_overrun.  The outer loop re-arms its
 * counter to 100 whenever data is present, so it keeps polling the FIFO
 * until it has been empty for 10 consecutive checks.
 */
static void snd_gf1_interrupt_midi_in(struct snd_gus_card * gus)
{
	int count;
	unsigned char stat, data, byte;
	unsigned long flags;

	count = 10;
	while (count) {
		spin_lock_irqsave(&gus->uart_cmd_lock, flags);
		stat = snd_gf1_uart_stat(gus);
		if (!(stat & 0x01)) {	/* data in Rx FIFO? */
			spin_unlock_irqrestore(&gus->uart_cmd_lock, flags);
			count--;
			continue;
		}
		count = 100;	/* arm counter to new value */
		/* NOTE(review): 'data' is read here but never used; a second
		 * snd_gf1_uart_get() below fetches 'byte'.  Presumably an
		 * intentional hardware access pattern — confirm against the
		 * GF1 UART documentation. */
		data = snd_gf1_uart_get(gus);
		if (!(gus->gf1.uart_cmd & 0x80)) {	/* Rx disabled - drop byte */
			spin_unlock_irqrestore(&gus->uart_cmd_lock, flags);
			continue;
		}
		if (stat & 0x10) {	/* framing error */
			gus->gf1.uart_framing++;
			spin_unlock_irqrestore(&gus->uart_cmd_lock, flags);
			continue;
		}
		byte = snd_gf1_uart_get(gus);
		/* drop the lock before calling into the rawmidi core */
		spin_unlock_irqrestore(&gus->uart_cmd_lock, flags);
		snd_rawmidi_receive(gus->midi_substream_input, &byte, 1);
		if (stat & 0x20) {	/* overrun error */
			gus->gf1.uart_overrun++;
		}
	}
}

/*
 * Tx interrupt handler: if the transmit FIFO has room, fetch the next
 * byte from the rawmidi core and write it to the UART.  When there is
 * nothing left to send (or an error occurs), the Tx interrupt is
 * disabled by clearing bit 0x20 of the UART command register.
 */
static void snd_gf1_interrupt_midi_out(struct snd_gus_card * gus)
{
	char byte;
	unsigned long flags;

	/* try unlock output */
	if (snd_gf1_uart_stat(gus) & 0x01)
		snd_gf1_interrupt_midi_in(gus);

	spin_lock_irqsave(&gus->uart_cmd_lock, flags);
	if (snd_gf1_uart_stat(gus) & 0x02) {	/* Tx FIFO free? */
		if (snd_rawmidi_transmit(gus->midi_substream_output, &byte, 1) != 1) {	/* no other bytes or error */
			snd_gf1_uart_cmd(gus, gus->gf1.uart_cmd & ~0x20);	/* disable Tx interrupt */
		} else {
			snd_gf1_uart_put(gus, byte);
		}
	}
	spin_unlock_irqrestore(&gus->uart_cmd_lock, flags);
}

/*
 * Reset the UART.  When @close is zero and the UART is enabled, wait
 * for the reset to settle (udelay) and put the UART back into normal
 * operation; on close the device is simply left in the reset state.
 * Caller is expected to hold uart_cmd_lock (all callers in this file do).
 */
static void snd_gf1_uart_reset(struct snd_gus_card * gus, int close)
{
	snd_gf1_uart_cmd(gus, 0x03);	/* reset */
	if (!close && gus->uart_enable) {
		udelay(160);
		snd_gf1_uart_cmd(gus, 0x00);	/* normal operations */
	}
}

/*
 * rawmidi open callback for the output stream: reset the UART unless
 * input is already active (Rx enable bit 0x80 set), then install the
 * Tx interrupt handler and remember the substream.  Returns 0.
 */
static int snd_gf1_uart_output_open(struct snd_rawmidi_substream *substream)
{
	unsigned long flags;
	struct snd_gus_card *gus;

	gus = substream->rmidi->private_data;
	spin_lock_irqsave(&gus->uart_cmd_lock, flags);
	if (!(gus->gf1.uart_cmd & 0x80)) {	/* input active? */
		snd_gf1_uart_reset(gus, 0);
	}
	gus->gf1.interrupt_handler_midi_out = snd_gf1_interrupt_midi_out;
	gus->midi_substream_output = substream;
	spin_unlock_irqrestore(&gus->uart_cmd_lock, flags);
#if 0
	snd_printk(KERN_DEBUG "write init - cmd = 0x%x, stat = 0x%x\n", gus->gf1.uart_cmd, snd_gf1_uart_stat(gus));
#endif
	return 0;
}

/*
 * rawmidi open callback for the input stream: reset the UART unless the
 * output handler is already installed, install the Rx interrupt handler,
 * and flush any stale bytes out of the Rx FIFO (bounded to 1000 reads to
 * avoid spinning forever on broken hardware).  Returns 0.
 */
static int snd_gf1_uart_input_open(struct snd_rawmidi_substream *substream)
{
	unsigned long flags;
	struct snd_gus_card *gus;
	int i;

	gus = substream->rmidi->private_data;
	spin_lock_irqsave(&gus->uart_cmd_lock, flags);
	if (gus->gf1.interrupt_handler_midi_out != snd_gf1_interrupt_midi_out) {
		snd_gf1_uart_reset(gus, 0);
	}
	gus->gf1.interrupt_handler_midi_in = snd_gf1_interrupt_midi_in;
	gus->midi_substream_input = substream;
	if (gus->uart_enable) {
		for (i = 0; i < 1000 && (snd_gf1_uart_stat(gus) & 0x01); i++)
			snd_gf1_uart_get(gus);	/* clean Rx */
		if (i >= 1000)
			snd_printk(KERN_ERR "gus midi uart init read - cleanup error\n");
	}
	spin_unlock_irqrestore(&gus->uart_cmd_lock, flags);
#if 0
	snd_printk(KERN_DEBUG "read init - enable = %i, cmd = 0x%x, stat = 0x%x\n", gus->uart_enable, gus->gf1.uart_cmd, snd_gf1_uart_stat(gus));
	snd_printk(KERN_DEBUG "[0x%x] reg (ctrl/status) = 0x%x, reg (data) = 0x%x "
		   "(page = 0x%x)\n",
		   gus->gf1.port + 0x100, inb(gus->gf1.port + 0x100),
		   inb(gus->gf1.port + 0x101), inb(gus->gf1.port + 0x102));
#endif
	return 0;
}

/*
 * rawmidi close callback for the output stream: reset the UART unless
 * the input handler is still installed, restore the default interrupt
 * handler and clear the substream pointer.  Returns 0.
 */
static int snd_gf1_uart_output_close(struct snd_rawmidi_substream *substream)
{
	unsigned long flags;
	struct snd_gus_card *gus;

	gus = substream->rmidi->private_data;
	spin_lock_irqsave(&gus->uart_cmd_lock, flags);
	if (gus->gf1.interrupt_handler_midi_in != snd_gf1_interrupt_midi_in)
		snd_gf1_uart_reset(gus, 1);
	snd_gf1_set_default_handlers(gus, SNDRV_GF1_HANDLER_MIDI_OUT);
	gus->midi_substream_output = NULL;
	spin_unlock_irqrestore(&gus->uart_cmd_lock, flags);
	return 0;
}

/*
 * rawmidi close callback for the input stream: mirror image of
 * snd_gf1_uart_output_close() for the Rx side.  Returns 0.
 */
static int snd_gf1_uart_input_close(struct snd_rawmidi_substream *substream)
{
	unsigned long flags;
	struct snd_gus_card *gus;

	gus = substream->rmidi->private_data;
	spin_lock_irqsave(&gus->uart_cmd_lock, flags);
	if (gus->gf1.interrupt_handler_midi_out != snd_gf1_interrupt_midi_out)
		snd_gf1_uart_reset(gus, 1);
	snd_gf1_set_default_handlers(gus, SNDRV_GF1_HANDLER_MIDI_IN);
	gus->midi_substream_input = NULL;
	spin_unlock_irqrestore(&gus->uart_cmd_lock, flags);
	return 0;
}

/*
 * rawmidi trigger callback for the input stream: enable (@up != 0) or
 * disable Rx interrupts by toggling bit 0x80 of the UART command register.
 */
static void snd_gf1_uart_input_trigger(struct snd_rawmidi_substream *substream, int up)
{
	struct snd_gus_card *gus;
	unsigned long flags;

	gus = substream->rmidi->private_data;
	spin_lock_irqsave(&gus->uart_cmd_lock, flags);
	if (up) {
		if ((gus->gf1.uart_cmd & 0x80) == 0)
			snd_gf1_uart_cmd(gus, gus->gf1.uart_cmd | 0x80);	/* enable Rx interrupts */
	} else {
		if (gus->gf1.uart_cmd & 0x80)
			snd_gf1_uart_cmd(gus, gus->gf1.uart_cmd & ~0x80);	/* disable Rx interrupts */
	}
	spin_unlock_irqrestore(&gus->uart_cmd_lock, flags);
}

/*
 * rawmidi trigger callback for the output stream.  On @up: if the Tx
 * interrupt is not yet enabled, first busy-wait (lock dropped, bounded
 * to 10000 iterations) for the Rx FIFO to drain, then re-take the lock,
 * re-check that nobody enabled Tx in the meantime, prime the Tx FIFO
 * with the first byte, and enable the Tx interrupt (bit 0x20).  On
 * !@up the Tx interrupt is simply disabled.
 */
static void snd_gf1_uart_output_trigger(struct snd_rawmidi_substream *substream, int up)
{
	unsigned long flags;
	struct snd_gus_card *gus;
	char byte;
	int timeout;

	gus = substream->rmidi->private_data;
	spin_lock_irqsave(&gus->uart_cmd_lock, flags);
	if (up) {
		if ((gus->gf1.uart_cmd & 0x20) == 0) {
			spin_unlock_irqrestore(&gus->uart_cmd_lock, flags);
			/* wait for empty Rx - Tx is probably unlocked */
			timeout = 10000;
			while (timeout-- > 0 && snd_gf1_uart_stat(gus) & 0x01);
			/* Tx FIFO free? */
			spin_lock_irqsave(&gus->uart_cmd_lock, flags);
			if (gus->gf1.uart_cmd & 0x20) {
				/* someone else enabled Tx while we waited */
				spin_unlock_irqrestore(&gus->uart_cmd_lock, flags);
				return;
			}
			if (snd_gf1_uart_stat(gus) & 0x02) {
				if (snd_rawmidi_transmit(substream, &byte, 1) != 1) {
					spin_unlock_irqrestore(&gus->uart_cmd_lock, flags);
					return;
				}
				snd_gf1_uart_put(gus, byte);
			}
			snd_gf1_uart_cmd(gus, gus->gf1.uart_cmd | 0x20);	/* enable Tx interrupt */
		}
	} else {
		if (gus->gf1.uart_cmd & 0x20)
			snd_gf1_uart_cmd(gus, gus->gf1.uart_cmd & ~0x20);
	}
	spin_unlock_irqrestore(&gus->uart_cmd_lock, flags);
}

/* rawmidi callbacks for the output (Tx) stream */
static struct snd_rawmidi_ops snd_gf1_uart_output =
{
	.open =		snd_gf1_uart_output_open,
	.close =	snd_gf1_uart_output_close,
	.trigger =	snd_gf1_uart_output_trigger,
};

/* rawmidi callbacks for the input (Rx) stream */
static struct snd_rawmidi_ops snd_gf1_uart_input =
{
	.open =		snd_gf1_uart_input_open,
	.close =	snd_gf1_uart_input_close,
	.trigger =	snd_gf1_uart_input_trigger,
};

/*
 * Create and register the GF1 rawmidi device (one input and one output
 * substream, full duplex).  On success *rrawmidi (if non-NULL) receives
 * the new device and 0 is returned; otherwise a negative error code
 * from snd_rawmidi_new() is returned and *rrawmidi stays NULL.
 */
int snd_gf1_rawmidi_new(struct snd_gus_card * gus, int device, struct snd_rawmidi ** rrawmidi)
{
	struct snd_rawmidi *rmidi;
	int err;

	if (rrawmidi)
		*rrawmidi = NULL;
	if ((err = snd_rawmidi_new(gus->card, "GF1", device, 1, 1, &rmidi)) < 0)
		return err;
	strcpy(rmidi->name, gus->interwave ? "AMD InterWave" : "GF1");
	snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_OUTPUT, &snd_gf1_uart_output);
	snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_INPUT, &snd_gf1_uart_input);
	rmidi->info_flags |= SNDRV_RAWMIDI_INFO_OUTPUT | SNDRV_RAWMIDI_INFO_INPUT | SNDRV_RAWMIDI_INFO_DUPLEX;
	rmidi->private_data = gus;
	gus->midi_uart = rmidi;
	if (rrawmidi)
		*rrawmidi = rmidi;
	return err;
}
gpl-2.0
kref/u-boot-tiny210
board/mx1ads/mx1ads.c
109
4204
/*
 * board/mx1ads/mx1ads.c
 *
 * (c) Copyright 2004
 * Techware Information Technology, Inc.
 * http://www.techware.com.tw/
 *
 * Ming-Len Wu <minglen_wu@techware.com.tw>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
 * MA 02111-1307 USA
 */

#include <common.h>
#include <netdev.h>
/*#include <mc9328.h>*/
#include <asm/arch/imx-regs.h>
#include <asm/io.h>

DECLARE_GLOBAL_DATA_PTR;

/* MCU PLL divider selection (see #if below for the resulting Fout) */
#define FCLK_SPEED 1

#if FCLK_SPEED==0		/* Fout = 203MHz, Fin = 12MHz for Audio */
#define M_MDIV	0xC3
#define M_PDIV	0x4
#define M_SDIV	0x1
#elif FCLK_SPEED==1		/* Fout = 202.8MHz */
#define M_MDIV	0xA1
#define M_PDIV	0x3
#define M_SDIV	0x1
#endif

/* USB PLL divider selection */
#define USB_CLOCK 1

#if USB_CLOCK==0
#define U_M_MDIV	0xA1
#define U_M_PDIV	0x3
#define U_M_SDIV	0x1
#elif USB_CLOCK==1
#define U_M_MDIV	0x48
#define U_M_PDIV	0x3
#define U_M_SDIV	0x2
#endif

#if 0
static inline void delay (unsigned long loops)
{
	__asm__ volatile ("1:\n"
		"subs %0, %1, #1\n"
		"bne 1b":"=r" (loops):"0" (loops));
}
#endif

/*
 * Miscellaneous platform dependent initialisations
 */

/* Set the CP15 control register clock mode bits (asynchronous mode). */
void SetAsynchMode (void)
{
	__asm__ ("mrc p15,0,r0,c1,c0,0 \n"
		"mov r2, #0xC0000000 \n"
		"orr r0,r2,r0 \n"
		"mcr p15,0,r0,c1,c0,0 \n");
}

/* silicon ID latched in board_early_init_f(), reported in board_late_init() */
static u32 mc9328sid;

/*
 * Early board init: latch the silicon ID, program pad strength, MCU PLL
 * and clock-source control registers, set up chip-select 4 for the
 * CS8900 ethernet controller, enable caches and set peripheral clock
 * dividers.  Always returns 0.
 */
int board_early_init_f(void)
{
	mc9328sid = SIDR;

	GPCR = 0x000003AB;	/* I/O pad driving strength */

	/* MX1_CS1U = 0x00000A00; */	/* SRAM initialization */
	/* MX1_CS1L = 0x11110601; */

	MPCTL0 = 0x04632410;	/* setting for 150 MHz MCU PLL CLK */

	/* set FCLK divider 1 (i.e. FCLK to MCU PLL CLK) and
	 * BCLK divider to 2 (i.e. BCLK to 48 MHz) */
	CSCR = 0xAF000403;

	CSCR |= 0x00200000;	/* Trigger the restart bit(bit 21) */
	CSCR &= 0xFFFF7FFF;	/* Program PRESC bit(bit 15) to 0 to divide-by-1 */

	/* setup cs4 for cs8900 ethernet */
	CS4U = 0x00000F00;	/* Initialize CS4 for CS8900 ethernet */
	CS4L = 0x00001501;

	GIUS (0) &= 0xFF3FFFFF;
	GPR (0) &= 0xFF3FFFFF;

	/* dummy reads of the CS8900 region — presumably to settle the bus;
	 * TODO(review): confirm against the CS8900/MX1 errata */
	readl(0x1500000C);
	readl(0x1500000C);

	SetAsynchMode ();

	icache_enable ();
	dcache_enable ();

	/* set PERCLKs */
	PCDR = 0x00000055;	/* set PERCLKS */

	/* PERCLK3 is only used by SSI so the SSI driver can set it any value it likes
	 * PERCLK1 and PERCLK2 are shared so DO NOT change it in any other place
	 * all sources selected as normal interrupt */

	/* MX1_INTTYPEH = 0;
	MX1_INTTYPEL = 0; */

	return 0;
}

/* Record machine type and boot-parameter address for the kernel. */
int board_init(void)
{
	gd->bd->bi_arch_number = MACH_TYPE_MX1ADS;
	gd->bd->bi_boot_params = 0x08000100;	/* address of boot parameters */

	return 0;
}

/*
 * Late init: route stdout/stderr to the serial console and identify the
 * CPU revision from the silicon ID latched earlier.  Always returns 0.
 */
int board_late_init (void)
{
	setenv ("stdout", "serial");
	setenv ("stderr", "serial");

	switch (mc9328sid) {
	case 0x0005901d:
		printf ("MX1ADS board with MC9328 MX1 (0L44N), Silicon ID 0x%08x \n\n", mc9328sid);
		break;
	case 0x04d4c01d:
		printf ("MX1ADS board with MC9328 MXL (1L45N), Silicon ID 0x%08x \n\n", mc9328sid);
		break;
	case 0x00d4c01d:
		printf ("MX1ADS board with MC9328 MXL (2L45N), Silicon ID 0x%08x \n\n", mc9328sid);
		break;
	default:
		printf ("MX1ADS board with UNKNOWN MC9328 cpu, Silicon ID 0x%08x \n", mc9328sid);
		break;
	}
	return 0;
}

/* Probe SDRAM size.  Always returns 0. */
int dram_init(void)
{
	/* dram_init must store complete ramsize in gd->ram_size */
	gd->ram_size = get_ram_size((void *)PHYS_SDRAM_1, PHYS_SDRAM_1_SIZE);
	return 0;
}

/* Publish the single SDRAM bank's base and size. */
void dram_init_banksize(void)
{
	gd->bd->bi_dram[0].start = PHYS_SDRAM_1;
	gd->bd->bi_dram[0].size = PHYS_SDRAM_1_SIZE;
}

#ifdef CONFIG_CMD_NET
/* Register the on-board CS8900 ethernet controller, if configured. */
int board_eth_init(bd_t *bis)
{
	int rc = 0;
#ifdef CONFIG_CS8900
	rc = cs8900_initialize(0, CONFIG_CS8900_BASE);
#endif
	return rc;
}
#endif
gpl-2.0
xdje42/newlib
newlib/libm/machine/arm/sf_ceil.c
109
1727
/* sf_ceil.c -- define ceilf Copyright (c) 2011, 2012 ARM Ltd. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The name of the company may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #if __ARM_ARCH >= 8 && !defined (__SOFTFP__) #include <math.h> float ceilf (float x) { float result; asm volatile ( "vrintp.f32\t%0, %1" : "=t" (result) : "t" (x) ); return result; } #else #include "../../math/sf_ceil.c" #endif
gpl-2.0
evnit/android_kernel_samsung_msm8660-common
drivers/mtd/mtdchar.c
109
27238
/* * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * */ #include <linux/device.h> #include <linux/fs.h> #include <linux/mm.h> #include <linux/err.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/sched.h> #include <linux/mutex.h> #include <linux/backing-dev.h> #include <linux/compat.h> #include <linux/mount.h> #include <linux/blkpg.h> #include <linux/mtd/mtd.h> #include <linux/mtd/partitions.h> #include <linux/mtd/map.h> #include <asm/uaccess.h> #define MTD_INODE_FS_MAGIC 0x11307854 static DEFINE_MUTEX(mtd_mutex); static struct vfsmount *mtd_inode_mnt __read_mostly; /* * Data structure to hold the pointer to the mtd device as well * as mode information ofr various use cases. 
*/ struct mtd_file_info { struct mtd_info *mtd; struct inode *ino; enum mtd_file_modes mode; }; static loff_t mtd_lseek (struct file *file, loff_t offset, int orig) { struct mtd_file_info *mfi = file->private_data; struct mtd_info *mtd = mfi->mtd; switch (orig) { case SEEK_SET: break; case SEEK_CUR: offset += file->f_pos; break; case SEEK_END: offset += mtd->size; break; default: return -EINVAL; } if (offset >= 0 && offset <= mtd->size) return file->f_pos = offset; return -EINVAL; } static int mtd_open(struct inode *inode, struct file *file) { int minor = iminor(inode); int devnum = minor >> 1; int ret = 0; struct mtd_info *mtd; struct mtd_file_info *mfi; struct inode *mtd_ino; DEBUG(MTD_DEBUG_LEVEL0, "MTD_open\n"); /* You can't open the RO devices RW */ if ((file->f_mode & FMODE_WRITE) && (minor & 1)) return -EACCES; mutex_lock(&mtd_mutex); mtd = get_mtd_device(NULL, devnum); if (IS_ERR(mtd)) { ret = PTR_ERR(mtd); goto out; } if (mtd->type == MTD_ABSENT) { put_mtd_device(mtd); ret = -ENODEV; goto out; } mtd_ino = iget_locked(mtd_inode_mnt->mnt_sb, devnum); if (!mtd_ino) { put_mtd_device(mtd); ret = -ENOMEM; goto out; } if (mtd_ino->i_state & I_NEW) { mtd_ino->i_private = mtd; mtd_ino->i_mode = S_IFCHR; mtd_ino->i_data.backing_dev_info = mtd->backing_dev_info; unlock_new_inode(mtd_ino); } file->f_mapping = mtd_ino->i_mapping; /* You can't open it RW if it's not a writeable device */ if ((file->f_mode & FMODE_WRITE) && !(mtd->flags & MTD_WRITEABLE)) { iput(mtd_ino); put_mtd_device(mtd); ret = -EACCES; goto out; } mfi = kzalloc(sizeof(*mfi), GFP_KERNEL); if (!mfi) { iput(mtd_ino); put_mtd_device(mtd); ret = -ENOMEM; goto out; } mfi->ino = mtd_ino; mfi->mtd = mtd; file->private_data = mfi; out: mutex_unlock(&mtd_mutex); return ret; } /* mtd_open */ /*====================================================================*/ static int mtd_close(struct inode *inode, struct file *file) { struct mtd_file_info *mfi = file->private_data; struct mtd_info *mtd = mfi->mtd; 
DEBUG(MTD_DEBUG_LEVEL0, "MTD_close\n"); /* Only sync if opened RW */ if ((file->f_mode & FMODE_WRITE) && mtd->sync) mtd->sync(mtd); iput(mfi->ino); put_mtd_device(mtd); file->private_data = NULL; kfree(mfi); return 0; } /* mtd_close */ /* Back in June 2001, dwmw2 wrote: * * FIXME: This _really_ needs to die. In 2.5, we should lock the * userspace buffer down and use it directly with readv/writev. * * The implementation below, using mtd_kmalloc_up_to, mitigates * allocation failures when the system is under low-memory situations * or if memory is highly fragmented at the cost of reducing the * performance of the requested transfer due to a smaller buffer size. * * A more complex but more memory-efficient implementation based on * get_user_pages and iovecs to cover extents of those pages is a * longer-term goal, as intimated by dwmw2 above. However, for the * write case, this requires yet more complex head and tail transfer * handling when those head and tail offsets and sizes are such that * alignment requirements are not met in the NAND subdriver. 
*/ static ssize_t mtd_read(struct file *file, char __user *buf, size_t count,loff_t *ppos) { struct mtd_file_info *mfi = file->private_data; struct mtd_info *mtd = mfi->mtd; size_t retlen=0; size_t total_retlen=0; int ret=0; int len; size_t size = count; char *kbuf; DEBUG(MTD_DEBUG_LEVEL0,"MTD_read\n"); if (*ppos + count > mtd->size) count = mtd->size - *ppos; if (!count) return 0; kbuf = mtd_kmalloc_up_to(mtd, &size); if (!kbuf) return -ENOMEM; while (count) { len = min_t(size_t, count, size); switch (mfi->mode) { case MTD_MODE_OTP_FACTORY: ret = mtd->read_fact_prot_reg(mtd, *ppos, len, &retlen, kbuf); break; case MTD_MODE_OTP_USER: ret = mtd->read_user_prot_reg(mtd, *ppos, len, &retlen, kbuf); break; case MTD_MODE_RAW: { struct mtd_oob_ops ops; ops.mode = MTD_OOB_RAW; ops.datbuf = kbuf; ops.oobbuf = NULL; ops.len = len; ret = mtd->read_oob(mtd, *ppos, &ops); retlen = ops.retlen; break; } default: ret = mtd->read(mtd, *ppos, len, &retlen, kbuf); } /* Nand returns -EBADMSG on ecc errors, but it returns * the data. For our userspace tools it is important * to dump areas with ecc errors ! * For kernel internal usage it also might return -EUCLEAN * to signal the caller that a bitflip has occurred and has * been corrected by the ECC algorithm. 
* Userspace software which accesses NAND this way * must be aware of the fact that it deals with NAND */ if (!ret || (ret == -EUCLEAN) || (ret == -EBADMSG)) { *ppos += retlen; if (copy_to_user(buf, kbuf, retlen)) { kfree(kbuf); return -EFAULT; } else total_retlen += retlen; count -= retlen; buf += retlen; if (retlen == 0) count = 0; } else { kfree(kbuf); return ret; } } kfree(kbuf); return total_retlen; } /* mtd_read */ static ssize_t mtd_write(struct file *file, const char __user *buf, size_t count,loff_t *ppos) { struct mtd_file_info *mfi = file->private_data; struct mtd_info *mtd = mfi->mtd; size_t size = count; char *kbuf; size_t retlen; size_t total_retlen=0; int ret=0; int len; DEBUG(MTD_DEBUG_LEVEL0,"MTD_write\n"); if (*ppos == mtd->size) return -ENOSPC; if (*ppos + count > mtd->size) count = mtd->size - *ppos; if (!count) return 0; kbuf = mtd_kmalloc_up_to(mtd, &size); if (!kbuf) return -ENOMEM; while (count) { len = min_t(size_t, count, size); if (copy_from_user(kbuf, buf, len)) { kfree(kbuf); return -EFAULT; } switch (mfi->mode) { case MTD_MODE_OTP_FACTORY: ret = -EROFS; break; case MTD_MODE_OTP_USER: if (!mtd->write_user_prot_reg) { ret = -EOPNOTSUPP; break; } ret = mtd->write_user_prot_reg(mtd, *ppos, len, &retlen, kbuf); break; case MTD_MODE_RAW: { struct mtd_oob_ops ops; ops.mode = MTD_OOB_RAW; ops.datbuf = kbuf; ops.oobbuf = NULL; ops.ooboffs = 0; ops.len = len; ret = mtd->write_oob(mtd, *ppos, &ops); retlen = ops.retlen; break; } default: ret = (*(mtd->write))(mtd, *ppos, len, &retlen, kbuf); } if (!ret) { *ppos += retlen; total_retlen += retlen; count -= retlen; buf += retlen; } else { kfree(kbuf); return ret; } } kfree(kbuf); return total_retlen; } /* mtd_write */ /*====================================================================== IOCTL calls for getting device parameters. 
======================================================================*/ static void mtdchar_erase_callback (struct erase_info *instr) { wake_up((wait_queue_head_t *)instr->priv); } #ifdef CONFIG_HAVE_MTD_OTP static int otp_select_filemode(struct mtd_file_info *mfi, int mode) { struct mtd_info *mtd = mfi->mtd; int ret = 0; switch (mode) { case MTD_OTP_FACTORY: if (!mtd->read_fact_prot_reg) ret = -EOPNOTSUPP; else mfi->mode = MTD_MODE_OTP_FACTORY; break; case MTD_OTP_USER: if (!mtd->read_fact_prot_reg) ret = -EOPNOTSUPP; else mfi->mode = MTD_MODE_OTP_USER; break; default: ret = -EINVAL; case MTD_OTP_OFF: break; } return ret; } #else # define otp_select_filemode(f,m) -EOPNOTSUPP #endif static int mtd_do_writeoob(struct file *file, struct mtd_info *mtd, uint64_t start, uint32_t length, void __user *ptr, uint32_t __user *retp) { struct mtd_oob_ops ops; uint32_t retlen; int ret = 0; if (!(file->f_mode & FMODE_WRITE)) return -EPERM; if (length > 4096) return -EINVAL; if (!mtd->write_oob) ret = -EOPNOTSUPP; else ret = access_ok(VERIFY_READ, ptr, length) ? 0 : -EFAULT; if (ret) return ret; ops.ooblen = length; ops.ooboffs = start & (mtd->oobsize - 1); ops.datbuf = NULL; ops.mode = MTD_OOB_PLACE; if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs)) return -EINVAL; ops.oobbuf = memdup_user(ptr, length); if (IS_ERR(ops.oobbuf)) return PTR_ERR(ops.oobbuf); start &= ~((uint64_t)mtd->oobsize - 1); ret = mtd->write_oob(mtd, start, &ops); if (ops.oobretlen > 0xFFFFFFFFU) ret = -EOVERFLOW; retlen = ops.oobretlen; if (copy_to_user(retp, &retlen, sizeof(length))) ret = -EFAULT; kfree(ops.oobbuf); return ret; } static int mtd_do_readoob(struct mtd_info *mtd, uint64_t start, uint32_t length, void __user *ptr, uint32_t __user *retp) { struct mtd_oob_ops ops; int ret = 0; if (length > 4096) return -EINVAL; if (!mtd->read_oob) ret = -EOPNOTSUPP; else ret = access_ok(VERIFY_WRITE, ptr, length) ? 
0 : -EFAULT; if (ret) return ret; ops.ooblen = length; ops.ooboffs = start & (mtd->oobsize - 1); ops.datbuf = NULL; ops.mode = MTD_OOB_PLACE; if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs)) return -EINVAL; ops.oobbuf = kmalloc(length, GFP_KERNEL); if (!ops.oobbuf) return -ENOMEM; start &= ~((uint64_t)mtd->oobsize - 1); ret = mtd->read_oob(mtd, start, &ops); if (put_user(ops.oobretlen, retp)) ret = -EFAULT; else if (ops.oobretlen && copy_to_user(ptr, ops.oobbuf, ops.oobretlen)) ret = -EFAULT; kfree(ops.oobbuf); return ret; } /* * Copies (and truncates, if necessary) data from the larger struct, * nand_ecclayout, to the smaller, deprecated layout struct, * nand_ecclayout_user. This is necessary only to suppport the deprecated * API ioctl ECCGETLAYOUT while allowing all new functionality to use * nand_ecclayout flexibly (i.e. the struct may change size in new * releases without requiring major rewrites). */ static int shrink_ecclayout(const struct nand_ecclayout *from, struct nand_ecclayout_user *to) { int i; if (!from || !to) return -EINVAL; memset(to, 0, sizeof(*to)); to->eccbytes = min((int)from->eccbytes, MTD_MAX_ECCPOS_ENTRIES); for (i = 0; i < to->eccbytes; i++) to->eccpos[i] = from->eccpos[i]; for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES; i++) { if (from->oobfree[i].length == 0 && from->oobfree[i].offset == 0) break; to->oobavail += from->oobfree[i].length; to->oobfree[i] = from->oobfree[i]; } return 0; } static int mtd_blkpg_ioctl(struct mtd_info *mtd, struct blkpg_ioctl_arg __user *arg) { struct blkpg_ioctl_arg a; struct blkpg_partition p; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (copy_from_user(&a, arg, sizeof(struct blkpg_ioctl_arg))) return -EFAULT; if (copy_from_user(&p, a.data, sizeof(struct blkpg_partition))) return -EFAULT; switch (a.op) { case BLKPG_ADD_PARTITION: /* Only master mtd device must be used to add partitions */ if (mtd_is_partition(mtd)) return -EINVAL; return mtd_add_partition(mtd, p.devname, p.start, p.length); case 
BLKPG_DEL_PARTITION: if (p.pno < 0) return -EINVAL; return mtd_del_partition(mtd, p.pno); default: return -EINVAL; } } static int mtd_ioctl(struct file *file, u_int cmd, u_long arg) { struct mtd_file_info *mfi = file->private_data; struct mtd_info *mtd = mfi->mtd; void __user *argp = (void __user *)arg; int ret = 0; u_long size; struct mtd_info_user info; DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n"); size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT; if (cmd & IOC_IN) { if (!access_ok(VERIFY_READ, argp, size)) return -EFAULT; } if (cmd & IOC_OUT) { if (!access_ok(VERIFY_WRITE, argp, size)) return -EFAULT; } switch (cmd) { case MEMGETREGIONCOUNT: if (copy_to_user(argp, &(mtd->numeraseregions), sizeof(int))) return -EFAULT; break; case MEMGETREGIONINFO: { uint32_t ur_idx; struct mtd_erase_region_info *kr; struct region_info_user __user *ur = argp; if (get_user(ur_idx, &(ur->regionindex))) return -EFAULT; if (ur_idx >= mtd->numeraseregions) return -EINVAL; kr = &(mtd->eraseregions[ur_idx]); if (put_user(kr->offset, &(ur->offset)) || put_user(kr->erasesize, &(ur->erasesize)) || put_user(kr->numblocks, &(ur->numblocks))) return -EFAULT; break; } case MEMGETINFO: memset(&info, 0, sizeof(info)); info.type = mtd->type; info.flags = mtd->flags; info.size = mtd->size; info.erasesize = mtd->erasesize; info.writesize = mtd->writesize; info.oobsize = mtd->oobsize; /* The below fields are obsolete */ info.ecctype = -1; if (copy_to_user(argp, &info, sizeof(struct mtd_info_user))) return -EFAULT; break; case MEMERASE: case MEMERASE64: { struct erase_info *erase; if(!(file->f_mode & FMODE_WRITE)) return -EPERM; erase=kzalloc(sizeof(struct erase_info),GFP_KERNEL); if (!erase) ret = -ENOMEM; else { wait_queue_head_t waitq; DECLARE_WAITQUEUE(wait, current); init_waitqueue_head(&waitq); if (cmd == MEMERASE64) { struct erase_info_user64 einfo64; if (copy_from_user(&einfo64, argp, sizeof(struct erase_info_user64))) { kfree(erase); return -EFAULT; } erase->addr = einfo64.start; erase->len = 
einfo64.length; } else { struct erase_info_user einfo32; if (copy_from_user(&einfo32, argp, sizeof(struct erase_info_user))) { kfree(erase); return -EFAULT; } erase->addr = einfo32.start; erase->len = einfo32.length; } erase->mtd = mtd; erase->callback = mtdchar_erase_callback; erase->priv = (unsigned long)&waitq; /* FIXME: Allow INTERRUPTIBLE. Which means not having the wait_queue head on the stack. If the wq_head is on the stack, and we leave because we got interrupted, then the wq_head is no longer there when the callback routine tries to wake us up. */ ret = mtd->erase(mtd, erase); if (!ret) { set_current_state(TASK_UNINTERRUPTIBLE); add_wait_queue(&waitq, &wait); if (erase->state != MTD_ERASE_DONE && erase->state != MTD_ERASE_FAILED) schedule(); remove_wait_queue(&waitq, &wait); set_current_state(TASK_RUNNING); ret = (erase->state == MTD_ERASE_FAILED)?-EIO:0; } kfree(erase); } break; } case MEMWRITEOOB: { struct mtd_oob_buf buf; struct mtd_oob_buf __user *buf_user = argp; /* NOTE: writes return length to buf_user->length */ if (copy_from_user(&buf, argp, sizeof(buf))) ret = -EFAULT; else ret = mtd_do_writeoob(file, mtd, buf.start, buf.length, buf.ptr, &buf_user->length); break; } case MEMREADOOB: { struct mtd_oob_buf buf; struct mtd_oob_buf __user *buf_user = argp; /* NOTE: writes return length to buf_user->start */ if (copy_from_user(&buf, argp, sizeof(buf))) ret = -EFAULT; else ret = mtd_do_readoob(mtd, buf.start, buf.length, buf.ptr, &buf_user->start); break; } case MEMWRITEOOB64: { struct mtd_oob_buf64 buf; struct mtd_oob_buf64 __user *buf_user = argp; if (copy_from_user(&buf, argp, sizeof(buf))) ret = -EFAULT; else ret = mtd_do_writeoob(file, mtd, buf.start, buf.length, (void __user *)(uintptr_t)buf.usr_ptr, &buf_user->length); break; } case MEMREADOOB64: { struct mtd_oob_buf64 buf; struct mtd_oob_buf64 __user *buf_user = argp; if (copy_from_user(&buf, argp, sizeof(buf))) ret = -EFAULT; else ret = mtd_do_readoob(mtd, buf.start, buf.length, (void __user 
*)(uintptr_t)buf.usr_ptr, &buf_user->length); break; } case MEMLOCK: { struct erase_info_user einfo; if (copy_from_user(&einfo, argp, sizeof(einfo))) return -EFAULT; if (!mtd->lock) ret = -EOPNOTSUPP; else ret = mtd->lock(mtd, einfo.start, einfo.length); break; } case MEMUNLOCK: { struct erase_info_user einfo; if (copy_from_user(&einfo, argp, sizeof(einfo))) return -EFAULT; if (!mtd->unlock) ret = -EOPNOTSUPP; else ret = mtd->unlock(mtd, einfo.start, einfo.length); break; } case MEMISLOCKED: { struct erase_info_user einfo; if (copy_from_user(&einfo, argp, sizeof(einfo))) return -EFAULT; if (!mtd->is_locked) ret = -EOPNOTSUPP; else ret = mtd->is_locked(mtd, einfo.start, einfo.length); break; } /* Legacy interface */ case MEMGETOOBSEL: { struct nand_oobinfo oi; if (!mtd->ecclayout) return -EOPNOTSUPP; if (mtd->ecclayout->eccbytes > ARRAY_SIZE(oi.eccpos)) return -EINVAL; oi.useecc = MTD_NANDECC_AUTOPLACE; memcpy(&oi.eccpos, mtd->ecclayout->eccpos, sizeof(oi.eccpos)); memcpy(&oi.oobfree, mtd->ecclayout->oobfree, sizeof(oi.oobfree)); oi.eccbytes = mtd->ecclayout->eccbytes; if (copy_to_user(argp, &oi, sizeof(struct nand_oobinfo))) return -EFAULT; break; } case MEMGETBADBLOCK: { loff_t offs; if (copy_from_user(&offs, argp, sizeof(loff_t))) return -EFAULT; if (!mtd->block_isbad) ret = -EOPNOTSUPP; else return mtd->block_isbad(mtd, offs); break; } case MEMSETBADBLOCK: { loff_t offs; if (copy_from_user(&offs, argp, sizeof(loff_t))) return -EFAULT; if (!mtd->block_markbad) ret = -EOPNOTSUPP; else return mtd->block_markbad(mtd, offs); break; } #ifdef CONFIG_HAVE_MTD_OTP case OTPSELECT: { int mode; if (copy_from_user(&mode, argp, sizeof(int))) return -EFAULT; mfi->mode = MTD_MODE_NORMAL; ret = otp_select_filemode(mfi, mode); file->f_pos = 0; break; } case OTPGETREGIONCOUNT: case OTPGETREGIONINFO: { struct otp_info *buf = kmalloc(4096, GFP_KERNEL); if (!buf) return -ENOMEM; ret = -EOPNOTSUPP; switch (mfi->mode) { case MTD_MODE_OTP_FACTORY: if (mtd->get_fact_prot_info) ret = 
mtd->get_fact_prot_info(mtd, buf, 4096); break; case MTD_MODE_OTP_USER: if (mtd->get_user_prot_info) ret = mtd->get_user_prot_info(mtd, buf, 4096); break; default: break; } if (ret >= 0) { if (cmd == OTPGETREGIONCOUNT) { int nbr = ret / sizeof(struct otp_info); ret = copy_to_user(argp, &nbr, sizeof(int)); } else ret = copy_to_user(argp, buf, ret); if (ret) ret = -EFAULT; } kfree(buf); break; } case OTPLOCK: { struct otp_info oinfo; if (mfi->mode != MTD_MODE_OTP_USER) return -EINVAL; if (copy_from_user(&oinfo, argp, sizeof(oinfo))) return -EFAULT; if (!mtd->lock_user_prot_reg) return -EOPNOTSUPP; ret = mtd->lock_user_prot_reg(mtd, oinfo.start, oinfo.length); break; } #endif /* This ioctl is being deprecated - it truncates the ecc layout */ case ECCGETLAYOUT: { struct nand_ecclayout_user *usrlay; if (!mtd->ecclayout) return -EOPNOTSUPP; usrlay = kmalloc(sizeof(*usrlay), GFP_KERNEL); if (!usrlay) return -ENOMEM; shrink_ecclayout(mtd->ecclayout, usrlay); if (copy_to_user(argp, usrlay, sizeof(*usrlay))) ret = -EFAULT; kfree(usrlay); break; } case ECCGETSTATS: { if (copy_to_user(argp, &mtd->ecc_stats, sizeof(struct mtd_ecc_stats))) return -EFAULT; break; } case MTDFILEMODE: { mfi->mode = 0; switch(arg) { case MTD_MODE_OTP_FACTORY: case MTD_MODE_OTP_USER: ret = otp_select_filemode(mfi, arg); break; case MTD_MODE_RAW: if (!mtd->read_oob || !mtd->write_oob) return -EOPNOTSUPP; mfi->mode = arg; case MTD_MODE_NORMAL: break; default: ret = -EINVAL; } file->f_pos = 0; break; } case BLKPG: { ret = mtd_blkpg_ioctl(mtd, (struct blkpg_ioctl_arg __user *)arg); break; } case BLKRRPART: { /* No reread partition feature. 
Just return ok */
		ret = 0;
		break;
	}

	default:
		ret = -ENOTTY;
	}

	return ret;
} /* memory_ioctl */

/*
 * Serialize all ioctl() handling against each other with the global
 * mtd_mutex; the real work is done in mtd_ioctl().
 */
static long mtd_unlocked_ioctl(struct file *file, u_int cmd, u_long arg)
{
	int ret;

	mutex_lock(&mtd_mutex);
	ret = mtd_ioctl(file, cmd, arg);
	mutex_unlock(&mtd_mutex);

	return ret;
}

#ifdef CONFIG_COMPAT

/* 32-bit layout of struct mtd_oob_buf for compat ioctls. */
struct mtd_oob_buf32 {
	u_int32_t start;
	u_int32_t length;
	compat_caddr_t ptr;	/* unsigned char* */
};

#define MEMWRITEOOB32		_IOWR('M', 3, struct mtd_oob_buf32)
#define MEMREADOOB32		_IOWR('M', 4, struct mtd_oob_buf32)

/*
 * Compat entry point: translate the 32-bit OOB buffer ioctls and pass
 * everything else straight through to mtd_ioctl() (with the pointer
 * argument converted via compat_ptr()).
 */
static long mtd_compat_ioctl(struct file *file, unsigned int cmd,
	unsigned long arg)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	void __user *argp = compat_ptr(arg);
	int ret = 0;

	mutex_lock(&mtd_mutex);

	switch (cmd) {
	case MEMWRITEOOB32:
	{
		struct mtd_oob_buf32 buf;
		struct mtd_oob_buf32 __user *buf_user = argp;

		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtd_do_writeoob(file, mtd, buf.start,
				buf.length, compat_ptr(buf.ptr),
				&buf_user->length);
		break;
	}

	case MEMREADOOB32:
	{
		struct mtd_oob_buf32 buf;
		struct mtd_oob_buf32 __user *buf_user = argp;

		/* NOTE: writes return length to buf->start */
		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtd_do_readoob(mtd, buf.start,
				buf.length, compat_ptr(buf.ptr),
				&buf_user->start);
		break;
	}
	default:
		ret = mtd_ioctl(file, cmd, (unsigned long)argp);
	}

	mutex_unlock(&mtd_mutex);

	return ret;
}

#endif /* CONFIG_COMPAT */

/*
 * try to determine where a shared mapping can be made
 * - only supported for NOMMU at the moment (MMU can't doesn't copy private
 *   mappings)
 */
#ifndef CONFIG_MMU
static unsigned long mtd_get_unmapped_area(struct file *file,
					   unsigned long addr,
					   unsigned long len,
					   unsigned long pgoff,
					   unsigned long flags)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;

	if (mtd->get_unmapped_area) {
		unsigned long offset;

		/* fixed-address mappings are not supported */
		if (addr != 0)
			return (unsigned long) -EINVAL;

		if (len > mtd->size || pgoff >= (mtd->size >> PAGE_SHIFT))
			return (unsigned long) -EINVAL;

		offset = pgoff << PAGE_SHIFT;
		if (offset > mtd->size - len)
			return (unsigned long) -EINVAL;

		return mtd->get_unmapped_area(mtd, len, offset, flags);
	}

	/* can't map directly */
	return (unsigned long) -ENOSYS;
}
#endif

/* Size of the VMA in bytes. */
static inline unsigned long get_vm_size(struct vm_area_struct *vma)
{
	return vma->vm_end - vma->vm_start;
}

/* Byte offset encoded in the VMA's page offset. */
static inline resource_size_t get_vm_offset(struct vm_area_struct *vma)
{
	return (resource_size_t) vma->vm_pgoff << PAGE_SHIFT;
}

/*
 * Set a new vm offset.
 *
 * Verify that the incoming offset really works as a page offset,
 * and that the offset and size fit in a resource_size_t.
 */
static inline int set_vm_offset(struct vm_area_struct *vma, resource_size_t off)
{
	pgoff_t pgoff = off >> PAGE_SHIFT;

	if (off != (resource_size_t) pgoff << PAGE_SHIFT)
		return -EINVAL;
	if (off + get_vm_size(vma) - 1 < off)
		return -EINVAL;
	vma->vm_pgoff = pgoff;
	return 0;
}

/*
 * set up a mapping for shared memory segments
 */
static int mtd_mmap(struct file *file, struct vm_area_struct *vma)
{
#ifdef CONFIG_MMU
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	struct map_info *map = mtd->priv;

	/* This is broken because it assumes the MTD device is map-based
	   and that mtd->priv is a valid struct map_info.  It should be
	   replaced with something that uses the mtd_get_unmapped_area()
	   operation properly. */
	if (0 /*mtd->type == MTD_RAM || mtd->type == MTD_ROM*/) {
#ifdef pgprot_noncached
		if (file->f_flags & O_DSYNC || map->phys >= __pa(high_memory))
			vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
#endif
		return vm_iomap_memory(vma, map->phys, map->size);
	}
	return -ENOSYS;
#else
	/* NOMMU: only shared mappings are meaningful */
	return vma->vm_flags & VM_SHARED ? 0 : -ENOSYS;
#endif
}

static const struct file_operations mtd_fops = {
	.owner		= THIS_MODULE,
	.llseek		= mtd_lseek,
	.read		= mtd_read,
	.write		= mtd_write,
	.unlocked_ioctl	= mtd_unlocked_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= mtd_compat_ioctl,
#endif
	.open		= mtd_open,
	.release	= mtd_close,
	.mmap		= mtd_mmap,
#ifndef CONFIG_MMU
	.get_unmapped_area = mtd_get_unmapped_area,
#endif
};

/* Internal pseudo filesystem used to hang per-device inodes off. */
static struct dentry *mtd_inodefs_mount(struct file_system_type *fs_type,
				int flags, const char *dev_name, void *data)
{
	return mount_pseudo(fs_type, "mtd_inode:", NULL, NULL,
			    MTD_INODE_FS_MAGIC);
}

static struct file_system_type mtd_inodefs_type = {
	.name = "mtd_inodefs",
	.mount = mtd_inodefs_mount,
	.kill_sb = kill_anon_super,
};

static void mtdchar_notify_add(struct mtd_info *mtd)
{
	/* nothing to do on device addition; inodes are created lazily */
}

/* Drop the cached inode (if any) when an MTD device goes away. */
static void mtdchar_notify_remove(struct mtd_info *mtd)
{
	struct inode *mtd_ino = ilookup(mtd_inode_mnt->mnt_sb, mtd->index);

	if (mtd_ino) {
		/* Destroy the inode if it exists */
		mtd_ino->i_nlink = 0;
		iput(mtd_ino);
	}
}

static struct mtd_notifier mtdchar_notifier = {
	.add = mtdchar_notify_add,
	.remove = mtdchar_notify_remove,
};

/*
 * Register the char-device major, the internal pseudo filesystem and the
 * MTD notifier.  Error paths unwind in reverse order via gotos.
 */
static int __init init_mtdchar(void)
{
	int ret;

	ret = __register_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS,
				   "mtd", &mtd_fops);
	if (ret < 0) {
		pr_notice("Can't allocate major number %d for "
			  "Memory Technology Devices.\n", MTD_CHAR_MAJOR);
		return ret;
	}

	ret = register_filesystem(&mtd_inodefs_type);
	if (ret) {
		pr_notice("Can't register mtd_inodefs filesystem: %d\n", ret);
		goto err_unregister_chdev;
	}

	mtd_inode_mnt = kern_mount(&mtd_inodefs_type);
	if (IS_ERR(mtd_inode_mnt)) {
		ret = PTR_ERR(mtd_inode_mnt);
		pr_notice("Error mounting mtd_inodefs filesystem: %d\n", ret);
		goto err_unregister_filesystem;
	}
	register_mtd_user(&mtdchar_notifier);

	return ret;

err_unregister_filesystem:
	unregister_filesystem(&mtd_inodefs_type);
err_unregister_chdev:
	__unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd");
	return ret;
}

static void __exit cleanup_mtdchar(void)
{
	unregister_mtd_user(&mtdchar_notifier);
	kern_unmount(mtd_inode_mnt);
	unregister_filesystem(&mtd_inodefs_type);
	__unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd");
}

module_init(init_mtdchar);
module_exit(cleanup_mtdchar);

MODULE_ALIAS_CHARDEV_MAJOR(MTD_CHAR_MAJOR);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_DESCRIPTION("Direct character-device access to MTD devices");
/* NOTE(review): this alias duplicates the one above module metadata —
 * harmless, but one of the two could probably be removed upstream. */
MODULE_ALIAS_CHARDEV_MAJOR(MTD_CHAR_MAJOR);
gpl-2.0
iHateWEBos/shooter_kernel_34
kernel/kmod.c
365
10699
/*
 * kmod: kernel-side usermode helper support (modprobe invocation and
 * the generic call_usermodehelper machinery).
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/unistd.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/cred.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/workqueue.h>
#include <linux/security.h>
#include <linux/mount.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/resource.h>
#include <linux/notifier.h>
#include <linux/suspend.h>
#include <linux/rwsem.h>
#include <asm/uaccess.h>

#include <trace/events/module.h>

extern int max_threads;

/* Single-threaded workqueue on which all helpers are spawned. */
static struct workqueue_struct *khelper_wq;

/* Sentinel ctl_table .data values distinguishing the two sysctl entries. */
#define CAP_BSET	(void *)1
#define CAP_PI		(void *)2

/* Capability masks applied to every usermode helper; see umh_sysctl_lock. */
static kernel_cap_t usermodehelper_bset = CAP_FULL_SET;
static kernel_cap_t usermodehelper_inheritable = CAP_FULL_SET;
static DEFINE_SPINLOCK(umh_sysctl_lock);
static DECLARE_RWSEM(umhelper_sem);

#ifdef CONFIG_MODULES

/*
 * modprobe path is set via /proc/sys.
 */
char modprobe_path[KMOD_PATH_LEN] = "/sbin/modprobe";

/* Cleanup callback: frees the kstrdup'd module name and the argv array. */
static void free_modprobe_argv(struct subprocess_info *info)
{
	kfree(info->argv[3]); /* check call_modprobe() */
	kfree(info->argv);
}

/*
 * Spawn "modprobe -q -- <module_name>" as a killable usermode helper.
 * Ownership of the argv array (and the duplicated name inside it) is
 * transferred to the helper and released by free_modprobe_argv().
 */
static int call_modprobe(char *module_name, int wait)
{
	static char *envp[] = {
		"HOME=/",
		"TERM=linux",
		"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
		NULL
	};

	char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);
	if (!argv)
		goto out;

	module_name = kstrdup(module_name, GFP_KERNEL);
	if (!module_name)
		goto free_argv;

	argv[0] = modprobe_path;
	argv[1] = "-q";
	argv[2] = "--";
	argv[3] = module_name;	/* check free_modprobe_argv() */
	argv[4] = NULL;

	return call_usermodehelper_fns(modprobe_path, argv, envp,
		wait | UMH_KILLABLE, NULL, free_modprobe_argv, NULL);
free_argv:
	kfree(argv);
out:
	return -ENOMEM;
}

/**
 * __request_module - try to load a kernel module
 * @wait: wait (or not) for the operation to complete
 * @fmt: printf style format string for the name of the module
 *
 * Load a module using the user mode module loader. The function returns
 * zero on success or a negative errno code on failure.  The concurrency
 * limit (MAX_KMOD_CONCURRENT, capped at half of max_threads) guards
 * against runaway modprobe loops.
 */
int __request_module(bool wait, const char *fmt, ...)
{
	va_list args;
	char module_name[MODULE_NAME_LEN];
	unsigned int max_modprobes;
	int ret;
	static atomic_t kmod_concurrent = ATOMIC_INIT(0);
#define MAX_KMOD_CONCURRENT 50	/* Completely arbitrary value - KAO */
	static int kmod_loop_msg;

	va_start(args, fmt);
	ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
	va_end(args);
	if (ret >= MODULE_NAME_LEN)
		return -ENAMETOOLONG;

	ret = security_kernel_module_request(module_name);
	if (ret)
		return ret;

	max_modprobes = min(max_threads/2, MAX_KMOD_CONCURRENT);
	atomic_inc(&kmod_concurrent);
	if (atomic_read(&kmod_concurrent) > max_modprobes) {
		/* We may be blaming an innocent here, but unlikely */
		if (kmod_loop_msg < 5) {
			printk(KERN_ERR
			       "request_module: runaway loop modprobe %s\n",
			       module_name);
			kmod_loop_msg++;
		}
		atomic_dec(&kmod_concurrent);
		return -ENOMEM;
	}

	trace_module_request(module_name, wait, _RET_IP_);

	ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);

	atomic_dec(&kmod_concurrent);
	return ret;
}
EXPORT_SYMBOL(__request_module);
#endif /* CONFIG_MODULES */

/*
 * This is the task which runs the usermode application: reset signal
 * handlers, drop CPU affinity/nice inherited from the spawner, build
 * restricted credentials, run the optional init callback, then exec.
 */
static int ____call_usermodehelper(void *data)
{
	struct subprocess_info *sub_info = data;
	struct cred *new;
	int retval;

	spin_lock_irq(&current->sighand->siglock);
	flush_signal_handlers(current, 1);
	spin_unlock_irq(&current->sighand->siglock);

	/* We can run anywhere, unlike our parent keventd(). */
	set_cpus_allowed_ptr(current, cpu_all_mask);

	/*
	 * Our parent is keventd, which runs with elevated scheduling
	 * priority. Avoid propagating that into the userspace child.
	 */
	set_user_nice(current, 0);

	retval = -ENOMEM;
	new = prepare_kernel_cred(current);
	if (!new)
		goto fail;

	spin_lock(&umh_sysctl_lock);
	new->cap_bset = cap_intersect(usermodehelper_bset, new->cap_bset);
	new->cap_inheritable = cap_intersect(usermodehelper_inheritable,
					     new->cap_inheritable);
	spin_unlock(&umh_sysctl_lock);

	if (sub_info->init) {
		retval = sub_info->init(sub_info, new);
		if (retval) {
			abort_creds(new);
			goto fail;
		}
	}

	commit_creds(new);

	retval = kernel_execve(sub_info->path,
			       (const char *const *)sub_info->argv,
			       (const char *const *)sub_info->envp);

	/* Exec failed? */
fail:
	sub_info->retval = retval;
	return 0;
}

/* Run the caller-supplied cleanup (if any) and free the request. */
void call_usermodehelper_freeinfo(struct subprocess_info *info)
{
	if (info->cleanup)
		(*info->cleanup)(info);
	kfree(info);
}
EXPORT_SYMBOL(call_usermodehelper_freeinfo);

/*
 * Hand the result back to a waiter, or free the request if the waiter
 * already gave up (xchg with NULL acts as the ownership handoff).
 */
static void umh_complete(struct subprocess_info *sub_info)
{
	struct completion *comp = xchg(&sub_info->complete, NULL);
	/*
	 * See call_usermodehelper_exec(). If xchg() returns NULL
	 * we own sub_info, the UMH_KILLABLE caller has gone away.
	 */
	if (comp)
		complete(comp);
	else
		call_usermodehelper_freeinfo(sub_info);
}

/*
 * Intermediate thread for UMH_WAIT_PROC: forks the real helper and
 * reaps it with sys_wait4() so the caller gets the child's exit status.
 */
static int wait_for_helper(void *data)
{
	struct subprocess_info *sub_info = data;
	pid_t pid;

	/* If SIGCLD is ignored sys_wait4 won't populate the status. */
	spin_lock_irq(&current->sighand->siglock);
	current->sighand->action[SIGCHLD-1].sa.sa_handler = SIG_DFL;
	spin_unlock_irq(&current->sighand->siglock);

	pid = kernel_thread(____call_usermodehelper, sub_info, SIGCHLD);
	if (pid < 0) {
		sub_info->retval = pid;
	} else {
		int ret = -ECHILD;
		/*
		 * Normally it is bogus to call wait4() from in-kernel
		 * because wait4() wants to write the exit code to a
		 * userspace address.  But wait_for_helper() always runs
		 * as keventd, and put_user() to a kernel address works
		 * OK for kernel threads, due to their having an mm_segment_t
		 * which spans the entire address space.
		 */
		sys_wait4(pid, (int __user *)&ret, 0, NULL);

		/*
		 * If ret is 0, either ____call_usermodehelper failed and
		 * the real error code is already in sub_info->retval or
		 * sub_info->retval is 0 anyway, so don't mess with it then.
		 */
		if (ret)
			sub_info->retval = ret;
	}

	umh_complete(sub_info);
	return 0;
}

/* Workqueue callback: actually spawns the helper per the wait mode. */
static void __call_usermodehelper(struct work_struct *work)
{
	struct subprocess_info *sub_info =
		container_of(work, struct subprocess_info, work);
	int wait = sub_info->wait & ~UMH_KILLABLE;
	pid_t pid;

	/* CLONE_VFORK: wait until the usermode helper has execve'd
	 * successfully.  We need the data structures to stay around
	 * until that is done.  */
	if (wait == UMH_WAIT_PROC)
		pid = kernel_thread(wait_for_helper, sub_info,
				    CLONE_FS | CLONE_FILES | SIGCHLD);
	else
		pid = kernel_thread(____call_usermodehelper, sub_info,
				    CLONE_VFORK | SIGCHLD);

	switch (wait) {
	case UMH_NO_WAIT:
		call_usermodehelper_freeinfo(sub_info);
		break;

	case UMH_WAIT_PROC:
		if (pid > 0)
			break;
		/* FALLTHROUGH: fork failed, report it like WAIT_EXEC */
	case UMH_WAIT_EXEC:
		if (pid < 0)
			sub_info->retval = pid;
		umh_complete(sub_info);
	}
}

/*
 * If set, call_usermodehelper_exec() will exit immediately returning -EBUSY
 * (used for preventing user land processes from being created after the user
 * land has been frozen during a system-wide hibernation or suspend operation).
 * Should always be manipulated under umhelper_sem acquired for write.
 */
static enum umh_disable_depth usermodehelper_disabled = UMH_DISABLED;

/* Number of helpers running */
static atomic_t running_helpers = ATOMIC_INIT(0);

/* Wakeup point for waiters in usermodehelper_disable(). */
static DECLARE_WAIT_QUEUE_HEAD(running_helpers_waitq);

/* Wakeup point for waiters in usermodehelper_read_lock_wait(). */
static DECLARE_WAIT_QUEUE_HEAD(usermodehelper_disabled_waitq);

/*
 * Time to wait for running_helpers to become zero before the setting of
 * usermodehelper_disabled in usermodehelper_disable() fails
 */
#define RUNNING_HELPERS_TIMEOUT	(5 * HZ)

/*
 * Take umhelper_sem for read, sleeping (freezably) while helpers are
 * disabled at a non-terminal depth; returns -EAGAIN if fully disabled.
 */
int usermodehelper_read_trylock(void)
{
	DEFINE_WAIT(wait);
	int ret = 0;

	down_read(&umhelper_sem);
	for (;;) {
		prepare_to_wait(&usermodehelper_disabled_waitq, &wait,
				TASK_INTERRUPTIBLE);
		if (!usermodehelper_disabled)
			break;

		if (usermodehelper_disabled == UMH_DISABLED)
			ret = -EAGAIN;

		up_read(&umhelper_sem);

		if (ret)
			break;

		schedule();
		try_to_freeze();

		down_read(&umhelper_sem);
	}
	finish_wait(&usermodehelper_disabled_waitq, &wait);
	return ret;
}
EXPORT_SYMBOL_GPL(usermodehelper_read_trylock);

/*
 * As usermodehelper_read_trylock(), but waits uninterruptibly up to
 * @timeout jiffies for helpers to be re-enabled.  Returns the remaining
 * timeout (0 means it expired; note the read lock is then NOT held —
 * callers must check), or -EINVAL for a negative timeout.
 */
long usermodehelper_read_lock_wait(long timeout)
{
	DEFINE_WAIT(wait);

	if (timeout < 0)
		return -EINVAL;

	down_read(&umhelper_sem);
	for (;;) {
		prepare_to_wait(&usermodehelper_disabled_waitq, &wait,
				TASK_UNINTERRUPTIBLE);
		if (!usermodehelper_disabled)
			break;

		up_read(&umhelper_sem);

		timeout = schedule_timeout(timeout);
		if (!timeout)
			break;

		down_read(&umhelper_sem);
	}
	finish_wait(&usermodehelper_disabled_waitq, &wait);
	return timeout;
}
EXPORT_SYMBOL_GPL(usermodehelper_read_lock_wait);

void usermodehelper_read_unlock(void)
{
	up_read(&umhelper_sem);
}
EXPORT_SYMBOL_GPL(usermodehelper_read_unlock);

/*
 * Set the disable depth and wake everybody waiting for it to change.
 * Must not be called from a context already holding umhelper_sem.
 */
void __usermodehelper_set_disable_depth(enum umh_disable_depth depth)
{
	down_write(&umhelper_sem);
	usermodehelper_disabled = depth;
	wake_up(&usermodehelper_disabled_waitq);
	up_write(&umhelper_sem);
}

/*
 * Disable new helpers at @depth and wait (bounded by
 * RUNNING_HELPERS_TIMEOUT) for all running helpers to finish.
 * On timeout the disable is rolled back and -EAGAIN returned.
 */
int __usermodehelper_disable(enum umh_disable_depth depth)
{
	long retval;

	if (!depth)
		return -EINVAL;

	down_write(&umhelper_sem);
	usermodehelper_disabled = depth;
	up_write(&umhelper_sem);

	/*
	 * From now on call_usermodehelper_exec() won't start any new
	 * helpers, so it is sufficient if running_helpers turns out to
	 * be zero at one point (it may be increased later, but that
	 * doesn't matter).
	 */
	retval = wait_event_timeout(running_helpers_waitq,
					atomic_read(&running_helpers) == 0,
					RUNNING_HELPERS_TIMEOUT);
	if (retval)
		return 0;

	__usermodehelper_set_disable_depth(UMH_ENABLED);
	return -EAGAIN;
}

/* Account a helper as running; pairs with helper_unlock(). */
static void helper_lock(void)
{
	atomic_inc(&running_helpers);
	smp_mb__after_atomic_inc();
}

static void helper_unlock(void)
{
	if (atomic_dec_and_test(&running_helpers))
		wake_up(&running_helpers_waitq);
}

/**
 * call_usermodehelper_setup - prepare to call a usermode helper
 * @path: path to usermode executable
 * @argv: arg vector for process
 * @envp: environment for process
 * @gfp_mask: gfp mask for memory allocation
 *
 * Returns either %NULL on allocation failure, or a subprocess_info
 * structure.  This should be passed to call_usermodehelper_exec to
 * exec the process and free the structure.
 */
struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
						  char **envp, gfp_t gfp_mask)
{
	struct subprocess_info *sub_info;
	sub_info = kzalloc(sizeof(struct subprocess_info), gfp_mask);
	if (!sub_info)
		goto out;

	INIT_WORK(&sub_info->work, __call_usermodehelper);
	sub_info->path = path;
	sub_info->argv = argv;
	sub_info->envp = envp;
  out:
	return sub_info;
}
EXPORT_SYMBOL(call_usermodehelper_setup);

/**
 * call_usermodehelper_setfns - set optional init/cleanup hooks
 * @info: a subprocess_info returned by call_usermodehelper_setup
 * @init: run in the helper process context just before exec (may veto)
 * @cleanup: run when @info is freed, regardless of success
 * @data: opaque caller data stored in @info
 */
void call_usermodehelper_setfns(struct subprocess_info *info,
		    int (*init)(struct subprocess_info *info, struct cred *new),
		    void (*cleanup)(struct subprocess_info *info),
		    void *data)
{
	info->cleanup = cleanup;
	info->init = init;
	info->data = data;
}
EXPORT_SYMBOL(call_usermodehelper_setfns);

/**
 * call_usermodehelper_exec - start a usermode application
 * @sub_info: information about the subprocess
 * @wait: wait for the application to finish and return status.
 *
 * Runs a user-space application.  Always consumes @sub_info (it is freed
 * either here or by the helper machinery).  Returns -EBUSY while helpers
 * are disabled, or the helper's result per the @wait mode.
 */
int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait)
{
	DECLARE_COMPLETION_ONSTACK(done);
	int retval = 0;

	helper_lock();
	if (sub_info->path[0] == '\0')
		goto out;

	if (!khelper_wq || usermodehelper_disabled) {
		retval = -EBUSY;
		goto out;
	}

	sub_info->complete = &done;
	sub_info->wait = wait;

	queue_work(khelper_wq, &sub_info->work);
	if (wait == UMH_NO_WAIT)	/* task has freed sub_info */
		goto unlock;

	if (wait & UMH_KILLABLE) {
		retval = wait_for_completion_killable(&done);
		if (!retval)
			goto wait_done;

		/* umh_complete() will see NULL and free sub_info */
		if (xchg(&sub_info->complete, NULL))
			goto unlock;
		/* fallthrough, umh_complete() was already called */
	}

	wait_for_completion(&done);
wait_done:
	retval = sub_info->retval;
out:
	call_usermodehelper_freeinfo(sub_info);
unlock:
	helper_unlock();
	return retval;
}
EXPORT_SYMBOL(call_usermodehelper_exec);

/*
 * Sysctl handler for the two capability masks; .data selects which mask
 * (CAP_BSET / CAP_PI).  Writes require CAP_SETPCAP and CAP_SYS_MODULE
 * and can only ever *reduce* the mask (cap_intersect).
 */
static int proc_cap_handler(struct ctl_table *table, int write,
			 void __user *buffer, size_t *lenp, loff_t *ppos)
{
	struct ctl_table t;
	unsigned long cap_array[_KERNEL_CAPABILITY_U32S];
	kernel_cap_t new_cap;
	int err, i;

	if (write && (!capable(CAP_SETPCAP) ||
		      !capable(CAP_SYS_MODULE)))
		return -EPERM;

	/*
	 * convert from the global kernel_cap_t to the ulong array to print to
	 * userspace if this is a read.
	 */
	spin_lock(&umh_sysctl_lock);
	for (i = 0; i < _KERNEL_CAPABILITY_U32S; i++)  {
		if (table->data == CAP_BSET)
			cap_array[i] = usermodehelper_bset.cap[i];
		else if (table->data == CAP_PI)
			cap_array[i] = usermodehelper_inheritable.cap[i];
		else
			BUG();
	}
	spin_unlock(&umh_sysctl_lock);

	t = *table;
	t.data = &cap_array;

	/* actually read or write the data with the sysctl machinery */
	err = proc_doulongvec_minmax(&t, write, buffer, lenp, ppos);
	if (err < 0)
		return err;

	/* convert the sysctl'd array back into a kernel_cap_t */
	for (i = 0; i < _KERNEL_CAPABILITY_U32S; i++)
		new_cap.cap[i] = cap_array[i];

	/*
	 * Drop everything not in the new_cap (but don't add things)
	 */
	spin_lock(&umh_sysctl_lock);
	if (write) {
		if (table->data == CAP_BSET)
			usermodehelper_bset = cap_intersect(usermodehelper_bset, new_cap);
		if (table->data == CAP_PI)
			usermodehelper_inheritable = cap_intersect(usermodehelper_inheritable, new_cap);
	}
	spin_unlock(&umh_sysctl_lock);

	return 0;
}

struct ctl_table usermodehelper_table[] = {
	{
		.procname	= "bset",
		.data		= CAP_BSET,
		.maxlen		= _KERNEL_CAPABILITY_U32S * sizeof(unsigned long),
		.mode		= 0600,
		.proc_handler	= proc_cap_handler,
	},
	{
		.procname	= "inheritable",
		.data		= CAP_PI,
		.maxlen		= _KERNEL_CAPABILITY_U32S * sizeof(unsigned long),
		.mode		= 0600,
		.proc_handler	= proc_cap_handler,
	},
	{ }
};

/* Create the khelper workqueue; a failure here is fatal at boot. */
void __init usermodehelper_init(void)
{
	khelper_wq = create_singlethread_workqueue("khelper");
	BUG_ON(!khelper_wq);
}
gpl-2.0
pantoniou/linux-beagle-track-mainline
arch/m32r/kernel/smp.c
365
23743
/*
 *  linux/arch/m32r/kernel/smp.c
 *
 *  M32R SMP support routines.
 *
 *  Copyright (c) 2001, 2002  Hitoshi Yamamoto
 *
 *  Taken from i386 version.
 *    (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
 *    (c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
 *
 *  This code is released under the GNU General Public License version 2 or
 *  later.
 */

#undef DEBUG_SMP

#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/profile.h>
#include <linux/cpu.h>

#include <asm/cacheflush.h>
#include <asm/pgalloc.h>
#include <linux/atomic.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/m32r.h>
#include <asm/tlbflush.h>

/*
 * For flush_cache_all(): bitmap of CPUs that still owe a cache flush,
 * serialized by flushcache_lock.
 */
static DEFINE_SPINLOCK(flushcache_lock);
static volatile unsigned long flushcache_cpumask = 0;

/*
 * For flush_tlb_others(): parameters of the in-flight remote flush,
 * valid only while tlbstate_lock is held by the initiator.
 */
static cpumask_t flush_cpumask;
static struct mm_struct *flush_mm;
static struct vm_area_struct *flush_vma;
static volatile unsigned long flush_va;
static DEFINE_SPINLOCK(tlbstate_lock);
#define FLUSH_ALL 0xffffffff	/* sentinel va meaning "whole context" */

DECLARE_PER_CPU(int, prof_multiplier);
DECLARE_PER_CPU(int, prof_old_multiplier);
DECLARE_PER_CPU(int, prof_counter);

extern spinlock_t ipi_lock[];

/* Function prototypes */
void smp_reschedule_interrupt(void);
void smp_flush_cache_all_interrupt(void);

static void flush_tlb_all_ipi(void *);
static void flush_tlb_others(cpumask_t, struct mm_struct *,
	struct vm_area_struct *, unsigned long);

void smp_invalidate_interrupt(void);

static void stop_this_cpu(void *);

void smp_ipi_timer_interrupt(struct pt_regs *);
void smp_local_timer_interrupt(void);

static void send_IPI_allbutself(int, int);
static void send_IPI_mask(const struct cpumask *, int, int);

/*
 * smp_send_reschedule: ask @cpu_id to run smp_reschedule_interrupt()
 * by sending it RESCHEDULE_IPI.
 */
void smp_send_reschedule(int cpu_id)
{
	WARN_ON(cpu_is_offline(cpu_id));
	send_IPI_mask(cpumask_of(cpu_id), RESCHEDULE_IPI, 1);
}

/* Handler for RESCHEDULE_IPI on the receiving CPU. */
void smp_reschedule_interrupt(void)
{
	scheduler_ipi();
}

/*
 * smp_flush_cache_all: flush caches on all online CPUs.  Sends
 * INVALIDATE_CACHE_IPI to the others, flushes locally, then spin-waits
 * until every target has cleared its bit in flushcache_cpumask.
 */
void smp_flush_cache_all(void)
{
	cpumask_t cpumask;
	unsigned long *mask;

	preempt_disable();
	cpumask_copy(&cpumask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &cpumask);
	spin_lock(&flushcache_lock);
	mask=cpumask_bits(&cpumask);
	atomic_or(*mask, (atomic_t *)&flushcache_cpumask);
	send_IPI_mask(&cpumask, INVALIDATE_CACHE_IPI, 0);
	_flush_cache_copyback_all();
	while (flushcache_cpumask)
		mb();
	spin_unlock(&flushcache_lock);
	preempt_enable();
}
EXPORT_SYMBOL(smp_flush_cache_all);

/* Handler for INVALIDATE_CACHE_IPI: flush locally and acknowledge. */
void smp_flush_cache_all_interrupt(void)
{
	_flush_cache_copyback_all();
	clear_bit(smp_processor_id(), &flushcache_cpumask);
}

/*
 * smp_flush_tlb_all: flush every CPU's TLB (local flush plus
 * flush_tlb_all_ipi on all others via smp_call_function).
 */
void smp_flush_tlb_all(void)
{
	unsigned long flags;

	preempt_disable();
	local_irq_save(flags);
	__flush_tlb_all();
	local_irq_restore(flags);
	smp_call_function(flush_tlb_all_ipi, NULL, 1);
	preempt_enable();
}

/* IPI callback: flush the local TLB; @info is unused. */
static void flush_tlb_all_ipi(void *info)
{
	__flush_tlb_all();
}

/*
 * smp_flush_tlb_mm: invalidate the given mm's context on this CPU
 * (dropping its ASID) and request the same on every other CPU that
 * has the mm in its cpumask.
 */
void smp_flush_tlb_mm(struct mm_struct *mm)
{
	int cpu_id;
	cpumask_t cpu_mask;
	unsigned long *mmc;
	unsigned long flags;

	preempt_disable();
	cpu_id = smp_processor_id();
	mmc = &mm->context[cpu_id];
	cpumask_copy(&cpu_mask, mm_cpumask(mm));
	cpumask_clear_cpu(cpu_id, &cpu_mask);

	if (*mmc != NO_CONTEXT) {
		local_irq_save(flags);
		*mmc = NO_CONTEXT;
		if (mm == current->mm)
			activate_context(mm);
		else
			cpumask_clear_cpu(cpu_id, mm_cpumask(mm));
		local_irq_restore(flags);
	}
	if (!cpumask_empty(&cpu_mask))
		flush_tlb_others(cpu_mask, mm, NULL, FLUSH_ALL);

	preempt_enable();
}

/*
 * smp_flush_tlb_range: range flush degenerates to a full mm flush on
 * this architecture (start/end are ignored).
 */
void smp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
	unsigned long end)
{
	smp_flush_tlb_mm(vma->vm_mm);
}

/*
 * smp_flush_tlb_page: flush a single page (@va) from the local TLB and
 * from every other CPU using the vma's mm.
 */
void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
{
	struct mm_struct *mm = vma->vm_mm;
	int cpu_id;
	cpumask_t cpu_mask;
	unsigned long *mmc;
	unsigned long flags;

	preempt_disable();
	cpu_id = smp_processor_id();
	mmc = &mm->context[cpu_id];
	cpumask_copy(&cpu_mask, mm_cpumask(mm));
	cpumask_clear_cpu(cpu_id, &cpu_mask);

#ifdef DEBUG_SMP
	if (!mm)
		BUG();
#endif

	if (*mmc != NO_CONTEXT) {
		local_irq_save(flags);
		va &= PAGE_MASK;
		va |= (*mmc & MMU_CONTEXT_ASID_MASK);	/* tag with ASID */
		__flush_tlb_page(va);
		local_irq_restore(flags);
	}
	if (!cpumask_empty(&cpu_mask))
		flush_tlb_others(cpu_mask, mm, vma, va);

	preempt_enable();
}

/*
 * flush_tlb_others: publish (flush_mm, flush_vma, flush_va) under
 * tlbstate_lock, send INVALIDATE_TLB_IPI to @cpumask, and spin until
 * every target clears itself from flush_cpumask.
 */
static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
	struct vm_area_struct *vma, unsigned long va)
{
	unsigned long *mask;
#ifdef DEBUG_SMP
	unsigned long flags;
	__save_flags(flags);
	if (!(flags & 0x0040))	/* Interrupt Disable NONONO */
		BUG();
#endif /* DEBUG_SMP */

	/*
	 * A couple of (to be removed) sanity checks:
	 *
	 * - we do not send IPIs to not-yet booted CPUs.
	 * - current CPU must not be in mask
	 * - mask must exist :)
	 */
	BUG_ON(cpumask_empty(&cpumask));

	BUG_ON(cpumask_test_cpu(smp_processor_id(), &cpumask));
	BUG_ON(!mm);

	/* If a CPU which we ran on has gone down, OK. */
	cpumask_and(&cpumask, &cpumask, cpu_online_mask);
	if (cpumask_empty(&cpumask))
		return;

	/*
	 * i'm not happy about this global shared spinlock in the
	 * MM hot path, but we'll see how contended it is.
	 * Temporarily this turns IRQs off, so that lockups are
	 * detected by the NMI watchdog.
	 */
	spin_lock(&tlbstate_lock);

	flush_mm = mm;
	flush_vma = vma;
	flush_va = va;
	mask=cpumask_bits(&cpumask);
	atomic_or(*mask, (atomic_t *)&flush_cpumask);

	/*
	 * We have to send the IPI only to
	 * CPUs affected.
	 */
	send_IPI_mask(&cpumask, INVALIDATE_TLB_IPI, 0);

	while (!cpumask_empty(&flush_cpumask)) {
		/* nothing. lockup detection does not belong here */
		mb();
	}

	flush_mm = NULL;
	flush_vma = NULL;
	flush_va = 0;
	spin_unlock(&tlbstate_lock);
}

/*
 * Handler for INVALIDATE_TLB_IPI: flush the published mm/page locally,
 * then acknowledge by clearing our bit in flush_cpumask.
 */
void smp_invalidate_interrupt(void)
{
	int cpu_id = smp_processor_id();
	unsigned long *mmc = &flush_mm->context[cpu_id];

	if (!cpumask_test_cpu(cpu_id, &flush_cpumask))
		return;

	if (flush_va == FLUSH_ALL) {
		*mmc = NO_CONTEXT;
		if (flush_mm == current->active_mm)
			activate_context(flush_mm);
		else
			cpumask_clear_cpu(cpu_id, mm_cpumask(flush_mm));
	} else {
		unsigned long va = flush_va;

		if (*mmc != NO_CONTEXT) {
			va &= PAGE_MASK;
			va |= (*mmc & MMU_CONTEXT_ASID_MASK);
			__flush_tlb_page(va);
		}
	}
	cpumask_clear_cpu(cpu_id, &flush_cpumask);
}

/* Ask all other CPUs to execute stop_this_cpu(). */
void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0);
}

/* Take this CPU offline, mask its interrupts and spin forever. */
static void stop_this_cpu(void *dummy)
{
	int cpu_id = smp_processor_id();

	/*
	 * Remove this CPU:
	 */
	set_cpu_online(cpu_id, false);

	/*
	 * PSW IE = 1;
	 * IMASK = 0;
	 * goto SLEEP
	 */
	local_irq_disable();
	outl(0, M32R_ICU_IMASK_PORTL);
	inl(M32R_ICU_IMASK_PORTL);	/* dummy read */
	local_irq_enable();

	for ( ; ; );
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	send_IPI_mask(mask, CALL_FUNCTION_IPI, 0);
}

void arch_send_call_function_single_ipi(int cpu)
{
	send_IPI_mask(cpumask_of(cpu), CALL_FUNC_SINGLE_IPI, 0);
}

/* Handler for CALL_FUNCTION_IPI: run the generic cross-call dispatcher. */
void smp_call_function_interrupt(void)
{
	irq_enter();
	generic_smp_call_function_interrupt();
	irq_exit();
}

void smp_call_function_single_interrupt(void)
{
	irq_enter();
	generic_smp_call_function_single_interrupt();
	irq_exit();
}

/* Broadcast LOCAL_TIMER_IPI to every other CPU. */
void smp_send_timer(void)
{
	send_IPI_allbutself(LOCAL_TIMER_IPI, 1);
}

/* Handler for LOCAL_TIMER_IPI on the receiving CPU. */
void smp_ipi_timer_interrupt(struct pt_regs *regs)
{
	struct pt_regs *old_regs;
	old_regs = set_irq_regs(regs);
	irq_enter();
	smp_local_timer_interrupt();
	irq_exit();
	set_irq_regs(old_regs);
}

/*
 * Local timer interrupt handler. It does both profiling and process
 * statistics/rescheduling.  Profiling runs every local tick;
 * statistics/rescheduling only every 'profiling multiplier' ticks
 * (adjustable via /proc/profile).  Uses per_cpu counters, so it is
 * SMP safe.  (Original: arch/i386/kernel/apic.c)
 */
void smp_local_timer_interrupt(void)
{
	int user = user_mode(get_irq_regs());
	int cpu_id = smp_processor_id();

	profile_tick(CPU_PROFILING);

	if (--per_cpu(prof_counter, cpu_id) <= 0) {
		/*
		 * The multiplier may have changed since the last time we
		 * got to this point as a result of the user writing to
		 * /proc/profile.  Reload and remember it.
		 * Interrupts are already masked off at this point.
		 */
		per_cpu(prof_counter, cpu_id) =
			per_cpu(prof_multiplier, cpu_id);
		if (per_cpu(prof_counter, cpu_id) !=
		    per_cpu(prof_old_multiplier, cpu_id)) {
			per_cpu(prof_old_multiplier, cpu_id) =
				per_cpu(prof_counter, cpu_id);
		}

		update_process_times(user);
	}
}

/*
 * send_IPI_allbutself: send IPI @ipi_num to every online CPU except
 * this one.  @try != 0 means "don't resend if the target has not yet
 * taken the previous IPI".
 */
static void send_IPI_allbutself(int ipi_num, int try)
{
	cpumask_t cpumask;

	cpumask_copy(&cpumask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &cpumask);

	send_IPI_mask(&cpumask, ipi_num, try);
}

/*
 * send_IPI_mask: translate the logical @cpumask to physical CPU IDs
 * and hand off to send_IPI_mask_phys().  All targets must be online.
 */
static void send_IPI_mask(const struct cpumask *cpumask, int ipi_num, int try)
{
	cpumask_t physid_mask, tmp;
	int cpu_id, phys_id;
	int num_cpus = num_online_cpus();

	if (num_cpus <= 1)	/* NO MP */
		return;

	cpumask_and(&tmp, cpumask, cpu_online_mask);
	BUG_ON(!cpumask_equal(cpumask, &tmp));

	cpumask_clear(&physid_mask);
	for_each_cpu(cpu_id, cpumask) {
		if ((phys_id = cpu_to_physid(cpu_id)) != -1)
			cpumask_set_cpu(phys_id, &physid_mask);
	}

	send_IPI_mask_phys(&physid_mask, ipi_num, try);
}

/*==========================================================================*
 * Name:         send_IPI_mask_phys
 *
 * Description:  This routine sends a IPI to other CPUs in the system.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    cpu_mask - Bitmap of target CPUs physical ID
 *               ipi_num - Number of IPI
 *               try - 0 : Send IPI certainly.
 *                     !0 : The following IPI is not sent when Target CPU
 *                          has not received the before IPI.
 *
 * Returns:      IPICRi regster value.
* * Modification log: * Date Who Description * ---------- --- -------------------------------------------------------- * *==========================================================================*/ unsigned long send_IPI_mask_phys(const cpumask_t *physid_mask, int ipi_num, int try) { spinlock_t *ipilock; volatile unsigned long *ipicr_addr; unsigned long ipicr_val; unsigned long my_physid_mask; unsigned long mask = cpumask_bits(physid_mask)[0]; if (mask & ~physids_coerce(phys_cpu_present_map)) BUG(); if (ipi_num >= NR_IPIS || ipi_num < 0) BUG(); mask <<= IPI_SHIFT; ipilock = &ipi_lock[ipi_num]; ipicr_addr = (volatile unsigned long *)(M32R_ICU_IPICR_ADDR + (ipi_num << 2)); my_physid_mask = ~(1 << smp_processor_id()); /* * lock ipi_lock[i] * check IPICRi == 0 * write IPICRi (send IPIi) * unlock ipi_lock[i] */ spin_lock(ipilock); __asm__ __volatile__ ( ";; CHECK IPICRi == 0 \n\t" ".fillinsn \n" "1: \n\t" "ld %0, @%1 \n\t" "and %0, %4 \n\t" "beqz %0, 2f \n\t" "bnez %3, 3f \n\t" "bra 1b \n\t" ";; WRITE IPICRi (send IPIi) \n\t" ".fillinsn \n" "2: \n\t" "st %2, @%1 \n\t" ".fillinsn \n" "3: \n\t" : "=&r"(ipicr_val) : "r"(ipicr_addr), "r"(mask), "r"(try), "r"(my_physid_mask) : "memory" ); spin_unlock(ipilock); return ipicr_val; }
gpl-2.0
TeamHorizon/android_kernel_htc_endeavoru
kernel/rcupdate.c
365
8807
/* * Read-Copy Update mechanism for mutual exclusion * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * * Copyright IBM Corporation, 2001 * * Authors: Dipankar Sarma <dipankar@in.ibm.com> * Manfred Spraul <manfred@colorfullife.com> * * Based on the original work by Paul McKenney <paulmck@us.ibm.com> * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen. 
* Papers: * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001) * * For detailed explanation of Read-Copy Update mechanism see - * http://lse.sourceforge.net/locking/rcupdate.html * */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/spinlock.h> #include <linux/smp.h> #include <linux/interrupt.h> #include <linux/sched.h> #include <linux/atomic.h> #include <linux/bitops.h> #include <linux/percpu.h> #include <linux/notifier.h> #include <linux/cpu.h> #include <linux/mutex.h> #include <linux/module.h> #include <linux/hardirq.h> #ifdef CONFIG_DEBUG_LOCK_ALLOC static struct lock_class_key rcu_lock_key; struct lockdep_map rcu_lock_map = STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key); EXPORT_SYMBOL_GPL(rcu_lock_map); static struct lock_class_key rcu_bh_lock_key; struct lockdep_map rcu_bh_lock_map = STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_bh", &rcu_bh_lock_key); EXPORT_SYMBOL_GPL(rcu_bh_lock_map); static struct lock_class_key rcu_sched_lock_key; struct lockdep_map rcu_sched_lock_map = STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_sched", &rcu_sched_lock_key); EXPORT_SYMBOL_GPL(rcu_sched_lock_map); #endif #ifdef CONFIG_DEBUG_LOCK_ALLOC int debug_lockdep_rcu_enabled(void) { return rcu_scheduler_active && debug_locks && current->lockdep_recursion == 0; } EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled); /** * rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section? * * Check for bottom half being disabled, which covers both the * CONFIG_PROVE_RCU and not cases. Note that if someone uses * rcu_read_lock_bh(), but then later enables BH, lockdep (if enabled) * will show the situation. This is useful for debug checks in functions * that require that they be called within an RCU read-side critical * section. * * Check debug_lockdep_rcu_enabled() to prevent false positives during boot. 
*/ int rcu_read_lock_bh_held(void) { if (!debug_lockdep_rcu_enabled()) return 1; return in_softirq() || irqs_disabled(); } EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held); #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ /* * Awaken the corresponding synchronize_rcu() instance now that a * grace period has elapsed. */ void wakeme_after_rcu(struct rcu_head *head) { struct rcu_synchronize *rcu; rcu = container_of(head, struct rcu_synchronize, head); complete(&rcu->completion); } #ifdef CONFIG_PROVE_RCU /* * wrapper function to avoid #include problems. */ int rcu_my_thread_group_empty(void) { return thread_group_empty(current); } EXPORT_SYMBOL_GPL(rcu_my_thread_group_empty); #endif /* #ifdef CONFIG_PROVE_RCU */ #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD static inline void debug_init_rcu_head(struct rcu_head *head) { debug_object_init(head, &rcuhead_debug_descr); } static inline void debug_rcu_head_free(struct rcu_head *head) { debug_object_free(head, &rcuhead_debug_descr); } /* * fixup_init is called when: * - an active object is initialized */ static int rcuhead_fixup_init(void *addr, enum debug_obj_state state) { struct rcu_head *head = addr; switch (state) { case ODEBUG_STATE_ACTIVE: /* * Ensure that queued callbacks are all executed. * If we detect that we are nested in a RCU read-side critical * section, we should simply fail, otherwise we would deadlock. * In !PREEMPT configurations, there is no way to tell if we are * in a RCU read-side critical section or not, so we never * attempt any fixup and just print a warning. 
*/ #ifndef CONFIG_PREEMPT WARN_ON_ONCE(1); return 0; #endif if (rcu_preempt_depth() != 0 || preempt_count() != 0 || irqs_disabled()) { WARN_ON_ONCE(1); return 0; } rcu_barrier(); rcu_barrier_sched(); rcu_barrier_bh(); debug_object_init(head, &rcuhead_debug_descr); return 1; default: return 0; } } /* * fixup_activate is called when: * - an active object is activated * - an unknown object is activated (might be a statically initialized object) * Activation is performed internally by call_rcu(). */ static int rcuhead_fixup_activate(void *addr, enum debug_obj_state state) { struct rcu_head *head = addr; switch (state) { case ODEBUG_STATE_NOTAVAILABLE: /* * This is not really a fixup. We just make sure that it is * tracked in the object tracker. */ debug_object_init(head, &rcuhead_debug_descr); debug_object_activate(head, &rcuhead_debug_descr); return 0; case ODEBUG_STATE_ACTIVE: /* * Ensure that queued callbacks are all executed. * If we detect that we are nested in a RCU read-side critical * section, we should simply fail, otherwise we would deadlock. * In !PREEMPT configurations, there is no way to tell if we are * in a RCU read-side critical section or not, so we never * attempt any fixup and just print a warning. */ #ifndef CONFIG_PREEMPT WARN_ON_ONCE(1); return 0; #endif if (rcu_preempt_depth() != 0 || preempt_count() != 0 || irqs_disabled()) { WARN_ON_ONCE(1); return 0; } rcu_barrier(); rcu_barrier_sched(); rcu_barrier_bh(); debug_object_activate(head, &rcuhead_debug_descr); return 1; default: return 0; } } /* * fixup_free is called when: * - an active object is freed */ static int rcuhead_fixup_free(void *addr, enum debug_obj_state state) { struct rcu_head *head = addr; switch (state) { case ODEBUG_STATE_ACTIVE: /* * Ensure that queued callbacks are all executed. * If we detect that we are nested in a RCU read-side critical * section, we should simply fail, otherwise we would deadlock. 
* In !PREEMPT configurations, there is no way to tell if we are * in a RCU read-side critical section or not, so we never * attempt any fixup and just print a warning. */ #ifndef CONFIG_PREEMPT WARN_ON_ONCE(1); return 0; #endif if (rcu_preempt_depth() != 0 || preempt_count() != 0 || irqs_disabled()) { WARN_ON_ONCE(1); return 0; } rcu_barrier(); rcu_barrier_sched(); rcu_barrier_bh(); debug_object_free(head, &rcuhead_debug_descr); return 1; default: return 0; } } /** * init_rcu_head_on_stack() - initialize on-stack rcu_head for debugobjects * @head: pointer to rcu_head structure to be initialized * * This function informs debugobjects of a new rcu_head structure that * has been allocated as an auto variable on the stack. This function * is not required for rcu_head structures that are statically defined or * that are dynamically allocated on the heap. This function has no * effect for !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds. */ void init_rcu_head_on_stack(struct rcu_head *head) { debug_object_init_on_stack(head, &rcuhead_debug_descr); } EXPORT_SYMBOL_GPL(init_rcu_head_on_stack); /** * destroy_rcu_head_on_stack() - destroy on-stack rcu_head for debugobjects * @head: pointer to rcu_head structure to be initialized * * This function informs debugobjects that an on-stack rcu_head structure * is about to go out of scope. As with init_rcu_head_on_stack(), this * function is not required for rcu_head structures that are statically * defined or that are dynamically allocated on the heap. Also as with * init_rcu_head_on_stack(), this function has no effect for * !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds. 
*/ void destroy_rcu_head_on_stack(struct rcu_head *head) { debug_object_free(head, &rcuhead_debug_descr); } EXPORT_SYMBOL_GPL(destroy_rcu_head_on_stack); struct debug_obj_descr rcuhead_debug_descr = { .name = "rcu_head", .fixup_init = rcuhead_fixup_init, .fixup_activate = rcuhead_fixup_activate, .fixup_free = rcuhead_fixup_free, }; EXPORT_SYMBOL_GPL(rcuhead_debug_descr); #endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
gpl-2.0
sagar846/kernel_athene
drivers/char/tpm/tpm_i2c_stm_st33.c
621
23528
/* * STMicroelectronics TPM I2C Linux driver for TPM ST33ZP24 * Copyright (C) 2009, 2010 STMicroelectronics * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * * STMicroelectronics version 1.2.0, Copyright (C) 2010 * STMicroelectronics comes with ABSOLUTELY NO WARRANTY. * This is free software, and you are welcome to redistribute it * under certain conditions. * * @Author: Christophe RICARD tpmsupport@st.com * * @File: tpm_stm_st33_i2c.c * * @Synopsis: * 09/15/2010: First shot driver tpm_tis driver for lpc is used as model. 
*/ #include <linux/pci.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/i2c.h> #include <linux/fs.h> #include <linux/miscdevice.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/wait.h> #include <linux/string.h> #include <linux/interrupt.h> #include <linux/spinlock.h> #include <linux/sysfs.h> #include <linux/gpio.h> #include <linux/sched.h> #include <linux/uaccess.h> #include <linux/io.h> #include <linux/slab.h> #include "tpm.h" #include "tpm_i2c_stm_st33.h" enum stm33zp24_access { TPM_ACCESS_VALID = 0x80, TPM_ACCESS_ACTIVE_LOCALITY = 0x20, TPM_ACCESS_REQUEST_PENDING = 0x04, TPM_ACCESS_REQUEST_USE = 0x02, }; enum stm33zp24_status { TPM_STS_VALID = 0x80, TPM_STS_COMMAND_READY = 0x40, TPM_STS_GO = 0x20, TPM_STS_DATA_AVAIL = 0x10, TPM_STS_DATA_EXPECT = 0x08, }; enum stm33zp24_int_flags { TPM_GLOBAL_INT_ENABLE = 0x80, TPM_INTF_CMD_READY_INT = 0x080, TPM_INTF_FIFO_AVALAIBLE_INT = 0x040, TPM_INTF_WAKE_UP_READY_INT = 0x020, TPM_INTF_LOCALITY_CHANGE_INT = 0x004, TPM_INTF_STS_VALID_INT = 0x002, TPM_INTF_DATA_AVAIL_INT = 0x001, }; enum tis_defaults { TIS_SHORT_TIMEOUT = 750, TIS_LONG_TIMEOUT = 2000, }; /* * write8_reg * Send byte to the TIS register according to the ST33ZP24 I2C protocol. * @param: tpm_register, the tpm tis register where the data should be written * @param: tpm_data, the tpm_data to write inside the tpm_register * @param: tpm_size, The length of the data * @return: Returns negative errno, or else the number of bytes written. 
*/ static int write8_reg(struct i2c_client *client, u8 tpm_register, u8 *tpm_data, u16 tpm_size) { struct st33zp24_platform_data *pin_infos; pin_infos = client->dev.platform_data; pin_infos->tpm_i2c_buffer[0][0] = tpm_register; memcpy(&pin_infos->tpm_i2c_buffer[0][1], tpm_data, tpm_size); return i2c_master_send(client, pin_infos->tpm_i2c_buffer[0], tpm_size + 1); } /* write8_reg() */ /* * read8_reg * Recv byte from the TIS register according to the ST33ZP24 I2C protocol. * @param: tpm_register, the tpm tis register where the data should be read * @param: tpm_data, the TPM response * @param: tpm_size, tpm TPM response size to read. * @return: number of byte read successfully: should be one if success. */ static int read8_reg(struct i2c_client *client, u8 tpm_register, u8 *tpm_data, int tpm_size) { u8 status = 0; u8 data; data = TPM_DUMMY_BYTE; status = write8_reg(client, tpm_register, &data, 1); if (status == 2) status = i2c_master_recv(client, tpm_data, tpm_size); return status; } /* read8_reg() */ /* * I2C_WRITE_DATA * Send byte to the TIS register according to the ST33ZP24 I2C protocol. * @param: client, the chip description * @param: tpm_register, the tpm tis register where the data should be written * @param: tpm_data, the tpm_data to write inside the tpm_register * @param: tpm_size, The length of the data * @return: number of byte written successfully: should be one if success. */ #define I2C_WRITE_DATA(client, tpm_register, tpm_data, tpm_size) \ (write8_reg(client, tpm_register | \ TPM_WRITE_DIRECTION, tpm_data, tpm_size)) /* * I2C_READ_DATA * Recv byte from the TIS register according to the ST33ZP24 I2C protocol. * @param: tpm, the chip description * @param: tpm_register, the tpm tis register where the data should be read * @param: tpm_data, the TPM response * @param: tpm_size, tpm TPM response size to read. * @return: number of byte read successfully: should be one if success. 
*/ #define I2C_READ_DATA(client, tpm_register, tpm_data, tpm_size) \ (read8_reg(client, tpm_register, tpm_data, tpm_size)) /* * clear_interruption * clear the TPM interrupt register. * @param: tpm, the chip description */ static void clear_interruption(struct i2c_client *client) { u8 interrupt; I2C_READ_DATA(client, TPM_INT_STATUS, &interrupt, 1); I2C_WRITE_DATA(client, TPM_INT_STATUS, &interrupt, 1); I2C_READ_DATA(client, TPM_INT_STATUS, &interrupt, 1); } /* clear_interruption() */ /* * _wait_for_interrupt_serirq_timeout * @param: tpm, the chip description * @param: timeout, the timeout of the interrupt * @return: the status of the interruption. */ static long _wait_for_interrupt_serirq_timeout(struct tpm_chip *chip, unsigned long timeout) { long status; struct i2c_client *client; struct st33zp24_platform_data *pin_infos; client = (struct i2c_client *)TPM_VPRIV(chip); pin_infos = client->dev.platform_data; status = wait_for_completion_interruptible_timeout( &pin_infos->irq_detection, timeout); if (status > 0) enable_irq(gpio_to_irq(pin_infos->io_serirq)); gpio_direction_input(pin_infos->io_serirq); return status; } /* wait_for_interrupt_serirq_timeout() */ static int wait_for_serirq_timeout(struct tpm_chip *chip, bool condition, unsigned long timeout) { int status = 2; struct i2c_client *client; client = (struct i2c_client *)TPM_VPRIV(chip); status = _wait_for_interrupt_serirq_timeout(chip, timeout); if (!status) { status = -EBUSY; } else { clear_interruption(client); if (condition) status = 1; } return status; } /* * tpm_stm_i2c_cancel, cancel is not implemented. 
* @param: chip, the tpm_chip description as specified in driver/char/tpm/tpm.h */ static void tpm_stm_i2c_cancel(struct tpm_chip *chip) { struct i2c_client *client; u8 data; client = (struct i2c_client *)TPM_VPRIV(chip); data = TPM_STS_COMMAND_READY; I2C_WRITE_DATA(client, TPM_STS, &data, 1); if (chip->vendor.irq) wait_for_serirq_timeout(chip, 1, chip->vendor.timeout_a); } /* tpm_stm_i2c_cancel() */ /* * tpm_stm_spi_status return the TPM_STS register * @param: chip, the tpm chip description * @return: the TPM_STS register value. */ static u8 tpm_stm_i2c_status(struct tpm_chip *chip) { struct i2c_client *client; u8 data; client = (struct i2c_client *)TPM_VPRIV(chip); I2C_READ_DATA(client, TPM_STS, &data, 1); return data; } /* tpm_stm_i2c_status() */ /* * check_locality if the locality is active * @param: chip, the tpm chip description * @return: the active locality or -EACCESS. */ static int check_locality(struct tpm_chip *chip) { struct i2c_client *client; u8 data; u8 status; client = (struct i2c_client *)TPM_VPRIV(chip); status = I2C_READ_DATA(client, TPM_ACCESS, &data, 1); if (status && (data & (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) == (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) return chip->vendor.locality; return -EACCES; } /* check_locality() */ /* * request_locality request the TPM locality * @param: chip, the chip description * @return: the active locality or EACCESS. 
*/ static int request_locality(struct tpm_chip *chip) { unsigned long stop; long rc; struct i2c_client *client; u8 data; client = (struct i2c_client *)TPM_VPRIV(chip); if (check_locality(chip) == chip->vendor.locality) return chip->vendor.locality; data = TPM_ACCESS_REQUEST_USE; rc = I2C_WRITE_DATA(client, TPM_ACCESS, &data, 1); if (rc < 0) goto end; if (chip->vendor.irq) { rc = wait_for_serirq_timeout(chip, (check_locality (chip) >= 0), chip->vendor.timeout_a); if (rc > 0) return chip->vendor.locality; } else { stop = jiffies + chip->vendor.timeout_a; do { if (check_locality(chip) >= 0) return chip->vendor.locality; msleep(TPM_TIMEOUT); } while (time_before(jiffies, stop)); } rc = -EACCES; end: return rc; } /* request_locality() */ /* * release_locality release the active locality * @param: chip, the tpm chip description. */ static void release_locality(struct tpm_chip *chip) { struct i2c_client *client; u8 data; client = (struct i2c_client *)TPM_VPRIV(chip); data = TPM_ACCESS_ACTIVE_LOCALITY; I2C_WRITE_DATA(client, TPM_ACCESS, &data, 1); } /* * get_burstcount return the burstcount address 0x19 0x1A * @param: chip, the chip description * return: the burstcount. */ static int get_burstcount(struct tpm_chip *chip) { unsigned long stop; int burstcnt, status; u8 tpm_reg, temp; struct i2c_client *client = (struct i2c_client *)TPM_VPRIV(chip); stop = jiffies + chip->vendor.timeout_d; do { tpm_reg = TPM_STS + 1; status = I2C_READ_DATA(client, tpm_reg, &temp, 1); if (status < 0) goto end; tpm_reg = tpm_reg + 1; burstcnt = temp; status = I2C_READ_DATA(client, tpm_reg, &temp, 1); if (status < 0) goto end; burstcnt |= temp << 8; if (burstcnt) return burstcnt; msleep(TPM_TIMEOUT); } while (time_before(jiffies, stop)); end: return -EBUSY; } /* get_burstcount() */ /* * wait_for_stat wait for a TPM_STS value * @param: chip, the tpm chip description * @param: mask, the value mask to wait * @param: timeout, the timeout * @param: queue, the wait queue. 
* @return: the tpm status, 0 if success, -ETIME if timeout is reached. */ static int wait_for_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout, wait_queue_head_t *queue) { unsigned long stop; long rc; u8 status; if (chip->vendor.irq) { rc = wait_for_serirq_timeout(chip, ((tpm_stm_i2c_status (chip) & mask) == mask), timeout); if (rc > 0) return 0; } else { stop = jiffies + timeout; do { msleep(TPM_TIMEOUT); status = tpm_stm_i2c_status(chip); if ((status & mask) == mask) return 0; } while (time_before(jiffies, stop)); } return -ETIME; } /* wait_for_stat() */ /* * recv_data receive data * @param: chip, the tpm chip description * @param: buf, the buffer where the data are received * @param: count, the number of data to receive * @return: the number of bytes read from TPM FIFO. */ static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count) { int size = 0, burstcnt, len; struct i2c_client *client; client = (struct i2c_client *)TPM_VPRIV(chip); while (size < count && wait_for_stat(chip, TPM_STS_DATA_AVAIL | TPM_STS_VALID, chip->vendor.timeout_c, &chip->vendor.read_queue) == 0) { burstcnt = get_burstcount(chip); if (burstcnt < 0) return burstcnt; len = min_t(int, burstcnt, count - size); I2C_READ_DATA(client, TPM_DATA_FIFO, buf + size, len); size += len; } return size; } /* * tpm_ioserirq_handler the serirq irq handler * @param: irq, the tpm chip description * @param: dev_id, the description of the chip * @return: the status of the handler. */ static irqreturn_t tpm_ioserirq_handler(int irq, void *dev_id) { struct tpm_chip *chip = dev_id; struct i2c_client *client; struct st33zp24_platform_data *pin_infos; disable_irq_nosync(irq); client = (struct i2c_client *)TPM_VPRIV(chip); pin_infos = client->dev.platform_data; complete(&pin_infos->irq_detection); return IRQ_HANDLED; } /* tpm_ioserirq_handler() */ /* * tpm_stm_i2c_send send TPM commands through the I2C bus. 
* * @param: chip, the tpm_chip description as specified in driver/char/tpm/tpm.h * @param: buf, the buffer to send. * @param: count, the number of bytes to send. * @return: In case of success the number of bytes sent. * In other case, a < 0 value describing the issue. */ static int tpm_stm_i2c_send(struct tpm_chip *chip, unsigned char *buf, size_t len) { u32 status, i, size; int burstcnt = 0; int ret; u8 data; struct i2c_client *client; if (chip == NULL) return -EBUSY; if (len < TPM_HEADER_SIZE) return -EBUSY; client = (struct i2c_client *)TPM_VPRIV(chip); client->flags = 0; ret = request_locality(chip); if (ret < 0) return ret; status = tpm_stm_i2c_status(chip); if ((status & TPM_STS_COMMAND_READY) == 0) { tpm_stm_i2c_cancel(chip); if (wait_for_stat (chip, TPM_STS_COMMAND_READY, chip->vendor.timeout_b, &chip->vendor.int_queue) < 0) { ret = -ETIME; goto out_err; } } for (i = 0; i < len - 1;) { burstcnt = get_burstcount(chip); if (burstcnt < 0) return burstcnt; size = min_t(int, len - i - 1, burstcnt); ret = I2C_WRITE_DATA(client, TPM_DATA_FIFO, buf + i, size); if (ret < 0) goto out_err; i += size; } status = tpm_stm_i2c_status(chip); if ((status & TPM_STS_DATA_EXPECT) == 0) { ret = -EIO; goto out_err; } ret = I2C_WRITE_DATA(client, TPM_DATA_FIFO, buf + len - 1, 1); if (ret < 0) goto out_err; status = tpm_stm_i2c_status(chip); if ((status & TPM_STS_DATA_EXPECT) != 0) { ret = -EIO; goto out_err; } data = TPM_STS_GO; I2C_WRITE_DATA(client, TPM_STS, &data, 1); return len; out_err: tpm_stm_i2c_cancel(chip); release_locality(chip); return ret; } /* * tpm_stm_i2c_recv received TPM response through the I2C bus. * @param: chip, the tpm_chip description as specified in driver/char/tpm/tpm.h. * @param: buf, the buffer to store datas. * @param: count, the number of bytes to send. * @return: In case of success the number of bytes received. * In other case, a < 0 value describing the issue. 
*/ static int tpm_stm_i2c_recv(struct tpm_chip *chip, unsigned char *buf, size_t count) { int size = 0; int expected; if (chip == NULL) return -EBUSY; if (count < TPM_HEADER_SIZE) { size = -EIO; goto out; } size = recv_data(chip, buf, TPM_HEADER_SIZE); if (size < TPM_HEADER_SIZE) { dev_err(chip->dev, "Unable to read header\n"); goto out; } expected = be32_to_cpu(*(__be32 *)(buf + 2)); if (expected > count) { size = -EIO; goto out; } size += recv_data(chip, &buf[TPM_HEADER_SIZE], expected - TPM_HEADER_SIZE); if (size < expected) { dev_err(chip->dev, "Unable to read remainder of result\n"); size = -ETIME; goto out; } out: chip->vendor.cancel(chip); release_locality(chip); return size; } static bool tpm_st33_i2c_req_canceled(struct tpm_chip *chip, u8 status) { return (status == TPM_STS_COMMAND_READY); } static const struct file_operations tpm_st33_i2c_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .read = tpm_read, .write = tpm_write, .open = tpm_open, .release = tpm_release, }; static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL); static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL); static DEVICE_ATTR(enabled, S_IRUGO, tpm_show_enabled, NULL); static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL); static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL); static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated, NULL); static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps_1_2, NULL); static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel); static struct attribute *stm_tpm_attrs[] = { &dev_attr_pubek.attr, &dev_attr_pcrs.attr, &dev_attr_enabled.attr, &dev_attr_active.attr, &dev_attr_owned.attr, &dev_attr_temp_deactivated.attr, &dev_attr_caps.attr, &dev_attr_cancel.attr, NULL, }; static struct attribute_group stm_tpm_attr_grp = { .attrs = stm_tpm_attrs }; static struct tpm_vendor_specific st_i2c_tpm = { .send = tpm_stm_i2c_send, .recv = tpm_stm_i2c_recv, .cancel = tpm_stm_i2c_cancel, .status = tpm_stm_i2c_status, 
.req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID, .req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID, .req_canceled = tpm_st33_i2c_req_canceled, .attr_group = &stm_tpm_attr_grp, .miscdev = {.fops = &tpm_st33_i2c_fops,}, }; static int interrupts; module_param(interrupts, int, 0444); MODULE_PARM_DESC(interrupts, "Enable interrupts"); static int power_mgt = 1; module_param(power_mgt, int, 0444); MODULE_PARM_DESC(power_mgt, "Power Management"); /* * tpm_st33_i2c_probe initialize the TPM device * @param: client, the i2c_client drescription (TPM I2C description). * @param: id, the i2c_device_id struct. * @return: 0 in case of success. * -1 in other case. */ static int tpm_st33_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id) { int err; u8 intmask; struct tpm_chip *chip; struct st33zp24_platform_data *platform_data; if (client == NULL) { pr_info("%s: i2c client is NULL. Device not accessible.\n", __func__); err = -ENODEV; goto end; } if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { dev_info(&client->dev, "client not i2c capable\n"); err = -ENODEV; goto end; } chip = tpm_register_hardware(&client->dev, &st_i2c_tpm); if (!chip) { dev_info(&client->dev, "fail chip\n"); err = -ENODEV; goto end; } platform_data = client->dev.platform_data; if (!platform_data) { dev_info(&client->dev, "chip not available\n"); err = -ENODEV; goto _tpm_clean_answer; } platform_data->tpm_i2c_buffer[0] = kmalloc(TPM_BUFSIZE * sizeof(u8), GFP_KERNEL); if (platform_data->tpm_i2c_buffer[0] == NULL) { err = -ENOMEM; goto _tpm_clean_answer; } platform_data->tpm_i2c_buffer[1] = kmalloc(TPM_BUFSIZE * sizeof(u8), GFP_KERNEL); if (platform_data->tpm_i2c_buffer[1] == NULL) { err = -ENOMEM; goto _tpm_clean_response1; } TPM_VPRIV(chip) = client; chip->vendor.timeout_a = msecs_to_jiffies(TIS_SHORT_TIMEOUT); chip->vendor.timeout_b = msecs_to_jiffies(TIS_LONG_TIMEOUT); chip->vendor.timeout_c = msecs_to_jiffies(TIS_SHORT_TIMEOUT); chip->vendor.timeout_d = 
msecs_to_jiffies(TIS_SHORT_TIMEOUT); chip->vendor.locality = LOCALITY0; if (power_mgt) { err = gpio_request(platform_data->io_lpcpd, "TPM IO_LPCPD"); if (err) goto _gpio_init1; gpio_set_value(platform_data->io_lpcpd, 1); } if (interrupts) { init_completion(&platform_data->irq_detection); if (request_locality(chip) != LOCALITY0) { err = -ENODEV; goto _tpm_clean_response2; } err = gpio_request(platform_data->io_serirq, "TPM IO_SERIRQ"); if (err) goto _gpio_init2; clear_interruption(client); err = request_irq(gpio_to_irq(platform_data->io_serirq), &tpm_ioserirq_handler, IRQF_TRIGGER_HIGH, "TPM SERIRQ management", chip); if (err < 0) { dev_err(chip->dev , "TPM SERIRQ signals %d not available\n", gpio_to_irq(platform_data->io_serirq)); goto _irq_set; } err = I2C_READ_DATA(client, TPM_INT_ENABLE, &intmask, 1); if (err < 0) goto _irq_set; intmask |= TPM_INTF_CMD_READY_INT | TPM_INTF_FIFO_AVALAIBLE_INT | TPM_INTF_WAKE_UP_READY_INT | TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_STS_VALID_INT | TPM_INTF_DATA_AVAIL_INT; err = I2C_WRITE_DATA(client, TPM_INT_ENABLE, &intmask, 1); if (err < 0) goto _irq_set; intmask = TPM_GLOBAL_INT_ENABLE; err = I2C_WRITE_DATA(client, (TPM_INT_ENABLE + 3), &intmask, 1); if (err < 0) goto _irq_set; err = I2C_READ_DATA(client, TPM_INT_STATUS, &intmask, 1); if (err < 0) goto _irq_set; chip->vendor.irq = interrupts; tpm_gen_interrupt(chip); } tpm_get_timeouts(chip); i2c_set_clientdata(client, chip); dev_info(chip->dev, "TPM I2C Initialized\n"); return 0; _irq_set: free_irq(gpio_to_irq(platform_data->io_serirq), (void *)chip); _gpio_init2: if (interrupts) gpio_free(platform_data->io_serirq); _gpio_init1: if (power_mgt) gpio_free(platform_data->io_lpcpd); _tpm_clean_response2: kzfree(platform_data->tpm_i2c_buffer[1]); platform_data->tpm_i2c_buffer[1] = NULL; _tpm_clean_response1: kzfree(platform_data->tpm_i2c_buffer[0]); platform_data->tpm_i2c_buffer[0] = NULL; _tpm_clean_answer: tpm_remove_hardware(chip->dev); end: pr_info("TPM I2C initialisation 
fail\n"); return err; } /* * tpm_st33_i2c_remove remove the TPM device * @param: client, the i2c_client drescription (TPM I2C description). clear_bit(0, &chip->is_open); * @return: 0 in case of success. */ static int tpm_st33_i2c_remove(struct i2c_client *client) { struct tpm_chip *chip = (struct tpm_chip *)i2c_get_clientdata(client); struct st33zp24_platform_data *pin_infos = ((struct i2c_client *)TPM_VPRIV(chip))->dev.platform_data; if (pin_infos != NULL) { free_irq(pin_infos->io_serirq, chip); gpio_free(pin_infos->io_serirq); gpio_free(pin_infos->io_lpcpd); tpm_remove_hardware(chip->dev); if (pin_infos->tpm_i2c_buffer[1] != NULL) { kzfree(pin_infos->tpm_i2c_buffer[1]); pin_infos->tpm_i2c_buffer[1] = NULL; } if (pin_infos->tpm_i2c_buffer[0] != NULL) { kzfree(pin_infos->tpm_i2c_buffer[0]); pin_infos->tpm_i2c_buffer[0] = NULL; } } return 0; } #ifdef CONFIG_PM_SLEEP /* * tpm_st33_i2c_pm_suspend suspend the TPM device * Added: Work around when suspend and no tpm application is running, suspend * may fail because chip->data_buffer is not set (only set in tpm_open in Linux * TPM core) * @param: client, the i2c_client drescription (TPM I2C description). * @param: mesg, the power management message. * @return: 0 in case of success. */ static int tpm_st33_i2c_pm_suspend(struct device *dev) { struct tpm_chip *chip = dev_get_drvdata(dev); struct st33zp24_platform_data *pin_infos = dev->platform_data; int ret = 0; if (power_mgt) { gpio_set_value(pin_infos->io_lpcpd, 0); } else { if (chip->data_buffer == NULL) chip->data_buffer = pin_infos->tpm_i2c_buffer[0]; ret = tpm_pm_suspend(dev); } return ret; } /* tpm_st33_i2c_suspend() */ /* * tpm_st33_i2c_pm_resume resume the TPM device * @param: client, the i2c_client drescription (TPM I2C description). * @return: 0 in case of success. 
*/
static int tpm_st33_i2c_pm_resume(struct device *dev)
{
	struct tpm_chip *chip = dev_get_drvdata(dev);
	struct st33zp24_platform_data *pin_infos = dev->platform_data;
	int ret = 0;

	if (power_mgt) {
		/* Power-gated variant: drive io_lpcpd high again, then wait
		 * until the chip reports TPM_STS_VALID before declaring the
		 * resume complete. */
		gpio_set_value(pin_infos->io_lpcpd, 1);
		ret = wait_for_serirq_timeout(chip,
					  (chip->vendor.status(chip) &
					  TPM_STS_VALID) == TPM_STS_VALID,
					  chip->vendor.timeout_b);
	} else {
		/* Mirror of the suspend-path workaround: data_buffer is only
		 * set by tpm_open, which may never have run. */
		if (chip->data_buffer == NULL)
			chip->data_buffer = pin_infos->tpm_i2c_buffer[0];
		ret = tpm_pm_resume(dev);
		if (!ret)
			tpm_do_selftest(chip);
	}
	return ret;
}				/* tpm_st33_i2c_pm_resume() */
#endif

/* I2C device-id table used for driver matching. */
static const struct i2c_device_id tpm_st33_i2c_id[] = {
	{TPM_ST33_I2C, 0},
	{}
};

MODULE_DEVICE_TABLE(i2c, tpm_st33_i2c_id);

static SIMPLE_DEV_PM_OPS(tpm_st33_i2c_ops, tpm_st33_i2c_pm_suspend,
	tpm_st33_i2c_pm_resume);

static struct i2c_driver tpm_st33_i2c_driver = {
	.driver = {
		   .owner = THIS_MODULE,
		   .name = TPM_ST33_I2C,
		   .pm = &tpm_st33_i2c_ops,
		   },
	.probe = tpm_st33_i2c_probe,
	.remove = tpm_st33_i2c_remove,
	.id_table = tpm_st33_i2c_id
};

module_i2c_driver(tpm_st33_i2c_driver);

MODULE_AUTHOR("Christophe Ricard (tpmsupport@st.com)");
MODULE_DESCRIPTION("STM TPM I2C ST33 Driver");
MODULE_VERSION("1.2.0");
MODULE_LICENSE("GPL");
gpl-2.0
networkosnet/linux
drivers/staging/rtl8712/rtl871x_recv.c
621
20869
/****************************************************************************** * rtl871x_recv.c * * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved. * Linux device driver for RTL8192SU * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA * * Modifications for inclusion into the Linux staging tree are * Copyright(c) 2010 Larry Finger. All rights reserved. * * Contact information: * WLAN FAE <wlanfae@realtek.com> * Larry Finger <Larry.Finger@lwfinger.net> * ******************************************************************************/ #define _RTL871X_RECV_C_ #include <linux/ip.h> #include <linux/slab.h> #include <linux/if_ether.h> #include <linux/kmemleak.h> #include <linux/etherdevice.h> #include "osdep_service.h" #include "drv_types.h" #include "recv_osdep.h" #include "mlme_osdep.h" #include "ethernet.h" #include "usb_ops.h" #include "wifi.h" static const u8 SNAP_ETH_TYPE_IPX[2] = {0x81, 0x37}; /* Datagram Delivery Protocol */ static const u8 SNAP_ETH_TYPE_APPLETALK_AARP[2] = {0x80, 0xf3}; /* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */ static const u8 bridge_tunnel_header[] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8}; /* Ethernet-II snap header (RFC1042 for most EtherTypes) */ static const u8 rfc1042_header[] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00}; void _r8712_init_sta_recv_priv(struct sta_recv_priv *psta_recvpriv) { memset((u8 *)psta_recvpriv, 0, 
sizeof(struct sta_recv_priv));
	spin_lock_init(&psta_recvpriv->lock);
	_init_queue(&psta_recvpriv->defrag_q);
}

/*
 * Set up the adapter-wide receive state: the free recv-frame pool
 * (NR_RECVFRAME entries carved out of a single aligned kmalloc buffer)
 * plus the free and pending queues.  Returns _FAIL when the pool
 * allocation fails, otherwise whatever r8712_init_recv_priv() — the
 * non-underscore counterpart defined elsewhere — returns.
 */
sint _r8712_init_recv_priv(struct recv_priv *precvpriv,
			   struct _adapter *padapter)
{
	sint i;
	union recv_frame *precvframe;

	memset((unsigned char *)precvpriv, 0, sizeof(struct recv_priv));
	spin_lock_init(&precvpriv->lock);
	_init_queue(&precvpriv->free_recv_queue);
	_init_queue(&precvpriv->recv_pending_queue);
	precvpriv->adapter = padapter;
	precvpriv->free_recvframe_cnt = NR_RECVFRAME;
	/* One allocation for all frames, with RXFRAME_ALIGN_SZ of slack so
	 * the usable start can be rounded up to an aligned boundary. */
	precvpriv->pallocated_frame_buf = kmalloc(NR_RECVFRAME *
					sizeof(union recv_frame) +
					RXFRAME_ALIGN_SZ,
					GFP_ATOMIC);
	if (precvpriv->pallocated_frame_buf == NULL)
		return _FAIL;
	kmemleak_not_leak(precvpriv->pallocated_frame_buf);
	memset(precvpriv->pallocated_frame_buf, 0, NR_RECVFRAME *
			sizeof(union recv_frame) + RXFRAME_ALIGN_SZ);
	/* Round the frame area up to the next RXFRAME_ALIGN_SZ boundary. */
	precvpriv->precv_frame_buf = precvpriv->pallocated_frame_buf +
				RXFRAME_ALIGN_SZ -
				((addr_t)(precvpriv->pallocated_frame_buf)
				& (RXFRAME_ALIGN_SZ-1));
	precvframe = (union recv_frame *)precvpriv->precv_frame_buf;
	/* Thread every frame onto the free queue and give it its per-frame
	 * OS resources and back-pointer to the adapter. */
	for (i = 0; i < NR_RECVFRAME; i++) {
		INIT_LIST_HEAD(&(precvframe->u.list));
		list_add_tail(&(precvframe->u.list),
			      &(precvpriv->free_recv_queue.queue));
		r8712_os_recv_resource_alloc(padapter, precvframe);
		precvframe->u.hdr.adapter = padapter;
		precvframe++;
	}
	precvpriv->rx_pending_cnt = 1;
	return r8712_init_recv_priv(precvpriv, padapter);
}

/* Release the frame pool, then let the non-underscore counterpart
 * (defined elsewhere) undo its own setup. */
void _r8712_free_recv_priv(struct recv_priv *precvpriv)
{
	kfree(precvpriv->pallocated_frame_buf);
	r8712_free_recv_priv(precvpriv);
}

/*
 * Pop one recv_frame from the given free queue (NULL when the queue is
 * empty) and, when the queue is an adapter's own free_recv_queue, keep
 * that adapter's free_recvframe_cnt in sync.
 */
union recv_frame *r8712_alloc_recvframe(struct __queue *pfree_recv_queue)
{
	unsigned long irqL;
	union recv_frame *precvframe;
	struct list_head *plist, *phead;
	struct _adapter *padapter;
	struct recv_priv *precvpriv;

	spin_lock_irqsave(&pfree_recv_queue->lock, irqL);
	if (list_empty(&pfree_recv_queue->queue))
		precvframe = NULL;
	else {
		phead = &pfree_recv_queue->queue;
		plist = phead->next;
		precvframe = LIST_CONTAINOR(plist, union recv_frame, u);
		list_del_init(&precvframe->u.hdr.list);
padapter = precvframe->u.hdr.adapter;
		if (padapter != NULL) {
			precvpriv = &padapter->recvpriv;
			if (pfree_recv_queue == &precvpriv->free_recv_queue)
				precvpriv->free_recvframe_cnt--;
		}
	}
	spin_unlock_irqrestore(&pfree_recv_queue->lock, irqL);
	return precvframe;
}

/*
 * caller : defrag; recvframe_chk_defrag in recv_thread (passive)
 * pframequeue: defrag_queue : will be accessed in recv_thread (passive)
 * using spin_lock to protect
 *
 * Drain pframequeue, returning every frame on it to pfree_recv_queue.
 */
void r8712_free_recvframe_queue(struct __queue *pframequeue,
				struct __queue *pfree_recv_queue)
{
	union recv_frame *precvframe;
	struct list_head *plist, *phead;

	spin_lock(&pframequeue->lock);
	phead = &pframequeue->queue;
	plist = phead->next;
	while (end_of_queue_search(phead, plist) == false) {
		/* Advance plist before freeing: r8712_free_recvframe()
		 * unlinks the current node from this list. */
		precvframe = LIST_CONTAINOR(plist, union recv_frame, u);
		plist = plist->next;
		r8712_free_recvframe(precvframe, pfree_recv_queue);
	}
	spin_unlock(&pframequeue->lock);
}

/*
 * Verify the TKIP MIC carried at the tail of a received frame.
 * Frames with any other encrypt type pass through with _SUCCESS; on the
 * TKIP path, _FAIL is returned for a missing group key or (in the
 * continuation of this function) a MIC mismatch.
 */
sint r8712_recvframe_chkmic(struct _adapter *adapter,
			    union recv_frame *precvframe)
{
	sint i, res = _SUCCESS;
	u32 datalen;
	u8 miccode[8];
	u8 bmic_err = false;
	u8 *pframe, *payload, *pframemic;
	u8 *mickey, idx, *iv;
	struct sta_info *stainfo;
	struct rx_pkt_attrib *prxattrib = &precvframe->u.hdr.attrib;
	struct security_priv *psecuritypriv = &adapter->securitypriv;

	stainfo = r8712_get_stainfo(&adapter->stapriv, &prxattrib->ta[0]);
	if (prxattrib->encrypt == _TKIP_) {
		/* calculate mic code */
		if (stainfo != NULL) {
			if (IS_MCAST(prxattrib->ra)) {
				/* Group-addressed frame: select the group RX
				 * MIC key from bits 7:6 of the 4th IV byte;
				 * bail out if no group key is installed. */
				iv = precvframe->u.hdr.rx_data +
				     prxattrib->hdrlen;
				idx = iv[3];
				mickey = &psecuritypriv->XGrprxmickey[(((idx >>
					 6) & 0x3)) - 1].skey[0];
				if (psecuritypriv->binstallGrpkey == false)
					return _FAIL;
			} else
				mickey = &stainfo->tkiprxmickey.skey[0];
			/*icv_len included the mic code*/
			datalen = precvframe->u.hdr.len - prxattrib->hdrlen -
				  prxattrib->iv_len - prxattrib->icv_len - 8;
			pframe = precvframe->u.hdr.rx_data;
			payload = pframe + prxattrib->hdrlen +
				  prxattrib->iv_len;
			seccalctkipmic(mickey, pframe, payload, datalen,
				       &miccode[0], (unsigned
char)prxattrib->priority); pframemic = payload + datalen; bmic_err = false; for (i = 0; i < 8; i++) { if (miccode[i] != *(pframemic + i)) bmic_err = true; } if (bmic_err == true) { if (prxattrib->bdecrypted == true) r8712_handle_tkip_mic_err(adapter, (u8)IS_MCAST(prxattrib->ra)); res = _FAIL; } else { /* mic checked ok */ if ((psecuritypriv->bcheck_grpkey == false) && (IS_MCAST(prxattrib->ra) == true)) psecuritypriv->bcheck_grpkey = true; } recvframe_pull_tail(precvframe, 8); } } return res; } /* decrypt and set the ivlen,icvlen of the recv_frame */ union recv_frame *r8712_decryptor(struct _adapter *padapter, union recv_frame *precv_frame) { struct rx_pkt_attrib *prxattrib = &precv_frame->u.hdr.attrib; struct security_priv *psecuritypriv = &padapter->securitypriv; union recv_frame *return_packet = precv_frame; if ((prxattrib->encrypt > 0) && ((prxattrib->bdecrypted == 0) || (psecuritypriv->sw_decrypt == true))) { psecuritypriv->hw_decrypted = false; switch (prxattrib->encrypt) { case _WEP40_: case _WEP104_: r8712_wep_decrypt(padapter, (u8 *)precv_frame); break; case _TKIP_: r8712_tkip_decrypt(padapter, (u8 *)precv_frame); break; case _AES_: r8712_aes_decrypt(padapter, (u8 *)precv_frame); break; default: break; } } else if (prxattrib->bdecrypted == 1) psecuritypriv->hw_decrypted = true; return return_packet; } /*###set the security information in the recv_frame */ union recv_frame *r8712_portctrl(struct _adapter *adapter, union recv_frame *precv_frame) { u8 *psta_addr, *ptr; uint auth_alg; struct recv_frame_hdr *pfhdr; struct sta_info *psta; struct sta_priv *pstapriv; union recv_frame *prtnframe; u16 ether_type; pstapriv = &adapter->stapriv; ptr = get_recvframe_data(precv_frame); pfhdr = &precv_frame->u.hdr; psta_addr = pfhdr->attrib.ta; psta = r8712_get_stainfo(pstapriv, psta_addr); auth_alg = adapter->securitypriv.AuthAlgrthm; if (auth_alg == 2) { /* get ether_type */ ptr = ptr + pfhdr->attrib.hdrlen + LLC_HEADER_SIZE; memcpy(&ether_type, ptr, 2); ether_type = 
ntohs((unsigned short)ether_type); if ((psta != NULL) && (psta->ieee8021x_blocked)) { /* blocked * only accept EAPOL frame */ if (ether_type == 0x888e) prtnframe = precv_frame; else { /*free this frame*/ r8712_free_recvframe(precv_frame, &adapter->recvpriv.free_recv_queue); prtnframe = NULL; } } else { /* allowed * check decryption status, and decrypt the * frame if needed */ prtnframe = precv_frame; /* check is the EAPOL frame or not (Rekey) */ if (ether_type == 0x888e) { /* check Rekey */ prtnframe = precv_frame; } } } else prtnframe = precv_frame; return prtnframe; } static sint recv_decache(union recv_frame *precv_frame, u8 bretry, struct stainfo_rxcache *prxcache) { sint tid = precv_frame->u.hdr.attrib.priority; u16 seq_ctrl = ((precv_frame->u.hdr.attrib.seq_num&0xffff) << 4) | (precv_frame->u.hdr.attrib.frag_num & 0xf); if (tid > 15) return _FAIL; if (seq_ctrl == prxcache->tid_rxseq[tid]) return _FAIL; prxcache->tid_rxseq[tid] = seq_ctrl; return _SUCCESS; } static sint sta2sta_data_frame(struct _adapter *adapter, union recv_frame *precv_frame, struct sta_info **psta) { u8 *ptr = precv_frame->u.hdr.rx_data; sint ret = _SUCCESS; struct rx_pkt_attrib *pattrib = &precv_frame->u.hdr.attrib; struct sta_priv *pstapriv = &adapter->stapriv; struct mlme_priv *pmlmepriv = &adapter->mlmepriv; u8 *mybssid = get_bssid(pmlmepriv); u8 *myhwaddr = myid(&adapter->eeprompriv); u8 *sta_addr = NULL; sint bmcast = IS_MCAST(pattrib->dst); if ((check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) == true) || (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == true)) { /* filter packets that SA is myself or multicast or broadcast */ if (!memcmp(myhwaddr, pattrib->src, ETH_ALEN)) return _FAIL; if ((memcmp(myhwaddr, pattrib->dst, ETH_ALEN)) && (!bmcast)) return _FAIL; if (is_zero_ether_addr(pattrib->bssid) || is_zero_ether_addr(mybssid) || (memcmp(pattrib->bssid, mybssid, ETH_ALEN))) return _FAIL; sta_addr = pattrib->src; } else if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) == true) { /* 
For Station mode, sa and bssid should always be BSSID, * and DA is my mac-address */ if (memcmp(pattrib->bssid, pattrib->src, ETH_ALEN)) return _FAIL; sta_addr = pattrib->bssid; } else if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == true) { if (bmcast) { /* For AP mode, if DA == MCAST, then BSSID should * be also MCAST */ if (!IS_MCAST(pattrib->bssid)) return _FAIL; } else { /* not mc-frame */ /* For AP mode, if DA is non-MCAST, then it must be * BSSID, and bssid == BSSID */ if (memcmp(pattrib->bssid, pattrib->dst, ETH_ALEN)) return _FAIL; sta_addr = pattrib->src; } } else if (check_fwstate(pmlmepriv, WIFI_MP_STATE) == true) { memcpy(pattrib->dst, GetAddr1Ptr(ptr), ETH_ALEN); memcpy(pattrib->src, GetAddr2Ptr(ptr), ETH_ALEN); memcpy(pattrib->bssid, GetAddr3Ptr(ptr), ETH_ALEN); memcpy(pattrib->ra, pattrib->dst, ETH_ALEN); memcpy(pattrib->ta, pattrib->src, ETH_ALEN); sta_addr = mybssid; } else ret = _FAIL; if (bmcast) *psta = r8712_get_bcmc_stainfo(adapter); else *psta = r8712_get_stainfo(pstapriv, sta_addr); /* get ap_info */ if (*psta == NULL) { if (check_fwstate(pmlmepriv, WIFI_MP_STATE) == true) adapter->mppriv.rx_pktloss++; return _FAIL; } return ret; } static sint ap2sta_data_frame(struct _adapter *adapter, union recv_frame *precv_frame, struct sta_info **psta) { u8 *ptr = precv_frame->u.hdr.rx_data; struct rx_pkt_attrib *pattrib = &precv_frame->u.hdr.attrib; struct sta_priv *pstapriv = &adapter->stapriv; struct mlme_priv *pmlmepriv = &adapter->mlmepriv; u8 *mybssid = get_bssid(pmlmepriv); u8 *myhwaddr = myid(&adapter->eeprompriv); sint bmcast = IS_MCAST(pattrib->dst); if ((check_fwstate(pmlmepriv, WIFI_STATION_STATE) == true) && (check_fwstate(pmlmepriv, _FW_LINKED) == true)) { /* if NULL-frame, drop packet */ if ((GetFrameSubType(ptr)) == WIFI_DATA_NULL) return _FAIL; /* drop QoS-SubType Data, including QoS NULL, * excluding QoS-Data */ if ((GetFrameSubType(ptr) & WIFI_QOS_DATA_TYPE) == WIFI_QOS_DATA_TYPE) { if (GetFrameSubType(ptr) & (BIT(4) | BIT(5) | 
BIT(6))) return _FAIL; } /* filter packets that SA is myself or multicast or broadcast */ if (!memcmp(myhwaddr, pattrib->src, ETH_ALEN)) return _FAIL; /* da should be for me */ if ((memcmp(myhwaddr, pattrib->dst, ETH_ALEN)) && (!bmcast)) return _FAIL; /* check BSSID */ if (is_zero_ether_addr(pattrib->bssid) || is_zero_ether_addr(mybssid) || (memcmp(pattrib->bssid, mybssid, ETH_ALEN))) return _FAIL; if (bmcast) *psta = r8712_get_bcmc_stainfo(adapter); else *psta = r8712_get_stainfo(pstapriv, pattrib->bssid); if (*psta == NULL) return _FAIL; } else if ((check_fwstate(pmlmepriv, WIFI_MP_STATE) == true) && (check_fwstate(pmlmepriv, _FW_LINKED) == true)) { memcpy(pattrib->dst, GetAddr1Ptr(ptr), ETH_ALEN); memcpy(pattrib->src, GetAddr2Ptr(ptr), ETH_ALEN); memcpy(pattrib->bssid, GetAddr3Ptr(ptr), ETH_ALEN); memcpy(pattrib->ra, pattrib->dst, ETH_ALEN); memcpy(pattrib->ta, pattrib->src, ETH_ALEN); memcpy(pattrib->bssid, mybssid, ETH_ALEN); *psta = r8712_get_stainfo(pstapriv, pattrib->bssid); if (*psta == NULL) return _FAIL; } else return _FAIL; return _SUCCESS; } static sint sta2ap_data_frame(struct _adapter *adapter, union recv_frame *precv_frame, struct sta_info **psta) { struct rx_pkt_attrib *pattrib = &precv_frame->u.hdr.attrib; struct sta_priv *pstapriv = &adapter->stapriv; struct mlme_priv *pmlmepriv = &adapter->mlmepriv; unsigned char *mybssid = get_bssid(pmlmepriv); if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == true) { /* For AP mode, if DA is non-MCAST, then it must be BSSID, * and bssid == BSSID * For AP mode, RA=BSSID, TX=STA(SRC_ADDR), A3=DST_ADDR */ if (memcmp(pattrib->bssid, mybssid, ETH_ALEN)) return _FAIL; *psta = r8712_get_stainfo(pstapriv, pattrib->src); if (*psta == NULL) return _FAIL; } return _SUCCESS; } static sint validate_recv_ctrl_frame(struct _adapter *adapter, union recv_frame *precv_frame) { return _FAIL; } static sint validate_recv_mgnt_frame(struct _adapter *adapter, union recv_frame *precv_frame) { return _FAIL; } static sint 
validate_recv_data_frame(struct _adapter *adapter, union recv_frame *precv_frame) { int res; u8 bretry; u8 *psa, *pda, *pbssid; struct sta_info *psta = NULL; u8 *ptr = precv_frame->u.hdr.rx_data; struct rx_pkt_attrib *pattrib = &precv_frame->u.hdr.attrib; struct security_priv *psecuritypriv = &adapter->securitypriv; bretry = GetRetry(ptr); pda = get_da(ptr); psa = get_sa(ptr); pbssid = get_hdr_bssid(ptr); if (pbssid == NULL) return _FAIL; memcpy(pattrib->dst, pda, ETH_ALEN); memcpy(pattrib->src, psa, ETH_ALEN); memcpy(pattrib->bssid, pbssid, ETH_ALEN); switch (pattrib->to_fr_ds) { case 0: memcpy(pattrib->ra, pda, ETH_ALEN); memcpy(pattrib->ta, psa, ETH_ALEN); res = sta2sta_data_frame(adapter, precv_frame, &psta); break; case 1: memcpy(pattrib->ra, pda, ETH_ALEN); memcpy(pattrib->ta, pbssid, ETH_ALEN); res = ap2sta_data_frame(adapter, precv_frame, &psta); break; case 2: memcpy(pattrib->ra, pbssid, ETH_ALEN); memcpy(pattrib->ta, psa, ETH_ALEN); res = sta2ap_data_frame(adapter, precv_frame, &psta); break; case 3: memcpy(pattrib->ra, GetAddr1Ptr(ptr), ETH_ALEN); memcpy(pattrib->ta, GetAddr2Ptr(ptr), ETH_ALEN); return _FAIL; default: return _FAIL; } if (res == _FAIL) return _FAIL; if (psta == NULL) return _FAIL; precv_frame->u.hdr.psta = psta; pattrib->amsdu = 0; /* parsing QC field */ if (pattrib->qos == 1) { pattrib->priority = GetPriority((ptr + 24)); pattrib->ack_policy = GetAckpolicy((ptr + 24)); pattrib->amsdu = GetAMsdu((ptr + 24)); pattrib->hdrlen = pattrib->to_fr_ds == 3 ? 32 : 26; } else { pattrib->priority = 0; pattrib->hdrlen = (pattrib->to_fr_ds == 3) ? 
30 : 24; } if (pattrib->order)/*HT-CTRL 11n*/ pattrib->hdrlen += 4; precv_frame->u.hdr.preorder_ctrl = &psta->recvreorder_ctrl[pattrib->priority]; /* decache, drop duplicate recv packets */ if (recv_decache(precv_frame, bretry, &psta->sta_recvpriv.rxcache) == _FAIL) return _FAIL; if (pattrib->privacy) { GET_ENCRY_ALGO(psecuritypriv, psta, pattrib->encrypt, IS_MCAST(pattrib->ra)); SET_ICE_IV_LEN(pattrib->iv_len, pattrib->icv_len, pattrib->encrypt); } else { pattrib->encrypt = 0; pattrib->iv_len = pattrib->icv_len = 0; } return _SUCCESS; } sint r8712_validate_recv_frame(struct _adapter *adapter, union recv_frame *precv_frame) { /*shall check frame subtype, to / from ds, da, bssid */ /*then call check if rx seq/frag. duplicated.*/ u8 type; u8 subtype; sint retval = _SUCCESS; struct rx_pkt_attrib *pattrib = &precv_frame->u.hdr.attrib; u8 *ptr = precv_frame->u.hdr.rx_data; u8 ver = (unsigned char)(*ptr) & 0x3; /*add version chk*/ if (ver != 0) return _FAIL; type = GetFrameType(ptr); subtype = GetFrameSubType(ptr); /*bit(7)~bit(2)*/ pattrib->to_fr_ds = get_tofr_ds(ptr); pattrib->frag_num = GetFragNum(ptr); pattrib->seq_num = GetSequence(ptr); pattrib->pw_save = GetPwrMgt(ptr); pattrib->mfrag = GetMFrag(ptr); pattrib->mdata = GetMData(ptr); pattrib->privacy = GetPrivacy(ptr); pattrib->order = GetOrder(ptr); switch (type) { case WIFI_MGT_TYPE: /*mgnt*/ retval = validate_recv_mgnt_frame(adapter, precv_frame); break; case WIFI_CTRL_TYPE:/*ctrl*/ retval = validate_recv_ctrl_frame(adapter, precv_frame); break; case WIFI_DATA_TYPE: /*data*/ pattrib->qos = (subtype & BIT(7)) ? 
1 : 0; retval = validate_recv_data_frame(adapter, precv_frame); break; default: return _FAIL; } return retval; } sint r8712_wlanhdr_to_ethhdr(union recv_frame *precvframe) { /*remove the wlanhdr and add the eth_hdr*/ sint rmv_len; u16 len; u8 bsnaphdr; u8 *psnap_type; struct ieee80211_snap_hdr *psnap; struct _adapter *adapter = precvframe->u.hdr.adapter; struct mlme_priv *pmlmepriv = &adapter->mlmepriv; u8 *ptr = get_recvframe_data(precvframe); /*point to frame_ctrl field*/ struct rx_pkt_attrib *pattrib = &precvframe->u.hdr.attrib; if (pattrib->encrypt) recvframe_pull_tail(precvframe, pattrib->icv_len); psnap = (struct ieee80211_snap_hdr *)(ptr + pattrib->hdrlen + pattrib->iv_len); psnap_type = ptr + pattrib->hdrlen + pattrib->iv_len + SNAP_SIZE; /* convert hdr + possible LLC headers into Ethernet header */ if ((!memcmp(psnap, (void *)rfc1042_header, SNAP_SIZE) && (memcmp(psnap_type, (void *)SNAP_ETH_TYPE_IPX, 2)) && (memcmp(psnap_type, (void *)SNAP_ETH_TYPE_APPLETALK_AARP, 2))) || !memcmp(psnap, (void *)bridge_tunnel_header, SNAP_SIZE)) { /* remove RFC1042 or Bridge-Tunnel encapsulation and * replace EtherType */ bsnaphdr = true; } else { /* Leave Ethernet header part of hdr and full payload */ bsnaphdr = false; } rmv_len = pattrib->hdrlen + pattrib->iv_len + (bsnaphdr ? SNAP_SIZE : 0); len = precvframe->u.hdr.len - rmv_len; if (check_fwstate(pmlmepriv, WIFI_MP_STATE) == true) { ptr += rmv_len; *ptr = 0x87; *(ptr+1) = 0x12; /* append rx status for mp test packets */ ptr = recvframe_pull(precvframe, (rmv_len - sizeof(struct ethhdr) + 2) - 24); memcpy(ptr, get_rxmem(precvframe), 24); ptr += 24; } else ptr = recvframe_pull(precvframe, (rmv_len - sizeof(struct ethhdr) + (bsnaphdr ? 
2 : 0))); memcpy(ptr, pattrib->dst, ETH_ALEN); memcpy(ptr+ETH_ALEN, pattrib->src, ETH_ALEN); if (!bsnaphdr) { len = htons(len); memcpy(ptr + 12, &len, 2); } return _SUCCESS; } s32 r8712_recv_entry(union recv_frame *precvframe) { struct _adapter *padapter; struct recv_priv *precvpriv; s32 ret = _SUCCESS; padapter = precvframe->u.hdr.adapter; precvpriv = &(padapter->recvpriv); padapter->ledpriv.LedControlHandler(padapter, LED_CTL_RX); ret = recv_func(padapter, precvframe); if (ret == _FAIL) goto _recv_entry_drop; precvpriv->rx_pkts++; precvpriv->rx_bytes += (uint)(precvframe->u.hdr.rx_tail - precvframe->u.hdr.rx_data); return ret; _recv_entry_drop: precvpriv->rx_drop++; padapter->mppriv.rx_pktloss = precvpriv->rx_drop; return ret; }
gpl-2.0
PaoloW8/android_kernel_nubia_nx505j
drivers/gpu/drm/nouveau/nvd0_display.c
1901
58041
/* * Copyright 2011 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: Ben Skeggs */ #include <linux/dma-mapping.h> #include "drmP.h" #include "drm_crtc_helper.h" #include "nouveau_drv.h" #include "nouveau_connector.h" #include "nouveau_encoder.h" #include "nouveau_crtc.h" #include "nouveau_dma.h" #include "nouveau_fb.h" #include "nv50_display.h" #define EVO_DMA_NR 9 #define EVO_MASTER (0x00) #define EVO_FLIP(c) (0x01 + (c)) #define EVO_OVLY(c) (0x05 + (c)) #define EVO_OIMM(c) (0x09 + (c)) #define EVO_CURS(c) (0x0d + (c)) /* offsets in shared sync bo of various structures */ #define EVO_SYNC(c, o) ((c) * 0x0100 + (o)) #define EVO_MAST_NTFY EVO_SYNC( 0, 0x00) #define EVO_FLIP_SEM0(c) EVO_SYNC((c), 0x00) #define EVO_FLIP_SEM1(c) EVO_SYNC((c), 0x10) struct evo { int idx; dma_addr_t handle; u32 *ptr; struct { u32 offset; u16 value; } sem; }; struct nvd0_display { struct nouveau_gpuobj *mem; struct nouveau_bo *sync; struct evo evo[9]; struct tasklet_struct tasklet; u32 modeset; }; static struct nvd0_display * nvd0_display(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; return dev_priv->engine.display.priv; } static struct drm_crtc * nvd0_display_crtc_get(struct drm_encoder *encoder) { return nouveau_encoder(encoder)->crtc; } /****************************************************************************** * EVO channel helpers *****************************************************************************/ static inline int evo_icmd(struct drm_device *dev, int id, u32 mthd, u32 data) { int ret = 0; nv_mask(dev, 0x610700 + (id * 0x10), 0x00000001, 0x00000001); nv_wr32(dev, 0x610704 + (id * 0x10), data); nv_mask(dev, 0x610704 + (id * 0x10), 0x80000ffc, 0x80000000 | mthd); if (!nv_wait(dev, 0x610704 + (id * 0x10), 0x80000000, 0x00000000)) ret = -EBUSY; nv_mask(dev, 0x610700 + (id * 0x10), 0x00000001, 0x00000000); return ret; } static u32 * evo_wait(struct drm_device *dev, int id, int nr) { struct nvd0_display *disp = nvd0_display(dev); u32 put = nv_rd32(dev, 0x640000 + (id * 0x1000)) / 4; if (put + 
nr >= (PAGE_SIZE / 4)) { disp->evo[id].ptr[put] = 0x20000000; nv_wr32(dev, 0x640000 + (id * 0x1000), 0x00000000); if (!nv_wait(dev, 0x640004 + (id * 0x1000), ~0, 0x00000000)) { NV_ERROR(dev, "evo %d dma stalled\n", id); return NULL; } put = 0; } if (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO) NV_INFO(dev, "Evo%d: %p START\n", id, disp->evo[id].ptr + put); return disp->evo[id].ptr + put; } static void evo_kick(u32 *push, struct drm_device *dev, int id) { struct nvd0_display *disp = nvd0_display(dev); if (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO) { u32 curp = nv_rd32(dev, 0x640000 + (id * 0x1000)) >> 2; u32 *cur = disp->evo[id].ptr + curp; while (cur < push) NV_INFO(dev, "Evo%d: 0x%08x\n", id, *cur++); NV_INFO(dev, "Evo%d: %p KICK!\n", id, push); } nv_wr32(dev, 0x640000 + (id * 0x1000), (push - disp->evo[id].ptr) << 2); } #define evo_mthd(p,m,s) *((p)++) = (((s) << 18) | (m)) #define evo_data(p,d) *((p)++) = (d) static int evo_init_dma(struct drm_device *dev, int ch) { struct nvd0_display *disp = nvd0_display(dev); u32 flags; flags = 0x00000000; if (ch == EVO_MASTER) flags |= 0x01000000; nv_wr32(dev, 0x610494 + (ch * 0x0010), (disp->evo[ch].handle >> 8) | 3); nv_wr32(dev, 0x610498 + (ch * 0x0010), 0x00010000); nv_wr32(dev, 0x61049c + (ch * 0x0010), 0x00000001); nv_mask(dev, 0x610490 + (ch * 0x0010), 0x00000010, 0x00000010); nv_wr32(dev, 0x640000 + (ch * 0x1000), 0x00000000); nv_wr32(dev, 0x610490 + (ch * 0x0010), 0x00000013 | flags); if (!nv_wait(dev, 0x610490 + (ch * 0x0010), 0x80000000, 0x00000000)) { NV_ERROR(dev, "PDISP: ch%d 0x%08x\n", ch, nv_rd32(dev, 0x610490 + (ch * 0x0010))); return -EBUSY; } nv_mask(dev, 0x610090, (1 << ch), (1 << ch)); nv_mask(dev, 0x6100a0, (1 << ch), (1 << ch)); return 0; } static void evo_fini_dma(struct drm_device *dev, int ch) { if (!(nv_rd32(dev, 0x610490 + (ch * 0x0010)) & 0x00000010)) return; nv_mask(dev, 0x610490 + (ch * 0x0010), 0x00000010, 0x00000000); nv_mask(dev, 0x610490 + (ch * 0x0010), 0x00000003, 0x00000000); nv_wait(dev, 
0x610490 + (ch * 0x0010), 0x80000000, 0x00000000); nv_mask(dev, 0x610090, (1 << ch), 0x00000000); nv_mask(dev, 0x6100a0, (1 << ch), 0x00000000); } static inline void evo_piow(struct drm_device *dev, int ch, u16 mthd, u32 data) { nv_wr32(dev, 0x640000 + (ch * 0x1000) + mthd, data); } static int evo_init_pio(struct drm_device *dev, int ch) { nv_wr32(dev, 0x610490 + (ch * 0x0010), 0x00000001); if (!nv_wait(dev, 0x610490 + (ch * 0x0010), 0x00010000, 0x00010000)) { NV_ERROR(dev, "PDISP: ch%d 0x%08x\n", ch, nv_rd32(dev, 0x610490 + (ch * 0x0010))); return -EBUSY; } nv_mask(dev, 0x610090, (1 << ch), (1 << ch)); nv_mask(dev, 0x6100a0, (1 << ch), (1 << ch)); return 0; } static void evo_fini_pio(struct drm_device *dev, int ch) { if (!(nv_rd32(dev, 0x610490 + (ch * 0x0010)) & 0x00000001)) return; nv_mask(dev, 0x610490 + (ch * 0x0010), 0x00000010, 0x00000010); nv_mask(dev, 0x610490 + (ch * 0x0010), 0x00000001, 0x00000000); nv_wait(dev, 0x610490 + (ch * 0x0010), 0x00010000, 0x00000000); nv_mask(dev, 0x610090, (1 << ch), 0x00000000); nv_mask(dev, 0x6100a0, (1 << ch), 0x00000000); } static bool evo_sync_wait(void *data) { return nouveau_bo_rd32(data, EVO_MAST_NTFY) != 0x00000000; } static int evo_sync(struct drm_device *dev, int ch) { struct nvd0_display *disp = nvd0_display(dev); u32 *push = evo_wait(dev, ch, 8); if (push) { nouveau_bo_wr32(disp->sync, EVO_MAST_NTFY, 0x00000000); evo_mthd(push, 0x0084, 1); evo_data(push, 0x80000000 | EVO_MAST_NTFY); evo_mthd(push, 0x0080, 2); evo_data(push, 0x00000000); evo_data(push, 0x00000000); evo_kick(push, dev, ch); if (nv_wait_cb(dev, evo_sync_wait, disp->sync)) return 0; } return -EBUSY; } /****************************************************************************** * Page flipping channel *****************************************************************************/ struct nouveau_bo * nvd0_display_crtc_sema(struct drm_device *dev, int crtc) { return nvd0_display(dev)->sync; } void nvd0_display_flip_stop(struct drm_crtc *crtc) { struct 
nvd0_display *disp = nvd0_display(crtc->dev); struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); struct evo *evo = &disp->evo[EVO_FLIP(nv_crtc->index)]; u32 *push; push = evo_wait(crtc->dev, evo->idx, 8); if (push) { evo_mthd(push, 0x0084, 1); evo_data(push, 0x00000000); evo_mthd(push, 0x0094, 1); evo_data(push, 0x00000000); evo_mthd(push, 0x00c0, 1); evo_data(push, 0x00000000); evo_mthd(push, 0x0080, 1); evo_data(push, 0x00000000); evo_kick(push, crtc->dev, evo->idx); } } int nvd0_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb, struct nouveau_channel *chan, u32 swap_interval) { struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb); struct nvd0_display *disp = nvd0_display(crtc->dev); struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); struct evo *evo = &disp->evo[EVO_FLIP(nv_crtc->index)]; u64 offset; u32 *push; int ret; evo_sync(crtc->dev, EVO_MASTER); swap_interval <<= 4; if (swap_interval == 0) swap_interval |= 0x100; push = evo_wait(crtc->dev, evo->idx, 128); if (unlikely(push == NULL)) return -EBUSY; /* synchronise with the rendering channel, if necessary */ if (likely(chan)) { ret = RING_SPACE(chan, 10); if (ret) return ret; offset = chan->dispc_vma[nv_crtc->index].offset; offset += evo->sem.offset; BEGIN_NVC0(chan, 2, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4); OUT_RING (chan, upper_32_bits(offset)); OUT_RING (chan, lower_32_bits(offset)); OUT_RING (chan, 0xf00d0000 | evo->sem.value); OUT_RING (chan, 0x1002); BEGIN_NVC0(chan, 2, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4); OUT_RING (chan, upper_32_bits(offset)); OUT_RING (chan, lower_32_bits(offset ^ 0x10)); OUT_RING (chan, 0x74b1e000); OUT_RING (chan, 0x1001); FIRE_RING (chan); } else { nouveau_bo_wr32(disp->sync, evo->sem.offset / 4, 0xf00d0000 | evo->sem.value); evo_sync(crtc->dev, EVO_MASTER); } /* queue the flip */ evo_mthd(push, 0x0100, 1); evo_data(push, 0xfffe0000); evo_mthd(push, 0x0084, 1); evo_data(push, swap_interval); if (!(swap_interval & 0x00000100)) { evo_mthd(push, 
0x00e0, 1); evo_data(push, 0x40000000); } evo_mthd(push, 0x0088, 4); evo_data(push, evo->sem.offset); evo_data(push, 0xf00d0000 | evo->sem.value); evo_data(push, 0x74b1e000); evo_data(push, NvEvoSync); evo_mthd(push, 0x00a0, 2); evo_data(push, 0x00000000); evo_data(push, 0x00000000); evo_mthd(push, 0x00c0, 1); evo_data(push, nv_fb->r_dma); evo_mthd(push, 0x0110, 2); evo_data(push, 0x00000000); evo_data(push, 0x00000000); evo_mthd(push, 0x0400, 5); evo_data(push, nv_fb->nvbo->bo.offset >> 8); evo_data(push, 0); evo_data(push, (fb->height << 16) | fb->width); evo_data(push, nv_fb->r_pitch); evo_data(push, nv_fb->r_format); evo_mthd(push, 0x0080, 1); evo_data(push, 0x00000000); evo_kick(push, crtc->dev, evo->idx); evo->sem.offset ^= 0x10; evo->sem.value++; return 0; } /****************************************************************************** * CRTC *****************************************************************************/ static int nvd0_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool update) { struct drm_nouveau_private *dev_priv = nv_crtc->base.dev->dev_private; struct drm_device *dev = nv_crtc->base.dev; struct nouveau_connector *nv_connector; struct drm_connector *connector; u32 *push, mode = 0x00; u32 mthd; nv_connector = nouveau_crtc_connector_get(nv_crtc); connector = &nv_connector->base; if (nv_connector->dithering_mode == DITHERING_MODE_AUTO) { if (nv_crtc->base.fb->depth > connector->display_info.bpc * 3) mode = DITHERING_MODE_DYNAMIC2X2; } else { mode = nv_connector->dithering_mode; } if (nv_connector->dithering_depth == DITHERING_DEPTH_AUTO) { if (connector->display_info.bpc >= 8) mode |= DITHERING_DEPTH_8BPC; } else { mode |= nv_connector->dithering_depth; } if (dev_priv->card_type < NV_E0) mthd = 0x0490 + (nv_crtc->index * 0x0300); else mthd = 0x04a0 + (nv_crtc->index * 0x0300); push = evo_wait(dev, EVO_MASTER, 4); if (push) { evo_mthd(push, mthd, 1); evo_data(push, mode); if (update) { evo_mthd(push, 0x0080, 1); evo_data(push, 0x00000000); } 
evo_kick(push, dev, EVO_MASTER); } return 0; } static int nvd0_crtc_set_scale(struct nouveau_crtc *nv_crtc, bool update) { struct drm_display_mode *omode, *umode = &nv_crtc->base.mode; struct drm_device *dev = nv_crtc->base.dev; struct drm_crtc *crtc = &nv_crtc->base; struct nouveau_connector *nv_connector; int mode = DRM_MODE_SCALE_NONE; u32 oX, oY, *push; /* start off at the resolution we programmed the crtc for, this * effectively handles NONE/FULL scaling */ nv_connector = nouveau_crtc_connector_get(nv_crtc); if (nv_connector && nv_connector->native_mode) mode = nv_connector->scaling_mode; if (mode != DRM_MODE_SCALE_NONE) omode = nv_connector->native_mode; else omode = umode; oX = omode->hdisplay; oY = omode->vdisplay; if (omode->flags & DRM_MODE_FLAG_DBLSCAN) oY *= 2; /* add overscan compensation if necessary, will keep the aspect * ratio the same as the backend mode unless overridden by the * user setting both hborder and vborder properties. */ if (nv_connector && ( nv_connector->underscan == UNDERSCAN_ON || (nv_connector->underscan == UNDERSCAN_AUTO && nv_connector->edid && drm_detect_hdmi_monitor(nv_connector->edid)))) { u32 bX = nv_connector->underscan_hborder; u32 bY = nv_connector->underscan_vborder; u32 aspect = (oY << 19) / oX; if (bX) { oX -= (bX * 2); if (bY) oY -= (bY * 2); else oY = ((oX * aspect) + (aspect / 2)) >> 19; } else { oX -= (oX >> 4) + 32; if (bY) oY -= (bY * 2); else oY = ((oX * aspect) + (aspect / 2)) >> 19; } } /* handle CENTER/ASPECT scaling, taking into account the areas * removed already for overscan compensation */ switch (mode) { case DRM_MODE_SCALE_CENTER: oX = min((u32)umode->hdisplay, oX); oY = min((u32)umode->vdisplay, oY); /* fall-through */ case DRM_MODE_SCALE_ASPECT: if (oY < oX) { u32 aspect = (umode->hdisplay << 19) / umode->vdisplay; oX = ((oY * aspect) + (aspect / 2)) >> 19; } else { u32 aspect = (umode->vdisplay << 19) / umode->hdisplay; oY = ((oX * aspect) + (aspect / 2)) >> 19; } break; default: break; } push = 
evo_wait(dev, EVO_MASTER, 8); if (push) { evo_mthd(push, 0x04c0 + (nv_crtc->index * 0x300), 3); evo_data(push, (oY << 16) | oX); evo_data(push, (oY << 16) | oX); evo_data(push, (oY << 16) | oX); evo_mthd(push, 0x0494 + (nv_crtc->index * 0x300), 1); evo_data(push, 0x00000000); evo_mthd(push, 0x04b8 + (nv_crtc->index * 0x300), 1); evo_data(push, (umode->vdisplay << 16) | umode->hdisplay); evo_kick(push, dev, EVO_MASTER); if (update) { nvd0_display_flip_stop(crtc); nvd0_display_flip_next(crtc, crtc->fb, NULL, 1); } } return 0; } static int nvd0_crtc_set_image(struct nouveau_crtc *nv_crtc, struct drm_framebuffer *fb, int x, int y, bool update) { struct nouveau_framebuffer *nvfb = nouveau_framebuffer(fb); u32 *push; push = evo_wait(fb->dev, EVO_MASTER, 16); if (push) { evo_mthd(push, 0x0460 + (nv_crtc->index * 0x300), 1); evo_data(push, nvfb->nvbo->bo.offset >> 8); evo_mthd(push, 0x0468 + (nv_crtc->index * 0x300), 4); evo_data(push, (fb->height << 16) | fb->width); evo_data(push, nvfb->r_pitch); evo_data(push, nvfb->r_format); evo_data(push, nvfb->r_dma); evo_mthd(push, 0x04b0 + (nv_crtc->index * 0x300), 1); evo_data(push, (y << 16) | x); if (update) { evo_mthd(push, 0x0080, 1); evo_data(push, 0x00000000); } evo_kick(push, fb->dev, EVO_MASTER); } nv_crtc->fb.tile_flags = nvfb->r_dma; return 0; } static void nvd0_crtc_cursor_show(struct nouveau_crtc *nv_crtc, bool show, bool update) { struct drm_device *dev = nv_crtc->base.dev; u32 *push = evo_wait(dev, EVO_MASTER, 16); if (push) { if (show) { evo_mthd(push, 0x0480 + (nv_crtc->index * 0x300), 2); evo_data(push, 0x85000000); evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8); evo_mthd(push, 0x048c + (nv_crtc->index * 0x300), 1); evo_data(push, NvEvoVRAM); } else { evo_mthd(push, 0x0480 + (nv_crtc->index * 0x300), 1); evo_data(push, 0x05000000); evo_mthd(push, 0x048c + (nv_crtc->index * 0x300), 1); evo_data(push, 0x00000000); } if (update) { evo_mthd(push, 0x0080, 1); evo_data(push, 0x00000000); } evo_kick(push, dev, 
EVO_MASTER);
	}
}

/* DPMS hook: intentionally a no-op for this hardware generation. */
static void
nvd0_crtc_dpms(struct drm_crtc *crtc, int mode)
{
}

/* Stop flipping, detach the head's surface/LUT and hide the cursor ahead of
 * a modeset; the state is restored by nvd0_crtc_commit(). */
static void
nvd0_crtc_prepare(struct drm_crtc *crtc)
{
	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
	u32 *push;

	nvd0_display_flip_stop(crtc);

	/* NOTE(review): evo_wait() is asked for 2 here but 6 dwords are
	 * pushed before the kick -- confirm the size argument's semantics. */
	push = evo_wait(crtc->dev, EVO_MASTER, 2);
	if (push) {
		evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1);
		evo_data(push, 0x00000000);
		evo_mthd(push, 0x0440 + (nv_crtc->index * 0x300), 1);
		evo_data(push, 0x03000000);
		evo_mthd(push, 0x045c + (nv_crtc->index * 0x300), 1);
		evo_data(push, 0x00000000);
		evo_kick(push, crtc->dev, EVO_MASTER);
	}

	nvd0_crtc_cursor_show(nv_crtc, false, false);
}

/* Re-attach the framebuffer context DMA and LUT, restore cursor visibility
 * and restart page flipping after a modeset. */
static void
nvd0_crtc_commit(struct drm_crtc *crtc)
{
	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
	u32 *push;

	push = evo_wait(crtc->dev, EVO_MASTER, 32);
	if (push) {
		evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1);
		evo_data(push, nv_crtc->fb.tile_flags);
		evo_mthd(push, 0x0440 + (nv_crtc->index * 0x300), 4);
		evo_data(push, 0x83000000);
		evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8);
		evo_data(push, 0x00000000);
		evo_data(push, 0x00000000);
		evo_mthd(push, 0x045c + (nv_crtc->index * 0x300), 1);
		evo_data(push, NvEvoVRAM);
		evo_mthd(push, 0x0430 + (nv_crtc->index * 0x300), 1);
		evo_data(push, 0xffffff00);
		evo_kick(push, crtc->dev, EVO_MASTER);
	}

	nvd0_crtc_cursor_show(nv_crtc, nv_crtc->cursor.visible, true);
	nvd0_display_flip_next(crtc, crtc->fb, NULL, 1);
}

/* No mode fixups needed; accept the requested mode unchanged. */
static bool
nvd0_crtc_mode_fixup(struct drm_crtc *crtc, struct drm_display_mode *mode,
		     struct drm_display_mode *adjusted_mode)
{
	return true;
}

/* Pin the current framebuffer's bo into VRAM and, on success, unpin the bo
 * of the framebuffer being replaced (if any). */
static int
nvd0_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb)
{
	struct nouveau_framebuffer *nvfb = nouveau_framebuffer(crtc->fb);
	int ret;

	ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM);
	if (ret)
		return ret;

	if (old_fb) {
		nvfb = nouveau_framebuffer(old_fb);
		nouveau_bo_unpin(nvfb->nvbo);
	}

	return 0;
}

static int
nvd0_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode,
		   struct drm_display_mode *mode, int x, int y,
struct drm_framebuffer *old_fb) { struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); struct nouveau_connector *nv_connector; u32 ilace = (mode->flags & DRM_MODE_FLAG_INTERLACE) ? 2 : 1; u32 vscan = (mode->flags & DRM_MODE_FLAG_DBLSCAN) ? 2 : 1; u32 hactive, hsynce, hbackp, hfrontp, hblanke, hblanks; u32 vactive, vsynce, vbackp, vfrontp, vblanke, vblanks; u32 vblan2e = 0, vblan2s = 1; u32 *push; int ret; hactive = mode->htotal; hsynce = mode->hsync_end - mode->hsync_start - 1; hbackp = mode->htotal - mode->hsync_end; hblanke = hsynce + hbackp; hfrontp = mode->hsync_start - mode->hdisplay; hblanks = mode->htotal - hfrontp - 1; vactive = mode->vtotal * vscan / ilace; vsynce = ((mode->vsync_end - mode->vsync_start) * vscan / ilace) - 1; vbackp = (mode->vtotal - mode->vsync_end) * vscan / ilace; vblanke = vsynce + vbackp; vfrontp = (mode->vsync_start - mode->vdisplay) * vscan / ilace; vblanks = vactive - vfrontp - 1; if (mode->flags & DRM_MODE_FLAG_INTERLACE) { vblan2e = vactive + vsynce + vbackp; vblan2s = vblan2e + (mode->vdisplay * vscan / ilace); vactive = (vactive * 2) + 1; } ret = nvd0_crtc_swap_fbs(crtc, old_fb); if (ret) return ret; push = evo_wait(crtc->dev, EVO_MASTER, 64); if (push) { evo_mthd(push, 0x0410 + (nv_crtc->index * 0x300), 6); evo_data(push, 0x00000000); evo_data(push, (vactive << 16) | hactive); evo_data(push, ( vsynce << 16) | hsynce); evo_data(push, (vblanke << 16) | hblanke); evo_data(push, (vblanks << 16) | hblanks); evo_data(push, (vblan2e << 16) | vblan2s); evo_mthd(push, 0x042c + (nv_crtc->index * 0x300), 1); evo_data(push, 0x00000000); /* ??? */ evo_mthd(push, 0x0450 + (nv_crtc->index * 0x300), 3); evo_data(push, mode->clock * 1000); evo_data(push, 0x00200000); /* ??? 
*/
		evo_data(push, mode->clock * 1000);
		evo_mthd(push, 0x04d0 + (nv_crtc->index * 0x300), 2);
		evo_data(push, 0x00000311);
		evo_data(push, 0x00000100);
		evo_kick(push, crtc->dev, EVO_MASTER);
	}

	/* Program dithering, scaling and the scanout surface without an
	 * update request; the caller completes the modeset. */
	nv_connector = nouveau_crtc_connector_get(nv_crtc);
	nvd0_crtc_set_dither(nv_crtc, false);
	nvd0_crtc_set_scale(nv_crtc, false);
	nvd0_crtc_set_image(nv_crtc, crtc->fb, x, y, false);
	return 0;
}

/* Move the scanout to a new framebuffer/position without a full modeset:
 * pin the new fb, stop flipping, program the image, then restart flips. */
static int
nvd0_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
			struct drm_framebuffer *old_fb)
{
	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
	int ret;

	/* No framebuffer bound is treated as a successful no-op. */
	if (!crtc->fb) {
		NV_DEBUG_KMS(crtc->dev, "No FB bound\n");
		return 0;
	}

	ret = nvd0_crtc_swap_fbs(crtc, old_fb);
	if (ret)
		return ret;

	nvd0_display_flip_stop(crtc);
	nvd0_crtc_set_image(nv_crtc, crtc->fb, x, y, true);
	nvd0_display_flip_next(crtc, crtc->fb, NULL, 1);
	return 0;
}

/* Atomic variant (e.g. kdb/panic paths): programs the image immediately,
 * without pinning the fb or restarting page flipping. */
static int
nvd0_crtc_mode_set_base_atomic(struct drm_crtc *crtc,
			       struct drm_framebuffer *fb, int x, int y,
			       enum mode_set_atomic state)
{
	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
	nvd0_display_flip_stop(crtc);
	nvd0_crtc_set_image(nv_crtc, fb, x, y, true);
	return 0;
}

/* Copy the 256-entry software gamma table into the hardware LUT buffer.
 * Each hardware entry is 0x20 bytes; R/G/B live at byte offsets 0/2/4 as
 * 16-bit words, with the 16-bit component scaled down by 2 bits and biased
 * by 0x6000. */
static void
nvd0_crtc_lut_load(struct drm_crtc *crtc)
{
	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
	void __iomem *lut = nvbo_kmap_obj_iovirtual(nv_crtc->lut.nvbo);
	int i;

	for (i = 0; i < 256; i++) {
		writew(0x6000 + (nv_crtc->lut.r[i] >> 2), lut + (i * 0x20) + 0);
		writew(0x6000 + (nv_crtc->lut.g[i] >> 2), lut + (i * 0x20) + 2);
		writew(0x6000 + (nv_crtc->lut.b[i] >> 2), lut + (i * 0x20) + 4);
	}
}

/* Install (handle != 0) or remove (handle == 0) the hardware cursor image.
 * Only 64x64 cursors are accepted; the user bo's contents are copied
 * word-by-word into this head's private cursor bo. */
static int
nvd0_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
		     uint32_t handle, uint32_t width, uint32_t height)
{
	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct drm_gem_object *gem;
	struct nouveau_bo *nvbo;
	bool visible = (handle != 0);
	int i, ret = 0;

	if (visible) {
		if (width != 64 || height != 64)
			return -EINVAL;

		gem = drm_gem_object_lookup(dev, file_priv, handle);
		if (unlikely(!gem))
			return -ENOENT;
		nvbo =
nouveau_gem_object(gem); ret = nouveau_bo_map(nvbo); if (ret == 0) { for (i = 0; i < 64 * 64; i++) { u32 v = nouveau_bo_rd32(nvbo, i); nouveau_bo_wr32(nv_crtc->cursor.nvbo, i, v); } nouveau_bo_unmap(nvbo); } drm_gem_object_unreference_unlocked(gem); } if (visible != nv_crtc->cursor.visible) { nvd0_crtc_cursor_show(nv_crtc, visible, true); nv_crtc->cursor.visible = visible; } return ret; } static int nvd0_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) { struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); int ch = EVO_CURS(nv_crtc->index); evo_piow(crtc->dev, ch, 0x0084, (y << 16) | (x & 0xffff)); evo_piow(crtc->dev, ch, 0x0080, 0x00000000); return 0; } static void nvd0_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, uint32_t start, uint32_t size) { struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); u32 end = max(start + size, (u32)256); u32 i; for (i = start; i < end; i++) { nv_crtc->lut.r[i] = r[i]; nv_crtc->lut.g[i] = g[i]; nv_crtc->lut.b[i] = b[i]; } nvd0_crtc_lut_load(crtc); } static void nvd0_crtc_destroy(struct drm_crtc *crtc) { struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); nouveau_bo_unmap(nv_crtc->cursor.nvbo); nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo); nouveau_bo_unmap(nv_crtc->lut.nvbo); nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo); drm_crtc_cleanup(crtc); kfree(crtc); } static const struct drm_crtc_helper_funcs nvd0_crtc_hfunc = { .dpms = nvd0_crtc_dpms, .prepare = nvd0_crtc_prepare, .commit = nvd0_crtc_commit, .mode_fixup = nvd0_crtc_mode_fixup, .mode_set = nvd0_crtc_mode_set, .mode_set_base = nvd0_crtc_mode_set_base, .mode_set_base_atomic = nvd0_crtc_mode_set_base_atomic, .load_lut = nvd0_crtc_lut_load, }; static const struct drm_crtc_funcs nvd0_crtc_func = { .cursor_set = nvd0_crtc_cursor_set, .cursor_move = nvd0_crtc_cursor_move, .gamma_set = nvd0_crtc_gamma_set, .set_config = drm_crtc_helper_set_config, .destroy = nvd0_crtc_destroy, .page_flip = nouveau_crtc_page_flip, }; static void nvd0_cursor_set_pos(struct nouveau_crtc 
*nv_crtc, int x, int y) { } static void nvd0_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset) { } static int nvd0_crtc_create(struct drm_device *dev, int index) { struct nouveau_crtc *nv_crtc; struct drm_crtc *crtc; int ret, i; nv_crtc = kzalloc(sizeof(*nv_crtc), GFP_KERNEL); if (!nv_crtc) return -ENOMEM; nv_crtc->index = index; nv_crtc->set_dither = nvd0_crtc_set_dither; nv_crtc->set_scale = nvd0_crtc_set_scale; nv_crtc->cursor.set_offset = nvd0_cursor_set_offset; nv_crtc->cursor.set_pos = nvd0_cursor_set_pos; for (i = 0; i < 256; i++) { nv_crtc->lut.r[i] = i << 8; nv_crtc->lut.g[i] = i << 8; nv_crtc->lut.b[i] = i << 8; } crtc = &nv_crtc->base; drm_crtc_init(dev, crtc, &nvd0_crtc_func); drm_crtc_helper_add(crtc, &nvd0_crtc_hfunc); drm_mode_crtc_set_gamma_size(crtc, 256); ret = nouveau_bo_new(dev, 64 * 64 * 4, 0x100, TTM_PL_FLAG_VRAM, 0, 0x0000, &nv_crtc->cursor.nvbo); if (!ret) { ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM); if (!ret) ret = nouveau_bo_map(nv_crtc->cursor.nvbo); if (ret) nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo); } if (ret) goto out; ret = nouveau_bo_new(dev, 8192, 0x100, TTM_PL_FLAG_VRAM, 0, 0x0000, &nv_crtc->lut.nvbo); if (!ret) { ret = nouveau_bo_pin(nv_crtc->lut.nvbo, TTM_PL_FLAG_VRAM); if (!ret) ret = nouveau_bo_map(nv_crtc->lut.nvbo); if (ret) nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo); } if (ret) goto out; nvd0_crtc_lut_load(crtc); out: if (ret) nvd0_crtc_destroy(crtc); return ret; } /****************************************************************************** * DAC *****************************************************************************/ static void nvd0_dac_dpms(struct drm_encoder *encoder, int mode) { struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); struct drm_device *dev = encoder->dev; int or = nv_encoder->or; u32 dpms_ctrl; dpms_ctrl = 0x80000000; if (mode == DRM_MODE_DPMS_STANDBY || mode == DRM_MODE_DPMS_OFF) dpms_ctrl |= 0x00000001; if (mode == DRM_MODE_DPMS_SUSPEND || mode == 
DRM_MODE_DPMS_OFF) dpms_ctrl |= 0x00000004; nv_wait(dev, 0x61a004 + (or * 0x0800), 0x80000000, 0x00000000); nv_mask(dev, 0x61a004 + (or * 0x0800), 0xc000007f, dpms_ctrl); nv_wait(dev, 0x61a004 + (or * 0x0800), 0x80000000, 0x00000000); } static bool nvd0_dac_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); struct nouveau_connector *nv_connector; nv_connector = nouveau_encoder_connector_get(nv_encoder); if (nv_connector && nv_connector->native_mode) { if (nv_connector->scaling_mode != DRM_MODE_SCALE_NONE) { int id = adjusted_mode->base.id; *adjusted_mode = *nv_connector->native_mode; adjusted_mode->base.id = id; } } return true; } static void nvd0_dac_commit(struct drm_encoder *encoder) { } static void nvd0_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc); u32 syncs, magic, *push; syncs = 0x00000001; if (mode->flags & DRM_MODE_FLAG_NHSYNC) syncs |= 0x00000008; if (mode->flags & DRM_MODE_FLAG_NVSYNC) syncs |= 0x00000010; magic = 0x31ec6000 | (nv_crtc->index << 25); if (mode->flags & DRM_MODE_FLAG_INTERLACE) magic |= 0x00000001; nvd0_dac_dpms(encoder, DRM_MODE_DPMS_ON); push = evo_wait(encoder->dev, EVO_MASTER, 8); if (push) { evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 2); evo_data(push, syncs); evo_data(push, magic); evo_mthd(push, 0x0180 + (nv_encoder->or * 0x020), 2); evo_data(push, 1 << nv_crtc->index); evo_data(push, 0x00ff); evo_kick(push, encoder->dev, EVO_MASTER); } nv_encoder->crtc = encoder->crtc; } static void nvd0_dac_disconnect(struct drm_encoder *encoder) { struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); struct drm_device *dev = encoder->dev; u32 *push; if (nv_encoder->crtc) { nvd0_crtc_prepare(nv_encoder->crtc); push = 
evo_wait(dev, EVO_MASTER, 4); if (push) { evo_mthd(push, 0x0180 + (nv_encoder->or * 0x20), 1); evo_data(push, 0x00000000); evo_mthd(push, 0x0080, 1); evo_data(push, 0x00000000); evo_kick(push, dev, EVO_MASTER); } nv_encoder->crtc = NULL; } } static enum drm_connector_status nvd0_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector) { enum drm_connector_status status = connector_status_disconnected; struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); struct drm_device *dev = encoder->dev; int or = nv_encoder->or; u32 load; nv_wr32(dev, 0x61a00c + (or * 0x800), 0x00100000); udelay(9500); nv_wr32(dev, 0x61a00c + (or * 0x800), 0x80000000); load = nv_rd32(dev, 0x61a00c + (or * 0x800)); if ((load & 0x38000000) == 0x38000000) status = connector_status_connected; nv_wr32(dev, 0x61a00c + (or * 0x800), 0x00000000); return status; } static void nvd0_dac_destroy(struct drm_encoder *encoder) { drm_encoder_cleanup(encoder); kfree(encoder); } static const struct drm_encoder_helper_funcs nvd0_dac_hfunc = { .dpms = nvd0_dac_dpms, .mode_fixup = nvd0_dac_mode_fixup, .prepare = nvd0_dac_disconnect, .commit = nvd0_dac_commit, .mode_set = nvd0_dac_mode_set, .disable = nvd0_dac_disconnect, .get_crtc = nvd0_display_crtc_get, .detect = nvd0_dac_detect }; static const struct drm_encoder_funcs nvd0_dac_func = { .destroy = nvd0_dac_destroy, }; static int nvd0_dac_create(struct drm_connector *connector, struct dcb_entry *dcbe) { struct drm_device *dev = connector->dev; struct nouveau_encoder *nv_encoder; struct drm_encoder *encoder; nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL); if (!nv_encoder) return -ENOMEM; nv_encoder->dcb = dcbe; nv_encoder->or = ffs(dcbe->or) - 1; encoder = to_drm_encoder(nv_encoder); encoder->possible_crtcs = dcbe->heads; encoder->possible_clones = 0; drm_encoder_init(dev, encoder, &nvd0_dac_func, DRM_MODE_ENCODER_DAC); drm_encoder_helper_add(encoder, &nvd0_dac_hfunc); drm_mode_connector_attach_encoder(connector, encoder); return 0; 
} /****************************************************************************** * Audio *****************************************************************************/ static void nvd0_audio_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode) { struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); struct nouveau_connector *nv_connector; struct drm_device *dev = encoder->dev; int i, or = nv_encoder->or * 0x30; nv_connector = nouveau_encoder_connector_get(nv_encoder); if (!drm_detect_monitor_audio(nv_connector->edid)) return; nv_mask(dev, 0x10ec10 + or, 0x80000003, 0x80000001); drm_edid_to_eld(&nv_connector->base, nv_connector->edid); if (nv_connector->base.eld[0]) { u8 *eld = nv_connector->base.eld; for (i = 0; i < eld[2] * 4; i++) nv_wr32(dev, 0x10ec00 + or, (i << 8) | eld[i]); for (i = eld[2] * 4; i < 0x60; i++) nv_wr32(dev, 0x10ec00 + or, (i << 8) | 0x00); nv_mask(dev, 0x10ec10 + or, 0x80000002, 0x80000002); } } static void nvd0_audio_disconnect(struct drm_encoder *encoder) { struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); struct drm_device *dev = encoder->dev; int or = nv_encoder->or * 0x30; nv_mask(dev, 0x10ec10 + or, 0x80000003, 0x80000000); } /****************************************************************************** * HDMI *****************************************************************************/ static void nvd0_hdmi_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode) { struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc); struct nouveau_connector *nv_connector; struct drm_device *dev = encoder->dev; int head = nv_crtc->index * 0x800; u32 rekey = 56; /* binary driver, and tegra constant */ u32 max_ac_packet; nv_connector = nouveau_encoder_connector_get(nv_encoder); if (!drm_detect_hdmi_monitor(nv_connector->edid)) return; max_ac_packet = mode->htotal - mode->hdisplay; max_ac_packet -= rekey; max_ac_packet -= 18; /* constant 
from tegra */ max_ac_packet /= 32; /* AVI InfoFrame */ nv_mask(dev, 0x616714 + head, 0x00000001, 0x00000000); nv_wr32(dev, 0x61671c + head, 0x000d0282); nv_wr32(dev, 0x616720 + head, 0x0000006f); nv_wr32(dev, 0x616724 + head, 0x00000000); nv_wr32(dev, 0x616728 + head, 0x00000000); nv_wr32(dev, 0x61672c + head, 0x00000000); nv_mask(dev, 0x616714 + head, 0x00000001, 0x00000001); /* ??? InfoFrame? */ nv_mask(dev, 0x6167a4 + head, 0x00000001, 0x00000000); nv_wr32(dev, 0x6167ac + head, 0x00000010); nv_mask(dev, 0x6167a4 + head, 0x00000001, 0x00000001); /* HDMI_CTRL */ nv_mask(dev, 0x616798 + head, 0x401f007f, 0x40000000 | rekey | max_ac_packet << 16); /* NFI, audio doesn't work without it though.. */ nv_mask(dev, 0x616548 + head, 0x00000070, 0x00000000); nvd0_audio_mode_set(encoder, mode); } static void nvd0_hdmi_disconnect(struct drm_encoder *encoder) { struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc); struct drm_device *dev = encoder->dev; int head = nv_crtc->index * 0x800; nvd0_audio_disconnect(encoder); nv_mask(dev, 0x616798 + head, 0x40000000, 0x00000000); nv_mask(dev, 0x6167a4 + head, 0x00000001, 0x00000000); nv_mask(dev, 0x616714 + head, 0x00000001, 0x00000000); } /****************************************************************************** * SOR *****************************************************************************/ static inline u32 nvd0_sor_dp_lane_map(struct drm_device *dev, struct dcb_entry *dcb, u8 lane) { static const u8 nvd0[] = { 16, 8, 0, 24 }; return nvd0[lane]; } static void nvd0_sor_dp_train_set(struct drm_device *dev, struct dcb_entry *dcb, u8 pattern) { const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1); const u32 loff = (or * 0x800) + (link * 0x80); nv_mask(dev, 0x61c110 + loff, 0x0f0f0f0f, 0x01010101 * pattern); } static void nvd0_sor_dp_train_adj(struct drm_device *dev, struct dcb_entry *dcb, u8 lane, u8 swing, u8 preem) { const u32 or = 
ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1); const u32 loff = (or * 0x800) + (link * 0x80); u32 shift = nvd0_sor_dp_lane_map(dev, dcb, lane); u32 mask = 0x000000ff << shift; u8 *table, *entry, *config = NULL; switch (swing) { case 0: preem += 0; break; case 1: preem += 4; break; case 2: preem += 7; break; case 3: preem += 9; break; } table = nouveau_dp_bios_data(dev, dcb, &entry); if (table) { if (table[0] == 0x30) { config = entry + table[4]; config += table[5] * preem; } else if (table[0] == 0x40) { config = table + table[1]; config += table[2] * table[3]; config += table[6] * preem; } } if (!config) { NV_ERROR(dev, "PDISP: unsupported DP table for chipset\n"); return; } nv_mask(dev, 0x61c118 + loff, mask, config[1] << shift); nv_mask(dev, 0x61c120 + loff, mask, config[2] << shift); nv_mask(dev, 0x61c130 + loff, 0x0000ff00, config[3] << 8); nv_mask(dev, 0x61c13c + loff, 0x00000000, 0x00000000); } static void nvd0_sor_dp_link_set(struct drm_device *dev, struct dcb_entry *dcb, int crtc, int link_nr, u32 link_bw, bool enhframe) { const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1); const u32 loff = (or * 0x800) + (link * 0x80); const u32 soff = (or * 0x800); u32 dpctrl = nv_rd32(dev, 0x61c10c + loff) & ~0x001f4000; u32 clksor = nv_rd32(dev, 0x612300 + soff) & ~0x007c0000; u32 script = 0x0000, lane_mask = 0; u8 *table, *entry; int i; link_bw /= 27000; table = nouveau_dp_bios_data(dev, dcb, &entry); if (table) { if (table[0] == 0x30) entry = ROMPTR(dev, entry[10]); else if (table[0] == 0x40) entry = ROMPTR(dev, entry[9]); else entry = NULL; while (entry) { if (entry[0] >= link_bw) break; entry += 3; } nouveau_bios_run_init_table(dev, script, dcb, crtc); } clksor |= link_bw << 18; dpctrl |= ((1 << link_nr) - 1) << 16; if (enhframe) dpctrl |= 0x00004000; for (i = 0; i < link_nr; i++) lane_mask |= 1 << (nvd0_sor_dp_lane_map(dev, dcb, i) >> 3); nv_wr32(dev, 0x612300 + soff, clksor); nv_wr32(dev, 0x61c10c + loff, dpctrl); nv_mask(dev, 0x61c130 + loff, 
0x0000000f, lane_mask);
}

/* Read back the currently-programmed DP lane count and link bandwidth for
 * this SOR/link.  The bandwidth is returned in the same 27000-based unit
 * that nvd0_sor_dp_link_set() divides by (link_bw /= 27000). */
static void
nvd0_sor_dp_link_get(struct drm_device *dev, struct dcb_entry *dcb,
		     u32 *link_nr, u32 *link_bw)
{
	const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
	const u32 loff = (or * 0x800) + (link * 0x80);
	const u32 soff = (or * 0x800);
	u32 dpctrl = nv_rd32(dev, 0x61c10c + loff) & 0x000f0000;
	u32 clksor = nv_rd32(dev, 0x612300 + soff);

	/* Lane-enable mask in bits 16..19 -> 1, 2 or 4 active lanes. */
	if (dpctrl > 0x00030000) *link_nr = 4;
	else if (dpctrl > 0x00010000) *link_nr = 2;
	else *link_nr = 1;

	*link_bw = (clksor & 0x007c0000) >> 18;
	*link_bw *= 27000;
}

/* Compute and program the DP transfer-unit (TU) fill value for a head:
 * ratio = datarate * symbol / (link_nr * link_bw), then
 * value = ((symbol - ratio) * TU * ratio) / symbol / symbol + 5,
 * OR'd with 0x08000000 and written to the head's 0x616610 register.
 * 64-bit intermediates with do_div() avoid overflow in the products. */
static void
nvd0_sor_dp_calc_tu(struct drm_device *dev, struct dcb_entry *dcb,
		    u32 crtc, u32 datarate)
{
	const u32 symbol = 100000;
	const u32 TU = 64;
	u32 link_nr, link_bw;
	u64 ratio, value;

	nvd0_sor_dp_link_get(dev, dcb, &link_nr, &link_bw);

	ratio  = datarate;
	ratio *= symbol;
	do_div(ratio, link_nr * link_bw);

	value  = (symbol - ratio) * TU;
	value *= ratio;
	do_div(value, symbol);
	do_div(value, symbol);

	value += 5;
	value |= 0x08000000;

	nv_wr32(dev, 0x616610 + (crtc * 0x800), value);
}

/* SOR DPMS: power the SOR up/down, but refuse to power down while a TMDS
 * partner encoder sharing the same OR is still on.  For DP outputs, also
 * run the DP link (re)training via nouveau_dp_dpms(). */
static void
nvd0_sor_dpms(struct drm_encoder *encoder, int mode)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct drm_device *dev = encoder->dev;
	struct drm_encoder *partner;
	int or = nv_encoder->or;
	u32 dpms_ctrl;

	nv_encoder->last_dpms = mode;
	list_for_each_entry(partner, &dev->mode_config.encoder_list, head) {
		struct nouveau_encoder *nv_partner = nouveau_encoder(partner);

		if (partner->encoder_type != DRM_MODE_ENCODER_TMDS)
			continue;

		if (nv_partner != nv_encoder &&
		    nv_partner->dcb->or == nv_encoder->dcb->or) {
			/* Shared OR still active: leave the hardware alone. */
			if (nv_partner->last_dpms == DRM_MODE_DPMS_ON)
				return;
			break;
		}
	}

	dpms_ctrl  = (mode == DRM_MODE_DPMS_ON);
	dpms_ctrl |= 0x80000000;

	/* Wait for any pending update, apply the new state, wait again. */
	nv_wait(dev, 0x61c004 + (or * 0x0800), 0x80000000, 0x00000000);
	nv_mask(dev, 0x61c004 + (or * 0x0800), 0x80000001, dpms_ctrl);
	nv_wait(dev, 0x61c004 + (or * 0x0800), 0x80000000, 0x00000000);
	nv_wait(dev, 0x61c030 + (or * 0x0800), 0x10000000, 0x00000000);

	if (nv_encoder->dcb->type == OUTPUT_DP) {
struct dp_train_func func = { .link_set = nvd0_sor_dp_link_set, .train_set = nvd0_sor_dp_train_set, .train_adj = nvd0_sor_dp_train_adj }; nouveau_dp_dpms(encoder, mode, nv_encoder->dp.datarate, &func); } } static bool nvd0_sor_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); struct nouveau_connector *nv_connector; nv_connector = nouveau_encoder_connector_get(nv_encoder); if (nv_connector && nv_connector->native_mode) { if (nv_connector->scaling_mode != DRM_MODE_SCALE_NONE) { int id = adjusted_mode->base.id; *adjusted_mode = *nv_connector->native_mode; adjusted_mode->base.id = id; } } return true; } static void nvd0_sor_disconnect(struct drm_encoder *encoder) { struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); struct drm_device *dev = encoder->dev; u32 *push; if (nv_encoder->crtc) { nvd0_crtc_prepare(nv_encoder->crtc); push = evo_wait(dev, EVO_MASTER, 4); if (push) { evo_mthd(push, 0x0200 + (nv_encoder->or * 0x20), 1); evo_data(push, 0x00000000); evo_mthd(push, 0x0080, 1); evo_data(push, 0x00000000); evo_kick(push, dev, EVO_MASTER); } nvd0_hdmi_disconnect(encoder); nv_encoder->crtc = NULL; nv_encoder->last_dpms = DRM_MODE_DPMS_OFF; } } static void nvd0_sor_prepare(struct drm_encoder *encoder) { nvd0_sor_disconnect(encoder); if (nouveau_encoder(encoder)->dcb->type == OUTPUT_DP) evo_sync(encoder->dev, EVO_MASTER); } static void nvd0_sor_commit(struct drm_encoder *encoder) { } static void nvd0_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode, struct drm_display_mode *mode) { struct drm_device *dev = encoder->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc); struct nouveau_connector *nv_connector; struct nvbios *bios = &dev_priv->vbios; u32 mode_ctrl = (1 << nv_crtc->index); 
u32 syncs, magic, *push; u32 or_config; syncs = 0x00000001; if (mode->flags & DRM_MODE_FLAG_NHSYNC) syncs |= 0x00000008; if (mode->flags & DRM_MODE_FLAG_NVSYNC) syncs |= 0x00000010; magic = 0x31ec6000 | (nv_crtc->index << 25); if (mode->flags & DRM_MODE_FLAG_INTERLACE) magic |= 0x00000001; nv_connector = nouveau_encoder_connector_get(nv_encoder); switch (nv_encoder->dcb->type) { case OUTPUT_TMDS: if (nv_encoder->dcb->sorconf.link & 1) { if (mode->clock < 165000) mode_ctrl |= 0x00000100; else mode_ctrl |= 0x00000500; } else { mode_ctrl |= 0x00000200; } or_config = (mode_ctrl & 0x00000f00) >> 8; if (mode->clock >= 165000) or_config |= 0x0100; nvd0_hdmi_mode_set(encoder, mode); break; case OUTPUT_LVDS: or_config = (mode_ctrl & 0x00000f00) >> 8; if (bios->fp_no_ddc) { if (bios->fp.dual_link) or_config |= 0x0100; if (bios->fp.if_is_24bit) or_config |= 0x0200; } else { if (nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) { if (((u8 *)nv_connector->edid)[121] == 2) or_config |= 0x0100; } else if (mode->clock >= bios->fp.duallink_transition_clk) { or_config |= 0x0100; } if (or_config & 0x0100) { if (bios->fp.strapless_is_24bit & 2) or_config |= 0x0200; } else { if (bios->fp.strapless_is_24bit & 1) or_config |= 0x0200; } if (nv_connector->base.display_info.bpc == 8) or_config |= 0x0200; } break; case OUTPUT_DP: if (nv_connector->base.display_info.bpc == 6) { nv_encoder->dp.datarate = mode->clock * 18 / 8; syncs |= 0x00000140; } else { nv_encoder->dp.datarate = mode->clock * 24 / 8; syncs |= 0x00000180; } if (nv_encoder->dcb->sorconf.link & 1) mode_ctrl |= 0x00000800; else mode_ctrl |= 0x00000900; or_config = (mode_ctrl & 0x00000f00) >> 8; break; default: BUG_ON(1); break; } nvd0_sor_dpms(encoder, DRM_MODE_DPMS_ON); if (nv_encoder->dcb->type == OUTPUT_DP) { nvd0_sor_dp_calc_tu(dev, nv_encoder->dcb, nv_crtc->index, nv_encoder->dp.datarate); } push = evo_wait(dev, EVO_MASTER, 8); if (push) { evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 2); evo_data(push, syncs); 
evo_data(push, magic); evo_mthd(push, 0x0200 + (nv_encoder->or * 0x020), 2); evo_data(push, mode_ctrl); evo_data(push, or_config); evo_kick(push, dev, EVO_MASTER); } nv_encoder->crtc = encoder->crtc; } static void nvd0_sor_destroy(struct drm_encoder *encoder) { drm_encoder_cleanup(encoder); kfree(encoder); } static const struct drm_encoder_helper_funcs nvd0_sor_hfunc = { .dpms = nvd0_sor_dpms, .mode_fixup = nvd0_sor_mode_fixup, .prepare = nvd0_sor_prepare, .commit = nvd0_sor_commit, .mode_set = nvd0_sor_mode_set, .disable = nvd0_sor_disconnect, .get_crtc = nvd0_display_crtc_get, }; static const struct drm_encoder_funcs nvd0_sor_func = { .destroy = nvd0_sor_destroy, }; static int nvd0_sor_create(struct drm_connector *connector, struct dcb_entry *dcbe) { struct drm_device *dev = connector->dev; struct nouveau_encoder *nv_encoder; struct drm_encoder *encoder; nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL); if (!nv_encoder) return -ENOMEM; nv_encoder->dcb = dcbe; nv_encoder->or = ffs(dcbe->or) - 1; nv_encoder->last_dpms = DRM_MODE_DPMS_OFF; encoder = to_drm_encoder(nv_encoder); encoder->possible_crtcs = dcbe->heads; encoder->possible_clones = 0; drm_encoder_init(dev, encoder, &nvd0_sor_func, DRM_MODE_ENCODER_TMDS); drm_encoder_helper_add(encoder, &nvd0_sor_hfunc); drm_mode_connector_attach_encoder(connector, encoder); return 0; } /****************************************************************************** * IRQ *****************************************************************************/ static struct dcb_entry * lookup_dcb(struct drm_device *dev, int id, u32 mc) { struct drm_nouveau_private *dev_priv = dev->dev_private; int type, or, i, link = -1; if (id < 4) { type = OUTPUT_ANALOG; or = id; } else { switch (mc & 0x00000f00) { case 0x00000000: link = 0; type = OUTPUT_LVDS; break; case 0x00000100: link = 0; type = OUTPUT_TMDS; break; case 0x00000200: link = 1; type = OUTPUT_TMDS; break; case 0x00000500: link = 0; type = OUTPUT_TMDS; break; case 0x00000800: 
link = 0; type = OUTPUT_DP; break; case 0x00000900: link = 1; type = OUTPUT_DP; break; default: NV_ERROR(dev, "PDISP: unknown SOR mc 0x%08x\n", mc); return NULL; } or = id - 4; } for (i = 0; i < dev_priv->vbios.dcb.entries; i++) { struct dcb_entry *dcb = &dev_priv->vbios.dcb.entry[i]; if (dcb->type == type && (dcb->or & (1 << or)) && (link < 0 || link == !(dcb->sorconf.link & 1))) return dcb; } NV_ERROR(dev, "PDISP: DCB for %d/0x%08x not found\n", id, mc); return NULL; } static void nvd0_display_unk1_handler(struct drm_device *dev, u32 crtc, u32 mask) { struct dcb_entry *dcb; int i; for (i = 0; mask && i < 8; i++) { u32 mcc = nv_rd32(dev, 0x640180 + (i * 0x20)); if (!(mcc & (1 << crtc))) continue; dcb = lookup_dcb(dev, i, mcc); if (!dcb) continue; nouveau_bios_run_display_table(dev, 0x0000, -1, dcb, crtc); } nv_wr32(dev, 0x6101d4, 0x00000000); nv_wr32(dev, 0x6109d4, 0x00000000); nv_wr32(dev, 0x6101d0, 0x80000000); } static void nvd0_display_unk2_handler(struct drm_device *dev, u32 crtc, u32 mask) { struct dcb_entry *dcb; u32 or, tmp, pclk; int i; for (i = 0; mask && i < 8; i++) { u32 mcc = nv_rd32(dev, 0x640180 + (i * 0x20)); if (!(mcc & (1 << crtc))) continue; dcb = lookup_dcb(dev, i, mcc); if (!dcb) continue; nouveau_bios_run_display_table(dev, 0x0000, -2, dcb, crtc); } pclk = nv_rd32(dev, 0x660450 + (crtc * 0x300)) / 1000; NV_DEBUG_KMS(dev, "PDISP: crtc %d pclk %d mask 0x%08x\n", crtc, pclk, mask); if (pclk && (mask & 0x00010000)) { nv50_crtc_set_clock(dev, crtc, pclk); } for (i = 0; mask && i < 8; i++) { u32 mcp = nv_rd32(dev, 0x660180 + (i * 0x20)); u32 cfg = nv_rd32(dev, 0x660184 + (i * 0x20)); if (!(mcp & (1 << crtc))) continue; dcb = lookup_dcb(dev, i, mcp); if (!dcb) continue; or = ffs(dcb->or) - 1; nouveau_bios_run_display_table(dev, cfg, pclk, dcb, crtc); nv_wr32(dev, 0x612200 + (crtc * 0x800), 0x00000000); switch (dcb->type) { case OUTPUT_ANALOG: nv_wr32(dev, 0x612280 + (or * 0x800), 0x00000000); break; case OUTPUT_TMDS: case OUTPUT_LVDS: case 
OUTPUT_DP: if (cfg & 0x00000100) tmp = 0x00000101; else tmp = 0x00000000; nv_mask(dev, 0x612300 + (or * 0x800), 0x00000707, tmp); break; default: break; } break; } nv_wr32(dev, 0x6101d4, 0x00000000); nv_wr32(dev, 0x6109d4, 0x00000000); nv_wr32(dev, 0x6101d0, 0x80000000); } static void nvd0_display_unk4_handler(struct drm_device *dev, u32 crtc, u32 mask) { struct dcb_entry *dcb; int pclk, i; pclk = nv_rd32(dev, 0x660450 + (crtc * 0x300)) / 1000; for (i = 0; mask && i < 8; i++) { u32 mcp = nv_rd32(dev, 0x660180 + (i * 0x20)); u32 cfg = nv_rd32(dev, 0x660184 + (i * 0x20)); if (!(mcp & (1 << crtc))) continue; dcb = lookup_dcb(dev, i, mcp); if (!dcb) continue; nouveau_bios_run_display_table(dev, cfg, -pclk, dcb, crtc); } nv_wr32(dev, 0x6101d4, 0x00000000); nv_wr32(dev, 0x6109d4, 0x00000000); nv_wr32(dev, 0x6101d0, 0x80000000); } static void nvd0_display_bh(unsigned long data) { struct drm_device *dev = (struct drm_device *)data; struct nvd0_display *disp = nvd0_display(dev); u32 mask = 0, crtc = ~0; int i; if (drm_debug & (DRM_UT_DRIVER | DRM_UT_KMS)) { NV_INFO(dev, "PDISP: modeset req %d\n", disp->modeset); NV_INFO(dev, " STAT: 0x%08x 0x%08x 0x%08x\n", nv_rd32(dev, 0x6101d0), nv_rd32(dev, 0x6101d4), nv_rd32(dev, 0x6109d4)); for (i = 0; i < 8; i++) { NV_INFO(dev, " %s%d: 0x%08x 0x%08x\n", i < 4 ? 
"DAC" : "SOR", i, nv_rd32(dev, 0x640180 + (i * 0x20)), nv_rd32(dev, 0x660180 + (i * 0x20))); } } while (!mask && ++crtc < dev->mode_config.num_crtc) mask = nv_rd32(dev, 0x6101d4 + (crtc * 0x800)); if (disp->modeset & 0x00000001) nvd0_display_unk1_handler(dev, crtc, mask); if (disp->modeset & 0x00000002) nvd0_display_unk2_handler(dev, crtc, mask); if (disp->modeset & 0x00000004) nvd0_display_unk4_handler(dev, crtc, mask); } static void nvd0_display_intr(struct drm_device *dev) { struct nvd0_display *disp = nvd0_display(dev); u32 intr = nv_rd32(dev, 0x610088); int i; if (intr & 0x00000001) { u32 stat = nv_rd32(dev, 0x61008c); nv_wr32(dev, 0x61008c, stat); intr &= ~0x00000001; } if (intr & 0x00000002) { u32 stat = nv_rd32(dev, 0x61009c); int chid = ffs(stat) - 1; if (chid >= 0) { u32 mthd = nv_rd32(dev, 0x6101f0 + (chid * 12)); u32 data = nv_rd32(dev, 0x6101f4 + (chid * 12)); u32 unkn = nv_rd32(dev, 0x6101f8 + (chid * 12)); NV_INFO(dev, "EvoCh: chid %d mthd 0x%04x data 0x%08x " "0x%08x 0x%08x\n", chid, (mthd & 0x0000ffc), data, mthd, unkn); nv_wr32(dev, 0x61009c, (1 << chid)); nv_wr32(dev, 0x6101f0 + (chid * 12), 0x90000000); } intr &= ~0x00000002; } if (intr & 0x00100000) { u32 stat = nv_rd32(dev, 0x6100ac); if (stat & 0x00000007) { disp->modeset = stat; tasklet_schedule(&disp->tasklet); nv_wr32(dev, 0x6100ac, (stat & 0x00000007)); stat &= ~0x00000007; } if (stat) { NV_INFO(dev, "PDISP: unknown intr24 0x%08x\n", stat); nv_wr32(dev, 0x6100ac, stat); } intr &= ~0x00100000; } for (i = 0; i < dev->mode_config.num_crtc; i++) { u32 mask = 0x01000000 << i; if (intr & mask) { u32 stat = nv_rd32(dev, 0x6100bc + (i * 0x800)); nv_wr32(dev, 0x6100bc + (i * 0x800), stat); intr &= ~mask; } } if (intr) NV_INFO(dev, "PDISP: unknown intr 0x%08x\n", intr); } /****************************************************************************** * Init *****************************************************************************/ void nvd0_display_fini(struct drm_device *dev) { int i; /* fini 
cursors + overlays + flips */ for (i = 1; i >= 0; i--) { evo_fini_pio(dev, EVO_CURS(i)); evo_fini_pio(dev, EVO_OIMM(i)); evo_fini_dma(dev, EVO_OVLY(i)); evo_fini_dma(dev, EVO_FLIP(i)); } /* fini master */ evo_fini_dma(dev, EVO_MASTER); } int nvd0_display_init(struct drm_device *dev) { struct nvd0_display *disp = nvd0_display(dev); int ret, i; u32 *push; if (nv_rd32(dev, 0x6100ac) & 0x00000100) { nv_wr32(dev, 0x6100ac, 0x00000100); nv_mask(dev, 0x6194e8, 0x00000001, 0x00000000); if (!nv_wait(dev, 0x6194e8, 0x00000002, 0x00000000)) { NV_ERROR(dev, "PDISP: 0x6194e8 0x%08x\n", nv_rd32(dev, 0x6194e8)); return -EBUSY; } } /* nfi what these are exactly, i do know that SOR_MODE_CTRL won't * work at all unless you do the SOR part below. */ for (i = 0; i < 3; i++) { u32 dac = nv_rd32(dev, 0x61a000 + (i * 0x800)); nv_wr32(dev, 0x6101c0 + (i * 0x800), dac); } for (i = 0; i < 4; i++) { u32 sor = nv_rd32(dev, 0x61c000 + (i * 0x800)); nv_wr32(dev, 0x6301c4 + (i * 0x800), sor); } for (i = 0; i < dev->mode_config.num_crtc; i++) { u32 crtc0 = nv_rd32(dev, 0x616104 + (i * 0x800)); u32 crtc1 = nv_rd32(dev, 0x616108 + (i * 0x800)); u32 crtc2 = nv_rd32(dev, 0x61610c + (i * 0x800)); nv_wr32(dev, 0x6101b4 + (i * 0x800), crtc0); nv_wr32(dev, 0x6101b8 + (i * 0x800), crtc1); nv_wr32(dev, 0x6101bc + (i * 0x800), crtc2); } /* point at our hash table / objects, enable interrupts */ nv_wr32(dev, 0x610010, (disp->mem->vinst >> 8) | 9); nv_mask(dev, 0x6100b0, 0x00000307, 0x00000307); /* init master */ ret = evo_init_dma(dev, EVO_MASTER); if (ret) goto error; /* init flips + overlays + cursors */ for (i = 0; i < dev->mode_config.num_crtc; i++) { if ((ret = evo_init_dma(dev, EVO_FLIP(i))) || (ret = evo_init_dma(dev, EVO_OVLY(i))) || (ret = evo_init_pio(dev, EVO_OIMM(i))) || (ret = evo_init_pio(dev, EVO_CURS(i)))) goto error; } push = evo_wait(dev, EVO_MASTER, 32); if (!push) { ret = -EBUSY; goto error; } evo_mthd(push, 0x0088, 1); evo_data(push, NvEvoSync); evo_mthd(push, 0x0084, 1); evo_data(push, 
0x00000000); evo_mthd(push, 0x0084, 1); evo_data(push, 0x80000000); evo_mthd(push, 0x008c, 1); evo_data(push, 0x00000000); evo_kick(push, dev, EVO_MASTER); error: if (ret) nvd0_display_fini(dev); return ret; } void nvd0_display_destroy(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nvd0_display *disp = nvd0_display(dev); struct pci_dev *pdev = dev->pdev; int i; for (i = 0; i < EVO_DMA_NR; i++) { struct evo *evo = &disp->evo[i]; pci_free_consistent(pdev, PAGE_SIZE, evo->ptr, evo->handle); } nouveau_gpuobj_ref(NULL, &disp->mem); nouveau_bo_unmap(disp->sync); nouveau_bo_ref(NULL, &disp->sync); nouveau_irq_unregister(dev, 26); dev_priv->engine.display.priv = NULL; kfree(disp); } int nvd0_display_create(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem; struct dcb_table *dcb = &dev_priv->vbios.dcb; struct drm_connector *connector, *tmp; struct pci_dev *pdev = dev->pdev; struct nvd0_display *disp; struct dcb_entry *dcbe; int crtcs, ret, i; disp = kzalloc(sizeof(*disp), GFP_KERNEL); if (!disp) return -ENOMEM; dev_priv->engine.display.priv = disp; /* create crtc objects to represent the hw heads */ crtcs = nv_rd32(dev, 0x022448); for (i = 0; i < crtcs; i++) { ret = nvd0_crtc_create(dev, i); if (ret) goto out; } /* create encoder/connector objects based on VBIOS DCB table */ for (i = 0, dcbe = &dcb->entry[0]; i < dcb->entries; i++, dcbe++) { connector = nouveau_connector_create(dev, dcbe->connector); if (IS_ERR(connector)) continue; if (dcbe->location != DCB_LOC_ON_CHIP) { NV_WARN(dev, "skipping off-chip encoder %d/%d\n", dcbe->type, ffs(dcbe->or) - 1); continue; } switch (dcbe->type) { case OUTPUT_TMDS: case OUTPUT_LVDS: case OUTPUT_DP: nvd0_sor_create(connector, dcbe); break; case OUTPUT_ANALOG: nvd0_dac_create(connector, dcbe); break; default: NV_WARN(dev, "skipping unsupported encoder %d/%d\n", dcbe->type, ffs(dcbe->or) - 1); 
continue; } } /* cull any connectors we created that don't have an encoder */ list_for_each_entry_safe(connector, tmp, &dev->mode_config.connector_list, head) { if (connector->encoder_ids[0]) continue; NV_WARN(dev, "%s has no encoders, removing\n", drm_get_connector_name(connector)); connector->funcs->destroy(connector); } /* setup interrupt handling */ tasklet_init(&disp->tasklet, nvd0_display_bh, (unsigned long)dev); nouveau_irq_register(dev, 26, nvd0_display_intr); /* small shared memory area we use for notifiers and semaphores */ ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM, 0, 0x0000, &disp->sync); if (!ret) { ret = nouveau_bo_pin(disp->sync, TTM_PL_FLAG_VRAM); if (!ret) ret = nouveau_bo_map(disp->sync); if (ret) nouveau_bo_ref(NULL, &disp->sync); } if (ret) goto out; /* hash table and dma objects for the memory areas we care about */ ret = nouveau_gpuobj_new(dev, NULL, 0x4000, 0x10000, NVOBJ_FLAG_ZERO_ALLOC, &disp->mem); if (ret) goto out; /* create evo dma channels */ for (i = 0; i < EVO_DMA_NR; i++) { struct evo *evo = &disp->evo[i]; u64 offset = disp->sync->bo.offset; u32 dmao = 0x1000 + (i * 0x100); u32 hash = 0x0000 + (i * 0x040); evo->idx = i; evo->sem.offset = EVO_SYNC(evo->idx, 0x00); evo->ptr = pci_alloc_consistent(pdev, PAGE_SIZE, &evo->handle); if (!evo->ptr) { ret = -ENOMEM; goto out; } nv_wo32(disp->mem, dmao + 0x00, 0x00000049); nv_wo32(disp->mem, dmao + 0x04, (offset + 0x0000) >> 8); nv_wo32(disp->mem, dmao + 0x08, (offset + 0x0fff) >> 8); nv_wo32(disp->mem, dmao + 0x0c, 0x00000000); nv_wo32(disp->mem, dmao + 0x10, 0x00000000); nv_wo32(disp->mem, dmao + 0x14, 0x00000000); nv_wo32(disp->mem, hash + 0x00, NvEvoSync); nv_wo32(disp->mem, hash + 0x04, 0x00000001 | (i << 27) | ((dmao + 0x00) << 9)); nv_wo32(disp->mem, dmao + 0x20, 0x00000049); nv_wo32(disp->mem, dmao + 0x24, 0x00000000); nv_wo32(disp->mem, dmao + 0x28, (dev_priv->vram_size - 1) >> 8); nv_wo32(disp->mem, dmao + 0x2c, 0x00000000); nv_wo32(disp->mem, dmao + 0x30, 
0x00000000); nv_wo32(disp->mem, dmao + 0x34, 0x00000000); nv_wo32(disp->mem, hash + 0x08, NvEvoVRAM); nv_wo32(disp->mem, hash + 0x0c, 0x00000001 | (i << 27) | ((dmao + 0x20) << 9)); nv_wo32(disp->mem, dmao + 0x40, 0x00000009); nv_wo32(disp->mem, dmao + 0x44, 0x00000000); nv_wo32(disp->mem, dmao + 0x48, (dev_priv->vram_size - 1) >> 8); nv_wo32(disp->mem, dmao + 0x4c, 0x00000000); nv_wo32(disp->mem, dmao + 0x50, 0x00000000); nv_wo32(disp->mem, dmao + 0x54, 0x00000000); nv_wo32(disp->mem, hash + 0x10, NvEvoVRAM_LP); nv_wo32(disp->mem, hash + 0x14, 0x00000001 | (i << 27) | ((dmao + 0x40) << 9)); nv_wo32(disp->mem, dmao + 0x60, 0x0fe00009); nv_wo32(disp->mem, dmao + 0x64, 0x00000000); nv_wo32(disp->mem, dmao + 0x68, (dev_priv->vram_size - 1) >> 8); nv_wo32(disp->mem, dmao + 0x6c, 0x00000000); nv_wo32(disp->mem, dmao + 0x70, 0x00000000); nv_wo32(disp->mem, dmao + 0x74, 0x00000000); nv_wo32(disp->mem, hash + 0x18, NvEvoFB32); nv_wo32(disp->mem, hash + 0x1c, 0x00000001 | (i << 27) | ((dmao + 0x60) << 9)); } pinstmem->flush(dev); out: if (ret) nvd0_display_destroy(dev); return ret; }
gpl-2.0
myjang0507/Polaris-slte-
drivers/mtd/devices/bcm47xxsflash.c
2157
3096
#include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/mtd/mtd.h> #include <linux/platform_device.h> #include <linux/bcma/bcma.h> #include "bcm47xxsflash.h" MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Serial flash driver for BCMA bus"); static const char * const probes[] = { "bcm47xxpart", NULL }; static int bcm47xxsflash_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf) { struct bcm47xxsflash *b47s = mtd->priv; /* Check address range */ if ((from + len) > mtd->size) return -EINVAL; memcpy_fromio(buf, (void __iomem *)KSEG0ADDR(b47s->window + from), len); *retlen = len; return len; } static void bcm47xxsflash_fill_mtd(struct bcm47xxsflash *b47s) { struct mtd_info *mtd = &b47s->mtd; mtd->priv = b47s; mtd->name = "bcm47xxsflash"; mtd->owner = THIS_MODULE; mtd->type = MTD_ROM; mtd->size = b47s->size; mtd->_read = bcm47xxsflash_read; /* TODO: implement writing support and verify/change following code */ mtd->flags = MTD_CAP_ROM; mtd->writebufsize = mtd->writesize = 1; } /************************************************** * BCMA **************************************************/ static int bcm47xxsflash_bcma_probe(struct platform_device *pdev) { struct bcma_sflash *sflash = dev_get_platdata(&pdev->dev); struct bcm47xxsflash *b47s; int err; b47s = kzalloc(sizeof(*b47s), GFP_KERNEL); if (!b47s) { err = -ENOMEM; goto out; } sflash->priv = b47s; b47s->bcma_cc = container_of(sflash, struct bcma_drv_cc, sflash); switch (b47s->bcma_cc->capabilities & BCMA_CC_CAP_FLASHT) { case BCMA_CC_FLASHT_STSER: b47s->type = BCM47XXSFLASH_TYPE_ST; break; case BCMA_CC_FLASHT_ATSER: b47s->type = BCM47XXSFLASH_TYPE_ATMEL; break; } b47s->window = sflash->window; b47s->blocksize = sflash->blocksize; b47s->numblocks = sflash->numblocks; b47s->size = sflash->size; bcm47xxsflash_fill_mtd(b47s); err = mtd_device_parse_register(&b47s->mtd, probes, NULL, NULL, 0); if (err) { pr_err("Failed to register MTD device: %d\n", err); goto 
err_dev_reg; } return 0; err_dev_reg: kfree(&b47s->mtd); out: return err; } static int bcm47xxsflash_bcma_remove(struct platform_device *pdev) { struct bcma_sflash *sflash = dev_get_platdata(&pdev->dev); struct bcm47xxsflash *b47s = sflash->priv; mtd_device_unregister(&b47s->mtd); kfree(b47s); return 0; } static struct platform_driver bcma_sflash_driver = { .probe = bcm47xxsflash_bcma_probe, .remove = bcm47xxsflash_bcma_remove, .driver = { .name = "bcma_sflash", .owner = THIS_MODULE, }, }; /************************************************** * Init **************************************************/ static int __init bcm47xxsflash_init(void) { int err; err = platform_driver_register(&bcma_sflash_driver); if (err) pr_err("Failed to register BCMA serial flash driver: %d\n", err); return err; } static void __exit bcm47xxsflash_exit(void) { platform_driver_unregister(&bcma_sflash_driver); } module_init(bcm47xxsflash_init); module_exit(bcm47xxsflash_exit);
gpl-2.0
hashem78/AGNI-pureCM-P3100
net/ipv4/icmp.c
2157
29387
/* * NET3: Implementation of the ICMP protocol layer. * * Alan Cox, <alan@lxorguk.ukuu.org.uk> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Some of the function names and the icmp unreach table for this * module were derived from [icmp.c 1.0.11 06/02/93] by * Ross Biro, Fred N. van Kempen, Mark Evans, Alan Cox, Gerhard Koerting. * Other than that this module is a complete rewrite. * * Fixes: * Clemens Fruhwirth : introduce global icmp rate limiting * with icmp type masking ability instead * of broken per type icmp timeouts. * Mike Shaver : RFC1122 checks. * Alan Cox : Multicast ping reply as self. * Alan Cox : Fix atomicity lockup in ip_build_xmit * call. * Alan Cox : Added 216,128 byte paths to the MTU * code. * Martin Mares : RFC1812 checks. * Martin Mares : Can be configured to follow redirects * if acting as a router _without_ a * routing protocol (RFC 1812). * Martin Mares : Echo requests may be configured to * be ignored (RFC 1812). * Martin Mares : Limitation of ICMP error message * transmit rate (RFC 1812). * Martin Mares : TOS and Precedence set correctly * (RFC 1812). * Martin Mares : Now copying as much data from the * original packet as we can without * exceeding 576 bytes (RFC 1812). * Willy Konynenberg : Transparent proxying support. * Keith Owens : RFC1191 correction for 4.2BSD based * path MTU bug. * Thomas Quinot : ICMP Dest Unreach codes up to 15 are * valid (RFC 1812). * Andi Kleen : Check all packet lengths properly * and moved all kfree_skb() up to * icmp_rcv. * Andi Kleen : Move the rate limit bookkeeping * into the dest entry and use a token * bucket filter (thanks to ANK). Make * the rates sysctl configurable. 
* Yu Tianli : Fixed two ugly bugs in icmp_send * - IP option length was accounted wrongly * - ICMP header length was not accounted * at all. * Tristan Greaves : Added sysctl option to ignore bogus * broadcast responses from broken routers. * * To Fix: * * - Should use skb_pull() instead of all the manual checking. * This would also greatly simply some upper layer error handlers. --AK * */ #include <linux/module.h> #include <linux/types.h> #include <linux/jiffies.h> #include <linux/kernel.h> #include <linux/fcntl.h> #include <linux/socket.h> #include <linux/in.h> #include <linux/inet.h> #include <linux/inetdevice.h> #include <linux/netdevice.h> #include <linux/string.h> #include <linux/netfilter_ipv4.h> #include <linux/slab.h> #include <net/snmp.h> #include <net/ip.h> #include <net/route.h> #include <net/protocol.h> #include <net/icmp.h> #include <net/tcp.h> #include <net/udp.h> #include <net/raw.h> #include <net/ping.h> #include <linux/skbuff.h> #include <net/sock.h> #include <linux/errno.h> #include <linux/timer.h> #include <linux/init.h> #include <asm/system.h> #include <asm/uaccess.h> #include <net/checksum.h> #include <net/xfrm.h> #include <net/inet_common.h> /* * Build xmit assembly blocks */ struct icmp_bxm { struct sk_buff *skb; int offset; int data_len; struct { struct icmphdr icmph; __be32 times[3]; } data; int head_len; struct ip_options_data replyopts; }; /* An array of errno for error messages from dest unreach. */ /* RFC 1122: 3.2.2.1 States that NET_UNREACH, HOST_UNREACH and SR_FAILED MUST be considered 'transient errs'. 
*/ const struct icmp_err icmp_err_convert[] = { { .errno = ENETUNREACH, /* ICMP_NET_UNREACH */ .fatal = 0, }, { .errno = EHOSTUNREACH, /* ICMP_HOST_UNREACH */ .fatal = 0, }, { .errno = ENOPROTOOPT /* ICMP_PROT_UNREACH */, .fatal = 1, }, { .errno = ECONNREFUSED, /* ICMP_PORT_UNREACH */ .fatal = 1, }, { .errno = EMSGSIZE, /* ICMP_FRAG_NEEDED */ .fatal = 0, }, { .errno = EOPNOTSUPP, /* ICMP_SR_FAILED */ .fatal = 0, }, { .errno = ENETUNREACH, /* ICMP_NET_UNKNOWN */ .fatal = 1, }, { .errno = EHOSTDOWN, /* ICMP_HOST_UNKNOWN */ .fatal = 1, }, { .errno = ENONET, /* ICMP_HOST_ISOLATED */ .fatal = 1, }, { .errno = ENETUNREACH, /* ICMP_NET_ANO */ .fatal = 1, }, { .errno = EHOSTUNREACH, /* ICMP_HOST_ANO */ .fatal = 1, }, { .errno = ENETUNREACH, /* ICMP_NET_UNR_TOS */ .fatal = 0, }, { .errno = EHOSTUNREACH, /* ICMP_HOST_UNR_TOS */ .fatal = 0, }, { .errno = EHOSTUNREACH, /* ICMP_PKT_FILTERED */ .fatal = 1, }, { .errno = EHOSTUNREACH, /* ICMP_PREC_VIOLATION */ .fatal = 1, }, { .errno = EHOSTUNREACH, /* ICMP_PREC_CUTOFF */ .fatal = 1, }, }; EXPORT_SYMBOL(icmp_err_convert); /* * ICMP control array. This specifies what to do with each ICMP. */ struct icmp_control { void (*handler)(struct sk_buff *skb); short error; /* This ICMP is classed as an error message */ }; static const struct icmp_control icmp_pointers[NR_ICMP_TYPES+1]; /* * The ICMP socket(s). This is the most convenient way to flow control * our ICMP output as well as maintain a clean interface throughout * all layers. All Socketless IP sends will soon be gone. * * On SMP we have one ICMP socket per-cpu. */ static struct sock *icmp_sk(struct net *net) { return net->ipv4.icmp_sk[smp_processor_id()]; } static inline struct sock *icmp_xmit_lock(struct net *net) { struct sock *sk; local_bh_disable(); sk = icmp_sk(net); if (unlikely(!spin_trylock(&sk->sk_lock.slock))) { /* This can happen if the output path signals a * dst_link_failure() for an outgoing ICMP packet. 
*/ local_bh_enable(); return NULL; } return sk; } static inline void icmp_xmit_unlock(struct sock *sk) { spin_unlock_bh(&sk->sk_lock.slock); } /* * Send an ICMP frame. */ static inline bool icmpv4_xrlim_allow(struct net *net, struct rtable *rt, struct flowi4 *fl4, int type, int code) { struct dst_entry *dst = &rt->dst; bool rc = true; if (type > NR_ICMP_TYPES) goto out; /* Don't limit PMTU discovery. */ if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) goto out; /* No rate limit on loopback */ if (dst->dev && (dst->dev->flags&IFF_LOOPBACK)) goto out; /* Limit if icmp type is enabled in ratemask. */ if ((1 << type) & net->ipv4.sysctl_icmp_ratemask) { if (!rt->peer) rt_bind_peer(rt, fl4->daddr, 1); rc = inet_peer_xrlim_allow(rt->peer, net->ipv4.sysctl_icmp_ratelimit); } out: return rc; } /* * Maintain the counters used in the SNMP statistics for outgoing ICMP */ void icmp_out_count(struct net *net, unsigned char type) { ICMPMSGOUT_INC_STATS(net, type); ICMP_INC_STATS(net, ICMP_MIB_OUTMSGS); } /* * Checksum each fragment, and on the first include the headers and final * checksum. 
*/ static int icmp_glue_bits(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb) { struct icmp_bxm *icmp_param = (struct icmp_bxm *)from; __wsum csum; csum = skb_copy_and_csum_bits(icmp_param->skb, icmp_param->offset + offset, to, len, 0); skb->csum = csum_block_add(skb->csum, csum, odd); if (icmp_pointers[icmp_param->data.icmph.type].error) nf_ct_attach(skb, icmp_param->skb); return 0; } static void icmp_push_reply(struct icmp_bxm *icmp_param, struct flowi4 *fl4, struct ipcm_cookie *ipc, struct rtable **rt) { struct sock *sk; struct sk_buff *skb; sk = icmp_sk(dev_net((*rt)->dst.dev)); if (ip_append_data(sk, fl4, icmp_glue_bits, icmp_param, icmp_param->data_len+icmp_param->head_len, icmp_param->head_len, ipc, rt, MSG_DONTWAIT) < 0) { ICMP_INC_STATS_BH(sock_net(sk), ICMP_MIB_OUTERRORS); ip_flush_pending_frames(sk); } else if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) { struct icmphdr *icmph = icmp_hdr(skb); __wsum csum = 0; struct sk_buff *skb1; skb_queue_walk(&sk->sk_write_queue, skb1) { csum = csum_add(csum, skb1->csum); } csum = csum_partial_copy_nocheck((void *)&icmp_param->data, (char *)icmph, icmp_param->head_len, csum); icmph->checksum = csum_fold(csum); skb->ip_summed = CHECKSUM_NONE; ip_push_pending_frames(sk, fl4); } } /* * Driving logic for building and sending ICMP messages. 
*/ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb) { struct ipcm_cookie ipc; struct rtable *rt = skb_rtable(skb); struct net *net = dev_net(rt->dst.dev); struct flowi4 fl4; struct sock *sk; struct inet_sock *inet; __be32 daddr; if (ip_options_echo(&icmp_param->replyopts.opt.opt, skb)) return; sk = icmp_xmit_lock(net); if (sk == NULL) return; inet = inet_sk(sk); icmp_param->data.icmph.checksum = 0; inet->tos = ip_hdr(skb)->tos; daddr = ipc.addr = ip_hdr(skb)->saddr; ipc.opt = NULL; ipc.tx_flags = 0; if (icmp_param->replyopts.opt.opt.optlen) { ipc.opt = &icmp_param->replyopts.opt; if (ipc.opt->opt.srr) daddr = icmp_param->replyopts.opt.opt.faddr; } memset(&fl4, 0, sizeof(fl4)); fl4.daddr = daddr; fl4.saddr = rt->rt_spec_dst; fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos); fl4.flowi4_proto = IPPROTO_ICMP; security_skb_classify_flow(skb, flowi4_to_flowi(&fl4)); rt = ip_route_output_key(net, &fl4); if (IS_ERR(rt)) goto out_unlock; if (icmpv4_xrlim_allow(net, rt, &fl4, icmp_param->data.icmph.type, icmp_param->data.icmph.code)) icmp_push_reply(icmp_param, &fl4, &ipc, &rt); ip_rt_put(rt); out_unlock: icmp_xmit_unlock(sk); } static struct rtable *icmp_route_lookup(struct net *net, struct flowi4 *fl4, struct sk_buff *skb_in, const struct iphdr *iph, __be32 saddr, u8 tos, int type, int code, struct icmp_bxm *param) { struct rtable *rt, *rt2; struct flowi4 fl4_dec; int err; memset(fl4, 0, sizeof(*fl4)); fl4->daddr = (param->replyopts.opt.opt.srr ? param->replyopts.opt.opt.faddr : iph->saddr); fl4->saddr = saddr; fl4->flowi4_tos = RT_TOS(tos); fl4->flowi4_proto = IPPROTO_ICMP; fl4->fl4_icmp_type = type; fl4->fl4_icmp_code = code; security_skb_classify_flow(skb_in, flowi4_to_flowi(fl4)); rt = __ip_route_output_key(net, fl4); if (IS_ERR(rt)) return rt; /* No need to clone since we're just using its address. 
*/ rt2 = rt; rt = (struct rtable *) xfrm_lookup(net, &rt->dst, flowi4_to_flowi(fl4), NULL, 0); if (!IS_ERR(rt)) { if (rt != rt2) return rt; } else if (PTR_ERR(rt) == -EPERM) { rt = NULL; } else return rt; err = xfrm_decode_session_reverse(skb_in, flowi4_to_flowi(&fl4_dec), AF_INET); if (err) goto relookup_failed; if (inet_addr_type(net, fl4_dec.saddr) == RTN_LOCAL) { rt2 = __ip_route_output_key(net, &fl4_dec); if (IS_ERR(rt2)) err = PTR_ERR(rt2); } else { struct flowi4 fl4_2 = {}; unsigned long orefdst; fl4_2.daddr = fl4_dec.saddr; rt2 = ip_route_output_key(net, &fl4_2); if (IS_ERR(rt2)) { err = PTR_ERR(rt2); goto relookup_failed; } /* Ugh! */ orefdst = skb_in->_skb_refdst; /* save old refdst */ err = ip_route_input(skb_in, fl4_dec.daddr, fl4_dec.saddr, RT_TOS(tos), rt2->dst.dev); dst_release(&rt2->dst); rt2 = skb_rtable(skb_in); skb_in->_skb_refdst = orefdst; /* restore old refdst */ } if (err) goto relookup_failed; rt2 = (struct rtable *) xfrm_lookup(net, &rt2->dst, flowi4_to_flowi(&fl4_dec), NULL, XFRM_LOOKUP_ICMP); if (!IS_ERR(rt2)) { dst_release(&rt->dst); memcpy(fl4, &fl4_dec, sizeof(*fl4)); rt = rt2; } else if (PTR_ERR(rt2) == -EPERM) { if (rt) dst_release(&rt->dst); return rt2; } else { err = PTR_ERR(rt2); goto relookup_failed; } return rt; relookup_failed: if (rt) return rt; return ERR_PTR(err); } /* * Send an ICMP message in response to a situation * * RFC 1122: 3.2.2 MUST send at least the IP header and 8 bytes of header. * MAY send more (we do). * MUST NOT change this header information. * MUST NOT reply to a multicast/broadcast IP address. * MUST NOT reply to a multicast/broadcast MAC address. * MUST reply to only the first fragment. 
*/ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info) { struct iphdr *iph; int room; struct icmp_bxm icmp_param; struct rtable *rt = skb_rtable(skb_in); struct ipcm_cookie ipc; struct flowi4 fl4; __be32 saddr; u8 tos; struct net *net; struct sock *sk; if (!rt) goto out; net = dev_net(rt->dst.dev); /* * Find the original header. It is expected to be valid, of course. * Check this, icmp_send is called from the most obscure devices * sometimes. */ iph = ip_hdr(skb_in); if ((u8 *)iph < skb_in->head || (skb_in->network_header + sizeof(*iph)) > skb_in->tail) goto out; /* * No replies to physical multicast/broadcast */ if (skb_in->pkt_type != PACKET_HOST) goto out; /* * Now check at the protocol level */ if (rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) goto out; /* * Only reply to fragment 0. We byte re-order the constant * mask for efficiency. */ if (iph->frag_off & htons(IP_OFFSET)) goto out; /* * If we send an ICMP error to an ICMP error a mess would result.. */ if (icmp_pointers[type].error) { /* * We are an error, check if we are replying to an * ICMP error */ if (iph->protocol == IPPROTO_ICMP) { u8 _inner_type, *itp; itp = skb_header_pointer(skb_in, skb_network_header(skb_in) + (iph->ihl << 2) + offsetof(struct icmphdr, type) - skb_in->data, sizeof(_inner_type), &_inner_type); if (itp == NULL) goto out; /* * Assume any unknown ICMP type is an error. This * isn't specified by the RFC, but think about it.. */ if (*itp > NR_ICMP_TYPES || icmp_pointers[*itp].error) goto out; } } sk = icmp_xmit_lock(net); if (sk == NULL) return; /* * Construct source address and options. */ saddr = iph->daddr; if (!(rt->rt_flags & RTCF_LOCAL)) { struct net_device *dev = NULL; rcu_read_lock(); if (rt_is_input_route(rt) && net->ipv4.sysctl_icmp_errors_use_inbound_ifaddr) dev = dev_get_by_index_rcu(net, rt->rt_iif); if (dev) saddr = inet_select_addr(dev, 0, RT_SCOPE_LINK); else saddr = 0; rcu_read_unlock(); } tos = icmp_pointers[type].error ? 
((iph->tos & IPTOS_TOS_MASK) | IPTOS_PREC_INTERNETCONTROL) : iph->tos; if (ip_options_echo(&icmp_param.replyopts.opt.opt, skb_in)) goto out_unlock; /* * Prepare data for ICMP header. */ icmp_param.data.icmph.type = type; icmp_param.data.icmph.code = code; icmp_param.data.icmph.un.gateway = info; icmp_param.data.icmph.checksum = 0; icmp_param.skb = skb_in; icmp_param.offset = skb_network_offset(skb_in); inet_sk(sk)->tos = tos; ipc.addr = iph->saddr; ipc.opt = &icmp_param.replyopts.opt; ipc.tx_flags = 0; rt = icmp_route_lookup(net, &fl4, skb_in, iph, saddr, tos, type, code, &icmp_param); if (IS_ERR(rt)) goto out_unlock; if (!icmpv4_xrlim_allow(net, rt, &fl4, type, code)) goto ende; /* RFC says return as much as we can without exceeding 576 bytes. */ room = dst_mtu(&rt->dst); if (room > 576) room = 576; room -= sizeof(struct iphdr) + icmp_param.replyopts.opt.opt.optlen; room -= sizeof(struct icmphdr); icmp_param.data_len = skb_in->len - icmp_param.offset; if (icmp_param.data_len > room) icmp_param.data_len = room; icmp_param.head_len = sizeof(struct icmphdr); icmp_push_reply(&icmp_param, &fl4, &ipc, &rt); ende: ip_rt_put(rt); out_unlock: icmp_xmit_unlock(sk); out:; } EXPORT_SYMBOL(icmp_send); /* * Handle ICMP_DEST_UNREACH, ICMP_TIME_EXCEED, and ICMP_QUENCH. */ static void icmp_unreach(struct sk_buff *skb) { const struct iphdr *iph; struct icmphdr *icmph; int hash, protocol; const struct net_protocol *ipprot; u32 info = 0; struct net *net; net = dev_net(skb_dst(skb)->dev); /* * Incomplete header ? * Only checks for the IP header, there should be an * additional check for longer headers in upper levels. */ if (!pskb_may_pull(skb, sizeof(struct iphdr))) goto out_err; icmph = icmp_hdr(skb); iph = (const struct iphdr *)skb->data; if (iph->ihl < 5) /* Mangled header, drop. 
*/ goto out_err; if (icmph->type == ICMP_DEST_UNREACH) { switch (icmph->code & 15) { case ICMP_NET_UNREACH: case ICMP_HOST_UNREACH: case ICMP_PROT_UNREACH: case ICMP_PORT_UNREACH: break; case ICMP_FRAG_NEEDED: if (ipv4_config.no_pmtu_disc) { LIMIT_NETDEBUG(KERN_INFO "ICMP: %pI4: fragmentation needed and DF set.\n", &iph->daddr); } else { info = ip_rt_frag_needed(net, iph, ntohs(icmph->un.frag.mtu), skb->dev); if (!info) goto out; } break; case ICMP_SR_FAILED: LIMIT_NETDEBUG(KERN_INFO "ICMP: %pI4: Source Route Failed.\n", &iph->daddr); break; default: break; } if (icmph->code > NR_ICMP_UNREACH) goto out; } else if (icmph->type == ICMP_PARAMETERPROB) info = ntohl(icmph->un.gateway) >> 24; /* * Throw it at our lower layers * * RFC 1122: 3.2.2 MUST extract the protocol ID from the passed * header. * RFC 1122: 3.2.2.1 MUST pass ICMP unreach messages to the * transport layer. * RFC 1122: 3.2.2.2 MUST pass ICMP time expired messages to * transport layer. */ /* * Check the other end isn't violating RFC 1122. Some routers send * bogus responses to broadcast frames. If you see this message * first check your netmask matches at both ends, if it does then * get the other vendor to fix their kit. */ if (!net->ipv4.sysctl_icmp_ignore_bogus_error_responses && inet_addr_type(net, iph->daddr) == RTN_BROADCAST) { if (net_ratelimit()) printk(KERN_WARNING "%pI4 sent an invalid ICMP " "type %u, code %u " "error to a broadcast: %pI4 on %s\n", &ip_hdr(skb)->saddr, icmph->type, icmph->code, &iph->daddr, skb->dev->name); goto out; } /* Checkin full IP header plus 8 bytes of protocol to * avoid additional coding at protocol handlers. */ if (!pskb_may_pull(skb, iph->ihl * 4 + 8)) goto out; iph = (const struct iphdr *)skb->data; protocol = iph->protocol; /* * Deliver ICMP message to raw sockets. Pretty useless feature? 
*/ raw_icmp_error(skb, protocol, info); hash = protocol & (MAX_INET_PROTOS - 1); rcu_read_lock(); ipprot = rcu_dereference(inet_protos[hash]); if (ipprot && ipprot->err_handler) ipprot->err_handler(skb, info); rcu_read_unlock(); out: return; out_err: ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS); goto out; } /* * Handle ICMP_REDIRECT. */ static void icmp_redirect(struct sk_buff *skb) { const struct iphdr *iph; if (skb->len < sizeof(struct iphdr)) goto out_err; /* * Get the copied header of the packet that caused the redirect */ if (!pskb_may_pull(skb, sizeof(struct iphdr))) goto out; iph = (const struct iphdr *)skb->data; switch (icmp_hdr(skb)->code & 7) { case ICMP_REDIR_NET: case ICMP_REDIR_NETTOS: /* * As per RFC recommendations now handle it as a host redirect. */ case ICMP_REDIR_HOST: case ICMP_REDIR_HOSTTOS: ip_rt_redirect(ip_hdr(skb)->saddr, iph->daddr, icmp_hdr(skb)->un.gateway, iph->saddr, skb->dev); break; } /* Ping wants to see redirects. * Let's pretend they are errors of sorts... */ if (iph->protocol == IPPROTO_ICMP && iph->ihl >= 5 && pskb_may_pull(skb, (iph->ihl<<2)+8)) { ping_err(skb, icmp_hdr(skb)->un.gateway); } out: return; out_err: ICMP_INC_STATS_BH(dev_net(skb->dev), ICMP_MIB_INERRORS); goto out; } /* * Handle ICMP_ECHO ("ping") requests. * * RFC 1122: 3.2.2.6 MUST have an echo server that answers ICMP echo * requests. * RFC 1122: 3.2.2.6 Data received in the ICMP_ECHO request MUST be * included in the reply. * RFC 1812: 4.3.3.6 SHOULD have a config option for silently ignoring * echo requests, MUST have default=NOT. * See also WRT handling of options once they are done and working. 
*/ static void icmp_echo(struct sk_buff *skb) { struct net *net; net = dev_net(skb_dst(skb)->dev); if (!net->ipv4.sysctl_icmp_echo_ignore_all) { struct icmp_bxm icmp_param; icmp_param.data.icmph = *icmp_hdr(skb); icmp_param.data.icmph.type = ICMP_ECHOREPLY; icmp_param.skb = skb; icmp_param.offset = 0; icmp_param.data_len = skb->len; icmp_param.head_len = sizeof(struct icmphdr); icmp_reply(&icmp_param, skb); } } /* * Handle ICMP Timestamp requests. * RFC 1122: 3.2.2.8 MAY implement ICMP timestamp requests. * SHOULD be in the kernel for minimum random latency. * MUST be accurate to a few minutes. * MUST be updated at least at 15Hz. */ static void icmp_timestamp(struct sk_buff *skb) { struct timespec tv; struct icmp_bxm icmp_param; /* * Too short. */ if (skb->len < 4) goto out_err; /* * Fill in the current time as ms since midnight UT: */ getnstimeofday(&tv); icmp_param.data.times[1] = htonl((tv.tv_sec % 86400) * MSEC_PER_SEC + tv.tv_nsec / NSEC_PER_MSEC); icmp_param.data.times[2] = icmp_param.data.times[1]; if (skb_copy_bits(skb, 0, &icmp_param.data.times[0], 4)) BUG(); icmp_param.data.icmph = *icmp_hdr(skb); icmp_param.data.icmph.type = ICMP_TIMESTAMPREPLY; icmp_param.data.icmph.code = 0; icmp_param.skb = skb; icmp_param.offset = 0; icmp_param.data_len = 0; icmp_param.head_len = sizeof(struct icmphdr) + 12; icmp_reply(&icmp_param, skb); out: return; out_err: ICMP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), ICMP_MIB_INERRORS); goto out; } /* * Handle ICMP_ADDRESS_MASK requests. (RFC950) * * RFC1122 (3.2.2.9). A host MUST only send replies to * ADDRESS_MASK requests if it's been configured as an address mask * agent. Receiving a request doesn't constitute implicit permission to * act as one. Of course, implementing this correctly requires (SHOULD) * a way to turn the functionality on and off. Another one for sysctl(), * I guess. -- MS * * RFC1812 (4.3.3.9). A router MUST implement it. * A router SHOULD have switch turning it on/off. * This switch MUST be ON by default. 
* * Gratuitous replies, zero-source replies are not implemented, * that complies with RFC. DO NOT implement them!!! All the idea * of broadcast addrmask replies as specified in RFC950 is broken. * The problem is that it is not uncommon to have several prefixes * on one physical interface. Moreover, addrmask agent can even be * not aware of existing another prefixes. * If source is zero, addrmask agent cannot choose correct prefix. * Gratuitous mask announcements suffer from the same problem. * RFC1812 explains it, but still allows to use ADDRMASK, * that is pretty silly. --ANK * * All these rules are so bizarre, that I removed kernel addrmask * support at all. It is wrong, it is obsolete, nobody uses it in * any case. --ANK * * Furthermore you can do it with a usermode address agent program * anyway... */ static void icmp_address(struct sk_buff *skb) { #if 0 if (net_ratelimit()) printk(KERN_DEBUG "a guy asks for address mask. Who is it?\n"); #endif } /* * RFC1812 (4.3.3.9). A router SHOULD listen all replies, and complain * loudly if an inconsistency is found. * called with rcu_read_lock() */ static void icmp_address_reply(struct sk_buff *skb) { struct rtable *rt = skb_rtable(skb); struct net_device *dev = skb->dev; struct in_device *in_dev; struct in_ifaddr *ifa; if (skb->len < 4 || !(rt->rt_flags&RTCF_DIRECTSRC)) return; in_dev = __in_dev_get_rcu(dev); if (!in_dev) return; if (in_dev->ifa_list && IN_DEV_LOG_MARTIANS(in_dev) && IN_DEV_FORWARD(in_dev)) { __be32 _mask, *mp; mp = skb_header_pointer(skb, 0, sizeof(_mask), &_mask); BUG_ON(mp == NULL); for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) { if (*mp == ifa->ifa_mask && inet_ifa_match(ip_hdr(skb)->saddr, ifa)) break; } if (!ifa && net_ratelimit()) { printk(KERN_INFO "Wrong address mask %pI4 from %s/%pI4\n", mp, dev->name, &ip_hdr(skb)->saddr); } } } static void icmp_discard(struct sk_buff *skb) { } /* * Deal with incoming ICMP packets. 
*/ int icmp_rcv(struct sk_buff *skb) { struct icmphdr *icmph; struct rtable *rt = skb_rtable(skb); struct net *net = dev_net(rt->dst.dev); if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) { struct sec_path *sp = skb_sec_path(skb); int nh; if (!(sp && sp->xvec[sp->len - 1]->props.flags & XFRM_STATE_ICMP)) goto drop; if (!pskb_may_pull(skb, sizeof(*icmph) + sizeof(struct iphdr))) goto drop; nh = skb_network_offset(skb); skb_set_network_header(skb, sizeof(*icmph)); if (!xfrm4_policy_check_reverse(NULL, XFRM_POLICY_IN, skb)) goto drop; skb_set_network_header(skb, nh); } ICMP_INC_STATS_BH(net, ICMP_MIB_INMSGS); switch (skb->ip_summed) { case CHECKSUM_COMPLETE: if (!csum_fold(skb->csum)) break; /* fall through */ case CHECKSUM_NONE: skb->csum = 0; if (__skb_checksum_complete(skb)) goto error; } if (!pskb_pull(skb, sizeof(*icmph))) goto error; icmph = icmp_hdr(skb); ICMPMSGIN_INC_STATS_BH(net, icmph->type); /* * 18 is the highest 'known' ICMP type. Anything else is a mystery * * RFC 1122: 3.2.2 Unknown ICMP messages types MUST be silently * discarded. */ if (icmph->type > NR_ICMP_TYPES) goto error; /* * Parse the ICMP message */ if (rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) { /* * RFC 1122: 3.2.2.6 An ICMP_ECHO to broadcast MAY be * silently ignored (we let user decide with a sysctl). * RFC 1122: 3.2.2.8 An ICMP_TIMESTAMP MAY be silently * discarded if to broadcast/multicast. */ if ((icmph->type == ICMP_ECHO || icmph->type == ICMP_TIMESTAMP) && net->ipv4.sysctl_icmp_echo_ignore_broadcasts) { goto error; } if (icmph->type != ICMP_ECHO && icmph->type != ICMP_TIMESTAMP && icmph->type != ICMP_ADDRESS && icmph->type != ICMP_ADDRESSREPLY) { goto error; } } icmp_pointers[icmph->type].handler(skb); drop: kfree_skb(skb); return 0; error: ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS); goto drop; } /* * This table is the definition of how we handle ICMP. 
*/ static const struct icmp_control icmp_pointers[NR_ICMP_TYPES + 1] = { [ICMP_ECHOREPLY] = { .handler = ping_rcv, }, [1] = { .handler = icmp_discard, .error = 1, }, [2] = { .handler = icmp_discard, .error = 1, }, [ICMP_DEST_UNREACH] = { .handler = icmp_unreach, .error = 1, }, [ICMP_SOURCE_QUENCH] = { .handler = icmp_unreach, .error = 1, }, [ICMP_REDIRECT] = { .handler = icmp_redirect, .error = 1, }, [6] = { .handler = icmp_discard, .error = 1, }, [7] = { .handler = icmp_discard, .error = 1, }, [ICMP_ECHO] = { .handler = icmp_echo, }, [9] = { .handler = icmp_discard, .error = 1, }, [10] = { .handler = icmp_discard, .error = 1, }, [ICMP_TIME_EXCEEDED] = { .handler = icmp_unreach, .error = 1, }, [ICMP_PARAMETERPROB] = { .handler = icmp_unreach, .error = 1, }, [ICMP_TIMESTAMP] = { .handler = icmp_timestamp, }, [ICMP_TIMESTAMPREPLY] = { .handler = icmp_discard, }, [ICMP_INFO_REQUEST] = { .handler = icmp_discard, }, [ICMP_INFO_REPLY] = { .handler = icmp_discard, }, [ICMP_ADDRESS] = { .handler = icmp_address, }, [ICMP_ADDRESSREPLY] = { .handler = icmp_address_reply, }, }; static void __net_exit icmp_sk_exit(struct net *net) { int i; for_each_possible_cpu(i) inet_ctl_sock_destroy(net->ipv4.icmp_sk[i]); kfree(net->ipv4.icmp_sk); net->ipv4.icmp_sk = NULL; } static int __net_init icmp_sk_init(struct net *net) { int i, err; net->ipv4.icmp_sk = kzalloc(nr_cpu_ids * sizeof(struct sock *), GFP_KERNEL); if (net->ipv4.icmp_sk == NULL) return -ENOMEM; for_each_possible_cpu(i) { struct sock *sk; err = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW, IPPROTO_ICMP, net); if (err < 0) goto fail; net->ipv4.icmp_sk[i] = sk; /* Enough space for 2 64K ICMP packets, including * sk_buff struct overhead. */ sk->sk_sndbuf = (2 * ((64 * 1024) + sizeof(struct sk_buff))); /* * Speedup sock_wfree() */ sock_set_flag(sk, SOCK_USE_WRITE_QUEUE); inet_sk(sk)->pmtudisc = IP_PMTUDISC_DONT; } /* Control parameters for ECHO replies. 
*/ net->ipv4.sysctl_icmp_echo_ignore_all = 0; net->ipv4.sysctl_icmp_echo_ignore_broadcasts = 1; /* Control parameter - ignore bogus broadcast responses? */ net->ipv4.sysctl_icmp_ignore_bogus_error_responses = 1; /* * Configurable global rate limit. * * ratelimit defines tokens/packet consumed for dst->rate_token * bucket ratemask defines which icmp types are ratelimited by * setting it's bit position. * * default: * dest unreachable (3), source quench (4), * time exceeded (11), parameter problem (12) */ net->ipv4.sysctl_icmp_ratelimit = 1 * HZ; net->ipv4.sysctl_icmp_ratemask = 0x1818; net->ipv4.sysctl_icmp_errors_use_inbound_ifaddr = 0; return 0; fail: for_each_possible_cpu(i) inet_ctl_sock_destroy(net->ipv4.icmp_sk[i]); kfree(net->ipv4.icmp_sk); return err; } static struct pernet_operations __net_initdata icmp_sk_ops = { .init = icmp_sk_init, .exit = icmp_sk_exit, }; int __init icmp_init(void) { return register_pernet_subsys(&icmp_sk_ops); }
gpl-2.0
Thunderoar/android_kernel_samsung_goyave3g
drivers/hwmon/ibmpex.c
2157
15440
/* * A hwmon driver for the IBM PowerExecutive temperature/power sensors * Copyright (C) 2007 IBM * * Author: Darrick J. Wong <djwong@us.ibm.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/ipmi.h> #include <linux/module.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> #include <linux/jiffies.h> #include <linux/mutex.h> #include <linux/slab.h> #include <linux/err.h> #define REFRESH_INTERVAL (2 * HZ) #define DRVNAME "ibmpex" #define PEX_GET_VERSION 1 #define PEX_GET_SENSOR_COUNT 2 #define PEX_GET_SENSOR_NAME 3 #define PEX_RESET_HIGH_LOW 4 #define PEX_GET_SENSOR_DATA 6 #define PEX_NET_FUNCTION 0x3A #define PEX_COMMAND 0x3C static inline u16 extract_value(const char *data, int offset) { return be16_to_cpup((__be16 *)&data[offset]); } #define TEMP_SENSOR 1 #define POWER_SENSOR 2 #define PEX_SENSOR_TYPE_LEN 3 static u8 const power_sensor_sig[] = {0x70, 0x77, 0x72}; static u8 const temp_sensor_sig[] = {0x74, 0x65, 0x6D}; #define PEX_MULT_LEN 2 static u8 const watt_sensor_sig[] = {0x41, 0x43}; #define PEX_NUM_SENSOR_FUNCS 3 static char const * const power_sensor_name_templates[] = { "%s%d_average", "%s%d_average_lowest", "%s%d_average_highest" }; static char const * const temp_sensor_name_templates[] = { "%s%d_input", "%s%d_input_lowest", "%s%d_input_highest" }; static void ibmpex_msg_handler(struct 
ipmi_recv_msg *msg, void *user_msg_data); static void ibmpex_register_bmc(int iface, struct device *dev); static void ibmpex_bmc_gone(int iface); struct ibmpex_sensor_data { int in_use; s16 values[PEX_NUM_SENSOR_FUNCS]; int multiplier; struct sensor_device_attribute_2 attr[PEX_NUM_SENSOR_FUNCS]; }; struct ibmpex_bmc_data { struct list_head list; struct device *hwmon_dev; struct device *bmc_device; struct mutex lock; char valid; unsigned long last_updated; /* In jiffies */ struct ipmi_addr address; struct completion read_complete; ipmi_user_t user; int interface; struct kernel_ipmi_msg tx_message; unsigned char tx_msg_data[IPMI_MAX_MSG_LENGTH]; long tx_msgid; unsigned char rx_msg_data[IPMI_MAX_MSG_LENGTH]; unsigned long rx_msg_len; unsigned char rx_result; int rx_recv_type; unsigned char sensor_major; unsigned char sensor_minor; unsigned char num_sensors; struct ibmpex_sensor_data *sensors; }; struct ibmpex_driver_data { struct list_head bmc_data; struct ipmi_smi_watcher bmc_events; struct ipmi_user_hndl ipmi_hndlrs; }; static struct ibmpex_driver_data driver_data = { .bmc_data = LIST_HEAD_INIT(driver_data.bmc_data), .bmc_events = { .owner = THIS_MODULE, .new_smi = ibmpex_register_bmc, .smi_gone = ibmpex_bmc_gone, }, .ipmi_hndlrs = { .ipmi_recv_hndl = ibmpex_msg_handler, }, }; static int ibmpex_send_message(struct ibmpex_bmc_data *data) { int err; err = ipmi_validate_addr(&data->address, sizeof(data->address)); if (err) goto out; data->tx_msgid++; err = ipmi_request_settime(data->user, &data->address, data->tx_msgid, &data->tx_message, data, 0, 0, 0); if (err) goto out1; return 0; out1: dev_err(data->bmc_device, "request_settime=%x\n", err); return err; out: dev_err(data->bmc_device, "validate_addr=%x\n", err); return err; } static int ibmpex_ver_check(struct ibmpex_bmc_data *data) { data->tx_msg_data[0] = PEX_GET_VERSION; data->tx_message.data_len = 1; ibmpex_send_message(data); wait_for_completion(&data->read_complete); if (data->rx_result || data->rx_msg_len != 
6) return -ENOENT; data->sensor_major = data->rx_msg_data[0]; data->sensor_minor = data->rx_msg_data[1]; dev_info(data->bmc_device, "Found BMC with sensor interface v%d.%d %d-%02d-%02d on interface %d\n", data->sensor_major, data->sensor_minor, extract_value(data->rx_msg_data, 2), data->rx_msg_data[4], data->rx_msg_data[5], data->interface); return 0; } static int ibmpex_query_sensor_count(struct ibmpex_bmc_data *data) { data->tx_msg_data[0] = PEX_GET_SENSOR_COUNT; data->tx_message.data_len = 1; ibmpex_send_message(data); wait_for_completion(&data->read_complete); if (data->rx_result || data->rx_msg_len != 1) return -ENOENT; return data->rx_msg_data[0]; } static int ibmpex_query_sensor_name(struct ibmpex_bmc_data *data, int sensor) { data->tx_msg_data[0] = PEX_GET_SENSOR_NAME; data->tx_msg_data[1] = sensor; data->tx_message.data_len = 2; ibmpex_send_message(data); wait_for_completion(&data->read_complete); if (data->rx_result || data->rx_msg_len < 1) return -ENOENT; return 0; } static int ibmpex_query_sensor_data(struct ibmpex_bmc_data *data, int sensor) { data->tx_msg_data[0] = PEX_GET_SENSOR_DATA; data->tx_msg_data[1] = sensor; data->tx_message.data_len = 2; ibmpex_send_message(data); wait_for_completion(&data->read_complete); if (data->rx_result || data->rx_msg_len < 26) { dev_err(data->bmc_device, "Error reading sensor %d.\n", sensor); return -ENOENT; } return 0; } static int ibmpex_reset_high_low_data(struct ibmpex_bmc_data *data) { data->tx_msg_data[0] = PEX_RESET_HIGH_LOW; data->tx_message.data_len = 1; ibmpex_send_message(data); wait_for_completion(&data->read_complete); return 0; } static void ibmpex_update_device(struct ibmpex_bmc_data *data) { int i, err; mutex_lock(&data->lock); if (time_before(jiffies, data->last_updated + REFRESH_INTERVAL) && data->valid) goto out; for (i = 0; i < data->num_sensors; i++) { if (!data->sensors[i].in_use) continue; err = ibmpex_query_sensor_data(data, i); if (err) continue; data->sensors[i].values[0] = 
extract_value(data->rx_msg_data, 16); data->sensors[i].values[1] = extract_value(data->rx_msg_data, 18); data->sensors[i].values[2] = extract_value(data->rx_msg_data, 20); } data->last_updated = jiffies; data->valid = 1; out: mutex_unlock(&data->lock); } static struct ibmpex_bmc_data *get_bmc_data(int iface) { struct ibmpex_bmc_data *p, *next; list_for_each_entry_safe(p, next, &driver_data.bmc_data, list) if (p->interface == iface) return p; return NULL; } static ssize_t show_name(struct device *dev, struct device_attribute *devattr, char *buf) { return sprintf(buf, "%s\n", DRVNAME); } static SENSOR_DEVICE_ATTR(name, S_IRUGO, show_name, NULL, 0); static ssize_t ibmpex_show_sensor(struct device *dev, struct device_attribute *devattr, char *buf) { struct sensor_device_attribute_2 *attr = to_sensor_dev_attr_2(devattr); struct ibmpex_bmc_data *data = dev_get_drvdata(dev); int mult = data->sensors[attr->index].multiplier; ibmpex_update_device(data); return sprintf(buf, "%d\n", data->sensors[attr->index].values[attr->nr] * mult); } static ssize_t ibmpex_reset_high_low(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { struct ibmpex_bmc_data *data = dev_get_drvdata(dev); ibmpex_reset_high_low_data(data); return count; } static SENSOR_DEVICE_ATTR(reset_high_low, S_IWUSR, NULL, ibmpex_reset_high_low, 0); static int is_power_sensor(const char *sensor_id, int len) { if (len < PEX_SENSOR_TYPE_LEN) return 0; if (!memcmp(sensor_id, power_sensor_sig, PEX_SENSOR_TYPE_LEN)) return 1; return 0; } static int is_temp_sensor(const char *sensor_id, int len) { if (len < PEX_SENSOR_TYPE_LEN) return 0; if (!memcmp(sensor_id, temp_sensor_sig, PEX_SENSOR_TYPE_LEN)) return 1; return 0; } static int power_sensor_multiplier(struct ibmpex_bmc_data *data, const char *sensor_id, int len) { int i; if (data->sensor_major == 2) return 1000000; for (i = PEX_SENSOR_TYPE_LEN; i < len - 1; i++) if (!memcmp(&sensor_id[i], watt_sensor_sig, PEX_MULT_LEN)) return 1000000; 
return 100000; } static int create_sensor(struct ibmpex_bmc_data *data, int type, int counter, int sensor, int func) { int err; char *n; n = kmalloc(32, GFP_KERNEL); if (!n) return -ENOMEM; if (type == TEMP_SENSOR) sprintf(n, temp_sensor_name_templates[func], "temp", counter); else if (type == POWER_SENSOR) sprintf(n, power_sensor_name_templates[func], "power", counter); sysfs_attr_init(&data->sensors[sensor].attr[func].dev_attr.attr); data->sensors[sensor].attr[func].dev_attr.attr.name = n; data->sensors[sensor].attr[func].dev_attr.attr.mode = S_IRUGO; data->sensors[sensor].attr[func].dev_attr.show = ibmpex_show_sensor; data->sensors[sensor].attr[func].index = sensor; data->sensors[sensor].attr[func].nr = func; err = device_create_file(data->bmc_device, &data->sensors[sensor].attr[func].dev_attr); if (err) { data->sensors[sensor].attr[func].dev_attr.attr.name = NULL; kfree(n); return err; } return 0; } static int ibmpex_find_sensors(struct ibmpex_bmc_data *data) { int i, j, err; int sensor_type; int sensor_counter; int num_power = 0; int num_temp = 0; err = ibmpex_query_sensor_count(data); if (err <= 0) return -ENOENT; data->num_sensors = err; data->sensors = kzalloc(data->num_sensors * sizeof(*data->sensors), GFP_KERNEL); if (!data->sensors) return -ENOMEM; for (i = 0; i < data->num_sensors; i++) { err = ibmpex_query_sensor_name(data, i); if (err) continue; if (is_power_sensor(data->rx_msg_data, data->rx_msg_len)) { sensor_type = POWER_SENSOR; num_power++; sensor_counter = num_power; data->sensors[i].multiplier = power_sensor_multiplier(data, data->rx_msg_data, data->rx_msg_len); } else if (is_temp_sensor(data->rx_msg_data, data->rx_msg_len)) { sensor_type = TEMP_SENSOR; num_temp++; sensor_counter = num_temp; data->sensors[i].multiplier = 1000; } else continue; data->sensors[i].in_use = 1; /* Create attributes */ for (j = 0; j < PEX_NUM_SENSOR_FUNCS; j++) { err = create_sensor(data, sensor_type, sensor_counter, i, j); if (err) goto exit_remove; } } err = 
device_create_file(data->bmc_device, &sensor_dev_attr_reset_high_low.dev_attr); if (err) goto exit_remove; err = device_create_file(data->bmc_device, &sensor_dev_attr_name.dev_attr); if (err) goto exit_remove; return 0; exit_remove: device_remove_file(data->bmc_device, &sensor_dev_attr_reset_high_low.dev_attr); device_remove_file(data->bmc_device, &sensor_dev_attr_name.dev_attr); for (i = 0; i < data->num_sensors; i++) for (j = 0; j < PEX_NUM_SENSOR_FUNCS; j++) { if (!data->sensors[i].attr[j].dev_attr.attr.name) continue; device_remove_file(data->bmc_device, &data->sensors[i].attr[j].dev_attr); kfree(data->sensors[i].attr[j].dev_attr.attr.name); } kfree(data->sensors); return err; } static void ibmpex_register_bmc(int iface, struct device *dev) { struct ibmpex_bmc_data *data; int err; data = kzalloc(sizeof(*data), GFP_KERNEL); if (!data) { dev_err(dev, "Insufficient memory for BMC interface.\n"); return; } data->address.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; data->address.channel = IPMI_BMC_CHANNEL; data->address.data[0] = 0; data->interface = iface; data->bmc_device = dev; /* Create IPMI messaging interface user */ err = ipmi_create_user(data->interface, &driver_data.ipmi_hndlrs, data, &data->user); if (err < 0) { dev_err(dev, "Unable to register user with IPMI interface %d\n", data->interface); goto out; } mutex_init(&data->lock); /* Initialize message */ data->tx_msgid = 0; init_completion(&data->read_complete); data->tx_message.netfn = PEX_NET_FUNCTION; data->tx_message.cmd = PEX_COMMAND; data->tx_message.data = data->tx_msg_data; /* Does this BMC support PowerExecutive? 
*/ err = ibmpex_ver_check(data); if (err) goto out_user; /* Register the BMC as a HWMON class device */ data->hwmon_dev = hwmon_device_register(data->bmc_device); if (IS_ERR(data->hwmon_dev)) { dev_err(data->bmc_device, "Unable to register hwmon device for IPMI interface %d\n", data->interface); goto out_user; } /* finally add the new bmc data to the bmc data list */ dev_set_drvdata(dev, data); list_add_tail(&data->list, &driver_data.bmc_data); /* Now go find all the sensors */ err = ibmpex_find_sensors(data); if (err) { dev_err(data->bmc_device, "Error %d finding sensors\n", err); goto out_register; } return; out_register: hwmon_device_unregister(data->hwmon_dev); out_user: ipmi_destroy_user(data->user); out: kfree(data); } static void ibmpex_bmc_delete(struct ibmpex_bmc_data *data) { int i, j; device_remove_file(data->bmc_device, &sensor_dev_attr_reset_high_low.dev_attr); device_remove_file(data->bmc_device, &sensor_dev_attr_name.dev_attr); for (i = 0; i < data->num_sensors; i++) for (j = 0; j < PEX_NUM_SENSOR_FUNCS; j++) { if (!data->sensors[i].attr[j].dev_attr.attr.name) continue; device_remove_file(data->bmc_device, &data->sensors[i].attr[j].dev_attr); kfree(data->sensors[i].attr[j].dev_attr.attr.name); } list_del(&data->list); dev_set_drvdata(data->bmc_device, NULL); hwmon_device_unregister(data->hwmon_dev); ipmi_destroy_user(data->user); kfree(data->sensors); kfree(data); } static void ibmpex_bmc_gone(int iface) { struct ibmpex_bmc_data *data = get_bmc_data(iface); if (!data) return; ibmpex_bmc_delete(data); } static void ibmpex_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data) { struct ibmpex_bmc_data *data = (struct ibmpex_bmc_data *)user_msg_data; if (msg->msgid != data->tx_msgid) { dev_err(data->bmc_device, "Mismatch between received msgid (%02x) and transmitted msgid (%02x)!\n", (int)msg->msgid, (int)data->tx_msgid); ipmi_free_recv_msg(msg); return; } data->rx_recv_type = msg->recv_type; if (msg->msg.data_len > 0) data->rx_result = 
msg->msg.data[0]; else data->rx_result = IPMI_UNKNOWN_ERR_COMPLETION_CODE; if (msg->msg.data_len > 1) { data->rx_msg_len = msg->msg.data_len - 1; memcpy(data->rx_msg_data, msg->msg.data + 1, data->rx_msg_len); } else data->rx_msg_len = 0; ipmi_free_recv_msg(msg); complete(&data->read_complete); } static int __init ibmpex_init(void) { return ipmi_smi_watcher_register(&driver_data.bmc_events); } static void __exit ibmpex_exit(void) { struct ibmpex_bmc_data *p, *next; ipmi_smi_watcher_unregister(&driver_data.bmc_events); list_for_each_entry_safe(p, next, &driver_data.bmc_data, list) ibmpex_bmc_delete(p); } MODULE_AUTHOR("Darrick J. Wong <djwong@us.ibm.com>"); MODULE_DESCRIPTION("IBM PowerExecutive power/temperature sensor driver"); MODULE_LICENSE("GPL"); module_init(ibmpex_init); module_exit(ibmpex_exit); MODULE_ALIAS("dmi:bvnIBM:*:pnIBMSystemx3350-*"); MODULE_ALIAS("dmi:bvnIBM:*:pnIBMSystemx3550-*"); MODULE_ALIAS("dmi:bvnIBM:*:pnIBMSystemx3650-*"); MODULE_ALIAS("dmi:bvnIBM:*:pnIBMSystemx3655-*"); MODULE_ALIAS("dmi:bvnIBM:*:pnIBMSystemx3755-*");
gpl-2.0
codename13/android_kernel_samsung_kylessopen-CAF
sound/oss/pss.c
2669
32862
/* * sound/oss/pss.c * * The low level driver for the Personal Sound System (ECHO ESC614). * * * Copyright (C) by Hannu Savolainen 1993-1997 * * OSS/Free for Linux is distributed under the GNU GENERAL PUBLIC LICENSE (GPL) * Version 2 (June 1991). See the "COPYING" file distributed with this software * for more info. * * * Thomas Sailer ioctl code reworked (vmalloc/vfree removed) * Alan Cox modularisation, clean up. * * 98-02-21: Vladimir Michl <vladimir.michl@upol.cz> * Added mixer device for Beethoven ADSP-16 (master volume, * bass, treble, synth), only for speakers. * Fixed bug in pss_write (exchange parameters) * Fixed config port of SB * Requested two regions for PSS (PSS mixer, PSS config) * Modified pss_download_boot * To probe_pss_mss added test for initialize AD1848 * 98-05-28: Vladimir Michl <vladimir.michl@upol.cz> * Fixed computation of mixer volumes * 04-05-1999: Anthony Barbachan <barbcode@xmen.cis.fordham.edu> * Added code that allows the user to enable his cdrom and/or * joystick through the module parameters pss_cdrom_port and * pss_enable_joystick. pss_cdrom_port takes a port address as its * argument. pss_enable_joystick takes either a 0 or a non-0 as its * argument. * 04-06-1999: Anthony Barbachan <barbcode@xmen.cis.fordham.edu> * Separated some code into new functions for easier reuse. * Cleaned up and streamlined new code. Added code to allow a user * to only use this driver for enabling non-sound components * through the new module parameter pss_no_sound (flag). Added * code that would allow a user to decide whether the driver should * reset the configured hardware settings for the PSS board through * the module parameter pss_keep_settings (flag). This flag will * allow a user to free up resources in use by this card if needbe, * furthermore it allows him to use this driver to just enable the * emulations and then be unloaded as it is no longer needed. Both * new settings are only available to this driver if compiled as a * module. 
The default settings of all new parameters are set to * load the driver as it did in previous versions. * 04-07-1999: Anthony Barbachan <barbcode@xmen.cis.fordham.edu> * Added module parameter pss_firmware to allow the user to tell * the driver where the firmware file is located. The default * setting is the previous hardcoded setting "/etc/sound/pss_synth". * 00-03-03: Christoph Hellwig <chhellwig@infradead.org> * Adapted to module_init/module_exit * 11-10-2000: Bartlomiej Zolnierkiewicz <bkz@linux-ide.org> * Added __init to probe_pss(), attach_pss() and probe_pss_mpu() * 02-Jan-2001: Chris Rankin * Specify that this module owns the coprocessor */ #include <linux/init.h> #include <linux/module.h> #include <linux/spinlock.h> #include "sound_config.h" #include "sound_firmware.h" #include "ad1848.h" #include "mpu401.h" /* * PSS registers. */ #define REG(x) (devc->base+x) #define PSS_DATA 0 #define PSS_STATUS 2 #define PSS_CONTROL 2 #define PSS_ID 4 #define PSS_IRQACK 4 #define PSS_PIO 0x1a /* * Config registers */ #define CONF_PSS 0x10 #define CONF_WSS 0x12 #define CONF_SB 0x14 #define CONF_CDROM 0x16 #define CONF_MIDI 0x18 /* * Status bits. 
*/ #define PSS_FLAG3 0x0800 #define PSS_FLAG2 0x0400 #define PSS_FLAG1 0x1000 #define PSS_FLAG0 0x0800 #define PSS_WRITE_EMPTY 0x8000 #define PSS_READ_FULL 0x4000 /* * WSS registers */ #define WSS_INDEX 4 #define WSS_DATA 5 /* * WSS status bits */ #define WSS_INITIALIZING 0x80 #define WSS_AUTOCALIBRATION 0x20 #define NO_WSS_MIXER -1 #include "coproc.h" #include "pss_boot.h" /* If compiled into kernel, it enable or disable pss mixer */ #ifdef CONFIG_PSS_MIXER static int pss_mixer = 1; #else static int pss_mixer; #endif typedef struct pss_mixerdata { unsigned int volume_l; unsigned int volume_r; unsigned int bass; unsigned int treble; unsigned int synth; } pss_mixerdata; typedef struct pss_confdata { int base; int irq; int dma; int *osp; pss_mixerdata mixer; int ad_mixer_dev; } pss_confdata; static pss_confdata pss_data; static pss_confdata *devc = &pss_data; static DEFINE_SPINLOCK(lock); static int pss_initialized; static int nonstandard_microcode; static int pss_cdrom_port = -1; /* Parameter for the PSS cdrom port */ static int pss_enable_joystick; /* Parameter for enabling the joystick */ static coproc_operations pss_coproc_operations; static void pss_write(pss_confdata *devc, int data) { unsigned long i, limit; limit = jiffies + HZ/10; /* The timeout is 0.1 seconds */ /* * Note! the i<5000000 is an emergency exit. The dsp_command() is sometimes * called while interrupts are disabled. This means that the timer is * disabled also. However the timeout situation is a abnormal condition. * Normally the DSP should be ready to accept commands after just couple of * loops. 
*/ for (i = 0; i < 5000000 && time_before(jiffies, limit); i++) { if (inw(REG(PSS_STATUS)) & PSS_WRITE_EMPTY) { outw(data, REG(PSS_DATA)); return; } } printk(KERN_WARNING "PSS: DSP Command (%04x) Timeout.\n", data); } static int __init probe_pss(struct address_info *hw_config) { unsigned short id; int irq, dma; devc->base = hw_config->io_base; irq = devc->irq = hw_config->irq; dma = devc->dma = hw_config->dma; devc->osp = hw_config->osp; if (devc->base != 0x220 && devc->base != 0x240) if (devc->base != 0x230 && devc->base != 0x250) /* Some cards use these */ return 0; if (!request_region(devc->base, 0x10, "PSS mixer, SB emulation")) { printk(KERN_ERR "PSS: I/O port conflict\n"); return 0; } id = inw(REG(PSS_ID)); if ((id >> 8) != 'E') { printk(KERN_ERR "No PSS signature detected at 0x%x (0x%x)\n", devc->base, id); release_region(devc->base, 0x10); return 0; } if (!request_region(devc->base + 0x10, 0x9, "PSS config")) { printk(KERN_ERR "PSS: I/O port conflict\n"); release_region(devc->base, 0x10); return 0; } return 1; } static int set_irq(pss_confdata * devc, int dev, int irq) { static unsigned short irq_bits[16] = { 0x0000, 0x0000, 0x0000, 0x0008, 0x0000, 0x0010, 0x0000, 0x0018, 0x0000, 0x0020, 0x0028, 0x0030, 0x0038, 0x0000, 0x0000, 0x0000 }; unsigned short tmp, bits; if (irq < 0 || irq > 15) return 0; tmp = inw(REG(dev)) & ~0x38; /* Load confreg, mask IRQ bits out */ if ((bits = irq_bits[irq]) == 0 && irq != 0) { printk(KERN_ERR "PSS: Invalid IRQ %d\n", irq); return 0; } outw(tmp | bits, REG(dev)); return 1; } static void set_io_base(pss_confdata * devc, int dev, int base) { unsigned short tmp = inw(REG(dev)) & 0x003f; unsigned short bits = (base & 0x0ffc) << 4; outw(bits | tmp, REG(dev)); } static int set_dma(pss_confdata * devc, int dev, int dma) { static unsigned short dma_bits[8] = { 0x0001, 0x0002, 0x0000, 0x0003, 0x0000, 0x0005, 0x0006, 0x0007 }; unsigned short tmp, bits; if (dma < 0 || dma > 7) return 0; tmp = inw(REG(dev)) & ~0x07; /* Load confreg, mask 
DMA bits out */ if ((bits = dma_bits[dma]) == 0 && dma != 4) { printk(KERN_ERR "PSS: Invalid DMA %d\n", dma); return 0; } outw(tmp | bits, REG(dev)); return 1; } static int pss_reset_dsp(pss_confdata * devc) { unsigned long i, limit = jiffies + HZ/10; outw(0x2000, REG(PSS_CONTROL)); for (i = 0; i < 32768 && time_after_eq(limit, jiffies); i++) inw(REG(PSS_CONTROL)); outw(0x0000, REG(PSS_CONTROL)); return 1; } static int pss_put_dspword(pss_confdata * devc, unsigned short word) { int i, val; for (i = 0; i < 327680; i++) { val = inw(REG(PSS_STATUS)); if (val & PSS_WRITE_EMPTY) { outw(word, REG(PSS_DATA)); return 1; } } return 0; } static int pss_get_dspword(pss_confdata * devc, unsigned short *word) { int i, val; for (i = 0; i < 327680; i++) { val = inw(REG(PSS_STATUS)); if (val & PSS_READ_FULL) { *word = inw(REG(PSS_DATA)); return 1; } } return 0; } static int pss_download_boot(pss_confdata * devc, unsigned char *block, int size, int flags) { int i, val, count; unsigned long limit; if (flags & CPF_FIRST) { /*_____ Warn DSP software that a boot is coming */ outw(0x00fe, REG(PSS_DATA)); limit = jiffies + HZ/10; for (i = 0; i < 32768 && time_before(jiffies, limit); i++) if (inw(REG(PSS_DATA)) == 0x5500) break; outw(*block++, REG(PSS_DATA)); pss_reset_dsp(devc); } count = 1; while ((flags&CPF_LAST) || count<size ) { int j; for (j = 0; j < 327670; j++) { /*_____ Wait for BG to appear */ if (inw(REG(PSS_STATUS)) & PSS_FLAG3) break; } if (j == 327670) { /* It's ok we timed out when the file was empty */ if (count >= size && flags & CPF_LAST) break; else { printk("\n"); printk(KERN_ERR "PSS: Download timeout problems, byte %d=%d\n", count, size); return 0; } } /*_____ Send the next byte */ if (count >= size) { /* If not data in block send 0xffff */ outw (0xffff, REG (PSS_DATA)); } else { /*_____ Send the next byte */ outw (*block++, REG (PSS_DATA)); }; count++; } if (flags & CPF_LAST) { /*_____ Why */ outw(0, REG(PSS_DATA)); limit = jiffies + HZ/10; for (i = 0; i < 32768 && 
time_after_eq(limit, jiffies); i++) val = inw(REG(PSS_STATUS)); limit = jiffies + HZ/10; for (i = 0; i < 32768 && time_after_eq(limit, jiffies); i++) { val = inw(REG(PSS_STATUS)); if (val & 0x4000) break; } /* now read the version */ for (i = 0; i < 32000; i++) { val = inw(REG(PSS_STATUS)); if (val & PSS_READ_FULL) break; } if (i == 32000) return 0; val = inw(REG(PSS_DATA)); /* printk( "<PSS: microcode version %d.%d loaded>", val/16, val % 16); */ } return 1; } /* Mixer */ static void set_master_volume(pss_confdata *devc, int left, int right) { static unsigned char log_scale[101] = { 0xdb, 0xe0, 0xe3, 0xe5, 0xe7, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xed, 0xee, 0xef, 0xef, 0xf0, 0xf0, 0xf1, 0xf1, 0xf2, 0xf2, 0xf2, 0xf3, 0xf3, 0xf3, 0xf4, 0xf4, 0xf4, 0xf5, 0xf5, 0xf5, 0xf5, 0xf6, 0xf6, 0xf6, 0xf6, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xff, 0xff, 0xff }; pss_write(devc, 0x0010); pss_write(devc, log_scale[left] | 0x0000); pss_write(devc, 0x0010); pss_write(devc, log_scale[right] | 0x0100); } static void set_synth_volume(pss_confdata *devc, int volume) { int vol = ((0x8000*volume)/100L); pss_write(devc, 0x0080); pss_write(devc, vol); pss_write(devc, 0x0081); pss_write(devc, vol); } static void set_bass(pss_confdata *devc, int level) { int vol = (int)(((0xfd - 0xf0) * level)/100L) + 0xf0; pss_write(devc, 0x0010); pss_write(devc, vol | 0x0200); }; static void set_treble(pss_confdata *devc, int level) { int vol = (((0xfd - 0xf0) * level)/100L) + 0xf0; pss_write(devc, 0x0010); pss_write(devc, vol | 0x0300); }; static void pss_mixer_reset(pss_confdata *devc) { set_master_volume(devc, 33, 33); set_bass(devc, 50); set_treble(devc, 50); 
set_synth_volume(devc, 30); pss_write (devc, 0x0010); pss_write (devc, 0x0800 | 0xce); /* Stereo */ if(pss_mixer) { devc->mixer.volume_l = devc->mixer.volume_r = 33; devc->mixer.bass = 50; devc->mixer.treble = 50; devc->mixer.synth = 30; } } static int set_volume_mono(unsigned __user *p, unsigned int *aleft) { unsigned int left, volume; if (get_user(volume, p)) return -EFAULT; left = volume & 0xff; if (left > 100) left = 100; *aleft = left; return 0; } static int set_volume_stereo(unsigned __user *p, unsigned int *aleft, unsigned int *aright) { unsigned int left, right, volume; if (get_user(volume, p)) return -EFAULT; left = volume & 0xff; if (left > 100) left = 100; right = (volume >> 8) & 0xff; if (right > 100) right = 100; *aleft = left; *aright = right; return 0; } static int ret_vol_mono(int left) { return ((left << 8) | left); } static int ret_vol_stereo(int left, int right) { return ((right << 8) | left); } static int call_ad_mixer(pss_confdata *devc,unsigned int cmd, void __user *arg) { if (devc->ad_mixer_dev != NO_WSS_MIXER) return mixer_devs[devc->ad_mixer_dev]->ioctl(devc->ad_mixer_dev, cmd, arg); else return -EINVAL; } static int pss_mixer_ioctl (int dev, unsigned int cmd, void __user *arg) { pss_confdata *devc = mixer_devs[dev]->devc; int cmdf = cmd & 0xff; if ((cmdf != SOUND_MIXER_VOLUME) && (cmdf != SOUND_MIXER_BASS) && (cmdf != SOUND_MIXER_TREBLE) && (cmdf != SOUND_MIXER_SYNTH) && (cmdf != SOUND_MIXER_DEVMASK) && (cmdf != SOUND_MIXER_STEREODEVS) && (cmdf != SOUND_MIXER_RECMASK) && (cmdf != SOUND_MIXER_CAPS) && (cmdf != SOUND_MIXER_RECSRC)) { return call_ad_mixer(devc, cmd, arg); } if (((cmd >> 8) & 0xff) != 'M') return -EINVAL; if (_SIOC_DIR (cmd) & _SIOC_WRITE) { switch (cmdf) { case SOUND_MIXER_RECSRC: if (devc->ad_mixer_dev != NO_WSS_MIXER) return call_ad_mixer(devc, cmd, arg); else { int v; if (get_user(v, (int __user *)arg)) return -EFAULT; if (v != 0) return -EINVAL; return 0; } case SOUND_MIXER_VOLUME: if (set_volume_stereo(arg, 
&devc->mixer.volume_l, &devc->mixer.volume_r)) return -EFAULT; set_master_volume(devc, devc->mixer.volume_l, devc->mixer.volume_r); return ret_vol_stereo(devc->mixer.volume_l, devc->mixer.volume_r); case SOUND_MIXER_BASS: if (set_volume_mono(arg, &devc->mixer.bass)) return -EFAULT; set_bass(devc, devc->mixer.bass); return ret_vol_mono(devc->mixer.bass); case SOUND_MIXER_TREBLE: if (set_volume_mono(arg, &devc->mixer.treble)) return -EFAULT; set_treble(devc, devc->mixer.treble); return ret_vol_mono(devc->mixer.treble); case SOUND_MIXER_SYNTH: if (set_volume_mono(arg, &devc->mixer.synth)) return -EFAULT; set_synth_volume(devc, devc->mixer.synth); return ret_vol_mono(devc->mixer.synth); default: return -EINVAL; } } else { int val, and_mask = 0, or_mask = 0; /* * Return parameters */ switch (cmdf) { case SOUND_MIXER_DEVMASK: if (call_ad_mixer(devc, cmd, arg) == -EINVAL) break; and_mask = ~0; or_mask = SOUND_MASK_VOLUME | SOUND_MASK_BASS | SOUND_MASK_TREBLE | SOUND_MASK_SYNTH; break; case SOUND_MIXER_STEREODEVS: if (call_ad_mixer(devc, cmd, arg) == -EINVAL) break; and_mask = ~0; or_mask = SOUND_MASK_VOLUME; break; case SOUND_MIXER_RECMASK: if (devc->ad_mixer_dev != NO_WSS_MIXER) return call_ad_mixer(devc, cmd, arg); break; case SOUND_MIXER_CAPS: if (devc->ad_mixer_dev != NO_WSS_MIXER) return call_ad_mixer(devc, cmd, arg); or_mask = SOUND_CAP_EXCL_INPUT; break; case SOUND_MIXER_RECSRC: if (devc->ad_mixer_dev != NO_WSS_MIXER) return call_ad_mixer(devc, cmd, arg); break; case SOUND_MIXER_VOLUME: or_mask = ret_vol_stereo(devc->mixer.volume_l, devc->mixer.volume_r); break; case SOUND_MIXER_BASS: or_mask = ret_vol_mono(devc->mixer.bass); break; case SOUND_MIXER_TREBLE: or_mask = ret_vol_mono(devc->mixer.treble); break; case SOUND_MIXER_SYNTH: or_mask = ret_vol_mono(devc->mixer.synth); break; default: return -EINVAL; } if (get_user(val, (int __user *)arg)) return -EFAULT; val &= and_mask; val |= or_mask; if (put_user(val, (int __user *)arg)) return -EFAULT; return val; } } 
static struct mixer_operations pss_mixer_operations = { .owner = THIS_MODULE, .id = "SOUNDPORT", .name = "PSS-AD1848", .ioctl = pss_mixer_ioctl }; static void disable_all_emulations(void) { outw(0x0000, REG(CONF_PSS)); /* 0x0400 enables joystick */ outw(0x0000, REG(CONF_WSS)); outw(0x0000, REG(CONF_SB)); outw(0x0000, REG(CONF_MIDI)); outw(0x0000, REG(CONF_CDROM)); } static void configure_nonsound_components(void) { /* Configure Joystick port */ if(pss_enable_joystick) { outw(0x0400, REG(CONF_PSS)); /* 0x0400 enables joystick */ printk(KERN_INFO "PSS: joystick enabled.\n"); } else { printk(KERN_INFO "PSS: joystick port not enabled.\n"); } /* Configure CDROM port */ if (pss_cdrom_port == -1) { /* If cdrom port enablation wasn't requested */ printk(KERN_INFO "PSS: CDROM port not enabled.\n"); } else if (check_region(pss_cdrom_port, 2)) { printk(KERN_ERR "PSS: CDROM I/O port conflict.\n"); } else { set_io_base(devc, CONF_CDROM, pss_cdrom_port); printk(KERN_INFO "PSS: CDROM I/O port set to 0x%x.\n", pss_cdrom_port); } } static int __init attach_pss(struct address_info *hw_config) { unsigned short id; char tmp[100]; devc->base = hw_config->io_base; devc->irq = hw_config->irq; devc->dma = hw_config->dma; devc->osp = hw_config->osp; devc->ad_mixer_dev = NO_WSS_MIXER; if (!probe_pss(hw_config)) return 0; id = inw(REG(PSS_ID)) & 0x00ff; /* * Disable all emulations. Will be enabled later (if required). 
*/ disable_all_emulations(); #ifdef YOU_REALLY_WANT_TO_ALLOCATE_THESE_RESOURCES if (sound_alloc_dma(hw_config->dma, "PSS")) { printk("pss.c: Can't allocate DMA channel.\n"); release_region(hw_config->io_base, 0x10); release_region(hw_config->io_base+0x10, 0x9); return 0; } if (!set_irq(devc, CONF_PSS, devc->irq)) { printk("PSS: IRQ allocation error.\n"); release_region(hw_config->io_base, 0x10); release_region(hw_config->io_base+0x10, 0x9); return 0; } if (!set_dma(devc, CONF_PSS, devc->dma)) { printk(KERN_ERR "PSS: DMA allocation error\n"); release_region(hw_config->io_base, 0x10); release_region(hw_config->io_base+0x10, 0x9); return 0; } #endif configure_nonsound_components(); pss_initialized = 1; sprintf(tmp, "ECHO-PSS Rev. %d", id); conf_printf(tmp, hw_config); return 1; } static int __init probe_pss_mpu(struct address_info *hw_config) { struct resource *ports; int timeout; if (!pss_initialized) return 0; ports = request_region(hw_config->io_base, 2, "mpu401"); if (!ports) { printk(KERN_ERR "PSS: MPU I/O port conflict\n"); return 0; } set_io_base(devc, CONF_MIDI, hw_config->io_base); if (!set_irq(devc, CONF_MIDI, hw_config->irq)) { printk(KERN_ERR "PSS: MIDI IRQ allocation error.\n"); goto fail; } if (!pss_synthLen) { printk(KERN_ERR "PSS: Can't enable MPU. MIDI synth microcode not available.\n"); goto fail; } if (!pss_download_boot(devc, pss_synth, pss_synthLen, CPF_FIRST | CPF_LAST)) { printk(KERN_ERR "PSS: Unable to load MIDI synth microcode to DSP.\n"); goto fail; } /* * Finally wait until the DSP algorithm has initialized itself and * deactivates receive interrupt. 
*/ for (timeout = 900000; timeout > 0; timeout--) { if ((inb(hw_config->io_base + 1) & 0x80) == 0) /* Input data avail */ inb(hw_config->io_base); /* Discard it */ else break; /* No more input */ } if (!probe_mpu401(hw_config, ports)) goto fail; attach_mpu401(hw_config, THIS_MODULE); /* Slot 1 */ if (hw_config->slots[1] != -1) /* The MPU driver installed itself */ midi_devs[hw_config->slots[1]]->coproc = &pss_coproc_operations; return 1; fail: release_region(hw_config->io_base, 2); return 0; } static int pss_coproc_open(void *dev_info, int sub_device) { switch (sub_device) { case COPR_MIDI: if (pss_synthLen == 0) { printk(KERN_ERR "PSS: MIDI synth microcode not available.\n"); return -EIO; } if (nonstandard_microcode) if (!pss_download_boot(devc, pss_synth, pss_synthLen, CPF_FIRST | CPF_LAST)) { printk(KERN_ERR "PSS: Unable to load MIDI synth microcode to DSP.\n"); return -EIO; } nonstandard_microcode = 0; break; default: break; } return 0; } static void pss_coproc_close(void *dev_info, int sub_device) { return; } static void pss_coproc_reset(void *dev_info) { if (pss_synthLen) if (!pss_download_boot(devc, pss_synth, pss_synthLen, CPF_FIRST | CPF_LAST)) { printk(KERN_ERR "PSS: Unable to load MIDI synth microcode to DSP.\n"); } nonstandard_microcode = 0; } static int download_boot_block(void *dev_info, copr_buffer * buf) { if (buf->len <= 0 || buf->len > sizeof(buf->data)) return -EINVAL; if (!pss_download_boot(devc, buf->data, buf->len, buf->flags)) { printk(KERN_ERR "PSS: Unable to load microcode block to DSP.\n"); return -EIO; } nonstandard_microcode = 1; /* The MIDI microcode has been overwritten */ return 0; } static int pss_coproc_ioctl(void *dev_info, unsigned int cmd, void __user *arg, int local) { copr_buffer *buf; copr_msg *mbuf; copr_debug_buf dbuf; unsigned short tmp; unsigned long flags; unsigned short *data; int i, err; /* printk( "PSS coproc ioctl %x %x %d\n", cmd, arg, local); */ switch (cmd) { case SNDCTL_COPR_RESET: pss_coproc_reset(dev_info); 
return 0; case SNDCTL_COPR_LOAD: buf = vmalloc(sizeof(copr_buffer)); if (buf == NULL) return -ENOSPC; if (copy_from_user(buf, arg, sizeof(copr_buffer))) { vfree(buf); return -EFAULT; } err = download_boot_block(dev_info, buf); vfree(buf); return err; case SNDCTL_COPR_SENDMSG: mbuf = vmalloc(sizeof(copr_msg)); if (mbuf == NULL) return -ENOSPC; if (copy_from_user(mbuf, arg, sizeof(copr_msg))) { vfree(mbuf); return -EFAULT; } data = (unsigned short *)(mbuf->data); spin_lock_irqsave(&lock, flags); for (i = 0; i < mbuf->len; i++) { if (!pss_put_dspword(devc, *data++)) { spin_unlock_irqrestore(&lock,flags); mbuf->len = i; /* feed back number of WORDs sent */ err = copy_to_user(arg, mbuf, sizeof(copr_msg)); vfree(mbuf); return err ? -EFAULT : -EIO; } } spin_unlock_irqrestore(&lock,flags); vfree(mbuf); return 0; case SNDCTL_COPR_RCVMSG: err = 0; mbuf = vmalloc(sizeof(copr_msg)); if (mbuf == NULL) return -ENOSPC; data = (unsigned short *)mbuf->data; spin_lock_irqsave(&lock, flags); for (i = 0; i < sizeof(mbuf->data)/sizeof(unsigned short); i++) { mbuf->len = i; /* feed back number of WORDs read */ if (!pss_get_dspword(devc, data++)) { if (i == 0) err = -EIO; break; } } spin_unlock_irqrestore(&lock,flags); if (copy_to_user(arg, mbuf, sizeof(copr_msg))) err = -EFAULT; vfree(mbuf); return err; case SNDCTL_COPR_RDATA: if (copy_from_user(&dbuf, arg, sizeof(dbuf))) return -EFAULT; spin_lock_irqsave(&lock, flags); if (!pss_put_dspword(devc, 0x00d0)) { spin_unlock_irqrestore(&lock,flags); return -EIO; } if (!pss_put_dspword(devc, (unsigned short)(dbuf.parm1 & 0xffff))) { spin_unlock_irqrestore(&lock,flags); return -EIO; } if (!pss_get_dspword(devc, &tmp)) { spin_unlock_irqrestore(&lock,flags); return -EIO; } dbuf.parm1 = tmp; spin_unlock_irqrestore(&lock,flags); if (copy_to_user(arg, &dbuf, sizeof(dbuf))) return -EFAULT; return 0; case SNDCTL_COPR_WDATA: if (copy_from_user(&dbuf, arg, sizeof(dbuf))) return -EFAULT; spin_lock_irqsave(&lock, flags); if (!pss_put_dspword(devc, 
0x00d1)) { spin_unlock_irqrestore(&lock,flags); return -EIO; } if (!pss_put_dspword(devc, (unsigned short) (dbuf.parm1 & 0xffff))) { spin_unlock_irqrestore(&lock,flags); return -EIO; } tmp = (unsigned int)dbuf.parm2 & 0xffff; if (!pss_put_dspword(devc, tmp)) { spin_unlock_irqrestore(&lock,flags); return -EIO; } spin_unlock_irqrestore(&lock,flags); return 0; case SNDCTL_COPR_WCODE: if (copy_from_user(&dbuf, arg, sizeof(dbuf))) return -EFAULT; spin_lock_irqsave(&lock, flags); if (!pss_put_dspword(devc, 0x00d3)) { spin_unlock_irqrestore(&lock,flags); return -EIO; } if (!pss_put_dspword(devc, (unsigned short)(dbuf.parm1 & 0xffff))) { spin_unlock_irqrestore(&lock,flags); return -EIO; } tmp = (unsigned int)dbuf.parm2 & 0x00ff; if (!pss_put_dspword(devc, tmp)) { spin_unlock_irqrestore(&lock,flags); return -EIO; } tmp = ((unsigned int)dbuf.parm2 >> 8) & 0xffff; if (!pss_put_dspword(devc, tmp)) { spin_unlock_irqrestore(&lock,flags); return -EIO; } spin_unlock_irqrestore(&lock,flags); return 0; case SNDCTL_COPR_RCODE: if (copy_from_user(&dbuf, arg, sizeof(dbuf))) return -EFAULT; spin_lock_irqsave(&lock, flags); if (!pss_put_dspword(devc, 0x00d2)) { spin_unlock_irqrestore(&lock,flags); return -EIO; } if (!pss_put_dspword(devc, (unsigned short)(dbuf.parm1 & 0xffff))) { spin_unlock_irqrestore(&lock,flags); return -EIO; } if (!pss_get_dspword(devc, &tmp)) { /* Read MSB */ spin_unlock_irqrestore(&lock,flags); return -EIO; } dbuf.parm1 = tmp << 8; if (!pss_get_dspword(devc, &tmp)) { /* Read LSB */ spin_unlock_irqrestore(&lock,flags); return -EIO; } dbuf.parm1 |= tmp & 0x00ff; spin_unlock_irqrestore(&lock,flags); if (copy_to_user(arg, &dbuf, sizeof(dbuf))) return -EFAULT; return 0; default: return -EINVAL; } return -EINVAL; } static coproc_operations pss_coproc_operations = { "ADSP-2115", THIS_MODULE, pss_coproc_open, pss_coproc_close, pss_coproc_ioctl, pss_coproc_reset, &pss_data }; static int __init probe_pss_mss(struct address_info *hw_config) { volatile int timeout; struct 
resource *ports; int my_mix = -999; /* gcc shut up */ if (!pss_initialized) return 0; if (!request_region(hw_config->io_base, 4, "WSS config")) { printk(KERN_ERR "PSS: WSS I/O port conflicts.\n"); return 0; } ports = request_region(hw_config->io_base + 4, 4, "ad1848"); if (!ports) { printk(KERN_ERR "PSS: WSS I/O port conflicts.\n"); release_region(hw_config->io_base, 4); return 0; } set_io_base(devc, CONF_WSS, hw_config->io_base); if (!set_irq(devc, CONF_WSS, hw_config->irq)) { printk("PSS: WSS IRQ allocation error.\n"); goto fail; } if (!set_dma(devc, CONF_WSS, hw_config->dma)) { printk(KERN_ERR "PSS: WSS DMA allocation error\n"); goto fail; } /* * For some reason the card returns 0xff in the WSS status register * immediately after boot. Probably MIDI+SB emulation algorithm * downloaded to the ADSP2115 spends some time initializing the card. * Let's try to wait until it finishes this task. */ for (timeout = 0; timeout < 100000 && (inb(hw_config->io_base + WSS_INDEX) & WSS_INITIALIZING); timeout++) ; outb((0x0b), hw_config->io_base + WSS_INDEX); /* Required by some cards */ for (timeout = 0; (inb(hw_config->io_base + WSS_DATA) & WSS_AUTOCALIBRATION) && (timeout < 100000); timeout++) ; if (!probe_ms_sound(hw_config, ports)) goto fail; devc->ad_mixer_dev = NO_WSS_MIXER; if (pss_mixer) { if ((my_mix = sound_install_mixer (MIXER_DRIVER_VERSION, "PSS-SPEAKERS and AD1848 (through MSS audio codec)", &pss_mixer_operations, sizeof (struct mixer_operations), devc)) < 0) { printk(KERN_ERR "Could not install PSS mixer\n"); goto fail; } } pss_mixer_reset(devc); attach_ms_sound(hw_config, ports, THIS_MODULE); /* Slot 0 */ if (hw_config->slots[0] != -1) { /* The MSS driver installed itself */ audio_devs[hw_config->slots[0]]->coproc = &pss_coproc_operations; if (pss_mixer && (num_mixers == (my_mix + 2))) { /* The MSS mixer installed */ devc->ad_mixer_dev = audio_devs[hw_config->slots[0]]->mixer_dev; } } return 1; fail: release_region(hw_config->io_base + 4, 4); 
release_region(hw_config->io_base, 4); return 0; } static inline void __exit unload_pss(struct address_info *hw_config) { release_region(hw_config->io_base, 0x10); release_region(hw_config->io_base+0x10, 0x9); } static inline void __exit unload_pss_mpu(struct address_info *hw_config) { unload_mpu401(hw_config); } static inline void __exit unload_pss_mss(struct address_info *hw_config) { unload_ms_sound(hw_config); } static struct address_info cfg; static struct address_info cfg2; static struct address_info cfg_mpu; static int pss_io __initdata = -1; static int mss_io __initdata = -1; static int mss_irq __initdata = -1; static int mss_dma __initdata = -1; static int mpu_io __initdata = -1; static int mpu_irq __initdata = -1; static int pss_no_sound = 0; /* Just configure non-sound components */ static int pss_keep_settings = 1; /* Keep hardware settings at module exit */ static char *pss_firmware = "/etc/sound/pss_synth"; module_param(pss_io, int, 0); MODULE_PARM_DESC(pss_io, "Set i/o base of PSS card (probably 0x220 or 0x240)"); module_param(mss_io, int, 0); MODULE_PARM_DESC(mss_io, "Set WSS (audio) i/o base (0x530, 0x604, 0xE80, 0xF40, or other. Address must end in 0 or 4 and must be from 0x100 to 0xFF4)"); module_param(mss_irq, int, 0); MODULE_PARM_DESC(mss_irq, "Set WSS (audio) IRQ (3, 5, 7, 9, 10, 11, 12)"); module_param(mss_dma, int, 0); MODULE_PARM_DESC(mss_dma, "Set WSS (audio) DMA (0, 1, 3)"); module_param(mpu_io, int, 0); MODULE_PARM_DESC(mpu_io, "Set MIDI i/o base (0x330 or other. 
Address must be on 4 location boundaries and must be from 0x100 to 0xFFC)"); module_param(mpu_irq, int, 0); MODULE_PARM_DESC(mpu_irq, "Set MIDI IRQ (3, 5, 7, 9, 10, 11, 12)"); module_param(pss_cdrom_port, int, 0); MODULE_PARM_DESC(pss_cdrom_port, "Set the PSS CDROM port i/o base (0x340 or other)"); module_param(pss_enable_joystick, bool, 0); MODULE_PARM_DESC(pss_enable_joystick, "Enables the PSS joystick port (1 to enable, 0 to disable)"); module_param(pss_no_sound, bool, 0); MODULE_PARM_DESC(pss_no_sound, "Configure sound compoents (0 - no, 1 - yes)"); module_param(pss_keep_settings, bool, 0); MODULE_PARM_DESC(pss_keep_settings, "Keep hardware setting at driver unloading (0 - no, 1 - yes)"); module_param(pss_firmware, charp, 0); MODULE_PARM_DESC(pss_firmware, "Location of the firmware file (default - /etc/sound/pss_synth)"); module_param(pss_mixer, bool, 0); MODULE_PARM_DESC(pss_mixer, "Enable (1) or disable (0) PSS mixer (controlling of output volume, bass, treble, synth volume). The mixer is not available on all PSS cards."); MODULE_AUTHOR("Hannu Savolainen, Vladimir Michl"); MODULE_DESCRIPTION("Module for PSS sound cards (based on AD1848, ADSP-2115 and ESC614). This module includes control of output amplifier and synth volume of the Beethoven ADSP-16 card (this may work with other PSS cards)."); MODULE_LICENSE("GPL"); static int fw_load = 0; static int pssmpu = 0, pssmss = 0; /* * Load a PSS sound card module */ static int __init init_pss(void) { if(pss_no_sound) /* If configuring only nonsound components */ { cfg.io_base = pss_io; if(!probe_pss(&cfg)) return -ENODEV; printk(KERN_INFO "ECHO-PSS Rev. 
%d\n", inw(REG(PSS_ID)) & 0x00ff); printk(KERN_INFO "PSS: loading in no sound mode.\n"); disable_all_emulations(); configure_nonsound_components(); release_region(pss_io, 0x10); release_region(pss_io + 0x10, 0x9); return 0; } cfg.io_base = pss_io; cfg2.io_base = mss_io; cfg2.irq = mss_irq; cfg2.dma = mss_dma; cfg_mpu.io_base = mpu_io; cfg_mpu.irq = mpu_irq; if (cfg.io_base == -1 || cfg2.io_base == -1 || cfg2.irq == -1 || cfg.dma == -1) { printk(KERN_INFO "pss: mss_io, mss_dma, mss_irq and pss_io must be set.\n"); return -EINVAL; } if (!pss_synth) { fw_load = 1; pss_synthLen = mod_firmware_load(pss_firmware, (void *) &pss_synth); } if (!attach_pss(&cfg)) return -ENODEV; /* * Attach stuff */ if (probe_pss_mpu(&cfg_mpu)) pssmpu = 1; if (probe_pss_mss(&cfg2)) pssmss = 1; return 0; } static void __exit cleanup_pss(void) { if(!pss_no_sound) { if(fw_load && pss_synth) vfree(pss_synth); if(pssmss) unload_pss_mss(&cfg2); if(pssmpu) unload_pss_mpu(&cfg_mpu); unload_pss(&cfg); } if(!pss_keep_settings) /* Keep hardware settings if asked */ { disable_all_emulations(); printk(KERN_INFO "Resetting PSS sound card configurations.\n"); } } module_init(init_pss); module_exit(cleanup_pss); #ifndef MODULE static int __init setup_pss(char *str) { /* io, mss_io, mss_irq, mss_dma, mpu_io, mpu_irq */ int ints[7]; str = get_options(str, ARRAY_SIZE(ints), ints); pss_io = ints[1]; mss_io = ints[2]; mss_irq = ints[3]; mss_dma = ints[4]; mpu_io = ints[5]; mpu_irq = ints[6]; return 1; } __setup("pss=", setup_pss); #endif
gpl-2.0
zombah/android_kernel_toshiba_ac100_test
drivers/staging/cx25821/cx25821-video-upstream.c
2925
22852
/* * Driver for the Conexant CX25821 PCIe bridge * * Copyright (C) 2009 Conexant Systems Inc. * Authors <hiep.huynh@conexant.com>, <shu.lin@conexant.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include "cx25821-video.h" #include "cx25821-video-upstream.h" #include <linux/fs.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/module.h> #include <linux/syscalls.h> #include <linux/file.h> #include <linux/fcntl.h> #include <linux/slab.h> #include <linux/uaccess.h> MODULE_DESCRIPTION("v4l2 driver module for cx25821 based TV cards"); MODULE_AUTHOR("Hiep Huynh <hiep.huynh@conexant.com>"); MODULE_LICENSE("GPL"); static int _intr_msk = FLD_VID_SRC_RISC1 | FLD_VID_SRC_UF | FLD_VID_SRC_SYNC | FLD_VID_SRC_OPC_ERR; int cx25821_sram_channel_setup_upstream(struct cx25821_dev *dev, struct sram_channel *ch, unsigned int bpl, u32 risc) { unsigned int i, lines; u32 cdt; if (ch->cmds_start == 0) { cx_write(ch->ptr1_reg, 0); cx_write(ch->ptr2_reg, 0); cx_write(ch->cnt2_reg, 0); cx_write(ch->cnt1_reg, 0); return 0; } bpl = (bpl + 7) & ~7; /* alignment */ cdt = ch->cdt; lines = ch->fifo_size / bpl; if (lines > 4) lines = 4; BUG_ON(lines < 2); /* write CDT */ for (i = 0; i < lines; i++) { cx_write(cdt + 16 * i, ch->fifo_start + bpl * i); cx_write(cdt + 16 * i + 4, 
0); cx_write(cdt + 16 * i + 8, 0); cx_write(cdt + 16 * i + 12, 0); } /* write CMDS */ cx_write(ch->cmds_start + 0, risc); cx_write(ch->cmds_start + 4, 0); cx_write(ch->cmds_start + 8, cdt); cx_write(ch->cmds_start + 12, (lines * 16) >> 3); cx_write(ch->cmds_start + 16, ch->ctrl_start); cx_write(ch->cmds_start + 20, VID_IQ_SIZE_DW); for (i = 24; i < 80; i += 4) cx_write(ch->cmds_start + i, 0); /* fill registers */ cx_write(ch->ptr1_reg, ch->fifo_start); cx_write(ch->ptr2_reg, cdt); cx_write(ch->cnt2_reg, (lines * 16) >> 3); cx_write(ch->cnt1_reg, (bpl >> 3) - 1); return 0; } static __le32 *cx25821_update_riscprogram(struct cx25821_dev *dev, __le32 *rp, unsigned int offset, unsigned int bpl, u32 sync_line, unsigned int lines, int fifo_enable, int field_type) { unsigned int line, i; int dist_betwn_starts = bpl * 2; *(rp++) = cpu_to_le32(RISC_RESYNC | sync_line); if (USE_RISC_NOOP_VIDEO) { for (i = 0; i < NUM_NO_OPS; i++) *(rp++) = cpu_to_le32(RISC_NOOP); } /* scan lines */ for (line = 0; line < lines; line++) { *(rp++) = cpu_to_le32(RISC_READ | RISC_SOL | RISC_EOL | bpl); *(rp++) = cpu_to_le32(dev->_data_buf_phys_addr + offset); *(rp++) = cpu_to_le32(0); /* bits 63-32 */ if ((lines <= NTSC_FIELD_HEIGHT) || (line < (NTSC_FIELD_HEIGHT - 1)) || !(dev->_isNTSC)) { offset += dist_betwn_starts; } } return rp; } static __le32 *cx25821_risc_field_upstream(struct cx25821_dev *dev, __le32 * rp, dma_addr_t databuf_phys_addr, unsigned int offset, u32 sync_line, unsigned int bpl, unsigned int lines, int fifo_enable, int field_type) { unsigned int line, i; struct sram_channel *sram_ch = dev->channels[dev->_channel_upstream_select].sram_channels; int dist_betwn_starts = bpl * 2; /* sync instruction */ if (sync_line != NO_SYNC_LINE) *(rp++) = cpu_to_le32(RISC_RESYNC | sync_line); if (USE_RISC_NOOP_VIDEO) { for (i = 0; i < NUM_NO_OPS; i++) *(rp++) = cpu_to_le32(RISC_NOOP); } /* scan lines */ for (line = 0; line < lines; line++) { *(rp++) = cpu_to_le32(RISC_READ | RISC_SOL | RISC_EOL | 
bpl); *(rp++) = cpu_to_le32(databuf_phys_addr + offset); *(rp++) = cpu_to_le32(0); /* bits 63-32 */ if ((lines <= NTSC_FIELD_HEIGHT) || (line < (NTSC_FIELD_HEIGHT - 1)) || !(dev->_isNTSC)) /* to skip the other field line */ offset += dist_betwn_starts; /* check if we need to enable the FIFO after the first 4 lines * For the upstream video channel, the risc engine will enable * the FIFO. */ if (fifo_enable && line == 3) { *(rp++) = RISC_WRITECR; *(rp++) = sram_ch->dma_ctl; *(rp++) = FLD_VID_FIFO_EN; *(rp++) = 0x00000001; } } return rp; } int cx25821_risc_buffer_upstream(struct cx25821_dev *dev, struct pci_dev *pci, unsigned int top_offset, unsigned int bpl, unsigned int lines) { __le32 *rp; int fifo_enable = 0; /* get line count for single field */ int singlefield_lines = lines >> 1; int odd_num_lines = singlefield_lines; int frame = 0; int frame_size = 0; int databuf_offset = 0; int risc_program_size = 0; int risc_flag = RISC_CNT_RESET; unsigned int bottom_offset = bpl; dma_addr_t risc_phys_jump_addr; if (dev->_isNTSC) { odd_num_lines = singlefield_lines + 1; risc_program_size = FRAME1_VID_PROG_SIZE; frame_size = (bpl == Y411_LINE_SZ) ? FRAME_SIZE_NTSC_Y411 : FRAME_SIZE_NTSC_Y422; } else { risc_program_size = PAL_VID_PROG_SIZE; frame_size = (bpl == Y411_LINE_SZ) ? FRAME_SIZE_PAL_Y411 : FRAME_SIZE_PAL_Y422; } /* Virtual address of Risc buffer program */ rp = dev->_dma_virt_addr; for (frame = 0; frame < NUM_FRAMES; frame++) { databuf_offset = frame_size * frame; if (UNSET != top_offset) { fifo_enable = (frame == 0) ? 
FIFO_ENABLE : FIFO_DISABLE; rp = cx25821_risc_field_upstream(dev, rp, dev-> _data_buf_phys_addr + databuf_offset, top_offset, 0, bpl, odd_num_lines, fifo_enable, ODD_FIELD); } fifo_enable = FIFO_DISABLE; /* Even Field */ rp = cx25821_risc_field_upstream(dev, rp, dev->_data_buf_phys_addr + databuf_offset, bottom_offset, 0x200, bpl, singlefield_lines, fifo_enable, EVEN_FIELD); if (frame == 0) { risc_flag = RISC_CNT_RESET; risc_phys_jump_addr = dev->_dma_phys_start_addr + risc_program_size; } else { risc_phys_jump_addr = dev->_dma_phys_start_addr; risc_flag = RISC_CNT_INC; } /* Loop to 2ndFrameRISC or to Start of Risc * program & generate IRQ */ *(rp++) = cpu_to_le32(RISC_JUMP | RISC_IRQ1 | risc_flag); *(rp++) = cpu_to_le32(risc_phys_jump_addr); *(rp++) = cpu_to_le32(0); } return 0; } void cx25821_stop_upstream_video_ch1(struct cx25821_dev *dev) { struct sram_channel *sram_ch = dev->channels[VID_UPSTREAM_SRAM_CHANNEL_I].sram_channels; u32 tmp = 0; if (!dev->_is_running) { pr_info("No video file is currently running so return!\n"); return; } /* Disable RISC interrupts */ tmp = cx_read(sram_ch->int_msk); cx_write(sram_ch->int_msk, tmp & ~_intr_msk); /* Turn OFF risc and fifo enable */ tmp = cx_read(sram_ch->dma_ctl); cx_write(sram_ch->dma_ctl, tmp & ~(FLD_VID_FIFO_EN | FLD_VID_RISC_EN)); /* Clear data buffer memory */ if (dev->_data_buf_virt_addr) memset(dev->_data_buf_virt_addr, 0, dev->_data_buf_size); dev->_is_running = 0; dev->_is_first_frame = 0; dev->_frame_count = 0; dev->_file_status = END_OF_FILE; kfree(dev->_irq_queues); dev->_irq_queues = NULL; kfree(dev->_filename); tmp = cx_read(VID_CH_MODE_SEL); cx_write(VID_CH_MODE_SEL, tmp & 0xFFFFFE00); } void cx25821_free_mem_upstream_ch1(struct cx25821_dev *dev) { if (dev->_is_running) cx25821_stop_upstream_video_ch1(dev); if (dev->_dma_virt_addr) { pci_free_consistent(dev->pci, dev->_risc_size, dev->_dma_virt_addr, dev->_dma_phys_addr); dev->_dma_virt_addr = NULL; } if (dev->_data_buf_virt_addr) { 
pci_free_consistent(dev->pci, dev->_data_buf_size, dev->_data_buf_virt_addr, dev->_data_buf_phys_addr); dev->_data_buf_virt_addr = NULL; } } int cx25821_get_frame(struct cx25821_dev *dev, struct sram_channel *sram_ch) { struct file *myfile; int frame_index_temp = dev->_frame_index; int i = 0; int line_size = (dev->_pixel_format == PIXEL_FRMT_411) ? Y411_LINE_SZ : Y422_LINE_SZ; int frame_size = 0; int frame_offset = 0; ssize_t vfs_read_retval = 0; char mybuf[line_size]; loff_t file_offset; loff_t pos; mm_segment_t old_fs; if (dev->_file_status == END_OF_FILE) return 0; if (dev->_isNTSC) { frame_size = (line_size == Y411_LINE_SZ) ? FRAME_SIZE_NTSC_Y411 : FRAME_SIZE_NTSC_Y422; } else { frame_size = (line_size == Y411_LINE_SZ) ? FRAME_SIZE_PAL_Y411 : FRAME_SIZE_PAL_Y422; } frame_offset = (frame_index_temp > 0) ? frame_size : 0; file_offset = dev->_frame_count * frame_size; myfile = filp_open(dev->_filename, O_RDONLY | O_LARGEFILE, 0); if (IS_ERR(myfile)) { const int open_errno = -PTR_ERR(myfile); pr_err("%s(): ERROR opening file(%s) with errno = %d!\n", __func__, dev->_filename, open_errno); return PTR_ERR(myfile); } else { if (!(myfile->f_op)) { pr_err("%s(): File has no file operations registered!\n", __func__); filp_close(myfile, NULL); return -EIO; } if (!myfile->f_op->read) { pr_err("%s(): File has no READ operations registered!\n", __func__); filp_close(myfile, NULL); return -EIO; } pos = myfile->f_pos; old_fs = get_fs(); set_fs(KERNEL_DS); for (i = 0; i < dev->_lines_count; i++) { pos = file_offset; vfs_read_retval = vfs_read(myfile, mybuf, line_size, &pos); if (vfs_read_retval > 0 && vfs_read_retval == line_size && dev->_data_buf_virt_addr != NULL) { memcpy((void *)(dev->_data_buf_virt_addr + frame_offset / 4), mybuf, vfs_read_retval); } file_offset += vfs_read_retval; frame_offset += vfs_read_retval; if (vfs_read_retval < line_size) { pr_info("Done: exit %s() since no more bytes to read from Video file\n", __func__); break; } } if (i > 0) dev->_frame_count++; 
dev->_file_status = (vfs_read_retval == line_size) ? IN_PROGRESS : END_OF_FILE; set_fs(old_fs); filp_close(myfile, NULL); } return 0; } static void cx25821_vidups_handler(struct work_struct *work) { struct cx25821_dev *dev = container_of(work, struct cx25821_dev, _irq_work_entry); if (!dev) { pr_err("ERROR %s(): since container_of(work_struct) FAILED!\n", __func__); return; } cx25821_get_frame(dev, dev->channels[dev->_channel_upstream_select]. sram_channels); } int cx25821_openfile(struct cx25821_dev *dev, struct sram_channel *sram_ch) { struct file *myfile; int i = 0, j = 0; int line_size = (dev->_pixel_format == PIXEL_FRMT_411) ? Y411_LINE_SZ : Y422_LINE_SZ; ssize_t vfs_read_retval = 0; char mybuf[line_size]; loff_t pos; loff_t offset = (unsigned long)0; mm_segment_t old_fs; myfile = filp_open(dev->_filename, O_RDONLY | O_LARGEFILE, 0); if (IS_ERR(myfile)) { const int open_errno = -PTR_ERR(myfile); pr_err("%s(): ERROR opening file(%s) with errno = %d!\n", __func__, dev->_filename, open_errno); return PTR_ERR(myfile); } else { if (!(myfile->f_op)) { pr_err("%s(): File has no file operations registered!\n", __func__); filp_close(myfile, NULL); return -EIO; } if (!myfile->f_op->read) { pr_err("%s(): File has no READ operations registered! Returning\n", __func__); filp_close(myfile, NULL); return -EIO; } pos = myfile->f_pos; old_fs = get_fs(); set_fs(KERNEL_DS); for (j = 0; j < NUM_FRAMES; j++) { for (i = 0; i < dev->_lines_count; i++) { pos = offset; vfs_read_retval = vfs_read(myfile, mybuf, line_size, &pos); if (vfs_read_retval > 0 && vfs_read_retval == line_size && dev->_data_buf_virt_addr != NULL) { memcpy((void *)(dev-> _data_buf_virt_addr + offset / 4), mybuf, vfs_read_retval); } offset += vfs_read_retval; if (vfs_read_retval < line_size) { pr_info("Done: exit %s() since no more bytes to read from Video file\n", __func__); break; } } if (i > 0) dev->_frame_count++; if (vfs_read_retval < line_size) break; } dev->_file_status = (vfs_read_retval == line_size) ? 
IN_PROGRESS : END_OF_FILE; set_fs(old_fs); myfile->f_pos = 0; filp_close(myfile, NULL); } return 0; } int cx25821_upstream_buffer_prepare(struct cx25821_dev *dev, struct sram_channel *sram_ch, int bpl) { int ret = 0; dma_addr_t dma_addr; dma_addr_t data_dma_addr; if (dev->_dma_virt_addr != NULL) { pci_free_consistent(dev->pci, dev->upstream_riscbuf_size, dev->_dma_virt_addr, dev->_dma_phys_addr); } dev->_dma_virt_addr = pci_alloc_consistent(dev->pci, dev->upstream_riscbuf_size, &dma_addr); dev->_dma_virt_start_addr = dev->_dma_virt_addr; dev->_dma_phys_start_addr = dma_addr; dev->_dma_phys_addr = dma_addr; dev->_risc_size = dev->upstream_riscbuf_size; if (!dev->_dma_virt_addr) { pr_err("FAILED to allocate memory for Risc buffer! Returning\n"); return -ENOMEM; } /* Clear memory at address */ memset(dev->_dma_virt_addr, 0, dev->_risc_size); if (dev->_data_buf_virt_addr != NULL) { pci_free_consistent(dev->pci, dev->upstream_databuf_size, dev->_data_buf_virt_addr, dev->_data_buf_phys_addr); } /* For Video Data buffer allocation */ dev->_data_buf_virt_addr = pci_alloc_consistent(dev->pci, dev->upstream_databuf_size, &data_dma_addr); dev->_data_buf_phys_addr = data_dma_addr; dev->_data_buf_size = dev->upstream_databuf_size; if (!dev->_data_buf_virt_addr) { pr_err("FAILED to allocate memory for data buffer! 
Returning\n"); return -ENOMEM; } /* Clear memory at address */ memset(dev->_data_buf_virt_addr, 0, dev->_data_buf_size); ret = cx25821_openfile(dev, sram_ch); if (ret < 0) return ret; /* Create RISC programs */ ret = cx25821_risc_buffer_upstream(dev, dev->pci, 0, bpl, dev->_lines_count); if (ret < 0) { pr_info("Failed creating Video Upstream Risc programs!\n"); goto error; } return 0; error: return ret; } int cx25821_video_upstream_irq(struct cx25821_dev *dev, int chan_num, u32 status) { u32 int_msk_tmp; struct sram_channel *channel = dev->channels[chan_num].sram_channels; int singlefield_lines = NTSC_FIELD_HEIGHT; int line_size_in_bytes = Y422_LINE_SZ; int odd_risc_prog_size = 0; dma_addr_t risc_phys_jump_addr; __le32 *rp; if (status & FLD_VID_SRC_RISC1) { /* We should only process one program per call */ u32 prog_cnt = cx_read(channel->gpcnt); /* Since we've identified our IRQ, clear our bits from the * interrupt mask and interrupt status registers */ int_msk_tmp = cx_read(channel->int_msk); cx_write(channel->int_msk, int_msk_tmp & ~_intr_msk); cx_write(channel->int_stat, _intr_msk); spin_lock(&dev->slock); dev->_frame_index = prog_cnt; queue_work(dev->_irq_queues, &dev->_irq_work_entry); if (dev->_is_first_frame) { dev->_is_first_frame = 0; if (dev->_isNTSC) { singlefield_lines += 1; odd_risc_prog_size = ODD_FLD_NTSC_PROG_SIZE; } else { singlefield_lines = PAL_FIELD_HEIGHT; odd_risc_prog_size = ODD_FLD_PAL_PROG_SIZE; } if (dev->_dma_virt_start_addr != NULL) { line_size_in_bytes = (dev->_pixel_format == PIXEL_FRMT_411) ? 
Y411_LINE_SZ : Y422_LINE_SZ; risc_phys_jump_addr = dev->_dma_phys_start_addr + odd_risc_prog_size; rp = cx25821_update_riscprogram(dev, dev-> _dma_virt_start_addr, TOP_OFFSET, line_size_in_bytes, 0x0, singlefield_lines, FIFO_DISABLE, ODD_FIELD); /* Jump to Even Risc program of 1st Frame */ *(rp++) = cpu_to_le32(RISC_JUMP); *(rp++) = cpu_to_le32(risc_phys_jump_addr); *(rp++) = cpu_to_le32(0); } } spin_unlock(&dev->slock); } else { if (status & FLD_VID_SRC_UF) pr_err("%s(): Video Received Underflow Error Interrupt!\n", __func__); if (status & FLD_VID_SRC_SYNC) pr_err("%s(): Video Received Sync Error Interrupt!\n", __func__); if (status & FLD_VID_SRC_OPC_ERR) pr_err("%s(): Video Received OpCode Error Interrupt!\n", __func__); } if (dev->_file_status == END_OF_FILE) { pr_err("EOF Channel 1 Framecount = %d\n", dev->_frame_count); return -1; } /* ElSE, set the interrupt mask register, re-enable irq. */ int_msk_tmp = cx_read(channel->int_msk); cx_write(channel->int_msk, int_msk_tmp |= _intr_msk); return 0; } static irqreturn_t cx25821_upstream_irq(int irq, void *dev_id) { struct cx25821_dev *dev = dev_id; u32 msk_stat, vid_status; int handled = 0; int channel_num = 0; struct sram_channel *sram_ch; if (!dev) return -1; channel_num = VID_UPSTREAM_SRAM_CHANNEL_I; sram_ch = dev->channels[channel_num].sram_channels; msk_stat = cx_read(sram_ch->int_mstat); vid_status = cx_read(sram_ch->int_stat); /* Only deal with our interrupt */ if (vid_status) { handled = cx25821_video_upstream_irq(dev, channel_num, vid_status); } if (handled < 0) cx25821_stop_upstream_video_ch1(dev); else handled += handled; return IRQ_RETVAL(handled); } void cx25821_set_pixelengine(struct cx25821_dev *dev, struct sram_channel *ch, int pix_format) { int width = WIDTH_D1; int height = dev->_lines_count; int num_lines, odd_num_lines; u32 value; int vip_mode = OUTPUT_FRMT_656; value = ((pix_format & 0x3) << 12) | (vip_mode & 0x7); value &= 0xFFFFFFEF; value |= dev->_isNTSC ? 
0 : 0x10; cx_write(ch->vid_fmt_ctl, value); /* set number of active pixels in each line. * Default is 720 pixels in both NTSC and PAL format */ cx_write(ch->vid_active_ctl1, width); num_lines = (height / 2) & 0x3FF; odd_num_lines = num_lines; if (dev->_isNTSC) odd_num_lines += 1; value = (num_lines << 16) | odd_num_lines; /* set number of active lines in field 0 (top) and field 1 (bottom) */ cx_write(ch->vid_active_ctl2, value); cx_write(ch->vid_cdt_size, VID_CDT_SIZE >> 3); } int cx25821_start_video_dma_upstream(struct cx25821_dev *dev, struct sram_channel *sram_ch) { u32 tmp = 0; int err = 0; /* 656/VIP SRC Upstream Channel I & J and 7 - Host Bus Interface for * channel A-C */ tmp = cx_read(VID_CH_MODE_SEL); cx_write(VID_CH_MODE_SEL, tmp | 0x1B0001FF); /* Set the physical start address of the RISC program in the initial * program counter(IPC) member of the cmds. */ cx_write(sram_ch->cmds_start + 0, dev->_dma_phys_addr); /* Risc IPC High 64 bits 63-32 */ cx_write(sram_ch->cmds_start + 4, 0); /* reset counter */ cx_write(sram_ch->gpcnt_ctl, 3); /* Clear our bits from the interrupt status register. */ cx_write(sram_ch->int_stat, _intr_msk); /* Set the interrupt mask register, enable irq. 
*/ cx_set(PCI_INT_MSK, cx_read(PCI_INT_MSK) | (1 << sram_ch->irq_bit)); tmp = cx_read(sram_ch->int_msk); cx_write(sram_ch->int_msk, tmp |= _intr_msk); err = request_irq(dev->pci->irq, cx25821_upstream_irq, IRQF_SHARED | IRQF_DISABLED, dev->name, dev); if (err < 0) { pr_err("%s: can't get upstream IRQ %d\n", dev->name, dev->pci->irq); goto fail_irq; } /* Start the DMA engine */ tmp = cx_read(sram_ch->dma_ctl); cx_set(sram_ch->dma_ctl, tmp | FLD_VID_RISC_EN); dev->_is_running = 1; dev->_is_first_frame = 1; return 0; fail_irq: cx25821_dev_unregister(dev); return err; } int cx25821_vidupstream_init_ch1(struct cx25821_dev *dev, int channel_select, int pixel_format) { struct sram_channel *sram_ch; u32 tmp; int retval = 0; int err = 0; int data_frame_size = 0; int risc_buffer_size = 0; int str_length = 0; if (dev->_is_running) { pr_info("Video Channel is still running so return!\n"); return 0; } dev->_channel_upstream_select = channel_select; sram_ch = dev->channels[channel_select].sram_channels; INIT_WORK(&dev->_irq_work_entry, cx25821_vidups_handler); dev->_irq_queues = create_singlethread_workqueue("cx25821_workqueue"); if (!dev->_irq_queues) { pr_err("create_singlethread_workqueue() for Video FAILED!\n"); return -ENOMEM; } /* 656/VIP SRC Upstream Channel I & J and 7 - Host Bus Interface for * channel A-C */ tmp = cx_read(VID_CH_MODE_SEL); cx_write(VID_CH_MODE_SEL, tmp | 0x1B0001FF); dev->_is_running = 0; dev->_frame_count = 0; dev->_file_status = RESET_STATUS; dev->_lines_count = dev->_isNTSC ? 480 : 576; dev->_pixel_format = pixel_format; dev->_line_size = (dev->_pixel_format == PIXEL_FRMT_422) ? (WIDTH_D1 * 2) : (WIDTH_D1 * 3) / 2; data_frame_size = dev->_isNTSC ? NTSC_DATA_BUF_SZ : PAL_DATA_BUF_SZ; risc_buffer_size = dev->_isNTSC ? 
NTSC_RISC_BUF_SIZE : PAL_RISC_BUF_SIZE; if (dev->input_filename) { str_length = strlen(dev->input_filename); dev->_filename = kmalloc(str_length + 1, GFP_KERNEL); if (!dev->_filename) goto error; memcpy(dev->_filename, dev->input_filename, str_length + 1); } else { str_length = strlen(dev->_defaultname); dev->_filename = kmalloc(str_length + 1, GFP_KERNEL); if (!dev->_filename) goto error; memcpy(dev->_filename, dev->_defaultname, str_length + 1); } /* Default if filename is empty string */ if (strcmp(dev->input_filename, "") == 0) { if (dev->_isNTSC) { dev->_filename = (dev->_pixel_format == PIXEL_FRMT_411) ? "/root/vid411.yuv" : "/root/vidtest.yuv"; } else { dev->_filename = (dev->_pixel_format == PIXEL_FRMT_411) ? "/root/pal411.yuv" : "/root/pal422.yuv"; } } dev->_is_running = 0; dev->_frame_count = 0; dev->_file_status = RESET_STATUS; dev->_lines_count = dev->_isNTSC ? 480 : 576; dev->_pixel_format = pixel_format; dev->_line_size = (dev->_pixel_format == PIXEL_FRMT_422) ? (WIDTH_D1 * 2) : (WIDTH_D1 * 3) / 2; retval = cx25821_sram_channel_setup_upstream(dev, sram_ch, dev->_line_size, 0); /* setup fifo + format */ cx25821_set_pixelengine(dev, sram_ch, dev->_pixel_format); dev->upstream_riscbuf_size = risc_buffer_size * 2; dev->upstream_databuf_size = data_frame_size * 2; /* Allocating buffers and prepare RISC program */ retval = cx25821_upstream_buffer_prepare(dev, sram_ch, dev->_line_size); if (retval < 0) { pr_err("%s: Failed to set up Video upstream buffers!\n", dev->name); goto error; } cx25821_start_video_dma_upstream(dev, sram_ch); return 0; error: cx25821_dev_unregister(dev); return err; }
gpl-2.0
Dee-UK/D33_KK_Kernel
drivers/video/backlight/apple_bl.c
3181
5805
/* * Backlight Driver for Intel-based Apples * * Copyright (c) Red Hat <mjg@redhat.com> * Based on code from Pommed: * Copyright (C) 2006 Nicolas Boichat <nicolas @boichat.ch> * Copyright (C) 2006 Felipe Alfaro Solana <felipe_alfaro @linuxmail.org> * Copyright (C) 2007 Julien BLACHE <jb@jblache.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This driver triggers SMIs which cause the firmware to change the * backlight brightness. This is icky in many ways, but it's impractical to * get at the firmware code in order to figure out what it's actually doing. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/backlight.h> #include <linux/err.h> #include <linux/io.h> #include <linux/pci.h> #include <linux/acpi.h> static struct backlight_device *apple_backlight_device; struct hw_data { /* I/O resource to allocate. */ unsigned long iostart; unsigned long iolen; /* Backlight operations structure. */ const struct backlight_ops backlight_ops; void (*set_brightness)(int); }; static const struct hw_data *hw_data; #define DRIVER "apple_backlight: " /* Module parameters. */ static int debug; module_param_named(debug, debug, int, 0644); MODULE_PARM_DESC(debug, "Set to one to enable debugging messages."); /* * Implementation for machines with Intel chipset. 
*/ static void intel_chipset_set_brightness(int intensity) { outb(0x04 | (intensity << 4), 0xb3); outb(0xbf, 0xb2); } static int intel_chipset_send_intensity(struct backlight_device *bd) { int intensity = bd->props.brightness; if (debug) printk(KERN_DEBUG DRIVER "setting brightness to %d\n", intensity); intel_chipset_set_brightness(intensity); return 0; } static int intel_chipset_get_intensity(struct backlight_device *bd) { int intensity; outb(0x03, 0xb3); outb(0xbf, 0xb2); intensity = inb(0xb3) >> 4; if (debug) printk(KERN_DEBUG DRIVER "read brightness of %d\n", intensity); return intensity; } static const struct hw_data intel_chipset_data = { .iostart = 0xb2, .iolen = 2, .backlight_ops = { .options = BL_CORE_SUSPENDRESUME, .get_brightness = intel_chipset_get_intensity, .update_status = intel_chipset_send_intensity, }, .set_brightness = intel_chipset_set_brightness, }; /* * Implementation for machines with Nvidia chipset. */ static void nvidia_chipset_set_brightness(int intensity) { outb(0x04 | (intensity << 4), 0x52f); outb(0xbf, 0x52e); } static int nvidia_chipset_send_intensity(struct backlight_device *bd) { int intensity = bd->props.brightness; if (debug) printk(KERN_DEBUG DRIVER "setting brightness to %d\n", intensity); nvidia_chipset_set_brightness(intensity); return 0; } static int nvidia_chipset_get_intensity(struct backlight_device *bd) { int intensity; outb(0x03, 0x52f); outb(0xbf, 0x52e); intensity = inb(0x52f) >> 4; if (debug) printk(KERN_DEBUG DRIVER "read brightness of %d\n", intensity); return intensity; } static const struct hw_data nvidia_chipset_data = { .iostart = 0x52e, .iolen = 2, .backlight_ops = { .options = BL_CORE_SUSPENDRESUME, .get_brightness = nvidia_chipset_get_intensity, .update_status = nvidia_chipset_send_intensity }, .set_brightness = nvidia_chipset_set_brightness, }; static int __devinit apple_bl_add(struct acpi_device *dev) { struct backlight_properties props; struct pci_dev *host; int intensity; host = pci_get_bus_and_slot(0, 
0); if (!host) { printk(KERN_ERR DRIVER "unable to find PCI host\n"); return -ENODEV; } if (host->vendor == PCI_VENDOR_ID_INTEL) hw_data = &intel_chipset_data; else if (host->vendor == PCI_VENDOR_ID_NVIDIA) hw_data = &nvidia_chipset_data; pci_dev_put(host); if (!hw_data) { printk(KERN_ERR DRIVER "unknown hardware\n"); return -ENODEV; } /* Check that the hardware responds - this may not work under EFI */ intensity = hw_data->backlight_ops.get_brightness(NULL); if (!intensity) { hw_data->set_brightness(1); if (!hw_data->backlight_ops.get_brightness(NULL)) return -ENODEV; hw_data->set_brightness(0); } if (!request_region(hw_data->iostart, hw_data->iolen, "Apple backlight")) return -ENXIO; memset(&props, 0, sizeof(struct backlight_properties)); props.type = BACKLIGHT_PLATFORM; props.max_brightness = 15; apple_backlight_device = backlight_device_register("apple_backlight", NULL, NULL, &hw_data->backlight_ops, &props); if (IS_ERR(apple_backlight_device)) { release_region(hw_data->iostart, hw_data->iolen); return PTR_ERR(apple_backlight_device); } apple_backlight_device->props.brightness = hw_data->backlight_ops.get_brightness(apple_backlight_device); backlight_update_status(apple_backlight_device); return 0; } static int __devexit apple_bl_remove(struct acpi_device *dev, int type) { backlight_device_unregister(apple_backlight_device); release_region(hw_data->iostart, hw_data->iolen); hw_data = NULL; return 0; } static const struct acpi_device_id apple_bl_ids[] = { {"APP0002", 0}, {"", 0}, }; static struct acpi_driver apple_bl_driver = { .name = "Apple backlight", .ids = apple_bl_ids, .ops = { .add = apple_bl_add, .remove = apple_bl_remove, }, }; static int __init apple_bl_init(void) { return acpi_bus_register_driver(&apple_bl_driver); } static void __exit apple_bl_exit(void) { acpi_bus_unregister_driver(&apple_bl_driver); } module_init(apple_bl_init); module_exit(apple_bl_exit); MODULE_AUTHOR("Matthew Garrett <mjg@redhat.com>"); MODULE_DESCRIPTION("Apple Backlight 
Driver"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(acpi, apple_bl_ids); MODULE_ALIAS("mbp_nvidia_bl");
gpl-2.0
jamison904/d2tmo_kernel
drivers/isdn/mISDN/dsp_tones.c
3181
17286
/* * Audio support data for ISDN4Linux. * * Copyright Andreas Eversberg (jolly@eversberg.eu) * * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference. * */ #include <linux/gfp.h> #include <linux/mISDNif.h> #include <linux/mISDNdsp.h> #include "core.h" #include "dsp.h" #define DATA_S sample_silence #define SIZE_S (&sizeof_silence) #define DATA_GA sample_german_all #define SIZE_GA (&sizeof_german_all) #define DATA_GO sample_german_old #define SIZE_GO (&sizeof_german_old) #define DATA_DT sample_american_dialtone #define SIZE_DT (&sizeof_american_dialtone) #define DATA_RI sample_american_ringing #define SIZE_RI (&sizeof_american_ringing) #define DATA_BU sample_american_busy #define SIZE_BU (&sizeof_american_busy) #define DATA_S1 sample_special1 #define SIZE_S1 (&sizeof_special1) #define DATA_S2 sample_special2 #define SIZE_S2 (&sizeof_special2) #define DATA_S3 sample_special3 #define SIZE_S3 (&sizeof_special3) /***************/ /* tones loops */ /***************/ /* all tones are alaw encoded */ /* the last sample+1 is in phase with the first sample. 
the error is low */ static u8 sample_german_all[] = { 0x80, 0xab, 0x81, 0x6d, 0xfd, 0xdd, 0x5d, 0x9d, 0x4d, 0xd1, 0x89, 0x88, 0xd0, 0x4c, 0x9c, 0x5c, 0xdc, 0xfc, 0x6c, 0x80, 0xab, 0x81, 0x6d, 0xfd, 0xdd, 0x5d, 0x9d, 0x4d, 0xd1, 0x89, 0x88, 0xd0, 0x4c, 0x9c, 0x5c, 0xdc, 0xfc, 0x6c, 0x80, 0xab, 0x81, 0x6d, 0xfd, 0xdd, 0x5d, 0x9d, 0x4d, 0xd1, 0x89, 0x88, 0xd0, 0x4c, 0x9c, 0x5c, 0xdc, 0xfc, 0x6c, 0x80, 0xab, 0x81, 0x6d, 0xfd, 0xdd, 0x5d, 0x9d, 0x4d, 0xd1, 0x89, 0x88, 0xd0, 0x4c, 0x9c, 0x5c, 0xdc, 0xfc, 0x6c, }; static u32 sizeof_german_all = sizeof(sample_german_all); static u8 sample_german_old[] = { 0xec, 0x68, 0xe1, 0x6d, 0x6d, 0x91, 0x51, 0xed, 0x6d, 0x01, 0x1e, 0x10, 0x0c, 0x90, 0x60, 0x70, 0x8c, 0xec, 0x68, 0xe1, 0x6d, 0x6d, 0x91, 0x51, 0xed, 0x6d, 0x01, 0x1e, 0x10, 0x0c, 0x90, 0x60, 0x70, 0x8c, 0xec, 0x68, 0xe1, 0x6d, 0x6d, 0x91, 0x51, 0xed, 0x6d, 0x01, 0x1e, 0x10, 0x0c, 0x90, 0x60, 0x70, 0x8c, 0xec, 0x68, 0xe1, 0x6d, 0x6d, 0x91, 0x51, 0xed, 0x6d, 0x01, 0x1e, 0x10, 0x0c, 0x90, 0x60, 0x70, 0x8c, }; static u32 sizeof_german_old = sizeof(sample_german_old); static u8 sample_american_dialtone[] = { 0x2a, 0x18, 0x90, 0x6c, 0x4c, 0xbc, 0x4c, 0x6c, 0x10, 0x58, 0x32, 0xb9, 0x31, 0x2d, 0x8d, 0x0d, 0x8d, 0x2d, 0x31, 0x99, 0x0f, 0x28, 0x60, 0xf0, 0xd0, 0x50, 0xd0, 0x30, 0x60, 0x08, 0x8e, 0x67, 0x09, 0x19, 0x21, 0xe1, 0xd9, 0xb9, 0x29, 0x67, 0x83, 0x02, 0xce, 0xbe, 0xee, 0x1a, 0x1b, 0xef, 0xbf, 0xcf, 0x03, 0x82, 0x66, 0x28, 0xb8, 0xd8, 0xe0, 0x20, 0x18, 0x08, 0x66, 0x8f, 0x09, 0x61, 0x31, 0xd1, 0x51, 0xd1, 0xf1, 0x61, 0x29, 0x0e, 0x98, 0x30, 0x2c, 0x8c, 0x0c, 0x8c, 0x2c, 0x30, 0xb8, 0x33, 0x59, 0x11, 0x6d, 0x4d, 0xbd, 0x4d, 0x6d, 0x91, 0x19, }; static u32 sizeof_american_dialtone = sizeof(sample_american_dialtone); static u8 sample_american_ringing[] = { 0x2a, 0xe0, 0xac, 0x0c, 0xbc, 0x4c, 0x8c, 0x90, 0x48, 0xc7, 0xc1, 0xed, 0xcd, 0x4d, 0xcd, 0xed, 0xc1, 0xb7, 0x08, 0x30, 0xec, 0xcc, 0xcc, 0x8c, 0x10, 0x58, 0x1a, 0x99, 0x71, 0xed, 0x8d, 0x8d, 0x2d, 0x41, 0x89, 0x9e, 0x20, 
0x70, 0x2c, 0xec, 0x2c, 0x70, 0x20, 0x86, 0x77, 0xe1, 0x31, 0x11, 0xd1, 0xf1, 0x81, 0x09, 0xa3, 0x56, 0x58, 0x00, 0x40, 0xc0, 0x60, 0x38, 0x46, 0x43, 0x57, 0x39, 0xd9, 0x59, 0x99, 0xc9, 0x77, 0x2f, 0x2e, 0xc6, 0xd6, 0x28, 0xd6, 0x36, 0x26, 0x2e, 0x8a, 0xa3, 0x43, 0x63, 0x4b, 0x4a, 0x62, 0x42, 0xa2, 0x8b, 0x2f, 0x27, 0x37, 0xd7, 0x29, 0xd7, 0xc7, 0x2f, 0x2e, 0x76, 0xc8, 0x98, 0x58, 0xd8, 0x38, 0x56, 0x42, 0x47, 0x39, 0x61, 0xc1, 0x41, 0x01, 0x59, 0x57, 0xa2, 0x08, 0x80, 0xf0, 0xd0, 0x10, 0x30, 0xe0, 0x76, 0x87, 0x21, 0x71, 0x2d, 0xed, 0x2d, 0x71, 0x21, 0x9f, 0x88, 0x40, 0x2c, 0x8c, 0x8c, 0xec, 0x70, 0x98, 0x1b, 0x59, 0x11, 0x8d, 0xcd, 0xcd, 0xed, 0x31, 0x09, 0xb6, 0xc0, 0xec, 0xcc, 0x4c, 0xcc, 0xec, 0xc0, 0xc6, 0x49, 0x91, 0x8d, 0x4d, 0xbd, 0x0d, 0xad, 0xe1, }; static u32 sizeof_american_ringing = sizeof(sample_american_ringing); static u8 sample_american_busy[] = { 0x2a, 0x00, 0x6c, 0x4c, 0x4c, 0x6c, 0xb0, 0x66, 0x99, 0x11, 0x6d, 0x8d, 0x2d, 0x41, 0xd7, 0x96, 0x60, 0xf0, 0x70, 0x40, 0x58, 0xf6, 0x53, 0x57, 0x09, 0x89, 0xd7, 0x5f, 0xe3, 0x2a, 0xe3, 0x5f, 0xd7, 0x89, 0x09, 0x57, 0x53, 0xf6, 0x58, 0x40, 0x70, 0xf0, 0x60, 0x96, 0xd7, 0x41, 0x2d, 0x8d, 0x6d, 0x11, 0x99, 0x66, 0xb0, 0x6c, 0x4c, 0x4c, 0x6c, 0x00, 0x2a, 0x01, 0x6d, 0x4d, 0x4d, 0x6d, 0xb1, 0x67, 0x98, 0x10, 0x6c, 0x8c, 0x2c, 0x40, 0xd6, 0x97, 0x61, 0xf1, 0x71, 0x41, 0x59, 0xf7, 0x52, 0x56, 0x08, 0x88, 0xd6, 0x5e, 0xe2, 0x2a, 0xe2, 0x5e, 0xd6, 0x88, 0x08, 0x56, 0x52, 0xf7, 0x59, 0x41, 0x71, 0xf1, 0x61, 0x97, 0xd6, 0x40, 0x2c, 0x8c, 0x6c, 0x10, 0x98, 0x67, 0xb1, 0x6d, 0x4d, 0x4d, 0x6d, 0x01, }; static u32 sizeof_american_busy = sizeof(sample_american_busy); static u8 sample_special1[] = { 0x2a, 0x2c, 0xbc, 0x6c, 0xd6, 0x71, 0xbd, 0x0d, 0xd9, 0x80, 0xcc, 0x4c, 0x40, 0x39, 0x0d, 0xbd, 0x11, 0x86, 0xec, 0xbc, 0xec, 0x0e, 0x51, 0xbd, 0x8d, 0x89, 0x30, 0x4c, 0xcc, 0xe0, 0xe1, 0xcd, 0x4d, 0x31, 0x88, 0x8c, 0xbc, 0x50, 0x0f, 0xed, 0xbd, 0xed, 0x87, 0x10, 0xbc, 0x0c, 0x38, 0x41, 0x4d, 0xcd, 0x81, 0xd8, 0x0c, 0xbc, 
0x70, 0xd7, 0x6d, 0xbd, 0x2d, }; static u32 sizeof_special1 = sizeof(sample_special1); static u8 sample_special2[] = { 0x2a, 0xcc, 0x8c, 0xd7, 0x4d, 0x2d, 0x18, 0xbc, 0x10, 0xc1, 0xbd, 0xc1, 0x10, 0xbc, 0x18, 0x2d, 0x4d, 0xd7, 0x8c, 0xcc, 0x2a, 0xcd, 0x8d, 0xd6, 0x4c, 0x2c, 0x19, 0xbd, 0x11, 0xc0, 0xbc, 0xc0, 0x11, 0xbd, 0x19, 0x2c, 0x4c, 0xd6, 0x8d, 0xcd, 0x2a, 0xcc, 0x8c, 0xd7, 0x4d, 0x2d, 0x18, 0xbc, 0x10, 0xc1, 0xbd, 0xc1, 0x10, 0xbc, 0x18, 0x2d, 0x4d, 0xd7, 0x8c, 0xcc, 0x2a, 0xcd, 0x8d, 0xd6, 0x4c, 0x2c, 0x19, 0xbd, 0x11, 0xc0, 0xbc, 0xc0, 0x11, 0xbd, 0x19, 0x2c, 0x4c, 0xd6, 0x8d, 0xcd, }; static u32 sizeof_special2 = sizeof(sample_special2); static u8 sample_special3[] = { 0x2a, 0xbc, 0x18, 0xcd, 0x11, 0x2c, 0x8c, 0xc1, 0x4d, 0xd6, 0xbc, 0xd6, 0x4d, 0xc1, 0x8c, 0x2c, 0x11, 0xcd, 0x18, 0xbc, 0x2a, 0xbd, 0x19, 0xcc, 0x10, 0x2d, 0x8d, 0xc0, 0x4c, 0xd7, 0xbd, 0xd7, 0x4c, 0xc0, 0x8d, 0x2d, 0x10, 0xcc, 0x19, 0xbd, 0x2a, 0xbc, 0x18, 0xcd, 0x11, 0x2c, 0x8c, 0xc1, 0x4d, 0xd6, 0xbc, 0xd6, 0x4d, 0xc1, 0x8c, 0x2c, 0x11, 0xcd, 0x18, 0xbc, 0x2a, 0xbd, 0x19, 0xcc, 0x10, 0x2d, 0x8d, 0xc0, 0x4c, 0xd7, 0xbd, 0xd7, 0x4c, 0xc0, 0x8d, 0x2d, 0x10, 0xcc, 0x19, 0xbd, }; static u32 sizeof_special3 = sizeof(sample_special3); static u8 sample_silence[] = { 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, }; static u32 sizeof_silence = sizeof(sample_silence); struct tones_samples { u32 *len; u8 *data; }; static struct tones_samples samples[] = { {&sizeof_german_all, 
sample_german_all}, {&sizeof_german_old, sample_german_old}, {&sizeof_american_dialtone, sample_american_dialtone}, {&sizeof_american_ringing, sample_american_ringing}, {&sizeof_american_busy, sample_american_busy}, {&sizeof_special1, sample_special1}, {&sizeof_special2, sample_special2}, {&sizeof_special3, sample_special3}, {NULL, NULL}, }; /*********************************** * generate ulaw from alaw samples * ***********************************/ void dsp_audio_generate_ulaw_samples(void) { int i, j; i = 0; while (samples[i].len) { j = 0; while (j < (*samples[i].len)) { samples[i].data[j] = dsp_audio_alaw_to_ulaw[samples[i].data[j]]; j++; } i++; } } /**************************** * tone sequence definition * ****************************/ static struct pattern { int tone; u8 *data[10]; u32 *siz[10]; u32 seq[10]; } pattern[] = { {TONE_GERMAN_DIALTONE, {DATA_GA, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL}, {SIZE_GA, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL}, {1900, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, {TONE_GERMAN_OLDDIALTONE, {DATA_GO, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL}, {SIZE_GO, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL}, {1998, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, {TONE_AMERICAN_DIALTONE, {DATA_DT, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL}, {SIZE_DT, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL}, {8000, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, {TONE_GERMAN_DIALPBX, {DATA_GA, DATA_S, DATA_GA, DATA_S, DATA_GA, DATA_S, NULL, NULL, NULL, NULL}, {SIZE_GA, SIZE_S, SIZE_GA, SIZE_S, SIZE_GA, SIZE_S, NULL, NULL, NULL, NULL}, {2000, 2000, 2000, 2000, 2000, 12000, 0, 0, 0, 0} }, {TONE_GERMAN_OLDDIALPBX, {DATA_GO, DATA_S, DATA_GO, DATA_S, DATA_GO, DATA_S, NULL, NULL, NULL, NULL}, {SIZE_GO, SIZE_S, SIZE_GO, SIZE_S, SIZE_GO, SIZE_S, NULL, NULL, NULL, NULL}, {2000, 2000, 2000, 2000, 2000, 12000, 0, 0, 0, 0} }, {TONE_AMERICAN_DIALPBX, {DATA_DT, DATA_S, DATA_DT, DATA_S, DATA_DT, DATA_S, NULL, NULL, NULL, NULL}, {SIZE_DT, SIZE_S, 
SIZE_DT, SIZE_S, SIZE_DT, SIZE_S, NULL, NULL, NULL, NULL}, {2000, 2000, 2000, 2000, 2000, 12000, 0, 0, 0, 0} }, {TONE_GERMAN_RINGING, {DATA_GA, DATA_S, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL}, {SIZE_GA, SIZE_S, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL}, {8000, 32000, 0, 0, 0, 0, 0, 0, 0, 0} }, {TONE_GERMAN_OLDRINGING, {DATA_GO, DATA_S, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL}, {SIZE_GO, SIZE_S, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL}, {8000, 40000, 0, 0, 0, 0, 0, 0, 0, 0} }, {TONE_AMERICAN_RINGING, {DATA_RI, DATA_S, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL}, {SIZE_RI, SIZE_S, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL}, {8000, 32000, 0, 0, 0, 0, 0, 0, 0, 0} }, {TONE_GERMAN_RINGPBX, {DATA_GA, DATA_S, DATA_GA, DATA_S, NULL, NULL, NULL, NULL, NULL, NULL}, {SIZE_GA, SIZE_S, SIZE_GA, SIZE_S, NULL, NULL, NULL, NULL, NULL, NULL}, {4000, 4000, 4000, 28000, 0, 0, 0, 0, 0, 0} }, {TONE_GERMAN_OLDRINGPBX, {DATA_GO, DATA_S, DATA_GO, DATA_S, NULL, NULL, NULL, NULL, NULL, NULL}, {SIZE_GO, SIZE_S, SIZE_GO, SIZE_S, NULL, NULL, NULL, NULL, NULL, NULL}, {4000, 4000, 4000, 28000, 0, 0, 0, 0, 0, 0} }, {TONE_AMERICAN_RINGPBX, {DATA_RI, DATA_S, DATA_RI, DATA_S, NULL, NULL, NULL, NULL, NULL, NULL}, {SIZE_RI, SIZE_S, SIZE_RI, SIZE_S, NULL, NULL, NULL, NULL, NULL, NULL}, {4000, 4000, 4000, 28000, 0, 0, 0, 0, 0, 0} }, {TONE_GERMAN_BUSY, {DATA_GA, DATA_S, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL}, {SIZE_GA, SIZE_S, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL}, {4000, 4000, 0, 0, 0, 0, 0, 0, 0, 0} }, {TONE_GERMAN_OLDBUSY, {DATA_GO, DATA_S, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL}, {SIZE_GO, SIZE_S, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL}, {1000, 5000, 0, 0, 0, 0, 0, 0, 0, 0} }, {TONE_AMERICAN_BUSY, {DATA_BU, DATA_S, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL}, {SIZE_BU, SIZE_S, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL}, {4000, 4000, 0, 0, 0, 0, 0, 0, 0, 0} }, {TONE_GERMAN_HANGUP, {DATA_GA, DATA_S, NULL, NULL, NULL, NULL, 
NULL, NULL, NULL, NULL}, {SIZE_GA, SIZE_S, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL}, {4000, 4000, 0, 0, 0, 0, 0, 0, 0, 0} }, {TONE_GERMAN_OLDHANGUP, {DATA_GO, DATA_S, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL}, {SIZE_GO, SIZE_S, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL}, {1000, 5000, 0, 0, 0, 0, 0, 0, 0, 0} }, {TONE_AMERICAN_HANGUP, {DATA_DT, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL}, {SIZE_DT, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL}, {8000, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, {TONE_SPECIAL_INFO, {DATA_S1, DATA_S2, DATA_S3, DATA_S, NULL, NULL, NULL, NULL, NULL, NULL}, {SIZE_S1, SIZE_S2, SIZE_S3, SIZE_S, NULL, NULL, NULL, NULL, NULL, NULL}, {2666, 2666, 2666, 8002, 0, 0, 0, 0, 0, 0} }, {TONE_GERMAN_GASSENBESETZT, {DATA_GA, DATA_S, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL}, {SIZE_GA, SIZE_S, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL}, {2000, 2000, 0, 0, 0, 0, 0, 0, 0, 0} }, {TONE_GERMAN_AUFSCHALTTON, {DATA_GO, DATA_S, DATA_GO, DATA_S, NULL, NULL, NULL, NULL, NULL, NULL}, {SIZE_GO, SIZE_S, SIZE_GO, SIZE_S, NULL, NULL, NULL, NULL, NULL, NULL}, {1000, 5000, 1000, 17000, 0, 0, 0, 0, 0, 0} }, {0, {NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL}, {NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL}, {0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, }; /****************** * copy tone data * ******************/ /* an sk_buff is generated from the number of samples needed. * the count will be changed and may begin from 0 each pattern period. * the clue is to precalculate the pointers and legths to use only one * memcpy per function call, or two memcpy if the tone sequence changes. * * pattern - the type of the pattern * count - the sample from the beginning of the pattern (phase) * len - the number of bytes * * return - the sk_buff with the sample * * if tones has finished (e.g. 
knocking tone), dsp->tones is turned off */
void dsp_tone_copy(struct dsp *dsp, u8 *data, int len)
{
	int index, count, start, num;
	struct pattern *pat;
	struct dsp_tone *tone = &dsp->tone;

	/* if we have no tone, we copy silence */
	if (!tone->tone) {
		memset(data, dsp_silence, len);
		return;
	}

	/* process pattern */
	pat = (struct pattern *)tone->pattern;
		/* points to the current pattern */
	index = tone->index; /* gives current sequence index */
	count = tone->count; /* gives current sample */

	/* copy sample */
	while (len) {
		/* find the sequence entry that contains sample 'count',
		 * wrapping to the start of the pattern when the table ends */
		while (42) {
			/* wrap around: a zero length terminates the table */
			if (!pat->seq[index]) {
				count = 0;
				index = 0;
			}
			/* check if we are currently playing this tone */
			if (count < pat->seq[index])
				break;
			if (dsp_debug & DEBUG_DSP_TONE)
				printk(KERN_DEBUG "%s: reaching next sequence "
					"(index=%d)\n", __func__, index);
			count -= pat->seq[index];
			index++;
		}
		/* calculate start and number of samples; clamp to both the
		 * remaining sequence length and the end of the sample data */
		start = count % (*(pat->siz[index]));
		num = len;
		if (num+count > pat->seq[index])
			num = pat->seq[index] - count;
		if (num+start > (*(pat->siz[index])))
			num = (*(pat->siz[index])) - start;
		/* copy memory */
		memcpy(data, pat->data[index]+start, num);
		/* reduce length */
		data += num;
		count += num;
		len -= num;
	}
	tone->index = index;
	tone->count = count;

	/* return sk_buff */
	return;
}

/*******************************
 * send HW message to hfc card *
 *******************************/

/*
 * Ask the hardware to start (sample != NULL) or stop (len == 0) a sample
 * loop by sending a PH_CONTROL_REQ towards the peer channel.  The skb is
 * freed locally if there is no peer or the peer rejects it.
 */
static void dsp_tone_hw_message(struct dsp *dsp, u8 *sample, int len)
{
	struct sk_buff *nskb;

	/* unlocking is not required, because we don't expect a response */
	nskb = _alloc_mISDN_skb(PH_CONTROL_REQ,
		(len) ? HFC_SPL_LOOP_ON : HFC_SPL_LOOP_OFF, len, sample,
		GFP_ATOMIC);
	if (nskb) {
		if (dsp->ch.peer) {
			if (dsp->ch.recv(dsp->ch.peer, nskb))
				dev_kfree_skb(nskb);
		} else
			dev_kfree_skb(nskb);
	}
}

/*****************
 * timer expires *
 *****************/

/*
 * Advance to the next entry of the current tone pattern (hardware-loop
 * mode only) and re-arm the timer for that entry's duration.
 * NOTE(review): assumes tone->tl was set up by dsp_tone() before the
 * first expiry; init_timer() on each expiry is the pre-4.15 timer API
 * and relies on the timer having already expired — confirm against the
 * mISDN core this kernel targets.
 */
void dsp_tone_timeout(void *arg)
{
	struct dsp *dsp = arg;
	struct dsp_tone *tone = &dsp->tone;
	struct pattern *pat = (struct pattern *)tone->pattern;
	int index = tone->index;

	if (!tone->tone)
		return;

	index++;
	if (!pat->seq[index])
		index = 0;
	tone->index = index;
	/* set next tone: DATA_S entries mean silence -> loop off */
	if (pat->data[index] == DATA_S)
		dsp_tone_hw_message(dsp, NULL, 0);
	else
		dsp_tone_hw_message(dsp, pat->data[index],
			*(pat->siz[index]));
	/* set timer; durations are in 8 kHz samples, hence the /8000 */
	init_timer(&tone->tl);
	tone->tl.expires = jiffies + (pat->seq[index] * HZ) / 8000;
	add_timer(&tone->tl);
}

/********************
 * set/release tone *
 ********************/

/*
 * tones are realized by streaming or by special loop commands if supported
 * by hardware. when hardware is used, the patterns will be controlled by
 * timers.
 *
 * tone == 0 releases the current tone; otherwise it selects an entry of
 * the pattern[] table.  Returns 0 on success, -EINVAL for unknown tones.
 */
int dsp_tone(struct dsp *dsp, int tone)
{
	struct pattern *pat;
	int i;
	struct dsp_tone *tonet = &dsp->tone;

	tonet->software = 0;
	tonet->hardware = 0;

	/* we turn off the tone */
	if (!tone) {
		if (dsp->features.hfc_loops && timer_pending(&tonet->tl))
			del_timer(&tonet->tl);
		if (dsp->features.hfc_loops)
			dsp_tone_hw_message(dsp, NULL, 0);
		tonet->tone = 0;
		return 0;
	}

	/* look the requested tone up in the pattern table */
	pat = NULL;
	i = 0;
	while (pattern[i].tone) {
		if (pattern[i].tone == tone) {
			pat = &pattern[i];
			break;
		}
		i++;
	}
	if (!pat) {
		printk(KERN_WARNING "dsp: given tone 0x%x is invalid\n",
			tone);
		return -EINVAL;
	}
	if (dsp_debug & DEBUG_DSP_TONE)
		printk(KERN_DEBUG "%s: now starting tone %d (index=%d)\n",
			__func__, tone, 0);
	tonet->tone = tone;
	tonet->pattern = pat;
	tonet->index = 0;
	tonet->count = 0;
	if (dsp->features.hfc_loops) {
		/* hardware loops: play the first entry and drive the rest
		 * from dsp_tone_timeout() */
		tonet->hardware = 1;
		/* set first tone */
		dsp_tone_hw_message(dsp, pat->data[0], *(pat->siz[0]));
		/* set timer */
		if (timer_pending(&tonet->tl))
			del_timer(&tonet->tl);
		init_timer(&tonet->tl);
		tonet->tl.expires = jiffies + (pat->seq[0] * HZ) / 8000;
		add_timer(&tonet->tl);
	} else {
		/* no hardware loops: samples are streamed by dsp_tone_copy() */
		tonet->software = 1;
	}
	return 0;
}
gpl-2.0
laufersteppenwolf/lge-kernel-p880
fs/jffs2/debug.c
3437
26208
/* * JFFS2 -- Journalling Flash File System, Version 2. * * Copyright © 2001-2007 Red Hat, Inc. * Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org> * * Created by David Woodhouse <dwmw2@infradead.org> * * For licensing information, see the file 'LICENCE' in this directory. * */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/pagemap.h> #include <linux/crc32.h> #include <linux/jffs2.h> #include <linux/mtd/mtd.h> #include <linux/slab.h> #include "nodelist.h" #include "debug.h" #ifdef JFFS2_DBG_SANITY_CHECKS void __jffs2_dbg_acct_sanity_check_nolock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb) { if (unlikely(jeb && jeb->used_size + jeb->dirty_size + jeb->free_size + jeb->wasted_size + jeb->unchecked_size != c->sector_size)) { JFFS2_ERROR("eeep, space accounting for block at 0x%08x is screwed.\n", jeb->offset); JFFS2_ERROR("free %#08x + dirty %#08x + used %#08x + wasted %#08x + unchecked %#08x != total %#08x.\n", jeb->free_size, jeb->dirty_size, jeb->used_size, jeb->wasted_size, jeb->unchecked_size, c->sector_size); BUG(); } if (unlikely(c->used_size + c->dirty_size + c->free_size + c->erasing_size + c->bad_size + c->wasted_size + c->unchecked_size != c->flash_size)) { JFFS2_ERROR("eeep, space accounting superblock info is screwed.\n"); JFFS2_ERROR("free %#08x + dirty %#08x + used %#08x + erasing %#08x + bad %#08x + wasted %#08x + unchecked %#08x != total %#08x.\n", c->free_size, c->dirty_size, c->used_size, c->erasing_size, c->bad_size, c->wasted_size, c->unchecked_size, c->flash_size); BUG(); } } void __jffs2_dbg_acct_sanity_check(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb) { spin_lock(&c->erase_completion_lock); jffs2_dbg_acct_sanity_check_nolock(c, jeb); spin_unlock(&c->erase_completion_lock); } #endif /* JFFS2_DBG_SANITY_CHECKS */ #ifdef JFFS2_DBG_PARANOIA_CHECKS /* * Check the fragtree. 
*/ void __jffs2_dbg_fragtree_paranoia_check(struct jffs2_inode_info *f) { mutex_lock(&f->sem); __jffs2_dbg_fragtree_paranoia_check_nolock(f); mutex_unlock(&f->sem); } void __jffs2_dbg_fragtree_paranoia_check_nolock(struct jffs2_inode_info *f) { struct jffs2_node_frag *frag; int bitched = 0; for (frag = frag_first(&f->fragtree); frag; frag = frag_next(frag)) { struct jffs2_full_dnode *fn = frag->node; if (!fn || !fn->raw) continue; if (ref_flags(fn->raw) == REF_PRISTINE) { if (fn->frags > 1) { JFFS2_ERROR("REF_PRISTINE node at 0x%08x had %d frags. Tell dwmw2.\n", ref_offset(fn->raw), fn->frags); bitched = 1; } /* A hole node which isn't multi-page should be garbage-collected and merged anyway, so we just check for the frag size here, rather than mucking around with actually reading the node and checking the compression type, which is the real way to tell a hole node. */ if (frag->ofs & (PAGE_CACHE_SIZE-1) && frag_prev(frag) && frag_prev(frag)->size < PAGE_CACHE_SIZE && frag_prev(frag)->node) { JFFS2_ERROR("REF_PRISTINE node at 0x%08x had a previous non-hole frag in the same page. Tell dwmw2.\n", ref_offset(fn->raw)); bitched = 1; } if ((frag->ofs+frag->size) & (PAGE_CACHE_SIZE-1) && frag_next(frag) && frag_next(frag)->size < PAGE_CACHE_SIZE && frag_next(frag)->node) { JFFS2_ERROR("REF_PRISTINE node at 0x%08x (%08x-%08x) had a following non-hole frag in the same page. Tell dwmw2.\n", ref_offset(fn->raw), frag->ofs, frag->ofs+frag->size); bitched = 1; } } } if (bitched) { JFFS2_ERROR("fragtree is corrupted.\n"); __jffs2_dbg_dump_fragtree_nolock(f); BUG(); } } /* * Check if the flash contains all 0xFF before we start writing. */ void __jffs2_dbg_prewrite_paranoia_check(struct jffs2_sb_info *c, uint32_t ofs, int len) { size_t retlen; int ret, i; unsigned char *buf; buf = kmalloc(len, GFP_KERNEL); if (!buf) return; ret = jffs2_flash_read(c, ofs, len, &retlen, buf); if (ret || (retlen != len)) { JFFS2_WARNING("read %d bytes failed or short. 
ret %d, retlen %zd.\n", len, ret, retlen); kfree(buf); return; } ret = 0; for (i = 0; i < len; i++) if (buf[i] != 0xff) ret = 1; if (ret) { JFFS2_ERROR("argh, about to write node to %#08x on flash, but there are data already there. The first corrupted byte is at %#08x offset.\n", ofs, ofs + i); __jffs2_dbg_dump_buffer(buf, len, ofs); kfree(buf); BUG(); } kfree(buf); } void __jffs2_dbg_superblock_counts(struct jffs2_sb_info *c) { struct jffs2_eraseblock *jeb; uint32_t free = 0, dirty = 0, used = 0, wasted = 0, erasing = 0, bad = 0, unchecked = 0; int nr_counted = 0; int dump = 0; if (c->gcblock) { nr_counted++; free += c->gcblock->free_size; dirty += c->gcblock->dirty_size; used += c->gcblock->used_size; wasted += c->gcblock->wasted_size; unchecked += c->gcblock->unchecked_size; } if (c->nextblock) { nr_counted++; free += c->nextblock->free_size; dirty += c->nextblock->dirty_size; used += c->nextblock->used_size; wasted += c->nextblock->wasted_size; unchecked += c->nextblock->unchecked_size; } list_for_each_entry(jeb, &c->clean_list, list) { nr_counted++; free += jeb->free_size; dirty += jeb->dirty_size; used += jeb->used_size; wasted += jeb->wasted_size; unchecked += jeb->unchecked_size; } list_for_each_entry(jeb, &c->very_dirty_list, list) { nr_counted++; free += jeb->free_size; dirty += jeb->dirty_size; used += jeb->used_size; wasted += jeb->wasted_size; unchecked += jeb->unchecked_size; } list_for_each_entry(jeb, &c->dirty_list, list) { nr_counted++; free += jeb->free_size; dirty += jeb->dirty_size; used += jeb->used_size; wasted += jeb->wasted_size; unchecked += jeb->unchecked_size; } list_for_each_entry(jeb, &c->erasable_list, list) { nr_counted++; free += jeb->free_size; dirty += jeb->dirty_size; used += jeb->used_size; wasted += jeb->wasted_size; unchecked += jeb->unchecked_size; } list_for_each_entry(jeb, &c->erasable_pending_wbuf_list, list) { nr_counted++; free += jeb->free_size; dirty += jeb->dirty_size; used += jeb->used_size; wasted += 
jeb->wasted_size; unchecked += jeb->unchecked_size; } list_for_each_entry(jeb, &c->erase_pending_list, list) { nr_counted++; free += jeb->free_size; dirty += jeb->dirty_size; used += jeb->used_size; wasted += jeb->wasted_size; unchecked += jeb->unchecked_size; } list_for_each_entry(jeb, &c->free_list, list) { nr_counted++; free += jeb->free_size; dirty += jeb->dirty_size; used += jeb->used_size; wasted += jeb->wasted_size; unchecked += jeb->unchecked_size; } list_for_each_entry(jeb, &c->bad_used_list, list) { nr_counted++; free += jeb->free_size; dirty += jeb->dirty_size; used += jeb->used_size; wasted += jeb->wasted_size; unchecked += jeb->unchecked_size; } list_for_each_entry(jeb, &c->erasing_list, list) { nr_counted++; erasing += c->sector_size; } list_for_each_entry(jeb, &c->erase_checking_list, list) { nr_counted++; erasing += c->sector_size; } list_for_each_entry(jeb, &c->erase_complete_list, list) { nr_counted++; erasing += c->sector_size; } list_for_each_entry(jeb, &c->bad_list, list) { nr_counted++; bad += c->sector_size; } #define check(sz) \ if (sz != c->sz##_size) { \ printk(KERN_WARNING #sz "_size mismatch counted 0x%x, c->" #sz "_size 0x%x\n", \ sz, c->sz##_size); \ dump = 1; \ } check(free); check(dirty); check(used); check(wasted); check(unchecked); check(bad); check(erasing); #undef check if (nr_counted != c->nr_blocks) { printk(KERN_WARNING "%s counted only 0x%x blocks of 0x%x. Where are the others?\n", __func__, nr_counted, c->nr_blocks); dump = 1; } if (dump) { __jffs2_dbg_dump_block_lists_nolock(c); BUG(); } } /* * Check the space accounting and node_ref list correctness for the JFFS2 erasable block 'jeb'. 
*/
void __jffs2_dbg_acct_paranoia_check(struct jffs2_sb_info *c,
				     struct jffs2_eraseblock *jeb)
{
	spin_lock(&c->erase_completion_lock);
	__jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
	spin_unlock(&c->erase_completion_lock);
}

/*
 * Walk the node_ref chain of 'jeb' and verify that every ref lies inside
 * the block, that last_node really terminates the chain, and that the
 * recomputed used/unchecked totals match the stored accounting.  On any
 * mismatch the block and list state are dumped and we BUG().
 * Caller must hold c->erase_completion_lock.
 */
void __jffs2_dbg_acct_paranoia_check_nolock(struct jffs2_sb_info *c,
					    struct jffs2_eraseblock *jeb)
{
	uint32_t my_used_size = 0;
	uint32_t my_unchecked_size = 0;
	uint32_t my_dirty_size = 0;
	struct jffs2_raw_node_ref *ref2 = jeb->first_node;

	while (ref2) {
		uint32_t totlen = ref_totlen(c, jeb, ref2);

		/* the ref must point inside (or one past) this eraseblock */
		if (ref_offset(ref2) < jeb->offset ||
		    ref_offset(ref2) > jeb->offset + c->sector_size) {
			JFFS2_ERROR("node_ref %#08x shouldn't be in block at %#08x.\n",
				    ref_offset(ref2), jeb->offset);
			goto error;
		}
		if (ref_flags(ref2) == REF_UNCHECKED)
			my_unchecked_size += totlen;
		else if (!ref_obsolete(ref2))
			my_used_size += totlen;
		else
			my_dirty_size += totlen;

		/* the chain must end exactly at jeb->last_node */
		if ((!ref_next(ref2)) != (ref2 == jeb->last_node)) {
			JFFS2_ERROR("node_ref for node at %#08x (mem %p) has next at %#08x (mem %p), last_node is at %#08x (mem %p).\n",
				    ref_offset(ref2), ref2,
				    ref_offset(ref_next(ref2)), ref_next(ref2),
				    ref_offset(jeb->last_node), jeb->last_node);
			goto error;
		}
		ref2 = ref_next(ref2);
	}

	if (my_used_size != jeb->used_size) {
		JFFS2_ERROR("Calculated used size %#08x != stored used size %#08x.\n",
			    my_used_size, jeb->used_size);
		goto error;
	}

	if (my_unchecked_size != jeb->unchecked_size) {
		JFFS2_ERROR("Calculated unchecked size %#08x != stored unchecked size %#08x.\n",
			    my_unchecked_size, jeb->unchecked_size);
		goto error;
	}

#if 0
	/* This should work when we implement ref->__totlen elimination */
	if (my_dirty_size != jeb->dirty_size + jeb->wasted_size) {
		JFFS2_ERROR("Calculated dirty+wasted size %#08x != stored dirty + wasted size %#08x\n",
			    my_dirty_size, jeb->dirty_size + jeb->wasted_size);
		goto error;
	}

	if (jeb->free_size == 0
	    && my_used_size + my_unchecked_size + my_dirty_size != c->sector_size) {
		JFFS2_ERROR("The sum of all nodes in block (%#x) != size of block (%#x)\n",
			    my_used_size + my_unchecked_size + my_dirty_size,
			    c->sector_size);
		goto error;
	}
#endif

	/* during build/scan the superblock totals are still in flux */
	if (!(c->flags & (JFFS2_SB_FLAG_BUILDING|JFFS2_SB_FLAG_SCANNING)))
		__jffs2_dbg_superblock_counts(c);

	return;

error:
	__jffs2_dbg_dump_node_refs_nolock(c, jeb);
	__jffs2_dbg_dump_jeb_nolock(jeb);
	__jffs2_dbg_dump_block_lists_nolock(c);
	BUG();
}
#endif /* JFFS2_DBG_PARANOIA_CHECKS */

#if defined(JFFS2_DBG_DUMPS) || defined(JFFS2_DBG_PARANOIA_CHECKS)
/*
 * Dump the node_refs of the 'jeb' JFFS2 eraseblock.
 */
void __jffs2_dbg_dump_node_refs(struct jffs2_sb_info *c,
				struct jffs2_eraseblock *jeb)
{
	spin_lock(&c->erase_completion_lock);
	__jffs2_dbg_dump_node_refs_nolock(c, jeb);
	spin_unlock(&c->erase_completion_lock);
}

/*
 * Print the node_ref chain of 'jeb', four refs per line.
 * Caller must hold c->erase_completion_lock.
 */
void __jffs2_dbg_dump_node_refs_nolock(struct jffs2_sb_info *c,
				       struct jffs2_eraseblock *jeb)
{
	struct jffs2_raw_node_ref *ref;
	int i = 0;

	printk(JFFS2_DBG_MSG_PREFIX " Dump node_refs of the eraseblock %#08x\n", jeb->offset);
	if (!jeb->first_node) {
		printk(JFFS2_DBG_MSG_PREFIX " no nodes in the eraseblock %#08x\n", jeb->offset);
		return;
	}

	printk(JFFS2_DBG);
	for (ref = jeb->first_node; ; ref = ref_next(ref)) {
		printk("%#08x", ref_offset(ref));
#ifdef TEST_TOTLEN
		printk("(%x)", ref->__totlen);
#endif
		if (ref_next(ref))
			printk("->");
		else
			break;
		if (++i == 4) {
			i = 0;
			printk("\n" JFFS2_DBG);
		}
	}
	printk("\n");
}

/*
 * Dump an eraseblock's space accounting.
*/ void __jffs2_dbg_dump_jeb(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb) { spin_lock(&c->erase_completion_lock); __jffs2_dbg_dump_jeb_nolock(jeb); spin_unlock(&c->erase_completion_lock); } void __jffs2_dbg_dump_jeb_nolock(struct jffs2_eraseblock *jeb) { if (!jeb) return; printk(JFFS2_DBG_MSG_PREFIX " dump space accounting for the eraseblock at %#08x:\n", jeb->offset); printk(JFFS2_DBG "used_size: %#08x\n", jeb->used_size); printk(JFFS2_DBG "dirty_size: %#08x\n", jeb->dirty_size); printk(JFFS2_DBG "wasted_size: %#08x\n", jeb->wasted_size); printk(JFFS2_DBG "unchecked_size: %#08x\n", jeb->unchecked_size); printk(JFFS2_DBG "free_size: %#08x\n", jeb->free_size); } void __jffs2_dbg_dump_block_lists(struct jffs2_sb_info *c) { spin_lock(&c->erase_completion_lock); __jffs2_dbg_dump_block_lists_nolock(c); spin_unlock(&c->erase_completion_lock); } void __jffs2_dbg_dump_block_lists_nolock(struct jffs2_sb_info *c) { printk(JFFS2_DBG_MSG_PREFIX " dump JFFS2 blocks lists:\n"); printk(JFFS2_DBG "flash_size: %#08x\n", c->flash_size); printk(JFFS2_DBG "used_size: %#08x\n", c->used_size); printk(JFFS2_DBG "dirty_size: %#08x\n", c->dirty_size); printk(JFFS2_DBG "wasted_size: %#08x\n", c->wasted_size); printk(JFFS2_DBG "unchecked_size: %#08x\n", c->unchecked_size); printk(JFFS2_DBG "free_size: %#08x\n", c->free_size); printk(JFFS2_DBG "erasing_size: %#08x\n", c->erasing_size); printk(JFFS2_DBG "bad_size: %#08x\n", c->bad_size); printk(JFFS2_DBG "sector_size: %#08x\n", c->sector_size); printk(JFFS2_DBG "jffs2_reserved_blocks size: %#08x\n", c->sector_size * c->resv_blocks_write); if (c->nextblock) printk(JFFS2_DBG "nextblock: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n", c->nextblock->offset, c->nextblock->used_size, c->nextblock->dirty_size, c->nextblock->wasted_size, c->nextblock->unchecked_size, c->nextblock->free_size); else printk(JFFS2_DBG "nextblock: NULL\n"); if (c->gcblock) printk(JFFS2_DBG "gcblock: %#08x (used %#08x, dirty %#08x, 
wasted %#08x, unchecked %#08x, free %#08x)\n", c->gcblock->offset, c->gcblock->used_size, c->gcblock->dirty_size, c->gcblock->wasted_size, c->gcblock->unchecked_size, c->gcblock->free_size); else printk(JFFS2_DBG "gcblock: NULL\n"); if (list_empty(&c->clean_list)) { printk(JFFS2_DBG "clean_list: empty\n"); } else { struct list_head *this; int numblocks = 0; uint32_t dirty = 0; list_for_each(this, &c->clean_list) { struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); numblocks ++; dirty += jeb->wasted_size; if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) { printk(JFFS2_DBG "clean_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n", jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size); } } printk (JFFS2_DBG "Contains %d blocks with total wasted size %u, average wasted size: %u\n", numblocks, dirty, dirty / numblocks); } if (list_empty(&c->very_dirty_list)) { printk(JFFS2_DBG "very_dirty_list: empty\n"); } else { struct list_head *this; int numblocks = 0; uint32_t dirty = 0; list_for_each(this, &c->very_dirty_list) { struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); numblocks ++; dirty += jeb->dirty_size; if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) { printk(JFFS2_DBG "very_dirty_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n", jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size); } } printk (JFFS2_DBG "Contains %d blocks with total dirty size %u, average dirty size: %u\n", numblocks, dirty, dirty / numblocks); } if (list_empty(&c->dirty_list)) { printk(JFFS2_DBG "dirty_list: empty\n"); } else { struct list_head *this; int numblocks = 0; uint32_t dirty = 0; list_for_each(this, &c->dirty_list) { struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); numblocks ++; 
dirty += jeb->dirty_size; if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) { printk(JFFS2_DBG "dirty_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n", jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size); } } printk (JFFS2_DBG "contains %d blocks with total dirty size %u, average dirty size: %u\n", numblocks, dirty, dirty / numblocks); } if (list_empty(&c->erasable_list)) { printk(JFFS2_DBG "erasable_list: empty\n"); } else { struct list_head *this; list_for_each(this, &c->erasable_list) { struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) { printk(JFFS2_DBG "erasable_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n", jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size); } } } if (list_empty(&c->erasing_list)) { printk(JFFS2_DBG "erasing_list: empty\n"); } else { struct list_head *this; list_for_each(this, &c->erasing_list) { struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) { printk(JFFS2_DBG "erasing_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n", jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size); } } } if (list_empty(&c->erase_checking_list)) { printk(JFFS2_DBG "erase_checking_list: empty\n"); } else { struct list_head *this; list_for_each(this, &c->erase_checking_list) { struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) { printk(JFFS2_DBG "erase_checking_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n", jeb->offset, jeb->used_size, 
jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size); } } } if (list_empty(&c->erase_pending_list)) { printk(JFFS2_DBG "erase_pending_list: empty\n"); } else { struct list_head *this; list_for_each(this, &c->erase_pending_list) { struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) { printk(JFFS2_DBG "erase_pending_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n", jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size); } } } if (list_empty(&c->erasable_pending_wbuf_list)) { printk(JFFS2_DBG "erasable_pending_wbuf_list: empty\n"); } else { struct list_head *this; list_for_each(this, &c->erasable_pending_wbuf_list) { struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) { printk(JFFS2_DBG "erasable_pending_wbuf_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n", jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size); } } } if (list_empty(&c->free_list)) { printk(JFFS2_DBG "free_list: empty\n"); } else { struct list_head *this; list_for_each(this, &c->free_list) { struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) { printk(JFFS2_DBG "free_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n", jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size); } } } if (list_empty(&c->bad_list)) { printk(JFFS2_DBG "bad_list: empty\n"); } else { struct list_head *this; list_for_each(this, &c->bad_list) { struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); if (!(jeb->used_size == 0 && 
jeb->dirty_size == 0 && jeb->wasted_size == 0)) { printk(JFFS2_DBG "bad_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n", jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size); } } } if (list_empty(&c->bad_used_list)) { printk(JFFS2_DBG "bad_used_list: empty\n"); } else { struct list_head *this; list_for_each(this, &c->bad_used_list) { struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) { printk(JFFS2_DBG "bad_used_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n", jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size); } } } } void __jffs2_dbg_dump_fragtree(struct jffs2_inode_info *f) { mutex_lock(&f->sem); jffs2_dbg_dump_fragtree_nolock(f); mutex_unlock(&f->sem); } void __jffs2_dbg_dump_fragtree_nolock(struct jffs2_inode_info *f) { struct jffs2_node_frag *this = frag_first(&f->fragtree); uint32_t lastofs = 0; int buggy = 0; printk(JFFS2_DBG_MSG_PREFIX " dump fragtree of ino #%u\n", f->inocache->ino); while(this) { if (this->node) printk(JFFS2_DBG "frag %#04x-%#04x: %#08x(%d) on flash (*%p), left (%p), right (%p), parent (%p)\n", this->ofs, this->ofs+this->size, ref_offset(this->node->raw), ref_flags(this->node->raw), this, frag_left(this), frag_right(this), frag_parent(this)); else printk(JFFS2_DBG "frag %#04x-%#04x: hole (*%p). 
left (%p), right (%p), parent (%p)\n", this->ofs, this->ofs+this->size, this, frag_left(this), frag_right(this), frag_parent(this)); if (this->ofs != lastofs) buggy = 1; lastofs = this->ofs + this->size; this = frag_next(this); } if (f->metadata) printk(JFFS2_DBG "metadata at 0x%08x\n", ref_offset(f->metadata->raw)); if (buggy) { JFFS2_ERROR("frag tree got a hole in it.\n"); BUG(); } } #define JFFS2_BUFDUMP_BYTES_PER_LINE 32 void __jffs2_dbg_dump_buffer(unsigned char *buf, int len, uint32_t offs) { int skip; int i; printk(JFFS2_DBG_MSG_PREFIX " dump from offset %#08x to offset %#08x (%x bytes).\n", offs, offs + len, len); i = skip = offs % JFFS2_BUFDUMP_BYTES_PER_LINE; offs = offs & ~(JFFS2_BUFDUMP_BYTES_PER_LINE - 1); if (skip != 0) printk(JFFS2_DBG "%#08x: ", offs); while (skip--) printk(" "); while (i < len) { if ((i % JFFS2_BUFDUMP_BYTES_PER_LINE) == 0 && i != len -1) { if (i != 0) printk("\n"); offs += JFFS2_BUFDUMP_BYTES_PER_LINE; printk(JFFS2_DBG "%0#8x: ", offs); } printk("%02x ", buf[i]); i += 1; } printk("\n"); } /* * Dump a JFFS2 node. */ void __jffs2_dbg_dump_node(struct jffs2_sb_info *c, uint32_t ofs) { union jffs2_node_union node; int len = sizeof(union jffs2_node_union); size_t retlen; uint32_t crc; int ret; printk(JFFS2_DBG_MSG_PREFIX " dump node at offset %#08x.\n", ofs); ret = jffs2_flash_read(c, ofs, len, &retlen, (unsigned char *)&node); if (ret || (retlen != len)) { JFFS2_ERROR("read %d bytes failed or short. 
ret %d, retlen %zd.\n", len, ret, retlen); return; } printk(JFFS2_DBG "magic:\t%#04x\n", je16_to_cpu(node.u.magic)); printk(JFFS2_DBG "nodetype:\t%#04x\n", je16_to_cpu(node.u.nodetype)); printk(JFFS2_DBG "totlen:\t%#08x\n", je32_to_cpu(node.u.totlen)); printk(JFFS2_DBG "hdr_crc:\t%#08x\n", je32_to_cpu(node.u.hdr_crc)); crc = crc32(0, &node.u, sizeof(node.u) - 4); if (crc != je32_to_cpu(node.u.hdr_crc)) { JFFS2_ERROR("wrong common header CRC.\n"); return; } if (je16_to_cpu(node.u.magic) != JFFS2_MAGIC_BITMASK && je16_to_cpu(node.u.magic) != JFFS2_OLD_MAGIC_BITMASK) { JFFS2_ERROR("wrong node magic: %#04x instead of %#04x.\n", je16_to_cpu(node.u.magic), JFFS2_MAGIC_BITMASK); return; } switch(je16_to_cpu(node.u.nodetype)) { case JFFS2_NODETYPE_INODE: printk(JFFS2_DBG "the node is inode node\n"); printk(JFFS2_DBG "ino:\t%#08x\n", je32_to_cpu(node.i.ino)); printk(JFFS2_DBG "version:\t%#08x\n", je32_to_cpu(node.i.version)); printk(JFFS2_DBG "mode:\t%#08x\n", node.i.mode.m); printk(JFFS2_DBG "uid:\t%#04x\n", je16_to_cpu(node.i.uid)); printk(JFFS2_DBG "gid:\t%#04x\n", je16_to_cpu(node.i.gid)); printk(JFFS2_DBG "isize:\t%#08x\n", je32_to_cpu(node.i.isize)); printk(JFFS2_DBG "atime:\t%#08x\n", je32_to_cpu(node.i.atime)); printk(JFFS2_DBG "mtime:\t%#08x\n", je32_to_cpu(node.i.mtime)); printk(JFFS2_DBG "ctime:\t%#08x\n", je32_to_cpu(node.i.ctime)); printk(JFFS2_DBG "offset:\t%#08x\n", je32_to_cpu(node.i.offset)); printk(JFFS2_DBG "csize:\t%#08x\n", je32_to_cpu(node.i.csize)); printk(JFFS2_DBG "dsize:\t%#08x\n", je32_to_cpu(node.i.dsize)); printk(JFFS2_DBG "compr:\t%#02x\n", node.i.compr); printk(JFFS2_DBG "usercompr:\t%#02x\n", node.i.usercompr); printk(JFFS2_DBG "flags:\t%#04x\n", je16_to_cpu(node.i.flags)); printk(JFFS2_DBG "data_crc:\t%#08x\n", je32_to_cpu(node.i.data_crc)); printk(JFFS2_DBG "node_crc:\t%#08x\n", je32_to_cpu(node.i.node_crc)); crc = crc32(0, &node.i, sizeof(node.i) - 8); if (crc != je32_to_cpu(node.i.node_crc)) { JFFS2_ERROR("wrong node header CRC.\n"); 
return; } break; case JFFS2_NODETYPE_DIRENT: printk(JFFS2_DBG "the node is dirent node\n"); printk(JFFS2_DBG "pino:\t%#08x\n", je32_to_cpu(node.d.pino)); printk(JFFS2_DBG "version:\t%#08x\n", je32_to_cpu(node.d.version)); printk(JFFS2_DBG "ino:\t%#08x\n", je32_to_cpu(node.d.ino)); printk(JFFS2_DBG "mctime:\t%#08x\n", je32_to_cpu(node.d.mctime)); printk(JFFS2_DBG "nsize:\t%#02x\n", node.d.nsize); printk(JFFS2_DBG "type:\t%#02x\n", node.d.type); printk(JFFS2_DBG "node_crc:\t%#08x\n", je32_to_cpu(node.d.node_crc)); printk(JFFS2_DBG "name_crc:\t%#08x\n", je32_to_cpu(node.d.name_crc)); node.d.name[node.d.nsize] = '\0'; printk(JFFS2_DBG "name:\t\"%s\"\n", node.d.name); crc = crc32(0, &node.d, sizeof(node.d) - 8); if (crc != je32_to_cpu(node.d.node_crc)) { JFFS2_ERROR("wrong node header CRC.\n"); return; } break; default: printk(JFFS2_DBG "node type is unknown\n"); break; } } #endif /* JFFS2_DBG_DUMPS || JFFS2_DBG_PARANOIA_CHECKS */
gpl-2.0
civato/Note8.0-StormBorn
arch/arm/mach-at91/cpuidle.c
3949
2682
/* * based on arch/arm/mach-kirkwood/cpuidle.c * * CPU idle support for AT91 SoC * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. * * The cpu idle uses wait-for-interrupt and RAM self refresh in order * to implement two idle states - * #1 wait-for-interrupt * #2 wait-for-interrupt and RAM self refresh */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/cpuidle.h> #include <asm/proc-fns.h> #include <linux/io.h> #include "pm.h" #define AT91_MAX_STATES 2 static DEFINE_PER_CPU(struct cpuidle_device, at91_cpuidle_device); static struct cpuidle_driver at91_idle_driver = { .name = "at91_idle", .owner = THIS_MODULE, }; /* Actual code that puts the SoC in different idle states */ static int at91_enter_idle(struct cpuidle_device *dev, struct cpuidle_state *state) { struct timeval before, after; int idle_time; u32 saved_lpr; local_irq_disable(); do_gettimeofday(&before); if (state == &dev->states[0]) /* Wait for interrupt state */ cpu_do_idle(); else if (state == &dev->states[1]) { asm("b 1f; .align 5; 1:"); asm("mcr p15, 0, r0, c7, c10, 4"); /* drain write buffer */ saved_lpr = sdram_selfrefresh_enable(); cpu_do_idle(); sdram_selfrefresh_disable(saved_lpr); } do_gettimeofday(&after); local_irq_enable(); idle_time = (after.tv_sec - before.tv_sec) * USEC_PER_SEC + (after.tv_usec - before.tv_usec); return idle_time; } /* Initialize CPU idle by registering the idle states */ static int at91_init_cpuidle(void) { struct cpuidle_device *device; cpuidle_register_driver(&at91_idle_driver); device = &per_cpu(at91_cpuidle_device, smp_processor_id()); device->state_count = AT91_MAX_STATES; /* Wait for interrupt state */ device->states[0].enter = at91_enter_idle; device->states[0].exit_latency = 1; device->states[0].target_residency = 10000; device->states[0].flags = CPUIDLE_FLAG_TIME_VALID; 
strcpy(device->states[0].name, "WFI"); strcpy(device->states[0].desc, "Wait for interrupt"); /* Wait for interrupt and RAM self refresh state */ device->states[1].enter = at91_enter_idle; device->states[1].exit_latency = 10; device->states[1].target_residency = 10000; device->states[1].flags = CPUIDLE_FLAG_TIME_VALID; strcpy(device->states[1].name, "RAM_SR"); strcpy(device->states[1].desc, "WFI and RAM Self Refresh"); if (cpuidle_register_device(device)) { printk(KERN_ERR "at91_init_cpuidle: Failed registering\n"); return -EIO; } return 0; } device_initcall(at91_init_cpuidle);
gpl-2.0
RaymanFX/kernel_samsung_lt03wifi
drivers/input/mouse/alps.c
3949
46488
/* * ALPS touchpad PS/2 mouse driver * * Copyright (c) 2003 Neil Brown <neilb@cse.unsw.edu.au> * Copyright (c) 2003-2005 Peter Osterlund <petero2@telia.com> * Copyright (c) 2004 Dmitry Torokhov <dtor@mail.ru> * Copyright (c) 2005 Vojtech Pavlik <vojtech@suse.cz> * Copyright (c) 2009 Sebastian Kapfer <sebastian_kapfer@gmx.net> * * ALPS detection, tap switching and status querying info is taken from * tpconfig utility (by C. Scott Ananian and Bruce Kall). * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. */ #include <linux/slab.h> #include <linux/input.h> #include <linux/input/mt.h> #include <linux/serio.h> #include <linux/libps2.h> #include "psmouse.h" #include "alps.h" /* * Definitions for ALPS version 3 and 4 command mode protocol */ #define ALPS_V3_X_MAX 2000 #define ALPS_V3_Y_MAX 1400 #define ALPS_BITMAP_X_BITS 15 #define ALPS_BITMAP_Y_BITS 11 #define ALPS_CMD_NIBBLE_10 0x01f2 static const struct alps_nibble_commands alps_v3_nibble_commands[] = { { PSMOUSE_CMD_SETPOLL, 0x00 }, /* 0 */ { PSMOUSE_CMD_RESET_DIS, 0x00 }, /* 1 */ { PSMOUSE_CMD_SETSCALE21, 0x00 }, /* 2 */ { PSMOUSE_CMD_SETRATE, 0x0a }, /* 3 */ { PSMOUSE_CMD_SETRATE, 0x14 }, /* 4 */ { PSMOUSE_CMD_SETRATE, 0x28 }, /* 5 */ { PSMOUSE_CMD_SETRATE, 0x3c }, /* 6 */ { PSMOUSE_CMD_SETRATE, 0x50 }, /* 7 */ { PSMOUSE_CMD_SETRATE, 0x64 }, /* 8 */ { PSMOUSE_CMD_SETRATE, 0xc8 }, /* 9 */ { ALPS_CMD_NIBBLE_10, 0x00 }, /* a */ { PSMOUSE_CMD_SETRES, 0x00 }, /* b */ { PSMOUSE_CMD_SETRES, 0x01 }, /* c */ { PSMOUSE_CMD_SETRES, 0x02 }, /* d */ { PSMOUSE_CMD_SETRES, 0x03 }, /* e */ { PSMOUSE_CMD_SETSCALE11, 0x00 }, /* f */ }; static const struct alps_nibble_commands alps_v4_nibble_commands[] = { { PSMOUSE_CMD_ENABLE, 0x00 }, /* 0 */ { PSMOUSE_CMD_RESET_DIS, 0x00 }, /* 1 */ { PSMOUSE_CMD_SETSCALE21, 0x00 }, /* 2 */ { PSMOUSE_CMD_SETRATE, 0x0a }, /* 3 */ { PSMOUSE_CMD_SETRATE, 0x14 }, /* 4 */ 
{ PSMOUSE_CMD_SETRATE, 0x28 }, /* 5 */ { PSMOUSE_CMD_SETRATE, 0x3c }, /* 6 */ { PSMOUSE_CMD_SETRATE, 0x50 }, /* 7 */ { PSMOUSE_CMD_SETRATE, 0x64 }, /* 8 */ { PSMOUSE_CMD_SETRATE, 0xc8 }, /* 9 */ { ALPS_CMD_NIBBLE_10, 0x00 }, /* a */ { PSMOUSE_CMD_SETRES, 0x00 }, /* b */ { PSMOUSE_CMD_SETRES, 0x01 }, /* c */ { PSMOUSE_CMD_SETRES, 0x02 }, /* d */ { PSMOUSE_CMD_SETRES, 0x03 }, /* e */ { PSMOUSE_CMD_SETSCALE11, 0x00 }, /* f */ }; #define ALPS_DUALPOINT 0x02 /* touchpad has trackstick */ #define ALPS_PASS 0x04 /* device has a pass-through port */ #define ALPS_WHEEL 0x08 /* hardware wheel present */ #define ALPS_FW_BK_1 0x10 /* front & back buttons present */ #define ALPS_FW_BK_2 0x20 /* front & back buttons present */ #define ALPS_FOUR_BUTTONS 0x40 /* 4 direction button present */ #define ALPS_PS2_INTERLEAVED 0x80 /* 3-byte PS/2 packet interleaved with 6-byte ALPS packet */ static const struct alps_model_info alps_model_data[] = { { { 0x32, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT }, /* Toshiba Salellite Pro M10 */ { { 0x33, 0x02, 0x0a }, 0x00, ALPS_PROTO_V1, 0x88, 0xf8, 0 }, /* UMAX-530T */ { { 0x53, 0x02, 0x0a }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 }, { { 0x53, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 }, { { 0x60, 0x03, 0xc8 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 }, /* HP ze1115 */ { { 0x63, 0x02, 0x0a }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 }, { { 0x63, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 }, { { 0x63, 0x02, 0x28 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_FW_BK_2 }, /* Fujitsu Siemens S6010 */ { { 0x63, 0x02, 0x3c }, 0x00, ALPS_PROTO_V2, 0x8f, 0x8f, ALPS_WHEEL }, /* Toshiba Satellite S2400-103 */ { { 0x63, 0x02, 0x50 }, 0x00, ALPS_PROTO_V2, 0xef, 0xef, ALPS_FW_BK_1 }, /* NEC Versa L320 */ { { 0x63, 0x02, 0x64 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 }, { { 0x63, 0x03, 0xc8 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT }, /* Dell Latitude D800 */ { { 0x73, 0x00, 0x0a }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 
ALPS_DUALPOINT }, /* ThinkPad R61 8918-5QG */ { { 0x73, 0x02, 0x0a }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 }, { { 0x73, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_FW_BK_2 }, /* Ahtec Laptop */ { { 0x20, 0x02, 0x0e }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT }, /* XXX */ { { 0x22, 0x02, 0x0a }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT }, { { 0x22, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xff, 0xff, ALPS_PASS | ALPS_DUALPOINT }, /* Dell Latitude D600 */ /* Dell Latitude E5500, E6400, E6500, Precision M4400 */ { { 0x62, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf, ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED }, { { 0x73, 0x02, 0x50 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf, ALPS_FOUR_BUTTONS }, /* Dell Vostro 1400 */ { { 0x52, 0x01, 0x14 }, 0x00, ALPS_PROTO_V2, 0xff, 0xff, ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED }, /* Toshiba Tecra A11-11L */ { { 0x73, 0x02, 0x64 }, 0x9b, ALPS_PROTO_V3, 0x8f, 0x8f, ALPS_DUALPOINT }, { { 0x73, 0x02, 0x64 }, 0x9d, ALPS_PROTO_V3, 0x8f, 0x8f, ALPS_DUALPOINT }, { { 0x73, 0x02, 0x64 }, 0x8a, ALPS_PROTO_V4, 0x8f, 0x8f, 0 }, }; /* * XXX - this entry is suspicious. First byte has zero lower nibble, * which is what a normal mouse would report. Also, the value 0x0e * isn't valid per PS/2 spec. */ /* Packet formats are described in Documentation/input/alps.txt */ static bool alps_is_valid_first_byte(const struct alps_model_info *model, unsigned char data) { return (data & model->mask0) == model->byte0; } static void alps_report_buttons(struct psmouse *psmouse, struct input_dev *dev1, struct input_dev *dev2, int left, int right, int middle) { struct input_dev *dev; /* * If shared button has already been reported on the * other device (dev2) then this event should be also * sent through that device. */ dev = test_bit(BTN_LEFT, dev2->key) ? dev2 : dev1; input_report_key(dev, BTN_LEFT, left); dev = test_bit(BTN_RIGHT, dev2->key) ? 
dev2 : dev1; input_report_key(dev, BTN_RIGHT, right); dev = test_bit(BTN_MIDDLE, dev2->key) ? dev2 : dev1; input_report_key(dev, BTN_MIDDLE, middle); /* * Sync the _other_ device now, we'll do the first * device later once we report the rest of the events. */ input_sync(dev2); } static void alps_process_packet_v1_v2(struct psmouse *psmouse) { struct alps_data *priv = psmouse->private; const struct alps_model_info *model = priv->i; unsigned char *packet = psmouse->packet; struct input_dev *dev = psmouse->dev; struct input_dev *dev2 = priv->dev2; int x, y, z, ges, fin, left, right, middle; int back = 0, forward = 0; if (model->proto_version == ALPS_PROTO_V1) { left = packet[2] & 0x10; right = packet[2] & 0x08; middle = 0; x = packet[1] | ((packet[0] & 0x07) << 7); y = packet[4] | ((packet[3] & 0x07) << 7); z = packet[5]; } else { left = packet[3] & 1; right = packet[3] & 2; middle = packet[3] & 4; x = packet[1] | ((packet[2] & 0x78) << (7 - 3)); y = packet[4] | ((packet[3] & 0x70) << (7 - 4)); z = packet[5]; } if (model->flags & ALPS_FW_BK_1) { back = packet[0] & 0x10; forward = packet[2] & 4; } if (model->flags & ALPS_FW_BK_2) { back = packet[3] & 4; forward = packet[2] & 4; if ((middle = forward && back)) forward = back = 0; } ges = packet[2] & 1; fin = packet[2] & 2; if ((model->flags & ALPS_DUALPOINT) && z == 127) { input_report_rel(dev2, REL_X, (x > 383 ? (x - 768) : x)); input_report_rel(dev2, REL_Y, -(y > 255 ? (y - 512) : y)); alps_report_buttons(psmouse, dev2, dev, left, right, middle); input_sync(dev2); return; } alps_report_buttons(psmouse, dev, dev2, left, right, middle); /* Convert hardware tap to a reasonable Z value */ if (ges && !fin) z = 40; /* * A "tap and drag" operation is reported by the hardware as a transition * from (!fin && ges) to (fin && ges). This should be translated to the * sequence Z>0, Z==0, Z>0, so the Z==0 event has to be generated manually. 
*/ if (ges && fin && !priv->prev_fin) { input_report_abs(dev, ABS_X, x); input_report_abs(dev, ABS_Y, y); input_report_abs(dev, ABS_PRESSURE, 0); input_report_key(dev, BTN_TOOL_FINGER, 0); input_sync(dev); } priv->prev_fin = fin; if (z > 30) input_report_key(dev, BTN_TOUCH, 1); if (z < 25) input_report_key(dev, BTN_TOUCH, 0); if (z > 0) { input_report_abs(dev, ABS_X, x); input_report_abs(dev, ABS_Y, y); } input_report_abs(dev, ABS_PRESSURE, z); input_report_key(dev, BTN_TOOL_FINGER, z > 0); if (model->flags & ALPS_WHEEL) input_report_rel(dev, REL_WHEEL, ((packet[2] << 1) & 0x08) - ((packet[0] >> 4) & 0x07)); if (model->flags & (ALPS_FW_BK_1 | ALPS_FW_BK_2)) { input_report_key(dev, BTN_FORWARD, forward); input_report_key(dev, BTN_BACK, back); } if (model->flags & ALPS_FOUR_BUTTONS) { input_report_key(dev, BTN_0, packet[2] & 4); input_report_key(dev, BTN_1, packet[0] & 0x10); input_report_key(dev, BTN_2, packet[3] & 4); input_report_key(dev, BTN_3, packet[0] & 0x20); } input_sync(dev); } /* * Process bitmap data from v3 and v4 protocols. Returns the number of * fingers detected. A return value of 0 means at least one of the * bitmaps was empty. * * The bitmaps don't have enough data to track fingers, so this function * only generates points representing a bounding box of all contacts. * These points are returned in x1, y1, x2, and y2 when the return value * is greater than 0. 
*/ static int alps_process_bitmap(unsigned int x_map, unsigned int y_map, int *x1, int *y1, int *x2, int *y2) { struct alps_bitmap_point { int start_bit; int num_bits; }; int fingers_x = 0, fingers_y = 0, fingers; int i, bit, prev_bit; struct alps_bitmap_point x_low = {0,}, x_high = {0,}; struct alps_bitmap_point y_low = {0,}, y_high = {0,}; struct alps_bitmap_point *point; if (!x_map || !y_map) return 0; *x1 = *y1 = *x2 = *y2 = 0; prev_bit = 0; point = &x_low; for (i = 0; x_map != 0; i++, x_map >>= 1) { bit = x_map & 1; if (bit) { if (!prev_bit) { point->start_bit = i; fingers_x++; } point->num_bits++; } else { if (prev_bit) point = &x_high; else point->num_bits = 0; } prev_bit = bit; } /* * y bitmap is reversed for what we need (lower positions are in * higher bits), so we process from the top end. */ y_map = y_map << (sizeof(y_map) * BITS_PER_BYTE - ALPS_BITMAP_Y_BITS); prev_bit = 0; point = &y_low; for (i = 0; y_map != 0; i++, y_map <<= 1) { bit = y_map & (1 << (sizeof(y_map) * BITS_PER_BYTE - 1)); if (bit) { if (!prev_bit) { point->start_bit = i; fingers_y++; } point->num_bits++; } else { if (prev_bit) point = &y_high; else point->num_bits = 0; } prev_bit = bit; } /* * Fingers can overlap, so we use the maximum count of fingers * on either axis as the finger count. */ fingers = max(fingers_x, fingers_y); /* * If total fingers is > 1 but either axis reports only a single * contact, we have overlapping or adjacent fingers. For the * purposes of creating a bounding box, divide the single contact * (roughly) equally between the two points. 
*/ if (fingers > 1) { if (fingers_x == 1) { i = x_low.num_bits / 2; x_low.num_bits = x_low.num_bits - i; x_high.start_bit = x_low.start_bit + i; x_high.num_bits = max(i, 1); } else if (fingers_y == 1) { i = y_low.num_bits / 2; y_low.num_bits = y_low.num_bits - i; y_high.start_bit = y_low.start_bit + i; y_high.num_bits = max(i, 1); } } *x1 = (ALPS_V3_X_MAX * (2 * x_low.start_bit + x_low.num_bits - 1)) / (2 * (ALPS_BITMAP_X_BITS - 1)); *y1 = (ALPS_V3_Y_MAX * (2 * y_low.start_bit + y_low.num_bits - 1)) / (2 * (ALPS_BITMAP_Y_BITS - 1)); if (fingers > 1) { *x2 = (ALPS_V3_X_MAX * (2 * x_high.start_bit + x_high.num_bits - 1)) / (2 * (ALPS_BITMAP_X_BITS - 1)); *y2 = (ALPS_V3_Y_MAX * (2 * y_high.start_bit + y_high.num_bits - 1)) / (2 * (ALPS_BITMAP_Y_BITS - 1)); } return fingers; } static void alps_set_slot(struct input_dev *dev, int slot, bool active, int x, int y) { input_mt_slot(dev, slot); input_mt_report_slot_state(dev, MT_TOOL_FINGER, active); if (active) { input_report_abs(dev, ABS_MT_POSITION_X, x); input_report_abs(dev, ABS_MT_POSITION_Y, y); } } static void alps_report_semi_mt_data(struct input_dev *dev, int num_fingers, int x1, int y1, int x2, int y2) { alps_set_slot(dev, 0, num_fingers != 0, x1, y1); alps_set_slot(dev, 1, num_fingers == 2, x2, y2); } static void alps_process_trackstick_packet_v3(struct psmouse *psmouse) { struct alps_data *priv = psmouse->private; unsigned char *packet = psmouse->packet; struct input_dev *dev = priv->dev2; int x, y, z, left, right, middle; /* Sanity check packet */ if (!(packet[0] & 0x40)) { psmouse_dbg(psmouse, "Bad trackstick packet, discarding\n"); return; } /* * There's a special packet that seems to indicate the end * of a stream of trackstick data. Filter these out. 
*/ if (packet[1] == 0x7f && packet[2] == 0x7f && packet[4] == 0x7f) return; x = (s8)(((packet[0] & 0x20) << 2) | (packet[1] & 0x7f)); y = (s8)(((packet[0] & 0x10) << 3) | (packet[2] & 0x7f)); z = (packet[4] & 0x7c) >> 2; /* * The x and y values tend to be quite large, and when used * alone the trackstick is difficult to use. Scale them down * to compensate. */ x /= 8; y /= 8; input_report_rel(dev, REL_X, x); input_report_rel(dev, REL_Y, -y); /* * Most ALPS models report the trackstick buttons in the touchpad * packets, but a few report them here. No reliable way has been * found to differentiate between the models upfront, so we enable * the quirk in response to seeing a button press in the trackstick * packet. */ left = packet[3] & 0x01; right = packet[3] & 0x02; middle = packet[3] & 0x04; if (!(priv->quirks & ALPS_QUIRK_TRACKSTICK_BUTTONS) && (left || right || middle)) priv->quirks |= ALPS_QUIRK_TRACKSTICK_BUTTONS; if (priv->quirks & ALPS_QUIRK_TRACKSTICK_BUTTONS) { input_report_key(dev, BTN_LEFT, left); input_report_key(dev, BTN_RIGHT, right); input_report_key(dev, BTN_MIDDLE, middle); } input_sync(dev); return; } static void alps_process_touchpad_packet_v3(struct psmouse *psmouse) { struct alps_data *priv = psmouse->private; unsigned char *packet = psmouse->packet; struct input_dev *dev = psmouse->dev; struct input_dev *dev2 = priv->dev2; int x, y, z; int left, right, middle; int x1 = 0, y1 = 0, x2 = 0, y2 = 0; int fingers = 0, bmap_fingers; unsigned int x_bitmap, y_bitmap; /* * There's no single feature of touchpad position and bitmap packets * that can be used to distinguish between them. We rely on the fact * that a bitmap packet should always follow a position packet with * bit 6 of packet[4] set. */ if (priv->multi_packet) { /* * Sometimes a position packet will indicate a multi-packet * sequence, but then what follows is another position * packet. Check for this, and when it happens process the * position packet as usual. 
*/ if (packet[0] & 0x40) { fingers = (packet[5] & 0x3) + 1; x_bitmap = ((packet[4] & 0x7e) << 8) | ((packet[1] & 0x7f) << 2) | ((packet[0] & 0x30) >> 4); y_bitmap = ((packet[3] & 0x70) << 4) | ((packet[2] & 0x7f) << 1) | (packet[4] & 0x01); bmap_fingers = alps_process_bitmap(x_bitmap, y_bitmap, &x1, &y1, &x2, &y2); /* * We shouldn't report more than one finger if * we don't have two coordinates. */ if (fingers > 1 && bmap_fingers < 2) fingers = bmap_fingers; /* Now process position packet */ packet = priv->multi_data; } else { priv->multi_packet = 0; } } /* * Bit 6 of byte 0 is not usually set in position packets. The only * times it seems to be set is in situations where the data is * suspect anyway, e.g. a palm resting flat on the touchpad. Given * this combined with the fact that this bit is useful for filtering * out misidentified bitmap packets, we reject anything with this * bit set. */ if (packet[0] & 0x40) return; if (!priv->multi_packet && (packet[4] & 0x40)) { priv->multi_packet = 1; memcpy(priv->multi_data, packet, sizeof(priv->multi_data)); return; } priv->multi_packet = 0; left = packet[3] & 0x01; right = packet[3] & 0x02; middle = packet[3] & 0x04; x = ((packet[1] & 0x7f) << 4) | ((packet[4] & 0x30) >> 2) | ((packet[0] & 0x30) >> 4); y = ((packet[2] & 0x7f) << 4) | (packet[4] & 0x0f); z = packet[5] & 0x7f; /* * Sometimes the hardware sends a single packet with z = 0 * in the middle of a stream. Real releases generate packets * with x, y, and z all zero, so these seem to be flukes. * Ignore them. */ if (x && y && !z) return; /* * If we don't have MT data or the bitmaps were empty, we have * to rely on ST data. */ if (!fingers) { x1 = x; y1 = y; fingers = z > 0 ? 
1 : 0; } if (z >= 64) input_report_key(dev, BTN_TOUCH, 1); else input_report_key(dev, BTN_TOUCH, 0); alps_report_semi_mt_data(dev, fingers, x1, y1, x2, y2); input_report_key(dev, BTN_TOOL_FINGER, fingers == 1); input_report_key(dev, BTN_TOOL_DOUBLETAP, fingers == 2); input_report_key(dev, BTN_TOOL_TRIPLETAP, fingers == 3); input_report_key(dev, BTN_TOOL_QUADTAP, fingers == 4); input_report_key(dev, BTN_LEFT, left); input_report_key(dev, BTN_RIGHT, right); input_report_key(dev, BTN_MIDDLE, middle); if (z > 0) { input_report_abs(dev, ABS_X, x); input_report_abs(dev, ABS_Y, y); } input_report_abs(dev, ABS_PRESSURE, z); input_sync(dev); if (!(priv->quirks & ALPS_QUIRK_TRACKSTICK_BUTTONS)) { left = packet[3] & 0x10; right = packet[3] & 0x20; middle = packet[3] & 0x40; input_report_key(dev2, BTN_LEFT, left); input_report_key(dev2, BTN_RIGHT, right); input_report_key(dev2, BTN_MIDDLE, middle); input_sync(dev2); } } static void alps_process_packet_v3(struct psmouse *psmouse) { unsigned char *packet = psmouse->packet; /* * v3 protocol packets come in three types, two representing * touchpad data and one representing trackstick data. * Trackstick packets seem to be distinguished by always * having 0x3f in the last byte. This value has never been * observed in the last byte of either of the other types * of packets. 
*/ if (packet[5] == 0x3f) { alps_process_trackstick_packet_v3(psmouse); return; } alps_process_touchpad_packet_v3(psmouse); } static void alps_process_packet_v4(struct psmouse *psmouse) { unsigned char *packet = psmouse->packet; struct input_dev *dev = psmouse->dev; int x, y, z; int left, right; left = packet[4] & 0x01; right = packet[4] & 0x02; x = ((packet[1] & 0x7f) << 4) | ((packet[3] & 0x30) >> 2) | ((packet[0] & 0x30) >> 4); y = ((packet[2] & 0x7f) << 4) | (packet[3] & 0x0f); z = packet[5] & 0x7f; if (z >= 64) input_report_key(dev, BTN_TOUCH, 1); else input_report_key(dev, BTN_TOUCH, 0); if (z > 0) { input_report_abs(dev, ABS_X, x); input_report_abs(dev, ABS_Y, y); } input_report_abs(dev, ABS_PRESSURE, z); input_report_key(dev, BTN_TOOL_FINGER, z > 0); input_report_key(dev, BTN_LEFT, left); input_report_key(dev, BTN_RIGHT, right); input_sync(dev); } static void alps_process_packet(struct psmouse *psmouse) { struct alps_data *priv = psmouse->private; const struct alps_model_info *model = priv->i; switch (model->proto_version) { case ALPS_PROTO_V1: case ALPS_PROTO_V2: alps_process_packet_v1_v2(psmouse); break; case ALPS_PROTO_V3: alps_process_packet_v3(psmouse); break; case ALPS_PROTO_V4: alps_process_packet_v4(psmouse); break; } } static void alps_report_bare_ps2_packet(struct psmouse *psmouse, unsigned char packet[], bool report_buttons) { struct alps_data *priv = psmouse->private; struct input_dev *dev2 = priv->dev2; if (report_buttons) alps_report_buttons(psmouse, dev2, psmouse->dev, packet[0] & 1, packet[0] & 2, packet[0] & 4); input_report_rel(dev2, REL_X, packet[1] ? packet[1] - ((packet[0] << 4) & 0x100) : 0); input_report_rel(dev2, REL_Y, packet[2] ? 
((packet[0] << 3) & 0x100) - packet[2] : 0); input_sync(dev2); } static psmouse_ret_t alps_handle_interleaved_ps2(struct psmouse *psmouse) { struct alps_data *priv = psmouse->private; if (psmouse->pktcnt < 6) return PSMOUSE_GOOD_DATA; if (psmouse->pktcnt == 6) { /* * Start a timer to flush the packet if it ends up last * 6-byte packet in the stream. Timer needs to fire * psmouse core times out itself. 20 ms should be enough * to decide if we are getting more data or not. */ mod_timer(&priv->timer, jiffies + msecs_to_jiffies(20)); return PSMOUSE_GOOD_DATA; } del_timer(&priv->timer); if (psmouse->packet[6] & 0x80) { /* * Highest bit is set - that means we either had * complete ALPS packet and this is start of the * next packet or we got garbage. */ if (((psmouse->packet[3] | psmouse->packet[4] | psmouse->packet[5]) & 0x80) || (!alps_is_valid_first_byte(priv->i, psmouse->packet[6]))) { psmouse_dbg(psmouse, "refusing packet %x %x %x %x (suspected interleaved ps/2)\n", psmouse->packet[3], psmouse->packet[4], psmouse->packet[5], psmouse->packet[6]); return PSMOUSE_BAD_DATA; } alps_process_packet(psmouse); /* Continue with the next packet */ psmouse->packet[0] = psmouse->packet[6]; psmouse->pktcnt = 1; } else { /* * High bit is 0 - that means that we indeed got a PS/2 * packet in the middle of ALPS packet. * * There is also possibility that we got 6-byte ALPS * packet followed by 3-byte packet from trackpoint. We * can not distinguish between these 2 scenarios but * because the latter is unlikely to happen in course of * normal operation (user would need to press all * buttons on the pad and start moving trackpoint * without touching the pad surface) we assume former. * Even if we are wrong the wost thing that would happen * the cursor would jump but we should not get protocol * de-synchronization. 
*/ alps_report_bare_ps2_packet(psmouse, &psmouse->packet[3], false); /* * Continue with the standard ALPS protocol handling, * but make sure we won't process it as an interleaved * packet again, which may happen if all buttons are * pressed. To avoid this let's reset the 4th bit which * is normally 1. */ psmouse->packet[3] = psmouse->packet[6] & 0xf7; psmouse->pktcnt = 4; } return PSMOUSE_GOOD_DATA; } static void alps_flush_packet(unsigned long data) { struct psmouse *psmouse = (struct psmouse *)data; serio_pause_rx(psmouse->ps2dev.serio); if (psmouse->pktcnt == psmouse->pktsize) { /* * We did not any more data in reasonable amount of time. * Validate the last 3 bytes and process as a standard * ALPS packet. */ if ((psmouse->packet[3] | psmouse->packet[4] | psmouse->packet[5]) & 0x80) { psmouse_dbg(psmouse, "refusing packet %x %x %x (suspected interleaved ps/2)\n", psmouse->packet[3], psmouse->packet[4], psmouse->packet[5]); } else { alps_process_packet(psmouse); } psmouse->pktcnt = 0; } serio_continue_rx(psmouse->ps2dev.serio); } static psmouse_ret_t alps_process_byte(struct psmouse *psmouse) { struct alps_data *priv = psmouse->private; const struct alps_model_info *model = priv->i; if ((psmouse->packet[0] & 0xc8) == 0x08) { /* PS/2 packet */ if (psmouse->pktcnt == 3) { alps_report_bare_ps2_packet(psmouse, psmouse->packet, true); return PSMOUSE_FULL_PACKET; } return PSMOUSE_GOOD_DATA; } /* Check for PS/2 packet stuffed in the middle of ALPS packet. 
*/ if ((model->flags & ALPS_PS2_INTERLEAVED) && psmouse->pktcnt >= 4 && (psmouse->packet[3] & 0x0f) == 0x0f) { return alps_handle_interleaved_ps2(psmouse); } if (!alps_is_valid_first_byte(model, psmouse->packet[0])) { psmouse_dbg(psmouse, "refusing packet[0] = %x (mask0 = %x, byte0 = %x)\n", psmouse->packet[0], model->mask0, model->byte0); return PSMOUSE_BAD_DATA; } /* Bytes 2 - pktsize should have 0 in the highest bit */ if (psmouse->pktcnt >= 2 && psmouse->pktcnt <= psmouse->pktsize && (psmouse->packet[psmouse->pktcnt - 1] & 0x80)) { psmouse_dbg(psmouse, "refusing packet[%i] = %x\n", psmouse->pktcnt - 1, psmouse->packet[psmouse->pktcnt - 1]); return PSMOUSE_BAD_DATA; } if (psmouse->pktcnt == psmouse->pktsize) { alps_process_packet(psmouse); return PSMOUSE_FULL_PACKET; } return PSMOUSE_GOOD_DATA; } static int alps_command_mode_send_nibble(struct psmouse *psmouse, int nibble) { struct ps2dev *ps2dev = &psmouse->ps2dev; struct alps_data *priv = psmouse->private; int command; unsigned char *param; unsigned char dummy[4]; BUG_ON(nibble > 0xf); command = priv->nibble_commands[nibble].command; param = (command & 0x0f00) ? dummy : (unsigned char *)&priv->nibble_commands[nibble].data; if (ps2_command(ps2dev, param, command)) return -1; return 0; } static int alps_command_mode_set_addr(struct psmouse *psmouse, int addr) { struct ps2dev *ps2dev = &psmouse->ps2dev; struct alps_data *priv = psmouse->private; int i, nibble; if (ps2_command(ps2dev, NULL, priv->addr_command)) return -1; for (i = 12; i >= 0; i -= 4) { nibble = (addr >> i) & 0xf; if (alps_command_mode_send_nibble(psmouse, nibble)) return -1; } return 0; } static int __alps_command_mode_read_reg(struct psmouse *psmouse, int addr) { struct ps2dev *ps2dev = &psmouse->ps2dev; unsigned char param[4]; if (ps2_command(ps2dev, param, PSMOUSE_CMD_GETINFO)) return -1; /* * The address being read is returned in the first two bytes * of the result. Check that this address matches the expected * address. 
*/ if (addr != ((param[0] << 8) | param[1])) return -1; return param[2]; } static int alps_command_mode_read_reg(struct psmouse *psmouse, int addr) { if (alps_command_mode_set_addr(psmouse, addr)) return -1; return __alps_command_mode_read_reg(psmouse, addr); } static int __alps_command_mode_write_reg(struct psmouse *psmouse, u8 value) { if (alps_command_mode_send_nibble(psmouse, (value >> 4) & 0xf)) return -1; if (alps_command_mode_send_nibble(psmouse, value & 0xf)) return -1; return 0; } static int alps_command_mode_write_reg(struct psmouse *psmouse, int addr, u8 value) { if (alps_command_mode_set_addr(psmouse, addr)) return -1; return __alps_command_mode_write_reg(psmouse, value); } static int alps_enter_command_mode(struct psmouse *psmouse, unsigned char *resp) { unsigned char param[4]; struct ps2dev *ps2dev = &psmouse->ps2dev; if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_RESET_WRAP) || ps2_command(ps2dev, NULL, PSMOUSE_CMD_RESET_WRAP) || ps2_command(ps2dev, NULL, PSMOUSE_CMD_RESET_WRAP) || ps2_command(ps2dev, param, PSMOUSE_CMD_GETINFO)) { psmouse_err(psmouse, "failed to enter command mode\n"); return -1; } if (param[0] != 0x88 && param[1] != 0x07) { psmouse_dbg(psmouse, "unknown response while entering command mode: %2.2x %2.2x %2.2x\n", param[0], param[1], param[2]); return -1; } if (resp) *resp = param[2]; return 0; } static inline int alps_exit_command_mode(struct psmouse *psmouse) { struct ps2dev *ps2dev = &psmouse->ps2dev; if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSTREAM)) return -1; return 0; } static const struct alps_model_info *alps_get_model(struct psmouse *psmouse, int *version) { struct ps2dev *ps2dev = &psmouse->ps2dev; static const unsigned char rates[] = { 0, 10, 20, 40, 60, 80, 100, 200 }; unsigned char param[4]; const struct alps_model_info *model = NULL; int i; /* * First try "E6 report". * ALPS should return 0,0,10 or 0,0,100 if no buttons are pressed. * The bits 0-2 of the first byte will be 1s if some buttons are * pressed. 
*/ param[0] = 0; if (ps2_command(ps2dev, param, PSMOUSE_CMD_SETRES) || ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) || ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) || ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11)) return NULL; param[0] = param[1] = param[2] = 0xff; if (ps2_command(ps2dev, param, PSMOUSE_CMD_GETINFO)) return NULL; psmouse_dbg(psmouse, "E6 report: %2.2x %2.2x %2.2x", param[0], param[1], param[2]); if ((param[0] & 0xf8) != 0 || param[1] != 0 || (param[2] != 10 && param[2] != 100)) return NULL; /* * Now try "E7 report". Allowed responses are in * alps_model_data[].signature */ param[0] = 0; if (ps2_command(ps2dev, param, PSMOUSE_CMD_SETRES) || ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE21) || ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE21) || ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE21)) return NULL; param[0] = param[1] = param[2] = 0xff; if (ps2_command(ps2dev, param, PSMOUSE_CMD_GETINFO)) return NULL; psmouse_dbg(psmouse, "E7 report: %2.2x %2.2x %2.2x", param[0], param[1], param[2]); if (version) { for (i = 0; i < ARRAY_SIZE(rates) && param[2] != rates[i]; i++) /* empty */; *version = (param[0] << 8) | (param[1] << 4) | i; } for (i = 0; i < ARRAY_SIZE(alps_model_data); i++) { if (!memcmp(param, alps_model_data[i].signature, sizeof(alps_model_data[i].signature))) { model = alps_model_data + i; break; } } if (model && model->proto_version > ALPS_PROTO_V2) { /* * Need to check command mode response to identify * model */ model = NULL; if (alps_enter_command_mode(psmouse, param)) { psmouse_warn(psmouse, "touchpad failed to enter command mode\n"); } else { for (i = 0; i < ARRAY_SIZE(alps_model_data); i++) { if (alps_model_data[i].proto_version > ALPS_PROTO_V2 && alps_model_data[i].command_mode_resp == param[0]) { model = alps_model_data + i; break; } } alps_exit_command_mode(psmouse); if (!model) psmouse_dbg(psmouse, "Unknown command mode response %2.2x\n", param[0]); } } return model; } /* * For DualPoint devices select the device 
that should respond to * subsequent commands. It looks like glidepad is behind stickpointer, * I'd thought it would be other way around... */ static int alps_passthrough_mode_v2(struct psmouse *psmouse, bool enable) { struct ps2dev *ps2dev = &psmouse->ps2dev; int cmd = enable ? PSMOUSE_CMD_SETSCALE21 : PSMOUSE_CMD_SETSCALE11; if (ps2_command(ps2dev, NULL, cmd) || ps2_command(ps2dev, NULL, cmd) || ps2_command(ps2dev, NULL, cmd) || ps2_command(ps2dev, NULL, PSMOUSE_CMD_DISABLE)) return -1; /* we may get 3 more bytes, just ignore them */ ps2_drain(ps2dev, 3, 100); return 0; } static int alps_absolute_mode_v1_v2(struct psmouse *psmouse) { struct ps2dev *ps2dev = &psmouse->ps2dev; /* Try ALPS magic knock - 4 disable before enable */ if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_DISABLE) || ps2_command(ps2dev, NULL, PSMOUSE_CMD_DISABLE) || ps2_command(ps2dev, NULL, PSMOUSE_CMD_DISABLE) || ps2_command(ps2dev, NULL, PSMOUSE_CMD_DISABLE) || ps2_command(ps2dev, NULL, PSMOUSE_CMD_ENABLE)) return -1; /* * Switch mouse to poll (remote) mode so motion data will not * get in our way */ return ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_SETPOLL); } static int alps_get_status(struct psmouse *psmouse, char *param) { struct ps2dev *ps2dev = &psmouse->ps2dev; /* Get status: 0xF5 0xF5 0xF5 0xE9 */ if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_DISABLE) || ps2_command(ps2dev, NULL, PSMOUSE_CMD_DISABLE) || ps2_command(ps2dev, NULL, PSMOUSE_CMD_DISABLE) || ps2_command(ps2dev, param, PSMOUSE_CMD_GETINFO)) return -1; psmouse_dbg(psmouse, "Status: %2.2x %2.2x %2.2x", param[0], param[1], param[2]); return 0; } /* * Turn touchpad tapping on or off. The sequences are: * 0xE9 0xF5 0xF5 0xF3 0x0A to enable, * 0xE9 0xF5 0xF5 0xE8 0x00 to disable. * My guess that 0xE9 (GetInfo) is here as a sync point. * For models that also have stickpointer (DualPoints) its tapping * is controlled separately (0xE6 0xE6 0xE6 0xF3 0x14|0x0A) but * we don't fiddle with it. 
*/ static int alps_tap_mode(struct psmouse *psmouse, int enable) { struct ps2dev *ps2dev = &psmouse->ps2dev; int cmd = enable ? PSMOUSE_CMD_SETRATE : PSMOUSE_CMD_SETRES; unsigned char tap_arg = enable ? 0x0A : 0x00; unsigned char param[4]; if (ps2_command(ps2dev, param, PSMOUSE_CMD_GETINFO) || ps2_command(ps2dev, NULL, PSMOUSE_CMD_DISABLE) || ps2_command(ps2dev, NULL, PSMOUSE_CMD_DISABLE) || ps2_command(ps2dev, &tap_arg, cmd)) return -1; if (alps_get_status(psmouse, param)) return -1; return 0; } /* * alps_poll() - poll the touchpad for current motion packet. * Used in resync. */ static int alps_poll(struct psmouse *psmouse) { struct alps_data *priv = psmouse->private; unsigned char buf[sizeof(psmouse->packet)]; bool poll_failed; if (priv->i->flags & ALPS_PASS) alps_passthrough_mode_v2(psmouse, true); poll_failed = ps2_command(&psmouse->ps2dev, buf, PSMOUSE_CMD_POLL | (psmouse->pktsize << 8)) < 0; if (priv->i->flags & ALPS_PASS) alps_passthrough_mode_v2(psmouse, false); if (poll_failed || (buf[0] & priv->i->mask0) != priv->i->byte0) return -1; if ((psmouse->badbyte & 0xc8) == 0x08) { /* * Poll the track stick ... 
*/ if (ps2_command(&psmouse->ps2dev, buf, PSMOUSE_CMD_POLL | (3 << 8))) return -1; } memcpy(psmouse->packet, buf, sizeof(buf)); return 0; } static int alps_hw_init_v1_v2(struct psmouse *psmouse) { struct alps_data *priv = psmouse->private; const struct alps_model_info *model = priv->i; if ((model->flags & ALPS_PASS) && alps_passthrough_mode_v2(psmouse, true)) { return -1; } if (alps_tap_mode(psmouse, true)) { psmouse_warn(psmouse, "Failed to enable hardware tapping\n"); return -1; } if (alps_absolute_mode_v1_v2(psmouse)) { psmouse_err(psmouse, "Failed to enable absolute mode\n"); return -1; } if ((model->flags & ALPS_PASS) && alps_passthrough_mode_v2(psmouse, false)) { return -1; } /* ALPS needs stream mode, otherwise it won't report any data */ if (ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_SETSTREAM)) { psmouse_err(psmouse, "Failed to enable stream mode\n"); return -1; } return 0; } /* * Enable or disable passthrough mode to the trackstick. Must be in * command mode when calling this function. 
*/ static int alps_passthrough_mode_v3(struct psmouse *psmouse, bool enable) { int reg_val; reg_val = alps_command_mode_read_reg(psmouse, 0x0008); if (reg_val == -1) return -1; if (enable) reg_val |= 0x01; else reg_val &= ~0x01; if (__alps_command_mode_write_reg(psmouse, reg_val)) return -1; return 0; } /* Must be in command mode when calling this function */ static int alps_absolute_mode_v3(struct psmouse *psmouse) { int reg_val; reg_val = alps_command_mode_read_reg(psmouse, 0x0004); if (reg_val == -1) return -1; reg_val |= 0x06; if (__alps_command_mode_write_reg(psmouse, reg_val)) return -1; return 0; } static int alps_hw_init_v3(struct psmouse *psmouse) { struct alps_data *priv = psmouse->private; struct ps2dev *ps2dev = &psmouse->ps2dev; int reg_val; unsigned char param[4]; priv->nibble_commands = alps_v3_nibble_commands; priv->addr_command = PSMOUSE_CMD_RESET_WRAP; if (alps_enter_command_mode(psmouse, NULL)) goto error; /* Check for trackstick */ reg_val = alps_command_mode_read_reg(psmouse, 0x0008); if (reg_val == -1) goto error; if (reg_val & 0x80) { if (alps_passthrough_mode_v3(psmouse, true)) goto error; if (alps_exit_command_mode(psmouse)) goto error; /* * E7 report for the trackstick * * There have been reports of failures to seem to trace back * to the above trackstick check failing. When these occur * this E7 report fails, so when that happens we continue * with the assumption that there isn't a trackstick after * all. */ param[0] = 0x64; if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE21) || ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE21) || ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE21) || ps2_command(ps2dev, param, PSMOUSE_CMD_GETINFO)) { psmouse_warn(psmouse, "trackstick E7 report failed\n"); } else { psmouse_dbg(psmouse, "trackstick E7 report: %2.2x %2.2x %2.2x\n", param[0], param[1], param[2]); /* * Not sure what this does, but it is absolutely * essential. 
Without it, the touchpad does not * work at all and the trackstick just emits normal * PS/2 packets. */ if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) || ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) || ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) || alps_command_mode_send_nibble(psmouse, 0x9) || alps_command_mode_send_nibble(psmouse, 0x4)) { psmouse_err(psmouse, "Error sending magic E6 sequence\n"); goto error_passthrough; } } if (alps_enter_command_mode(psmouse, NULL)) goto error_passthrough; if (alps_passthrough_mode_v3(psmouse, false)) goto error; } if (alps_absolute_mode_v3(psmouse)) { psmouse_err(psmouse, "Failed to enter absolute mode\n"); goto error; } reg_val = alps_command_mode_read_reg(psmouse, 0x0006); if (reg_val == -1) goto error; if (__alps_command_mode_write_reg(psmouse, reg_val | 0x01)) goto error; reg_val = alps_command_mode_read_reg(psmouse, 0x0007); if (reg_val == -1) goto error; if (__alps_command_mode_write_reg(psmouse, reg_val | 0x01)) goto error; if (alps_command_mode_read_reg(psmouse, 0x0144) == -1) goto error; if (__alps_command_mode_write_reg(psmouse, 0x04)) goto error; if (alps_command_mode_read_reg(psmouse, 0x0159) == -1) goto error; if (__alps_command_mode_write_reg(psmouse, 0x03)) goto error; if (alps_command_mode_read_reg(psmouse, 0x0163) == -1) goto error; if (alps_command_mode_write_reg(psmouse, 0x0163, 0x03)) goto error; if (alps_command_mode_read_reg(psmouse, 0x0162) == -1) goto error; if (alps_command_mode_write_reg(psmouse, 0x0162, 0x04)) goto error; /* * This ensures the trackstick packets are in the format * supported by this driver. If bit 1 isn't set the packet * format is different. 
*/ if (alps_command_mode_write_reg(psmouse, 0x0008, 0x82)) goto error; alps_exit_command_mode(psmouse); /* Set rate and enable data reporting */ param[0] = 0x64; if (ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE) || ps2_command(ps2dev, NULL, PSMOUSE_CMD_ENABLE)) { psmouse_err(psmouse, "Failed to enable data reporting\n"); return -1; } return 0; error_passthrough: /* Something failed while in passthrough mode, so try to get out */ if (!alps_enter_command_mode(psmouse, NULL)) alps_passthrough_mode_v3(psmouse, false); error: /* * Leaving the touchpad in command mode will essentially render * it unusable until the machine reboots, so exit it here just * to be safe */ alps_exit_command_mode(psmouse); return -1; } /* Must be in command mode when calling this function */ static int alps_absolute_mode_v4(struct psmouse *psmouse) { int reg_val; reg_val = alps_command_mode_read_reg(psmouse, 0x0004); if (reg_val == -1) return -1; reg_val |= 0x02; if (__alps_command_mode_write_reg(psmouse, reg_val)) return -1; return 0; } static int alps_hw_init_v4(struct psmouse *psmouse) { struct alps_data *priv = psmouse->private; struct ps2dev *ps2dev = &psmouse->ps2dev; unsigned char param[4]; priv->nibble_commands = alps_v4_nibble_commands; priv->addr_command = PSMOUSE_CMD_DISABLE; if (alps_enter_command_mode(psmouse, NULL)) goto error; if (alps_absolute_mode_v4(psmouse)) { psmouse_err(psmouse, "Failed to enter absolute mode\n"); goto error; } if (alps_command_mode_write_reg(psmouse, 0x0007, 0x8c)) goto error; if (alps_command_mode_write_reg(psmouse, 0x0149, 0x03)) goto error; if (alps_command_mode_write_reg(psmouse, 0x0160, 0x03)) goto error; if (alps_command_mode_write_reg(psmouse, 0x017f, 0x15)) goto error; if (alps_command_mode_write_reg(psmouse, 0x0151, 0x01)) goto error; if (alps_command_mode_write_reg(psmouse, 0x0168, 0x03)) goto error; if (alps_command_mode_write_reg(psmouse, 0x014a, 0x03)) goto error; if (alps_command_mode_write_reg(psmouse, 0x0161, 0x03)) goto error; 
alps_exit_command_mode(psmouse); /* * This sequence changes the output from a 9-byte to an * 8-byte format. All the same data seems to be present, * just in a more compact format. */ param[0] = 0xc8; param[1] = 0x64; param[2] = 0x50; if (ps2_command(ps2dev, &param[0], PSMOUSE_CMD_SETRATE) || ps2_command(ps2dev, &param[1], PSMOUSE_CMD_SETRATE) || ps2_command(ps2dev, &param[2], PSMOUSE_CMD_SETRATE) || ps2_command(ps2dev, param, PSMOUSE_CMD_GETID)) return -1; /* Set rate and enable data reporting */ param[0] = 0x64; if (ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE) || ps2_command(ps2dev, NULL, PSMOUSE_CMD_ENABLE)) { psmouse_err(psmouse, "Failed to enable data reporting\n"); return -1; } return 0; error: /* * Leaving the touchpad in command mode will essentially render * it unusable until the machine reboots, so exit it here just * to be safe */ alps_exit_command_mode(psmouse); return -1; } static int alps_hw_init(struct psmouse *psmouse) { struct alps_data *priv = psmouse->private; const struct alps_model_info *model = priv->i; int ret = -1; switch (model->proto_version) { case ALPS_PROTO_V1: case ALPS_PROTO_V2: ret = alps_hw_init_v1_v2(psmouse); break; case ALPS_PROTO_V3: ret = alps_hw_init_v3(psmouse); break; case ALPS_PROTO_V4: ret = alps_hw_init_v4(psmouse); break; } return ret; } static int alps_reconnect(struct psmouse *psmouse) { const struct alps_model_info *model; psmouse_reset(psmouse); model = alps_get_model(psmouse, NULL); if (!model) return -1; return alps_hw_init(psmouse); } static void alps_disconnect(struct psmouse *psmouse) { struct alps_data *priv = psmouse->private; psmouse_reset(psmouse); del_timer_sync(&priv->timer); input_unregister_device(priv->dev2); kfree(priv); } int alps_init(struct psmouse *psmouse) { struct alps_data *priv; const struct alps_model_info *model; struct input_dev *dev1 = psmouse->dev, *dev2; int version; priv = kzalloc(sizeof(struct alps_data), GFP_KERNEL); dev2 = input_allocate_device(); if (!priv || !dev2) goto init_fail; 
priv->dev2 = dev2; setup_timer(&priv->timer, alps_flush_packet, (unsigned long)psmouse); psmouse->private = priv; psmouse_reset(psmouse); model = alps_get_model(psmouse, &version); if (!model) goto init_fail; priv->i = model; if (alps_hw_init(psmouse)) goto init_fail; /* * Undo part of setup done for us by psmouse core since touchpad * is not a relative device. */ __clear_bit(EV_REL, dev1->evbit); __clear_bit(REL_X, dev1->relbit); __clear_bit(REL_Y, dev1->relbit); /* * Now set up our capabilities. */ dev1->evbit[BIT_WORD(EV_KEY)] |= BIT_MASK(EV_KEY); dev1->keybit[BIT_WORD(BTN_TOUCH)] |= BIT_MASK(BTN_TOUCH); dev1->keybit[BIT_WORD(BTN_TOOL_FINGER)] |= BIT_MASK(BTN_TOOL_FINGER); dev1->keybit[BIT_WORD(BTN_LEFT)] |= BIT_MASK(BTN_LEFT) | BIT_MASK(BTN_RIGHT); dev1->evbit[BIT_WORD(EV_ABS)] |= BIT_MASK(EV_ABS); switch (model->proto_version) { case ALPS_PROTO_V1: case ALPS_PROTO_V2: input_set_abs_params(dev1, ABS_X, 0, 1023, 0, 0); input_set_abs_params(dev1, ABS_Y, 0, 767, 0, 0); break; case ALPS_PROTO_V3: set_bit(INPUT_PROP_SEMI_MT, dev1->propbit); input_mt_init_slots(dev1, 2); input_set_abs_params(dev1, ABS_MT_POSITION_X, 0, ALPS_V3_X_MAX, 0, 0); input_set_abs_params(dev1, ABS_MT_POSITION_Y, 0, ALPS_V3_Y_MAX, 0, 0); set_bit(BTN_TOOL_DOUBLETAP, dev1->keybit); set_bit(BTN_TOOL_TRIPLETAP, dev1->keybit); set_bit(BTN_TOOL_QUADTAP, dev1->keybit); /* fall through */ case ALPS_PROTO_V4: input_set_abs_params(dev1, ABS_X, 0, ALPS_V3_X_MAX, 0, 0); input_set_abs_params(dev1, ABS_Y, 0, ALPS_V3_Y_MAX, 0, 0); break; } input_set_abs_params(dev1, ABS_PRESSURE, 0, 127, 0, 0); if (model->flags & ALPS_WHEEL) { dev1->evbit[BIT_WORD(EV_REL)] |= BIT_MASK(EV_REL); dev1->relbit[BIT_WORD(REL_WHEEL)] |= BIT_MASK(REL_WHEEL); } if (model->flags & (ALPS_FW_BK_1 | ALPS_FW_BK_2)) { dev1->keybit[BIT_WORD(BTN_FORWARD)] |= BIT_MASK(BTN_FORWARD); dev1->keybit[BIT_WORD(BTN_BACK)] |= BIT_MASK(BTN_BACK); } if (model->flags & ALPS_FOUR_BUTTONS) { dev1->keybit[BIT_WORD(BTN_0)] |= BIT_MASK(BTN_0); 
dev1->keybit[BIT_WORD(BTN_1)] |= BIT_MASK(BTN_1); dev1->keybit[BIT_WORD(BTN_2)] |= BIT_MASK(BTN_2); dev1->keybit[BIT_WORD(BTN_3)] |= BIT_MASK(BTN_3); } else { dev1->keybit[BIT_WORD(BTN_MIDDLE)] |= BIT_MASK(BTN_MIDDLE); } snprintf(priv->phys, sizeof(priv->phys), "%s/input1", psmouse->ps2dev.serio->phys); dev2->phys = priv->phys; dev2->name = (model->flags & ALPS_DUALPOINT) ? "DualPoint Stick" : "PS/2 Mouse"; dev2->id.bustype = BUS_I8042; dev2->id.vendor = 0x0002; dev2->id.product = PSMOUSE_ALPS; dev2->id.version = 0x0000; dev2->dev.parent = &psmouse->ps2dev.serio->dev; dev2->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REL); dev2->relbit[BIT_WORD(REL_X)] = BIT_MASK(REL_X) | BIT_MASK(REL_Y); dev2->keybit[BIT_WORD(BTN_LEFT)] = BIT_MASK(BTN_LEFT) | BIT_MASK(BTN_MIDDLE) | BIT_MASK(BTN_RIGHT); if (input_register_device(priv->dev2)) goto init_fail; psmouse->protocol_handler = alps_process_byte; psmouse->poll = alps_poll; psmouse->disconnect = alps_disconnect; psmouse->reconnect = alps_reconnect; psmouse->pktsize = model->proto_version == ALPS_PROTO_V4 ? 8 : 6; /* We are having trouble resyncing ALPS touchpads so disable it for now */ psmouse->resync_time = 0; return 0; init_fail: psmouse_reset(psmouse); input_free_device(dev2); kfree(priv); psmouse->private = NULL; return -1; } int alps_detect(struct psmouse *psmouse, bool set_properties) { int version; const struct alps_model_info *model; model = alps_get_model(psmouse, &version); if (!model) return -1; if (set_properties) { psmouse->vendor = "ALPS"; psmouse->name = model->flags & ALPS_DUALPOINT ? "DualPoint TouchPad" : "GlidePoint"; psmouse->model = version; } return 0; }
gpl-2.0
tchaari/android_kernel_samsung_crespo_kitkang
arch/sh/kernel/signal_32.c
3949
17783
/* * linux/arch/sh/kernel/signal.c * * Copyright (C) 1991, 1992 Linus Torvalds * * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson * * SuperH version: Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima * */ #include <linux/sched.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/kernel.h> #include <linux/signal.h> #include <linux/errno.h> #include <linux/wait.h> #include <linux/ptrace.h> #include <linux/unistd.h> #include <linux/stddef.h> #include <linux/tty.h> #include <linux/elf.h> #include <linux/personality.h> #include <linux/binfmts.h> #include <linux/freezer.h> #include <linux/io.h> #include <linux/tracehook.h> #include <asm/system.h> #include <asm/ucontext.h> #include <asm/uaccess.h> #include <asm/pgtable.h> #include <asm/cacheflush.h> #include <asm/syscalls.h> #include <asm/fpu.h> #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) struct fdpic_func_descriptor { unsigned long text; unsigned long GOT; }; /* * The following define adds a 64 byte gap between the signal * stack frame and previous contents of the stack. This allows * frame unwinding in a function epilogue but only if a frame * pointer is used in the function. This is necessary because * current gcc compilers (<4.3) do not generate unwind info on * SH for function epilogues. */ #define UNWINDGUARD 64 /* * Atomically swap in the new signal mask, and wait for a signal. 
*/ asmlinkage int sys_sigsuspend(old_sigset_t mask, unsigned long r5, unsigned long r6, unsigned long r7, struct pt_regs __regs) { mask &= _BLOCKABLE; spin_lock_irq(&current->sighand->siglock); current->saved_sigmask = current->blocked; siginitset(&current->blocked, mask); recalc_sigpending(); spin_unlock_irq(&current->sighand->siglock); current->state = TASK_INTERRUPTIBLE; schedule(); set_restore_sigmask(); return -ERESTARTNOHAND; } asmlinkage int sys_sigaction(int sig, const struct old_sigaction __user *act, struct old_sigaction __user *oact) { struct k_sigaction new_ka, old_ka; int ret; if (act) { old_sigset_t mask; if (!access_ok(VERIFY_READ, act, sizeof(*act)) || __get_user(new_ka.sa.sa_handler, &act->sa_handler) || __get_user(new_ka.sa.sa_restorer, &act->sa_restorer)) return -EFAULT; __get_user(new_ka.sa.sa_flags, &act->sa_flags); __get_user(mask, &act->sa_mask); siginitset(&new_ka.sa.sa_mask, mask); } ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); if (!ret && oact) { if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) || __put_user(old_ka.sa.sa_handler, &oact->sa_handler) || __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer)) return -EFAULT; __put_user(old_ka.sa.sa_flags, &oact->sa_flags); __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask); } return ret; } asmlinkage int sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, unsigned long r6, unsigned long r7, struct pt_regs __regs) { struct pt_regs *regs = RELOC_HIDE(&__regs, 0); return do_sigaltstack(uss, uoss, regs->regs[15]); } /* * Do a signal return; undo the signal stack. 
*/ #define MOVW(n) (0x9300|((n)-2)) /* Move mem word at PC+n to R3 */ #if defined(CONFIG_CPU_SH2) #define TRAP_NOARG 0xc320 /* Syscall w/no args (NR in R3) */ #else #define TRAP_NOARG 0xc310 /* Syscall w/no args (NR in R3) */ #endif #define OR_R0_R0 0x200b /* or r0,r0 (insert to avoid hardware bug) */ struct sigframe { struct sigcontext sc; unsigned long extramask[_NSIG_WORDS-1]; u16 retcode[8]; }; struct rt_sigframe { struct siginfo info; struct ucontext uc; u16 retcode[8]; }; #ifdef CONFIG_SH_FPU static inline int restore_sigcontext_fpu(struct sigcontext __user *sc) { struct task_struct *tsk = current; if (!(boot_cpu_data.flags & CPU_HAS_FPU)) return 0; set_used_math(); return __copy_from_user(&tsk->thread.xstate->hardfpu, &sc->sc_fpregs[0], sizeof(long)*(16*2+2)); } static inline int save_sigcontext_fpu(struct sigcontext __user *sc, struct pt_regs *regs) { struct task_struct *tsk = current; if (!(boot_cpu_data.flags & CPU_HAS_FPU)) return 0; if (!used_math()) { __put_user(0, &sc->sc_ownedfp); return 0; } __put_user(1, &sc->sc_ownedfp); /* This will cause a "finit" to be triggered by the next attempted FPU operation by the 'current' process. 
*/ clear_used_math(); unlazy_fpu(tsk, regs); return __copy_to_user(&sc->sc_fpregs[0], &tsk->thread.xstate->hardfpu, sizeof(long)*(16*2+2)); } #endif /* CONFIG_SH_FPU */ static int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *r0_p) { unsigned int err = 0; #define COPY(x) err |= __get_user(regs->x, &sc->sc_##x) COPY(regs[1]); COPY(regs[2]); COPY(regs[3]); COPY(regs[4]); COPY(regs[5]); COPY(regs[6]); COPY(regs[7]); COPY(regs[8]); COPY(regs[9]); COPY(regs[10]); COPY(regs[11]); COPY(regs[12]); COPY(regs[13]); COPY(regs[14]); COPY(regs[15]); COPY(gbr); COPY(mach); COPY(macl); COPY(pr); COPY(sr); COPY(pc); #undef COPY #ifdef CONFIG_SH_FPU if (boot_cpu_data.flags & CPU_HAS_FPU) { int owned_fp; struct task_struct *tsk = current; regs->sr |= SR_FD; /* Release FPU */ clear_fpu(tsk, regs); clear_used_math(); __get_user (owned_fp, &sc->sc_ownedfp); if (owned_fp) err |= restore_sigcontext_fpu(sc); } #endif regs->tra = -1; /* disable syscall checks */ err |= __get_user(*r0_p, &sc->sc_regs[0]); return err; } asmlinkage int sys_sigreturn(unsigned long r4, unsigned long r5, unsigned long r6, unsigned long r7, struct pt_regs __regs) { struct pt_regs *regs = RELOC_HIDE(&__regs, 0); struct sigframe __user *frame = (struct sigframe __user *)regs->regs[15]; sigset_t set; int r0; /* Always make any pending restarted system calls return -EINTR */ current_thread_info()->restart_block.fn = do_no_restart_syscall; if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) goto badframe; if (__get_user(set.sig[0], &frame->sc.oldmask) || (_NSIG_WORDS > 1 && __copy_from_user(&set.sig[1], &frame->extramask, sizeof(frame->extramask)))) goto badframe; sigdelsetmask(&set, ~_BLOCKABLE); spin_lock_irq(&current->sighand->siglock); current->blocked = set; recalc_sigpending(); spin_unlock_irq(&current->sighand->siglock); if (restore_sigcontext(regs, &frame->sc, &r0)) goto badframe; return r0; badframe: force_sig(SIGSEGV, current); return 0; } asmlinkage int 
sys_rt_sigreturn(unsigned long r4, unsigned long r5, unsigned long r6, unsigned long r7, struct pt_regs __regs) { struct pt_regs *regs = RELOC_HIDE(&__regs, 0); struct rt_sigframe __user *frame = (struct rt_sigframe __user *)regs->regs[15]; sigset_t set; int r0; /* Always make any pending restarted system calls return -EINTR */ current_thread_info()->restart_block.fn = do_no_restart_syscall; if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) goto badframe; if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) goto badframe; sigdelsetmask(&set, ~_BLOCKABLE); spin_lock_irq(&current->sighand->siglock); current->blocked = set; recalc_sigpending(); spin_unlock_irq(&current->sighand->siglock); if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &r0)) goto badframe; if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->regs[15]) == -EFAULT) goto badframe; return r0; badframe: force_sig(SIGSEGV, current); return 0; } /* * Set up a signal frame. */ static int setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, unsigned long mask) { int err = 0; #define COPY(x) err |= __put_user(regs->x, &sc->sc_##x) COPY(regs[0]); COPY(regs[1]); COPY(regs[2]); COPY(regs[3]); COPY(regs[4]); COPY(regs[5]); COPY(regs[6]); COPY(regs[7]); COPY(regs[8]); COPY(regs[9]); COPY(regs[10]); COPY(regs[11]); COPY(regs[12]); COPY(regs[13]); COPY(regs[14]); COPY(regs[15]); COPY(gbr); COPY(mach); COPY(macl); COPY(pr); COPY(sr); COPY(pc); #undef COPY #ifdef CONFIG_SH_FPU err |= save_sigcontext_fpu(sc, regs); #endif /* non-iBCS2 extensions.. */ err |= __put_user(mask, &sc->oldmask); return err; } /* * Determine which stack to use.. */ static inline void __user * get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size) { if (ka->sa.sa_flags & SA_ONSTACK) { if (sas_ss_flags(sp) == 0) sp = current->sas_ss_sp + current->sas_ss_size; } return (void __user *)((sp - (frame_size+UNWINDGUARD)) & -8ul); } /* These symbols are defined with the addresses in the vsyscall page. 
See vsyscall-trapa.S. */ extern void __kernel_sigreturn(void); extern void __kernel_rt_sigreturn(void); static int setup_frame(int sig, struct k_sigaction *ka, sigset_t *set, struct pt_regs *regs) { struct sigframe __user *frame; int err = 0; int signal; frame = get_sigframe(ka, regs->regs[15], sizeof(*frame)); if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) goto give_sigsegv; signal = current_thread_info()->exec_domain && current_thread_info()->exec_domain->signal_invmap && sig < 32 ? current_thread_info()->exec_domain->signal_invmap[sig] : sig; err |= setup_sigcontext(&frame->sc, regs, set->sig[0]); if (_NSIG_WORDS > 1) err |= __copy_to_user(frame->extramask, &set->sig[1], sizeof(frame->extramask)); /* Set up to return from userspace. If provided, use a stub already in userspace. */ if (ka->sa.sa_flags & SA_RESTORER) { regs->pr = (unsigned long) ka->sa.sa_restorer; #ifdef CONFIG_VSYSCALL } else if (likely(current->mm->context.vdso)) { regs->pr = VDSO_SYM(&__kernel_sigreturn); #endif } else { /* Generate return code (system call to sigreturn) */ err |= __put_user(MOVW(7), &frame->retcode[0]); err |= __put_user(TRAP_NOARG, &frame->retcode[1]); err |= __put_user(OR_R0_R0, &frame->retcode[2]); err |= __put_user(OR_R0_R0, &frame->retcode[3]); err |= __put_user(OR_R0_R0, &frame->retcode[4]); err |= __put_user(OR_R0_R0, &frame->retcode[5]); err |= __put_user(OR_R0_R0, &frame->retcode[6]); err |= __put_user((__NR_sigreturn), &frame->retcode[7]); regs->pr = (unsigned long) frame->retcode; flush_icache_range(regs->pr, regs->pr + sizeof(frame->retcode)); } if (err) goto give_sigsegv; /* Set up registers for signal handler */ regs->regs[15] = (unsigned long) frame; regs->regs[4] = signal; /* Arg for signal handler */ regs->regs[5] = 0; regs->regs[6] = (unsigned long) &frame->sc; if (current->personality & FDPIC_FUNCPTRS) { struct fdpic_func_descriptor __user *funcptr = (struct fdpic_func_descriptor __user *)ka->sa.sa_handler; __get_user(regs->pc, &funcptr->text); 
__get_user(regs->regs[12], &funcptr->GOT); } else regs->pc = (unsigned long)ka->sa.sa_handler; set_fs(USER_DS); pr_debug("SIG deliver (%s:%d): sp=%p pc=%08lx pr=%08lx\n", current->comm, task_pid_nr(current), frame, regs->pc, regs->pr); return 0; give_sigsegv: force_sigsegv(sig, current); return -EFAULT; } static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *set, struct pt_regs *regs) { struct rt_sigframe __user *frame; int err = 0; int signal; frame = get_sigframe(ka, regs->regs[15], sizeof(*frame)); if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) goto give_sigsegv; signal = current_thread_info()->exec_domain && current_thread_info()->exec_domain->signal_invmap && sig < 32 ? current_thread_info()->exec_domain->signal_invmap[sig] : sig; err |= copy_siginfo_to_user(&frame->info, info); /* Create the ucontext. */ err |= __put_user(0, &frame->uc.uc_flags); err |= __put_user(NULL, &frame->uc.uc_link); err |= __put_user((void *)current->sas_ss_sp, &frame->uc.uc_stack.ss_sp); err |= __put_user(sas_ss_flags(regs->regs[15]), &frame->uc.uc_stack.ss_flags); err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size); err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, set->sig[0]); err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); /* Set up to return from userspace. If provided, use a stub already in userspace. 
*/ if (ka->sa.sa_flags & SA_RESTORER) { regs->pr = (unsigned long) ka->sa.sa_restorer; #ifdef CONFIG_VSYSCALL } else if (likely(current->mm->context.vdso)) { regs->pr = VDSO_SYM(&__kernel_rt_sigreturn); #endif } else { /* Generate return code (system call to rt_sigreturn) */ err |= __put_user(MOVW(7), &frame->retcode[0]); err |= __put_user(TRAP_NOARG, &frame->retcode[1]); err |= __put_user(OR_R0_R0, &frame->retcode[2]); err |= __put_user(OR_R0_R0, &frame->retcode[3]); err |= __put_user(OR_R0_R0, &frame->retcode[4]); err |= __put_user(OR_R0_R0, &frame->retcode[5]); err |= __put_user(OR_R0_R0, &frame->retcode[6]); err |= __put_user((__NR_rt_sigreturn), &frame->retcode[7]); regs->pr = (unsigned long) frame->retcode; flush_icache_range(regs->pr, regs->pr + sizeof(frame->retcode)); } if (err) goto give_sigsegv; /* Set up registers for signal handler */ regs->regs[15] = (unsigned long) frame; regs->regs[4] = signal; /* Arg for signal handler */ regs->regs[5] = (unsigned long) &frame->info; regs->regs[6] = (unsigned long) &frame->uc; if (current->personality & FDPIC_FUNCPTRS) { struct fdpic_func_descriptor __user *funcptr = (struct fdpic_func_descriptor __user *)ka->sa.sa_handler; __get_user(regs->pc, &funcptr->text); __get_user(regs->regs[12], &funcptr->GOT); } else regs->pc = (unsigned long)ka->sa.sa_handler; set_fs(USER_DS); pr_debug("SIG deliver (%s:%d): sp=%p pc=%08lx pr=%08lx\n", current->comm, task_pid_nr(current), frame, regs->pc, regs->pr); return 0; give_sigsegv: force_sigsegv(sig, current); return -EFAULT; } static inline void handle_syscall_restart(unsigned long save_r0, struct pt_regs *regs, struct sigaction *sa) { /* If we're not from a syscall, bail out */ if (regs->tra < 0) return; /* check for system call restart.. 
*/ switch (regs->regs[0]) { case -ERESTART_RESTARTBLOCK: case -ERESTARTNOHAND: no_system_call_restart: regs->regs[0] = -EINTR; break; case -ERESTARTSYS: if (!(sa->sa_flags & SA_RESTART)) goto no_system_call_restart; /* fallthrough */ case -ERESTARTNOINTR: regs->regs[0] = save_r0; regs->pc -= instruction_size(__raw_readw(regs->pc - 4)); break; } } /* * OK, we're invoking a handler */ static int handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *oldset, struct pt_regs *regs, unsigned int save_r0) { int ret; /* Set up the stack frame */ if (ka->sa.sa_flags & SA_SIGINFO) ret = setup_rt_frame(sig, ka, info, oldset, regs); else ret = setup_frame(sig, ka, oldset, regs); if (ka->sa.sa_flags & SA_ONESHOT) ka->sa.sa_handler = SIG_DFL; if (ret == 0) { spin_lock_irq(&current->sighand->siglock); sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask); if (!(ka->sa.sa_flags & SA_NODEFER)) sigaddset(&current->blocked,sig); recalc_sigpending(); spin_unlock_irq(&current->sighand->siglock); } return ret; } /* * Note that 'init' is a special process: it doesn't get signals it doesn't * want to handle. Thus you cannot kill init even with a SIGKILL even by * mistake. * * Note that we go through the signals twice: once to check the signals that * the kernel can handle, and then we build all the user-level signal handling * stack-frames in one go after that. */ static void do_signal(struct pt_regs *regs, unsigned int save_r0) { siginfo_t info; int signr; struct k_sigaction ka; sigset_t *oldset; /* * We want the common case to go fast, which * is why we may in certain cases get here from * kernel mode. Just return without doing anything * if so. 
*/ if (!user_mode(regs)) return; if (try_to_freeze()) goto no_signal; if (current_thread_info()->status & TS_RESTORE_SIGMASK) oldset = &current->saved_sigmask; else oldset = &current->blocked; signr = get_signal_to_deliver(&info, &ka, regs, NULL); if (signr > 0) { handle_syscall_restart(save_r0, regs, &ka.sa); /* Whee! Actually deliver the signal. */ if (handle_signal(signr, &ka, &info, oldset, regs, save_r0) == 0) { /* * A signal was successfully delivered; the saved * sigmask will have been stored in the signal frame, * and will be restored by sigreturn, so we can simply * clear the TS_RESTORE_SIGMASK flag */ current_thread_info()->status &= ~TS_RESTORE_SIGMASK; tracehook_signal_handler(signr, &info, &ka, regs, test_thread_flag(TIF_SINGLESTEP)); } return; } no_signal: /* Did we come from a system call? */ if (regs->tra >= 0) { /* Restart the system call - no handlers present */ if (regs->regs[0] == -ERESTARTNOHAND || regs->regs[0] == -ERESTARTSYS || regs->regs[0] == -ERESTARTNOINTR) { regs->regs[0] = save_r0; regs->pc -= instruction_size(__raw_readw(regs->pc - 4)); } else if (regs->regs[0] == -ERESTART_RESTARTBLOCK) { regs->pc -= instruction_size(__raw_readw(regs->pc - 4)); regs->regs[3] = __NR_restart_syscall; } } /* * If there's no signal to deliver, we just put the saved sigmask * back. */ if (current_thread_info()->status & TS_RESTORE_SIGMASK) { current_thread_info()->status &= ~TS_RESTORE_SIGMASK; sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL); } } asmlinkage void do_notify_resume(struct pt_regs *regs, unsigned int save_r0, unsigned long thread_info_flags) { /* deal with pending signal delivery */ if (thread_info_flags & _TIF_SIGPENDING) do_signal(regs, save_r0); if (thread_info_flags & _TIF_NOTIFY_RESUME) { clear_thread_flag(TIF_NOTIFY_RESUME); tracehook_notify_resume(regs); if (current->replacement_session_keyring) key_replace_session_keyring(); } }
gpl-2.0
heechul/linux
drivers/sbus/char/bbc_i2c.c
4461
9713
/* bbc_i2c.c: I2C low-level driver for BBC device on UltraSPARC-III * platforms. * * Copyright (C) 2001, 2008 David S. Miller (davem@davemloft.net) */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/sched.h> #include <linux/wait.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/of.h> #include <linux/of_device.h> #include <asm/bbc.h> #include <asm/io.h> #include "bbc_i2c.h" /* Convert this driver to use i2c bus layer someday... */ #define I2C_PCF_PIN 0x80 #define I2C_PCF_ESO 0x40 #define I2C_PCF_ES1 0x20 #define I2C_PCF_ES2 0x10 #define I2C_PCF_ENI 0x08 #define I2C_PCF_STA 0x04 #define I2C_PCF_STO 0x02 #define I2C_PCF_ACK 0x01 #define I2C_PCF_START (I2C_PCF_PIN | I2C_PCF_ESO | I2C_PCF_ENI | I2C_PCF_STA | I2C_PCF_ACK) #define I2C_PCF_STOP (I2C_PCF_PIN | I2C_PCF_ESO | I2C_PCF_STO | I2C_PCF_ACK) #define I2C_PCF_REPSTART ( I2C_PCF_ESO | I2C_PCF_STA | I2C_PCF_ACK) #define I2C_PCF_IDLE (I2C_PCF_PIN | I2C_PCF_ESO | I2C_PCF_ACK) #define I2C_PCF_INI 0x40 /* 1 if not initialized */ #define I2C_PCF_STS 0x20 #define I2C_PCF_BER 0x10 #define I2C_PCF_AD0 0x08 #define I2C_PCF_LRB 0x08 #define I2C_PCF_AAS 0x04 #define I2C_PCF_LAB 0x02 #define I2C_PCF_BB 0x01 /* The BBC devices have two I2C controllers. The first I2C controller * connects mainly to configuration proms (NVRAM, cpu configuration, * dimm types, etc.). Whereas the second I2C controller connects to * environmental control devices such as fans and temperature sensors. * The second controller also connects to the smartcard reader, if present. 
*/ static void set_device_claimage(struct bbc_i2c_bus *bp, struct platform_device *op, int val) { int i; for (i = 0; i < NUM_CHILDREN; i++) { if (bp->devs[i].device == op) { bp->devs[i].client_claimed = val; return; } } } #define claim_device(BP,ECHILD) set_device_claimage(BP,ECHILD,1) #define release_device(BP,ECHILD) set_device_claimage(BP,ECHILD,0) struct platform_device *bbc_i2c_getdev(struct bbc_i2c_bus *bp, int index) { struct platform_device *op = NULL; int curidx = 0, i; for (i = 0; i < NUM_CHILDREN; i++) { if (!(op = bp->devs[i].device)) break; if (curidx == index) goto out; op = NULL; curidx++; } out: if (curidx == index) return op; return NULL; } struct bbc_i2c_client *bbc_i2c_attach(struct bbc_i2c_bus *bp, struct platform_device *op) { struct bbc_i2c_client *client; const u32 *reg; client = kzalloc(sizeof(*client), GFP_KERNEL); if (!client) return NULL; client->bp = bp; client->op = op; reg = of_get_property(op->dev.of_node, "reg", NULL); if (!reg) { kfree(client); return NULL; } client->bus = reg[0]; client->address = reg[1]; claim_device(bp, op); return client; } void bbc_i2c_detach(struct bbc_i2c_client *client) { struct bbc_i2c_bus *bp = client->bp; struct platform_device *op = client->op; release_device(bp, op); kfree(client); } static int wait_for_pin(struct bbc_i2c_bus *bp, u8 *status) { DECLARE_WAITQUEUE(wait, current); int limit = 32; int ret = 1; bp->waiting = 1; add_wait_queue(&bp->wq, &wait); while (limit-- > 0) { long val; val = wait_event_interruptible_timeout( bp->wq, (((*status = readb(bp->i2c_control_regs + 0)) & I2C_PCF_PIN) == 0), msecs_to_jiffies(250)); if (val > 0) { ret = 0; break; } } remove_wait_queue(&bp->wq, &wait); bp->waiting = 0; return ret; } int bbc_i2c_writeb(struct bbc_i2c_client *client, unsigned char val, int off) { struct bbc_i2c_bus *bp = client->bp; int address = client->address; u8 status; int ret = -1; if (bp->i2c_bussel_reg != NULL) writeb(client->bus, bp->i2c_bussel_reg); writeb(address, bp->i2c_control_regs + 
0x1); writeb(I2C_PCF_START, bp->i2c_control_regs + 0x0); if (wait_for_pin(bp, &status)) goto out; writeb(off, bp->i2c_control_regs + 0x1); if (wait_for_pin(bp, &status) || (status & I2C_PCF_LRB) != 0) goto out; writeb(val, bp->i2c_control_regs + 0x1); if (wait_for_pin(bp, &status)) goto out; ret = 0; out: writeb(I2C_PCF_STOP, bp->i2c_control_regs + 0x0); return ret; } int bbc_i2c_readb(struct bbc_i2c_client *client, unsigned char *byte, int off) { struct bbc_i2c_bus *bp = client->bp; unsigned char address = client->address, status; int ret = -1; if (bp->i2c_bussel_reg != NULL) writeb(client->bus, bp->i2c_bussel_reg); writeb(address, bp->i2c_control_regs + 0x1); writeb(I2C_PCF_START, bp->i2c_control_regs + 0x0); if (wait_for_pin(bp, &status)) goto out; writeb(off, bp->i2c_control_regs + 0x1); if (wait_for_pin(bp, &status) || (status & I2C_PCF_LRB) != 0) goto out; writeb(I2C_PCF_STOP, bp->i2c_control_regs + 0x0); address |= 0x1; /* READ */ writeb(address, bp->i2c_control_regs + 0x1); writeb(I2C_PCF_START, bp->i2c_control_regs + 0x0); if (wait_for_pin(bp, &status)) goto out; /* Set PIN back to one so the device sends the first * byte. 
*/ (void) readb(bp->i2c_control_regs + 0x1); if (wait_for_pin(bp, &status)) goto out; writeb(I2C_PCF_ESO | I2C_PCF_ENI, bp->i2c_control_regs + 0x0); *byte = readb(bp->i2c_control_regs + 0x1); if (wait_for_pin(bp, &status)) goto out; ret = 0; out: writeb(I2C_PCF_STOP, bp->i2c_control_regs + 0x0); (void) readb(bp->i2c_control_regs + 0x1); return ret; } int bbc_i2c_write_buf(struct bbc_i2c_client *client, char *buf, int len, int off) { int ret = 0; while (len > 0) { ret = bbc_i2c_writeb(client, *buf, off); if (ret < 0) break; len--; buf++; off++; } return ret; } int bbc_i2c_read_buf(struct bbc_i2c_client *client, char *buf, int len, int off) { int ret = 0; while (len > 0) { ret = bbc_i2c_readb(client, buf, off); if (ret < 0) break; len--; buf++; off++; } return ret; } EXPORT_SYMBOL(bbc_i2c_getdev); EXPORT_SYMBOL(bbc_i2c_attach); EXPORT_SYMBOL(bbc_i2c_detach); EXPORT_SYMBOL(bbc_i2c_writeb); EXPORT_SYMBOL(bbc_i2c_readb); EXPORT_SYMBOL(bbc_i2c_write_buf); EXPORT_SYMBOL(bbc_i2c_read_buf); static irqreturn_t bbc_i2c_interrupt(int irq, void *dev_id) { struct bbc_i2c_bus *bp = dev_id; /* PIN going from set to clear is the only event which * makes the i2c assert an interrupt. 
*/ if (bp->waiting && !(readb(bp->i2c_control_regs + 0x0) & I2C_PCF_PIN)) wake_up_interruptible(&bp->wq); return IRQ_HANDLED; } static void __init reset_one_i2c(struct bbc_i2c_bus *bp) { writeb(I2C_PCF_PIN, bp->i2c_control_regs + 0x0); writeb(bp->own, bp->i2c_control_regs + 0x1); writeb(I2C_PCF_PIN | I2C_PCF_ES1, bp->i2c_control_regs + 0x0); writeb(bp->clock, bp->i2c_control_regs + 0x1); writeb(I2C_PCF_IDLE, bp->i2c_control_regs + 0x0); } static struct bbc_i2c_bus * __init attach_one_i2c(struct platform_device *op, int index) { struct bbc_i2c_bus *bp; struct device_node *dp; int entry; bp = kzalloc(sizeof(*bp), GFP_KERNEL); if (!bp) return NULL; bp->i2c_control_regs = of_ioremap(&op->resource[0], 0, 0x2, "bbc_i2c_regs"); if (!bp->i2c_control_regs) goto fail; bp->i2c_bussel_reg = of_ioremap(&op->resource[1], 0, 0x1, "bbc_i2c_bussel"); if (!bp->i2c_bussel_reg) goto fail; bp->waiting = 0; init_waitqueue_head(&bp->wq); if (request_irq(op->archdata.irqs[0], bbc_i2c_interrupt, IRQF_SHARED, "bbc_i2c", bp)) goto fail; bp->index = index; bp->op = op; spin_lock_init(&bp->lock); entry = 0; for (dp = op->dev.of_node->child; dp && entry < 8; dp = dp->sibling, entry++) { struct platform_device *child_op; child_op = of_find_device_by_node(dp); bp->devs[entry].device = child_op; bp->devs[entry].client_claimed = 0; } writeb(I2C_PCF_PIN, bp->i2c_control_regs + 0x0); bp->own = readb(bp->i2c_control_regs + 0x01); writeb(I2C_PCF_PIN | I2C_PCF_ES1, bp->i2c_control_regs + 0x0); bp->clock = readb(bp->i2c_control_regs + 0x01); printk(KERN_INFO "i2c-%d: Regs at %p, %d devices, own %02x, clock %02x.\n", bp->index, bp->i2c_control_regs, entry, bp->own, bp->clock); reset_one_i2c(bp); return bp; fail: if (bp->i2c_bussel_reg) of_iounmap(&op->resource[1], bp->i2c_bussel_reg, 1); if (bp->i2c_control_regs) of_iounmap(&op->resource[0], bp->i2c_control_regs, 2); kfree(bp); return NULL; } extern int bbc_envctrl_init(struct bbc_i2c_bus *bp); extern void bbc_envctrl_cleanup(struct bbc_i2c_bus *bp); 
static int __devinit bbc_i2c_probe(struct platform_device *op) { struct bbc_i2c_bus *bp; int err, index = 0; bp = attach_one_i2c(op, index); if (!bp) return -EINVAL; err = bbc_envctrl_init(bp); if (err) { free_irq(op->archdata.irqs[0], bp); if (bp->i2c_bussel_reg) of_iounmap(&op->resource[0], bp->i2c_bussel_reg, 1); if (bp->i2c_control_regs) of_iounmap(&op->resource[1], bp->i2c_control_regs, 2); kfree(bp); } else { dev_set_drvdata(&op->dev, bp); } return err; } static int __devexit bbc_i2c_remove(struct platform_device *op) { struct bbc_i2c_bus *bp = dev_get_drvdata(&op->dev); bbc_envctrl_cleanup(bp); free_irq(op->archdata.irqs[0], bp); if (bp->i2c_bussel_reg) of_iounmap(&op->resource[0], bp->i2c_bussel_reg, 1); if (bp->i2c_control_regs) of_iounmap(&op->resource[1], bp->i2c_control_regs, 2); kfree(bp); return 0; } static const struct of_device_id bbc_i2c_match[] = { { .name = "i2c", .compatible = "SUNW,bbc-i2c", }, {}, }; MODULE_DEVICE_TABLE(of, bbc_i2c_match); static struct platform_driver bbc_i2c_driver = { .driver = { .name = "bbc_i2c", .owner = THIS_MODULE, .of_match_table = bbc_i2c_match, }, .probe = bbc_i2c_probe, .remove = __devexit_p(bbc_i2c_remove), }; module_platform_driver(bbc_i2c_driver); MODULE_LICENSE("GPL");
gpl-2.0
goodwin/android_kernel_lge_hammerhead-1
drivers/usb/gadget/f_hid.c
4717
16612
/* * f_hid.c -- USB HID function driver * * Copyright (C) 2010 Fabien Chouteau <fabien.chouteau@barco.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/kernel.h> #include <linux/utsname.h> #include <linux/module.h> #include <linux/hid.h> #include <linux/cdev.h> #include <linux/mutex.h> #include <linux/poll.h> #include <linux/uaccess.h> #include <linux/wait.h> #include <linux/usb/g_hid.h> static int major, minors; static struct class *hidg_class; /*-------------------------------------------------------------------------*/ /* HID gadget struct */ struct f_hidg { /* configuration */ unsigned char bInterfaceSubClass; unsigned char bInterfaceProtocol; unsigned short report_desc_length; char *report_desc; unsigned short report_length; /* recv report */ char *set_report_buff; unsigned short set_report_length; spinlock_t spinlock; wait_queue_head_t read_queue; /* send report */ struct mutex lock; bool write_pending; wait_queue_head_t write_queue; struct usb_request *req; int minor; struct cdev cdev; struct usb_function func; struct usb_ep *in_ep; }; static inline struct f_hidg *func_to_hidg(struct usb_function *f) { return container_of(f, struct f_hidg, func); } /*-------------------------------------------------------------------------*/ /* Static descriptors */ static struct usb_interface_descriptor hidg_interface_desc = { .bLength = sizeof hidg_interface_desc, .bDescriptorType = USB_DT_INTERFACE, /* .bInterfaceNumber = DYNAMIC */ .bAlternateSetting = 0, .bNumEndpoints = 1, .bInterfaceClass = USB_CLASS_HID, /* .bInterfaceSubClass = DYNAMIC */ /* .bInterfaceProtocol = DYNAMIC */ /* .iInterface = DYNAMIC */ }; static struct hid_descriptor hidg_desc = { .bLength = sizeof hidg_desc, .bDescriptorType = HID_DT_HID, .bcdHID = 0x0101, .bCountryCode = 0x00, 
.bNumDescriptors = 0x1, /*.desc[0].bDescriptorType = DYNAMIC */ /*.desc[0].wDescriptorLenght = DYNAMIC */ }; /* High-Speed Support */ static struct usb_endpoint_descriptor hidg_hs_in_ep_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_IN, .bmAttributes = USB_ENDPOINT_XFER_INT, /*.wMaxPacketSize = DYNAMIC */ .bInterval = 4, /* FIXME: Add this field in the * HID gadget configuration? * (struct hidg_func_descriptor) */ }; static struct usb_descriptor_header *hidg_hs_descriptors[] = { (struct usb_descriptor_header *)&hidg_interface_desc, (struct usb_descriptor_header *)&hidg_desc, (struct usb_descriptor_header *)&hidg_hs_in_ep_desc, NULL, }; /* Full-Speed Support */ static struct usb_endpoint_descriptor hidg_fs_in_ep_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_IN, .bmAttributes = USB_ENDPOINT_XFER_INT, /*.wMaxPacketSize = DYNAMIC */ .bInterval = 10, /* FIXME: Add this field in the * HID gadget configuration? 
* (struct hidg_func_descriptor) */ }; static struct usb_descriptor_header *hidg_fs_descriptors[] = { (struct usb_descriptor_header *)&hidg_interface_desc, (struct usb_descriptor_header *)&hidg_desc, (struct usb_descriptor_header *)&hidg_fs_in_ep_desc, NULL, }; /*-------------------------------------------------------------------------*/ /* Char Device */ static ssize_t f_hidg_read(struct file *file, char __user *buffer, size_t count, loff_t *ptr) { struct f_hidg *hidg = file->private_data; char *tmp_buff = NULL; unsigned long flags; if (!count) return 0; if (!access_ok(VERIFY_WRITE, buffer, count)) return -EFAULT; spin_lock_irqsave(&hidg->spinlock, flags); #define READ_COND (hidg->set_report_buff != NULL) while (!READ_COND) { spin_unlock_irqrestore(&hidg->spinlock, flags); if (file->f_flags & O_NONBLOCK) return -EAGAIN; if (wait_event_interruptible(hidg->read_queue, READ_COND)) return -ERESTARTSYS; spin_lock_irqsave(&hidg->spinlock, flags); } count = min_t(unsigned, count, hidg->set_report_length); tmp_buff = hidg->set_report_buff; hidg->set_report_buff = NULL; spin_unlock_irqrestore(&hidg->spinlock, flags); if (tmp_buff != NULL) { /* copy to user outside spinlock */ count -= copy_to_user(buffer, tmp_buff, count); kfree(tmp_buff); } else count = -ENOMEM; return count; } static void f_hidg_req_complete(struct usb_ep *ep, struct usb_request *req) { struct f_hidg *hidg = (struct f_hidg *)ep->driver_data; if (req->status != 0) { ERROR(hidg->func.config->cdev, "End Point Request ERROR: %d\n", req->status); } hidg->write_pending = 0; wake_up(&hidg->write_queue); } static ssize_t f_hidg_write(struct file *file, const char __user *buffer, size_t count, loff_t *offp) { struct f_hidg *hidg = file->private_data; ssize_t status = -ENOMEM; if (!access_ok(VERIFY_READ, buffer, count)) return -EFAULT; mutex_lock(&hidg->lock); #define WRITE_COND (!hidg->write_pending) /* write queue */ while (!WRITE_COND) { mutex_unlock(&hidg->lock); if (file->f_flags & O_NONBLOCK) return -EAGAIN; 
if (wait_event_interruptible_exclusive( hidg->write_queue, WRITE_COND)) return -ERESTARTSYS; mutex_lock(&hidg->lock); } count = min_t(unsigned, count, hidg->report_length); status = copy_from_user(hidg->req->buf, buffer, count); if (status != 0) { ERROR(hidg->func.config->cdev, "copy_from_user error\n"); mutex_unlock(&hidg->lock); return -EINVAL; } hidg->req->status = 0; hidg->req->zero = 0; hidg->req->length = count; hidg->req->complete = f_hidg_req_complete; hidg->req->context = hidg; hidg->write_pending = 1; status = usb_ep_queue(hidg->in_ep, hidg->req, GFP_ATOMIC); if (status < 0) { ERROR(hidg->func.config->cdev, "usb_ep_queue error on int endpoint %zd\n", status); hidg->write_pending = 0; wake_up(&hidg->write_queue); } else { status = count; } mutex_unlock(&hidg->lock); return status; } static unsigned int f_hidg_poll(struct file *file, poll_table *wait) { struct f_hidg *hidg = file->private_data; unsigned int ret = 0; poll_wait(file, &hidg->read_queue, wait); poll_wait(file, &hidg->write_queue, wait); if (WRITE_COND) ret |= POLLOUT | POLLWRNORM; if (READ_COND) ret |= POLLIN | POLLRDNORM; return ret; } #undef WRITE_COND #undef READ_COND static int f_hidg_release(struct inode *inode, struct file *fd) { fd->private_data = NULL; return 0; } static int f_hidg_open(struct inode *inode, struct file *fd) { struct f_hidg *hidg = container_of(inode->i_cdev, struct f_hidg, cdev); fd->private_data = hidg; return 0; } /*-------------------------------------------------------------------------*/ /* usb_function */ static void hidg_set_report_complete(struct usb_ep *ep, struct usb_request *req) { struct f_hidg *hidg = (struct f_hidg *)req->context; if (req->status != 0 || req->buf == NULL || req->actual == 0) { ERROR(hidg->func.config->cdev, "%s FAILED\n", __func__); return; } spin_lock(&hidg->spinlock); hidg->set_report_buff = krealloc(hidg->set_report_buff, req->actual, GFP_ATOMIC); if (hidg->set_report_buff == NULL) { spin_unlock(&hidg->spinlock); return; } 
hidg->set_report_length = req->actual; memcpy(hidg->set_report_buff, req->buf, req->actual); spin_unlock(&hidg->spinlock); wake_up(&hidg->read_queue); } static int hidg_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl) { struct f_hidg *hidg = func_to_hidg(f); struct usb_composite_dev *cdev = f->config->cdev; struct usb_request *req = cdev->req; int status = 0; __u16 value, length; value = __le16_to_cpu(ctrl->wValue); length = __le16_to_cpu(ctrl->wLength); VDBG(cdev, "hid_setup crtl_request : bRequestType:0x%x bRequest:0x%x " "Value:0x%x\n", ctrl->bRequestType, ctrl->bRequest, value); switch ((ctrl->bRequestType << 8) | ctrl->bRequest) { case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8 | HID_REQ_GET_REPORT): VDBG(cdev, "get_report\n"); /* send an empty report */ length = min_t(unsigned, length, hidg->report_length); memset(req->buf, 0x0, length); goto respond; break; case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8 | HID_REQ_GET_PROTOCOL): VDBG(cdev, "get_protocol\n"); goto stall; break; case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8 | HID_REQ_SET_REPORT): VDBG(cdev, "set_report | wLenght=%d\n", ctrl->wLength); req->context = hidg; req->complete = hidg_set_report_complete; goto respond; break; case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8 | HID_REQ_SET_PROTOCOL): VDBG(cdev, "set_protocol\n"); goto stall; break; case ((USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_INTERFACE) << 8 | USB_REQ_GET_DESCRIPTOR): switch (value >> 8) { case HID_DT_HID: VDBG(cdev, "USB_REQ_GET_DESCRIPTOR: HID\n"); length = min_t(unsigned short, length, hidg_desc.bLength); memcpy(req->buf, &hidg_desc, length); goto respond; break; case HID_DT_REPORT: VDBG(cdev, "USB_REQ_GET_DESCRIPTOR: REPORT\n"); length = min_t(unsigned short, length, hidg->report_desc_length); memcpy(req->buf, hidg->report_desc, length); goto respond; break; default: VDBG(cdev, "Unknown decriptor request 0x%x\n", value >> 8); goto stall; break; } 
break; default: VDBG(cdev, "Unknown request 0x%x\n", ctrl->bRequest); goto stall; break; } stall: return -EOPNOTSUPP; respond: req->zero = 0; req->length = length; status = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC); if (status < 0) ERROR(cdev, "usb_ep_queue error on ep0 %d\n", value); return status; } static void hidg_disable(struct usb_function *f) { struct f_hidg *hidg = func_to_hidg(f); usb_ep_disable(hidg->in_ep); hidg->in_ep->driver_data = NULL; } static int hidg_set_alt(struct usb_function *f, unsigned intf, unsigned alt) { struct usb_composite_dev *cdev = f->config->cdev; struct f_hidg *hidg = func_to_hidg(f); int status = 0; VDBG(cdev, "hidg_set_alt intf:%d alt:%d\n", intf, alt); if (hidg->in_ep != NULL) { /* restart endpoint */ if (hidg->in_ep->driver_data != NULL) usb_ep_disable(hidg->in_ep); status = config_ep_by_speed(f->config->cdev->gadget, f, hidg->in_ep); if (status) { ERROR(cdev, "config_ep_by_speed FAILED!\n"); goto fail; } status = usb_ep_enable(hidg->in_ep); if (status < 0) { ERROR(cdev, "Enable endpoint FAILED!\n"); goto fail; } hidg->in_ep->driver_data = hidg; } fail: return status; } const struct file_operations f_hidg_fops = { .owner = THIS_MODULE, .open = f_hidg_open, .release = f_hidg_release, .write = f_hidg_write, .read = f_hidg_read, .poll = f_hidg_poll, .llseek = noop_llseek, }; static int __init hidg_bind(struct usb_configuration *c, struct usb_function *f) { struct usb_ep *ep; struct f_hidg *hidg = func_to_hidg(f); int status; dev_t dev; /* allocate instance-specific interface IDs, and patch descriptors */ status = usb_interface_id(c, f); if (status < 0) goto fail; hidg_interface_desc.bInterfaceNumber = status; /* allocate instance-specific endpoints */ status = -ENODEV; ep = usb_ep_autoconfig(c->cdev->gadget, &hidg_fs_in_ep_desc); if (!ep) goto fail; ep->driver_data = c->cdev; /* claim */ hidg->in_ep = ep; /* preallocate request and buffer */ status = -ENOMEM; hidg->req = usb_ep_alloc_request(hidg->in_ep, GFP_KERNEL); if 
(!hidg->req) goto fail; hidg->req->buf = kmalloc(hidg->report_length, GFP_KERNEL); if (!hidg->req->buf) goto fail; /* set descriptor dynamic values */ hidg_interface_desc.bInterfaceSubClass = hidg->bInterfaceSubClass; hidg_interface_desc.bInterfaceProtocol = hidg->bInterfaceProtocol; hidg_hs_in_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length); hidg_fs_in_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length); hidg_desc.desc[0].bDescriptorType = HID_DT_REPORT; hidg_desc.desc[0].wDescriptorLength = cpu_to_le16(hidg->report_desc_length); hidg->set_report_buff = NULL; /* copy descriptors */ f->descriptors = usb_copy_descriptors(hidg_fs_descriptors); if (!f->descriptors) goto fail; if (gadget_is_dualspeed(c->cdev->gadget)) { hidg_hs_in_ep_desc.bEndpointAddress = hidg_fs_in_ep_desc.bEndpointAddress; f->hs_descriptors = usb_copy_descriptors(hidg_hs_descriptors); if (!f->hs_descriptors) goto fail; } mutex_init(&hidg->lock); spin_lock_init(&hidg->spinlock); init_waitqueue_head(&hidg->write_queue); init_waitqueue_head(&hidg->read_queue); /* create char device */ cdev_init(&hidg->cdev, &f_hidg_fops); dev = MKDEV(major, hidg->minor); status = cdev_add(&hidg->cdev, dev, 1); if (status) goto fail; device_create(hidg_class, NULL, dev, NULL, "%s%d", "hidg", hidg->minor); return 0; fail: ERROR(f->config->cdev, "hidg_bind FAILED\n"); if (hidg->req != NULL) { kfree(hidg->req->buf); if (hidg->in_ep != NULL) usb_ep_free_request(hidg->in_ep, hidg->req); } usb_free_descriptors(f->hs_descriptors); usb_free_descriptors(f->descriptors); return status; } static void hidg_unbind(struct usb_configuration *c, struct usb_function *f) { struct f_hidg *hidg = func_to_hidg(f); device_destroy(hidg_class, MKDEV(major, hidg->minor)); cdev_del(&hidg->cdev); /* disable/free request and end point */ usb_ep_disable(hidg->in_ep); usb_ep_dequeue(hidg->in_ep, hidg->req); kfree(hidg->req->buf); usb_ep_free_request(hidg->in_ep, hidg->req); /* free descriptors copies */ 
usb_free_descriptors(f->hs_descriptors); usb_free_descriptors(f->descriptors); kfree(hidg->report_desc); kfree(hidg->set_report_buff); kfree(hidg); } /*-------------------------------------------------------------------------*/ /* Strings */ #define CT_FUNC_HID_IDX 0 static struct usb_string ct_func_string_defs[] = { [CT_FUNC_HID_IDX].s = "HID Interface", {}, /* end of list */ }; static struct usb_gadget_strings ct_func_string_table = { .language = 0x0409, /* en-US */ .strings = ct_func_string_defs, }; static struct usb_gadget_strings *ct_func_strings[] = { &ct_func_string_table, NULL, }; /*-------------------------------------------------------------------------*/ /* usb_configuration */ int __init hidg_bind_config(struct usb_configuration *c, struct hidg_func_descriptor *fdesc, int index) { struct f_hidg *hidg; int status; if (index >= minors) return -ENOENT; /* maybe allocate device-global string IDs, and patch descriptors */ if (ct_func_string_defs[CT_FUNC_HID_IDX].id == 0) { status = usb_string_id(c->cdev); if (status < 0) return status; ct_func_string_defs[CT_FUNC_HID_IDX].id = status; hidg_interface_desc.iInterface = status; } /* allocate and initialize one new instance */ hidg = kzalloc(sizeof *hidg, GFP_KERNEL); if (!hidg) return -ENOMEM; hidg->minor = index; hidg->bInterfaceSubClass = fdesc->subclass; hidg->bInterfaceProtocol = fdesc->protocol; hidg->report_length = fdesc->report_length; hidg->report_desc_length = fdesc->report_desc_length; hidg->report_desc = kmemdup(fdesc->report_desc, fdesc->report_desc_length, GFP_KERNEL); if (!hidg->report_desc) { kfree(hidg); return -ENOMEM; } hidg->func.name = "hid"; hidg->func.strings = ct_func_strings; hidg->func.bind = hidg_bind; hidg->func.unbind = hidg_unbind; hidg->func.set_alt = hidg_set_alt; hidg->func.disable = hidg_disable; hidg->func.setup = hidg_setup; status = usb_add_function(c, &hidg->func); if (status) kfree(hidg); return status; } int __init ghid_setup(struct usb_gadget *g, int count) { int status; 
dev_t dev; hidg_class = class_create(THIS_MODULE, "hidg"); status = alloc_chrdev_region(&dev, 0, count, "hidg"); if (!status) { major = MAJOR(dev); minors = count; } return status; } void ghid_cleanup(void) { if (major) { unregister_chrdev_region(MKDEV(major, 0), minors); major = minors = 0; } class_destroy(hidg_class); hidg_class = NULL; }
gpl-2.0
Renzo-Olivares/android_kernel_htc_m8wlv
arch/arm/mach-omap2/gpio.c
4717
5133
/* * OMAP2+ specific gpio initialization * * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/ * * Author: * Charulatha V <charu@ti.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation version 2. * * This program is distributed "as is" WITHOUT ANY WARRANTY of any * kind, whether express or implied; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/gpio.h> #include <linux/err.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <plat/omap_hwmod.h> #include <plat/omap_device.h> #include <plat/omap-pm.h> #include "powerdomain.h" static int __init omap2_gpio_dev_init(struct omap_hwmod *oh, void *unused) { struct platform_device *pdev; struct omap_gpio_platform_data *pdata; struct omap_gpio_dev_attr *dev_attr; char *name = "omap_gpio"; int id; struct powerdomain *pwrdm; /* * extract the device id from name field available in the * hwmod database and use the same for constructing ids for * gpio devices. * CAUTION: Make sure the name in the hwmod database does * not change. If changed, make corresponding change here * or make use of static variable mechanism to handle this. 
*/ sscanf(oh->name, "gpio%d", &id); pdata = kzalloc(sizeof(struct omap_gpio_platform_data), GFP_KERNEL); if (!pdata) { pr_err("gpio%d: Memory allocation failed\n", id); return -ENOMEM; } dev_attr = (struct omap_gpio_dev_attr *)oh->dev_attr; pdata->bank_width = dev_attr->bank_width; pdata->dbck_flag = dev_attr->dbck_flag; pdata->virtual_irq_start = IH_GPIO_BASE + 32 * (id - 1); pdata->get_context_loss_count = omap_pm_get_dev_context_loss_count; pdata->regs = kzalloc(sizeof(struct omap_gpio_reg_offs), GFP_KERNEL); if (!pdata) { pr_err("gpio%d: Memory allocation failed\n", id); return -ENOMEM; } switch (oh->class->rev) { case 0: if (id == 1) /* non-wakeup GPIO pins for OMAP2 Bank1 */ pdata->non_wakeup_gpios = 0xe203ffc0; else if (id == 2) /* non-wakeup GPIO pins for OMAP2 Bank2 */ pdata->non_wakeup_gpios = 0x08700040; /* fall through */ case 1: pdata->regs->revision = OMAP24XX_GPIO_REVISION; pdata->regs->direction = OMAP24XX_GPIO_OE; pdata->regs->datain = OMAP24XX_GPIO_DATAIN; pdata->regs->dataout = OMAP24XX_GPIO_DATAOUT; pdata->regs->set_dataout = OMAP24XX_GPIO_SETDATAOUT; pdata->regs->clr_dataout = OMAP24XX_GPIO_CLEARDATAOUT; pdata->regs->irqstatus = OMAP24XX_GPIO_IRQSTATUS1; pdata->regs->irqstatus2 = OMAP24XX_GPIO_IRQSTATUS2; pdata->regs->irqenable = OMAP24XX_GPIO_IRQENABLE1; pdata->regs->irqenable2 = OMAP24XX_GPIO_IRQENABLE2; pdata->regs->set_irqenable = OMAP24XX_GPIO_SETIRQENABLE1; pdata->regs->clr_irqenable = OMAP24XX_GPIO_CLEARIRQENABLE1; pdata->regs->debounce = OMAP24XX_GPIO_DEBOUNCE_VAL; pdata->regs->debounce_en = OMAP24XX_GPIO_DEBOUNCE_EN; pdata->regs->ctrl = OMAP24XX_GPIO_CTRL; pdata->regs->wkup_en = OMAP24XX_GPIO_WAKE_EN; pdata->regs->leveldetect0 = OMAP24XX_GPIO_LEVELDETECT0; pdata->regs->leveldetect1 = OMAP24XX_GPIO_LEVELDETECT1; pdata->regs->risingdetect = OMAP24XX_GPIO_RISINGDETECT; pdata->regs->fallingdetect = OMAP24XX_GPIO_FALLINGDETECT; break; case 2: pdata->regs->revision = OMAP4_GPIO_REVISION; pdata->regs->direction = OMAP4_GPIO_OE; 
pdata->regs->datain = OMAP4_GPIO_DATAIN; pdata->regs->dataout = OMAP4_GPIO_DATAOUT; pdata->regs->set_dataout = OMAP4_GPIO_SETDATAOUT; pdata->regs->clr_dataout = OMAP4_GPIO_CLEARDATAOUT; pdata->regs->irqstatus = OMAP4_GPIO_IRQSTATUS0; pdata->regs->irqstatus2 = OMAP4_GPIO_IRQSTATUS1; pdata->regs->irqenable = OMAP4_GPIO_IRQSTATUSSET0; pdata->regs->irqenable2 = OMAP4_GPIO_IRQSTATUSSET1; pdata->regs->set_irqenable = OMAP4_GPIO_IRQSTATUSSET0; pdata->regs->clr_irqenable = OMAP4_GPIO_IRQSTATUSCLR0; pdata->regs->debounce = OMAP4_GPIO_DEBOUNCINGTIME; pdata->regs->debounce_en = OMAP4_GPIO_DEBOUNCENABLE; pdata->regs->ctrl = OMAP4_GPIO_CTRL; pdata->regs->wkup_en = OMAP4_GPIO_IRQWAKEN0; pdata->regs->leveldetect0 = OMAP4_GPIO_LEVELDETECT0; pdata->regs->leveldetect1 = OMAP4_GPIO_LEVELDETECT1; pdata->regs->risingdetect = OMAP4_GPIO_RISINGDETECT; pdata->regs->fallingdetect = OMAP4_GPIO_FALLINGDETECT; break; default: WARN(1, "Invalid gpio bank_type\n"); kfree(pdata); return -EINVAL; } pwrdm = omap_hwmod_get_pwrdm(oh); pdata->loses_context = pwrdm_can_ever_lose_context(pwrdm); pdev = omap_device_build(name, id - 1, oh, pdata, sizeof(*pdata), NULL, 0, false); kfree(pdata); if (IS_ERR(pdev)) { WARN(1, "Can't build omap_device for %s:%s.\n", name, oh->name); return PTR_ERR(pdev); } return 0; } /* * gpio_init needs to be done before * machine_init functions access gpio APIs. * Hence gpio_init is a postcore_initcall. */ static int __init omap2_gpio_init(void) { return omap_hwmod_for_each_by_class("gpio", omap2_gpio_dev_init, NULL); } postcore_initcall(omap2_gpio_init);
gpl-2.0
SmartisanTech/T1Kernel
drivers/hid/hid-roccat.c
5229
10665
/* * Roccat driver for Linux * * Copyright (c) 2010 Stefan Achatz <erazor_de@users.sourceforge.net> */ /* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. */ /* * Module roccat is a char device used to report special events of roccat * hardware to userland. These events include requests for on-screen-display of * profile or dpi settings or requests for execution of macro sequences that are * not stored in device. The information in these events depends on hid device * implementation and contains data that is not available in a single hid event * or else hidraw could have been used. * It is inspired by hidraw, but uses only one circular buffer for all readers. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/cdev.h> #include <linux/poll.h> #include <linux/sched.h> #include <linux/hid-roccat.h> #include <linux/module.h> #define ROCCAT_FIRST_MINOR 0 #define ROCCAT_MAX_DEVICES 8 /* should be a power of 2 for performance reason */ #define ROCCAT_CBUF_SIZE 16 struct roccat_report { uint8_t *value; }; struct roccat_device { unsigned int minor; int report_size; int open; int exist; wait_queue_head_t wait; struct device *dev; struct hid_device *hid; struct list_head readers; /* protects modifications of readers list */ struct mutex readers_lock; /* * circular_buffer has one writer and multiple readers with their own * read pointers */ struct roccat_report cbuf[ROCCAT_CBUF_SIZE]; int cbuf_end; struct mutex cbuf_lock; }; struct roccat_reader { struct list_head node; struct roccat_device *device; int cbuf_start; }; static int roccat_major; static struct cdev roccat_cdev; static struct roccat_device *devices[ROCCAT_MAX_DEVICES]; /* protects modifications of devices array */ static DEFINE_MUTEX(devices_lock); static ssize_t roccat_read(struct file *file, char __user 
*buffer, size_t count, loff_t *ppos) { struct roccat_reader *reader = file->private_data; struct roccat_device *device = reader->device; struct roccat_report *report; ssize_t retval = 0, len; DECLARE_WAITQUEUE(wait, current); mutex_lock(&device->cbuf_lock); /* no data? */ if (reader->cbuf_start == device->cbuf_end) { add_wait_queue(&device->wait, &wait); set_current_state(TASK_INTERRUPTIBLE); /* wait for data */ while (reader->cbuf_start == device->cbuf_end) { if (file->f_flags & O_NONBLOCK) { retval = -EAGAIN; break; } if (signal_pending(current)) { retval = -ERESTARTSYS; break; } if (!device->exist) { retval = -EIO; break; } mutex_unlock(&device->cbuf_lock); schedule(); mutex_lock(&device->cbuf_lock); set_current_state(TASK_INTERRUPTIBLE); } set_current_state(TASK_RUNNING); remove_wait_queue(&device->wait, &wait); } /* here we either have data or a reason to return if retval is set */ if (retval) goto exit_unlock; report = &device->cbuf[reader->cbuf_start]; /* * If report is larger than requested amount of data, rest of report * is lost! */ len = device->report_size > count ? 
count : device->report_size; if (copy_to_user(buffer, report->value, len)) { retval = -EFAULT; goto exit_unlock; } retval += len; reader->cbuf_start = (reader->cbuf_start + 1) % ROCCAT_CBUF_SIZE; exit_unlock: mutex_unlock(&device->cbuf_lock); return retval; } static unsigned int roccat_poll(struct file *file, poll_table *wait) { struct roccat_reader *reader = file->private_data; poll_wait(file, &reader->device->wait, wait); if (reader->cbuf_start != reader->device->cbuf_end) return POLLIN | POLLRDNORM; if (!reader->device->exist) return POLLERR | POLLHUP; return 0; } static int roccat_open(struct inode *inode, struct file *file) { unsigned int minor = iminor(inode); struct roccat_reader *reader; struct roccat_device *device; int error = 0; reader = kzalloc(sizeof(struct roccat_reader), GFP_KERNEL); if (!reader) return -ENOMEM; mutex_lock(&devices_lock); device = devices[minor]; if (!device) { pr_emerg("roccat device with minor %d doesn't exist\n", minor); error = -ENODEV; goto exit_err_devices; } mutex_lock(&device->readers_lock); if (!device->open++) { /* power on device on adding first reader */ error = hid_hw_power(device->hid, PM_HINT_FULLON); if (error < 0) { --device->open; goto exit_err_readers; } error = hid_hw_open(device->hid); if (error < 0) { hid_hw_power(device->hid, PM_HINT_NORMAL); --device->open; goto exit_err_readers; } } reader->device = device; /* new reader doesn't get old events */ reader->cbuf_start = device->cbuf_end; list_add_tail(&reader->node, &device->readers); file->private_data = reader; exit_err_readers: mutex_unlock(&device->readers_lock); exit_err_devices: mutex_unlock(&devices_lock); if (error) kfree(reader); return error; } static int roccat_release(struct inode *inode, struct file *file) { unsigned int minor = iminor(inode); struct roccat_reader *reader = file->private_data; struct roccat_device *device; mutex_lock(&devices_lock); device = devices[minor]; if (!device) { mutex_unlock(&devices_lock); pr_emerg("roccat device with 
minor %d doesn't exist\n", minor); return -ENODEV; } mutex_lock(&device->readers_lock); list_del(&reader->node); mutex_unlock(&device->readers_lock); kfree(reader); if (!--device->open) { /* removing last reader */ if (device->exist) { hid_hw_power(device->hid, PM_HINT_NORMAL); hid_hw_close(device->hid); } else { kfree(device); } } mutex_unlock(&devices_lock); return 0; } /* * roccat_report_event() - output data to readers * @minor: minor device number returned by roccat_connect() * @data: pointer to data * @len: size of data * * Return value is zero on success, a negative error code on failure. * * This is called from interrupt handler. */ int roccat_report_event(int minor, u8 const *data) { struct roccat_device *device; struct roccat_reader *reader; struct roccat_report *report; uint8_t *new_value; device = devices[minor]; new_value = kmemdup(data, device->report_size, GFP_ATOMIC); if (!new_value) return -ENOMEM; report = &device->cbuf[device->cbuf_end]; /* passing NULL is safe */ kfree(report->value); report->value = new_value; device->cbuf_end = (device->cbuf_end + 1) % ROCCAT_CBUF_SIZE; list_for_each_entry(reader, &device->readers, node) { /* * As we already inserted one element, the buffer can't be * empty. If start and end are equal, buffer is full and we * increase start, so that slow reader misses one event, but * gets the newer ones in the right order. */ if (reader->cbuf_start == device->cbuf_end) reader->cbuf_start = (reader->cbuf_start + 1) % ROCCAT_CBUF_SIZE; } wake_up_interruptible(&device->wait); return 0; } EXPORT_SYMBOL_GPL(roccat_report_event); /* * roccat_connect() - create a char device for special event output * @class: the class thats used to create the device. Meant to hold device * specific sysfs attributes. * @hid: the hid device the char device should be connected to. * * Return value is minor device number in Range [0, ROCCAT_MAX_DEVICES] on * success, a negative error code on failure. 
*/ int roccat_connect(struct class *klass, struct hid_device *hid, int report_size) { unsigned int minor; struct roccat_device *device; int temp; device = kzalloc(sizeof(struct roccat_device), GFP_KERNEL); if (!device) return -ENOMEM; mutex_lock(&devices_lock); for (minor = 0; minor < ROCCAT_MAX_DEVICES; ++minor) { if (devices[minor]) continue; break; } if (minor < ROCCAT_MAX_DEVICES) { devices[minor] = device; } else { mutex_unlock(&devices_lock); kfree(device); return -EINVAL; } device->dev = device_create(klass, &hid->dev, MKDEV(roccat_major, minor), NULL, "%s%s%d", "roccat", hid->driver->name, minor); if (IS_ERR(device->dev)) { devices[minor] = NULL; mutex_unlock(&devices_lock); temp = PTR_ERR(device->dev); kfree(device); return temp; } mutex_unlock(&devices_lock); init_waitqueue_head(&device->wait); INIT_LIST_HEAD(&device->readers); mutex_init(&device->readers_lock); mutex_init(&device->cbuf_lock); device->minor = minor; device->hid = hid; device->exist = 1; device->cbuf_end = 0; device->report_size = report_size; return minor; } EXPORT_SYMBOL_GPL(roccat_connect); /* roccat_disconnect() - remove char device from hid device * @minor: the minor device number returned by roccat_connect() */ void roccat_disconnect(int minor) { struct roccat_device *device; mutex_lock(&devices_lock); device = devices[minor]; mutex_unlock(&devices_lock); device->exist = 0; /* TODO exist maybe not needed */ device_destroy(device->dev->class, MKDEV(roccat_major, minor)); mutex_lock(&devices_lock); devices[minor] = NULL; mutex_unlock(&devices_lock); if (device->open) { hid_hw_close(device->hid); wake_up_interruptible(&device->wait); } else { kfree(device); } } EXPORT_SYMBOL_GPL(roccat_disconnect); static long roccat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct inode *inode = file->f_path.dentry->d_inode; struct roccat_device *device; unsigned int minor = iminor(inode); long retval = 0; mutex_lock(&devices_lock); device = devices[minor]; if (!device) { retval = 
-ENODEV; goto out; } switch (cmd) { case ROCCATIOCGREPSIZE: if (put_user(device->report_size, (int __user *)arg)) retval = -EFAULT; break; default: retval = -ENOTTY; } out: mutex_unlock(&devices_lock); return retval; } static const struct file_operations roccat_ops = { .owner = THIS_MODULE, .read = roccat_read, .poll = roccat_poll, .open = roccat_open, .release = roccat_release, .llseek = noop_llseek, .unlocked_ioctl = roccat_ioctl, }; static int __init roccat_init(void) { int retval; dev_t dev_id; retval = alloc_chrdev_region(&dev_id, ROCCAT_FIRST_MINOR, ROCCAT_MAX_DEVICES, "roccat"); roccat_major = MAJOR(dev_id); if (retval < 0) { pr_warn("can't get major number\n"); return retval; } cdev_init(&roccat_cdev, &roccat_ops); cdev_add(&roccat_cdev, dev_id, ROCCAT_MAX_DEVICES); return 0; } static void __exit roccat_exit(void) { dev_t dev_id = MKDEV(roccat_major, 0); cdev_del(&roccat_cdev); unregister_chrdev_region(dev_id, ROCCAT_MAX_DEVICES); } module_init(roccat_init); module_exit(roccat_exit); MODULE_AUTHOR("Stefan Achatz"); MODULE_DESCRIPTION("USB Roccat char device"); MODULE_LICENSE("GPL v2");
gpl-2.0
chucktr/vigor_aosp_kernel
kernel/time/jiffies.c
6765
3029
/*********************************************************************** * linux/kernel/time/jiffies.c * * This file contains the jiffies based clocksource. * * Copyright (C) 2004, 2005 IBM, John Stultz (johnstul@us.ibm.com) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * ************************************************************************/ #include <linux/clocksource.h> #include <linux/jiffies.h> #include <linux/module.h> #include <linux/init.h> #include "tick-internal.h" /* The Jiffies based clocksource is the lowest common * denominator clock source which should function on * all systems. It has the same coarse resolution as * the timer interrupt frequency HZ and it suffers * inaccuracies caused by missed or lost timer * interrupts and the inability for the timer * interrupt hardware to accuratly tick at the * requested HZ value. It is also not recommended * for "tick-less" systems. */ #define NSEC_PER_JIFFY ((u32)((((u64)NSEC_PER_SEC)<<8)/ACTHZ)) /* Since jiffies uses a simple NSEC_PER_JIFFY multiplier * conversion, the .shift value could be zero. However * this would make NTP adjustments impossible as they are * in units of 1/2^.shift. 
Thus we use JIFFIES_SHIFT to * shift both the nominator and denominator the same * amount, and give ntp adjustments in units of 1/2^8 * * The value 8 is somewhat carefully chosen, as anything * larger can result in overflows. NSEC_PER_JIFFY grows as * HZ shrinks, so values greater than 8 overflow 32bits when * HZ=100. */ #define JIFFIES_SHIFT 8 static cycle_t jiffies_read(struct clocksource *cs) { return (cycle_t) jiffies; } struct clocksource clocksource_jiffies = { .name = "jiffies", .rating = 1, /* lowest valid rating*/ .read = jiffies_read, .mask = 0xffffffff, /*32bits*/ .mult = NSEC_PER_JIFFY << JIFFIES_SHIFT, /* details above */ .shift = JIFFIES_SHIFT, }; #if (BITS_PER_LONG < 64) u64 get_jiffies_64(void) { unsigned long seq; u64 ret; do { seq = read_seqbegin(&xtime_lock); ret = jiffies_64; } while (read_seqretry(&xtime_lock, seq)); return ret; } EXPORT_SYMBOL(get_jiffies_64); #endif EXPORT_SYMBOL(jiffies); static int __init init_jiffies_clocksource(void) { return clocksource_register(&clocksource_jiffies); } core_initcall(init_jiffies_clocksource); struct clocksource * __init __weak clocksource_default_clock(void) { return &clocksource_jiffies; }
gpl-2.0
kerneldevs/RM-35-KERNEL-PECAN
arch/xtensa/kernel/pci-dma.c
8045
2359
/* * arch/xtensa/kernel/pci-dma.c * * DMA coherent memory allocation. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * Copyright (C) 2002 - 2005 Tensilica Inc. * * Based on version for i386. * * Chris Zankel <chris@zankel.net> * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com> */ #include <linux/types.h> #include <linux/mm.h> #include <linux/string.h> #include <linux/pci.h> #include <linux/gfp.h> #include <asm/io.h> #include <asm/cacheflush.h> /* * Note: We assume that the full memory space is always mapped to 'kseg' * Otherwise we have to use page attributes (not implemented). */ void * dma_alloc_coherent(struct device *dev,size_t size,dma_addr_t *handle,gfp_t flag) { unsigned long ret; unsigned long uncached = 0; /* ignore region speicifiers */ flag &= ~(__GFP_DMA | __GFP_HIGHMEM); if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff)) flag |= GFP_DMA; ret = (unsigned long)__get_free_pages(flag, get_order(size)); if (ret == 0) return NULL; /* We currently don't support coherent memory outside KSEG */ if (ret < XCHAL_KSEG_CACHED_VADDR || ret >= XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE) BUG(); if (ret != 0) { memset((void*) ret, 0, size); uncached = ret+XCHAL_KSEG_BYPASS_VADDR-XCHAL_KSEG_CACHED_VADDR; *handle = virt_to_bus((void*)ret); __flush_invalidate_dcache_range(ret, size); } return (void*)uncached; } void dma_free_coherent(struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle) { long addr=(long)vaddr+XCHAL_KSEG_CACHED_VADDR-XCHAL_KSEG_BYPASS_VADDR; if (addr < 0 || addr >= XCHAL_KSEG_SIZE) BUG(); free_pages(addr, get_order(size)); } void consistent_sync(void *vaddr, size_t size, int direction) { switch (direction) { case PCI_DMA_NONE: BUG(); case PCI_DMA_FROMDEVICE: /* invalidate only */ __invalidate_dcache_range((unsigned long)vaddr, 
(unsigned long)size); break; case PCI_DMA_TODEVICE: /* writeback only */ case PCI_DMA_BIDIRECTIONAL: /* writeback and invalidate */ __flush_invalidate_dcache_range((unsigned long)vaddr, (unsigned long)size); break; } }
gpl-2.0
x86-8/linux-3.7
arch/parisc/hpux/ioctl.c
12653
1818
/* * Implements some necessary HPUX ioctls. * * Copyright (C) 1999-2002 Matthew Wilcox <willy with parisc-linux.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* * Supported ioctls: * TCGETA * TCSETA * TCSETAW * TCSETAF * TCSBRK * TCXONC * TCFLSH * TIOCGWINSZ * TIOCSWINSZ * TIOCGPGRP * TIOCSPGRP */ #include <linux/sched.h> #include <linux/syscalls.h> #include <asm/errno.h> #include <asm/ioctl.h> #include <asm/termios.h> #include <asm/uaccess.h> static int hpux_ioctl_t(int fd, unsigned long cmd, unsigned long arg) { int result = -EOPNOTSUPP; int nr = _IOC_NR(cmd); switch (nr) { case 106: result = sys_ioctl(fd, TIOCSWINSZ, arg); break; case 107: result = sys_ioctl(fd, TIOCGWINSZ, arg); break; } return result; } int hpux_ioctl(int fd, unsigned long cmd, unsigned long arg) { int result = -EOPNOTSUPP; int type = _IOC_TYPE(cmd); switch (type) { case 'T': /* Our structures are now compatible with HPUX's */ result = sys_ioctl(fd, cmd, arg); break; case 't': result = hpux_ioctl_t(fd, cmd, arg); break; } return result; }
gpl-2.0
mason-hock/CHIP-linux-libre
CHIP-linux-libre/fs/cachefiles/proc.c
14189
3184
/* CacheFiles statistics * * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public Licence * as published by the Free Software Foundation; either version * 2 of the Licence, or (at your option) any later version. */ #include <linux/module.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include "internal.h" atomic_t cachefiles_lookup_histogram[HZ]; atomic_t cachefiles_mkdir_histogram[HZ]; atomic_t cachefiles_create_histogram[HZ]; /* * display the latency histogram */ static int cachefiles_histogram_show(struct seq_file *m, void *v) { unsigned long index; unsigned x, y, z, t; switch ((unsigned long) v) { case 1: seq_puts(m, "JIFS SECS LOOKUPS MKDIRS CREATES\n"); return 0; case 2: seq_puts(m, "===== ===== ========= ========= =========\n"); return 0; default: index = (unsigned long) v - 3; x = atomic_read(&cachefiles_lookup_histogram[index]); y = atomic_read(&cachefiles_mkdir_histogram[index]); z = atomic_read(&cachefiles_create_histogram[index]); if (x == 0 && y == 0 && z == 0) return 0; t = (index * 1000) / HZ; seq_printf(m, "%4lu 0.%03u %9u %9u %9u\n", index, t, x, y, z); return 0; } } /* * set up the iterator to start reading from the first line */ static void *cachefiles_histogram_start(struct seq_file *m, loff_t *_pos) { if ((unsigned long long)*_pos >= HZ + 2) return NULL; if (*_pos == 0) *_pos = 1; return (void *)(unsigned long) *_pos; } /* * move to the next line */ static void *cachefiles_histogram_next(struct seq_file *m, void *v, loff_t *pos) { (*pos)++; return (unsigned long long)*pos > HZ + 2 ? 
NULL : (void *)(unsigned long) *pos; } /* * clean up after reading */ static void cachefiles_histogram_stop(struct seq_file *m, void *v) { } static const struct seq_operations cachefiles_histogram_ops = { .start = cachefiles_histogram_start, .stop = cachefiles_histogram_stop, .next = cachefiles_histogram_next, .show = cachefiles_histogram_show, }; /* * open "/proc/fs/cachefiles/XXX" which provide statistics summaries */ static int cachefiles_histogram_open(struct inode *inode, struct file *file) { return seq_open(file, &cachefiles_histogram_ops); } static const struct file_operations cachefiles_histogram_fops = { .owner = THIS_MODULE, .open = cachefiles_histogram_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; /* * initialise the /proc/fs/cachefiles/ directory */ int __init cachefiles_proc_init(void) { _enter(""); if (!proc_mkdir("fs/cachefiles", NULL)) goto error_dir; if (!proc_create("fs/cachefiles/histogram", S_IFREG | 0444, NULL, &cachefiles_histogram_fops)) goto error_histogram; _leave(" = 0"); return 0; error_histogram: remove_proc_entry("fs/cachefiles", NULL); error_dir: _leave(" = -ENOMEM"); return -ENOMEM; } /* * clean up the /proc/fs/cachefiles/ directory */ void cachefiles_proc_cleanup(void) { remove_proc_entry("fs/cachefiles/histogram", NULL); remove_proc_entry("fs/cachefiles", NULL); }
gpl-2.0
XCage15/linux
drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
110
170254
/* * Copyright 2014 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* */ #include <linux/firmware.h> #include "drmP.h" #include "amdgpu.h" #include "amdgpu_ih.h" #include "amdgpu_gfx.h" #include "cikd.h" #include "cik.h" #include "atom.h" #include "amdgpu_ucode.h" #include "clearstate_ci.h" #include "uvd/uvd_4_2_d.h" #include "dce/dce_8_0_d.h" #include "dce/dce_8_0_sh_mask.h" #include "bif/bif_4_1_d.h" #include "bif/bif_4_1_sh_mask.h" #include "gca/gfx_7_0_d.h" #include "gca/gfx_7_2_enum.h" #include "gca/gfx_7_2_sh_mask.h" #include "gmc/gmc_7_0_d.h" #include "gmc/gmc_7_0_sh_mask.h" #include "oss/oss_2_0_d.h" #include "oss/oss_2_0_sh_mask.h" #define GFX7_NUM_GFX_RINGS 1 #define GFX7_NUM_COMPUTE_RINGS 8 static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev); static void gfx_v7_0_set_irq_funcs(struct amdgpu_device *adev); static void gfx_v7_0_set_gds_init(struct amdgpu_device *adev); int gfx_v7_0_get_cu_info(struct amdgpu_device *, struct amdgpu_cu_info *); MODULE_FIRMWARE("radeon/bonaire_pfp.bin"); MODULE_FIRMWARE("radeon/bonaire_me.bin"); MODULE_FIRMWARE("radeon/bonaire_ce.bin"); MODULE_FIRMWARE("radeon/bonaire_rlc.bin"); MODULE_FIRMWARE("radeon/bonaire_mec.bin"); MODULE_FIRMWARE("radeon/hawaii_pfp.bin"); MODULE_FIRMWARE("radeon/hawaii_me.bin"); MODULE_FIRMWARE("radeon/hawaii_ce.bin"); MODULE_FIRMWARE("radeon/hawaii_rlc.bin"); MODULE_FIRMWARE("radeon/hawaii_mec.bin"); MODULE_FIRMWARE("radeon/kaveri_pfp.bin"); MODULE_FIRMWARE("radeon/kaveri_me.bin"); MODULE_FIRMWARE("radeon/kaveri_ce.bin"); MODULE_FIRMWARE("radeon/kaveri_rlc.bin"); MODULE_FIRMWARE("radeon/kaveri_mec.bin"); MODULE_FIRMWARE("radeon/kaveri_mec2.bin"); MODULE_FIRMWARE("radeon/kabini_pfp.bin"); MODULE_FIRMWARE("radeon/kabini_me.bin"); MODULE_FIRMWARE("radeon/kabini_ce.bin"); MODULE_FIRMWARE("radeon/kabini_rlc.bin"); MODULE_FIRMWARE("radeon/kabini_mec.bin"); MODULE_FIRMWARE("radeon/mullins_pfp.bin"); MODULE_FIRMWARE("radeon/mullins_me.bin"); MODULE_FIRMWARE("radeon/mullins_ce.bin"); MODULE_FIRMWARE("radeon/mullins_rlc.bin"); 
MODULE_FIRMWARE("radeon/mullins_mec.bin"); static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] = { {mmGDS_VMID0_BASE, mmGDS_VMID0_SIZE, mmGDS_GWS_VMID0, mmGDS_OA_VMID0}, {mmGDS_VMID1_BASE, mmGDS_VMID1_SIZE, mmGDS_GWS_VMID1, mmGDS_OA_VMID1}, {mmGDS_VMID2_BASE, mmGDS_VMID2_SIZE, mmGDS_GWS_VMID2, mmGDS_OA_VMID2}, {mmGDS_VMID3_BASE, mmGDS_VMID3_SIZE, mmGDS_GWS_VMID3, mmGDS_OA_VMID3}, {mmGDS_VMID4_BASE, mmGDS_VMID4_SIZE, mmGDS_GWS_VMID4, mmGDS_OA_VMID4}, {mmGDS_VMID5_BASE, mmGDS_VMID5_SIZE, mmGDS_GWS_VMID5, mmGDS_OA_VMID5}, {mmGDS_VMID6_BASE, mmGDS_VMID6_SIZE, mmGDS_GWS_VMID6, mmGDS_OA_VMID6}, {mmGDS_VMID7_BASE, mmGDS_VMID7_SIZE, mmGDS_GWS_VMID7, mmGDS_OA_VMID7}, {mmGDS_VMID8_BASE, mmGDS_VMID8_SIZE, mmGDS_GWS_VMID8, mmGDS_OA_VMID8}, {mmGDS_VMID9_BASE, mmGDS_VMID9_SIZE, mmGDS_GWS_VMID9, mmGDS_OA_VMID9}, {mmGDS_VMID10_BASE, mmGDS_VMID10_SIZE, mmGDS_GWS_VMID10, mmGDS_OA_VMID10}, {mmGDS_VMID11_BASE, mmGDS_VMID11_SIZE, mmGDS_GWS_VMID11, mmGDS_OA_VMID11}, {mmGDS_VMID12_BASE, mmGDS_VMID12_SIZE, mmGDS_GWS_VMID12, mmGDS_OA_VMID12}, {mmGDS_VMID13_BASE, mmGDS_VMID13_SIZE, mmGDS_GWS_VMID13, mmGDS_OA_VMID13}, {mmGDS_VMID14_BASE, mmGDS_VMID14_SIZE, mmGDS_GWS_VMID14, mmGDS_OA_VMID14}, {mmGDS_VMID15_BASE, mmGDS_VMID15_SIZE, mmGDS_GWS_VMID15, mmGDS_OA_VMID15} }; static const u32 spectre_rlc_save_restore_register_list[] = { (0x0e00 << 16) | (0xc12c >> 2), 0x00000000, (0x0e00 << 16) | (0xc140 >> 2), 0x00000000, (0x0e00 << 16) | (0xc150 >> 2), 0x00000000, (0x0e00 << 16) | (0xc15c >> 2), 0x00000000, (0x0e00 << 16) | (0xc168 >> 2), 0x00000000, (0x0e00 << 16) | (0xc170 >> 2), 0x00000000, (0x0e00 << 16) | (0xc178 >> 2), 0x00000000, (0x0e00 << 16) | (0xc204 >> 2), 0x00000000, (0x0e00 << 16) | (0xc2b4 >> 2), 0x00000000, (0x0e00 << 16) | (0xc2b8 >> 2), 0x00000000, (0x0e00 << 16) | (0xc2bc >> 2), 0x00000000, (0x0e00 << 16) | (0xc2c0 >> 2), 0x00000000, (0x0e00 << 16) | (0x8228 >> 2), 0x00000000, (0x0e00 << 16) | (0x829c >> 2), 0x00000000, (0x0e00 << 16) | (0x869c >> 2), 0x00000000, 
(0x0600 << 16) | (0x98f4 >> 2), 0x00000000, (0x0e00 << 16) | (0x98f8 >> 2), 0x00000000, (0x0e00 << 16) | (0x9900 >> 2), 0x00000000, (0x0e00 << 16) | (0xc260 >> 2), 0x00000000, (0x0e00 << 16) | (0x90e8 >> 2), 0x00000000, (0x0e00 << 16) | (0x3c000 >> 2), 0x00000000, (0x0e00 << 16) | (0x3c00c >> 2), 0x00000000, (0x0e00 << 16) | (0x8c1c >> 2), 0x00000000, (0x0e00 << 16) | (0x9700 >> 2), 0x00000000, (0x0e00 << 16) | (0xcd20 >> 2), 0x00000000, (0x4e00 << 16) | (0xcd20 >> 2), 0x00000000, (0x5e00 << 16) | (0xcd20 >> 2), 0x00000000, (0x6e00 << 16) | (0xcd20 >> 2), 0x00000000, (0x7e00 << 16) | (0xcd20 >> 2), 0x00000000, (0x8e00 << 16) | (0xcd20 >> 2), 0x00000000, (0x9e00 << 16) | (0xcd20 >> 2), 0x00000000, (0xae00 << 16) | (0xcd20 >> 2), 0x00000000, (0xbe00 << 16) | (0xcd20 >> 2), 0x00000000, (0x0e00 << 16) | (0x89bc >> 2), 0x00000000, (0x0e00 << 16) | (0x8900 >> 2), 0x00000000, 0x3, (0x0e00 << 16) | (0xc130 >> 2), 0x00000000, (0x0e00 << 16) | (0xc134 >> 2), 0x00000000, (0x0e00 << 16) | (0xc1fc >> 2), 0x00000000, (0x0e00 << 16) | (0xc208 >> 2), 0x00000000, (0x0e00 << 16) | (0xc264 >> 2), 0x00000000, (0x0e00 << 16) | (0xc268 >> 2), 0x00000000, (0x0e00 << 16) | (0xc26c >> 2), 0x00000000, (0x0e00 << 16) | (0xc270 >> 2), 0x00000000, (0x0e00 << 16) | (0xc274 >> 2), 0x00000000, (0x0e00 << 16) | (0xc278 >> 2), 0x00000000, (0x0e00 << 16) | (0xc27c >> 2), 0x00000000, (0x0e00 << 16) | (0xc280 >> 2), 0x00000000, (0x0e00 << 16) | (0xc284 >> 2), 0x00000000, (0x0e00 << 16) | (0xc288 >> 2), 0x00000000, (0x0e00 << 16) | (0xc28c >> 2), 0x00000000, (0x0e00 << 16) | (0xc290 >> 2), 0x00000000, (0x0e00 << 16) | (0xc294 >> 2), 0x00000000, (0x0e00 << 16) | (0xc298 >> 2), 0x00000000, (0x0e00 << 16) | (0xc29c >> 2), 0x00000000, (0x0e00 << 16) | (0xc2a0 >> 2), 0x00000000, (0x0e00 << 16) | (0xc2a4 >> 2), 0x00000000, (0x0e00 << 16) | (0xc2a8 >> 2), 0x00000000, (0x0e00 << 16) | (0xc2ac >> 2), 0x00000000, (0x0e00 << 16) | (0xc2b0 >> 2), 0x00000000, (0x0e00 << 16) | (0x301d0 >> 2), 0x00000000, (0x0e00 << 
16) | (0x30238 >> 2), 0x00000000, (0x0e00 << 16) | (0x30250 >> 2), 0x00000000, (0x0e00 << 16) | (0x30254 >> 2), 0x00000000, (0x0e00 << 16) | (0x30258 >> 2), 0x00000000, (0x0e00 << 16) | (0x3025c >> 2), 0x00000000, (0x4e00 << 16) | (0xc900 >> 2), 0x00000000, (0x5e00 << 16) | (0xc900 >> 2), 0x00000000, (0x6e00 << 16) | (0xc900 >> 2), 0x00000000, (0x7e00 << 16) | (0xc900 >> 2), 0x00000000, (0x8e00 << 16) | (0xc900 >> 2), 0x00000000, (0x9e00 << 16) | (0xc900 >> 2), 0x00000000, (0xae00 << 16) | (0xc900 >> 2), 0x00000000, (0xbe00 << 16) | (0xc900 >> 2), 0x00000000, (0x4e00 << 16) | (0xc904 >> 2), 0x00000000, (0x5e00 << 16) | (0xc904 >> 2), 0x00000000, (0x6e00 << 16) | (0xc904 >> 2), 0x00000000, (0x7e00 << 16) | (0xc904 >> 2), 0x00000000, (0x8e00 << 16) | (0xc904 >> 2), 0x00000000, (0x9e00 << 16) | (0xc904 >> 2), 0x00000000, (0xae00 << 16) | (0xc904 >> 2), 0x00000000, (0xbe00 << 16) | (0xc904 >> 2), 0x00000000, (0x4e00 << 16) | (0xc908 >> 2), 0x00000000, (0x5e00 << 16) | (0xc908 >> 2), 0x00000000, (0x6e00 << 16) | (0xc908 >> 2), 0x00000000, (0x7e00 << 16) | (0xc908 >> 2), 0x00000000, (0x8e00 << 16) | (0xc908 >> 2), 0x00000000, (0x9e00 << 16) | (0xc908 >> 2), 0x00000000, (0xae00 << 16) | (0xc908 >> 2), 0x00000000, (0xbe00 << 16) | (0xc908 >> 2), 0x00000000, (0x4e00 << 16) | (0xc90c >> 2), 0x00000000, (0x5e00 << 16) | (0xc90c >> 2), 0x00000000, (0x6e00 << 16) | (0xc90c >> 2), 0x00000000, (0x7e00 << 16) | (0xc90c >> 2), 0x00000000, (0x8e00 << 16) | (0xc90c >> 2), 0x00000000, (0x9e00 << 16) | (0xc90c >> 2), 0x00000000, (0xae00 << 16) | (0xc90c >> 2), 0x00000000, (0xbe00 << 16) | (0xc90c >> 2), 0x00000000, (0x4e00 << 16) | (0xc910 >> 2), 0x00000000, (0x5e00 << 16) | (0xc910 >> 2), 0x00000000, (0x6e00 << 16) | (0xc910 >> 2), 0x00000000, (0x7e00 << 16) | (0xc910 >> 2), 0x00000000, (0x8e00 << 16) | (0xc910 >> 2), 0x00000000, (0x9e00 << 16) | (0xc910 >> 2), 0x00000000, (0xae00 << 16) | (0xc910 >> 2), 0x00000000, (0xbe00 << 16) | (0xc910 >> 2), 0x00000000, (0x0e00 << 16) | (0xc99c 
>> 2), 0x00000000, (0x0e00 << 16) | (0x9834 >> 2), 0x00000000, (0x0000 << 16) | (0x30f00 >> 2), 0x00000000, (0x0001 << 16) | (0x30f00 >> 2), 0x00000000, (0x0000 << 16) | (0x30f04 >> 2), 0x00000000, (0x0001 << 16) | (0x30f04 >> 2), 0x00000000, (0x0000 << 16) | (0x30f08 >> 2), 0x00000000, (0x0001 << 16) | (0x30f08 >> 2), 0x00000000, (0x0000 << 16) | (0x30f0c >> 2), 0x00000000, (0x0001 << 16) | (0x30f0c >> 2), 0x00000000, (0x0600 << 16) | (0x9b7c >> 2), 0x00000000, (0x0e00 << 16) | (0x8a14 >> 2), 0x00000000, (0x0e00 << 16) | (0x8a18 >> 2), 0x00000000, (0x0600 << 16) | (0x30a00 >> 2), 0x00000000, (0x0e00 << 16) | (0x8bf0 >> 2), 0x00000000, (0x0e00 << 16) | (0x8bcc >> 2), 0x00000000, (0x0e00 << 16) | (0x8b24 >> 2), 0x00000000, (0x0e00 << 16) | (0x30a04 >> 2), 0x00000000, (0x0600 << 16) | (0x30a10 >> 2), 0x00000000, (0x0600 << 16) | (0x30a14 >> 2), 0x00000000, (0x0600 << 16) | (0x30a18 >> 2), 0x00000000, (0x0600 << 16) | (0x30a2c >> 2), 0x00000000, (0x0e00 << 16) | (0xc700 >> 2), 0x00000000, (0x0e00 << 16) | (0xc704 >> 2), 0x00000000, (0x0e00 << 16) | (0xc708 >> 2), 0x00000000, (0x0e00 << 16) | (0xc768 >> 2), 0x00000000, (0x0400 << 16) | (0xc770 >> 2), 0x00000000, (0x0400 << 16) | (0xc774 >> 2), 0x00000000, (0x0400 << 16) | (0xc778 >> 2), 0x00000000, (0x0400 << 16) | (0xc77c >> 2), 0x00000000, (0x0400 << 16) | (0xc780 >> 2), 0x00000000, (0x0400 << 16) | (0xc784 >> 2), 0x00000000, (0x0400 << 16) | (0xc788 >> 2), 0x00000000, (0x0400 << 16) | (0xc78c >> 2), 0x00000000, (0x0400 << 16) | (0xc798 >> 2), 0x00000000, (0x0400 << 16) | (0xc79c >> 2), 0x00000000, (0x0400 << 16) | (0xc7a0 >> 2), 0x00000000, (0x0400 << 16) | (0xc7a4 >> 2), 0x00000000, (0x0400 << 16) | (0xc7a8 >> 2), 0x00000000, (0x0400 << 16) | (0xc7ac >> 2), 0x00000000, (0x0400 << 16) | (0xc7b0 >> 2), 0x00000000, (0x0400 << 16) | (0xc7b4 >> 2), 0x00000000, (0x0e00 << 16) | (0x9100 >> 2), 0x00000000, (0x0e00 << 16) | (0x3c010 >> 2), 0x00000000, (0x0e00 << 16) | (0x92a8 >> 2), 0x00000000, (0x0e00 << 16) | (0x92ac >> 
2), 0x00000000, (0x0e00 << 16) | (0x92b4 >> 2), 0x00000000, (0x0e00 << 16) | (0x92b8 >> 2), 0x00000000, (0x0e00 << 16) | (0x92bc >> 2), 0x00000000, (0x0e00 << 16) | (0x92c0 >> 2), 0x00000000, (0x0e00 << 16) | (0x92c4 >> 2), 0x00000000, (0x0e00 << 16) | (0x92c8 >> 2), 0x00000000, (0x0e00 << 16) | (0x92cc >> 2), 0x00000000, (0x0e00 << 16) | (0x92d0 >> 2), 0x00000000, (0x0e00 << 16) | (0x8c00 >> 2), 0x00000000, (0x0e00 << 16) | (0x8c04 >> 2), 0x00000000, (0x0e00 << 16) | (0x8c20 >> 2), 0x00000000, (0x0e00 << 16) | (0x8c38 >> 2), 0x00000000, (0x0e00 << 16) | (0x8c3c >> 2), 0x00000000, (0x0e00 << 16) | (0xae00 >> 2), 0x00000000, (0x0e00 << 16) | (0x9604 >> 2), 0x00000000, (0x0e00 << 16) | (0xac08 >> 2), 0x00000000, (0x0e00 << 16) | (0xac0c >> 2), 0x00000000, (0x0e00 << 16) | (0xac10 >> 2), 0x00000000, (0x0e00 << 16) | (0xac14 >> 2), 0x00000000, (0x0e00 << 16) | (0xac58 >> 2), 0x00000000, (0x0e00 << 16) | (0xac68 >> 2), 0x00000000, (0x0e00 << 16) | (0xac6c >> 2), 0x00000000, (0x0e00 << 16) | (0xac70 >> 2), 0x00000000, (0x0e00 << 16) | (0xac74 >> 2), 0x00000000, (0x0e00 << 16) | (0xac78 >> 2), 0x00000000, (0x0e00 << 16) | (0xac7c >> 2), 0x00000000, (0x0e00 << 16) | (0xac80 >> 2), 0x00000000, (0x0e00 << 16) | (0xac84 >> 2), 0x00000000, (0x0e00 << 16) | (0xac88 >> 2), 0x00000000, (0x0e00 << 16) | (0xac8c >> 2), 0x00000000, (0x0e00 << 16) | (0x970c >> 2), 0x00000000, (0x0e00 << 16) | (0x9714 >> 2), 0x00000000, (0x0e00 << 16) | (0x9718 >> 2), 0x00000000, (0x0e00 << 16) | (0x971c >> 2), 0x00000000, (0x0e00 << 16) | (0x31068 >> 2), 0x00000000, (0x4e00 << 16) | (0x31068 >> 2), 0x00000000, (0x5e00 << 16) | (0x31068 >> 2), 0x00000000, (0x6e00 << 16) | (0x31068 >> 2), 0x00000000, (0x7e00 << 16) | (0x31068 >> 2), 0x00000000, (0x8e00 << 16) | (0x31068 >> 2), 0x00000000, (0x9e00 << 16) | (0x31068 >> 2), 0x00000000, (0xae00 << 16) | (0x31068 >> 2), 0x00000000, (0xbe00 << 16) | (0x31068 >> 2), 0x00000000, (0x0e00 << 16) | (0xcd10 >> 2), 0x00000000, (0x0e00 << 16) | (0xcd14 >> 2), 
0x00000000, (0x0e00 << 16) | (0x88b0 >> 2), 0x00000000, (0x0e00 << 16) | (0x88b4 >> 2), 0x00000000, (0x0e00 << 16) | (0x88b8 >> 2), 0x00000000, (0x0e00 << 16) | (0x88bc >> 2), 0x00000000, (0x0400 << 16) | (0x89c0 >> 2), 0x00000000, (0x0e00 << 16) | (0x88c4 >> 2), 0x00000000, (0x0e00 << 16) | (0x88c8 >> 2), 0x00000000, (0x0e00 << 16) | (0x88d0 >> 2), 0x00000000, (0x0e00 << 16) | (0x88d4 >> 2), 0x00000000, (0x0e00 << 16) | (0x88d8 >> 2), 0x00000000, (0x0e00 << 16) | (0x8980 >> 2), 0x00000000, (0x0e00 << 16) | (0x30938 >> 2), 0x00000000, (0x0e00 << 16) | (0x3093c >> 2), 0x00000000, (0x0e00 << 16) | (0x30940 >> 2), 0x00000000, (0x0e00 << 16) | (0x89a0 >> 2), 0x00000000, (0x0e00 << 16) | (0x30900 >> 2), 0x00000000, (0x0e00 << 16) | (0x30904 >> 2), 0x00000000, (0x0e00 << 16) | (0x89b4 >> 2), 0x00000000, (0x0e00 << 16) | (0x3c210 >> 2), 0x00000000, (0x0e00 << 16) | (0x3c214 >> 2), 0x00000000, (0x0e00 << 16) | (0x3c218 >> 2), 0x00000000, (0x0e00 << 16) | (0x8904 >> 2), 0x00000000, 0x5, (0x0e00 << 16) | (0x8c28 >> 2), (0x0e00 << 16) | (0x8c2c >> 2), (0x0e00 << 16) | (0x8c30 >> 2), (0x0e00 << 16) | (0x8c34 >> 2), (0x0e00 << 16) | (0x9600 >> 2), }; static const u32 kalindi_rlc_save_restore_register_list[] = { (0x0e00 << 16) | (0xc12c >> 2), 0x00000000, (0x0e00 << 16) | (0xc140 >> 2), 0x00000000, (0x0e00 << 16) | (0xc150 >> 2), 0x00000000, (0x0e00 << 16) | (0xc15c >> 2), 0x00000000, (0x0e00 << 16) | (0xc168 >> 2), 0x00000000, (0x0e00 << 16) | (0xc170 >> 2), 0x00000000, (0x0e00 << 16) | (0xc204 >> 2), 0x00000000, (0x0e00 << 16) | (0xc2b4 >> 2), 0x00000000, (0x0e00 << 16) | (0xc2b8 >> 2), 0x00000000, (0x0e00 << 16) | (0xc2bc >> 2), 0x00000000, (0x0e00 << 16) | (0xc2c0 >> 2), 0x00000000, (0x0e00 << 16) | (0x8228 >> 2), 0x00000000, (0x0e00 << 16) | (0x829c >> 2), 0x00000000, (0x0e00 << 16) | (0x869c >> 2), 0x00000000, (0x0600 << 16) | (0x98f4 >> 2), 0x00000000, (0x0e00 << 16) | (0x98f8 >> 2), 0x00000000, (0x0e00 << 16) | (0x9900 >> 2), 0x00000000, (0x0e00 << 16) | (0xc260 >> 2), 
0x00000000, (0x0e00 << 16) | (0x90e8 >> 2), 0x00000000, (0x0e00 << 16) | (0x3c000 >> 2), 0x00000000, (0x0e00 << 16) | (0x3c00c >> 2), 0x00000000, (0x0e00 << 16) | (0x8c1c >> 2), 0x00000000, (0x0e00 << 16) | (0x9700 >> 2), 0x00000000, (0x0e00 << 16) | (0xcd20 >> 2), 0x00000000, (0x4e00 << 16) | (0xcd20 >> 2), 0x00000000, (0x5e00 << 16) | (0xcd20 >> 2), 0x00000000, (0x6e00 << 16) | (0xcd20 >> 2), 0x00000000, (0x7e00 << 16) | (0xcd20 >> 2), 0x00000000, (0x0e00 << 16) | (0x89bc >> 2), 0x00000000, (0x0e00 << 16) | (0x8900 >> 2), 0x00000000, 0x3, (0x0e00 << 16) | (0xc130 >> 2), 0x00000000, (0x0e00 << 16) | (0xc134 >> 2), 0x00000000, (0x0e00 << 16) | (0xc1fc >> 2), 0x00000000, (0x0e00 << 16) | (0xc208 >> 2), 0x00000000, (0x0e00 << 16) | (0xc264 >> 2), 0x00000000, (0x0e00 << 16) | (0xc268 >> 2), 0x00000000, (0x0e00 << 16) | (0xc26c >> 2), 0x00000000, (0x0e00 << 16) | (0xc270 >> 2), 0x00000000, (0x0e00 << 16) | (0xc274 >> 2), 0x00000000, (0x0e00 << 16) | (0xc28c >> 2), 0x00000000, (0x0e00 << 16) | (0xc290 >> 2), 0x00000000, (0x0e00 << 16) | (0xc294 >> 2), 0x00000000, (0x0e00 << 16) | (0xc298 >> 2), 0x00000000, (0x0e00 << 16) | (0xc2a0 >> 2), 0x00000000, (0x0e00 << 16) | (0xc2a4 >> 2), 0x00000000, (0x0e00 << 16) | (0xc2a8 >> 2), 0x00000000, (0x0e00 << 16) | (0xc2ac >> 2), 0x00000000, (0x0e00 << 16) | (0x301d0 >> 2), 0x00000000, (0x0e00 << 16) | (0x30238 >> 2), 0x00000000, (0x0e00 << 16) | (0x30250 >> 2), 0x00000000, (0x0e00 << 16) | (0x30254 >> 2), 0x00000000, (0x0e00 << 16) | (0x30258 >> 2), 0x00000000, (0x0e00 << 16) | (0x3025c >> 2), 0x00000000, (0x4e00 << 16) | (0xc900 >> 2), 0x00000000, (0x5e00 << 16) | (0xc900 >> 2), 0x00000000, (0x6e00 << 16) | (0xc900 >> 2), 0x00000000, (0x7e00 << 16) | (0xc900 >> 2), 0x00000000, (0x4e00 << 16) | (0xc904 >> 2), 0x00000000, (0x5e00 << 16) | (0xc904 >> 2), 0x00000000, (0x6e00 << 16) | (0xc904 >> 2), 0x00000000, (0x7e00 << 16) | (0xc904 >> 2), 0x00000000, (0x4e00 << 16) | (0xc908 >> 2), 0x00000000, (0x5e00 << 16) | (0xc908 >> 2), 
0x00000000, (0x6e00 << 16) | (0xc908 >> 2), 0x00000000, (0x7e00 << 16) | (0xc908 >> 2), 0x00000000, (0x4e00 << 16) | (0xc90c >> 2), 0x00000000, (0x5e00 << 16) | (0xc90c >> 2), 0x00000000, (0x6e00 << 16) | (0xc90c >> 2), 0x00000000, (0x7e00 << 16) | (0xc90c >> 2), 0x00000000, (0x4e00 << 16) | (0xc910 >> 2), 0x00000000, (0x5e00 << 16) | (0xc910 >> 2), 0x00000000, (0x6e00 << 16) | (0xc910 >> 2), 0x00000000, (0x7e00 << 16) | (0xc910 >> 2), 0x00000000, (0x0e00 << 16) | (0xc99c >> 2), 0x00000000, (0x0e00 << 16) | (0x9834 >> 2), 0x00000000, (0x0000 << 16) | (0x30f00 >> 2), 0x00000000, (0x0000 << 16) | (0x30f04 >> 2), 0x00000000, (0x0000 << 16) | (0x30f08 >> 2), 0x00000000, (0x0000 << 16) | (0x30f0c >> 2), 0x00000000, (0x0600 << 16) | (0x9b7c >> 2), 0x00000000, (0x0e00 << 16) | (0x8a14 >> 2), 0x00000000, (0x0e00 << 16) | (0x8a18 >> 2), 0x00000000, (0x0600 << 16) | (0x30a00 >> 2), 0x00000000, (0x0e00 << 16) | (0x8bf0 >> 2), 0x00000000, (0x0e00 << 16) | (0x8bcc >> 2), 0x00000000, (0x0e00 << 16) | (0x8b24 >> 2), 0x00000000, (0x0e00 << 16) | (0x30a04 >> 2), 0x00000000, (0x0600 << 16) | (0x30a10 >> 2), 0x00000000, (0x0600 << 16) | (0x30a14 >> 2), 0x00000000, (0x0600 << 16) | (0x30a18 >> 2), 0x00000000, (0x0600 << 16) | (0x30a2c >> 2), 0x00000000, (0x0e00 << 16) | (0xc700 >> 2), 0x00000000, (0x0e00 << 16) | (0xc704 >> 2), 0x00000000, (0x0e00 << 16) | (0xc708 >> 2), 0x00000000, (0x0e00 << 16) | (0xc768 >> 2), 0x00000000, (0x0400 << 16) | (0xc770 >> 2), 0x00000000, (0x0400 << 16) | (0xc774 >> 2), 0x00000000, (0x0400 << 16) | (0xc798 >> 2), 0x00000000, (0x0400 << 16) | (0xc79c >> 2), 0x00000000, (0x0e00 << 16) | (0x9100 >> 2), 0x00000000, (0x0e00 << 16) | (0x3c010 >> 2), 0x00000000, (0x0e00 << 16) | (0x8c00 >> 2), 0x00000000, (0x0e00 << 16) | (0x8c04 >> 2), 0x00000000, (0x0e00 << 16) | (0x8c20 >> 2), 0x00000000, (0x0e00 << 16) | (0x8c38 >> 2), 0x00000000, (0x0e00 << 16) | (0x8c3c >> 2), 0x00000000, (0x0e00 << 16) | (0xae00 >> 2), 0x00000000, (0x0e00 << 16) | (0x9604 >> 2), 
0x00000000, (0x0e00 << 16) | (0xac08 >> 2), 0x00000000, (0x0e00 << 16) | (0xac0c >> 2), 0x00000000, (0x0e00 << 16) | (0xac10 >> 2), 0x00000000, (0x0e00 << 16) | (0xac14 >> 2), 0x00000000, (0x0e00 << 16) | (0xac58 >> 2), 0x00000000, (0x0e00 << 16) | (0xac68 >> 2), 0x00000000, (0x0e00 << 16) | (0xac6c >> 2), 0x00000000, (0x0e00 << 16) | (0xac70 >> 2), 0x00000000, (0x0e00 << 16) | (0xac74 >> 2), 0x00000000, (0x0e00 << 16) | (0xac78 >> 2), 0x00000000, (0x0e00 << 16) | (0xac7c >> 2), 0x00000000, (0x0e00 << 16) | (0xac80 >> 2), 0x00000000, (0x0e00 << 16) | (0xac84 >> 2), 0x00000000, (0x0e00 << 16) | (0xac88 >> 2), 0x00000000, (0x0e00 << 16) | (0xac8c >> 2), 0x00000000, (0x0e00 << 16) | (0x970c >> 2), 0x00000000, (0x0e00 << 16) | (0x9714 >> 2), 0x00000000, (0x0e00 << 16) | (0x9718 >> 2), 0x00000000, (0x0e00 << 16) | (0x971c >> 2), 0x00000000, (0x0e00 << 16) | (0x31068 >> 2), 0x00000000, (0x4e00 << 16) | (0x31068 >> 2), 0x00000000, (0x5e00 << 16) | (0x31068 >> 2), 0x00000000, (0x6e00 << 16) | (0x31068 >> 2), 0x00000000, (0x7e00 << 16) | (0x31068 >> 2), 0x00000000, (0x0e00 << 16) | (0xcd10 >> 2), 0x00000000, (0x0e00 << 16) | (0xcd14 >> 2), 0x00000000, (0x0e00 << 16) | (0x88b0 >> 2), 0x00000000, (0x0e00 << 16) | (0x88b4 >> 2), 0x00000000, (0x0e00 << 16) | (0x88b8 >> 2), 0x00000000, (0x0e00 << 16) | (0x88bc >> 2), 0x00000000, (0x0400 << 16) | (0x89c0 >> 2), 0x00000000, (0x0e00 << 16) | (0x88c4 >> 2), 0x00000000, (0x0e00 << 16) | (0x88c8 >> 2), 0x00000000, (0x0e00 << 16) | (0x88d0 >> 2), 0x00000000, (0x0e00 << 16) | (0x88d4 >> 2), 0x00000000, (0x0e00 << 16) | (0x88d8 >> 2), 0x00000000, (0x0e00 << 16) | (0x8980 >> 2), 0x00000000, (0x0e00 << 16) | (0x30938 >> 2), 0x00000000, (0x0e00 << 16) | (0x3093c >> 2), 0x00000000, (0x0e00 << 16) | (0x30940 >> 2), 0x00000000, (0x0e00 << 16) | (0x89a0 >> 2), 0x00000000, (0x0e00 << 16) | (0x30900 >> 2), 0x00000000, (0x0e00 << 16) | (0x30904 >> 2), 0x00000000, (0x0e00 << 16) | (0x89b4 >> 2), 0x00000000, (0x0e00 << 16) | (0x3e1fc >> 2), 
0x00000000, (0x0e00 << 16) | (0x3c210 >> 2), 0x00000000, (0x0e00 << 16) | (0x3c214 >> 2), 0x00000000, (0x0e00 << 16) | (0x3c218 >> 2), 0x00000000, (0x0e00 << 16) | (0x8904 >> 2), 0x00000000, 0x5, (0x0e00 << 16) | (0x8c28 >> 2), (0x0e00 << 16) | (0x8c2c >> 2), (0x0e00 << 16) | (0x8c30 >> 2), (0x0e00 << 16) | (0x8c34 >> 2), (0x0e00 << 16) | (0x9600 >> 2), }; static u32 gfx_v7_0_get_csb_size(struct amdgpu_device *adev); static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev, volatile u32 *buffer); static void gfx_v7_0_init_cp_pg_table(struct amdgpu_device *adev); static void gfx_v7_0_init_pg(struct amdgpu_device *adev); /* * Core functions */ /** * gfx_v7_0_init_microcode - load ucode images from disk * * @adev: amdgpu_device pointer * * Use the firmware interface to load the ucode images into * the driver (not loaded into hw). * Returns 0 on success, error on failure. */ static int gfx_v7_0_init_microcode(struct amdgpu_device *adev) { const char *chip_name; char fw_name[30]; int err; DRM_DEBUG("\n"); switch (adev->asic_type) { case CHIP_BONAIRE: chip_name = "bonaire"; break; case CHIP_HAWAII: chip_name = "hawaii"; break; case CHIP_KAVERI: chip_name = "kaveri"; break; case CHIP_KABINI: chip_name = "kabini"; break; case CHIP_MULLINS: chip_name = "mullins"; break; default: BUG(); } snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name); err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev); if (err) goto out; err = amdgpu_ucode_validate(adev->gfx.pfp_fw); if (err) goto out; snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name); err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev); if (err) goto out; err = amdgpu_ucode_validate(adev->gfx.me_fw); if (err) goto out; snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name); err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev); if (err) goto out; err = amdgpu_ucode_validate(adev->gfx.ce_fw); if (err) goto out; snprintf(fw_name, sizeof(fw_name), 
"radeon/%s_mec.bin", chip_name); err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev); if (err) goto out; err = amdgpu_ucode_validate(adev->gfx.mec_fw); if (err) goto out; if (adev->asic_type == CHIP_KAVERI) { snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec2.bin", chip_name); err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev); if (err) goto out; err = amdgpu_ucode_validate(adev->gfx.mec2_fw); if (err) goto out; } snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", chip_name); err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev); if (err) goto out; err = amdgpu_ucode_validate(adev->gfx.rlc_fw); out: if (err) { printk(KERN_ERR "gfx7: Failed to load firmware \"%s\"\n", fw_name); release_firmware(adev->gfx.pfp_fw); adev->gfx.pfp_fw = NULL; release_firmware(adev->gfx.me_fw); adev->gfx.me_fw = NULL; release_firmware(adev->gfx.ce_fw); adev->gfx.ce_fw = NULL; release_firmware(adev->gfx.mec_fw); adev->gfx.mec_fw = NULL; release_firmware(adev->gfx.mec2_fw); adev->gfx.mec2_fw = NULL; release_firmware(adev->gfx.rlc_fw); adev->gfx.rlc_fw = NULL; } return err; } /** * gfx_v7_0_tiling_mode_table_init - init the hw tiling table * * @adev: amdgpu_device pointer * * Starting with SI, the tiling setup is done globally in a * set of 32 tiling modes. Rather than selecting each set of * parameters per surface as on older asics, we just select * which index in the tiling table we want to use, and the * surface uses those parameters (CIK). 
*/
static void gfx_v7_0_tiling_mode_table_init(struct amdgpu_device *adev)
{
	const u32 num_tile_mode_states = 32;
	const u32 num_secondary_tile_mode_states = 16;
	u32 reg_offset, gb_tile_moden, split_equal_to_row_size;

	/* map the VRAM row size to the matching TILE_SPLIT setting */
	switch (adev->gfx.config.mem_row_size_in_kb) {
	case 1:
		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_1KB;
		break;
	case 2:
	default:
		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_2KB;
		break;
	case 4:
		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_4KB;
		break;
	}

	/*
	 * The tables below are raw hardware tiling parameters; the values
	 * are per-ASIC (pipe configuration differs) and are both cached in
	 * adev->gfx.config and written to the GB_(MACRO)TILE_MODE registers.
	 */
	switch (adev->asic_type) {
	case CHIP_BONAIRE:
		/* Bonaire: 4-pipe (P4_16x16) configuration */
		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
			switch (reg_offset) {
			case 0:
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
				break;
			case 1:
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
				break;
			case 2:
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
				break;
			case 3:
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
				break;
			case 4:
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
						 TILE_SPLIT(split_equal_to_row_size));
				break;
			case 5:
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
				break;
			case 6:
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
						 TILE_SPLIT(split_equal_to_row_size));
				break;
			case 7:
				gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size));
				break;
			case 8:
				gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
						 PIPE_CONFIG(ADDR_SURF_P4_16x16));
				break;
			case 9:
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
				break;
			case 10:
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				break;
			case 11:
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
				break;
			case 12:
				gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size));
				break;
			case 13:
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
				break;
			case 14:
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				break;
			case 15:
				gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				break;
			case 16:
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
				break;
			case 17:
				gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size));
				break;
			case 18:
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
						 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
				break;
			case 19:
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
						 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
				break;
			case 20:
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
						 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
				break;
			case 21:
				gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
						 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
				break;
			case 22:
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
						 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
				break;
			case 23:
				gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size));
				break;
			case 24:
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
						 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
				break;
			case 25:
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
						 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
				break;
			case 26:
				gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
						 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
				break;
			case 27:
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
				break;
			case 28:
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				break;
			case 29:
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
				break;
			case 30:
				gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size));
				break;
			default:
				gb_tile_moden = 0;
				break;
			}
			adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden;
			WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden);
		}
		/* macro (bank) tiling table; note index 7 is intentionally unused */
		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
			switch (reg_offset) {
			case 0:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 1:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 2:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 3:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 4:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 5:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
						 NUM_BANKS(ADDR_SURF_8_BANK));
				break;
			case 6:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
						 NUM_BANKS(ADDR_SURF_4_BANK));
				break;
			case 8:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 9:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 10:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 11:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 12:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 13:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
						 NUM_BANKS(ADDR_SURF_8_BANK));
				break;
			case 14:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
						 NUM_BANKS(ADDR_SURF_4_BANK));
				break;
			default:
				gb_tile_moden = 0;
				break;
			}
			adev->gfx.config.macrotile_mode_array[reg_offset] = gb_tile_moden;
			WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden);
		}
		break;
	case CHIP_HAWAII:
		/* Hawaii: 16-pipe (P16_32x32_16x16) configuration */
		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
			switch (reg_offset) {
			case 0:
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
				break;
			case 1:
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
				break;
			case 2:
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
				break;
			case 3:
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
				break;
			case 4:
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
						 TILE_SPLIT(split_equal_to_row_size));
				break;
			case 5:
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
						 TILE_SPLIT(split_equal_to_row_size));
				break;
			case 6:
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
						 TILE_SPLIT(split_equal_to_row_size));
				break;
			case 7:
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
						 TILE_SPLIT(split_equal_to_row_size));
				break;
			case 8:
				gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16));
				break;
			case 9:
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
				break;
			case 10:
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				break;
			case 11:
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
				break;
			case 12:
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
				break;
			case 13:
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
				break;
			case 14:
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				break;
			case 15:
				gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				break;
			case 16:
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
				break;
			case 17:
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
				break;
			case 18:
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
				break;
			case 19:
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING));
				break;
			case 20:
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
				break;
			case 21:
				gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
				break;
			case 22:
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
				break;
			case 23:
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
						 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
				break;
			case 24:
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
				break;
			case 25:
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
				break;
			case 26:
				gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
				break;
			case 27:
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
				break;
			case 28:
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				break;
			case 29:
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
				break;
			case 30:
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
				break;
			default:
				gb_tile_moden = 0;
				break;
			}
			adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden;
			WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden);
		}
		/* macro (bank) tiling table; note index 7 is intentionally unused */
		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
			switch (reg_offset) {
			case 0:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 1:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 2:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 3:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 4:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
						 NUM_BANKS(ADDR_SURF_8_BANK));
				break;
			case 5:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
						 NUM_BANKS(ADDR_SURF_4_BANK));
				break;
			case 6:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
						 NUM_BANKS(ADDR_SURF_4_BANK));
				break;
			case 8:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 9:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 10:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 11:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
						 NUM_BANKS(ADDR_SURF_8_BANK));
				break;
			case 12:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 13:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
						 NUM_BANKS(ADDR_SURF_8_BANK));
				break;
			case 14:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
						 NUM_BANKS(ADDR_SURF_4_BANK));
				break;
			default:
				gb_tile_moden = 0;
				break;
			}
			adev->gfx.config.macrotile_mode_array[reg_offset] = gb_tile_moden;
			WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden);
		}
		break;
	case CHIP_KABINI:
	case CHIP_KAVERI:
	case CHIP_MULLINS:
	default:
		/* APUs (Kabini/Kaveri/Mullins): 2-pipe (P2) configuration */
		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
			switch (reg_offset) {
			case 0:
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
				break;
			case 1:
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
				break;
			case 2:
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
				break;
			case 3:
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
				break;
			case 4:
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
						 TILE_SPLIT(split_equal_to_row_size));
				break;
			case 5:
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
				break;
			case 6:
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
						 TILE_SPLIT(split_equal_to_row_size));
				break;
			case 7:
				gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size));
				break;
			case 8:
				gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
						 PIPE_CONFIG(ADDR_SURF_P2));
				break;
			case 9:
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
				break;
			case 10:
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				break;
			case 11:
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
				break;
			case 12:
				gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size));
				break;
			case 13:
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
				break;
			case 14:
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				break;
			case 15:
				gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				break;
			case 16:
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
				break;
			case 17:
				gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size));
				break;
			case 18:
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
				break;
			case 19:
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING));
				break;
			case 20:
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
				break;
			case 21:
				gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
				break;
			case 22:
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
				break;
			case 23:
				gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size));
				break;
			case 24:
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
				break;
			case 25:
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
				break;
			case 26:
				gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
				break;
			case 27:
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
				break;
			case 28:
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				break;
			case 29:
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
				break;
			case 30:
				gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size));
				break;
			default:
				gb_tile_moden = 0;
				break;
			}
			adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden;
			WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden);
		}
		/* macro (bank) tiling table; note index 7 is intentionally unused */
		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
			switch (reg_offset) {
			case 0:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_8_BANK));
				break;
			case 1:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_8_BANK));
				break;
			case 2:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
						 NUM_BANKS(ADDR_SURF_8_BANK));
				break;
			case 3:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
						 NUM_BANKS(ADDR_SURF_8_BANK));
				break;
			case 4:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
						 NUM_BANKS(ADDR_SURF_8_BANK));
				break;
			case 5:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
						 NUM_BANKS(ADDR_SURF_8_BANK));
				break;
			case 6:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
						 NUM_BANKS(ADDR_SURF_8_BANK));
				break;
			case 8:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 9:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 10:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 11:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 12:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 13:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 14:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
						 NUM_BANKS(ADDR_SURF_8_BANK));
				break;
			default:
				gb_tile_moden = 0;
				break;
			}
			adev->gfx.config.macrotile_mode_array[reg_offset] = gb_tile_moden;
			WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden);
		}
		break;
	}
}

/**
 * gfx_v7_0_select_se_sh - select which SE, SH to address
 *
 * @adev: amdgpu_device pointer
 * @se_num: shader engine to address
 * @sh_num: sh block to address
 *
 * Select which SE, SH combinations to address. Certain
 * registers are instanced per SE or SH. 0xffffffff means
 * broadcast to all SEs or SHs (CIK).
 */
void gfx_v7_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num)
{
	u32 data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK;

	if ((se_num == 0xffffffff) && (sh_num == 0xffffffff))
		data |= GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK |
			GRBM_GFX_INDEX__SE_BROADCAST_WRITES_MASK;
	else if (se_num == 0xffffffff)
		data |= GRBM_GFX_INDEX__SE_BROADCAST_WRITES_MASK |
			(sh_num << GRBM_GFX_INDEX__SH_INDEX__SHIFT);
	else if (sh_num == 0xffffffff)
		data |= GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK |
			(se_num << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
	else
		data |= (sh_num << GRBM_GFX_INDEX__SH_INDEX__SHIFT) |
			(se_num << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
	WREG32(mmGRBM_GFX_INDEX, data);
}

/**
 * gfx_v7_0_create_bitmask - create a bitmask
 *
 * @bit_width: length of the mask
 *
 * create a variable length bit mask (CIK).
 * Returns the bitmask.
 */
static u32 gfx_v7_0_create_bitmask(u32 bit_width)
{
	u32 i, mask = 0;

	/* builds a mask of bit_width consecutive low-order 1 bits */
	for (i = 0; i < bit_width; i++) {
		mask <<= 1;
		mask |= 1;
	}
	return mask;
}

/**
 * gfx_v7_0_get_rb_disabled - computes the mask of disabled RBs
 *
 * @adev: amdgpu_device pointer
 * @max_rb_num_per_se: max RBs (render backends) per shader engine
 * @sh_per_se: number of SH blocks per SE for the asic
 *
 * Calculates the bitmask of disabled RBs (CIK).
 * Returns the disabled RB bitmask.
*/
static u32 gfx_v7_0_get_rb_disabled(struct amdgpu_device *adev,
				    u32 max_rb_num_per_se,
				    u32 sh_per_se)
{
	u32 data, mask;

	/* Factory-harvested RBs: only trust the fuse data when the
	 * valid bit (bit 0) is set. */
	data = RREG32(mmCC_RB_BACKEND_DISABLE);
	if (data & 1)
		data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
	else
		data = 0;

	/* Merge in user/driver-disabled RBs. */
	data |= RREG32(mmGC_USER_RB_BACKEND_DISABLE);

	data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;

	/* Limit the result to the RBs that actually exist in this SH. */
	mask = gfx_v7_0_create_bitmask(max_rb_num_per_se / sh_per_se);

	return data & mask;
}

/**
 * gfx_v7_0_setup_rb - setup the RBs on the asic
 *
 * @adev: amdgpu_device pointer
 * @se_num: number of SEs (shader engines) for the asic
 * @sh_per_se: number of SH blocks per SE for the asic
 * @max_rb_num_per_se: max RBs (render backends) per SE for the asic
 *
 * Gathers the per-SE/SH disabled-RB fuses into a global enable mask
 * and programs PA_SC_RASTER_CONFIG accordingly (CIK).
 */
static void gfx_v7_0_setup_rb(struct amdgpu_device *adev,
			      u32 se_num, u32 sh_per_se,
			      u32 max_rb_num_per_se)
{
	int i, j;
	u32 data, mask;
	u32 disabled_rbs = 0;
	u32 enabled_rbs = 0;

	/* Collect the disabled-RB bits from every SE/SH instance.
	 * GRBM_GFX_INDEX is global state, hence the mutex. */
	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < se_num; i++) {
		for (j = 0; j < sh_per_se; j++) {
			gfx_v7_0_select_se_sh(adev, i, j);
			data = gfx_v7_0_get_rb_disabled(adev, max_rb_num_per_se, sh_per_se);
			/* Hawaii packs more RB bits per SH than the other CIK parts. */
			if (adev->asic_type == CHIP_HAWAII)
				disabled_rbs |= data << ((i * sh_per_se + j) * HAWAII_RB_BITMAP_WIDTH_PER_SH);
			else
				disabled_rbs |= data << ((i * sh_per_se + j) * CIK_RB_BITMAP_WIDTH_PER_SH);
		}
	}
	/* Restore broadcast addressing. */
	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);

	/* Invert: every RB that is not disabled is enabled. */
	mask = 1;
	for (i = 0; i < max_rb_num_per_se * se_num; i++) {
		if (!(disabled_rbs & mask))
			enabled_rbs |= mask;
		mask <<= 1;
	}

	adev->gfx.config.backend_enable_mask = enabled_rbs;

	/* Program raster config per SE from the 2-bit-per-SH enable pattern. */
	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < se_num; i++) {
		gfx_v7_0_select_se_sh(adev, i, 0xffffffff);
		data = 0;
		for (j = 0; j < sh_per_se; j++) {
			switch (enabled_rbs & 3) {
			case 0:
				if (j == 0)
					data |= (RASTER_CONFIG_RB_MAP_3 <<
						 PA_SC_RASTER_CONFIG__PKR_MAP__SHIFT);
				else
					data |= (RASTER_CONFIG_RB_MAP_0 <<
						 PA_SC_RASTER_CONFIG__PKR_MAP__SHIFT);
				break;
			case 1:
				data |= (RASTER_CONFIG_RB_MAP_0 << (i * sh_per_se + j) * 2);
				break;
			case 2:
				data |= (RASTER_CONFIG_RB_MAP_3 << (i * sh_per_se + j) * 2);
				break;
			case 3:
			default:
				data |= (RASTER_CONFIG_RB_MAP_2 << (i * sh_per_se + j) * 2);
				break;
			}
			enabled_rbs >>= 2;
		}
		WREG32(mmPA_SC_RASTER_CONFIG, data);
	}
	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);
}

/**
 * gmc_v7_0_init_compute_vmid - init the compute VMIDs
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize compute vmid sh_mem registers
 *
 */
#define DEFAULT_SH_MEM_BASES	(0x6000)
#define FIRST_COMPUTE_VMID	(8)
#define LAST_COMPUTE_VMID	(16)
static void gmc_v7_0_init_compute_vmid(struct amdgpu_device *adev)
{
	int i;
	uint32_t sh_mem_config;
	uint32_t sh_mem_bases;

	/*
	 * Configure apertures:
	 * LDS:         0x60000000'00000000 - 0x60000001'00000000 (4GB)
	 * Scratch:     0x60000001'00000000 - 0x60000002'00000000 (4GB)
	 * GPUVM:       0x60010000'00000000 - 0x60020000'00000000 (1TB)
	 */
	sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);
	sh_mem_config = SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
			SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
	sh_mem_config |= MTYPE_NONCACHED << SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT;
	mutex_lock(&adev->srbm_mutex);
	/* Program VMIDs 8..15 (reserved for compute). */
	for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
		cik_srbm_select(adev, 0, 0, 0, i);
		/* CP and shaders */
		WREG32(mmSH_MEM_CONFIG, sh_mem_config);
		/* APE1 disabled: base (1) > limit (0). */
		WREG32(mmSH_MEM_APE1_BASE, 1);
		WREG32(mmSH_MEM_APE1_LIMIT, 0);
		WREG32(mmSH_MEM_BASES, sh_mem_bases);
	}
	cik_srbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);
}

/**
 * gfx_v7_0_gpu_init - setup the 3D engine
 *
 * @adev: amdgpu_device pointer
 *
 * Configures the 3D engine and tiling configuration
 * registers so that the 3D engine is usable.
*/
static void gfx_v7_0_gpu_init(struct amdgpu_device *adev)
{
	u32 gb_addr_config;
	u32 mc_shared_chmap, mc_arb_ramcfg;
	u32 dimm00_addr_map, dimm01_addr_map, dimm10_addr_map, dimm11_addr_map;
	u32 sh_mem_cfg;
	u32 tmp;
	int i;

	/* Per-ASIC configuration limits and golden GB_ADDR_CONFIG. */
	switch (adev->asic_type) {
	case CHIP_BONAIRE:
		adev->gfx.config.max_shader_engines = 2;
		adev->gfx.config.max_tile_pipes = 4;
		adev->gfx.config.max_cu_per_sh = 7;
		adev->gfx.config.max_sh_per_se = 1;
		adev->gfx.config.max_backends_per_se = 2;
		adev->gfx.config.max_texture_channel_caches = 4;
		adev->gfx.config.max_gprs = 256;
		adev->gfx.config.max_gs_threads = 32;
		adev->gfx.config.max_hw_contexts = 8;

		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_HAWAII:
		adev->gfx.config.max_shader_engines = 4;
		adev->gfx.config.max_tile_pipes = 16;
		adev->gfx.config.max_cu_per_sh = 11;
		adev->gfx.config.max_sh_per_se = 1;
		adev->gfx.config.max_backends_per_se = 4;
		adev->gfx.config.max_texture_channel_caches = 16;
		adev->gfx.config.max_gprs = 256;
		adev->gfx.config.max_gs_threads = 32;
		adev->gfx.config.max_hw_contexts = 8;

		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = HAWAII_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_KAVERI:
		adev->gfx.config.max_shader_engines = 1;
		adev->gfx.config.max_tile_pipes = 4;
		/* Kaveri SKUs differ in CU/RB counts; key off the PCI device id. */
		if ((adev->pdev->device == 0x1304) ||
		    (adev->pdev->device == 0x1305) ||
		    (adev->pdev->device == 0x130C) ||
		    (adev->pdev->device == 0x130F) ||
		    (adev->pdev->device == 0x1310) ||
		    (adev->pdev->device == 0x1311) ||
		    (adev->pdev->device == 0x131C)) {
			adev->gfx.config.max_cu_per_sh = 8;
			adev->gfx.config.max_backends_per_se = 2;
		} else if ((adev->pdev->device == 0x1309) ||
			   (adev->pdev->device == 0x130A) ||
			   (adev->pdev->device == 0x130D) ||
			   (adev->pdev->device == 0x1313) ||
			   (adev->pdev->device == 0x131D)) {
			adev->gfx.config.max_cu_per_sh = 6;
			adev->gfx.config.max_backends_per_se = 2;
		} else if ((adev->pdev->device == 0x1306) ||
			   (adev->pdev->device == 0x1307) ||
			   (adev->pdev->device == 0x130B) ||
			   (adev->pdev->device == 0x130E) ||
			   (adev->pdev->device == 0x1315) ||
			   (adev->pdev->device == 0x131B)) {
			adev->gfx.config.max_cu_per_sh = 4;
			adev->gfx.config.max_backends_per_se = 1;
		} else {
			adev->gfx.config.max_cu_per_sh = 3;
			adev->gfx.config.max_backends_per_se = 1;
		}
		adev->gfx.config.max_sh_per_se = 1;
		adev->gfx.config.max_texture_channel_caches = 4;
		adev->gfx.config.max_gprs = 256;
		adev->gfx.config.max_gs_threads = 16;
		adev->gfx.config.max_hw_contexts = 8;

		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_KABINI:
	case CHIP_MULLINS:
	default:
		adev->gfx.config.max_shader_engines = 1;
		adev->gfx.config.max_tile_pipes = 2;
		adev->gfx.config.max_cu_per_sh = 2;
		adev->gfx.config.max_sh_per_se = 1;
		adev->gfx.config.max_backends_per_se = 1;
		adev->gfx.config.max_texture_channel_caches = 2;
		adev->gfx.config.max_gprs = 256;
		adev->gfx.config.max_gs_threads = 16;
		adev->gfx.config.max_hw_contexts = 8;

		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
		break;
	}

	WREG32(mmGRBM_CNTL, (0xff << GRBM_CNTL__READ_TIMEOUT__SHIFT));

	/* NOTE(review): mc_shared_chmap is read but never used below;
	 * presumably kept for parity with the radeon driver. */
	mc_shared_chmap = RREG32(mmMC_SHARED_CHMAP);
	adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG);
	mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg;

	adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes;
	adev->gfx.config.mem_max_burst_length_bytes = 256;
	if (adev->flags & AMDGPU_IS_APU) {
		/* Get memory bank mapping mode. */
		tmp = RREG32(mmMC_FUS_DRAM0_BANK_ADDR_MAPPING);
		dimm00_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
		dimm01_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM1ADDRMAP);

		tmp = RREG32(mmMC_FUS_DRAM1_BANK_ADDR_MAPPING);
		dimm10_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
		dimm11_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM1ADDRMAP);

		/* Validate settings in case only one DIMM installed. */
		if ((dimm00_addr_map == 0) || (dimm00_addr_map == 3) || (dimm00_addr_map == 4) || (dimm00_addr_map > 12))
			dimm00_addr_map = 0;
		if ((dimm01_addr_map == 0) || (dimm01_addr_map == 3) || (dimm01_addr_map == 4) || (dimm01_addr_map > 12))
			dimm01_addr_map = 0;
		if ((dimm10_addr_map == 0) || (dimm10_addr_map == 3) || (dimm10_addr_map == 4) || (dimm10_addr_map > 12))
			dimm10_addr_map = 0;
		if ((dimm11_addr_map == 0) || (dimm11_addr_map == 3) || (dimm11_addr_map == 4) || (dimm11_addr_map > 12))
			dimm11_addr_map = 0;

		/* If DIMM Addr map is 8GB, ROW size should be 2KB. Otherwise 1KB. */
		/* If ROW size(DIMM1) != ROW size(DIMM0), ROW size should be larger one. */
		if ((dimm00_addr_map == 11) || (dimm01_addr_map == 11) || (dimm10_addr_map == 11) ||
		    (dimm11_addr_map == 11))
			adev->gfx.config.mem_row_size_in_kb = 2;
		else
			adev->gfx.config.mem_row_size_in_kb = 1;
	} else {
		/* Discrete parts: derive row size from the MC arbiter config. */
		tmp = (mc_arb_ramcfg & MC_ARB_RAMCFG__NOOFCOLS_MASK) >> MC_ARB_RAMCFG__NOOFCOLS__SHIFT;
		adev->gfx.config.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
		if (adev->gfx.config.mem_row_size_in_kb > 4)
			adev->gfx.config.mem_row_size_in_kb = 4;
	}
	/* XXX use MC settings?
	 */
	adev->gfx.config.shader_engine_tile_size = 32;
	adev->gfx.config.num_gpus = 1;
	adev->gfx.config.multi_gpu_tile_size = 64;

	/* fix up row size */
	gb_addr_config &= ~GB_ADDR_CONFIG__ROW_SIZE_MASK;
	switch (adev->gfx.config.mem_row_size_in_kb) {
	case 1:
	default:
		gb_addr_config |= (0 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT);
		break;
	case 2:
		gb_addr_config |= (1 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT);
		break;
	case 4:
		gb_addr_config |= (2 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT);
		break;
	}
	adev->gfx.config.gb_addr_config = gb_addr_config;

	/* Propagate the address config to every client that needs it. */
	WREG32(mmGB_ADDR_CONFIG, gb_addr_config);
	WREG32(mmHDP_ADDR_CONFIG, gb_addr_config);
	WREG32(mmDMIF_ADDR_CALC, gb_addr_config);
	WREG32(mmSDMA0_TILING_CONFIG + SDMA0_REGISTER_OFFSET, gb_addr_config & 0x70);
	WREG32(mmSDMA0_TILING_CONFIG + SDMA1_REGISTER_OFFSET, gb_addr_config & 0x70);
	WREG32(mmUVD_UDEC_ADDR_CONFIG, gb_addr_config);
	WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
	WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);

	gfx_v7_0_tiling_mode_table_init(adev);

	gfx_v7_0_setup_rb(adev, adev->gfx.config.max_shader_engines,
			  adev->gfx.config.max_sh_per_se,
			  adev->gfx.config.max_backends_per_se);

	/* set HW defaults for 3D engine */
	WREG32(mmCP_MEQ_THRESHOLDS,
	       (0x30 << CP_MEQ_THRESHOLDS__MEQ1_START__SHIFT) |
	       (0x60 << CP_MEQ_THRESHOLDS__MEQ2_START__SHIFT));

	mutex_lock(&adev->grbm_idx_mutex);
	/*
	 * making sure that the following register writes will be broadcasted
	 * to all the shaders
	 */
	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);

	/* XXX SH_MEM regs */
	/* where to put LDS, scratch, GPUVM in FSA64 space */
	sh_mem_cfg = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
				   SH_MEM_ALIGNMENT_MODE_UNALIGNED);

	mutex_lock(&adev->srbm_mutex);
	/* Default SH_MEM setup for all 16 VMIDs. */
	for (i = 0; i < 16; i++) {
		cik_srbm_select(adev, 0, 0, 0, i);
		/* CP and shaders */
		WREG32(mmSH_MEM_CONFIG, sh_mem_cfg);
		WREG32(mmSH_MEM_APE1_BASE, 1);
		WREG32(mmSH_MEM_APE1_LIMIT, 0);
		WREG32(mmSH_MEM_BASES, 0);
	}
	cik_srbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);

	gmc_v7_0_init_compute_vmid(adev);

	WREG32(mmSX_DEBUG_1, 0x20);

	WREG32(mmTA_CNTL_AUX, 0x00010000);

	tmp = RREG32(mmSPI_CONFIG_CNTL);
	tmp |= 0x03000000;
	WREG32(mmSPI_CONFIG_CNTL, tmp);

	WREG32(mmSQ_CONFIG, 1);

	WREG32(mmDB_DEBUG, 0);

	tmp = RREG32(mmDB_DEBUG2) & ~0xf00fffff;
	tmp |= 0x00000400;
	WREG32(mmDB_DEBUG2, tmp);

	tmp = RREG32(mmDB_DEBUG3) & ~0x0002021c;
	tmp |= 0x00020200;
	WREG32(mmDB_DEBUG3, tmp);

	tmp = RREG32(mmCB_HW_CONTROL) & ~0x00010000;
	tmp |= 0x00018208;
	WREG32(mmCB_HW_CONTROL, tmp);

	WREG32(mmSPI_CONFIG_CNTL_1, (4 << SPI_CONFIG_CNTL_1__VTX_DONE_DELAY__SHIFT));

	WREG32(mmPA_SC_FIFO_SIZE,
		((adev->gfx.config.sc_prim_fifo_size_frontend << PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE__SHIFT) |
		(adev->gfx.config.sc_prim_fifo_size_backend << PA_SC_FIFO_SIZE__SC_BACKEND_PRIM_FIFO_SIZE__SHIFT) |
		(adev->gfx.config.sc_hiz_tile_fifo_size << PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE__SHIFT) |
		(adev->gfx.config.sc_earlyz_tile_fifo_size << PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE__SHIFT)));

	WREG32(mmVGT_NUM_INSTANCES, 1);

	WREG32(mmCP_PERFMON_CNTL, 0);

	WREG32(mmSQ_CONFIG, 0);

	WREG32(mmPA_SC_FORCE_EOV_MAX_CNTS,
	       ((4095 << PA_SC_FORCE_EOV_MAX_CNTS__FORCE_EOV_MAX_CLK_CNT__SHIFT) |
		(255 << PA_SC_FORCE_EOV_MAX_CNTS__FORCE_EOV_MAX_REZ_CNT__SHIFT)));

	WREG32(mmVGT_CACHE_INVALIDATION,
	       (VC_AND_TC << VGT_CACHE_INVALIDATION__CACHE_INVALIDATION__SHIFT) |
	       (ES_AND_GS_AUTO << VGT_CACHE_INVALIDATION__AUTO_INVLD_EN__SHIFT));

	WREG32(mmVGT_GS_VERTEX_REUSE, 16);
	WREG32(mmPA_SC_LINE_STIPPLE_STATE, 0);

	WREG32(mmPA_CL_ENHANCE,
	       PA_CL_ENHANCE__CLIP_VTX_REORDER_ENA_MASK |
	       (3 << PA_CL_ENHANCE__NUM_CLIP_SEQ__SHIFT));
	WREG32(mmPA_SC_ENHANCE, PA_SC_ENHANCE__ENABLE_PA_SC_OUT_OF_ORDER_MASK);

	mutex_unlock(&adev->grbm_idx_mutex);

	/* Let the new configuration settle. */
	udelay(50);
}

/*
 * GPU scratch registers helpers function.
 */
/**
 * gfx_v7_0_scratch_init - setup driver info for CP scratch regs
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the number and offset of the CP scratch registers.
 * NOTE: use of CP scratch registers is a legacy interface and
 * is not used by default on newer asics (r6xx+).  On newer asics,
 * memory buffers are used for fences rather than scratch regs.
 */
static void gfx_v7_0_scratch_init(struct amdgpu_device *adev)
{
	int i;

	/* 7 registers starting at SCRATCH_REG0 are managed by the driver. */
	adev->gfx.scratch.num_reg = 7;
	adev->gfx.scratch.reg_base = mmSCRATCH_REG0;
	for (i = 0; i < adev->gfx.scratch.num_reg; i++) {
		adev->gfx.scratch.free[i] = true;
		adev->gfx.scratch.reg[i] = adev->gfx.scratch.reg_base + i;
	}
}

/**
 * gfx_v7_0_ring_test_ring - basic gfx ring test
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Allocate a scratch register and write to it using the gfx ring (CIK).
 * Provides a basic gfx ring test to verify that the ring is working.
 * Used by gfx_v7_0_cp_gfx_resume();
 * Returns 0 on success, error on failure.
 */
static int gfx_v7_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	r = amdgpu_gfx_scratch_get(adev, &scratch);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to get scratch reg (%d).\n", r);
		return r;
	}
	/* Seed the scratch reg, then have the CP overwrite it. */
	WREG32(scratch, 0xCAFEDEAD);
	r = amdgpu_ring_lock(ring, 3);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", ring->idx, r);
		amdgpu_gfx_scratch_free(adev, scratch);
		return r;
	}
	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
	amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_unlock_commit(ring);

	/* Poll until the CP has executed the write (or timeout). */
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i < adev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
			  ring->idx, scratch, tmp);
		r = -EINVAL;
	}
	amdgpu_gfx_scratch_free(adev, scratch);
	return r;
}

/**
 * gfx_v7_0_ring_emit_hdp - emit an hdp flush on the cp
 *
 * @adev: amdgpu_device
pointer
 * @ridx: amdgpu ring index
 *
 * Emits an hdp flush on the cp.
 */
static void gfx_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	u32 ref_and_mask;
	/* Compute rings must wait on the ME; gfx can use the PFP. */
	int usepfp = ring->type == AMDGPU_RING_TYPE_COMPUTE ? 0 : 1;

	if (ring->type == AMDGPU_RING_TYPE_COMPUTE) {
		/* Pick the HDP-flush-done bit that matches this MEC/pipe. */
		switch (ring->me) {
		case 1:
			ref_and_mask = GPU_HDP_FLUSH_DONE__CP2_MASK << ring->pipe;
			break;
		case 2:
			ref_and_mask = GPU_HDP_FLUSH_DONE__CP6_MASK << ring->pipe;
			break;
		default:
			return;
		}
	} else {
		ref_and_mask = GPU_HDP_FLUSH_DONE__CP0_MASK;
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
	amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(1) | /* write, wait, write */
				 WAIT_REG_MEM_FUNCTION(3) |  /* == */
				 WAIT_REG_MEM_ENGINE(usepfp)));   /* pfp or me */
	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ);
	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE);
	amdgpu_ring_write(ring, ref_and_mask);
	amdgpu_ring_write(ring, ref_and_mask);
	amdgpu_ring_write(ring, 0x20); /* poll interval */
}

/**
 * gfx_v7_0_ring_emit_fence_gfx - emit a fence on the gfx ring
 *
 * @ring: amdgpu ring pointer
 * @addr: GPU address the fence value is written to
 * @seq: fence sequence number
 * @flags: AMDGPU_FENCE_FLAG_* bits (64-bit write, interrupt)
 *
 * Emits a fence sequence number on the gfx ring and flushes
 * GPU caches.
 */
static void gfx_v7_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
					 u64 seq, unsigned flags)
{
	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
	/* Workaround for cache flush problems. First send a dummy EOP
	 * event down the pipe with seq one below.
	 */
	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
	amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
				 EOP_TC_ACTION_EN |
				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
				 EVENT_INDEX(5)));
	amdgpu_ring_write(ring, addr & 0xfffffffc);
	amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
				DATA_SEL(1) | INT_SEL(0));
	amdgpu_ring_write(ring, lower_32_bits(seq - 1));
	amdgpu_ring_write(ring, upper_32_bits(seq - 1));

	/* Then send the real EOP event down the pipe.
	 */
	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
	amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
				 EOP_TC_ACTION_EN |
				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
				 EVENT_INDEX(5)));
	amdgpu_ring_write(ring, addr & 0xfffffffc);
	amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
				DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
	amdgpu_ring_write(ring, lower_32_bits(seq));
	amdgpu_ring_write(ring, upper_32_bits(seq));
}

/**
 * gfx_v7_0_ring_emit_fence_compute - emit a fence on the compute ring
 *
 * @ring: amdgpu ring pointer
 * @addr: GPU address the fence value is written to
 * @seq: fence sequence number
 * @flags: AMDGPU_FENCE_FLAG_* bits (64-bit write, interrupt)
 *
 * Emits a fence sequence number on the compute ring and flushes
 * GPU caches.
 */
static void gfx_v7_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
					     u64 addr, u64 seq,
					     unsigned flags)
{
	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;

	/* RELEASE_MEM - flush caches, send int */
	amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 5));
	amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
				 EOP_TC_ACTION_EN |
				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
				 EVENT_INDEX(5)));
	amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
	amdgpu_ring_write(ring, addr & 0xfffffffc);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, lower_32_bits(seq));
	amdgpu_ring_write(ring, upper_32_bits(seq));
}

/**
 * gfx_v7_0_ring_emit_semaphore - emit a semaphore on the CP ring
 *
 * @ring: amdgpu ring buffer object
 * @semaphore: amdgpu semaphore object
 * @emit_wait: Is this a semaphore wait?
 *
 * Emits a semaphore signal/wait packet to the CP ring and prevents the PFP
 * from running ahead of semaphore waits.
 */
static bool gfx_v7_0_ring_emit_semaphore(struct amdgpu_ring *ring,
					 struct amdgpu_semaphore *semaphore,
					 bool emit_wait)
{
	uint64_t addr = semaphore->gpu_addr;
	unsigned sel = emit_wait ?
PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;

	amdgpu_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
	amdgpu_ring_write(ring, addr & 0xffffffff);
	amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) | sel);

	if (emit_wait && (ring->type == AMDGPU_RING_TYPE_GFX)) {
		/* Prevent the PFP from running ahead of the semaphore wait */
		amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
		amdgpu_ring_write(ring, 0x0);
	}

	return true;
}

/*
 * IB stuff
 */
/**
 * gfx_v7_0_ring_emit_ib - emit an IB (Indirect Buffer) on the ring
 *
 * @ring: amdgpu_ring structure holding ring information
 * @ib: amdgpu indirect buffer object
 *
 * Emits a DE (drawing engine) or CE (constant engine) IB
 * on the gfx ring.  IBs are usually generated by userspace
 * acceleration drivers and submitted to the kernel for
 * scheduling on the ring.  This function schedules the IB
 * on the gfx ring for execution by the GPU.
 */
static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
				      struct amdgpu_ib *ib)
{
	bool need_ctx_switch = ring->current_ctx != ib->ctx;
	u32 header, control = 0;
	/* wptr after the 5-dword WRITE_DATA below. */
	u32 next_rptr = ring->wptr + 5;

	/* drop the CE preamble IB for the same context */
	if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && !need_ctx_switch)
		return;

	/* Account for the SWITCH_BUFFER (2) and IB (4) packets emitted below. */
	if (need_ctx_switch)
		next_rptr += 2;

	next_rptr += 4;
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM);
	amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
	amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
	amdgpu_ring_write(ring, next_rptr);

	/* insert SWITCH_BUFFER packet before first IB in the ring frame */
	if (need_ctx_switch) {
		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
		amdgpu_ring_write(ring, 0);
	}

	if (ib->flags & AMDGPU_IB_FLAG_CE)
		header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
	else
		header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);

	/* Encode the VM id (if any) in bits 31:24 of the control word. */
	control |= ib->length_dw | (ib->vm ?
				    (ib->vm->ids[ring->idx].id << 24) : 0);

	amdgpu_ring_write(ring, header);
	amdgpu_ring_write(ring,
#ifdef __BIG_ENDIAN
			  (2 << 0) |
#endif
			  (ib->gpu_addr & 0xFFFFFFFC));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
	amdgpu_ring_write(ring, control);
}

/**
 * gfx_v7_0_ring_emit_ib_compute - emit an IB on a compute ring
 *
 * @ring: amdgpu_ring structure holding ring information
 * @ib: amdgpu indirect buffer object
 *
 * Compute counterpart of gfx_v7_0_ring_emit_ib_gfx: no CE IBs and
 * no context switching, just next_rptr bookkeeping plus the IB packet.
 */
static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
					  struct amdgpu_ib *ib)
{
	u32 header, control = 0;
	u32 next_rptr = ring->wptr + 5;

	control |= INDIRECT_BUFFER_VALID;
	next_rptr += 4;
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM);
	amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
	amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
	amdgpu_ring_write(ring, next_rptr);

	header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);

	control |= ib->length_dw | (ib->vm ?
				    (ib->vm->ids[ring->idx].id << 24) : 0);

	amdgpu_ring_write(ring, header);
	amdgpu_ring_write(ring,
#ifdef __BIG_ENDIAN
			  (2 << 0) |
#endif
			  (ib->gpu_addr & 0xFFFFFFFC));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
	amdgpu_ring_write(ring, control);
}

/**
 * gfx_v7_0_ring_test_ib - basic ring IB test
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Allocate an IB and execute it on the gfx ring (CIK).
 * Provides a basic gfx ring test to verify that IBs are working.
 * Returns 0 on success, error on failure.
*/
static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	r = amdgpu_gfx_scratch_get(adev, &scratch);
	if (r) {
		DRM_ERROR("amdgpu: failed to get scratch reg (%d).\n", r);
		return r;
	}
	/* Seed the scratch reg; the IB below overwrites it with 0xDEADBEEF. */
	WREG32(scratch, 0xCAFEDEAD);
	r = amdgpu_ib_get(ring, NULL, 256, &ib);
	if (r) {
		DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
		amdgpu_gfx_scratch_free(adev, scratch);
		return r;
	}
	ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
	ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START));
	ib.ptr[2] = 0xDEADBEEF;
	ib.length_dw = 3;
	r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED);
	if (r) {
		amdgpu_gfx_scratch_free(adev, scratch);
		amdgpu_ib_free(adev, &ib);
		DRM_ERROR("amdgpu: failed to schedule ib (%d).\n", r);
		return r;
	}
	/* Wait for the IB's fence, then poll the scratch reg for the result. */
	r = amdgpu_fence_wait(ib.fence, false);
	if (r) {
		DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
		amdgpu_gfx_scratch_free(adev, scratch);
		amdgpu_ib_free(adev, &ib);
		return r;
	}
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i < adev->usec_timeout) {
		DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
			 ib.fence->ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
			  scratch, tmp);
		r = -EINVAL;
	}
	amdgpu_gfx_scratch_free(adev, scratch);
	amdgpu_ib_free(adev, &ib);
	return r;
}

/*
 * CP.
 * On CIK, gfx and compute now have independent command processors.
 *
 * GFX
 * Gfx consists of a single ring and can process both gfx jobs and
 * compute jobs.  The gfx CP consists of three microengines (ME):
 * PFP - Pre-Fetch Parser
 * ME - Micro Engine
 * CE - Constant Engine
 * The PFP and ME make up what is considered the Drawing Engine (DE).
 * The CE is an asynchronous engine used for updating buffer descriptors
 * used by the DE so that they can be loaded into cache in parallel
 * while the DE is processing state update packets.
 *
 * Compute
 * The compute CP consists of two microengines (ME):
 * MEC1 - Compute MicroEngine 1
 * MEC2 - Compute MicroEngine 2
 * Each MEC supports 4 compute pipes and each pipe supports 8 queues.
 * The queues are exposed to userspace and are programmed directly
 * by the compute runtime.
 */
/**
 * gfx_v7_0_cp_gfx_enable - enable/disable the gfx CP MEs
 *
 * @adev: amdgpu_device pointer
 * @enable: enable or disable the MEs
 *
 * Halts or unhalts the gfx MEs.
 */
static void gfx_v7_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
{
	int i;

	if (enable) {
		WREG32(mmCP_ME_CNTL, 0);
	} else {
		/* Halt all three gfx microengines and mark rings unusable. */
		WREG32(mmCP_ME_CNTL, (CP_ME_CNTL__ME_HALT_MASK |
				      CP_ME_CNTL__PFP_HALT_MASK |
				      CP_ME_CNTL__CE_HALT_MASK));
		for (i = 0; i < adev->gfx.num_gfx_rings; i++)
			adev->gfx.gfx_ring[i].ready = false;
	}
	udelay(50);
}

/**
 * gfx_v7_0_cp_gfx_load_microcode - load the gfx CP ME ucode
 *
 * @adev: amdgpu_device pointer
 *
 * Loads the gfx PFP, ME, and CE ucode.
 * Returns 0 for success, -EINVAL if the ucode is not available.
 */
static int gfx_v7_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
{
	const struct gfx_firmware_header_v1_0 *pfp_hdr;
	const struct gfx_firmware_header_v1_0 *ce_hdr;
	const struct gfx_firmware_header_v1_0 *me_hdr;
	const __le32 *fw_data;
	unsigned i, fw_size;

	if (!adev->gfx.me_fw || !adev->gfx.pfp_fw || !adev->gfx.ce_fw)
		return -EINVAL;

	pfp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
	ce_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
	me_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;

	amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
	amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
	amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
	adev->gfx.pfp_fw_version = le32_to_cpu(pfp_hdr->header.ucode_version);
	adev->gfx.ce_fw_version = le32_to_cpu(ce_hdr->header.ucode_version);
	adev->gfx.me_fw_version = le32_to_cpu(me_hdr->header.ucode_version);
	adev->gfx.me_feature_version = le32_to_cpu(me_hdr->ucode_feature_version);
	adev->gfx.ce_feature_version = le32_to_cpu(ce_hdr->ucode_feature_version);
	adev->gfx.pfp_feature_version = le32_to_cpu(pfp_hdr->ucode_feature_version);

	/* Halt the MEs before touching their ucode RAM. */
	gfx_v7_0_cp_gfx_enable(adev, false);

	/* PFP */
	fw_data = (const __le32 *)
		(adev->gfx.pfp_fw->data +
		 le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
	WREG32(mmCP_PFP_UCODE_ADDR, 0);
	for (i = 0; i < fw_size; i++)
		WREG32(mmCP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
	WREG32(mmCP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version);

	/* CE */
	fw_data = (const __le32 *)
		(adev->gfx.ce_fw->data +
		 le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
	WREG32(mmCP_CE_UCODE_ADDR, 0);
	for (i = 0; i < fw_size; i++)
		WREG32(mmCP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
	WREG32(mmCP_CE_UCODE_ADDR, adev->gfx.ce_fw_version);

	/* ME */
	fw_data = (const __le32 *)
		(adev->gfx.me_fw->data +
		 le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
	WREG32(mmCP_ME_RAM_WADDR, 0);
	for (i = 0; i < fw_size; i++)
		WREG32(mmCP_ME_RAM_DATA, le32_to_cpup(fw_data++));
	WREG32(mmCP_ME_RAM_WADDR, adev->gfx.me_fw_version);

	return 0;
}

/**
 * gfx_v7_0_cp_gfx_start - start the gfx ring
 *
 * @adev: amdgpu_device pointer
 *
 * Enables the ring and loads the clear state context and other
 * packets required to init the ring.
 * Returns 0 for success, error for failure.
*/ static int gfx_v7_0_cp_gfx_start(struct amdgpu_device *adev) { struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0]; const struct cs_section_def *sect = NULL; const struct cs_extent_def *ext = NULL; int r, i; /* init the CP */ WREG32(mmCP_MAX_CONTEXT, adev->gfx.config.max_hw_contexts - 1); WREG32(mmCP_ENDIAN_SWAP, 0); WREG32(mmCP_DEVICE_ID, 1); gfx_v7_0_cp_gfx_enable(adev, true); r = amdgpu_ring_lock(ring, gfx_v7_0_get_csb_size(adev) + 8); if (r) { DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r); return r; } /* init the CE partitions. CE only used for gfx on CIK */ amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2)); amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE)); amdgpu_ring_write(ring, 0x8000); amdgpu_ring_write(ring, 0x8000); /* clear state buffer */ amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0)); amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE); amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1)); amdgpu_ring_write(ring, 0x80000000); amdgpu_ring_write(ring, 0x80000000); for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) { for (ext = sect->section; ext->extent != NULL; ++ext) { if (sect->id == SECT_CONTEXT) { amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count)); amdgpu_ring_write(ring, ext->reg_index - PACKET3_SET_CONTEXT_REG_START); for (i = 0; i < ext->reg_count; i++) amdgpu_ring_write(ring, ext->extent[i]); } } } amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2)); amdgpu_ring_write(ring, mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START); switch (adev->asic_type) { case CHIP_BONAIRE: amdgpu_ring_write(ring, 0x16000012); amdgpu_ring_write(ring, 0x00000000); break; case CHIP_KAVERI: amdgpu_ring_write(ring, 0x00000000); /* XXX */ amdgpu_ring_write(ring, 0x00000000); break; case CHIP_KABINI: case CHIP_MULLINS: amdgpu_ring_write(ring, 0x00000000); /* XXX */ amdgpu_ring_write(ring, 0x00000000); break; case CHIP_HAWAII: amdgpu_ring_write(ring, 0x3a00161a); 
amdgpu_ring_write(ring, 0x0000002e); break; default: amdgpu_ring_write(ring, 0x00000000); amdgpu_ring_write(ring, 0x00000000); break; } amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0)); amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE); amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0)); amdgpu_ring_write(ring, 0); amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2)); amdgpu_ring_write(ring, 0x00000316); amdgpu_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */ amdgpu_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */ amdgpu_ring_unlock_commit(ring); return 0; } /** * gfx_v7_0_cp_gfx_resume - setup the gfx ring buffer registers * * @adev: amdgpu_device pointer * * Program the location and size of the gfx ring buffer * and test it to make sure it's working. * Returns 0 for success, error for failure. */ static int gfx_v7_0_cp_gfx_resume(struct amdgpu_device *adev) { struct amdgpu_ring *ring; u32 tmp; u32 rb_bufsz; u64 rb_addr, rptr_addr; int r; WREG32(mmCP_SEM_WAIT_TIMER, 0x0); if (adev->asic_type != CHIP_HAWAII) WREG32(mmCP_SEM_INCOMPLETE_TIMER_CNTL, 0x0); /* Set the write pointer delay */ WREG32(mmCP_RB_WPTR_DELAY, 0); /* set the RB to use vmid 0 */ WREG32(mmCP_RB_VMID, 0); WREG32(mmSCRATCH_ADDR, 0); /* ring 0 - compute and gfx */ /* Set ring buffer size */ ring = &adev->gfx.gfx_ring[0]; rb_bufsz = order_base_2(ring->ring_size / 8); tmp = (order_base_2(AMDGPU_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; #ifdef __BIG_ENDIAN tmp |= 2 << CP_RB0_CNTL__BUF_SWAP__SHIFT; #endif WREG32(mmCP_RB0_CNTL, tmp); /* Initialize the ring buffer's read and write pointers */ WREG32(mmCP_RB0_CNTL, tmp | CP_RB0_CNTL__RB_RPTR_WR_ENA_MASK); ring->wptr = 0; WREG32(mmCP_RB0_WPTR, ring->wptr); /* set the wb address wether it's enabled or not */ rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4); WREG32(mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr)); WREG32(mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF); /* scratch register shadowing is no 
longer supported */ WREG32(mmSCRATCH_UMSK, 0); mdelay(1); WREG32(mmCP_RB0_CNTL, tmp); rb_addr = ring->gpu_addr >> 8; WREG32(mmCP_RB0_BASE, rb_addr); WREG32(mmCP_RB0_BASE_HI, upper_32_bits(rb_addr)); /* start the ring */ gfx_v7_0_cp_gfx_start(adev); ring->ready = true; r = amdgpu_ring_test_ring(ring); if (r) { ring->ready = false; return r; } return 0; } static u32 gfx_v7_0_ring_get_rptr_gfx(struct amdgpu_ring *ring) { u32 rptr; rptr = ring->adev->wb.wb[ring->rptr_offs]; return rptr; } static u32 gfx_v7_0_ring_get_wptr_gfx(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; u32 wptr; wptr = RREG32(mmCP_RB0_WPTR); return wptr; } static void gfx_v7_0_ring_set_wptr_gfx(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; WREG32(mmCP_RB0_WPTR, ring->wptr); (void)RREG32(mmCP_RB0_WPTR); } static u32 gfx_v7_0_ring_get_rptr_compute(struct amdgpu_ring *ring) { u32 rptr; rptr = ring->adev->wb.wb[ring->rptr_offs]; return rptr; } static u32 gfx_v7_0_ring_get_wptr_compute(struct amdgpu_ring *ring) { u32 wptr; /* XXX check if swapping is necessary on BE */ wptr = ring->adev->wb.wb[ring->wptr_offs]; return wptr; } static void gfx_v7_0_ring_set_wptr_compute(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; /* XXX check if swapping is necessary on BE */ adev->wb.wb[ring->wptr_offs] = ring->wptr; WDOORBELL32(ring->doorbell_index, ring->wptr); } /** * gfx_v7_0_cp_compute_enable - enable/disable the compute CP MEs * * @adev: amdgpu_device pointer * @enable: enable or disable the MEs * * Halts or unhalts the compute MEs. 
 */
static void gfx_v7_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
{
	int i;

	if (enable) {
		WREG32(mmCP_MEC_CNTL, 0);
	} else {
		/* halting both MEs also invalidates the rings' ready state */
		WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK |
				       CP_MEC_CNTL__MEC_ME2_HALT_MASK));
		for (i = 0; i < adev->gfx.num_compute_rings; i++)
			adev->gfx.compute_ring[i].ready = false;
	}
	udelay(50);
}

/**
 * gfx_v7_0_cp_compute_load_microcode - load the compute CP ME ucode
 *
 * @adev: amdgpu_device pointer
 *
 * Loads the compute MEC1&2 ucode.
 * Returns 0 for success, -EINVAL if the ucode is not available.
 */
static int gfx_v7_0_cp_compute_load_microcode(struct amdgpu_device *adev)
{
	const struct gfx_firmware_header_v1_0 *mec_hdr;
	const __le32 *fw_data;
	unsigned i, fw_size;

	if (!adev->gfx.mec_fw)
		return -EINVAL;

	mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
	amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
	adev->gfx.mec_fw_version = le32_to_cpu(mec_hdr->header.ucode_version);
	adev->gfx.mec_feature_version = le32_to_cpu(
					mec_hdr->ucode_feature_version);

	/* MEs must be halted while their ucode is rewritten */
	gfx_v7_0_cp_compute_enable(adev, false);

	/* MEC1 */
	fw_data = (const __le32 *)
		(adev->gfx.mec_fw->data +
		 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes) / 4;
	WREG32(mmCP_MEC_ME1_UCODE_ADDR, 0);
	for (i = 0; i < fw_size; i++)
		WREG32(mmCP_MEC_ME1_UCODE_DATA, le32_to_cpup(fw_data++));
	WREG32(mmCP_MEC_ME1_UCODE_ADDR, 0);

	if (adev->asic_type == CHIP_KAVERI) {
		const struct gfx_firmware_header_v1_0 *mec2_hdr;

		/* KV has a second MEC with its own firmware image */
		if (!adev->gfx.mec2_fw)
			return -EINVAL;

		mec2_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
		amdgpu_ucode_print_gfx_hdr(&mec2_hdr->header);
		adev->gfx.mec2_fw_version = le32_to_cpu(mec2_hdr->header.ucode_version);
		adev->gfx.mec2_feature_version = le32_to_cpu(
				mec2_hdr->ucode_feature_version);

		/* MEC2 */
		fw_data = (const __le32 *)
			(adev->gfx.mec2_fw->data +
			 le32_to_cpu(mec2_hdr->header.ucode_array_offset_bytes));
		fw_size = le32_to_cpu(mec2_hdr->header.ucode_size_bytes) / 4;
		WREG32(mmCP_MEC_ME2_UCODE_ADDR, 0);
		for (i = 0; i < fw_size; i++)
			WREG32(mmCP_MEC_ME2_UCODE_DATA, le32_to_cpup(fw_data++));
		WREG32(mmCP_MEC_ME2_UCODE_ADDR, 0);
	}

	return 0;
}

/**
 * gfx_v7_0_cp_compute_start - start the compute queues
 *
 * @adev: amdgpu_device pointer
 *
 * Enable the compute queues.
 * Returns 0 for success, error for failure.
 */
static int gfx_v7_0_cp_compute_start(struct amdgpu_device *adev)
{
	gfx_v7_0_cp_compute_enable(adev, true);

	return 0;
}

/**
 * gfx_v7_0_cp_compute_fini - stop the compute queues
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the compute queues and tear down the driver queue
 * info.
 */
static void gfx_v7_0_cp_compute_fini(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];

		if (ring->mqd_obj) {
			r = amdgpu_bo_reserve(ring->mqd_obj, false);
			if (unlikely(r != 0))
				dev_warn(adev->dev, "(%d) reserve MQD bo failed\n", r);

			amdgpu_bo_unpin(ring->mqd_obj);
			amdgpu_bo_unreserve(ring->mqd_obj);

			amdgpu_bo_unref(&ring->mqd_obj);
			ring->mqd_obj = NULL;
		}
	}
}

/* Tear down the MEC HPD EOP buffer object. */
static void gfx_v7_0_mec_fini(struct amdgpu_device *adev)
{
	int r;

	if (adev->gfx.mec.hpd_eop_obj) {
		r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, false);
		if (unlikely(r != 0))
			dev_warn(adev->dev, "(%d) reserve HPD EOP bo failed\n", r);
		amdgpu_bo_unpin(adev->gfx.mec.hpd_eop_obj);
		amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);

		amdgpu_bo_unref(&adev->gfx.mec.hpd_eop_obj);
		adev->gfx.mec.hpd_eop_obj = NULL;
	}
}

#define MEC_HPD_SIZE 2048

/* Allocate, pin and zero the GTT buffer holding the MEC HPD EOP area
 * (MEC_HPD_SIZE * 2 bytes per pipe).
 */
static int gfx_v7_0_mec_init(struct amdgpu_device *adev)
{
	int r;
	u32 *hpd;

	/*
	 * KV:    2 MEC, 4 Pipes/MEC, 8 Queues/Pipe - 64 Queues total
	 * CI/KB: 1 MEC, 4 Pipes/MEC, 8 Queues/Pipe - 32 Queues total
	 * Nonetheless, we assign only 1 pipe because all other pipes will
	 * be handled by KFD
	 */
	adev->gfx.mec.num_mec = 1;
	adev->gfx.mec.num_pipe = 1;
	adev->gfx.mec.num_queue = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe * 8;

	if (adev->gfx.mec.hpd_eop_obj == NULL) {
		r = amdgpu_bo_create(adev,
				     adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe * MEC_HPD_SIZE * 2,
				     PAGE_SIZE, true,
				     AMDGPU_GEM_DOMAIN_GTT, 0, NULL,
				     &adev->gfx.mec.hpd_eop_obj);
		if (r) {
			dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
			return r;
		}
	}

	r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, false);
	if (unlikely(r != 0)) {
		gfx_v7_0_mec_fini(adev);
		return r;
	}
	r = amdgpu_bo_pin(adev->gfx.mec.hpd_eop_obj, AMDGPU_GEM_DOMAIN_GTT,
			  &adev->gfx.mec.hpd_eop_gpu_addr);
	if (r) {
		dev_warn(adev->dev, "(%d) pin HDP EOP bo failed\n", r);
		gfx_v7_0_mec_fini(adev);
		return r;
	}
	r = amdgpu_bo_kmap(adev->gfx.mec.hpd_eop_obj, (void **)&hpd);
	if (r) {
		dev_warn(adev->dev, "(%d) map HDP EOP bo failed\n", r);
		gfx_v7_0_mec_fini(adev);
		return r;
	}

	/* clear memory. Not sure if this is required or not */
	memset(hpd, 0, adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe * MEC_HPD_SIZE * 2);

	amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
	amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);

	return 0;
}

/* CPU-side mirror of the CP_HQD_*/CP_MQD_* register block stored
 * inside the MQD.
 */
struct hqd_registers {
	u32 cp_mqd_base_addr;
	u32 cp_mqd_base_addr_hi;
	u32 cp_hqd_active;
	u32 cp_hqd_vmid;
	u32 cp_hqd_persistent_state;
	u32 cp_hqd_pipe_priority;
	u32 cp_hqd_queue_priority;
	u32 cp_hqd_quantum;
	u32 cp_hqd_pq_base;
	u32 cp_hqd_pq_base_hi;
	u32 cp_hqd_pq_rptr;
	u32 cp_hqd_pq_rptr_report_addr;
	u32 cp_hqd_pq_rptr_report_addr_hi;
	u32 cp_hqd_pq_wptr_poll_addr;
	u32 cp_hqd_pq_wptr_poll_addr_hi;
	u32 cp_hqd_pq_doorbell_control;
	u32 cp_hqd_pq_wptr;
	u32 cp_hqd_pq_control;
	u32 cp_hqd_ib_base_addr;
	u32 cp_hqd_ib_base_addr_hi;
	u32 cp_hqd_ib_rptr;
	u32 cp_hqd_ib_control;
	u32 cp_hqd_iq_timer;
	u32 cp_hqd_iq_rptr;
	u32 cp_hqd_dequeue_request;
	u32 cp_hqd_dma_offload;
	u32 cp_hqd_sema_cmd;
	u32 cp_hqd_msg_type;
	u32 cp_hqd_atomic0_preop_lo;
	u32 cp_hqd_atomic0_preop_hi;
	u32 cp_hqd_atomic1_preop_lo;
	u32 cp_hqd_atomic1_preop_hi;
	u32 cp_hqd_hq_scheduler0;
	u32 cp_hqd_hq_scheduler1;
	u32 cp_mqd_control;
};

/* Memory Queue Descriptor layout consumed by the CIK ("Bonaire") MEC. */
struct bonaire_mqd {
	u32 header;
	u32 dispatch_initiator;
	u32 dimensions[3];
	u32 start_idx[3];
	u32 num_threads[3];
	u32 pipeline_stat_enable;
	u32 perf_counter_enable;
	u32 pgm[2];
	u32 tba[2];
	u32 tma[2];
	u32 pgm_rsrc[2];
	u32 vmid;
	u32 resource_limits;
	u32 static_thread_mgmt01[2];
	u32 tmp_ring_size;
	u32 static_thread_mgmt23[2];
	u32 restart[3];
	u32 thread_trace_enable;
	u32 reserved1;
	u32 user_data[16];
	u32 vgtcs_invoke_count[2];
	struct hqd_registers queue_state;
	u32 dequeue_cntr;
	u32 interrupt_queue[64];
};

/**
 * gfx_v7_0_cp_compute_resume - setup the compute queue registers
 *
 * @adev: amdgpu_device pointer
 *
 * Program the compute queues and test them to make sure they
 * are working.
 * Returns 0 for success, error for failure.
 */
static int gfx_v7_0_cp_compute_resume(struct amdgpu_device *adev)
{
	int r, i, j;
	u32 tmp;
	bool use_doorbell = true;
	u64 hqd_gpu_addr;
	u64 mqd_gpu_addr;
	u64 eop_gpu_addr;
	u64 wb_gpu_addr;
	u32 *buf;
	struct bonaire_mqd *mqd;

	r = gfx_v7_0_cp_compute_start(adev);
	if (r)
		return r;

	/* fix up chicken bits */
	tmp = RREG32(mmCP_CPF_DEBUG);
	tmp |= (1 << 23);
	WREG32(mmCP_CPF_DEBUG, tmp);

	/* init the pipes */
	mutex_lock(&adev->srbm_mutex);
	for (i = 0; i < (adev->gfx.mec.num_pipe * adev->gfx.mec.num_mec); i++) {
		/* pipes 0-3 live on ME1, pipes 4-7 on ME2 */
		int me = (i < 4) ? 1 : 2;
		int pipe = (i < 4) ? i : (i - 4);

		eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr + (i * MEC_HPD_SIZE * 2);

		cik_srbm_select(adev, me, pipe, 0, 0);

		/* write the EOP addr */
		WREG32(mmCP_HPD_EOP_BASE_ADDR, eop_gpu_addr >> 8);
		WREG32(mmCP_HPD_EOP_BASE_ADDR_HI, upper_32_bits(eop_gpu_addr) >> 8);

		/* set the VMID assigned */
		WREG32(mmCP_HPD_EOP_VMID, 0);

		/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
		tmp = RREG32(mmCP_HPD_EOP_CONTROL);
		tmp &= ~CP_HPD_EOP_CONTROL__EOP_SIZE_MASK;
		tmp |= order_base_2(MEC_HPD_SIZE / 8);
		WREG32(mmCP_HPD_EOP_CONTROL, tmp);
	}
	cik_srbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);

	/* init the queues. Just two for now.
 */
	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];

		if (ring->mqd_obj == NULL) {
			r = amdgpu_bo_create(adev,
					     sizeof(struct bonaire_mqd),
					     PAGE_SIZE, true,
					     AMDGPU_GEM_DOMAIN_GTT, 0, NULL,
					     &ring->mqd_obj);
			if (r) {
				dev_warn(adev->dev, "(%d) create MQD bo failed\n", r);
				return r;
			}
		}

		r = amdgpu_bo_reserve(ring->mqd_obj, false);
		if (unlikely(r != 0)) {
			gfx_v7_0_cp_compute_fini(adev);
			return r;
		}
		r = amdgpu_bo_pin(ring->mqd_obj, AMDGPU_GEM_DOMAIN_GTT,
				  &mqd_gpu_addr);
		if (r) {
			dev_warn(adev->dev, "(%d) pin MQD bo failed\n", r);
			gfx_v7_0_cp_compute_fini(adev);
			return r;
		}
		r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&buf);
		if (r) {
			dev_warn(adev->dev, "(%d) map MQD bo failed\n", r);
			gfx_v7_0_cp_compute_fini(adev);
			return r;
		}

		/* init the mqd struct */
		memset(buf, 0, sizeof(struct bonaire_mqd));

		mqd = (struct bonaire_mqd *)buf;
		mqd->header = 0xC0310800;
		mqd->static_thread_mgmt01[0] = 0xffffffff;
		mqd->static_thread_mgmt01[1] = 0xffffffff;
		mqd->static_thread_mgmt23[0] = 0xffffffff;
		mqd->static_thread_mgmt23[1] = 0xffffffff;

		mutex_lock(&adev->srbm_mutex);
		cik_srbm_select(adev, ring->me,
				ring->pipe,
				ring->queue, 0);

		/* disable wptr polling */
		tmp = RREG32(mmCP_PQ_WPTR_POLL_CNTL);
		tmp &= ~CP_PQ_WPTR_POLL_CNTL__EN_MASK;
		WREG32(mmCP_PQ_WPTR_POLL_CNTL, tmp);

		/* enable doorbell? */
		mqd->queue_state.cp_hqd_pq_doorbell_control =
			RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL);
		if (use_doorbell)
			mqd->queue_state.cp_hqd_pq_doorbell_control |= CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_EN_MASK;
		else
			mqd->queue_state.cp_hqd_pq_doorbell_control &= ~CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_EN_MASK;
		WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL,
		       mqd->queue_state.cp_hqd_pq_doorbell_control);

		/* disable the queue if it's active */
		mqd->queue_state.cp_hqd_dequeue_request = 0;
		mqd->queue_state.cp_hqd_pq_rptr = 0;
		mqd->queue_state.cp_hqd_pq_wptr = 0;
		if (RREG32(mmCP_HQD_ACTIVE) & 1) {
			WREG32(mmCP_HQD_DEQUEUE_REQUEST, 1);
			for (j = 0; j < adev->usec_timeout; j++) {
				if (!(RREG32(mmCP_HQD_ACTIVE) & 1))
					break;
				udelay(1);
			}
			WREG32(mmCP_HQD_DEQUEUE_REQUEST, mqd->queue_state.cp_hqd_dequeue_request);
			WREG32(mmCP_HQD_PQ_RPTR, mqd->queue_state.cp_hqd_pq_rptr);
			WREG32(mmCP_HQD_PQ_WPTR, mqd->queue_state.cp_hqd_pq_wptr);
		}

		/* set the pointer to the MQD */
		mqd->queue_state.cp_mqd_base_addr = mqd_gpu_addr & 0xfffffffc;
		mqd->queue_state.cp_mqd_base_addr_hi = upper_32_bits(mqd_gpu_addr);
		WREG32(mmCP_MQD_BASE_ADDR, mqd->queue_state.cp_mqd_base_addr);
		WREG32(mmCP_MQD_BASE_ADDR_HI, mqd->queue_state.cp_mqd_base_addr_hi);
		/* set MQD vmid to 0 */
		mqd->queue_state.cp_mqd_control = RREG32(mmCP_MQD_CONTROL);
		mqd->queue_state.cp_mqd_control &= ~CP_MQD_CONTROL__VMID_MASK;
		WREG32(mmCP_MQD_CONTROL, mqd->queue_state.cp_mqd_control);

		/* set the pointer to the HQD, this is similar CP_RB0_BASE/_HI */
		hqd_gpu_addr = ring->gpu_addr >> 8;
		mqd->queue_state.cp_hqd_pq_base = hqd_gpu_addr;
		mqd->queue_state.cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
		WREG32(mmCP_HQD_PQ_BASE, mqd->queue_state.cp_hqd_pq_base);
		WREG32(mmCP_HQD_PQ_BASE_HI, mqd->queue_state.cp_hqd_pq_base_hi);

		/* set up the HQD, this is similar to CP_RB0_CNTL */
		mqd->queue_state.cp_hqd_pq_control = RREG32(mmCP_HQD_PQ_CONTROL);
		mqd->queue_state.cp_hqd_pq_control &=
			~(CP_HQD_PQ_CONTROL__QUEUE_SIZE_MASK |
					CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE_MASK);

		mqd->queue_state.cp_hqd_pq_control |=
			order_base_2(ring->ring_size / 8);
		mqd->queue_state.cp_hqd_pq_control |=
			(order_base_2(AMDGPU_GPU_PAGE_SIZE/8) << 8);
#ifdef __BIG_ENDIAN
		mqd->queue_state.cp_hqd_pq_control |=
			2 << CP_HQD_PQ_CONTROL__ENDIAN_SWAP__SHIFT;
#endif
		mqd->queue_state.cp_hqd_pq_control &=
			~(CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK |
					CP_HQD_PQ_CONTROL__ROQ_PQ_IB_FLIP_MASK |
					CP_HQD_PQ_CONTROL__PQ_VOLATILE_MASK);
		mqd->queue_state.cp_hqd_pq_control |=
			CP_HQD_PQ_CONTROL__PRIV_STATE_MASK |
			CP_HQD_PQ_CONTROL__KMD_QUEUE_MASK; /* assuming kernel queue control */
		WREG32(mmCP_HQD_PQ_CONTROL, mqd->queue_state.cp_hqd_pq_control);

		/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
		wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
		mqd->queue_state.cp_hqd_pq_wptr_poll_addr = wb_gpu_addr & 0xfffffffc;
		mqd->queue_state.cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
		WREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR, mqd->queue_state.cp_hqd_pq_wptr_poll_addr);
		WREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR_HI,
		       mqd->queue_state.cp_hqd_pq_wptr_poll_addr_hi);

		/* set the wb address whether it's enabled or not */
		wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
		mqd->queue_state.cp_hqd_pq_rptr_report_addr = wb_gpu_addr & 0xfffffffc;
		mqd->queue_state.cp_hqd_pq_rptr_report_addr_hi =
			upper_32_bits(wb_gpu_addr) & 0xffff;
		WREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR, mqd->queue_state.cp_hqd_pq_rptr_report_addr);
		WREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
		       mqd->queue_state.cp_hqd_pq_rptr_report_addr_hi);

		/* enable the doorbell if requested */
		if (use_doorbell) {
			mqd->queue_state.cp_hqd_pq_doorbell_control =
				RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL);
			mqd->queue_state.cp_hqd_pq_doorbell_control &=
				~CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET_MASK;
			mqd->queue_state.cp_hqd_pq_doorbell_control |=
				(ring->doorbell_index <<
				 CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT);
			mqd->queue_state.cp_hqd_pq_doorbell_control |=
				CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_EN_MASK;
			mqd->queue_state.cp_hqd_pq_doorbell_control &=
				~(CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_SOURCE_MASK |
						CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_HIT_MASK);

		} else {
			mqd->queue_state.cp_hqd_pq_doorbell_control = 0;
		}
		WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL,
		       mqd->queue_state.cp_hqd_pq_doorbell_control);

		/* read and write pointers, similar to CP_RB0_WPTR/_RPTR */
		ring->wptr = 0;
		mqd->queue_state.cp_hqd_pq_wptr = ring->wptr;
		WREG32(mmCP_HQD_PQ_WPTR, mqd->queue_state.cp_hqd_pq_wptr);
		mqd->queue_state.cp_hqd_pq_rptr = RREG32(mmCP_HQD_PQ_RPTR);

		/* set the vmid for the queue */
		mqd->queue_state.cp_hqd_vmid = 0;
		WREG32(mmCP_HQD_VMID, mqd->queue_state.cp_hqd_vmid);

		/* activate the queue */
		mqd->queue_state.cp_hqd_active = 1;
		WREG32(mmCP_HQD_ACTIVE, mqd->queue_state.cp_hqd_active);

		cik_srbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);

		amdgpu_bo_kunmap(ring->mqd_obj);
		amdgpu_bo_unreserve(ring->mqd_obj);

		ring->ready = true;
		r = amdgpu_ring_test_ring(ring);
		if (r)
			ring->ready = false;
	}

	return 0;
}

/* Enable/disable both the gfx and compute command processors. */
static void gfx_v7_0_cp_enable(struct amdgpu_device *adev, bool enable)
{
	gfx_v7_0_cp_gfx_enable(adev, enable);
	gfx_v7_0_cp_compute_enable(adev, enable);
}

/* Load gfx then compute CP microcode; returns first failure. */
static int gfx_v7_0_cp_load_microcode(struct amdgpu_device *adev)
{
	int r;

	r = gfx_v7_0_cp_gfx_load_microcode(adev);
	if (r)
		return r;
	r = gfx_v7_0_cp_compute_load_microcode(adev);
	if (r)
		return r;

	return 0;
}

/* Gate the ring0 context busy/empty interrupts on or off. */
static void gfx_v7_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
					       bool enable)
{
	u32 tmp = RREG32(mmCP_INT_CNTL_RING0);

	if (enable)
		tmp |= (CP_INT_CNTL_RING0__CNTX_BUSY_INT_ENABLE_MASK |
			CP_INT_CNTL_RING0__CNTX_EMPTY_INT_ENABLE_MASK);
	else
		tmp &= ~(CP_INT_CNTL_RING0__CNTX_BUSY_INT_ENABLE_MASK |
			 CP_INT_CNTL_RING0__CNTX_EMPTY_INT_ENABLE_MASK);
	WREG32(mmCP_INT_CNTL_RING0, tmp);
}

/* Bring up the CP: load ucode, then resume gfx and compute rings. */
static int gfx_v7_0_cp_resume(struct amdgpu_device *adev)
{
	int r;

	gfx_v7_0_enable_gui_idle_interrupt(adev, false);

	r = gfx_v7_0_cp_load_microcode(adev);
	if (r)
		return r;

	r = gfx_v7_0_cp_gfx_resume(adev);
	if (r)
		return r;
	r = gfx_v7_0_cp_compute_resume(adev);
	if (r)
		return r;

	gfx_v7_0_enable_gui_idle_interrupt(adev, true);

	return 0;
}

/* Synchronize the CE with the ME through the ce_sync write-back slot:
 * the DE writes a magic value, the CE waits on it and then clears it.
 */
static void gfx_v7_0_ce_sync_me(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u64 gpu_addr = adev->wb.gpu_addr + adev->gfx.ce_sync_offs * 4;

	/* instruct DE to set a magic number */
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
				 WRITE_DATA_DST_SEL(5)));
	amdgpu_ring_write(ring, gpu_addr & 0xfffffffc);
	amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xffffffff);
	amdgpu_ring_write(ring, 1);

	/* let CE wait till condition satisfied */
	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
	amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(0) | /* wait */
				 WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
				 WAIT_REG_MEM_FUNCTION(3) |  /* == */
				 WAIT_REG_MEM_ENGINE(2)));   /* ce */
	amdgpu_ring_write(ring, gpu_addr & 0xfffffffc);
	amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xffffffff);
	amdgpu_ring_write(ring, 1);
	amdgpu_ring_write(ring, 0xffffffff);
	amdgpu_ring_write(ring, 4); /* poll interval */

	/* instruct CE to reset wb of ce_sync to zero */
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) |
				 WRITE_DATA_DST_SEL(5) |
				 WR_CONFIRM));
	amdgpu_ring_write(ring, gpu_addr & 0xfffffffc);
	amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xffffffff);
	amdgpu_ring_write(ring, 0);
}

/*
 * vm
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */
/**
 * gfx_v7_0_ring_emit_vm_flush - cik vm flush using the CP
 *
 * @adev: amdgpu_device pointer
 *
 * Update the page table base and flush the VM TLB
 * using the CP (CIK).
 */
static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
					unsigned vm_id, uint64_t pd_addr)
{
	int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);

	/* update the page table base address for this vmid */
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
				 WRITE_DATA_DST_SEL(0)));
	if (vm_id < 8) {
		amdgpu_ring_write(ring,
				  (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
	} else {
		amdgpu_ring_write(ring,
				  (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8));
	}
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, pd_addr >> 12);

	/* bits 0-15 are the VM contexts0-15 */
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
				 WRITE_DATA_DST_SEL(0)));
	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, 1 << vm_id);

	/* wait for the invalidate to complete */
	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
	amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(0) | /* wait */
				 WAIT_REG_MEM_FUNCTION(0) |  /* always */
				 WAIT_REG_MEM_ENGINE(0))); /* me */
	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, 0); /* ref */
	amdgpu_ring_write(ring, 0); /* mask */
	amdgpu_ring_write(ring, 0x20); /* poll interval */

	/* compute doesn't have PFP */
	if (usepfp) {
		/* sync PFP to ME, otherwise we might get invalid PFP reads */
		amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
		amdgpu_ring_write(ring, 0x0);

		/* sync CE with ME to prevent CE fetch CEIB before context switch done */
		gfx_v7_0_ce_sync_me(ring);
	}
}

/*
 * RLC
 * The RLC is a multi-purpose microengine that handles a
 * variety of functions.
 */
/* Free the RLC save/restore, clear-state, and CP table buffer objects. */
static void gfx_v7_0_rlc_fini(struct amdgpu_device *adev)
{
	int r;

	/* save restore block */
	if (adev->gfx.rlc.save_restore_obj) {
		r = amdgpu_bo_reserve(adev->gfx.rlc.save_restore_obj, false);
		if (unlikely(r != 0))
			dev_warn(adev->dev, "(%d) reserve RLC sr bo failed\n", r);
		amdgpu_bo_unpin(adev->gfx.rlc.save_restore_obj);
		amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);

		amdgpu_bo_unref(&adev->gfx.rlc.save_restore_obj);
		adev->gfx.rlc.save_restore_obj = NULL;
	}

	/* clear state block */
	if (adev->gfx.rlc.clear_state_obj) {
		r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
		if (unlikely(r != 0))
			dev_warn(adev->dev, "(%d) reserve RLC c bo failed\n", r);
		amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
		amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);

		amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
		adev->gfx.rlc.clear_state_obj = NULL;
	}

	/* clear state block */
	if (adev->gfx.rlc.cp_table_obj) {
		r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, false);
		if (unlikely(r != 0))
			dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r);
		amdgpu_bo_unpin(adev->gfx.rlc.cp_table_obj);
		amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);

		amdgpu_bo_unref(&adev->gfx.rlc.cp_table_obj);
		adev->gfx.rlc.cp_table_obj = NULL;
	}
}

/* Allocate and populate the RLC save/restore, clear-state (CSB) and
 * CP jump-table buffers in VRAM.  Returns 0 or a negative error; on
 * failure everything already allocated is torn down via rlc_fini.
 */
static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
{
	const u32 *src_ptr;
	volatile u32 *dst_ptr;
	u32 dws, i;
	const struct cs_section_def *cs_data;
	int r;

	/* allocate rlc buffers */
	if (adev->flags & AMDGPU_IS_APU) {
		if (adev->asic_type == CHIP_KAVERI) {
			adev->gfx.rlc.reg_list = spectre_rlc_save_restore_register_list;
			adev->gfx.rlc.reg_list_size =
				(u32)ARRAY_SIZE(spectre_rlc_save_restore_register_list);
		} else {
			adev->gfx.rlc.reg_list = kalindi_rlc_save_restore_register_list;
			adev->gfx.rlc.reg_list_size =
				(u32)ARRAY_SIZE(kalindi_rlc_save_restore_register_list);
		}
	}
	adev->gfx.rlc.cs_data = ci_cs_data;
	adev->gfx.rlc.cp_table_size = CP_ME_TABLE_SIZE * 5 * 4;

	src_ptr = adev->gfx.rlc.reg_list;
	dws = adev->gfx.rlc.reg_list_size;
	dws += (5 * 16) + 48 + 48 + 64;

	cs_data = adev->gfx.rlc.cs_data;

	if (src_ptr) {
		/* save restore block */
		if (adev->gfx.rlc.save_restore_obj == NULL) {
			r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
					     AMDGPU_GEM_DOMAIN_VRAM, 0, NULL,
					     &adev->gfx.rlc.save_restore_obj);
			if (r) {
				dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", r);
				return r;
			}
		}

		r = amdgpu_bo_reserve(adev->gfx.rlc.save_restore_obj, false);
		if (unlikely(r != 0)) {
			gfx_v7_0_rlc_fini(adev);
			return r;
		}
		r = amdgpu_bo_pin(adev->gfx.rlc.save_restore_obj, AMDGPU_GEM_DOMAIN_VRAM,
				  &adev->gfx.rlc.save_restore_gpu_addr);
		if (r) {
			amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
			dev_warn(adev->dev, "(%d) pin RLC sr bo failed\n", r);
			gfx_v7_0_rlc_fini(adev);
			return r;
		}
		r = amdgpu_bo_kmap(adev->gfx.rlc.save_restore_obj, (void **)&adev->gfx.rlc.sr_ptr);
		if (r) {
			dev_warn(adev->dev, "(%d) map RLC sr bo failed\n", r);
			gfx_v7_0_rlc_fini(adev);
			return r;
		}
		/* write the sr buffer */
		dst_ptr = adev->gfx.rlc.sr_ptr;
		for (i = 0; i < adev->gfx.rlc.reg_list_size; i++)
			dst_ptr[i] = cpu_to_le32(src_ptr[i]);
		amdgpu_bo_kunmap(adev->gfx.rlc.save_restore_obj);
		amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
	}

	if (cs_data) {
		/* clear state block */
		adev->gfx.rlc.clear_state_size = dws = gfx_v7_0_get_csb_size(adev);

		if (adev->gfx.rlc.clear_state_obj == NULL) {
			r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
					     AMDGPU_GEM_DOMAIN_VRAM, 0, NULL,
					     &adev->gfx.rlc.clear_state_obj);
			if (r) {
				dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
				gfx_v7_0_rlc_fini(adev);
				return r;
			}
		}
		r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
		if (unlikely(r != 0)) {
			gfx_v7_0_rlc_fini(adev);
			return r;
		}
		r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj, AMDGPU_GEM_DOMAIN_VRAM,
				  &adev->gfx.rlc.clear_state_gpu_addr);
		if (r) {
			amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
			dev_warn(adev->dev, "(%d) pin RLC c bo failed\n", r);
			gfx_v7_0_rlc_fini(adev);
			return r;
		}
		r = amdgpu_bo_kmap(adev->gfx.rlc.clear_state_obj, (void **)&adev->gfx.rlc.cs_ptr);
		if (r) {
			dev_warn(adev->dev, "(%d) map RLC c bo failed\n", r);
			gfx_v7_0_rlc_fini(adev);
			return r;
		}
		/* set up the cs buffer */
		dst_ptr = adev->gfx.rlc.cs_ptr;
		gfx_v7_0_get_csb_buffer(adev, dst_ptr);
		amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
		amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
	}

	if (adev->gfx.rlc.cp_table_size) {
		if (adev->gfx.rlc.cp_table_obj == NULL) {
			r = amdgpu_bo_create(adev, adev->gfx.rlc.cp_table_size, PAGE_SIZE, true,
					     AMDGPU_GEM_DOMAIN_VRAM, 0, NULL,
					     &adev->gfx.rlc.cp_table_obj);
			if (r) {
				dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r);
				gfx_v7_0_rlc_fini(adev);
				return r;
			}
		}

		r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, false);
		if (unlikely(r != 0)) {
			dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r);
			gfx_v7_0_rlc_fini(adev);
			return r;
		}
		r = amdgpu_bo_pin(adev->gfx.rlc.cp_table_obj, AMDGPU_GEM_DOMAIN_VRAM,
				  &adev->gfx.rlc.cp_table_gpu_addr);
		if (r) {
			amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
			dev_warn(adev->dev, "(%d) pin RLC cp_table bo failed\n", r);
			gfx_v7_0_rlc_fini(adev);
			return r;
		}
		r = amdgpu_bo_kmap(adev->gfx.rlc.cp_table_obj, (void **)&adev->gfx.rlc.cp_table_ptr);
		if (r) {
			dev_warn(adev->dev, "(%d) map RLC cp table bo failed\n", r);
			gfx_v7_0_rlc_fini(adev);
			return r;
		}

		gfx_v7_0_init_cp_pg_table(adev);

		amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
		amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
	}

	return 0;
}

/* Toggle RLC load-balancing per-wavefront (LBPW). */
static void gfx_v7_0_enable_lbpw(struct amdgpu_device *adev, bool enable)
{
	u32 tmp;

	tmp = RREG32(mmRLC_LB_CNTL);
	if (enable)
		tmp |= RLC_LB_CNTL__LOAD_BALANCE_ENABLE_MASK;
	else
		tmp &= ~RLC_LB_CNTL__LOAD_BALANCE_ENABLE_MASK;
	WREG32(mmRLC_LB_CNTL, tmp);
}

/* Poll until the RLC serdes CU and non-CU masters go idle (bounded by
 * adev->usec_timeout per poll loop).
 */
static void gfx_v7_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
{
	u32 i, j, k;
	u32 mask;

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			gfx_v7_0_select_se_sh(adev, i, j);
			for (k = 0; k < adev->usec_timeout; k++) {
				if (RREG32(mmRLC_SERDES_CU_MASTER_BUSY) == 0)
					break;
				udelay(1);
			}
		}
	}
	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);

	mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
		RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK |
		RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK |
		RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK;
	for (k = 0; k < adev->usec_timeout; k++) {
		if ((RREG32(mmRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
			break;
		udelay(1);
	}
}

/* Write RLC_CNTL only if the value actually changes. */
static void gfx_v7_0_update_rlc(struct amdgpu_device *adev, u32 rlc)
{
	u32 tmp;

	tmp = RREG32(mmRLC_CNTL);
	if (tmp != rlc)
		WREG32(mmRLC_CNTL, rlc);
}

/* Halt the RLC F32 core, wait for it (and serdes) to go idle, and
 * return the previous RLC_CNTL value so the caller can restore it
 * via gfx_v7_0_update_rlc().
 */
static u32 gfx_v7_0_halt_rlc(struct amdgpu_device *adev)
{
	u32 data, orig;

	orig = data = RREG32(mmRLC_CNTL);

	if (data & RLC_CNTL__RLC_ENABLE_F32_MASK) {
		u32 i;

		data &= ~RLC_CNTL__RLC_ENABLE_F32_MASK;
		WREG32(mmRLC_CNTL, data);

		for (i = 0; i < adev->usec_timeout; i++) {
			if ((RREG32(mmRLC_GPM_STAT) & RLC_GPM_STAT__RLC_BUSY_MASK) == 0)
				break;
			udelay(1);
		}

		gfx_v7_0_wait_for_rlc_serdes(adev);
	}

	return orig;
}

/* Request RLC safe mode via RLC_GPR_REG2 and wait for the power/clock
 * status bits and the request bit to settle.
 */
void gfx_v7_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
{
	u32 tmp, i, mask;

	tmp = 0x1 | (1 << 1);
	WREG32(mmRLC_GPR_REG2, tmp);

	mask = RLC_GPM_STAT__GFX_POWER_STATUS_MASK |
		RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK;
	for (i = 0; i < adev->usec_timeout; i++) {
		if ((RREG32(mmRLC_GPM_STAT) & mask) == mask)
			break;
		udelay(1);
	}

	for (i = 0; i < adev->usec_timeout; i++) {
		if ((RREG32(mmRLC_GPR_REG2) & 0x1) == 0)
			break;
		udelay(1);
	}
}

/* Leave RLC safe mode (fire-and-forget, no status poll). */
void gfx_v7_0_exit_rlc_safe_mode(struct amdgpu_device *adev)
{
	u32 tmp;

	tmp = 0x1 | (0 << 1);
	WREG32(mmRLC_GPR_REG2, tmp);
}

/**
 * gfx_v7_0_rlc_stop - stop the RLC ME
 *
 * @adev: amdgpu_device pointer
 *
 * Halt the RLC ME (MicroEngine) (CIK).
 */
void gfx_v7_0_rlc_stop(struct amdgpu_device *adev)
{
	WREG32(mmRLC_CNTL, 0);

	gfx_v7_0_enable_gui_idle_interrupt(adev, false);

	gfx_v7_0_wait_for_rlc_serdes(adev);
}

/**
 * gfx_v7_0_rlc_start - start the RLC ME
 *
 * @adev: amdgpu_device pointer
 *
 * Unhalt the RLC ME (MicroEngine) (CIK).
 */
static void gfx_v7_0_rlc_start(struct amdgpu_device *adev)
{
	WREG32(mmRLC_CNTL, RLC_CNTL__RLC_ENABLE_F32_MASK);

	gfx_v7_0_enable_gui_idle_interrupt(adev, true);

	udelay(50);
}

/* Pulse the RLC soft reset bit in GRBM_SOFT_RESET. */
static void gfx_v7_0_rlc_reset(struct amdgpu_device *adev)
{
	u32 tmp = RREG32(mmGRBM_SOFT_RESET);

	tmp |= GRBM_SOFT_RESET__SOFT_RESET_RLC_MASK;
	WREG32(mmGRBM_SOFT_RESET, tmp);
	udelay(50);
	tmp &= ~GRBM_SOFT_RESET__SOFT_RESET_RLC_MASK;
	WREG32(mmGRBM_SOFT_RESET, tmp);
	udelay(50);
}

/**
 * gfx_v7_0_rlc_resume - setup the RLC hw
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the RLC registers, load the ucode,
 * and start the RLC (CIK).
 * Returns 0 for success, -EINVAL if the ucode is not available.
 */
static int gfx_v7_0_rlc_resume(struct amdgpu_device *adev)
{
	const struct rlc_firmware_header_v1_0 *hdr;
	const __le32 *fw_data;
	unsigned i, fw_size;
	u32 tmp;

	if (!adev->gfx.rlc_fw)
		return -EINVAL;

	hdr = (const struct rlc_firmware_header_v1_0 *)adev->gfx.rlc_fw->data;
	amdgpu_ucode_print_rlc_hdr(&hdr->header);
	adev->gfx.rlc_fw_version = le32_to_cpu(hdr->header.ucode_version);
	adev->gfx.rlc_feature_version = le32_to_cpu(
					hdr->ucode_feature_version);

	gfx_v7_0_rlc_stop(adev);

	/* disable CG */
	tmp = RREG32(mmRLC_CGCG_CGLS_CTRL) & 0xfffffffc;
	WREG32(mmRLC_CGCG_CGLS_CTRL, tmp);

	gfx_v7_0_rlc_reset(adev);

	gfx_v7_0_init_pg(adev);

	WREG32(mmRLC_LB_CNTR_INIT, 0);
	WREG32(mmRLC_LB_CNTR_MAX, 0x00008000);

	mutex_lock(&adev->grbm_idx_mutex);
	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
	WREG32(mmRLC_LB_INIT_CU_MASK, 0xffffffff);
	WREG32(mmRLC_LB_PARAMS, 0x00600408);
	WREG32(mmRLC_LB_CNTL, 0x80000004);
	mutex_unlock(&adev->grbm_idx_mutex);

	WREG32(mmRLC_MC_CNTL, 0);
	WREG32(mmRLC_UCODE_CNTL, 0);

	fw_data = (const __le32 *)
		(adev->gfx.rlc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
	WREG32(mmRLC_GPM_UCODE_ADDR, 0);
	for (i = 0; i < fw_size; i++)
		WREG32(mmRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
	WREG32(mmRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);

	/* XXX - find out what chips support lbpw */
	gfx_v7_0_enable_lbpw(adev, false);

	if (adev->asic_type == CHIP_BONAIRE)
		WREG32(mmRLC_DRIVER_CPDMA_STATUS, 0);

	gfx_v7_0_rlc_start(adev);

	return 0;
}

/* Enable/disable coarse-grain clock gating (CGCG/CGLS), gated on the
 * AMDGPU_CG_SUPPORT_GFX_CGCG flag.
 */
static void gfx_v7_0_enable_cgcg(struct amdgpu_device *adev, bool enable)
{
	u32 data, orig, tmp, tmp2;

	orig = data = RREG32(mmRLC_CGCG_CGLS_CTRL);

	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_CGCG)) {
		gfx_v7_0_enable_gui_idle_interrupt(adev, true);

		tmp = gfx_v7_0_halt_rlc(adev);

		mutex_lock(&adev->grbm_idx_mutex);
		gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
		WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
		WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
		tmp2 = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK |
			RLC_SERDES_WR_CTRL__CGCG_OVERRIDE_0_MASK |
			RLC_SERDES_WR_CTRL__CGLS_ENABLE_MASK;
		WREG32(mmRLC_SERDES_WR_CTRL, tmp2);
		mutex_unlock(&adev->grbm_idx_mutex);

		gfx_v7_0_update_rlc(adev, tmp);

		data |= RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
	} else {
		gfx_v7_0_enable_gui_idle_interrupt(adev, false);

		/* repeated reads — presumably to flush/settle the clock
		 * control path; NOTE(review): confirm intent */
		RREG32(mmCB_CGTT_SCLK_CTRL);
		RREG32(mmCB_CGTT_SCLK_CTRL);
		RREG32(mmCB_CGTT_SCLK_CTRL);
		RREG32(mmCB_CGTT_SCLK_CTRL);

		data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
	}

	if (orig != data)
		WREG32(mmRLC_CGCG_CGLS_CTRL, data);
}

/* Enable/disable medium-grain clock gating (MGCG) plus the related
 * memory light-sleep and CGTS options selected by adev->cg_flags.
 */
static void gfx_v7_0_enable_mgcg(struct amdgpu_device *adev, bool enable)
{
	u32 data, orig, tmp = 0;

	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_MGCG)) {
		if (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_MGLS) {
			if (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_CP_LS) {
				orig = data = RREG32(mmCP_MEM_SLP_CNTL);
				data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
				if (orig != data)
					WREG32(mmCP_MEM_SLP_CNTL, data);
			}
		}

		orig = data = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
		data |= 0x00000001;
		data &= 0xfffffffd;
		if (orig != data)
			WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data);

		tmp = gfx_v7_0_halt_rlc(adev);

		mutex_lock(&adev->grbm_idx_mutex);
		gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
		WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
		WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
		data = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK |
			RLC_SERDES_WR_CTRL__MGCG_OVERRIDE_0_MASK;
		WREG32(mmRLC_SERDES_WR_CTRL, data);
		mutex_unlock(&adev->grbm_idx_mutex);

		gfx_v7_0_update_rlc(adev, tmp);

		if (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_CGTS) {
			orig = data = RREG32(mmCGTS_SM_CTRL_REG);
			data &= ~CGTS_SM_CTRL_REG__SM_MODE_MASK;
			data |= (0x2 << CGTS_SM_CTRL_REG__SM_MODE__SHIFT);
			data |= CGTS_SM_CTRL_REG__SM_MODE_ENABLE_MASK;
			data &= ~CGTS_SM_CTRL_REG__OVERRIDE_MASK;
			if ((adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_MGLS) &&
			    (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_CGTS_LS))
				data &= ~CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK;
			data &= ~CGTS_SM_CTRL_REG__ON_MONITOR_ADD_MASK;
			data |= CGTS_SM_CTRL_REG__ON_MONITOR_ADD_EN_MASK;
			data |= (0x96 << CGTS_SM_CTRL_REG__ON_MONITOR_ADD__SHIFT);
			if (orig != data)
				WREG32(mmCGTS_SM_CTRL_REG, data);
		}
	} else {
		orig = data = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
		data |= 0x00000003;
		if (orig != data)
			WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data);

		data = RREG32(mmRLC_MEM_SLP_CNTL);
		if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
			data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
			WREG32(mmRLC_MEM_SLP_CNTL, data);
		}

		data = RREG32(mmCP_MEM_SLP_CNTL);
		if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
			data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
			WREG32(mmCP_MEM_SLP_CNTL, data);
		}

		orig = data = RREG32(mmCGTS_SM_CTRL_REG);
		data |= CGTS_SM_CTRL_REG__OVERRIDE_MASK |
			CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK;
		if (orig != data)
			WREG32(mmCGTS_SM_CTRL_REG, data);

		tmp = gfx_v7_0_halt_rlc(adev);

		mutex_lock(&adev->grbm_idx_mutex);
		gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
		WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
		WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
		data = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK |
			RLC_SERDES_WR_CTRL__MGCG_OVERRIDE_1_MASK;
		WREG32(mmRLC_SERDES_WR_CTRL, data);
		mutex_unlock(&adev->grbm_idx_mutex);

		gfx_v7_0_update_rlc(adev, tmp);
	}
}

/* Apply/remove gfx clock gating in the required MGCG->CGCG order. */
static void gfx_v7_0_update_cg(struct amdgpu_device *adev,
			       bool enable)
{
	gfx_v7_0_enable_gui_idle_interrupt(adev, false);

	/* order matters! */
	if (enable) {
		gfx_v7_0_enable_mgcg(adev, true);
		gfx_v7_0_enable_cgcg(adev, true);
	} else {
		gfx_v7_0_enable_cgcg(adev, false);
		gfx_v7_0_enable_mgcg(adev, false);
	}

	gfx_v7_0_enable_gui_idle_interrupt(adev, true);
}

/* Toggle SMU sclk slowdown on power-up (RLC_PG_CNTL). */
static void gfx_v7_0_enable_sclk_slowdown_on_pu(struct amdgpu_device *adev,
						bool enable)
{
	u32 data, orig;

	orig = data = RREG32(mmRLC_PG_CNTL);
	if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_RLC_SMU_HS))
		data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
	else
		data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
	if (orig != data)
		WREG32(mmRLC_PG_CNTL, data);
}

/* Toggle SMU sclk slowdown on power-down (RLC_PG_CNTL). */
static void gfx_v7_0_enable_sclk_slowdown_on_pd(struct amdgpu_device *adev,
						bool enable)
{
	u32 data, orig;

	orig = data = RREG32(mmRLC_PG_CNTL);
	if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_RLC_SMU_HS))
		data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
	else
		data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
	if (orig != data)
		WREG32(mmRLC_PG_CNTL, data);
}

/* Toggle CP powergating; note the bit is active-low (cleared = enabled). */
static void gfx_v7_0_enable_cp_pg(struct amdgpu_device *adev, bool enable)
{
	u32 data, orig;

	orig = data = RREG32(mmRLC_PG_CNTL);
	if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_CP))
		data &= ~0x8000;
	else
		data |= 0x8000;
	if (orig != data)
		WREG32(mmRLC_PG_CNTL, data);
}

/* Toggle GDS powergating; bit is active-low (cleared = enabled). */
static void gfx_v7_0_enable_gds_pg(struct amdgpu_device *adev, bool enable)
{
	u32 data, orig;

	orig = data = RREG32(mmRLC_PG_CNTL);
	if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_GDS))
		data &= ~0x2000;
	else
		data |= 0x2000;
	if (orig != data)
		WREG32(mmRLC_PG_CNTL, data);
}

/* Copy the CE/PFP/ME/MEC(/MEC2 on KV) jump tables out of the firmware
 * images into the RLC cp_table buffer (continues past this view).
 */
static void gfx_v7_0_init_cp_pg_table(struct amdgpu_device *adev)
{
	const __le32 *fw_data;
	volatile u32 *dst_ptr;
	int me, i, max_me = 4;
	u32 bo_offset = 0;
	u32 table_offset, table_size;

	if (adev->asic_type == CHIP_KAVERI)
		max_me = 5;

	if (adev->gfx.rlc.cp_table_ptr == NULL)
		return;

	/* write the cp table buffer */
	dst_ptr = adev->gfx.rlc.cp_table_ptr;
	for (me = 0; me < max_me; me++) {
		if (me == 0) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.ce_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else if (me == 1) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.pfp_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else if (me == 2) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.me_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else if (me == 3) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.mec_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.mec2_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		}

		for (i = 0; i < table_size; i++) {
dst_ptr[bo_offset + i] = cpu_to_le32(le32_to_cpu(fw_data[table_offset + i])); } bo_offset += table_size; } } static void gfx_v7_0_enable_gfx_cgpg(struct amdgpu_device *adev, bool enable) { u32 data, orig; if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_PG)) { orig = data = RREG32(mmRLC_PG_CNTL); data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK; if (orig != data) WREG32(mmRLC_PG_CNTL, data); orig = data = RREG32(mmRLC_AUTO_PG_CTRL); data |= RLC_AUTO_PG_CTRL__AUTO_PG_EN_MASK; if (orig != data) WREG32(mmRLC_AUTO_PG_CTRL, data); } else { orig = data = RREG32(mmRLC_PG_CNTL); data &= ~RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK; if (orig != data) WREG32(mmRLC_PG_CNTL, data); orig = data = RREG32(mmRLC_AUTO_PG_CTRL); data &= ~RLC_AUTO_PG_CTRL__AUTO_PG_EN_MASK; if (orig != data) WREG32(mmRLC_AUTO_PG_CTRL, data); data = RREG32(mmDB_RENDER_CONTROL); } } static u32 gfx_v7_0_get_cu_active_bitmap(struct amdgpu_device *adev, u32 se, u32 sh) { u32 mask = 0, tmp, tmp1; int i; gfx_v7_0_select_se_sh(adev, se, sh); tmp = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG); tmp1 = RREG32(mmGC_USER_SHADER_ARRAY_CONFIG); gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff); tmp &= 0xffff0000; tmp |= tmp1; tmp >>= 16; for (i = 0; i < adev->gfx.config.max_cu_per_sh; i ++) { mask <<= 1; mask |= 1; } return (~tmp) & mask; } static void gfx_v7_0_init_ao_cu_mask(struct amdgpu_device *adev) { uint32_t tmp, active_cu_number; struct amdgpu_cu_info cu_info; gfx_v7_0_get_cu_info(adev, &cu_info); tmp = cu_info.ao_cu_mask; active_cu_number = cu_info.number; WREG32(mmRLC_PG_ALWAYS_ON_CU_MASK, tmp); tmp = RREG32(mmRLC_MAX_PG_CU); tmp &= ~RLC_MAX_PG_CU__MAX_POWERED_UP_CU_MASK; tmp |= (active_cu_number << RLC_MAX_PG_CU__MAX_POWERED_UP_CU__SHIFT); WREG32(mmRLC_MAX_PG_CU, tmp); } static void gfx_v7_0_enable_gfx_static_mgpg(struct amdgpu_device *adev, bool enable) { u32 data, orig; orig = data = RREG32(mmRLC_PG_CNTL); if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_SMG)) data |= 
RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK; else data &= ~RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK; if (orig != data) WREG32(mmRLC_PG_CNTL, data); } static void gfx_v7_0_enable_gfx_dynamic_mgpg(struct amdgpu_device *adev, bool enable) { u32 data, orig; orig = data = RREG32(mmRLC_PG_CNTL); if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_DMG)) data |= RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK; else data &= ~RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK; if (orig != data) WREG32(mmRLC_PG_CNTL, data); } #define RLC_SAVE_AND_RESTORE_STARTING_OFFSET 0x90 #define RLC_CLEAR_STATE_DESCRIPTOR_OFFSET 0x3D static void gfx_v7_0_init_gfx_cgpg(struct amdgpu_device *adev) { u32 data, orig; u32 i; if (adev->gfx.rlc.cs_data) { WREG32(mmRLC_GPM_SCRATCH_ADDR, RLC_CLEAR_STATE_DESCRIPTOR_OFFSET); WREG32(mmRLC_GPM_SCRATCH_DATA, upper_32_bits(adev->gfx.rlc.clear_state_gpu_addr)); WREG32(mmRLC_GPM_SCRATCH_DATA, lower_32_bits(adev->gfx.rlc.clear_state_gpu_addr)); WREG32(mmRLC_GPM_SCRATCH_DATA, adev->gfx.rlc.clear_state_size); } else { WREG32(mmRLC_GPM_SCRATCH_ADDR, RLC_CLEAR_STATE_DESCRIPTOR_OFFSET); for (i = 0; i < 3; i++) WREG32(mmRLC_GPM_SCRATCH_DATA, 0); } if (adev->gfx.rlc.reg_list) { WREG32(mmRLC_GPM_SCRATCH_ADDR, RLC_SAVE_AND_RESTORE_STARTING_OFFSET); for (i = 0; i < adev->gfx.rlc.reg_list_size; i++) WREG32(mmRLC_GPM_SCRATCH_DATA, adev->gfx.rlc.reg_list[i]); } orig = data = RREG32(mmRLC_PG_CNTL); data |= RLC_PG_CNTL__GFX_POWER_GATING_SRC_MASK; if (orig != data) WREG32(mmRLC_PG_CNTL, data); WREG32(mmRLC_SAVE_AND_RESTORE_BASE, adev->gfx.rlc.save_restore_gpu_addr >> 8); WREG32(mmRLC_JUMP_TABLE_RESTORE, adev->gfx.rlc.cp_table_gpu_addr >> 8); data = RREG32(mmCP_RB_WPTR_POLL_CNTL); data &= ~CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK; data |= (0x60 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT); WREG32(mmCP_RB_WPTR_POLL_CNTL, data); data = 0x10101010; WREG32(mmRLC_PG_DELAY, data); data = RREG32(mmRLC_PG_DELAY_2); data &= ~0xff; data |= 0x3; WREG32(mmRLC_PG_DELAY_2, data); data = 
RREG32(mmRLC_AUTO_PG_CTRL); data &= ~RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD_MASK; data |= (0x700 << RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT); WREG32(mmRLC_AUTO_PG_CTRL, data); } static void gfx_v7_0_update_gfx_pg(struct amdgpu_device *adev, bool enable) { gfx_v7_0_enable_gfx_cgpg(adev, enable); gfx_v7_0_enable_gfx_static_mgpg(adev, enable); gfx_v7_0_enable_gfx_dynamic_mgpg(adev, enable); } static u32 gfx_v7_0_get_csb_size(struct amdgpu_device *adev) { u32 count = 0; const struct cs_section_def *sect = NULL; const struct cs_extent_def *ext = NULL; if (adev->gfx.rlc.cs_data == NULL) return 0; /* begin clear state */ count += 2; /* context control state */ count += 3; for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) { for (ext = sect->section; ext->extent != NULL; ++ext) { if (sect->id == SECT_CONTEXT) count += 2 + ext->reg_count; else return 0; } } /* pa_sc_raster_config/pa_sc_raster_config1 */ count += 4; /* end clear state */ count += 2; /* clear state */ count += 2; return count; } static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev, volatile u32 *buffer) { u32 count = 0, i; const struct cs_section_def *sect = NULL; const struct cs_extent_def *ext = NULL; if (adev->gfx.rlc.cs_data == NULL) return; if (buffer == NULL) return; buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0)); buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE); buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1)); buffer[count++] = cpu_to_le32(0x80000000); buffer[count++] = cpu_to_le32(0x80000000); for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) { for (ext = sect->section; ext->extent != NULL; ++ext) { if (sect->id == SECT_CONTEXT) { buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count)); buffer[count++] = cpu_to_le32(ext->reg_index - PACKET3_SET_CONTEXT_REG_START); for (i = 0; i < ext->reg_count; i++) buffer[count++] = cpu_to_le32(ext->extent[i]); } else { 
return; } } } buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 2)); buffer[count++] = cpu_to_le32(mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START); switch (adev->asic_type) { case CHIP_BONAIRE: buffer[count++] = cpu_to_le32(0x16000012); buffer[count++] = cpu_to_le32(0x00000000); break; case CHIP_KAVERI: buffer[count++] = cpu_to_le32(0x00000000); /* XXX */ buffer[count++] = cpu_to_le32(0x00000000); break; case CHIP_KABINI: case CHIP_MULLINS: buffer[count++] = cpu_to_le32(0x00000000); /* XXX */ buffer[count++] = cpu_to_le32(0x00000000); break; case CHIP_HAWAII: buffer[count++] = cpu_to_le32(0x3a00161a); buffer[count++] = cpu_to_le32(0x0000002e); break; default: buffer[count++] = cpu_to_le32(0x00000000); buffer[count++] = cpu_to_le32(0x00000000); break; } buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0)); buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE); buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0)); buffer[count++] = cpu_to_le32(0); } static void gfx_v7_0_init_pg(struct amdgpu_device *adev) { if (adev->pg_flags & (AMDGPU_PG_SUPPORT_GFX_PG | AMDGPU_PG_SUPPORT_GFX_SMG | AMDGPU_PG_SUPPORT_GFX_DMG | AMDGPU_PG_SUPPORT_CP | AMDGPU_PG_SUPPORT_GDS | AMDGPU_PG_SUPPORT_RLC_SMU_HS)) { gfx_v7_0_enable_sclk_slowdown_on_pu(adev, true); gfx_v7_0_enable_sclk_slowdown_on_pd(adev, true); if (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_PG) { gfx_v7_0_init_gfx_cgpg(adev); gfx_v7_0_enable_cp_pg(adev, true); gfx_v7_0_enable_gds_pg(adev, true); } gfx_v7_0_init_ao_cu_mask(adev); gfx_v7_0_update_gfx_pg(adev, true); } } static void gfx_v7_0_fini_pg(struct amdgpu_device *adev) { if (adev->pg_flags & (AMDGPU_PG_SUPPORT_GFX_PG | AMDGPU_PG_SUPPORT_GFX_SMG | AMDGPU_PG_SUPPORT_GFX_DMG | AMDGPU_PG_SUPPORT_CP | AMDGPU_PG_SUPPORT_GDS | AMDGPU_PG_SUPPORT_RLC_SMU_HS)) { gfx_v7_0_update_gfx_pg(adev, false); if (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_PG) { gfx_v7_0_enable_cp_pg(adev, false); gfx_v7_0_enable_gds_pg(adev, false); } } } /** * 
gfx_v7_0_get_gpu_clock_counter - return GPU clock counter snapshot * * @adev: amdgpu_device pointer * * Fetches a GPU clock counter snapshot (SI). * Returns the 64 bit clock counter snapshot. */ uint64_t gfx_v7_0_get_gpu_clock_counter(struct amdgpu_device *adev) { uint64_t clock; mutex_lock(&adev->gfx.gpu_clock_mutex); WREG32(mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1); clock = (uint64_t)RREG32(mmRLC_GPU_CLOCK_COUNT_LSB) | ((uint64_t)RREG32(mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL); mutex_unlock(&adev->gfx.gpu_clock_mutex); return clock; } static void gfx_v7_0_ring_emit_gds_switch(struct amdgpu_ring *ring, uint32_t vmid, uint32_t gds_base, uint32_t gds_size, uint32_t gws_base, uint32_t gws_size, uint32_t oa_base, uint32_t oa_size) { gds_base = gds_base >> AMDGPU_GDS_SHIFT; gds_size = gds_size >> AMDGPU_GDS_SHIFT; gws_base = gws_base >> AMDGPU_GWS_SHIFT; gws_size = gws_size >> AMDGPU_GWS_SHIFT; oa_base = oa_base >> AMDGPU_OA_SHIFT; oa_size = oa_size >> AMDGPU_OA_SHIFT; /* GDS Base */ amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | WRITE_DATA_DST_SEL(0))); amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].mem_base); amdgpu_ring_write(ring, 0); amdgpu_ring_write(ring, gds_base); /* GDS Size */ amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | WRITE_DATA_DST_SEL(0))); amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].mem_size); amdgpu_ring_write(ring, 0); amdgpu_ring_write(ring, gds_size); /* GWS */ amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | WRITE_DATA_DST_SEL(0))); amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].gws); amdgpu_ring_write(ring, 0); amdgpu_ring_write(ring, gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base); /* OA */ amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | WRITE_DATA_DST_SEL(0))); amdgpu_ring_write(ring, 
amdgpu_gds_reg_offset[vmid].oa); amdgpu_ring_write(ring, 0); amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base)); } static int gfx_v7_0_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; adev->gfx.num_gfx_rings = GFX7_NUM_GFX_RINGS; adev->gfx.num_compute_rings = GFX7_NUM_COMPUTE_RINGS; gfx_v7_0_set_ring_funcs(adev); gfx_v7_0_set_irq_funcs(adev); gfx_v7_0_set_gds_init(adev); return 0; } static int gfx_v7_0_sw_init(void *handle) { struct amdgpu_ring *ring; struct amdgpu_device *adev = (struct amdgpu_device *)handle; int i, r; /* EOP Event */ r = amdgpu_irq_add_id(adev, 181, &adev->gfx.eop_irq); if (r) return r; /* Privileged reg */ r = amdgpu_irq_add_id(adev, 184, &adev->gfx.priv_reg_irq); if (r) return r; /* Privileged inst */ r = amdgpu_irq_add_id(adev, 185, &adev->gfx.priv_inst_irq); if (r) return r; gfx_v7_0_scratch_init(adev); r = gfx_v7_0_init_microcode(adev); if (r) { DRM_ERROR("Failed to load gfx firmware!\n"); return r; } r = gfx_v7_0_rlc_init(adev); if (r) { DRM_ERROR("Failed to init rlc BOs!\n"); return r; } /* allocate mec buffers */ r = gfx_v7_0_mec_init(adev); if (r) { DRM_ERROR("Failed to init MEC BOs!\n"); return r; } r = amdgpu_wb_get(adev, &adev->gfx.ce_sync_offs); if (r) { DRM_ERROR("(%d) gfx.ce_sync_offs wb alloc failed\n", r); return r; } for (i = 0; i < adev->gfx.num_gfx_rings; i++) { ring = &adev->gfx.gfx_ring[i]; ring->ring_obj = NULL; sprintf(ring->name, "gfx"); r = amdgpu_ring_init(adev, ring, 1024 * 1024, PACKET3(PACKET3_NOP, 0x3FFF), 0xf, &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP, AMDGPU_RING_TYPE_GFX); if (r) return r; } /* set up the compute queues */ for (i = 0; i < adev->gfx.num_compute_rings; i++) { unsigned irq_type; /* max 32 queues per MEC */ if ((i >= 32) || (i >= AMDGPU_MAX_COMPUTE_RINGS)) { DRM_ERROR("Too many (%d) compute rings!\n", i); break; } ring = &adev->gfx.compute_ring[i]; ring->ring_obj = NULL; ring->use_doorbell = true; ring->doorbell_index = 
AMDGPU_DOORBELL_MEC_RING0 + i; ring->me = 1; /* first MEC */ ring->pipe = i / 8; ring->queue = i % 8; sprintf(ring->name, "comp %d.%d.%d", ring->me, ring->pipe, ring->queue); irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe; /* type-2 packets are deprecated on MEC, use type-3 instead */ r = amdgpu_ring_init(adev, ring, 1024 * 1024, PACKET3(PACKET3_NOP, 0x3FFF), 0xf, &adev->gfx.eop_irq, irq_type, AMDGPU_RING_TYPE_COMPUTE); if (r) return r; } /* reserve GDS, GWS and OA resource for gfx */ r = amdgpu_bo_create(adev, adev->gds.mem.gfx_partition_size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_GDS, 0, NULL, &adev->gds.gds_gfx_bo); if (r) return r; r = amdgpu_bo_create(adev, adev->gds.gws.gfx_partition_size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_GWS, 0, NULL, &adev->gds.gws_gfx_bo); if (r) return r; r = amdgpu_bo_create(adev, adev->gds.oa.gfx_partition_size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_OA, 0, NULL, &adev->gds.oa_gfx_bo); if (r) return r; return r; } static int gfx_v7_0_sw_fini(void *handle) { int i; struct amdgpu_device *adev = (struct amdgpu_device *)handle; amdgpu_bo_unref(&adev->gds.oa_gfx_bo); amdgpu_bo_unref(&adev->gds.gws_gfx_bo); amdgpu_bo_unref(&adev->gds.gds_gfx_bo); for (i = 0; i < adev->gfx.num_gfx_rings; i++) amdgpu_ring_fini(&adev->gfx.gfx_ring[i]); for (i = 0; i < adev->gfx.num_compute_rings; i++) amdgpu_ring_fini(&adev->gfx.compute_ring[i]); amdgpu_wb_free(adev, adev->gfx.ce_sync_offs); gfx_v7_0_cp_compute_fini(adev); gfx_v7_0_rlc_fini(adev); gfx_v7_0_mec_fini(adev); return 0; } static int gfx_v7_0_hw_init(void *handle) { int r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; gfx_v7_0_gpu_init(adev); /* init rlc */ r = gfx_v7_0_rlc_resume(adev); if (r) return r; r = gfx_v7_0_cp_resume(adev); if (r) return r; adev->gfx.ce_ram_size = 0x8000; return r; } static int gfx_v7_0_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; gfx_v7_0_cp_enable(adev, false); gfx_v7_0_rlc_stop(adev); gfx_v7_0_fini_pg(adev); 
return 0; } static int gfx_v7_0_suspend(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; return gfx_v7_0_hw_fini(adev); } static int gfx_v7_0_resume(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; return gfx_v7_0_hw_init(adev); } static bool gfx_v7_0_is_idle(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; if (RREG32(mmGRBM_STATUS) & GRBM_STATUS__GUI_ACTIVE_MASK) return false; else return true; } static int gfx_v7_0_wait_for_idle(void *handle) { unsigned i; u32 tmp; struct amdgpu_device *adev = (struct amdgpu_device *)handle; for (i = 0; i < adev->usec_timeout; i++) { /* read MC_STATUS */ tmp = RREG32(mmGRBM_STATUS) & GRBM_STATUS__GUI_ACTIVE_MASK; if (!tmp) return 0; udelay(1); } return -ETIMEDOUT; } static void gfx_v7_0_print_status(void *handle) { int i; struct amdgpu_device *adev = (struct amdgpu_device *)handle; dev_info(adev->dev, "GFX 7.x registers\n"); dev_info(adev->dev, " GRBM_STATUS=0x%08X\n", RREG32(mmGRBM_STATUS)); dev_info(adev->dev, " GRBM_STATUS2=0x%08X\n", RREG32(mmGRBM_STATUS2)); dev_info(adev->dev, " GRBM_STATUS_SE0=0x%08X\n", RREG32(mmGRBM_STATUS_SE0)); dev_info(adev->dev, " GRBM_STATUS_SE1=0x%08X\n", RREG32(mmGRBM_STATUS_SE1)); dev_info(adev->dev, " GRBM_STATUS_SE2=0x%08X\n", RREG32(mmGRBM_STATUS_SE2)); dev_info(adev->dev, " GRBM_STATUS_SE3=0x%08X\n", RREG32(mmGRBM_STATUS_SE3)); dev_info(adev->dev, " CP_STAT = 0x%08x\n", RREG32(mmCP_STAT)); dev_info(adev->dev, " CP_STALLED_STAT1 = 0x%08x\n", RREG32(mmCP_STALLED_STAT1)); dev_info(adev->dev, " CP_STALLED_STAT2 = 0x%08x\n", RREG32(mmCP_STALLED_STAT2)); dev_info(adev->dev, " CP_STALLED_STAT3 = 0x%08x\n", RREG32(mmCP_STALLED_STAT3)); dev_info(adev->dev, " CP_CPF_BUSY_STAT = 0x%08x\n", RREG32(mmCP_CPF_BUSY_STAT)); dev_info(adev->dev, " CP_CPF_STALLED_STAT1 = 0x%08x\n", RREG32(mmCP_CPF_STALLED_STAT1)); dev_info(adev->dev, " CP_CPF_STATUS = 0x%08x\n", RREG32(mmCP_CPF_STATUS)); dev_info(adev->dev, " CP_CPC_BUSY_STAT = 
0x%08x\n", RREG32(mmCP_CPC_BUSY_STAT)); dev_info(adev->dev, " CP_CPC_STALLED_STAT1 = 0x%08x\n", RREG32(mmCP_CPC_STALLED_STAT1)); dev_info(adev->dev, " CP_CPC_STATUS = 0x%08x\n", RREG32(mmCP_CPC_STATUS)); for (i = 0; i < 32; i++) { dev_info(adev->dev, " GB_TILE_MODE%d=0x%08X\n", i, RREG32(mmGB_TILE_MODE0 + (i * 4))); } for (i = 0; i < 16; i++) { dev_info(adev->dev, " GB_MACROTILE_MODE%d=0x%08X\n", i, RREG32(mmGB_MACROTILE_MODE0 + (i * 4))); } for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { dev_info(adev->dev, " se: %d\n", i); gfx_v7_0_select_se_sh(adev, i, 0xffffffff); dev_info(adev->dev, " PA_SC_RASTER_CONFIG=0x%08X\n", RREG32(mmPA_SC_RASTER_CONFIG)); dev_info(adev->dev, " PA_SC_RASTER_CONFIG_1=0x%08X\n", RREG32(mmPA_SC_RASTER_CONFIG_1)); } gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff); dev_info(adev->dev, " GB_ADDR_CONFIG=0x%08X\n", RREG32(mmGB_ADDR_CONFIG)); dev_info(adev->dev, " HDP_ADDR_CONFIG=0x%08X\n", RREG32(mmHDP_ADDR_CONFIG)); dev_info(adev->dev, " DMIF_ADDR_CALC=0x%08X\n", RREG32(mmDMIF_ADDR_CALC)); dev_info(adev->dev, " SDMA0_TILING_CONFIG=0x%08X\n", RREG32(mmSDMA0_TILING_CONFIG + SDMA0_REGISTER_OFFSET)); dev_info(adev->dev, " SDMA1_TILING_CONFIG=0x%08X\n", RREG32(mmSDMA0_TILING_CONFIG + SDMA1_REGISTER_OFFSET)); dev_info(adev->dev, " UVD_UDEC_ADDR_CONFIG=0x%08X\n", RREG32(mmUVD_UDEC_ADDR_CONFIG)); dev_info(adev->dev, " UVD_UDEC_DB_ADDR_CONFIG=0x%08X\n", RREG32(mmUVD_UDEC_DB_ADDR_CONFIG)); dev_info(adev->dev, " UVD_UDEC_DBW_ADDR_CONFIG=0x%08X\n", RREG32(mmUVD_UDEC_DBW_ADDR_CONFIG)); dev_info(adev->dev, " CP_MEQ_THRESHOLDS=0x%08X\n", RREG32(mmCP_MEQ_THRESHOLDS)); dev_info(adev->dev, " SX_DEBUG_1=0x%08X\n", RREG32(mmSX_DEBUG_1)); dev_info(adev->dev, " TA_CNTL_AUX=0x%08X\n", RREG32(mmTA_CNTL_AUX)); dev_info(adev->dev, " SPI_CONFIG_CNTL=0x%08X\n", RREG32(mmSPI_CONFIG_CNTL)); dev_info(adev->dev, " SQ_CONFIG=0x%08X\n", RREG32(mmSQ_CONFIG)); dev_info(adev->dev, " DB_DEBUG=0x%08X\n", RREG32(mmDB_DEBUG)); dev_info(adev->dev, " 
DB_DEBUG2=0x%08X\n", RREG32(mmDB_DEBUG2)); dev_info(adev->dev, " DB_DEBUG3=0x%08X\n", RREG32(mmDB_DEBUG3)); dev_info(adev->dev, " CB_HW_CONTROL=0x%08X\n", RREG32(mmCB_HW_CONTROL)); dev_info(adev->dev, " SPI_CONFIG_CNTL_1=0x%08X\n", RREG32(mmSPI_CONFIG_CNTL_1)); dev_info(adev->dev, " PA_SC_FIFO_SIZE=0x%08X\n", RREG32(mmPA_SC_FIFO_SIZE)); dev_info(adev->dev, " VGT_NUM_INSTANCES=0x%08X\n", RREG32(mmVGT_NUM_INSTANCES)); dev_info(adev->dev, " CP_PERFMON_CNTL=0x%08X\n", RREG32(mmCP_PERFMON_CNTL)); dev_info(adev->dev, " PA_SC_FORCE_EOV_MAX_CNTS=0x%08X\n", RREG32(mmPA_SC_FORCE_EOV_MAX_CNTS)); dev_info(adev->dev, " VGT_CACHE_INVALIDATION=0x%08X\n", RREG32(mmVGT_CACHE_INVALIDATION)); dev_info(adev->dev, " VGT_GS_VERTEX_REUSE=0x%08X\n", RREG32(mmVGT_GS_VERTEX_REUSE)); dev_info(adev->dev, " PA_SC_LINE_STIPPLE_STATE=0x%08X\n", RREG32(mmPA_SC_LINE_STIPPLE_STATE)); dev_info(adev->dev, " PA_CL_ENHANCE=0x%08X\n", RREG32(mmPA_CL_ENHANCE)); dev_info(adev->dev, " PA_SC_ENHANCE=0x%08X\n", RREG32(mmPA_SC_ENHANCE)); dev_info(adev->dev, " CP_ME_CNTL=0x%08X\n", RREG32(mmCP_ME_CNTL)); dev_info(adev->dev, " CP_MAX_CONTEXT=0x%08X\n", RREG32(mmCP_MAX_CONTEXT)); dev_info(adev->dev, " CP_ENDIAN_SWAP=0x%08X\n", RREG32(mmCP_ENDIAN_SWAP)); dev_info(adev->dev, " CP_DEVICE_ID=0x%08X\n", RREG32(mmCP_DEVICE_ID)); dev_info(adev->dev, " CP_SEM_WAIT_TIMER=0x%08X\n", RREG32(mmCP_SEM_WAIT_TIMER)); if (adev->asic_type != CHIP_HAWAII) dev_info(adev->dev, " CP_SEM_INCOMPLETE_TIMER_CNTL=0x%08X\n", RREG32(mmCP_SEM_INCOMPLETE_TIMER_CNTL)); dev_info(adev->dev, " CP_RB_WPTR_DELAY=0x%08X\n", RREG32(mmCP_RB_WPTR_DELAY)); dev_info(adev->dev, " CP_RB_VMID=0x%08X\n", RREG32(mmCP_RB_VMID)); dev_info(adev->dev, " CP_RB0_CNTL=0x%08X\n", RREG32(mmCP_RB0_CNTL)); dev_info(adev->dev, " CP_RB0_WPTR=0x%08X\n", RREG32(mmCP_RB0_WPTR)); dev_info(adev->dev, " CP_RB0_RPTR_ADDR=0x%08X\n", RREG32(mmCP_RB0_RPTR_ADDR)); dev_info(adev->dev, " CP_RB0_RPTR_ADDR_HI=0x%08X\n", RREG32(mmCP_RB0_RPTR_ADDR_HI)); dev_info(adev->dev, " 
CP_RB0_CNTL=0x%08X\n", RREG32(mmCP_RB0_CNTL)); dev_info(adev->dev, " CP_RB0_BASE=0x%08X\n", RREG32(mmCP_RB0_BASE)); dev_info(adev->dev, " CP_RB0_BASE_HI=0x%08X\n", RREG32(mmCP_RB0_BASE_HI)); dev_info(adev->dev, " CP_MEC_CNTL=0x%08X\n", RREG32(mmCP_MEC_CNTL)); dev_info(adev->dev, " CP_CPF_DEBUG=0x%08X\n", RREG32(mmCP_CPF_DEBUG)); dev_info(adev->dev, " SCRATCH_ADDR=0x%08X\n", RREG32(mmSCRATCH_ADDR)); dev_info(adev->dev, " SCRATCH_UMSK=0x%08X\n", RREG32(mmSCRATCH_UMSK)); /* init the pipes */ mutex_lock(&adev->srbm_mutex); for (i = 0; i < (adev->gfx.mec.num_pipe * adev->gfx.mec.num_mec); i++) { int me = (i < 4) ? 1 : 2; int pipe = (i < 4) ? i : (i - 4); int queue; dev_info(adev->dev, " me: %d, pipe: %d\n", me, pipe); cik_srbm_select(adev, me, pipe, 0, 0); dev_info(adev->dev, " CP_HPD_EOP_BASE_ADDR=0x%08X\n", RREG32(mmCP_HPD_EOP_BASE_ADDR)); dev_info(adev->dev, " CP_HPD_EOP_BASE_ADDR_HI=0x%08X\n", RREG32(mmCP_HPD_EOP_BASE_ADDR_HI)); dev_info(adev->dev, " CP_HPD_EOP_VMID=0x%08X\n", RREG32(mmCP_HPD_EOP_VMID)); dev_info(adev->dev, " CP_HPD_EOP_CONTROL=0x%08X\n", RREG32(mmCP_HPD_EOP_CONTROL)); for (queue = 0; queue < 8; queue++) { cik_srbm_select(adev, me, pipe, queue, 0); dev_info(adev->dev, " queue: %d\n", queue); dev_info(adev->dev, " CP_PQ_WPTR_POLL_CNTL=0x%08X\n", RREG32(mmCP_PQ_WPTR_POLL_CNTL)); dev_info(adev->dev, " CP_HQD_PQ_DOORBELL_CONTROL=0x%08X\n", RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL)); dev_info(adev->dev, " CP_HQD_ACTIVE=0x%08X\n", RREG32(mmCP_HQD_ACTIVE)); dev_info(adev->dev, " CP_HQD_DEQUEUE_REQUEST=0x%08X\n", RREG32(mmCP_HQD_DEQUEUE_REQUEST)); dev_info(adev->dev, " CP_HQD_PQ_RPTR=0x%08X\n", RREG32(mmCP_HQD_PQ_RPTR)); dev_info(adev->dev, " CP_HQD_PQ_WPTR=0x%08X\n", RREG32(mmCP_HQD_PQ_WPTR)); dev_info(adev->dev, " CP_HQD_PQ_BASE=0x%08X\n", RREG32(mmCP_HQD_PQ_BASE)); dev_info(adev->dev, " CP_HQD_PQ_BASE_HI=0x%08X\n", RREG32(mmCP_HQD_PQ_BASE_HI)); dev_info(adev->dev, " CP_HQD_PQ_CONTROL=0x%08X\n", RREG32(mmCP_HQD_PQ_CONTROL)); dev_info(adev->dev, " 
CP_HQD_PQ_WPTR_POLL_ADDR=0x%08X\n", RREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR)); dev_info(adev->dev, " CP_HQD_PQ_WPTR_POLL_ADDR_HI=0x%08X\n", RREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR_HI)); dev_info(adev->dev, " CP_HQD_PQ_RPTR_REPORT_ADDR=0x%08X\n", RREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR)); dev_info(adev->dev, " CP_HQD_PQ_RPTR_REPORT_ADDR_HI=0x%08X\n", RREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI)); dev_info(adev->dev, " CP_HQD_PQ_DOORBELL_CONTROL=0x%08X\n", RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL)); dev_info(adev->dev, " CP_HQD_PQ_WPTR=0x%08X\n", RREG32(mmCP_HQD_PQ_WPTR)); dev_info(adev->dev, " CP_HQD_VMID=0x%08X\n", RREG32(mmCP_HQD_VMID)); dev_info(adev->dev, " CP_MQD_BASE_ADDR=0x%08X\n", RREG32(mmCP_MQD_BASE_ADDR)); dev_info(adev->dev, " CP_MQD_BASE_ADDR_HI=0x%08X\n", RREG32(mmCP_MQD_BASE_ADDR_HI)); dev_info(adev->dev, " CP_MQD_CONTROL=0x%08X\n", RREG32(mmCP_MQD_CONTROL)); } } cik_srbm_select(adev, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); dev_info(adev->dev, " CP_INT_CNTL_RING0=0x%08X\n", RREG32(mmCP_INT_CNTL_RING0)); dev_info(adev->dev, " RLC_LB_CNTL=0x%08X\n", RREG32(mmRLC_LB_CNTL)); dev_info(adev->dev, " RLC_CNTL=0x%08X\n", RREG32(mmRLC_CNTL)); dev_info(adev->dev, " RLC_CGCG_CGLS_CTRL=0x%08X\n", RREG32(mmRLC_CGCG_CGLS_CTRL)); dev_info(adev->dev, " RLC_LB_CNTR_INIT=0x%08X\n", RREG32(mmRLC_LB_CNTR_INIT)); dev_info(adev->dev, " RLC_LB_CNTR_MAX=0x%08X\n", RREG32(mmRLC_LB_CNTR_MAX)); dev_info(adev->dev, " RLC_LB_INIT_CU_MASK=0x%08X\n", RREG32(mmRLC_LB_INIT_CU_MASK)); dev_info(adev->dev, " RLC_LB_PARAMS=0x%08X\n", RREG32(mmRLC_LB_PARAMS)); dev_info(adev->dev, " RLC_LB_CNTL=0x%08X\n", RREG32(mmRLC_LB_CNTL)); dev_info(adev->dev, " RLC_MC_CNTL=0x%08X\n", RREG32(mmRLC_MC_CNTL)); dev_info(adev->dev, " RLC_UCODE_CNTL=0x%08X\n", RREG32(mmRLC_UCODE_CNTL)); if (adev->asic_type == CHIP_BONAIRE) dev_info(adev->dev, " RLC_DRIVER_CPDMA_STATUS=0x%08X\n", RREG32(mmRLC_DRIVER_CPDMA_STATUS)); mutex_lock(&adev->srbm_mutex); for (i = 0; i < 16; i++) { cik_srbm_select(adev, 0, 0, 0, i); dev_info(adev->dev, " 
VM %d:\n", i); dev_info(adev->dev, " SH_MEM_CONFIG=0x%08X\n", RREG32(mmSH_MEM_CONFIG)); dev_info(adev->dev, " SH_MEM_APE1_BASE=0x%08X\n", RREG32(mmSH_MEM_APE1_BASE)); dev_info(adev->dev, " SH_MEM_APE1_LIMIT=0x%08X\n", RREG32(mmSH_MEM_APE1_LIMIT)); dev_info(adev->dev, " SH_MEM_BASES=0x%08X\n", RREG32(mmSH_MEM_BASES)); } cik_srbm_select(adev, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); } static int gfx_v7_0_soft_reset(void *handle) { u32 grbm_soft_reset = 0, srbm_soft_reset = 0; u32 tmp; struct amdgpu_device *adev = (struct amdgpu_device *)handle; /* GRBM_STATUS */ tmp = RREG32(mmGRBM_STATUS); if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK | GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK | GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK | GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK | GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK | GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK)) grbm_soft_reset |= GRBM_SOFT_RESET__SOFT_RESET_CP_MASK | GRBM_SOFT_RESET__SOFT_RESET_GFX_MASK; if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) { grbm_soft_reset |= GRBM_SOFT_RESET__SOFT_RESET_CP_MASK; srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_GRBM_MASK; } /* GRBM_STATUS2 */ tmp = RREG32(mmGRBM_STATUS2); if (tmp & GRBM_STATUS2__RLC_BUSY_MASK) grbm_soft_reset |= GRBM_SOFT_RESET__SOFT_RESET_RLC_MASK; /* SRBM_STATUS */ tmp = RREG32(mmSRBM_STATUS); if (tmp & SRBM_STATUS__GRBM_RQ_PENDING_MASK) srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_GRBM_MASK; if (grbm_soft_reset || srbm_soft_reset) { gfx_v7_0_print_status((void *)adev); /* disable CG/PG */ gfx_v7_0_fini_pg(adev); gfx_v7_0_update_cg(adev, false); /* stop the rlc */ gfx_v7_0_rlc_stop(adev); /* Disable GFX parsing/prefetching */ WREG32(mmCP_ME_CNTL, CP_ME_CNTL__ME_HALT_MASK | CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK); /* Disable MEC parsing/prefetching */ WREG32(mmCP_MEC_CNTL, CP_MEC_CNTL__MEC_ME1_HALT_MASK | 
CP_MEC_CNTL__MEC_ME2_HALT_MASK); if (grbm_soft_reset) { tmp = RREG32(mmGRBM_SOFT_RESET); tmp |= grbm_soft_reset; dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp); WREG32(mmGRBM_SOFT_RESET, tmp); tmp = RREG32(mmGRBM_SOFT_RESET); udelay(50); tmp &= ~grbm_soft_reset; WREG32(mmGRBM_SOFT_RESET, tmp); tmp = RREG32(mmGRBM_SOFT_RESET); } if (srbm_soft_reset) { tmp = RREG32(mmSRBM_SOFT_RESET); tmp |= srbm_soft_reset; dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); WREG32(mmSRBM_SOFT_RESET, tmp); tmp = RREG32(mmSRBM_SOFT_RESET); udelay(50); tmp &= ~srbm_soft_reset; WREG32(mmSRBM_SOFT_RESET, tmp); tmp = RREG32(mmSRBM_SOFT_RESET); } /* Wait a little for things to settle down */ udelay(50); gfx_v7_0_print_status((void *)adev); } return 0; } static void gfx_v7_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev, enum amdgpu_interrupt_state state) { u32 cp_int_cntl; switch (state) { case AMDGPU_IRQ_STATE_DISABLE: cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0); cp_int_cntl &= ~CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK; WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl); break; case AMDGPU_IRQ_STATE_ENABLE: cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0); cp_int_cntl |= CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK; WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl); break; default: break; } } static void gfx_v7_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev, int me, int pipe, enum amdgpu_interrupt_state state) { u32 mec_int_cntl, mec_int_cntl_reg; /* * amdgpu controls only pipe 0 of MEC1. That's why this function only * handles the setting of interrupts for this specific pipe. All other * pipes' interrupts are set by amdkfd. 
*/
	if (me == 1) {
		switch (pipe) {
		case 0:
			mec_int_cntl_reg = mmCP_ME1_PIPE0_INT_CNTL;
			break;
		default:
			DRM_DEBUG("invalid pipe %d\n", pipe);
			return;
		}
	} else {
		/* MEC2 pipes are owned by amdkfd, not amdgpu */
		DRM_DEBUG("invalid me %d\n", me);
		return;
	}

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		/* read-modify-write: clear only the EOP (time stamp) enable bit */
		mec_int_cntl = RREG32(mec_int_cntl_reg);
		mec_int_cntl &= ~CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK;
		WREG32(mec_int_cntl_reg, mec_int_cntl);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		mec_int_cntl = RREG32(mec_int_cntl_reg);
		mec_int_cntl |= CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK;
		WREG32(mec_int_cntl_reg, mec_int_cntl);
		break;
	default:
		break;
	}
}

/*
 * Enable/disable the privileged-register-access fault interrupt on the
 * gfx ring (CP_INT_CNTL_RING0).  Read-modify-write so other enable bits
 * in the register are preserved.
 */
static int gfx_v7_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *src,
					     unsigned type,
					     enum amdgpu_interrupt_state state)
{
	u32 cp_int_cntl;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
		cp_int_cntl &= ~CP_INT_CNTL_RING0__PRIV_REG_INT_ENABLE_MASK;
		WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
		cp_int_cntl |= CP_INT_CNTL_RING0__PRIV_REG_INT_ENABLE_MASK;
		WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
		break;
	default:
		break;
	}

	return 0;
}

/*
 * Enable/disable the privileged/illegal-instruction fault interrupt on
 * the gfx ring.  Same pattern as the priv-reg variant above, different
 * enable bit.
 */
static int gfx_v7_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
					      struct amdgpu_irq_src *src,
					      unsigned type,
					      enum amdgpu_interrupt_state state)
{
	u32 cp_int_cntl;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
		cp_int_cntl &= ~CP_INT_CNTL_RING0__PRIV_INSTR_INT_ENABLE_MASK;
		WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
		cp_int_cntl |= CP_INT_CNTL_RING0__PRIV_INSTR_INT_ENABLE_MASK;
		WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
		break;
	default:
		break;
	}

	return 0;
}

/*
 * Dispatch an EOP interrupt state change either to the gfx ring or to
 * the matching MEC me/pipe handler, based on the irq source type.
 */
static int gfx_v7_0_set_eop_interrupt_state(struct amdgpu_device *adev,
					    struct amdgpu_irq_src *src,
					    unsigned type,
					    enum amdgpu_interrupt_state state)
{
	switch (type) {
	case AMDGPU_CP_IRQ_GFX_EOP:
gfx_v7_0_set_gfx_eop_interrupt_state(adev, state); break; case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP: gfx_v7_0_set_compute_eop_interrupt_state(adev, 1, 0, state); break; case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP: gfx_v7_0_set_compute_eop_interrupt_state(adev, 1, 1, state); break; case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP: gfx_v7_0_set_compute_eop_interrupt_state(adev, 1, 2, state); break; case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP: gfx_v7_0_set_compute_eop_interrupt_state(adev, 1, 3, state); break; case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP: gfx_v7_0_set_compute_eop_interrupt_state(adev, 2, 0, state); break; case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP: gfx_v7_0_set_compute_eop_interrupt_state(adev, 2, 1, state); break; case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP: gfx_v7_0_set_compute_eop_interrupt_state(adev, 2, 2, state); break; case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP: gfx_v7_0_set_compute_eop_interrupt_state(adev, 2, 3, state); break; default: break; } return 0; } static int gfx_v7_0_eop_irq(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) { u8 me_id, pipe_id; struct amdgpu_ring *ring; int i; DRM_DEBUG("IH: CP EOP\n"); me_id = (entry->ring_id & 0x0c) >> 2; pipe_id = (entry->ring_id & 0x03) >> 0; switch (me_id) { case 0: amdgpu_fence_process(&adev->gfx.gfx_ring[0]); break; case 1: case 2: for (i = 0; i < adev->gfx.num_compute_rings; i++) { ring = &adev->gfx.compute_ring[i]; if ((ring->me == me_id) & (ring->pipe == pipe_id)) amdgpu_fence_process(ring); } break; } return 0; } static int gfx_v7_0_priv_reg_irq(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) { DRM_ERROR("Illegal register access in command stream\n"); schedule_work(&adev->reset_work); return 0; } static int gfx_v7_0_priv_inst_irq(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) { DRM_ERROR("Illegal instruction in command stream\n"); // XXX soft reset the gfx block only 
	schedule_work(&adev->reset_work);
	return 0;
}

/* amd_ip_funcs .set_clockgating_state: enable/disable medium- and
 * coarse-grain clock gating with the GUI idle interrupt masked. */
static int gfx_v7_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	bool gate = false;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (state == AMD_CG_STATE_GATE)
		gate = true;

	gfx_v7_0_enable_gui_idle_interrupt(adev, false);
	/* order matters!  MGCG before CGCG when gating, the reverse
	 * when ungating. */
	if (gate) {
		gfx_v7_0_enable_mgcg(adev, true);
		gfx_v7_0_enable_cgcg(adev, true);
	} else {
		gfx_v7_0_enable_cgcg(adev, false);
		gfx_v7_0_enable_mgcg(adev, false);
	}
	gfx_v7_0_enable_gui_idle_interrupt(adev, true);

	return 0;
}

/* amd_ip_funcs .set_powergating_state: apply gfx power gating if any
 * of the supported PG features is advertised in adev->pg_flags. */
static int gfx_v7_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	bool gate = false;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (state == AMD_PG_STATE_GATE)
		gate = true;

	if (adev->pg_flags & (AMDGPU_PG_SUPPORT_GFX_PG |
			      AMDGPU_PG_SUPPORT_GFX_SMG |
			      AMDGPU_PG_SUPPORT_GFX_DMG |
			      AMDGPU_PG_SUPPORT_CP |
			      AMDGPU_PG_SUPPORT_GDS |
			      AMDGPU_PG_SUPPORT_RLC_SMU_HS)) {
		gfx_v7_0_update_gfx_pg(adev, gate);
		if (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_PG) {
			gfx_v7_0_enable_cp_pg(adev, gate);
			gfx_v7_0_enable_gds_pg(adev, gate);
		}
	}

	return 0;
}

/* IP block callback table for the gfx v7 block. */
const struct amd_ip_funcs gfx_v7_0_ip_funcs = {
	.early_init = gfx_v7_0_early_init,
	.late_init = NULL,
	.sw_init = gfx_v7_0_sw_init,
	.sw_fini = gfx_v7_0_sw_fini,
	.hw_init = gfx_v7_0_hw_init,
	.hw_fini = gfx_v7_0_hw_fini,
	.suspend = gfx_v7_0_suspend,
	.resume = gfx_v7_0_resume,
	.is_idle = gfx_v7_0_is_idle,
	.wait_for_idle = gfx_v7_0_wait_for_idle,
	.soft_reset = gfx_v7_0_soft_reset,
	.print_status = gfx_v7_0_print_status,
	.set_clockgating_state = gfx_v7_0_set_clockgating_state,
	.set_powergating_state = gfx_v7_0_set_powergating_state,
};

/**
 * gfx_v7_0_ring_is_lockup - check if the 3D engine is locked up
 *
 * @adev: amdgpu_device pointer
 * @ring: amdgpu_ring structure holding ring information
 *
 * Check if the 3D engine is locked up (CIK).
 * Returns true if the engine is locked, false if not.
 */
static bool gfx_v7_0_ring_is_lockup(struct amdgpu_ring *ring)
{
	if (gfx_v7_0_is_idle(ring->adev)) {
		/* engine is idle: refresh the lockup tracking state and
		 * report no lockup */
		amdgpu_ring_lockup_update(ring);
		return false;
	}
	return amdgpu_ring_test_lockup(ring);
}

/* Ring callback table for the gfx ring. */
static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
	.get_rptr = gfx_v7_0_ring_get_rptr_gfx,
	.get_wptr = gfx_v7_0_ring_get_wptr_gfx,
	.set_wptr = gfx_v7_0_ring_set_wptr_gfx,
	.parse_cs = NULL,
	.emit_ib = gfx_v7_0_ring_emit_ib_gfx,
	.emit_fence = gfx_v7_0_ring_emit_fence_gfx,
	.emit_semaphore = gfx_v7_0_ring_emit_semaphore,
	.emit_vm_flush = gfx_v7_0_ring_emit_vm_flush,
	.emit_gds_switch = gfx_v7_0_ring_emit_gds_switch,
	.emit_hdp_flush = gfx_v7_0_ring_emit_hdp_flush,
	.test_ring = gfx_v7_0_ring_test_ring,
	.test_ib = gfx_v7_0_ring_test_ib,
	.is_lockup = gfx_v7_0_ring_is_lockup,
};

/* Ring callback table for the compute rings. */
static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
	.get_rptr = gfx_v7_0_ring_get_rptr_compute,
	.get_wptr = gfx_v7_0_ring_get_wptr_compute,
	.set_wptr = gfx_v7_0_ring_set_wptr_compute,
	.parse_cs = NULL,
	.emit_ib = gfx_v7_0_ring_emit_ib_compute,
	.emit_fence = gfx_v7_0_ring_emit_fence_compute,
	.emit_semaphore = gfx_v7_0_ring_emit_semaphore,
	.emit_vm_flush = gfx_v7_0_ring_emit_vm_flush,
	.emit_gds_switch = gfx_v7_0_ring_emit_gds_switch,
	.emit_hdp_flush = gfx_v7_0_ring_emit_hdp_flush,
	.test_ring = gfx_v7_0_ring_test_ring,
	.test_ib = gfx_v7_0_ring_test_ib,
	.is_lockup = gfx_v7_0_ring_is_lockup,
};

/* Attach the gfx/compute callback tables to every ring. */
static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
		adev->gfx.gfx_ring[i].funcs = &gfx_v7_0_ring_funcs_gfx;
	for (i = 0; i < adev->gfx.num_compute_rings; i++)
		adev->gfx.compute_ring[i].funcs = &gfx_v7_0_ring_funcs_compute;
}

static const struct amdgpu_irq_src_funcs gfx_v7_0_eop_irq_funcs = {
	.set = gfx_v7_0_set_eop_interrupt_state,
	.process = gfx_v7_0_eop_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v7_0_priv_reg_irq_funcs = {
	.set = gfx_v7_0_set_priv_reg_fault_state,
	.process = gfx_v7_0_priv_reg_irq,
};

static const
struct amdgpu_irq_src_funcs gfx_v7_0_priv_inst_irq_funcs = {
	.set = gfx_v7_0_set_priv_inst_fault_state,
	.process = gfx_v7_0_priv_inst_irq,
};

/* Register the gfx interrupt sources (EOP + the two CP fault irqs). */
static void gfx_v7_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
	adev->gfx.eop_irq.funcs = &gfx_v7_0_eop_irq_funcs;

	adev->gfx.priv_reg_irq.num_types = 1;
	adev->gfx.priv_reg_irq.funcs = &gfx_v7_0_priv_reg_irq_funcs;

	adev->gfx.priv_inst_irq.num_types = 1;
	adev->gfx.priv_inst_irq.funcs = &gfx_v7_0_priv_inst_irq_funcs;
}

/* Populate the GDS (global data share) size/partition info; partition
 * sizes depend on whether the ASIC reports a 64K GDS. */
static void gfx_v7_0_set_gds_init(struct amdgpu_device *adev)
{
	/* init asic gds info */
	adev->gds.mem.total_size = RREG32(mmGDS_VMID0_SIZE);
	adev->gds.gws.total_size = 64;
	adev->gds.oa.total_size = 16;

	if (adev->gds.mem.total_size == 64 * 1024) {
		adev->gds.mem.gfx_partition_size = 4096;
		adev->gds.mem.cs_partition_size = 4096;

		adev->gds.gws.gfx_partition_size = 4;
		adev->gds.gws.cs_partition_size = 4;

		adev->gds.oa.gfx_partition_size = 4;
		adev->gds.oa.cs_partition_size = 1;
	} else {
		adev->gds.mem.gfx_partition_size = 1024;
		adev->gds.mem.cs_partition_size = 1024;

		adev->gds.gws.gfx_partition_size = 16;
		adev->gds.gws.cs_partition_size = 16;

		adev->gds.oa.gfx_partition_size = 4;
		adev->gds.oa.cs_partition_size = 4;
	}
}

/* Gather the compute-unit bitmap per SE/SH, count active CUs and build
 * the always-on CU mask (at most 2 always-on CUs per SH).
 * Returns 0 on success, -EINVAL on NULL arguments. */
int gfx_v7_0_get_cu_info(struct amdgpu_device *adev,
			 struct amdgpu_cu_info *cu_info)
{
	int i, j, k, counter, active_cu_number = 0;
	u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;

	if (!adev || !cu_info)
		return -EINVAL;

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			mask = 1;
			ao_bitmap = 0;
			counter = 0;
			bitmap = gfx_v7_0_get_cu_active_bitmap(adev, i, j);
			cu_info->bitmap[i][j] = bitmap;

			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k ++) {
				if (bitmap & mask) {
					/* first two active CUs of each SH
					 * become "always on" CUs */
					if (counter < 2)
						ao_bitmap |= mask;
					counter ++;
				}
				mask <<= 1;
			}
			active_cu_number += counter;
			ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
		}
	}

	cu_info->number = active_cu_number;
	cu_info->ao_cu_mask = ao_cu_mask;
	mutex_unlock(&adev->grbm_idx_mutex);
	return 0;
}
gpl-2.0
go2ev-devteam/gopro-linux
drivers/net/can/at91_can.c
110
31845
/* * at91_can.c - CAN network driver for AT91 SoC CAN controller * * (C) 2007 by Hans J. Koch <hjk@hansjkoch.de> * (C) 2008, 2009, 2010, 2011 by Marc Kleine-Budde <kernel@pengutronix.de> * * This software may be distributed under the terms of the GNU General * Public License ("GPL") version 2 as distributed in the 'COPYING' * file from the main directory of the linux kernel source. * * Send feedback to <socketcan-users@lists.berlios.de> * * * Your platform definition file should specify something like: * * static struct at91_can_data ek_can_data = { * transceiver_switch = sam9263ek_transceiver_switch, * }; * * at91_add_device_can(&ek_can_data); * */ #include <linux/clk.h> #include <linux/errno.h> #include <linux/if_arp.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/platform_device.h> #include <linux/rtnetlink.h> #include <linux/skbuff.h> #include <linux/spinlock.h> #include <linux/string.h> #include <linux/types.h> #include <linux/can/dev.h> #include <linux/can/error.h> #include <mach/board.h> #define AT91_NAPI_WEIGHT 11 /* * RX/TX Mailbox split * don't dare to touch */ #define AT91_MB_RX_NUM 11 #define AT91_MB_TX_SHIFT 2 #define AT91_MB_RX_FIRST 1 #define AT91_MB_RX_LAST (AT91_MB_RX_FIRST + AT91_MB_RX_NUM - 1) #define AT91_MB_RX_MASK(i) ((1 << (i)) - 1) #define AT91_MB_RX_SPLIT 8 #define AT91_MB_RX_LOW_LAST (AT91_MB_RX_SPLIT - 1) #define AT91_MB_RX_LOW_MASK (AT91_MB_RX_MASK(AT91_MB_RX_SPLIT) & \ ~AT91_MB_RX_MASK(AT91_MB_RX_FIRST)) #define AT91_MB_TX_NUM (1 << AT91_MB_TX_SHIFT) #define AT91_MB_TX_FIRST (AT91_MB_RX_LAST + 1) #define AT91_MB_TX_LAST (AT91_MB_TX_FIRST + AT91_MB_TX_NUM - 1) #define AT91_NEXT_PRIO_SHIFT (AT91_MB_TX_SHIFT) #define AT91_NEXT_PRIO_MASK (0xf << AT91_MB_TX_SHIFT) #define AT91_NEXT_MB_MASK (AT91_MB_TX_NUM - 1) #define AT91_NEXT_MASK ((AT91_MB_TX_NUM - 1) | AT91_NEXT_PRIO_MASK) /* Common registers */ enum at91_reg { AT91_MR = 0x000, 
AT91_IER = 0x004, AT91_IDR = 0x008, AT91_IMR = 0x00C, AT91_SR = 0x010, AT91_BR = 0x014, AT91_TIM = 0x018, AT91_TIMESTP = 0x01C, AT91_ECR = 0x020, AT91_TCR = 0x024, AT91_ACR = 0x028, }; /* Mailbox registers (0 <= i <= 15) */ #define AT91_MMR(i) (enum at91_reg)(0x200 + ((i) * 0x20)) #define AT91_MAM(i) (enum at91_reg)(0x204 + ((i) * 0x20)) #define AT91_MID(i) (enum at91_reg)(0x208 + ((i) * 0x20)) #define AT91_MFID(i) (enum at91_reg)(0x20C + ((i) * 0x20)) #define AT91_MSR(i) (enum at91_reg)(0x210 + ((i) * 0x20)) #define AT91_MDL(i) (enum at91_reg)(0x214 + ((i) * 0x20)) #define AT91_MDH(i) (enum at91_reg)(0x218 + ((i) * 0x20)) #define AT91_MCR(i) (enum at91_reg)(0x21C + ((i) * 0x20)) /* Register bits */ #define AT91_MR_CANEN BIT(0) #define AT91_MR_LPM BIT(1) #define AT91_MR_ABM BIT(2) #define AT91_MR_OVL BIT(3) #define AT91_MR_TEOF BIT(4) #define AT91_MR_TTM BIT(5) #define AT91_MR_TIMFRZ BIT(6) #define AT91_MR_DRPT BIT(7) #define AT91_SR_RBSY BIT(29) #define AT91_MMR_PRIO_SHIFT (16) #define AT91_MID_MIDE BIT(29) #define AT91_MSR_MRTR BIT(20) #define AT91_MSR_MABT BIT(22) #define AT91_MSR_MRDY BIT(23) #define AT91_MSR_MMI BIT(24) #define AT91_MCR_MRTR BIT(20) #define AT91_MCR_MTCR BIT(23) /* Mailbox Modes */ enum at91_mb_mode { AT91_MB_MODE_DISABLED = 0, AT91_MB_MODE_RX = 1, AT91_MB_MODE_RX_OVRWR = 2, AT91_MB_MODE_TX = 3, AT91_MB_MODE_CONSUMER = 4, AT91_MB_MODE_PRODUCER = 5, }; /* Interrupt mask bits */ #define AT91_IRQ_MB_RX ((1 << (AT91_MB_RX_LAST + 1)) \ - (1 << AT91_MB_RX_FIRST)) #define AT91_IRQ_MB_TX ((1 << (AT91_MB_TX_LAST + 1)) \ - (1 << AT91_MB_TX_FIRST)) #define AT91_IRQ_MB_ALL (AT91_IRQ_MB_RX | AT91_IRQ_MB_TX) #define AT91_IRQ_ERRA (1 << 16) #define AT91_IRQ_WARN (1 << 17) #define AT91_IRQ_ERRP (1 << 18) #define AT91_IRQ_BOFF (1 << 19) #define AT91_IRQ_SLEEP (1 << 20) #define AT91_IRQ_WAKEUP (1 << 21) #define AT91_IRQ_TOVF (1 << 22) #define AT91_IRQ_TSTP (1 << 23) #define AT91_IRQ_CERR (1 << 24) #define AT91_IRQ_SERR (1 << 25) #define AT91_IRQ_AERR (1 << 26) 
#define AT91_IRQ_FERR (1 << 27) #define AT91_IRQ_BERR (1 << 28) #define AT91_IRQ_ERR_ALL (0x1fff0000) #define AT91_IRQ_ERR_FRAME (AT91_IRQ_CERR | AT91_IRQ_SERR | \ AT91_IRQ_AERR | AT91_IRQ_FERR | AT91_IRQ_BERR) #define AT91_IRQ_ERR_LINE (AT91_IRQ_ERRA | AT91_IRQ_WARN | \ AT91_IRQ_ERRP | AT91_IRQ_BOFF) #define AT91_IRQ_ALL (0x1fffffff) struct at91_priv { struct can_priv can; /* must be the first member! */ struct net_device *dev; struct napi_struct napi; void __iomem *reg_base; u32 reg_sr; unsigned int tx_next; unsigned int tx_echo; unsigned int rx_next; struct clk *clk; struct at91_can_data *pdata; canid_t mb0_id; }; static struct can_bittiming_const at91_bittiming_const = { .name = KBUILD_MODNAME, .tseg1_min = 4, .tseg1_max = 16, .tseg2_min = 2, .tseg2_max = 8, .sjw_max = 4, .brp_min = 2, .brp_max = 128, .brp_inc = 1, }; static inline int get_tx_next_mb(const struct at91_priv *priv) { return (priv->tx_next & AT91_NEXT_MB_MASK) + AT91_MB_TX_FIRST; } static inline int get_tx_next_prio(const struct at91_priv *priv) { return (priv->tx_next >> AT91_NEXT_PRIO_SHIFT) & 0xf; } static inline int get_tx_echo_mb(const struct at91_priv *priv) { return (priv->tx_echo & AT91_NEXT_MB_MASK) + AT91_MB_TX_FIRST; } static inline u32 at91_read(const struct at91_priv *priv, enum at91_reg reg) { return __raw_readl(priv->reg_base + reg); } static inline void at91_write(const struct at91_priv *priv, enum at91_reg reg, u32 value) { __raw_writel(value, priv->reg_base + reg); } static inline void set_mb_mode_prio(const struct at91_priv *priv, unsigned int mb, enum at91_mb_mode mode, int prio) { at91_write(priv, AT91_MMR(mb), (mode << 24) | (prio << 16)); } static inline void set_mb_mode(const struct at91_priv *priv, unsigned int mb, enum at91_mb_mode mode) { set_mb_mode_prio(priv, mb, mode, 0); } static inline u32 at91_can_id_to_reg_mid(canid_t can_id) { u32 reg_mid; if (can_id & CAN_EFF_FLAG) reg_mid = (can_id & CAN_EFF_MASK) | AT91_MID_MIDE; else reg_mid = (can_id & CAN_SFF_MASK) << 18; 
	return reg_mid;
}

/*
 * Switch transceiver on or off
 */
static void at91_transceiver_switch(const struct at91_priv *priv, int on)
{
	/* the platform callback is optional */
	if (priv->pdata && priv->pdata->transceiver_switch)
		priv->pdata->transceiver_switch(on);
}

/* Program all 16 mailboxes into their fixed roles and reset the
 * driver's tx/rx bookkeeping pointers. */
static void at91_setup_mailboxes(struct net_device *dev)
{
	struct at91_priv *priv = netdev_priv(dev);
	unsigned int i;
	u32 reg_mid;

	/*
	 * Due to a chip bug (errata 50.2.6.3 & 50.3.5.3) the first
	 * mailbox is disabled. The next 11 mailboxes are used as a
	 * reception FIFO. The last mailbox is configured with
	 * overwrite option. The overwrite flag indicates a FIFO
	 * overflow.
	 */
	reg_mid = at91_can_id_to_reg_mid(priv->mb0_id);
	for (i = 0; i < AT91_MB_RX_FIRST; i++) {
		set_mb_mode(priv, i, AT91_MB_MODE_DISABLED);
		at91_write(priv, AT91_MID(i), reg_mid);
		at91_write(priv, AT91_MCR(i), 0x0);	/* clear dlc */
	}

	for (i = AT91_MB_RX_FIRST; i < AT91_MB_RX_LAST; i++)
		set_mb_mode(priv, i, AT91_MB_MODE_RX);
	set_mb_mode(priv, AT91_MB_RX_LAST, AT91_MB_MODE_RX_OVRWR);

	/* reset acceptance mask and id register */
	for (i = AT91_MB_RX_FIRST; i <= AT91_MB_RX_LAST; i++) {
		at91_write(priv, AT91_MAM(i), 0x0);
		at91_write(priv, AT91_MID(i), AT91_MID_MIDE);
	}

	/* The last 4 mailboxes are used for transmitting. */
	for (i = AT91_MB_TX_FIRST; i <= AT91_MB_TX_LAST; i++)
		set_mb_mode_prio(priv, i, AT91_MB_MODE_TX, 0);

	/* Reset tx and rx helper pointers */
	priv->tx_next = priv->tx_echo = 0;
	priv->rx_next = AT91_MB_RX_FIRST;
}

/* Write the bit-timing parameters computed by the can-dev core into
 * the baudrate register (AT91_BR). */
static int at91_set_bittiming(struct net_device *dev)
{
	const struct at91_priv *priv = netdev_priv(dev);
	const struct can_bittiming *bt = &priv->can.bittiming;
	u32 reg_br;

	reg_br = ((priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) ?
1 << 24 : 0) | ((bt->brp - 1) << 16) | ((bt->sjw - 1) << 12) | ((bt->prop_seg - 1) << 8) | ((bt->phase_seg1 - 1) << 4) | ((bt->phase_seg2 - 1) << 0); netdev_info(dev, "writing AT91_BR: 0x%08x\n", reg_br); at91_write(priv, AT91_BR, reg_br); return 0; } static int at91_get_berr_counter(const struct net_device *dev, struct can_berr_counter *bec) { const struct at91_priv *priv = netdev_priv(dev); u32 reg_ecr = at91_read(priv, AT91_ECR); bec->rxerr = reg_ecr & 0xff; bec->txerr = reg_ecr >> 16; return 0; } static void at91_chip_start(struct net_device *dev) { struct at91_priv *priv = netdev_priv(dev); u32 reg_mr, reg_ier; /* disable interrupts */ at91_write(priv, AT91_IDR, AT91_IRQ_ALL); /* disable chip */ reg_mr = at91_read(priv, AT91_MR); at91_write(priv, AT91_MR, reg_mr & ~AT91_MR_CANEN); at91_set_bittiming(dev); at91_setup_mailboxes(dev); at91_transceiver_switch(priv, 1); /* enable chip */ at91_write(priv, AT91_MR, AT91_MR_CANEN); priv->can.state = CAN_STATE_ERROR_ACTIVE; /* Enable interrupts */ reg_ier = AT91_IRQ_MB_RX | AT91_IRQ_ERRP | AT91_IRQ_ERR_FRAME; at91_write(priv, AT91_IDR, AT91_IRQ_ALL); at91_write(priv, AT91_IER, reg_ier); } static void at91_chip_stop(struct net_device *dev, enum can_state state) { struct at91_priv *priv = netdev_priv(dev); u32 reg_mr; /* disable interrupts */ at91_write(priv, AT91_IDR, AT91_IRQ_ALL); reg_mr = at91_read(priv, AT91_MR); at91_write(priv, AT91_MR, reg_mr & ~AT91_MR_CANEN); at91_transceiver_switch(priv, 0); priv->can.state = state; } /* * theory of operation: * * According to the datasheet priority 0 is the highest priority, 15 * is the lowest. If two mailboxes have the same priority level the * message of the mailbox with the lowest number is sent first. * * We use the first TX mailbox (AT91_MB_TX_FIRST) with prio 0, then * the next mailbox with prio 0, and so on, until all mailboxes are * used. Then we start from the beginning with mailbox * AT91_MB_TX_FIRST, but with prio 1, mailbox AT91_MB_TX_FIRST + 1 * prio 1. 
When we reach the last mailbox with prio 15, we have to * stop sending, waiting for all messages to be delivered, then start * again with mailbox AT91_MB_TX_FIRST prio 0. * * We use the priv->tx_next as counter for the next transmission * mailbox, but without the offset AT91_MB_TX_FIRST. The lower bits * encode the mailbox number, the upper 4 bits the mailbox priority: * * priv->tx_next = (prio << AT91_NEXT_PRIO_SHIFT) || * (mb - AT91_MB_TX_FIRST); * */ static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct at91_priv *priv = netdev_priv(dev); struct net_device_stats *stats = &dev->stats; struct can_frame *cf = (struct can_frame *)skb->data; unsigned int mb, prio; u32 reg_mid, reg_mcr; if (can_dropped_invalid_skb(dev, skb)) return NETDEV_TX_OK; mb = get_tx_next_mb(priv); prio = get_tx_next_prio(priv); if (unlikely(!(at91_read(priv, AT91_MSR(mb)) & AT91_MSR_MRDY))) { netif_stop_queue(dev); netdev_err(dev, "BUG! TX buffer full when queue awake!\n"); return NETDEV_TX_BUSY; } reg_mid = at91_can_id_to_reg_mid(cf->can_id); reg_mcr = ((cf->can_id & CAN_RTR_FLAG) ? AT91_MCR_MRTR : 0) | (cf->can_dlc << 16) | AT91_MCR_MTCR; /* disable MB while writing ID (see datasheet) */ set_mb_mode(priv, mb, AT91_MB_MODE_DISABLED); at91_write(priv, AT91_MID(mb), reg_mid); set_mb_mode_prio(priv, mb, AT91_MB_MODE_TX, prio); at91_write(priv, AT91_MDL(mb), *(u32 *)(cf->data + 0)); at91_write(priv, AT91_MDH(mb), *(u32 *)(cf->data + 4)); /* This triggers transmission */ at91_write(priv, AT91_MCR(mb), reg_mcr); stats->tx_bytes += cf->can_dlc; /* _NOTE_: substract AT91_MB_TX_FIRST offset from mb! */ can_put_echo_skb(skb, dev, mb - AT91_MB_TX_FIRST); /* * we have to stop the queue and deliver all messages in case * of a prio+mb counter wrap around. This is the case if * tx_next buffer prio and mailbox equals 0. 
* * also stop the queue if next buffer is still in use * (== not ready) */ priv->tx_next++; if (!(at91_read(priv, AT91_MSR(get_tx_next_mb(priv))) & AT91_MSR_MRDY) || (priv->tx_next & AT91_NEXT_MASK) == 0) netif_stop_queue(dev); /* Enable interrupt for this mailbox */ at91_write(priv, AT91_IER, 1 << mb); return NETDEV_TX_OK; } /** * at91_activate_rx_low - activate lower rx mailboxes * @priv: a91 context * * Reenables the lower mailboxes for reception of new CAN messages */ static inline void at91_activate_rx_low(const struct at91_priv *priv) { u32 mask = AT91_MB_RX_LOW_MASK; at91_write(priv, AT91_TCR, mask); } /** * at91_activate_rx_mb - reactive single rx mailbox * @priv: a91 context * @mb: mailbox to reactivate * * Reenables given mailbox for reception of new CAN messages */ static inline void at91_activate_rx_mb(const struct at91_priv *priv, unsigned int mb) { u32 mask = 1 << mb; at91_write(priv, AT91_TCR, mask); } /** * at91_rx_overflow_err - send error frame due to rx overflow * @dev: net device */ static void at91_rx_overflow_err(struct net_device *dev) { struct net_device_stats *stats = &dev->stats; struct sk_buff *skb; struct can_frame *cf; netdev_dbg(dev, "RX buffer overflow\n"); stats->rx_over_errors++; stats->rx_errors++; skb = alloc_can_err_skb(dev, &cf); if (unlikely(!skb)) return; cf->can_id |= CAN_ERR_CRTL; cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; netif_receive_skb(skb); stats->rx_packets++; stats->rx_bytes += cf->can_dlc; } /** * at91_read_mb - read CAN msg from mailbox (lowlevel impl) * @dev: net device * @mb: mailbox number to read from * @cf: can frame where to store message * * Reads a CAN message from the given mailbox and stores data into * given can frame. "mb" and "cf" must be valid. 
 */
static void at91_read_mb(struct net_device *dev, unsigned int mb,
			 struct can_frame *cf)
{
	const struct at91_priv *priv = netdev_priv(dev);
	u32 reg_msr, reg_mid;

	/* decode extended vs standard id from the MIDE bit */
	reg_mid = at91_read(priv, AT91_MID(mb));
	if (reg_mid & AT91_MID_MIDE)
		cf->can_id = ((reg_mid >> 0) & CAN_EFF_MASK) | CAN_EFF_FLAG;
	else
		cf->can_id = (reg_mid >> 18) & CAN_SFF_MASK;

	reg_msr = at91_read(priv, AT91_MSR(mb));
	if (reg_msr & AT91_MSR_MRTR)
		cf->can_id |= CAN_RTR_FLAG;
	cf->can_dlc = get_can_dlc((reg_msr >> 16) & 0xf);

	*(u32 *)(cf->data + 0) = at91_read(priv, AT91_MDL(mb));
	*(u32 *)(cf->data + 4) = at91_read(priv, AT91_MDH(mb));

	/* allow RX of extended frames */
	at91_write(priv, AT91_MID(mb), AT91_MID_MIDE);

	/* the MMI bit on the overwrite mailbox signals a lost frame */
	if (unlikely(mb == AT91_MB_RX_LAST && reg_msr & AT91_MSR_MMI))
		at91_rx_overflow_err(dev);
}

/**
 * at91_read_msg - read CAN message from mailbox
 * @dev: net device
 * @mb: mail box to read from
 *
 * Reads a CAN message from given mailbox, and put into linux network
 * RX queue, does all housekeeping chores (stats, ...)
 */
static void at91_read_msg(struct net_device *dev, unsigned int mb)
{
	struct net_device_stats *stats = &dev->stats;
	struct can_frame *cf;
	struct sk_buff *skb;

	skb = alloc_can_skb(dev, &cf);
	if (unlikely(!skb)) {
		/* no memory for an skb: the frame is dropped */
		stats->rx_dropped++;
		return;
	}

	at91_read_mb(dev, mb, cf);
	netif_receive_skb(skb);

	stats->rx_packets++;
	stats->rx_bytes += cf->can_dlc;
}

/**
 * at91_poll_rx - read multiple CAN messages from mailboxes
 * @dev: net device
 * @quota: max number of pkgs we're allowed to receive
 *
 * Theory of Operation:
 *
 * 11 of the 16 mailboxes on the chip are reserved for RX. we split
 * them into 2 groups. The lower group holds 7 and upper 4 mailboxes.
 *
 * Like it or not, but the chip always saves a received CAN message
 * into the first free mailbox it finds (starting with the
 * lowest). This makes it very difficult to read the messages in the
 * right order from the chip. This is how we work around that problem:
 *
 * The first message goes into mb nr. 1 and issues an interrupt.
All * rx ints are disabled in the interrupt handler and a napi poll is * scheduled. We read the mailbox, but do _not_ reenable the mb (to * receive another message). * * lower mbxs upper * ____^______ __^__ * / \ / \ * +-+-+-+-+-+-+-+-++-+-+-+-+ * | |x|x|x|x|x|x|x|| | | | | * +-+-+-+-+-+-+-+-++-+-+-+-+ * 0 0 0 0 0 0 0 0 0 0 1 1 \ mail * 0 1 2 3 4 5 6 7 8 9 0 1 / box * ^ * | * \ * unused, due to chip bug * * The variable priv->rx_next points to the next mailbox to read a * message from. As long we're in the lower mailboxes we just read the * mailbox but not reenable it. * * With completion of the last of the lower mailboxes, we reenable the * whole first group, but continue to look for filled mailboxes in the * upper mailboxes. Imagine the second group like overflow mailboxes, * which takes CAN messages if the lower goup is full. While in the * upper group we reenable the mailbox right after reading it. Giving * the chip more room to store messages. * * After finishing we look again in the lower group if we've still * quota. 
* */ static int at91_poll_rx(struct net_device *dev, int quota) { struct at91_priv *priv = netdev_priv(dev); u32 reg_sr = at91_read(priv, AT91_SR); const unsigned long *addr = (unsigned long *)&reg_sr; unsigned int mb; int received = 0; if (priv->rx_next > AT91_MB_RX_LOW_LAST && reg_sr & AT91_MB_RX_LOW_MASK) netdev_info(dev, "order of incoming frames cannot be guaranteed\n"); again: for (mb = find_next_bit(addr, AT91_MB_RX_LAST + 1, priv->rx_next); mb < AT91_MB_RX_LAST + 1 && quota > 0; reg_sr = at91_read(priv, AT91_SR), mb = find_next_bit(addr, AT91_MB_RX_LAST + 1, ++priv->rx_next)) { at91_read_msg(dev, mb); /* reactivate mailboxes */ if (mb == AT91_MB_RX_LOW_LAST) /* all lower mailboxed, if just finished it */ at91_activate_rx_low(priv); else if (mb > AT91_MB_RX_LOW_LAST) /* only the mailbox we read */ at91_activate_rx_mb(priv, mb); received++; quota--; } /* upper group completed, look again in lower */ if (priv->rx_next > AT91_MB_RX_LOW_LAST && quota > 0 && mb > AT91_MB_RX_LAST) { priv->rx_next = AT91_MB_RX_FIRST; goto again; } return received; } static void at91_poll_err_frame(struct net_device *dev, struct can_frame *cf, u32 reg_sr) { struct at91_priv *priv = netdev_priv(dev); /* CRC error */ if (reg_sr & AT91_IRQ_CERR) { netdev_dbg(dev, "CERR irq\n"); dev->stats.rx_errors++; priv->can.can_stats.bus_error++; cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; } /* Stuffing Error */ if (reg_sr & AT91_IRQ_SERR) { netdev_dbg(dev, "SERR irq\n"); dev->stats.rx_errors++; priv->can.can_stats.bus_error++; cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; cf->data[2] |= CAN_ERR_PROT_STUFF; } /* Acknowledgement Error */ if (reg_sr & AT91_IRQ_AERR) { netdev_dbg(dev, "AERR irq\n"); dev->stats.tx_errors++; cf->can_id |= CAN_ERR_ACK; } /* Form error */ if (reg_sr & AT91_IRQ_FERR) { netdev_dbg(dev, "FERR irq\n"); dev->stats.rx_errors++; priv->can.can_stats.bus_error++; cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; cf->data[2] |= CAN_ERR_PROT_FORM; } /* Bit Error */ if (reg_sr & 
AT91_IRQ_BERR) { netdev_dbg(dev, "BERR irq\n"); dev->stats.tx_errors++; priv->can.can_stats.bus_error++; cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; cf->data[2] |= CAN_ERR_PROT_BIT; } } static int at91_poll_err(struct net_device *dev, int quota, u32 reg_sr) { struct sk_buff *skb; struct can_frame *cf; if (quota == 0) return 0; skb = alloc_can_err_skb(dev, &cf); if (unlikely(!skb)) return 0; at91_poll_err_frame(dev, cf, reg_sr); netif_receive_skb(skb); dev->stats.rx_packets++; dev->stats.rx_bytes += cf->can_dlc; return 1; } static int at91_poll(struct napi_struct *napi, int quota) { struct net_device *dev = napi->dev; const struct at91_priv *priv = netdev_priv(dev); u32 reg_sr = at91_read(priv, AT91_SR); int work_done = 0; if (reg_sr & AT91_IRQ_MB_RX) work_done += at91_poll_rx(dev, quota - work_done); /* * The error bits are clear on read, * so use saved value from irq handler. */ reg_sr |= priv->reg_sr; if (reg_sr & AT91_IRQ_ERR_FRAME) work_done += at91_poll_err(dev, quota - work_done, reg_sr); if (work_done < quota) { /* enable IRQs for frame errors and all mailboxes >= rx_next */ u32 reg_ier = AT91_IRQ_ERR_FRAME; reg_ier |= AT91_IRQ_MB_RX & ~AT91_MB_RX_MASK(priv->rx_next); napi_complete(napi); at91_write(priv, AT91_IER, reg_ier); } return work_done; } /* * theory of operation: * * priv->tx_echo holds the number of the oldest can_frame put for * transmission into the hardware, but not yet ACKed by the CAN tx * complete IRQ. * * We iterate from priv->tx_echo to priv->tx_next and check if the * packet has been transmitted, echo it back to the CAN framework. If * we discover a not yet transmitted package, stop looking for more. * */ static void at91_irq_tx(struct net_device *dev, u32 reg_sr) { struct at91_priv *priv = netdev_priv(dev); u32 reg_msr; unsigned int mb; /* masking of reg_sr not needed, already done by at91_irq */ for (/* nix */; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) { mb = get_tx_echo_mb(priv); /* no event in mailbox? 
*/ if (!(reg_sr & (1 << mb))) break; /* Disable irq for this TX mailbox */ at91_write(priv, AT91_IDR, 1 << mb); /* * only echo if mailbox signals us a transfer * complete (MSR_MRDY). Otherwise it's a tansfer * abort. "can_bus_off()" takes care about the skbs * parked in the echo queue. */ reg_msr = at91_read(priv, AT91_MSR(mb)); if (likely(reg_msr & AT91_MSR_MRDY && ~reg_msr & AT91_MSR_MABT)) { /* _NOTE_: substract AT91_MB_TX_FIRST offset from mb! */ can_get_echo_skb(dev, mb - AT91_MB_TX_FIRST); dev->stats.tx_packets++; } } /* * restart queue if we don't have a wrap around but restart if * we get a TX int for the last can frame directly before a * wrap around. */ if ((priv->tx_next & AT91_NEXT_MASK) != 0 || (priv->tx_echo & AT91_NEXT_MASK) == 0) netif_wake_queue(dev); } static void at91_irq_err_state(struct net_device *dev, struct can_frame *cf, enum can_state new_state) { struct at91_priv *priv = netdev_priv(dev); u32 reg_idr = 0, reg_ier = 0; struct can_berr_counter bec; at91_get_berr_counter(dev, &bec); switch (priv->can.state) { case CAN_STATE_ERROR_ACTIVE: /* * from: ERROR_ACTIVE * to : ERROR_WARNING, ERROR_PASSIVE, BUS_OFF * => : there was a warning int */ if (new_state >= CAN_STATE_ERROR_WARNING && new_state <= CAN_STATE_BUS_OFF) { netdev_dbg(dev, "Error Warning IRQ\n"); priv->can.can_stats.error_warning++; cf->can_id |= CAN_ERR_CRTL; cf->data[1] = (bec.txerr > bec.rxerr) ? CAN_ERR_CRTL_TX_WARNING : CAN_ERR_CRTL_RX_WARNING; } case CAN_STATE_ERROR_WARNING: /* fallthrough */ /* * from: ERROR_ACTIVE, ERROR_WARNING * to : ERROR_PASSIVE, BUS_OFF * => : error passive int */ if (new_state >= CAN_STATE_ERROR_PASSIVE && new_state <= CAN_STATE_BUS_OFF) { netdev_dbg(dev, "Error Passive IRQ\n"); priv->can.can_stats.error_passive++; cf->can_id |= CAN_ERR_CRTL; cf->data[1] = (bec.txerr > bec.rxerr) ? 
CAN_ERR_CRTL_TX_PASSIVE : CAN_ERR_CRTL_RX_PASSIVE; } break; case CAN_STATE_BUS_OFF: /* * from: BUS_OFF * to : ERROR_ACTIVE, ERROR_WARNING, ERROR_PASSIVE */ if (new_state <= CAN_STATE_ERROR_PASSIVE) { cf->can_id |= CAN_ERR_RESTARTED; netdev_dbg(dev, "restarted\n"); priv->can.can_stats.restarts++; netif_carrier_on(dev); netif_wake_queue(dev); } break; default: break; } /* process state changes depending on the new state */ switch (new_state) { case CAN_STATE_ERROR_ACTIVE: /* * actually we want to enable AT91_IRQ_WARN here, but * it screws up the system under certain * circumstances. so just enable AT91_IRQ_ERRP, thus * the "fallthrough" */ netdev_dbg(dev, "Error Active\n"); cf->can_id |= CAN_ERR_PROT; cf->data[2] = CAN_ERR_PROT_ACTIVE; case CAN_STATE_ERROR_WARNING: /* fallthrough */ reg_idr = AT91_IRQ_ERRA | AT91_IRQ_WARN | AT91_IRQ_BOFF; reg_ier = AT91_IRQ_ERRP; break; case CAN_STATE_ERROR_PASSIVE: reg_idr = AT91_IRQ_ERRA | AT91_IRQ_WARN | AT91_IRQ_ERRP; reg_ier = AT91_IRQ_BOFF; break; case CAN_STATE_BUS_OFF: reg_idr = AT91_IRQ_ERRA | AT91_IRQ_ERRP | AT91_IRQ_WARN | AT91_IRQ_BOFF; reg_ier = 0; cf->can_id |= CAN_ERR_BUSOFF; netdev_dbg(dev, "bus-off\n"); netif_carrier_off(dev); priv->can.can_stats.bus_off++; /* turn off chip, if restart is disabled */ if (!priv->can.restart_ms) { at91_chip_stop(dev, CAN_STATE_BUS_OFF); return; } break; default: break; } at91_write(priv, AT91_IDR, reg_idr); at91_write(priv, AT91_IER, reg_ier); } static void at91_irq_err(struct net_device *dev) { struct at91_priv *priv = netdev_priv(dev); struct sk_buff *skb; struct can_frame *cf; enum can_state new_state; u32 reg_sr; reg_sr = at91_read(priv, AT91_SR); /* we need to look at the unmasked reg_sr */ if (unlikely(reg_sr & AT91_IRQ_BOFF)) new_state = CAN_STATE_BUS_OFF; else if (unlikely(reg_sr & AT91_IRQ_ERRP)) new_state = CAN_STATE_ERROR_PASSIVE; else if (unlikely(reg_sr & AT91_IRQ_WARN)) new_state = CAN_STATE_ERROR_WARNING; else if (likely(reg_sr & AT91_IRQ_ERRA)) new_state = 
CAN_STATE_ERROR_ACTIVE; else { netdev_err(dev, "BUG! hardware in undefined state\n"); return; } /* state hasn't changed */ if (likely(new_state == priv->can.state)) return; skb = alloc_can_err_skb(dev, &cf); if (unlikely(!skb)) return; at91_irq_err_state(dev, cf, new_state); netif_rx(skb); dev->stats.rx_packets++; dev->stats.rx_bytes += cf->can_dlc; priv->can.state = new_state; } /* * interrupt handler */ static irqreturn_t at91_irq(int irq, void *dev_id) { struct net_device *dev = dev_id; struct at91_priv *priv = netdev_priv(dev); irqreturn_t handled = IRQ_NONE; u32 reg_sr, reg_imr; reg_sr = at91_read(priv, AT91_SR); reg_imr = at91_read(priv, AT91_IMR); /* Ignore masked interrupts */ reg_sr &= reg_imr; if (!reg_sr) goto exit; handled = IRQ_HANDLED; /* Receive or error interrupt? -> napi */ if (reg_sr & (AT91_IRQ_MB_RX | AT91_IRQ_ERR_FRAME)) { /* * The error bits are clear on read, * save for later use. */ priv->reg_sr = reg_sr; at91_write(priv, AT91_IDR, AT91_IRQ_MB_RX | AT91_IRQ_ERR_FRAME); napi_schedule(&priv->napi); } /* Transmission complete interrupt */ if (reg_sr & AT91_IRQ_MB_TX) at91_irq_tx(dev, reg_sr); at91_irq_err(dev); exit: return handled; } static int at91_open(struct net_device *dev) { struct at91_priv *priv = netdev_priv(dev); int err; clk_enable(priv->clk); /* check or determine and set bittime */ err = open_candev(dev); if (err) goto out; /* register interrupt handler */ if (request_irq(dev->irq, at91_irq, IRQF_SHARED, dev->name, dev)) { err = -EAGAIN; goto out_close; } /* start chip and queuing */ at91_chip_start(dev); napi_enable(&priv->napi); netif_start_queue(dev); return 0; out_close: close_candev(dev); out: clk_disable(priv->clk); return err; } /* * stop CAN bus activity */ static int at91_close(struct net_device *dev) { struct at91_priv *priv = netdev_priv(dev); netif_stop_queue(dev); napi_disable(&priv->napi); at91_chip_stop(dev, CAN_STATE_STOPPED); free_irq(dev->irq, dev); clk_disable(priv->clk); close_candev(dev); return 0; } static int 
at91_set_mode(struct net_device *dev, enum can_mode mode) { switch (mode) { case CAN_MODE_START: at91_chip_start(dev); netif_wake_queue(dev); break; default: return -EOPNOTSUPP; } return 0; } static const struct net_device_ops at91_netdev_ops = { .ndo_open = at91_open, .ndo_stop = at91_close, .ndo_start_xmit = at91_start_xmit, }; static ssize_t at91_sysfs_show_mb0_id(struct device *dev, struct device_attribute *attr, char *buf) { struct at91_priv *priv = netdev_priv(to_net_dev(dev)); if (priv->mb0_id & CAN_EFF_FLAG) return snprintf(buf, PAGE_SIZE, "0x%08x\n", priv->mb0_id); else return snprintf(buf, PAGE_SIZE, "0x%03x\n", priv->mb0_id); } static ssize_t at91_sysfs_set_mb0_id(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct net_device *ndev = to_net_dev(dev); struct at91_priv *priv = netdev_priv(ndev); unsigned long can_id; ssize_t ret; int err; rtnl_lock(); if (ndev->flags & IFF_UP) { ret = -EBUSY; goto out; } err = strict_strtoul(buf, 0, &can_id); if (err) { ret = err; goto out; } if (can_id & CAN_EFF_FLAG) can_id &= CAN_EFF_MASK | CAN_EFF_FLAG; else can_id &= CAN_SFF_MASK; priv->mb0_id = can_id; ret = count; out: rtnl_unlock(); return ret; } static DEVICE_ATTR(mb0_id, S_IWUSR | S_IRUGO, at91_sysfs_show_mb0_id, at91_sysfs_set_mb0_id); static struct attribute *at91_sysfs_attrs[] = { &dev_attr_mb0_id.attr, NULL, }; static struct attribute_group at91_sysfs_attr_group = { .attrs = at91_sysfs_attrs, }; static int __devinit at91_can_probe(struct platform_device *pdev) { struct net_device *dev; struct at91_priv *priv; struct resource *res; struct clk *clk; void __iomem *addr; int err, irq; clk = clk_get(&pdev->dev, "can_clk"); if (IS_ERR(clk)) { dev_err(&pdev->dev, "no clock defined\n"); err = -ENODEV; goto exit; } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); irq = platform_get_irq(pdev, 0); if (!res || irq <= 0) { err = -ENODEV; goto exit_put; } if (!request_mem_region(res->start, resource_size(res), pdev->name)) { err 
= -EBUSY; goto exit_put; } addr = ioremap_nocache(res->start, resource_size(res)); if (!addr) { err = -ENOMEM; goto exit_release; } dev = alloc_candev(sizeof(struct at91_priv), AT91_MB_TX_NUM); if (!dev) { err = -ENOMEM; goto exit_iounmap; } dev->netdev_ops = &at91_netdev_ops; dev->irq = irq; dev->flags |= IFF_ECHO; dev->sysfs_groups[0] = &at91_sysfs_attr_group; priv = netdev_priv(dev); priv->can.clock.freq = clk_get_rate(clk); priv->can.bittiming_const = &at91_bittiming_const; priv->can.do_set_mode = at91_set_mode; priv->can.do_get_berr_counter = at91_get_berr_counter; priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES; priv->reg_base = addr; priv->dev = dev; priv->clk = clk; priv->pdata = pdev->dev.platform_data; priv->mb0_id = 0x7ff; netif_napi_add(dev, &priv->napi, at91_poll, AT91_NAPI_WEIGHT); dev_set_drvdata(&pdev->dev, dev); SET_NETDEV_DEV(dev, &pdev->dev); err = register_candev(dev); if (err) { dev_err(&pdev->dev, "registering netdev failed\n"); goto exit_free; } dev_info(&pdev->dev, "device registered (reg_base=%p, irq=%d)\n", priv->reg_base, dev->irq); return 0; exit_free: free_candev(dev); exit_iounmap: iounmap(addr); exit_release: release_mem_region(res->start, resource_size(res)); exit_put: clk_put(clk); exit: return err; } static int __devexit at91_can_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); struct at91_priv *priv = netdev_priv(dev); struct resource *res; unregister_netdev(dev); platform_set_drvdata(pdev, NULL); iounmap(priv->reg_base); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); release_mem_region(res->start, resource_size(res)); clk_put(priv->clk); free_candev(dev); return 0; } static struct platform_driver at91_can_driver = { .probe = at91_can_probe, .remove = __devexit_p(at91_can_remove), .driver = { .name = KBUILD_MODNAME, .owner = THIS_MODULE, }, }; static int __init at91_can_module_init(void) { return platform_driver_register(&at91_can_driver); } static void __exit 
at91_can_module_exit(void) { platform_driver_unregister(&at91_can_driver); } module_init(at91_can_module_init); module_exit(at91_can_module_exit); MODULE_AUTHOR("Marc Kleine-Budde <mkl@pengutronix.de>"); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION(KBUILD_MODNAME " CAN netdevice driver");
gpl-2.0
entdark/q3mme
trunk/code/tools/lcc/src/input.c
110
2934
#include "c.h"

/*
 * input.c - lcc's low-level input handling.
 *
 * All input flows through one buffer laid out as two regions:
 * buffer[0..MAXLINE] is a pushback area so that up to MAXLINE characters
 * of a partially consumed line can be copied back in front of fresh data,
 * and buffer[MAXLINE+1..MAXLINE+BUFSIZE] receives each fread from stdin.
 * fillbuf() writes a sentinel '\n' at *limit so scanning loops never need
 * explicit bounds checks.
 */

static void pragma(void);
static void resynch(void);

static int bsize;	/* bytes from the last refill; 0 at EOF, -1 before the first read */
static unsigned char buffer[MAXLINE+1 + BUFSIZE+1];
unsigned char *cp;	/* current input character */
char *file;		/* current input file name */
char *firstfile;	/* first input file */
unsigned char *limit;	/* points to last character + 1 */
char *line;		/* current line */
int lineno;		/* line number of current line */

/* nextline - advance to the next input line, refilling the buffer when it
   is exhausted and consuming any preprocessor control line (# ...). */
void nextline(void) {
	do {
		if (cp >= limit) {
			/* buffer exhausted: refill; cp still >= limit means EOF */
			fillbuf();
			if (cp >= limit)
				cp = limit;
			if (cp == limit)
				return;
		} else {
			lineno++;
			/* record the start of the line, then skip leading blanks */
			for (line = (char *)cp; *cp==' ' || *cp=='\t'; cp++)
				;
			if (*cp == '#') {
				/* preprocessor control line: process it, then retry */
				resynch();
				nextline();
			}
		}
	} while (*cp == '\n' && cp == limit);
}

/* fillbuf - refill the input buffer from stdin, first sliding the
   unconsumed tail [cp,limit) back into the pushback region so it ends
   immediately before the fresh data.  Re-establishes the invariants on
   cp, line and limit and plants the '\n' sentinel at *limit. */
void fillbuf(void) {
	if (bsize == 0)		/* a previous refill already hit EOF */
		return;
	if (cp >= limit)
		cp = &buffer[MAXLINE+1];
	else {
		/* move the n leftover characters in front of the read area */
		int n = limit - cp;
		unsigned char *s = &buffer[MAXLINE+1] - n;
		assert(s >= buffer);
		/* keep `line` pointing at the same offset within the moved text */
		line = (char *)s - ((char *)cp - line);
		while (cp < limit)
			*s++ = *cp++;
		cp = &buffer[MAXLINE+1] - n;
	}
	if (feof(stdin))
		bsize = 0;
	else
		bsize = fread(&buffer[MAXLINE+1], 1, BUFSIZE, stdin);
	/* NOTE(review): fread returns size_t and never a negative value, so
	   this branch looks unreachable; kept byte-for-byte for parity with
	   the historical sources - confirm before removing */
	if (bsize < 0) {
		error("read error\n");
		exit(EXIT_FAILURE);
	}
	limit = &buffer[MAXLINE+1+bsize];
	*limit = '\n';		/* sentinel: scans stop here without bounds checks */
}

/* input_init - one-time initialisation of the input machinery; primes the
   buffer and leaves cp positioned at the first input line. */
void input_init(int argc, char *argv[]) {
	static int inited;	/* guards against repeated initialisation */
	if (inited)
		return;
	inited = 1;
	main_init(argc, argv);
	limit = cp = &buffer[MAXLINE+1];	/* empty buffer: forces a refill */
	bsize = -1;
	lineno = 0;
	file = NULL;
	fillbuf();
	if (cp >= limit)
		cp = limit;
	nextline();
}

/* pragma - handle #pragma ref id...: bump the reference count of each
   named symbol and record a use at the current coordinate */
static void pragma(void) {
	if ((t = gettok()) == ID && strcmp(token, "ref") == 0)
		for (;;) {
			while (*cp == ' ' || *cp == '\t')
				cp++;
			if (*cp == '\n' || *cp == 0)
				break;
			if ((t = gettok()) == ID && tsym) {
				tsym->ref++;
				use(tsym, src);
			}
		}
}

/* resynch - set line number/file name in # n [ "file" ] and #pragma ... */
static void resynch(void) {
	for (cp++; *cp == ' ' || *cp == '\t'; )
		cp++;
	if (limit - cp < MAXLINE)	/* make sure the whole control line is buffered */
		fillbuf();
	if (strncmp((char *)cp, "pragma", 6) == 0) {
		cp += 6;
		pragma();
	} else if (*cp >= '0' && *cp <= '9') {
		/* "# n" or "# n \"file\"": decode n into lineno; the trailing
		   decrement compensates for the lineno++ done in nextline() */
line:		for (lineno = 0; *cp >= '0' && *cp <= '9'; )
			lineno = 10*lineno + *cp++ - '0';
		lineno--;
		while (*cp == ' ' || *cp == '\t')
			cp++;
		if (*cp == '"') {
			/* capture the file name between the quotes */
			file = (char *)++cp;
			while (*cp && *cp != '"' && *cp != '\n')
				cp++;
			file = stringn(file, (char *)cp - file);
			if (*cp == '\n')
				warning("missing \" in preprocessor line\n");
			if (firstfile == 0)
				firstfile = file;
		}
	} else if (strncmp((char *)cp, "line", 4) == 0) {
		/* "#line n ..." - same handling once the keyword is skipped */
		for (cp += 4; *cp == ' ' || *cp == '\t'; )
			cp++;
		if (*cp >= '0' && *cp <= '9')
			goto line;
		if (Aflag >= 2)
			warning("unrecognized control line\n");
	} else if (Aflag >= 2 && *cp != '\n')
		warning("unrecognized control line\n");
	/* discard the rest of the control line; a '\n' that turns out to be
	   the buffer sentinel (cp == limit + 1) means more input is needed */
	while (*cp)
		if (*cp++ == '\n') {
			if (cp == limit + 1)
				nextline();
			else
				break;
		}
}
gpl-2.0
jderrick/linux-torvalds
drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
110
40122
/* * Copyright 2014 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: Alex Deucher */ #include <linux/firmware.h> #include <drm/drmP.h> #include "amdgpu.h" #include "amdgpu_ucode.h" #include "amdgpu_trace.h" #include "vi.h" #include "vid.h" #include "oss/oss_2_4_d.h" #include "oss/oss_2_4_sh_mask.h" #include "gmc/gmc_8_1_d.h" #include "gmc/gmc_8_1_sh_mask.h" #include "gca/gfx_8_0_d.h" #include "gca/gfx_8_0_enum.h" #include "gca/gfx_8_0_sh_mask.h" #include "bif/bif_5_0_d.h" #include "bif/bif_5_0_sh_mask.h" #include "iceland_sdma_pkt_open.h" static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev); static void sdma_v2_4_set_buffer_funcs(struct amdgpu_device *adev); static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev); static void sdma_v2_4_set_irq_funcs(struct amdgpu_device *adev); MODULE_FIRMWARE("amdgpu/topaz_sdma.bin"); MODULE_FIRMWARE("amdgpu/topaz_sdma1.bin"); static const u32 sdma_offsets[SDMA_MAX_INSTANCE] = { SDMA0_REGISTER_OFFSET, SDMA1_REGISTER_OFFSET }; static const u32 golden_settings_iceland_a11[] = { mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007, mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000, mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007, mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000, }; static const u32 iceland_mgcg_cgcg_init[] = { mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100, mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100 }; /* * sDMA - System DMA * Starting with CIK, the GPU has new asynchronous * DMA engines. These engines are used for compute * and gfx. There are two DMA engines (SDMA0, SDMA1) * and each one supports 1 ring buffer used for gfx * and 2 queues used for compute. * * The programming model is very similar to the CP * (ring buffer, IBs, etc.), but sDMA has it's own * packet format that is different from the PM4 format * used by the CP. sDMA supports copying data, writing * embedded data, solid fills, and a number of other * things. It also has support for tiling/detiling of * buffers. 
*/ static void sdma_v2_4_init_golden_registers(struct amdgpu_device *adev) { switch (adev->asic_type) { case CHIP_TOPAZ: amdgpu_program_register_sequence(adev, iceland_mgcg_cgcg_init, (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init)); amdgpu_program_register_sequence(adev, golden_settings_iceland_a11, (const u32)ARRAY_SIZE(golden_settings_iceland_a11)); break; default: break; } } /** * sdma_v2_4_init_microcode - load ucode images from disk * * @adev: amdgpu_device pointer * * Use the firmware interface to load the ucode images into * the driver (not loaded into hw). * Returns 0 on success, error on failure. */ static int sdma_v2_4_init_microcode(struct amdgpu_device *adev) { const char *chip_name; char fw_name[30]; int err, i; struct amdgpu_firmware_info *info = NULL; const struct common_firmware_header *header = NULL; const struct sdma_firmware_header_v1_0 *hdr; DRM_DEBUG("\n"); switch (adev->asic_type) { case CHIP_TOPAZ: chip_name = "topaz"; break; default: BUG(); } for (i = 0; i < SDMA_MAX_INSTANCE; i++) { if (i == 0) snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name); else snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name); err = request_firmware(&adev->sdma[i].fw, fw_name, adev->dev); if (err) goto out; err = amdgpu_ucode_validate(adev->sdma[i].fw); if (err) goto out; hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data; adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version); adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version); if (adev->firmware.smu_load) { info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i]; info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i; info->fw = adev->sdma[i].fw; header = (const struct common_firmware_header *)info->fw->data; adev->firmware.fw_size += ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE); } } out: if (err) { printk(KERN_ERR "sdma_v2_4: Failed to load firmware \"%s\"\n", fw_name); for (i = 0; i < SDMA_MAX_INSTANCE; i++) { 
release_firmware(adev->sdma[i].fw); adev->sdma[i].fw = NULL; } } return err; } /** * sdma_v2_4_ring_get_rptr - get the current read pointer * * @ring: amdgpu ring pointer * * Get the current rptr from the hardware (VI+). */ static uint32_t sdma_v2_4_ring_get_rptr(struct amdgpu_ring *ring) { u32 rptr; /* XXX check if swapping is necessary on BE */ rptr = ring->adev->wb.wb[ring->rptr_offs] >> 2; return rptr; } /** * sdma_v2_4_ring_get_wptr - get the current write pointer * * @ring: amdgpu ring pointer * * Get the current wptr from the hardware (VI+). */ static uint32_t sdma_v2_4_ring_get_wptr(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; int me = (ring == &ring->adev->sdma[0].ring) ? 0 : 1; u32 wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) >> 2; return wptr; } /** * sdma_v2_4_ring_set_wptr - commit the write pointer * * @ring: amdgpu ring pointer * * Write the wptr back to the hardware (VI+). */ static void sdma_v2_4_ring_set_wptr(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; int me = (ring == &ring->adev->sdma[0].ring) ? 0 : 1; WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], ring->wptr << 2); } /** * sdma_v2_4_ring_emit_ib - Schedule an IB on the DMA engine * * @ring: amdgpu ring pointer * @ib: IB object to schedule * * Schedule an IB in the DMA ring (VI). */ static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib) { u32 vmid = (ib->vm ? 
ib->vm->ids[ring->idx].id : 0) & 0xf; u32 next_rptr = ring->wptr + 5; while ((next_rptr & 7) != 2) next_rptr++; next_rptr += 6; amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) | SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR)); amdgpu_ring_write(ring, lower_32_bits(ring->next_rptr_gpu_addr) & 0xfffffffc); amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr)); amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1)); amdgpu_ring_write(ring, next_rptr); /* IB packet must end on a 8 DW boundary */ while ((ring->wptr & 7) != 2) amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_NOP)); amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) | SDMA_PKT_INDIRECT_HEADER_VMID(vmid)); /* base must be 32 byte aligned */ amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0); amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); amdgpu_ring_write(ring, ib->length_dw); amdgpu_ring_write(ring, 0); amdgpu_ring_write(ring, 0); } /** * sdma_v2_4_hdp_flush_ring_emit - emit an hdp flush on the DMA ring * * @ring: amdgpu ring pointer * * Emit an hdp flush packet on the requested DMA ring. 
*/ static void sdma_v2_4_ring_emit_hdp_flush(struct amdgpu_ring *ring) { u32 ref_and_mask = 0; if (ring == &ring->adev->sdma[0].ring) ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA0, 1); else ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA1, 1); amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) | SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) | SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */ amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE << 2); amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ << 2); amdgpu_ring_write(ring, ref_and_mask); /* reference */ amdgpu_ring_write(ring, ref_and_mask); /* mask */ amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) | SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */ } /** * sdma_v2_4_ring_emit_fence - emit a fence on the DMA ring * * @ring: amdgpu ring pointer * @fence: amdgpu fence object * * Add a DMA fence packet to the ring to write * the fence seq number and DMA trap packet to generate * an interrupt if needed (VI). 
*/ static void sdma_v2_4_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, unsigned flags) { bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT; /* write the fence */ amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE)); amdgpu_ring_write(ring, lower_32_bits(addr)); amdgpu_ring_write(ring, upper_32_bits(addr)); amdgpu_ring_write(ring, lower_32_bits(seq)); /* optionally write high bits as well */ if (write64bit) { addr += 4; amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE)); amdgpu_ring_write(ring, lower_32_bits(addr)); amdgpu_ring_write(ring, upper_32_bits(addr)); amdgpu_ring_write(ring, upper_32_bits(seq)); } /* generate an interrupt */ amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_TRAP)); amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0)); } /** * sdma_v2_4_ring_emit_semaphore - emit a semaphore on the dma ring * * @ring: amdgpu_ring structure holding ring information * @semaphore: amdgpu semaphore object * @emit_wait: wait or signal semaphore * * Add a DMA semaphore packet to the ring wait on or signal * other rings (VI). */ static bool sdma_v2_4_ring_emit_semaphore(struct amdgpu_ring *ring, struct amdgpu_semaphore *semaphore, bool emit_wait) { u64 addr = semaphore->gpu_addr; u32 sig = emit_wait ? 0 : 1; amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SEM) | SDMA_PKT_SEMAPHORE_HEADER_SIGNAL(sig)); amdgpu_ring_write(ring, lower_32_bits(addr) & 0xfffffff8); amdgpu_ring_write(ring, upper_32_bits(addr)); return true; } /** * sdma_v2_4_gfx_stop - stop the gfx async dma engines * * @adev: amdgpu_device pointer * * Stop the gfx async dma ring buffers (VI). 
*/ static void sdma_v2_4_gfx_stop(struct amdgpu_device *adev) { struct amdgpu_ring *sdma0 = &adev->sdma[0].ring; struct amdgpu_ring *sdma1 = &adev->sdma[1].ring; u32 rb_cntl, ib_cntl; int i; if ((adev->mman.buffer_funcs_ring == sdma0) || (adev->mman.buffer_funcs_ring == sdma1)) amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size); for (i = 0; i < SDMA_MAX_INSTANCE; i++) { rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]); rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0); WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl); ib_cntl = RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]); ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0); WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl); } sdma0->ready = false; sdma1->ready = false; } /** * sdma_v2_4_rlc_stop - stop the compute async dma engines * * @adev: amdgpu_device pointer * * Stop the compute async dma queues (VI). */ static void sdma_v2_4_rlc_stop(struct amdgpu_device *adev) { /* XXX todo */ } /** * sdma_v2_4_enable - stop the async dma engines * * @adev: amdgpu_device pointer * @enable: enable/disable the DMA MEs. * * Halt or unhalt the async dma engines (VI). */ static void sdma_v2_4_enable(struct amdgpu_device *adev, bool enable) { u32 f32_cntl; int i; if (enable == false) { sdma_v2_4_gfx_stop(adev); sdma_v2_4_rlc_stop(adev); } for (i = 0; i < SDMA_MAX_INSTANCE; i++) { f32_cntl = RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]); if (enable) f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 0); else f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 1); WREG32(mmSDMA0_F32_CNTL + sdma_offsets[i], f32_cntl); } } /** * sdma_v2_4_gfx_resume - setup and start the async dma engines * * @adev: amdgpu_device pointer * * Set up the gfx DMA ring buffers and enable them (VI). * Returns 0 for success, error for failure. 
*/ static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev) { struct amdgpu_ring *ring; u32 rb_cntl, ib_cntl; u32 rb_bufsz; u32 wb_offset; int i, j, r; for (i = 0; i < SDMA_MAX_INSTANCE; i++) { ring = &adev->sdma[i].ring; wb_offset = (ring->rptr_offs * 4); mutex_lock(&adev->srbm_mutex); for (j = 0; j < 16; j++) { vi_srbm_select(adev, 0, 0, 0, j); /* SDMA GFX */ WREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i], 0); WREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i], 0); } vi_srbm_select(adev, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); WREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0); /* Set ring buffer size in dwords */ rb_bufsz = order_base_2(ring->ring_size / 4); rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]); rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz); #ifdef __BIG_ENDIAN rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1); rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_SWAP_ENABLE, 1); #endif WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl); /* Initialize the ring buffer's read and write pointers */ WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0); WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0); /* set the wb address whether it's enabled or not */ WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i], upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF); WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i], lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC); rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1); WREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i], ring->gpu_addr >> 8); WREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i], ring->gpu_addr >> 40); ring->wptr = 0; WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], ring->wptr << 2); /* enable DMA RB */ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1); WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl); ib_cntl = RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]); 
ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1); #ifdef __BIG_ENDIAN ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1); #endif /* enable DMA IBs */ WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl); ring->ready = true; r = amdgpu_ring_test_ring(ring); if (r) { ring->ready = false; return r; } if (adev->mman.buffer_funcs_ring == ring) amdgpu_ttm_set_active_vram_size(adev, adev->mc.real_vram_size); } return 0; } /** * sdma_v2_4_rlc_resume - setup and start the async dma engines * * @adev: amdgpu_device pointer * * Set up the compute DMA queues and enable them (VI). * Returns 0 for success, error for failure. */ static int sdma_v2_4_rlc_resume(struct amdgpu_device *adev) { /* XXX todo */ return 0; } /** * sdma_v2_4_load_microcode - load the sDMA ME ucode * * @adev: amdgpu_device pointer * * Loads the sDMA0/1 ucode. * Returns 0 for success, -EINVAL if the ucode is not available. */ static int sdma_v2_4_load_microcode(struct amdgpu_device *adev) { const struct sdma_firmware_header_v1_0 *hdr; const __le32 *fw_data; u32 fw_size; int i, j; bool smc_loads_fw = false; /* XXX fix me */ if (!adev->sdma[0].fw || !adev->sdma[1].fw) return -EINVAL; /* halt the MEs */ sdma_v2_4_enable(adev, false); if (smc_loads_fw) { /* XXX query SMC for fw load complete */ } else { for (i = 0; i < SDMA_MAX_INSTANCE; i++) { hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data; amdgpu_ucode_print_sdma_hdr(&hdr->header); fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; fw_data = (const __le32 *) (adev->sdma[i].fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0); for (j = 0; j < fw_size; j++) WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i], le32_to_cpup(fw_data++)); WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma[i].fw_version); } } return 0; } /** * sdma_v2_4_start - setup and start the async dma engines * * @adev: amdgpu_device pointer * * Set up the DMA engines and 
enable them (VI). * Returns 0 for success, error for failure. */ static int sdma_v2_4_start(struct amdgpu_device *adev) { int r; if (!adev->firmware.smu_load) { r = sdma_v2_4_load_microcode(adev); if (r) return r; } else { r = adev->smu.smumgr_funcs->check_fw_load_finish(adev, AMDGPU_UCODE_ID_SDMA0); if (r) return -EINVAL; r = adev->smu.smumgr_funcs->check_fw_load_finish(adev, AMDGPU_UCODE_ID_SDMA1); if (r) return -EINVAL; } /* unhalt the MEs */ sdma_v2_4_enable(adev, true); /* start the gfx rings and rlc compute queues */ r = sdma_v2_4_gfx_resume(adev); if (r) return r; r = sdma_v2_4_rlc_resume(adev); if (r) return r; return 0; } /** * sdma_v2_4_ring_test_ring - simple async dma engine test * * @ring: amdgpu_ring structure holding ring information * * Test the DMA engine by writing using it to write an * value to memory. (VI). * Returns 0 for success, error for failure. */ static int sdma_v2_4_ring_test_ring(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; unsigned i; unsigned index; int r; u32 tmp; u64 gpu_addr; r = amdgpu_wb_get(adev, &index); if (r) { dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r); return r; } gpu_addr = adev->wb.gpu_addr + (index * 4); tmp = 0xCAFEDEAD; adev->wb.wb[index] = cpu_to_le32(tmp); r = amdgpu_ring_lock(ring, 5); if (r) { DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r); amdgpu_wb_free(adev, index); return r; } amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) | SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR)); amdgpu_ring_write(ring, lower_32_bits(gpu_addr)); amdgpu_ring_write(ring, upper_32_bits(gpu_addr)); amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1)); amdgpu_ring_write(ring, 0xDEADBEEF); amdgpu_ring_unlock_commit(ring); for (i = 0; i < adev->usec_timeout; i++) { tmp = le32_to_cpu(adev->wb.wb[index]); if (tmp == 0xDEADBEEF) break; DRM_UDELAY(1); } if (i < adev->usec_timeout) { DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i); } else { 
DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n", ring->idx, tmp); r = -EINVAL; } amdgpu_wb_free(adev, index); return r; } /** * sdma_v2_4_ring_test_ib - test an IB on the DMA engine * * @ring: amdgpu_ring structure holding ring information * * Test a simple IB in the DMA ring (VI). * Returns 0 on success, error on failure. */ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; struct amdgpu_ib ib; unsigned i; unsigned index; int r; u32 tmp = 0; u64 gpu_addr; r = amdgpu_wb_get(adev, &index); if (r) { dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r); return r; } gpu_addr = adev->wb.gpu_addr + (index * 4); tmp = 0xCAFEDEAD; adev->wb.wb[index] = cpu_to_le32(tmp); r = amdgpu_ib_get(ring, NULL, 256, &ib); if (r) { amdgpu_wb_free(adev, index); DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); return r; } ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) | SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR); ib.ptr[1] = lower_32_bits(gpu_addr); ib.ptr[2] = upper_32_bits(gpu_addr); ib.ptr[3] = SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1); ib.ptr[4] = 0xDEADBEEF; ib.ptr[5] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP); ib.ptr[6] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP); ib.ptr[7] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP); ib.length_dw = 8; r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED); if (r) { amdgpu_ib_free(adev, &ib); amdgpu_wb_free(adev, index); DRM_ERROR("amdgpu: failed to schedule ib (%d).\n", r); return r; } r = amdgpu_fence_wait(ib.fence, false); if (r) { amdgpu_ib_free(adev, &ib); amdgpu_wb_free(adev, index); DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); return r; } for (i = 0; i < adev->usec_timeout; i++) { tmp = le32_to_cpu(adev->wb.wb[index]); if (tmp == 0xDEADBEEF) break; DRM_UDELAY(1); } if (i < adev->usec_timeout) { DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring->idx, i); } else { DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp); r = -EINVAL; } amdgpu_ib_free(adev, &ib); 
amdgpu_wb_free(adev, index); return r; } /** * sdma_v2_4_vm_copy_pte - update PTEs by copying them from the GART * * @ib: indirect buffer to fill with commands * @pe: addr of the page entry * @src: src addr to copy from * @count: number of page entries to update * * Update PTEs by copying them from the GART using sDMA (CIK). */ static void sdma_v2_4_vm_copy_pte(struct amdgpu_ib *ib, uint64_t pe, uint64_t src, unsigned count) { while (count) { unsigned bytes = count * 8; if (bytes > 0x1FFFF8) bytes = 0x1FFFF8; ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) | SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR); ib->ptr[ib->length_dw++] = bytes; ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */ ib->ptr[ib->length_dw++] = lower_32_bits(src); ib->ptr[ib->length_dw++] = upper_32_bits(src); ib->ptr[ib->length_dw++] = lower_32_bits(pe); ib->ptr[ib->length_dw++] = upper_32_bits(pe); pe += bytes; src += bytes; count -= bytes / 8; } } /** * sdma_v2_4_vm_write_pte - update PTEs by writing them manually * * @ib: indirect buffer to fill with commands * @pe: addr of the page entry * @addr: dst addr to write into pe * @count: number of page entries to update * @incr: increase next addr by incr bytes * @flags: access flags * * Update PTEs by writing them manually using sDMA (CIK). 
*/ static void sdma_v2_4_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe, uint64_t addr, unsigned count, uint32_t incr, uint32_t flags) { uint64_t value; unsigned ndw; while (count) { ndw = count * 2; if (ndw > 0xFFFFE) ndw = 0xFFFFE; /* for non-physically contiguous pages (system) */ ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) | SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR); ib->ptr[ib->length_dw++] = pe; ib->ptr[ib->length_dw++] = upper_32_bits(pe); ib->ptr[ib->length_dw++] = ndw; for (; ndw > 0; ndw -= 2, --count, pe += 8) { if (flags & AMDGPU_PTE_SYSTEM) { value = amdgpu_vm_map_gart(ib->ring->adev, addr); value &= 0xFFFFFFFFFFFFF000ULL; } else if (flags & AMDGPU_PTE_VALID) { value = addr; } else { value = 0; } addr += incr; value |= flags; ib->ptr[ib->length_dw++] = value; ib->ptr[ib->length_dw++] = upper_32_bits(value); } } } /** * sdma_v2_4_vm_set_pte_pde - update the page tables using sDMA * * @ib: indirect buffer to fill with commands * @pe: addr of the page entry * @addr: dst addr to write into pe * @count: number of page entries to update * @incr: increase next addr by incr bytes * @flags: access flags * * Update the page tables using sDMA (CIK). 
*/ static void sdma_v2_4_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe, uint64_t addr, unsigned count, uint32_t incr, uint32_t flags) { uint64_t value; unsigned ndw; while (count) { ndw = count; if (ndw > 0x7FFFF) ndw = 0x7FFFF; if (flags & AMDGPU_PTE_VALID) value = addr; else value = 0; /* for physically contiguous pages (vram) */ ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_GEN_PTEPDE); ib->ptr[ib->length_dw++] = pe; /* dst addr */ ib->ptr[ib->length_dw++] = upper_32_bits(pe); ib->ptr[ib->length_dw++] = flags; /* mask */ ib->ptr[ib->length_dw++] = 0; ib->ptr[ib->length_dw++] = value; /* value */ ib->ptr[ib->length_dw++] = upper_32_bits(value); ib->ptr[ib->length_dw++] = incr; /* increment size */ ib->ptr[ib->length_dw++] = 0; ib->ptr[ib->length_dw++] = ndw; /* number of entries */ pe += ndw * 8; addr += ndw * incr; count -= ndw; } } /** * sdma_v2_4_vm_pad_ib - pad the IB to the required number of dw * * @ib: indirect buffer to fill with padding * */ static void sdma_v2_4_vm_pad_ib(struct amdgpu_ib *ib) { while (ib->length_dw & 0x7) ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP); } /** * sdma_v2_4_ring_emit_vm_flush - cik vm flush using sDMA * * @ring: amdgpu_ring pointer * @vm: amdgpu_vm pointer * * Update the page table base and flush the VM TLB * using sDMA (VI). 
*/ static void sdma_v2_4_ring_emit_vm_flush(struct amdgpu_ring *ring, unsigned vm_id, uint64_t pd_addr) { amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) | SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf)); if (vm_id < 8) { amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id)); } else { amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8)); } amdgpu_ring_write(ring, pd_addr >> 12); /* flush TLB */ amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) | SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf)); amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST); amdgpu_ring_write(ring, 1 << vm_id); /* wait for flush */ amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) | SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) | SDMA_PKT_POLL_REGMEM_HEADER_FUNC(0)); /* always */ amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2); amdgpu_ring_write(ring, 0); amdgpu_ring_write(ring, 0); /* reference */ amdgpu_ring_write(ring, 0); /* mask */ amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) | SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */ } static int sdma_v2_4_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; sdma_v2_4_set_ring_funcs(adev); sdma_v2_4_set_buffer_funcs(adev); sdma_v2_4_set_vm_pte_funcs(adev); sdma_v2_4_set_irq_funcs(adev); return 0; } static int sdma_v2_4_sw_init(void *handle) { struct amdgpu_ring *ring; int r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; /* SDMA trap event */ r = amdgpu_irq_add_id(adev, 224, &adev->sdma_trap_irq); if (r) return r; /* SDMA Privileged inst */ r = amdgpu_irq_add_id(adev, 241, &adev->sdma_illegal_inst_irq); if (r) return r; /* SDMA Privileged inst */ r = amdgpu_irq_add_id(adev, 247, &adev->sdma_illegal_inst_irq); if (r) return r; r = sdma_v2_4_init_microcode(adev); if (r) { DRM_ERROR("Failed to load sdma firmware!\n"); return r; } ring = &adev->sdma[0].ring; ring->ring_obj = NULL; 
ring->use_doorbell = false; ring = &adev->sdma[1].ring; ring->ring_obj = NULL; ring->use_doorbell = false; ring = &adev->sdma[0].ring; sprintf(ring->name, "sdma0"); r = amdgpu_ring_init(adev, ring, 256 * 1024, SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf, &adev->sdma_trap_irq, AMDGPU_SDMA_IRQ_TRAP0, AMDGPU_RING_TYPE_SDMA); if (r) return r; ring = &adev->sdma[1].ring; sprintf(ring->name, "sdma1"); r = amdgpu_ring_init(adev, ring, 256 * 1024, SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf, &adev->sdma_trap_irq, AMDGPU_SDMA_IRQ_TRAP1, AMDGPU_RING_TYPE_SDMA); if (r) return r; return r; } static int sdma_v2_4_sw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; amdgpu_ring_fini(&adev->sdma[0].ring); amdgpu_ring_fini(&adev->sdma[1].ring); return 0; } static int sdma_v2_4_hw_init(void *handle) { int r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; sdma_v2_4_init_golden_registers(adev); r = sdma_v2_4_start(adev); if (r) return r; return r; } static int sdma_v2_4_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; sdma_v2_4_enable(adev, false); return 0; } static int sdma_v2_4_suspend(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; return sdma_v2_4_hw_fini(adev); } static int sdma_v2_4_resume(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; return sdma_v2_4_hw_init(adev); } static bool sdma_v2_4_is_idle(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; u32 tmp = RREG32(mmSRBM_STATUS2); if (tmp & (SRBM_STATUS2__SDMA_BUSY_MASK | SRBM_STATUS2__SDMA1_BUSY_MASK)) return false; return true; } static int sdma_v2_4_wait_for_idle(void *handle) { unsigned i; u32 tmp; struct amdgpu_device *adev = (struct amdgpu_device *)handle; for (i = 0; i < adev->usec_timeout; i++) { tmp = RREG32(mmSRBM_STATUS2) & (SRBM_STATUS2__SDMA_BUSY_MASK | SRBM_STATUS2__SDMA1_BUSY_MASK); if (!tmp) return 0; udelay(1); } return -ETIMEDOUT; } static void 
sdma_v2_4_print_status(void *handle) { int i, j; struct amdgpu_device *adev = (struct amdgpu_device *)handle; dev_info(adev->dev, "VI SDMA registers\n"); dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n", RREG32(mmSRBM_STATUS2)); for (i = 0; i < SDMA_MAX_INSTANCE; i++) { dev_info(adev->dev, " SDMA%d_STATUS_REG=0x%08X\n", i, RREG32(mmSDMA0_STATUS_REG + sdma_offsets[i])); dev_info(adev->dev, " SDMA%d_F32_CNTL=0x%08X\n", i, RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i])); dev_info(adev->dev, " SDMA%d_CNTL=0x%08X\n", i, RREG32(mmSDMA0_CNTL + sdma_offsets[i])); dev_info(adev->dev, " SDMA%d_SEM_WAIT_FAIL_TIMER_CNTL=0x%08X\n", i, RREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i])); dev_info(adev->dev, " SDMA%d_GFX_IB_CNTL=0x%08X\n", i, RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i])); dev_info(adev->dev, " SDMA%d_GFX_RB_CNTL=0x%08X\n", i, RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i])); dev_info(adev->dev, " SDMA%d_GFX_RB_RPTR=0x%08X\n", i, RREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i])); dev_info(adev->dev, " SDMA%d_GFX_RB_WPTR=0x%08X\n", i, RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i])); dev_info(adev->dev, " SDMA%d_GFX_RB_RPTR_ADDR_HI=0x%08X\n", i, RREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i])); dev_info(adev->dev, " SDMA%d_GFX_RB_RPTR_ADDR_LO=0x%08X\n", i, RREG32(mmSDMA0_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i])); dev_info(adev->dev, " SDMA%d_GFX_RB_BASE=0x%08X\n", i, RREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i])); dev_info(adev->dev, " SDMA%d_GFX_RB_BASE_HI=0x%08X\n", i, RREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i])); mutex_lock(&adev->srbm_mutex); for (j = 0; j < 16; j++) { vi_srbm_select(adev, 0, 0, 0, j); dev_info(adev->dev, " VM %d:\n", j); dev_info(adev->dev, " SDMA%d_GFX_VIRTUAL_ADDR=0x%08X\n", i, RREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i])); dev_info(adev->dev, " SDMA%d_GFX_APE1_CNTL=0x%08X\n", i, RREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i])); } vi_srbm_select(adev, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); } } static int 
sdma_v2_4_soft_reset(void *handle) { u32 srbm_soft_reset = 0; struct amdgpu_device *adev = (struct amdgpu_device *)handle; u32 tmp = RREG32(mmSRBM_STATUS2); if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK) { /* sdma0 */ tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET); tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 0); WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp); srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK; } if (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK) { /* sdma1 */ tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET); tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 0); WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp); srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK; } if (srbm_soft_reset) { sdma_v2_4_print_status((void *)adev); tmp = RREG32(mmSRBM_SOFT_RESET); tmp |= srbm_soft_reset; dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); WREG32(mmSRBM_SOFT_RESET, tmp); tmp = RREG32(mmSRBM_SOFT_RESET); udelay(50); tmp &= ~srbm_soft_reset; WREG32(mmSRBM_SOFT_RESET, tmp); tmp = RREG32(mmSRBM_SOFT_RESET); /* Wait a little for things to settle down */ udelay(50); sdma_v2_4_print_status((void *)adev); } return 0; } static int sdma_v2_4_set_trap_irq_state(struct amdgpu_device *adev, struct amdgpu_irq_src *src, unsigned type, enum amdgpu_interrupt_state state) { u32 sdma_cntl; switch (type) { case AMDGPU_SDMA_IRQ_TRAP0: switch (state) { case AMDGPU_IRQ_STATE_DISABLE: sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET); sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 0); WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl); break; case AMDGPU_IRQ_STATE_ENABLE: sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET); sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 1); WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl); break; default: break; } break; case AMDGPU_SDMA_IRQ_TRAP1: switch (state) { case AMDGPU_IRQ_STATE_DISABLE: sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET); sdma_cntl = 
REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 0); WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl); break; case AMDGPU_IRQ_STATE_ENABLE: sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET); sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 1); WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl); break; default: break; } break; default: break; } return 0; } static int sdma_v2_4_process_trap_irq(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) { u8 instance_id, queue_id; instance_id = (entry->ring_id & 0x3) >> 0; queue_id = (entry->ring_id & 0xc) >> 2; DRM_DEBUG("IH: SDMA trap\n"); switch (instance_id) { case 0: switch (queue_id) { case 0: amdgpu_fence_process(&adev->sdma[0].ring); break; case 1: /* XXX compute */ break; case 2: /* XXX compute */ break; } break; case 1: switch (queue_id) { case 0: amdgpu_fence_process(&adev->sdma[1].ring); break; case 1: /* XXX compute */ break; case 2: /* XXX compute */ break; } break; } return 0; } static int sdma_v2_4_process_illegal_inst_irq(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) { DRM_ERROR("Illegal instruction in SDMA command stream\n"); schedule_work(&adev->reset_work); return 0; } static int sdma_v2_4_set_clockgating_state(void *handle, enum amd_clockgating_state state) { /* XXX handled via the smc on VI */ return 0; } static int sdma_v2_4_set_powergating_state(void *handle, enum amd_powergating_state state) { return 0; } const struct amd_ip_funcs sdma_v2_4_ip_funcs = { .early_init = sdma_v2_4_early_init, .late_init = NULL, .sw_init = sdma_v2_4_sw_init, .sw_fini = sdma_v2_4_sw_fini, .hw_init = sdma_v2_4_hw_init, .hw_fini = sdma_v2_4_hw_fini, .suspend = sdma_v2_4_suspend, .resume = sdma_v2_4_resume, .is_idle = sdma_v2_4_is_idle, .wait_for_idle = sdma_v2_4_wait_for_idle, .soft_reset = sdma_v2_4_soft_reset, .print_status = sdma_v2_4_print_status, .set_clockgating_state = sdma_v2_4_set_clockgating_state, 
.set_powergating_state = sdma_v2_4_set_powergating_state, }; /** * sdma_v2_4_ring_is_lockup - Check if the DMA engine is locked up * * @ring: amdgpu_ring structure holding ring information * * Check if the async DMA engine is locked up (VI). * Returns true if the engine appears to be locked up, false if not. */ static bool sdma_v2_4_ring_is_lockup(struct amdgpu_ring *ring) { if (sdma_v2_4_is_idle(ring->adev)) { amdgpu_ring_lockup_update(ring); return false; } return amdgpu_ring_test_lockup(ring); } static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = { .get_rptr = sdma_v2_4_ring_get_rptr, .get_wptr = sdma_v2_4_ring_get_wptr, .set_wptr = sdma_v2_4_ring_set_wptr, .parse_cs = NULL, .emit_ib = sdma_v2_4_ring_emit_ib, .emit_fence = sdma_v2_4_ring_emit_fence, .emit_semaphore = sdma_v2_4_ring_emit_semaphore, .emit_vm_flush = sdma_v2_4_ring_emit_vm_flush, .emit_hdp_flush = sdma_v2_4_ring_emit_hdp_flush, .test_ring = sdma_v2_4_ring_test_ring, .test_ib = sdma_v2_4_ring_test_ib, .is_lockup = sdma_v2_4_ring_is_lockup, }; static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev) { adev->sdma[0].ring.funcs = &sdma_v2_4_ring_funcs; adev->sdma[1].ring.funcs = &sdma_v2_4_ring_funcs; } static const struct amdgpu_irq_src_funcs sdma_v2_4_trap_irq_funcs = { .set = sdma_v2_4_set_trap_irq_state, .process = sdma_v2_4_process_trap_irq, }; static const struct amdgpu_irq_src_funcs sdma_v2_4_illegal_inst_irq_funcs = { .process = sdma_v2_4_process_illegal_inst_irq, }; static void sdma_v2_4_set_irq_funcs(struct amdgpu_device *adev) { adev->sdma_trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST; adev->sdma_trap_irq.funcs = &sdma_v2_4_trap_irq_funcs; adev->sdma_illegal_inst_irq.funcs = &sdma_v2_4_illegal_inst_irq_funcs; } /** * sdma_v2_4_emit_copy_buffer - copy buffer using the sDMA engine * * @ring: amdgpu_ring structure holding ring information * @src_offset: src GPU address * @dst_offset: dst GPU address * @byte_count: number of bytes to xfer * * Copy GPU buffers using the DMA engine 
(VI). * Used by the amdgpu ttm implementation to move pages if * registered as the asic copy callback. */ static void sdma_v2_4_emit_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset, uint64_t dst_offset, uint32_t byte_count) { amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_COPY) | SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR)); amdgpu_ring_write(ring, byte_count); amdgpu_ring_write(ring, 0); /* src/dst endian swap */ amdgpu_ring_write(ring, lower_32_bits(src_offset)); amdgpu_ring_write(ring, upper_32_bits(src_offset)); amdgpu_ring_write(ring, lower_32_bits(dst_offset)); amdgpu_ring_write(ring, upper_32_bits(dst_offset)); } /** * sdma_v2_4_emit_fill_buffer - fill buffer using the sDMA engine * * @ring: amdgpu_ring structure holding ring information * @src_data: value to write to buffer * @dst_offset: dst GPU address * @byte_count: number of bytes to xfer * * Fill GPU buffers using the DMA engine (VI). */ static void sdma_v2_4_emit_fill_buffer(struct amdgpu_ring *ring, uint32_t src_data, uint64_t dst_offset, uint32_t byte_count) { amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL)); amdgpu_ring_write(ring, lower_32_bits(dst_offset)); amdgpu_ring_write(ring, upper_32_bits(dst_offset)); amdgpu_ring_write(ring, src_data); amdgpu_ring_write(ring, byte_count); } static const struct amdgpu_buffer_funcs sdma_v2_4_buffer_funcs = { .copy_max_bytes = 0x1fffff, .copy_num_dw = 7, .emit_copy_buffer = sdma_v2_4_emit_copy_buffer, .fill_max_bytes = 0x1fffff, .fill_num_dw = 7, .emit_fill_buffer = sdma_v2_4_emit_fill_buffer, }; static void sdma_v2_4_set_buffer_funcs(struct amdgpu_device *adev) { if (adev->mman.buffer_funcs == NULL) { adev->mman.buffer_funcs = &sdma_v2_4_buffer_funcs; adev->mman.buffer_funcs_ring = &adev->sdma[0].ring; } } static const struct amdgpu_vm_pte_funcs sdma_v2_4_vm_pte_funcs = { .copy_pte = sdma_v2_4_vm_copy_pte, .write_pte = sdma_v2_4_vm_write_pte, .set_pte_pde = sdma_v2_4_vm_set_pte_pde, .pad_ib = sdma_v2_4_vm_pad_ib, }; static 
void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev) { if (adev->vm_manager.vm_pte_funcs == NULL) { adev->vm_manager.vm_pte_funcs = &sdma_v2_4_vm_pte_funcs; adev->vm_manager.vm_pte_funcs_ring = &adev->sdma[0].ring; } }
gpl-2.0
xinglin/qemu-2.0.2
roms/openbios/packages/init.c
110
1127
/* * Creation Date: <2003/12/23 00:28:05 samuel> * Time-stamp: <2003/12/28 19:43:41 samuel> * * <init.c> * * Module intialization * * Copyright (C) 2003 Samuel Rydh (samuel@ibrium.se) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 * */ #include "config.h" #include "kernel/kernel.h" #include "packages.h" void modules_init( void ) { #ifdef CONFIG_CMDLINE cmdline_init(); #endif #ifdef CONFIG_DEBLOCKER deblocker_init(); #endif #ifdef CONFIG_DISK_LABEL disklabel_init(); #endif #ifdef CONFIG_HFSP hfsp_init(); #endif #ifdef CONFIG_HFS hfs_init(); #endif #ifdef CONFIG_EXT2 ext2_init(); #endif #ifdef CONFIG_ISO9660 iso9660_init(); #endif #ifdef CONFIG_GRUBFS grubfs_init(); #endif #ifdef CONFIG_MAC_PARTS macparts_init(); #endif #ifdef CONFIG_PC_PARTS pcparts_init(); #endif #ifdef CONFIG_SUN_PARTS sunparts_init(); #endif #ifdef CONFIG_LOADER_XCOFF xcoff_loader_init(); #endif #ifdef CONFIG_LOADER_ELF elf_loader_init(); #endif #ifdef CONFIG_LOADER_BOOTINFO bootinfo_loader_init(); #endif }
gpl-2.0
TheSSJ/zf2_mmkernel
net/ipv6/sit.c
622
38904
/* * IPv6 over IPv4 tunnel device - Simple Internet Transition (SIT) * Linux INET6 implementation * * Authors: * Pedro Roque <roque@di.fc.ul.pt> * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Changes: * Roger Venning <r.venning@telstra.com>: 6to4 support * Nate Thompson <nate@thebog.net>: 6to4 support * Fred Templin <fred.l.templin@boeing.com>: isatap support */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/capability.h> #include <linux/errno.h> #include <linux/types.h> #include <linux/socket.h> #include <linux/sockios.h> #include <linux/net.h> #include <linux/in6.h> #include <linux/netdevice.h> #include <linux/if_arp.h> #include <linux/icmp.h> #include <linux/slab.h> #include <asm/uaccess.h> #include <linux/init.h> #include <linux/netfilter_ipv4.h> #include <linux/if_ether.h> #include <net/sock.h> #include <net/snmp.h> #include <net/ipv6.h> #include <net/protocol.h> #include <net/transp_v6.h> #include <net/ip6_fib.h> #include <net/ip6_route.h> #include <net/ndisc.h> #include <net/addrconf.h> #include <net/ip.h> #include <net/udp.h> #include <net/icmp.h> #include <net/ip_tunnels.h> #include <net/inet_ecn.h> #include <net/xfrm.h> #include <net/dsfield.h> #include <net/net_namespace.h> #include <net/netns/generic.h> /* This version of net/ipv6/sit.c is cloned of net/ipv4/ip_gre.c For comments look at net/ipv4/ip_gre.c --ANK */ #define HASH_SIZE 16 #define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&0xF) static bool log_ecn_error = true; module_param(log_ecn_error, bool, 0644); MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN"); static int ipip6_tunnel_init(struct net_device *dev); static void ipip6_tunnel_setup(struct net_device *dev); static void 
ipip6_dev_free(struct net_device *dev); static bool check_6rd(struct ip_tunnel *tunnel, const struct in6_addr *v6dst, __be32 *v4dst); static struct rtnl_link_ops sit_link_ops __read_mostly; static int sit_net_id __read_mostly; struct sit_net { struct ip_tunnel __rcu *tunnels_r_l[HASH_SIZE]; struct ip_tunnel __rcu *tunnels_r[HASH_SIZE]; struct ip_tunnel __rcu *tunnels_l[HASH_SIZE]; struct ip_tunnel __rcu *tunnels_wc[1]; struct ip_tunnel __rcu **tunnels[4]; struct net_device *fb_tunnel_dev; }; /* * Must be invoked with rcu_read_lock */ static struct ip_tunnel *ipip6_tunnel_lookup(struct net *net, struct net_device *dev, __be32 remote, __be32 local) { unsigned int h0 = HASH(remote); unsigned int h1 = HASH(local); struct ip_tunnel *t; struct sit_net *sitn = net_generic(net, sit_net_id); for_each_ip_tunnel_rcu(t, sitn->tunnels_r_l[h0 ^ h1]) { if (local == t->parms.iph.saddr && remote == t->parms.iph.daddr && (!dev || !t->parms.link || dev->ifindex == t->parms.link) && (t->dev->flags & IFF_UP)) return t; } for_each_ip_tunnel_rcu(t, sitn->tunnels_r[h0]) { if (remote == t->parms.iph.daddr && (!dev || !t->parms.link || dev->ifindex == t->parms.link) && (t->dev->flags & IFF_UP)) return t; } for_each_ip_tunnel_rcu(t, sitn->tunnels_l[h1]) { if (local == t->parms.iph.saddr && (!dev || !t->parms.link || dev->ifindex == t->parms.link) && (t->dev->flags & IFF_UP)) return t; } t = rcu_dereference(sitn->tunnels_wc[0]); if ((t != NULL) && (t->dev->flags & IFF_UP)) return t; return NULL; } static struct ip_tunnel __rcu **__ipip6_bucket(struct sit_net *sitn, struct ip_tunnel_parm *parms) { __be32 remote = parms->iph.daddr; __be32 local = parms->iph.saddr; unsigned int h = 0; int prio = 0; if (remote) { prio |= 2; h ^= HASH(remote); } if (local) { prio |= 1; h ^= HASH(local); } return &sitn->tunnels[prio][h]; } static inline struct ip_tunnel __rcu **ipip6_bucket(struct sit_net *sitn, struct ip_tunnel *t) { return __ipip6_bucket(sitn, &t->parms); } static void ipip6_tunnel_unlink(struct 
sit_net *sitn, struct ip_tunnel *t) { struct ip_tunnel __rcu **tp; struct ip_tunnel *iter; for (tp = ipip6_bucket(sitn, t); (iter = rtnl_dereference(*tp)) != NULL; tp = &iter->next) { if (t == iter) { rcu_assign_pointer(*tp, t->next); break; } } } static void ipip6_tunnel_link(struct sit_net *sitn, struct ip_tunnel *t) { struct ip_tunnel __rcu **tp = ipip6_bucket(sitn, t); rcu_assign_pointer(t->next, rtnl_dereference(*tp)); rcu_assign_pointer(*tp, t); } static void ipip6_tunnel_clone_6rd(struct net_device *dev, struct sit_net *sitn) { #ifdef CONFIG_IPV6_SIT_6RD struct ip_tunnel *t = netdev_priv(dev); if (t->dev == sitn->fb_tunnel_dev) { ipv6_addr_set(&t->ip6rd.prefix, htonl(0x20020000), 0, 0, 0); t->ip6rd.relay_prefix = 0; t->ip6rd.prefixlen = 16; t->ip6rd.relay_prefixlen = 0; } else { struct ip_tunnel *t0 = netdev_priv(sitn->fb_tunnel_dev); memcpy(&t->ip6rd, &t0->ip6rd, sizeof(t->ip6rd)); } #endif } static int ipip6_tunnel_create(struct net_device *dev) { struct ip_tunnel *t = netdev_priv(dev); struct net *net = dev_net(dev); struct sit_net *sitn = net_generic(net, sit_net_id); int err; err = ipip6_tunnel_init(dev); if (err < 0) goto out; ipip6_tunnel_clone_6rd(dev, sitn); if ((__force u16)t->parms.i_flags & SIT_ISATAP) dev->priv_flags |= IFF_ISATAP; err = register_netdevice(dev); if (err < 0) goto out; strcpy(t->parms.name, dev->name); dev->rtnl_link_ops = &sit_link_ops; dev_hold(dev); ipip6_tunnel_link(sitn, t); return 0; out: return err; } static struct ip_tunnel *ipip6_tunnel_locate(struct net *net, struct ip_tunnel_parm *parms, int create) { __be32 remote = parms->iph.daddr; __be32 local = parms->iph.saddr; struct ip_tunnel *t, *nt; struct ip_tunnel __rcu **tp; struct net_device *dev; char name[IFNAMSIZ]; struct sit_net *sitn = net_generic(net, sit_net_id); for (tp = __ipip6_bucket(sitn, parms); (t = rtnl_dereference(*tp)) != NULL; tp = &t->next) { if (local == t->parms.iph.saddr && remote == t->parms.iph.daddr && parms->link == t->parms.link) { if (create) 
return NULL; else return t; } } if (!create) goto failed; if (parms->name[0]) strlcpy(name, parms->name, IFNAMSIZ); else strcpy(name, "sit%d"); dev = alloc_netdev(sizeof(*t), name, ipip6_tunnel_setup); if (dev == NULL) return NULL; dev_net_set(dev, net); nt = netdev_priv(dev); nt->parms = *parms; if (ipip6_tunnel_create(dev) < 0) goto failed_free; return nt; failed_free: ipip6_dev_free(dev); failed: return NULL; } #define for_each_prl_rcu(start) \ for (prl = rcu_dereference(start); \ prl; \ prl = rcu_dereference(prl->next)) static struct ip_tunnel_prl_entry * __ipip6_tunnel_locate_prl(struct ip_tunnel *t, __be32 addr) { struct ip_tunnel_prl_entry *prl; for_each_prl_rcu(t->prl) if (prl->addr == addr) break; return prl; } static int ipip6_tunnel_get_prl(struct ip_tunnel *t, struct ip_tunnel_prl __user *a) { struct ip_tunnel_prl kprl, *kp; struct ip_tunnel_prl_entry *prl; unsigned int cmax, c = 0, ca, len; int ret = 0; if (copy_from_user(&kprl, a, sizeof(kprl))) return -EFAULT; cmax = kprl.datalen / sizeof(kprl); if (cmax > 1 && kprl.addr != htonl(INADDR_ANY)) cmax = 1; /* For simple GET or for root users, * we try harder to allocate. */ kp = (cmax <= 1 || capable(CAP_NET_ADMIN)) ? kcalloc(cmax, sizeof(*kp), GFP_KERNEL) : NULL; rcu_read_lock(); ca = t->prl_count < cmax ? t->prl_count : cmax; if (!kp) { /* We don't try hard to allocate much memory for * non-root users. * For root users, retry allocating enough memory for * the answer. 
*/ kp = kcalloc(ca, sizeof(*kp), GFP_ATOMIC); if (!kp) { ret = -ENOMEM; goto out; } } c = 0; for_each_prl_rcu(t->prl) { if (c >= cmax) break; if (kprl.addr != htonl(INADDR_ANY) && prl->addr != kprl.addr) continue; kp[c].addr = prl->addr; kp[c].flags = prl->flags; c++; if (kprl.addr != htonl(INADDR_ANY)) break; } out: rcu_read_unlock(); len = sizeof(*kp) * c; ret = 0; if ((len && copy_to_user(a + 1, kp, len)) || put_user(len, &a->datalen)) ret = -EFAULT; kfree(kp); return ret; } static int ipip6_tunnel_add_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a, int chg) { struct ip_tunnel_prl_entry *p; int err = 0; if (a->addr == htonl(INADDR_ANY)) return -EINVAL; ASSERT_RTNL(); for (p = rtnl_dereference(t->prl); p; p = rtnl_dereference(p->next)) { if (p->addr == a->addr) { if (chg) { p->flags = a->flags; goto out; } err = -EEXIST; goto out; } } if (chg) { err = -ENXIO; goto out; } p = kzalloc(sizeof(struct ip_tunnel_prl_entry), GFP_KERNEL); if (!p) { err = -ENOBUFS; goto out; } p->next = t->prl; p->addr = a->addr; p->flags = a->flags; t->prl_count++; rcu_assign_pointer(t->prl, p); out: return err; } static void prl_list_destroy_rcu(struct rcu_head *head) { struct ip_tunnel_prl_entry *p, *n; p = container_of(head, struct ip_tunnel_prl_entry, rcu_head); do { n = rcu_dereference_protected(p->next, 1); kfree(p); p = n; } while (p); } static int ipip6_tunnel_del_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a) { struct ip_tunnel_prl_entry *x; struct ip_tunnel_prl_entry __rcu **p; int err = 0; ASSERT_RTNL(); if (a && a->addr != htonl(INADDR_ANY)) { for (p = &t->prl; (x = rtnl_dereference(*p)) != NULL; p = &x->next) { if (x->addr == a->addr) { *p = x->next; kfree_rcu(x, rcu_head); t->prl_count--; goto out; } } err = -ENXIO; } else { x = rtnl_dereference(t->prl); if (x) { t->prl_count = 0; call_rcu(&x->rcu_head, prl_list_destroy_rcu); t->prl = NULL; } } out: return err; } static int isatap_chksrc(struct sk_buff *skb, const struct iphdr *iph, struct ip_tunnel *t) { struct 
ip_tunnel_prl_entry *p; int ok = 1; rcu_read_lock(); p = __ipip6_tunnel_locate_prl(t, iph->saddr); if (p) { if (p->flags & PRL_DEFAULT) skb->ndisc_nodetype = NDISC_NODETYPE_DEFAULT; else skb->ndisc_nodetype = NDISC_NODETYPE_NODEFAULT; } else { const struct in6_addr *addr6 = &ipv6_hdr(skb)->saddr; if (ipv6_addr_is_isatap(addr6) && (addr6->s6_addr32[3] == iph->saddr) && ipv6_chk_prefix(addr6, t->dev)) skb->ndisc_nodetype = NDISC_NODETYPE_HOST; else ok = 0; } rcu_read_unlock(); return ok; } static void ipip6_tunnel_uninit(struct net_device *dev) { struct net *net = dev_net(dev); struct sit_net *sitn = net_generic(net, sit_net_id); if (dev == sitn->fb_tunnel_dev) { RCU_INIT_POINTER(sitn->tunnels_wc[0], NULL); } else { ipip6_tunnel_unlink(sitn, netdev_priv(dev)); ipip6_tunnel_del_prl(netdev_priv(dev), NULL); } dev_put(dev); } static int ipip6_err(struct sk_buff *skb, u32 info) { /* All the routers (except for Linux) return only 8 bytes of packet payload. It means, that precise relaying of ICMP in the real Internet is absolutely infeasible. */ const struct iphdr *iph = (const struct iphdr *)skb->data; const int type = icmp_hdr(skb)->type; const int code = icmp_hdr(skb)->code; struct ip_tunnel *t; int err; switch (type) { default: case ICMP_PARAMETERPROB: return 0; case ICMP_DEST_UNREACH: switch (code) { case ICMP_SR_FAILED: case ICMP_PORT_UNREACH: /* Impossible event. */ return 0; default: /* All others are translated to HOST_UNREACH. rfc2003 contains "deep thoughts" about NET_UNREACH, I believe they are just ether pollution. 
--ANK */ break; } break; case ICMP_TIME_EXCEEDED: if (code != ICMP_EXC_TTL) return 0; break; case ICMP_REDIRECT: break; } err = -ENOENT; t = ipip6_tunnel_lookup(dev_net(skb->dev), skb->dev, iph->daddr, iph->saddr); if (t == NULL) goto out; if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) { ipv4_update_pmtu(skb, dev_net(skb->dev), info, t->parms.link, 0, IPPROTO_IPV6, 0); err = 0; goto out; } if (type == ICMP_REDIRECT) { ipv4_redirect(skb, dev_net(skb->dev), t->parms.link, 0, IPPROTO_IPV6, 0); err = 0; goto out; } if (t->parms.iph.daddr == 0) goto out; err = 0; if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED) goto out; if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO)) t->err_count++; else t->err_count = 1; t->err_time = jiffies; out: return err; } static inline bool is_spoofed_6rd(struct ip_tunnel *tunnel, const __be32 v4addr, const struct in6_addr *v6addr) { __be32 v4embed = 0; if (check_6rd(tunnel, v6addr, &v4embed) && v4addr != v4embed) return true; return false; } /* Checks if an address matches an address on the tunnel interface. * Used to detect the NAT of proto 41 packets and let them pass spoofing test. * Long story: * This function is called after we considered the packet as spoofed * in is_spoofed_6rd. * We may have a router that is doing NAT for proto 41 packets * for an internal station. Destination a.a.a.a/PREFIX:bbbb:bbbb * will be translated to n.n.n.n/PREFIX:bbbb:bbbb. And is_spoofed_6rd * function will return true, dropping the packet. * But, we can still check if is spoofed against the IP * addresses associated with the interface. 
*/ static bool only_dnatted(const struct ip_tunnel *tunnel, const struct in6_addr *v6dst) { int prefix_len; #ifdef CONFIG_IPV6_SIT_6RD prefix_len = tunnel->ip6rd.prefixlen + 32 - tunnel->ip6rd.relay_prefixlen; #else prefix_len = 48; #endif return ipv6_chk_custom_prefix(v6dst, prefix_len, tunnel->dev); } /* Returns true if a packet is spoofed */ static bool packet_is_spoofed(struct sk_buff *skb, const struct iphdr *iph, struct ip_tunnel *tunnel) { const struct ipv6hdr *ipv6h; if (tunnel->dev->priv_flags & IFF_ISATAP) { if (!isatap_chksrc(skb, iph, tunnel)) return true; return false; } if (tunnel->dev->flags & IFF_POINTOPOINT) return false; ipv6h = ipv6_hdr(skb); if (unlikely(is_spoofed_6rd(tunnel, iph->saddr, &ipv6h->saddr))) { net_warn_ratelimited("Src spoofed %pI4/%pI6c -> %pI4/%pI6c\n", &iph->saddr, &ipv6h->saddr, &iph->daddr, &ipv6h->daddr); return true; } if (likely(!is_spoofed_6rd(tunnel, iph->daddr, &ipv6h->daddr))) return false; if (only_dnatted(tunnel, &ipv6h->daddr)) return false; net_warn_ratelimited("Dst spoofed %pI4/%pI6c -> %pI4/%pI6c\n", &iph->saddr, &ipv6h->saddr, &iph->daddr, &ipv6h->daddr); return true; } static int ipip6_rcv(struct sk_buff *skb) { const struct iphdr *iph = ip_hdr(skb); struct ip_tunnel *tunnel; int err; tunnel = ipip6_tunnel_lookup(dev_net(skb->dev), skb->dev, iph->saddr, iph->daddr); if (tunnel != NULL) { struct pcpu_tstats *tstats; secpath_reset(skb); skb->mac_header = skb->network_header; skb_reset_network_header(skb); IPCB(skb)->flags = 0; skb->protocol = htons(ETH_P_IPV6); skb->pkt_type = PACKET_HOST; if (packet_is_spoofed(skb, iph, tunnel)) { tunnel->dev->stats.rx_errors++; goto out; } __skb_tunnel_rx(skb, tunnel->dev); err = IP_ECN_decapsulate(iph, skb); if (unlikely(err)) { if (log_ecn_error) net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n", &iph->saddr, iph->tos); if (err > 1) { ++tunnel->dev->stats.rx_frame_errors; ++tunnel->dev->stats.rx_errors; goto out; } } tstats = this_cpu_ptr(tunnel->dev->tstats); 
tstats->rx_packets++; tstats->rx_bytes += skb->len; netif_rx(skb); return 0; } /* no tunnel matched, let upstream know, ipsec may handle it */ return 1; out: kfree_skb(skb); return 0; } /* * If the IPv6 address comes from 6rd / 6to4 (RFC 3056) addr space this function * stores the embedded IPv4 address in v4dst and returns true. */ static bool check_6rd(struct ip_tunnel *tunnel, const struct in6_addr *v6dst, __be32 *v4dst) { #ifdef CONFIG_IPV6_SIT_6RD if (ipv6_prefix_equal(v6dst, &tunnel->ip6rd.prefix, tunnel->ip6rd.prefixlen)) { unsigned int pbw0, pbi0; int pbi1; u32 d; pbw0 = tunnel->ip6rd.prefixlen >> 5; pbi0 = tunnel->ip6rd.prefixlen & 0x1f; d = (ntohl(v6dst->s6_addr32[pbw0]) << pbi0) >> tunnel->ip6rd.relay_prefixlen; pbi1 = pbi0 - tunnel->ip6rd.relay_prefixlen; if (pbi1 > 0) d |= ntohl(v6dst->s6_addr32[pbw0 + 1]) >> (32 - pbi1); *v4dst = tunnel->ip6rd.relay_prefix | htonl(d); return true; } #else if (v6dst->s6_addr16[0] == htons(0x2002)) { /* 6to4 v6 addr has 16 bits prefix, 32 v4addr, 16 SLA, ... */ memcpy(v4dst, &v6dst->s6_addr16[1], 4); return true; } #endif return false; } static inline __be32 try_6rd(struct ip_tunnel *tunnel, const struct in6_addr *v6dst) { __be32 dst = 0; check_6rd(tunnel, v6dst, &dst); return dst; } /* * This function assumes it is being called from dev_queue_xmit() * and that skb is filled properly by that function. 
*/ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) { struct ip_tunnel *tunnel = netdev_priv(dev); const struct iphdr *tiph = &tunnel->parms.iph; const struct ipv6hdr *iph6 = ipv6_hdr(skb); u8 tos = tunnel->parms.iph.tos; __be16 df = tiph->frag_off; struct rtable *rt; /* Route to the other host */ struct net_device *tdev; /* Device to other host */ struct iphdr *iph; /* Our new IP header */ unsigned int max_headroom; /* The extra header space needed */ __be32 dst = tiph->daddr; struct flowi4 fl4; int mtu; const struct in6_addr *addr6; int addr_type; if (skb->protocol != htons(ETH_P_IPV6)) goto tx_error; if (tos == 1) tos = ipv6_get_dsfield(iph6); /* ISATAP (RFC4214) - must come before 6to4 */ if (dev->priv_flags & IFF_ISATAP) { struct neighbour *neigh = NULL; bool do_tx_error = false; if (skb_dst(skb)) neigh = dst_neigh_lookup(skb_dst(skb), &iph6->daddr); if (neigh == NULL) { net_dbg_ratelimited("nexthop == NULL\n"); goto tx_error; } addr6 = (const struct in6_addr *)&neigh->primary_key; addr_type = ipv6_addr_type(addr6); if ((addr_type & IPV6_ADDR_UNICAST) && ipv6_addr_is_isatap(addr6)) dst = addr6->s6_addr32[3]; else do_tx_error = true; neigh_release(neigh); if (do_tx_error) goto tx_error; } if (!dst) dst = try_6rd(tunnel, &iph6->daddr); if (!dst) { struct neighbour *neigh = NULL; bool do_tx_error = false; if (skb_dst(skb)) neigh = dst_neigh_lookup(skb_dst(skb), &iph6->daddr); if (neigh == NULL) { net_dbg_ratelimited("nexthop == NULL\n"); goto tx_error; } addr6 = (const struct in6_addr *)&neigh->primary_key; addr_type = ipv6_addr_type(addr6); if (addr_type == IPV6_ADDR_ANY) { addr6 = &ipv6_hdr(skb)->daddr; addr_type = ipv6_addr_type(addr6); } if ((addr_type & IPV6_ADDR_COMPATv4) != 0) dst = addr6->s6_addr32[3]; else do_tx_error = true; neigh_release(neigh); if (do_tx_error) goto tx_error; } rt = ip_route_output_ports(dev_net(dev), &fl4, NULL, dst, tiph->saddr, 0, 0, IPPROTO_IPV6, RT_TOS(tos), tunnel->parms.link); if (IS_ERR(rt)) { 
dev->stats.tx_carrier_errors++; goto tx_error_icmp; } if (rt->rt_type != RTN_UNICAST) { ip_rt_put(rt); dev->stats.tx_carrier_errors++; goto tx_error_icmp; } tdev = rt->dst.dev; if (tdev == dev) { ip_rt_put(rt); dev->stats.collisions++; goto tx_error; } if (df) { mtu = dst_mtu(&rt->dst) - sizeof(struct iphdr); if (mtu < 68) { dev->stats.collisions++; ip_rt_put(rt); goto tx_error; } if (mtu < IPV6_MIN_MTU) { mtu = IPV6_MIN_MTU; df = 0; } if (tunnel->parms.iph.daddr && skb_dst(skb)) skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu); if (skb->len > mtu) { icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); ip_rt_put(rt); goto tx_error; } } if (tunnel->err_count > 0) { if (time_before(jiffies, tunnel->err_time + IPTUNNEL_ERR_TIMEO)) { tunnel->err_count--; dst_link_failure(skb); } else tunnel->err_count = 0; } /* * Okay, now see if we can stuff it in the buffer as-is. */ max_headroom = LL_RESERVED_SPACE(tdev)+sizeof(struct iphdr); if (skb_headroom(skb) < max_headroom || skb_shared(skb) || (skb_cloned(skb) && !skb_clone_writable(skb, 0))) { struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom); if (!new_skb) { ip_rt_put(rt); dev->stats.tx_dropped++; dev_kfree_skb(skb); return NETDEV_TX_OK; } if (skb->sk) skb_set_owner_w(new_skb, skb->sk); dev_kfree_skb(skb); skb = new_skb; iph6 = ipv6_hdr(skb); } skb->transport_header = skb->network_header; skb_push(skb, sizeof(struct iphdr)); skb_reset_network_header(skb); memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); IPCB(skb)->flags = 0; skb_dst_drop(skb); skb_dst_set(skb, &rt->dst); /* * Push down and install the IPIP header. 
*/ iph = ip_hdr(skb); iph->version = 4; iph->ihl = sizeof(struct iphdr)>>2; iph->frag_off = df; iph->protocol = IPPROTO_IPV6; iph->tos = INET_ECN_encapsulate(tos, ipv6_get_dsfield(iph6)); iph->daddr = fl4.daddr; iph->saddr = fl4.saddr; if ((iph->ttl = tiph->ttl) == 0) iph->ttl = iph6->hop_limit; skb->ip_summed = CHECKSUM_NONE; ip_select_ident(skb, NULL); iptunnel_xmit(skb, dev); return NETDEV_TX_OK; tx_error_icmp: dst_link_failure(skb); tx_error: dev->stats.tx_errors++; dev_kfree_skb(skb); return NETDEV_TX_OK; } static void ipip6_tunnel_bind_dev(struct net_device *dev) { struct net_device *tdev = NULL; struct ip_tunnel *tunnel; const struct iphdr *iph; struct flowi4 fl4; tunnel = netdev_priv(dev); iph = &tunnel->parms.iph; if (iph->daddr) { struct rtable *rt = ip_route_output_ports(dev_net(dev), &fl4, NULL, iph->daddr, iph->saddr, 0, 0, IPPROTO_IPV6, RT_TOS(iph->tos), tunnel->parms.link); if (!IS_ERR(rt)) { tdev = rt->dst.dev; ip_rt_put(rt); } dev->flags |= IFF_POINTOPOINT; } if (!tdev && tunnel->parms.link) tdev = __dev_get_by_index(dev_net(dev), tunnel->parms.link); if (tdev) { dev->hard_header_len = tdev->hard_header_len + sizeof(struct iphdr); dev->mtu = tdev->mtu - sizeof(struct iphdr); if (dev->mtu < IPV6_MIN_MTU) dev->mtu = IPV6_MIN_MTU; } dev->iflink = tunnel->parms.link; } static void ipip6_tunnel_update(struct ip_tunnel *t, struct ip_tunnel_parm *p) { struct net *net = dev_net(t->dev); struct sit_net *sitn = net_generic(net, sit_net_id); ipip6_tunnel_unlink(sitn, t); synchronize_net(); t->parms.iph.saddr = p->iph.saddr; t->parms.iph.daddr = p->iph.daddr; memcpy(t->dev->dev_addr, &p->iph.saddr, 4); memcpy(t->dev->broadcast, &p->iph.daddr, 4); ipip6_tunnel_link(sitn, t); t->parms.iph.ttl = p->iph.ttl; t->parms.iph.tos = p->iph.tos; if (t->parms.link != p->link) { t->parms.link = p->link; ipip6_tunnel_bind_dev(t->dev); } netdev_state_change(t->dev); } #ifdef CONFIG_IPV6_SIT_6RD static int ipip6_tunnel_update_6rd(struct ip_tunnel *t, struct ip_tunnel_6rd 
*ip6rd) { struct in6_addr prefix; __be32 relay_prefix; if (ip6rd->relay_prefixlen > 32 || ip6rd->prefixlen + (32 - ip6rd->relay_prefixlen) > 64) return -EINVAL; ipv6_addr_prefix(&prefix, &ip6rd->prefix, ip6rd->prefixlen); if (!ipv6_addr_equal(&prefix, &ip6rd->prefix)) return -EINVAL; if (ip6rd->relay_prefixlen) relay_prefix = ip6rd->relay_prefix & htonl(0xffffffffUL << (32 - ip6rd->relay_prefixlen)); else relay_prefix = 0; if (relay_prefix != ip6rd->relay_prefix) return -EINVAL; t->ip6rd.prefix = prefix; t->ip6rd.relay_prefix = relay_prefix; t->ip6rd.prefixlen = ip6rd->prefixlen; t->ip6rd.relay_prefixlen = ip6rd->relay_prefixlen; netdev_state_change(t->dev); return 0; } #endif static int ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) { int err = 0; struct ip_tunnel_parm p; struct ip_tunnel_prl prl; struct ip_tunnel *t; struct net *net = dev_net(dev); struct sit_net *sitn = net_generic(net, sit_net_id); #ifdef CONFIG_IPV6_SIT_6RD struct ip_tunnel_6rd ip6rd; #endif switch (cmd) { case SIOCGETTUNNEL: #ifdef CONFIG_IPV6_SIT_6RD case SIOCGET6RD: #endif t = NULL; if (dev == sitn->fb_tunnel_dev) { if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) { err = -EFAULT; break; } t = ipip6_tunnel_locate(net, &p, 0); } if (t == NULL) t = netdev_priv(dev); err = -EFAULT; if (cmd == SIOCGETTUNNEL) { memcpy(&p, &t->parms, sizeof(p)); if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p))) goto done; #ifdef CONFIG_IPV6_SIT_6RD } else { ip6rd.prefix = t->ip6rd.prefix; ip6rd.relay_prefix = t->ip6rd.relay_prefix; ip6rd.prefixlen = t->ip6rd.prefixlen; ip6rd.relay_prefixlen = t->ip6rd.relay_prefixlen; if (copy_to_user(ifr->ifr_ifru.ifru_data, &ip6rd, sizeof(ip6rd))) goto done; #endif } err = 0; break; case SIOCADDTUNNEL: case SIOCCHGTUNNEL: err = -EPERM; if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) goto done; err = -EFAULT; if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) goto done; err = -EINVAL; if (p.iph.version != 4 || p.iph.protocol != 
IPPROTO_IPV6 || p.iph.ihl != 5 || (p.iph.frag_off&htons(~IP_DF))) goto done; if (p.iph.ttl) p.iph.frag_off |= htons(IP_DF); t = ipip6_tunnel_locate(net, &p, cmd == SIOCADDTUNNEL); if (dev != sitn->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) { if (t != NULL) { if (t->dev != dev) { err = -EEXIST; break; } } else { if (((dev->flags&IFF_POINTOPOINT) && !p.iph.daddr) || (!(dev->flags&IFF_POINTOPOINT) && p.iph.daddr)) { err = -EINVAL; break; } t = netdev_priv(dev); } ipip6_tunnel_update(t, &p); } if (t) { err = 0; if (copy_to_user(ifr->ifr_ifru.ifru_data, &t->parms, sizeof(p))) err = -EFAULT; } else err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT); break; case SIOCDELTUNNEL: err = -EPERM; if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) goto done; if (dev == sitn->fb_tunnel_dev) { err = -EFAULT; if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) goto done; err = -ENOENT; if ((t = ipip6_tunnel_locate(net, &p, 0)) == NULL) goto done; err = -EPERM; if (t == netdev_priv(sitn->fb_tunnel_dev)) goto done; dev = t->dev; } unregister_netdevice(dev); err = 0; break; case SIOCGETPRL: err = -EINVAL; if (dev == sitn->fb_tunnel_dev) goto done; err = -ENOENT; if (!(t = netdev_priv(dev))) goto done; err = ipip6_tunnel_get_prl(t, ifr->ifr_ifru.ifru_data); break; case SIOCADDPRL: case SIOCDELPRL: case SIOCCHGPRL: err = -EPERM; if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) goto done; err = -EINVAL; if (dev == sitn->fb_tunnel_dev) goto done; err = -EFAULT; if (copy_from_user(&prl, ifr->ifr_ifru.ifru_data, sizeof(prl))) goto done; err = -ENOENT; if (!(t = netdev_priv(dev))) goto done; switch (cmd) { case SIOCDELPRL: err = ipip6_tunnel_del_prl(t, &prl); break; case SIOCADDPRL: case SIOCCHGPRL: err = ipip6_tunnel_add_prl(t, &prl, cmd == SIOCCHGPRL); break; } netdev_state_change(dev); break; #ifdef CONFIG_IPV6_SIT_6RD case SIOCADD6RD: case SIOCCHG6RD: case SIOCDEL6RD: err = -EPERM; if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) goto done; err = -EFAULT; if (copy_from_user(&ip6rd, 
ifr->ifr_ifru.ifru_data, sizeof(ip6rd))) goto done; t = netdev_priv(dev); if (cmd != SIOCDEL6RD) { err = ipip6_tunnel_update_6rd(t, &ip6rd); if (err < 0) goto done; } else ipip6_tunnel_clone_6rd(dev, sitn); err = 0; break; #endif default: err = -EINVAL; } done: return err; } static int ipip6_tunnel_change_mtu(struct net_device *dev, int new_mtu) { if (new_mtu < IPV6_MIN_MTU || new_mtu > 0xFFF8 - sizeof(struct iphdr)) return -EINVAL; dev->mtu = new_mtu; return 0; } static const struct net_device_ops ipip6_netdev_ops = { .ndo_uninit = ipip6_tunnel_uninit, .ndo_start_xmit = ipip6_tunnel_xmit, .ndo_do_ioctl = ipip6_tunnel_ioctl, .ndo_change_mtu = ipip6_tunnel_change_mtu, .ndo_get_stats64 = ip_tunnel_get_stats64, }; static void ipip6_dev_free(struct net_device *dev) { free_percpu(dev->tstats); free_netdev(dev); } static void ipip6_tunnel_setup(struct net_device *dev) { dev->netdev_ops = &ipip6_netdev_ops; dev->destructor = ipip6_dev_free; dev->type = ARPHRD_SIT; dev->hard_header_len = LL_MAX_HEADER + sizeof(struct iphdr); dev->mtu = ETH_DATA_LEN - sizeof(struct iphdr); dev->flags = IFF_NOARP; dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; dev->iflink = 0; dev->addr_len = 4; dev->features |= NETIF_F_NETNS_LOCAL; dev->features |= NETIF_F_LLTX; } static int ipip6_tunnel_init(struct net_device *dev) { struct ip_tunnel *tunnel = netdev_priv(dev); tunnel->dev = dev; memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4); memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4); ipip6_tunnel_bind_dev(dev); dev->tstats = alloc_percpu(struct pcpu_tstats); if (!dev->tstats) return -ENOMEM; return 0; } static int __net_init ipip6_fb_tunnel_init(struct net_device *dev) { struct ip_tunnel *tunnel = netdev_priv(dev); struct iphdr *iph = &tunnel->parms.iph; struct net *net = dev_net(dev); struct sit_net *sitn = net_generic(net, sit_net_id); tunnel->dev = dev; strcpy(tunnel->parms.name, dev->name); iph->version = 4; iph->protocol = IPPROTO_IPV6; iph->ihl = 5; iph->ttl = 64; dev->tstats = 
alloc_percpu(struct pcpu_tstats); if (!dev->tstats) return -ENOMEM; dev_hold(dev); rcu_assign_pointer(sitn->tunnels_wc[0], tunnel); return 0; } static void ipip6_netlink_parms(struct nlattr *data[], struct ip_tunnel_parm *parms) { memset(parms, 0, sizeof(*parms)); parms->iph.version = 4; parms->iph.protocol = IPPROTO_IPV6; parms->iph.ihl = 5; parms->iph.ttl = 64; if (!data) return; if (data[IFLA_IPTUN_LINK]) parms->link = nla_get_u32(data[IFLA_IPTUN_LINK]); if (data[IFLA_IPTUN_LOCAL]) parms->iph.saddr = nla_get_be32(data[IFLA_IPTUN_LOCAL]); if (data[IFLA_IPTUN_REMOTE]) parms->iph.daddr = nla_get_be32(data[IFLA_IPTUN_REMOTE]); if (data[IFLA_IPTUN_TTL]) { parms->iph.ttl = nla_get_u8(data[IFLA_IPTUN_TTL]); if (parms->iph.ttl) parms->iph.frag_off = htons(IP_DF); } if (data[IFLA_IPTUN_TOS]) parms->iph.tos = nla_get_u8(data[IFLA_IPTUN_TOS]); if (!data[IFLA_IPTUN_PMTUDISC] || nla_get_u8(data[IFLA_IPTUN_PMTUDISC])) parms->iph.frag_off = htons(IP_DF); if (data[IFLA_IPTUN_FLAGS]) parms->i_flags = nla_get_be16(data[IFLA_IPTUN_FLAGS]); } #ifdef CONFIG_IPV6_SIT_6RD /* This function returns true when 6RD attributes are present in the nl msg */ static bool ipip6_netlink_6rd_parms(struct nlattr *data[], struct ip_tunnel_6rd *ip6rd) { bool ret = false; memset(ip6rd, 0, sizeof(*ip6rd)); if (!data) return ret; if (data[IFLA_IPTUN_6RD_PREFIX]) { ret = true; nla_memcpy(&ip6rd->prefix, data[IFLA_IPTUN_6RD_PREFIX], sizeof(struct in6_addr)); } if (data[IFLA_IPTUN_6RD_RELAY_PREFIX]) { ret = true; ip6rd->relay_prefix = nla_get_be32(data[IFLA_IPTUN_6RD_RELAY_PREFIX]); } if (data[IFLA_IPTUN_6RD_PREFIXLEN]) { ret = true; ip6rd->prefixlen = nla_get_u16(data[IFLA_IPTUN_6RD_PREFIXLEN]); } if (data[IFLA_IPTUN_6RD_RELAY_PREFIXLEN]) { ret = true; ip6rd->relay_prefixlen = nla_get_u16(data[IFLA_IPTUN_6RD_RELAY_PREFIXLEN]); } return ret; } #endif static int ipip6_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[]) { struct net *net = dev_net(dev); struct 
ip_tunnel *nt; #ifdef CONFIG_IPV6_SIT_6RD struct ip_tunnel_6rd ip6rd; #endif int err; nt = netdev_priv(dev); ipip6_netlink_parms(data, &nt->parms); if (ipip6_tunnel_locate(net, &nt->parms, 0)) return -EEXIST; err = ipip6_tunnel_create(dev); if (err < 0) return err; #ifdef CONFIG_IPV6_SIT_6RD if (ipip6_netlink_6rd_parms(data, &ip6rd)) err = ipip6_tunnel_update_6rd(nt, &ip6rd); #endif return err; } static int ipip6_changelink(struct net_device *dev, struct nlattr *tb[], struct nlattr *data[]) { struct ip_tunnel *t; struct ip_tunnel_parm p; struct net *net = dev_net(dev); struct sit_net *sitn = net_generic(net, sit_net_id); #ifdef CONFIG_IPV6_SIT_6RD struct ip_tunnel_6rd ip6rd; #endif if (dev == sitn->fb_tunnel_dev) return -EINVAL; ipip6_netlink_parms(data, &p); if (((dev->flags & IFF_POINTOPOINT) && !p.iph.daddr) || (!(dev->flags & IFF_POINTOPOINT) && p.iph.daddr)) return -EINVAL; t = ipip6_tunnel_locate(net, &p, 0); if (t) { if (t->dev != dev) return -EEXIST; } else t = netdev_priv(dev); ipip6_tunnel_update(t, &p); #ifdef CONFIG_IPV6_SIT_6RD if (ipip6_netlink_6rd_parms(data, &ip6rd)) return ipip6_tunnel_update_6rd(t, &ip6rd); #endif return 0; } static size_t ipip6_get_size(const struct net_device *dev) { return /* IFLA_IPTUN_LINK */ nla_total_size(4) + /* IFLA_IPTUN_LOCAL */ nla_total_size(4) + /* IFLA_IPTUN_REMOTE */ nla_total_size(4) + /* IFLA_IPTUN_TTL */ nla_total_size(1) + /* IFLA_IPTUN_TOS */ nla_total_size(1) + /* IFLA_IPTUN_PMTUDISC */ nla_total_size(1) + /* IFLA_IPTUN_FLAGS */ nla_total_size(2) + #ifdef CONFIG_IPV6_SIT_6RD /* IFLA_IPTUN_6RD_PREFIX */ nla_total_size(sizeof(struct in6_addr)) + /* IFLA_IPTUN_6RD_RELAY_PREFIX */ nla_total_size(4) + /* IFLA_IPTUN_6RD_PREFIXLEN */ nla_total_size(2) + /* IFLA_IPTUN_6RD_RELAY_PREFIXLEN */ nla_total_size(2) + #endif 0; } static int ipip6_fill_info(struct sk_buff *skb, const struct net_device *dev) { struct ip_tunnel *tunnel = netdev_priv(dev); struct ip_tunnel_parm *parm = &tunnel->parms; if (nla_put_u32(skb, 
IFLA_IPTUN_LINK, parm->link) || nla_put_be32(skb, IFLA_IPTUN_LOCAL, parm->iph.saddr) || nla_put_be32(skb, IFLA_IPTUN_REMOTE, parm->iph.daddr) || nla_put_u8(skb, IFLA_IPTUN_TTL, parm->iph.ttl) || nla_put_u8(skb, IFLA_IPTUN_TOS, parm->iph.tos) || nla_put_u8(skb, IFLA_IPTUN_PMTUDISC, !!(parm->iph.frag_off & htons(IP_DF))) || nla_put_be16(skb, IFLA_IPTUN_FLAGS, parm->i_flags)) goto nla_put_failure; #ifdef CONFIG_IPV6_SIT_6RD if (nla_put(skb, IFLA_IPTUN_6RD_PREFIX, sizeof(struct in6_addr), &tunnel->ip6rd.prefix) || nla_put_be32(skb, IFLA_IPTUN_6RD_RELAY_PREFIX, tunnel->ip6rd.relay_prefix) || nla_put_u16(skb, IFLA_IPTUN_6RD_PREFIXLEN, tunnel->ip6rd.prefixlen) || nla_put_u16(skb, IFLA_IPTUN_6RD_RELAY_PREFIXLEN, tunnel->ip6rd.relay_prefixlen)) goto nla_put_failure; #endif return 0; nla_put_failure: return -EMSGSIZE; } static const struct nla_policy ipip6_policy[IFLA_IPTUN_MAX + 1] = { [IFLA_IPTUN_LINK] = { .type = NLA_U32 }, [IFLA_IPTUN_LOCAL] = { .type = NLA_U32 }, [IFLA_IPTUN_REMOTE] = { .type = NLA_U32 }, [IFLA_IPTUN_TTL] = { .type = NLA_U8 }, [IFLA_IPTUN_TOS] = { .type = NLA_U8 }, [IFLA_IPTUN_PMTUDISC] = { .type = NLA_U8 }, [IFLA_IPTUN_FLAGS] = { .type = NLA_U16 }, #ifdef CONFIG_IPV6_SIT_6RD [IFLA_IPTUN_6RD_PREFIX] = { .len = sizeof(struct in6_addr) }, [IFLA_IPTUN_6RD_RELAY_PREFIX] = { .type = NLA_U32 }, [IFLA_IPTUN_6RD_PREFIXLEN] = { .type = NLA_U16 }, [IFLA_IPTUN_6RD_RELAY_PREFIXLEN] = { .type = NLA_U16 }, #endif }; static void ipip6_dellink(struct net_device *dev, struct list_head *head) { struct net *net = dev_net(dev); struct sit_net *sitn = net_generic(net, sit_net_id); if (dev != sitn->fb_tunnel_dev) unregister_netdevice_queue(dev, head); } static struct rtnl_link_ops sit_link_ops __read_mostly = { .kind = "sit", .maxtype = IFLA_IPTUN_MAX, .policy = ipip6_policy, .priv_size = sizeof(struct ip_tunnel), .setup = ipip6_tunnel_setup, .newlink = ipip6_newlink, .changelink = ipip6_changelink, .get_size = ipip6_get_size, .fill_info = ipip6_fill_info, .dellink = 
ipip6_dellink, }; static struct xfrm_tunnel sit_handler __read_mostly = { .handler = ipip6_rcv, .err_handler = ipip6_err, .priority = 1, }; static void __net_exit sit_destroy_tunnels(struct sit_net *sitn, struct list_head *head) { int prio; for (prio = 1; prio < 4; prio++) { int h; for (h = 0; h < HASH_SIZE; h++) { struct ip_tunnel *t; t = rtnl_dereference(sitn->tunnels[prio][h]); while (t != NULL) { unregister_netdevice_queue(t->dev, head); t = rtnl_dereference(t->next); } } } } static int __net_init sit_init_net(struct net *net) { struct sit_net *sitn = net_generic(net, sit_net_id); struct ip_tunnel *t; int err; sitn->tunnels[0] = sitn->tunnels_wc; sitn->tunnels[1] = sitn->tunnels_l; sitn->tunnels[2] = sitn->tunnels_r; sitn->tunnels[3] = sitn->tunnels_r_l; sitn->fb_tunnel_dev = alloc_netdev(sizeof(struct ip_tunnel), "sit0", ipip6_tunnel_setup); if (!sitn->fb_tunnel_dev) { err = -ENOMEM; goto err_alloc_dev; } dev_net_set(sitn->fb_tunnel_dev, net); sitn->fb_tunnel_dev->rtnl_link_ops = &sit_link_ops; err = ipip6_fb_tunnel_init(sitn->fb_tunnel_dev); if (err) goto err_dev_free; ipip6_tunnel_clone_6rd(sitn->fb_tunnel_dev, sitn); if ((err = register_netdev(sitn->fb_tunnel_dev))) goto err_reg_dev; t = netdev_priv(sitn->fb_tunnel_dev); strcpy(t->parms.name, sitn->fb_tunnel_dev->name); return 0; err_reg_dev: dev_put(sitn->fb_tunnel_dev); err_dev_free: ipip6_dev_free(sitn->fb_tunnel_dev); err_alloc_dev: return err; } static void __net_exit sit_exit_net(struct net *net) { struct sit_net *sitn = net_generic(net, sit_net_id); LIST_HEAD(list); rtnl_lock(); sit_destroy_tunnels(sitn, &list); unregister_netdevice_queue(sitn->fb_tunnel_dev, &list); unregister_netdevice_many(&list); rtnl_unlock(); } static struct pernet_operations sit_net_ops = { .init = sit_init_net, .exit = sit_exit_net, .id = &sit_net_id, .size = sizeof(struct sit_net), }; static void __exit sit_cleanup(void) { rtnl_link_unregister(&sit_link_ops); xfrm4_tunnel_deregister(&sit_handler, AF_INET6); 
unregister_pernet_device(&sit_net_ops); rcu_barrier(); /* Wait for completion of call_rcu()'s */ } static int __init sit_init(void) { int err; pr_info("IPv6 over IPv4 tunneling driver\n"); err = register_pernet_device(&sit_net_ops); if (err < 0) return err; err = xfrm4_tunnel_register(&sit_handler, AF_INET6); if (err < 0) { pr_info("%s: can't add protocol\n", __func__); goto xfrm_tunnel_failed; } err = rtnl_link_register(&sit_link_ops); if (err < 0) goto rtnl_link_failed; out: return err; rtnl_link_failed: xfrm4_tunnel_deregister(&sit_handler, AF_INET6); xfrm_tunnel_failed: unregister_pernet_device(&sit_net_ops); goto out; } module_init(sit_init); module_exit(sit_cleanup); MODULE_LICENSE("GPL"); MODULE_ALIAS_RTNL_LINK("sit"); MODULE_ALIAS_NETDEV("sit0");
gpl-2.0
SiliconAcid/android_kernel_lge_g3
drivers/staging/prima/CORE/MAC/src/pe/lim/limLogDump.c
622
94790
/* * Copyright (c) 2012-2013 The Linux Foundation. All rights reserved. * * Previously licensed under the ISC license by Qualcomm Atheros, Inc. * * * Permission to use, copy, modify, and/or distribute this software for * any purpose with or without fee is hereby granted, provided that the * above copyright notice and this permission notice appear in all * copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR * PERFORMANCE OF THIS SOFTWARE. */ /* * This file was originally distributed by Qualcomm Atheros, Inc. * under proprietary terms before Copyright ownership was assigned * to the Linux Foundation. */ /*============================================================================ limLogDump.c Implements the dump commands specific to the lim module. Copyright (c) 2007 QUALCOMM Incorporated. All Rights Reserved. 
Qualcomm Confidential and Proprietary ============================================================================*/ #include "vos_types.h" #include "limApi.h" #if defined(ANI_LOGDUMP) #include "limUtils.h" #include "limSecurityUtils.h" #include "schApi.h" #include "limSerDesUtils.h" #include "limAssocUtils.h" #include "limSendMessages.h" #include "logDump.h" #include "limTrace.h" #if defined WLAN_FEATURE_VOWIFI #include "rrmApi.h" #endif #if defined WLAN_FEATURE_VOWIFI_11R #include <limFT.h> #endif #include "smeInside.h" #include "wlan_qct_wda.h" #include "wlan_qct_wdi_dts.h" void WDA_TimerTrafficStatsInd(tWDA_CbContext *pWDA); #ifdef WLANTL_DEBUG extern void WLANTLPrintPktsRcvdPerRssi(v_PVOID_t pAdapter, v_U8_t staId, v_BOOL_t flush); extern void WLANTLPrintPktsRcvdPerRateIdx(v_PVOID_t pAdapter, v_U8_t staId, v_BOOL_t flush); #endif static char *getRole( tLimSystemRole role ) { switch (role) { case eLIM_UNKNOWN_ROLE: return "eLIM_UNKNOWN_ROLE"; case eLIM_AP_ROLE: return "eLIM_AP_ROLE"; case eLIM_STA_IN_IBSS_ROLE: return "eLIM_STA_IN_IBSS_ROLE"; case eLIM_STA_ROLE: return "eLIM_STA_ROLE"; case eLIM_BT_AMP_STA_ROLE: return "eLIM_BT_AMP_STA_ROLE"; case eLIM_BT_AMP_AP_ROLE: return "eLIM_BT_AMP_AP_ROLE"; default: return "UNKNOWN"; } } char *dumpLim( tpAniSirGlobal pMac, char *p, tANI_U32 sessionId) { #ifdef FIXME_GEN6 //iterate through the sessionTable and dump sta entries for each session. //Keep this code under 'WLAN_DEBUG' compile flag. 
tANI_U16 i, j; tpPESession psessionEntry = peFindSessionBySessionId(pMac, sessionId); if (psessionEntry == NULL) { p += log_sprintf( pMac, p, "Invalid sessionId: %d \n ", sessionId); return p; } p += log_sprintf( pMac,p, "\n ----- LIM Debug Information ----- \n"); p += log_sprintf( pMac,p, "LIM Role = (%d) %s\n", pMac->lim.gLimSystemRole, getRole(pMac->lim.gLimSystemRole)); p += log_sprintf( pMac,p, "SME State = (%d) %s", pMac->lim.gLimSmeState, limSmeStateStr(pMac->lim.gLimSmeState)); p += log_sprintf( pMac,p, "MLM State = (%d) %s", pMac->lim.gLimMlmState, limMlmStateStr(pMac->lim.gLimMlmState)); p += log_sprintf( pMac,p, "802.11n session HT Capability: %s\n", (psessionEntry->htCapability == 1) ? "Enabled" : "Disabled"); p += log_sprintf( pMac,p, "gLimProcessDefdMsgs: %s\n", (pMac->lim.gLimProcessDefdMsgs == 1) ? "Enabled" : "Disabled"); if (pMac->lim.gLimSystemRole == eLIM_STA_ROLE) { p += log_sprintf( pMac,p, "AID = %X\t\t\n", pMac->lim.gLimAID); p += log_sprintf( pMac,p, "SSID mismatch in Beacon Count = %d\n", pMac->lim.gLimBcnSSIDMismatchCnt); p += log_sprintf( pMac,p, "Number of link establishments = %d\n", pMac->lim.gLimNumLinkEsts); } else if (pMac->lim.gLimSystemRole == eLIM_AP_ROLE) { p += log_sprintf( pMac,p, "Num of STAs associated = %d\n", peGetCurrentSTAsCount(pMac)); p += log_sprintf( pMac,p, "Num of Pre-auth contexts = %d\n", pMac->lim.gLimNumPreAuthContexts); p += log_sprintf( pMac,p, "Num of AssocReq dropped in invalid State = %d\n", pMac->lim.gLimNumAssocReqDropInvldState); p += log_sprintf( pMac,p, "Num of ReassocReq dropped in invalid State = %d\n", pMac->lim.gLimNumReassocReqDropInvldState); p += log_sprintf( pMac,p, "Num of Hash Miss Event ignored = %d\n", pMac->lim.gLimNumHashMissIgnored); } p += log_sprintf( pMac,p, "Num of RxCleanup Count = %d\n", pMac->lim.gLimNumRxCleanup); p += log_sprintf( pMac,p, "Unexpected Beacon Count = %d\n", pMac->lim.gLimUnexpBcnCnt); p += log_sprintf( pMac,p, "Number of Re/Assoc rejects of 11b STAs = %d\n", 
pMac->lim.gLim11bStaAssocRejectCount); p += log_sprintf( pMac,p, "No. of HeartBeat Failures in LinkEst State = %d\n", pMac->lim.gLimHBfailureCntInLinkEstState); p += log_sprintf( pMac,p, "No. of Probe Failures after HB failed = %d\n", pMac->lim.gLimProbeFailureAfterHBfailedCnt); p += log_sprintf( pMac,p, "No. of HeartBeat Failures in Other States = %d\n", pMac->lim.gLimHBfailureCntInOtherStates); p += log_sprintf( pMac,p, "No. of Beacons Rxed During HB Interval = %d\n", pMac->lim.gLimRxedBeaconCntDuringHB); p += log_sprintf( pMac,p, "Self Operating Mode = %s\n", limDot11ModeStr(pMac, (tANI_U8)pMac->lim.gLimDot11Mode)); p += log_sprintf( pMac,p, "\n"); if (pMac->lim.gLimSystemRole == eLIM_AP_ROLE) i = 2; else i = 1; for (; i< pMac->lim.maxStation; i++) { tpDphHashNode pSta = dphGetHashEntry(pMac, (unsigned short)i); if (pSta && pSta->added) { p += log_sprintf( pMac,p, "\nSTA AID: %d STA ID: %d Valid: %d AuthType: %d MLM State: %s", i, pSta->staIndex, pSta->valid, pSta->mlmStaContext.authType, limMlmStateStr(pSta->mlmStaContext.mlmState)); p += log_sprintf( pMac,p, "\tAID:%-2d OpRateMode:%s ShPrmbl:%d HT:%d GF:%d TxChWidth:%d MimoPS:%d LsigProt:%d\n", pSta->assocId, limStaOpRateModeStr(pSta->supportedRates.opRateMode), pSta->shortPreambleEnabled, pSta->mlmStaContext.htCapability, pSta->htGreenfield, pSta->htSupportedChannelWidthSet, pSta->htMIMOPSState, pSta->htLsigTXOPProtection); p += log_sprintf( pMac,p, "\tAMPDU [MaxSz(Factor):%d, Dens: %d] AMSDU-MaxLen: %d\n", pSta->htMaxRxAMpduFactor, pSta->htAMpduDensity,pSta->htMaxAmsduLength); p += log_sprintf( pMac,p, "\tDSSCCkMode40Mhz: %d, SGI20: %d, SGI40: %d\n", pSta->htDsssCckRate40MHzSupport, pSta->htShortGI20Mhz, pSta->htShortGI40Mhz); p += log_sprintf( pMac,p, "\t11b Rates: "); for(j=0; j<SIR_NUM_11B_RATES; j++) if(pSta->supportedRates.llbRates[j] > 0) p += log_sprintf( pMac,p, "%d ", pSta->supportedRates.llbRates[j]); p += log_sprintf( pMac,p, "\n\t11a Rates: "); for(j=0; j<SIR_NUM_11A_RATES; j++) 
if(pSta->supportedRates.llaRates[j] > 0) p += log_sprintf( pMac,p, "%d ", pSta->supportedRates.llaRates[j]); p += log_sprintf( pMac,p, "\n\tPolaris Rates: "); for(j=0; j<SIR_NUM_POLARIS_RATES; j++) if(pSta->supportedRates.aniLegacyRates[j] > 0) p += log_sprintf( pMac,p, "%d ", pSta->supportedRates.aniLegacyRates[j]); p += log_sprintf( pMac,p, "\n\tTitan and Taurus Proprietary Rate Bitmap: %08x\n", pSta->supportedRates.aniEnhancedRateBitmap); p += log_sprintf( pMac,p, "\tMCS Rate Set Bitmap: "); for(j=0; j<SIR_MAC_MAX_SUPPORTED_MCS_SET; j++) p += log_sprintf( pMac,p, "%x ", pSta->supportedRates.supportedMCSSet[j]); } } p += log_sprintf( pMac,p, "\nProbe response disable = %d\n", pMac->lim.gLimProbeRespDisableFlag); p += log_sprintf( pMac,p, "Scan mode enable = %d\n", pMac->sys.gSysEnableScanMode); p += log_sprintf( pMac,p, "BackgroundScanDisable = %d\n", pMac->lim.gLimBackgroundScanDisable); p += log_sprintf( pMac,p, "ForceBackgroundScanDisable = %d\n", pMac->lim.gLimForceBackgroundScanDisable); p += log_sprintf( pMac,p, "LinkMonitor mode enable = %d\n", pMac->sys.gSysEnableLinkMonitorMode); p += log_sprintf( pMac,p, "Qos Capable = %d\n", SIR_MAC_GET_QOS(pMac->lim.gLimCurrentBssCaps)); p += log_sprintf( pMac,p, "Wme Capable = %d\n", LIM_BSS_CAPS_GET(WME, pMac->lim.gLimCurrentBssQosCaps)); p += log_sprintf( pMac,p, "Wsm Capable = %d\n", LIM_BSS_CAPS_GET(WSM, pMac->lim.gLimCurrentBssQosCaps)); if (pMac->lim.gLimSystemRole == eLIM_STA_IN_IBSS_ROLE) { p += log_sprintf( pMac,p, "Number of peers in IBSS = %d\n", pMac->lim.gLimNumIbssPeers); if (pMac->lim.gLimNumIbssPeers) { tLimIbssPeerNode *pTemp; pTemp = pMac->lim.gLimIbssPeerList; p += log_sprintf( pMac,p, "MAC-Addr Ani Edca WmeInfo HT Caps #S,#E(Rates)\n"); while (pTemp != NULL) { p += log_sprintf( pMac,p, "%02X:%02X:%02X:%02X:%02X:%02X ", pTemp->peerMacAddr[0], pTemp->peerMacAddr[1], pTemp->peerMacAddr[2], pTemp->peerMacAddr[3], pTemp->peerMacAddr[4], pTemp->peerMacAddr[5]); p += log_sprintf( pMac,p, " %d %d,%d %d %d 
%04X %d,%d\n", pTemp->aniIndicator, pTemp->edcaPresent, pTemp->wmeEdcaPresent, pTemp->wmeInfoPresent, pTemp->htCapable, pTemp->capabilityInfo, pTemp->supportedRates.numRates, pTemp->extendedRates.numRates); pTemp = pTemp->next; } } } p += log_sprintf( pMac,p, "System Scan/Learn Mode bit = %d\n", pMac->lim.gLimSystemInScanLearnMode); p += log_sprintf( pMac,p, "Scan override = %d\n", pMac->lim.gLimScanOverride); p += log_sprintf( pMac,p, "CB State protection = %d\n", pMac->lim.gLimCBStateProtection); p += log_sprintf( pMac,p, "Count of Titan STA's = %d\n", pMac->lim.gLimTitanStaCount); //current BSS capability p += log_sprintf( pMac,p, "**********Current BSS Capability********\n"); p += log_sprintf( pMac,p, "Ess = %d, ", SIR_MAC_GET_ESS(pMac->lim.gLimCurrentBssCaps)); p += log_sprintf( pMac,p, "Privacy = %d, ", SIR_MAC_GET_PRIVACY(pMac->lim.gLimCurrentBssCaps)); p += log_sprintf( pMac,p, "Short Preamble = %d, ", SIR_MAC_GET_SHORT_PREAMBLE(pMac->lim.gLimCurrentBssCaps)); p += log_sprintf( pMac,p, "Short Slot = %d, ", SIR_MAC_GET_SHORT_SLOT_TIME(pMac->lim.gLimCurrentBssCaps)); p += log_sprintf( pMac,p, "Qos = %d\n", SIR_MAC_GET_QOS(pMac->lim.gLimCurrentBssCaps)); //Protection related information p += log_sprintf( pMac,p, "*****Protection related information******\n"); p += log_sprintf( pMac,p, "Protection %s\n", pMac->lim.gLimProtectionControl ? 
"Enabled" : "Disabled"); p += log_sprintf( pMac,p, "OBSS MODE = %d\n", pMac->lim.gHTObssMode); p += log_sprintf( pMac, p, "HT operating Mode = %d, llbCoexist = %d, llgCoexist = %d, ht20Coexist = %d, nonGfPresent = %d, RifsMode = %d, lsigTxop = %d\n", pMac->lim.gHTOperMode, pMac->lim.llbCoexist, pMac->lim.llgCoexist, pMac->lim.ht20MhzCoexist, pMac->lim.gHTNonGFDevicesPresent, pMac->lim.gHTRifsMode, pMac->lim.gHTLSigTXOPFullSupport); p += log_sprintf(pMac, p, "2nd Channel offset = %d\n", psessionEntry->hHTSecondaryChannelOffset); #endif return p; } /******************************************* * FUNCTION: triggerBeaconGen() * * This logdump sends SIR_SCH_BEACON_GEN_IND to SCH. * SCH then proceeds to generate a beacon template * and copy it to the Host/SoftMAC shared memory * * TODO - This routine can safely be deleted once * beacon generation is working ******************************************/ char *triggerBeaconGen( tpAniSirGlobal pMac, char *p ) { tSirMsgQ mesg = { (tANI_U16) SIR_LIM_BEACON_GEN_IND, (tANI_U16) 0, (tANI_U32) 0 }; pMac->lim.gLimSmeState = eLIM_SME_NORMAL_STATE; MTRACE(macTrace(pMac, TRACE_CODE_SME_STATE, NO_SESSION, pMac->lim.gLimSmeState)); pMac->lim.gLimSystemRole = eLIM_AP_ROLE; p += log_sprintf( pMac, p, "Posted SIR_LIM_BEACON_GEN_IND with result = %s\n", (eSIR_SUCCESS == limPostMsgApi( pMac, &mesg ))? "Success": "Failure" ); return p; } /******************************************* * FUNCTION: testLimSendProbeRsp() * * This logdump sends SIR_MAC_MGMT_PROBE_RSP * * TODO - This routine can safely be deleted once * the MGMT frame transmission is working ******************************************/ char *testLimSendProbeRsp( tpAniSirGlobal pMac, char *p ) { tSirMacAddr peerMacAddr = { 0, 1, 2, 3, 4, 5 }; tAniSSID ssId; tANI_U32 len = SIR_MAC_MAX_SSID_LENGTH; tpPESession psessionEntry = &pMac->lim.gpSession[0]; //TBD-RAJESH HOW TO GET sessionEntry????? 
if( eSIR_SUCCESS != wlan_cfgGetStr( pMac, WNI_CFG_SSID, (tANI_U8 *) &ssId.ssId, (tANI_U32 *) &len )) { // Could not get SSID from CFG. Log error. p += log_sprintf( pMac, p, "Unable to retrieve SSID\n" ); return p; } else ssId.length = (tANI_U8) len; p += log_sprintf( pMac, p, "Calling limSendProbeRspMgmtFrame...\n" ); limSendProbeRspMgmtFrame( pMac, peerMacAddr, &ssId, -1, 1, psessionEntry , 0); return p; } static char *sendSmeScanReq(tpAniSirGlobal pMac, char *p) { tSirMsgQ msg; tSirSmeScanReq scanReq, *pScanReq; p += log_sprintf( pMac,p, "sendSmeScanReq: Preparing eWNI_SME_SCAN_REQ message\n"); pScanReq = (tSirSmeScanReq *) &scanReq; pScanReq = vos_mem_malloc(sizeof(tSirSmeScanReq)); if (NULL == pScanReq) { p += log_sprintf( pMac,p,"sendSmeScanReq: AllocateMemory() failed \n"); return p; } pScanReq->messageType = eWNI_SME_SCAN_REQ; pScanReq->minChannelTime = 30; pScanReq->maxChannelTime = 130; pScanReq->bssType = eSIR_INFRASTRUCTURE_MODE; limGetMyMacAddr(pMac, pScanReq->bssId); pScanReq->numSsid = 1; vos_mem_copy((void *) &pScanReq->ssId[0].ssId, (void *)"Ivan", 4); pScanReq->ssId[0].length = 4; pScanReq->scanType = eSIR_ACTIVE_SCAN; pScanReq->returnAfterFirstMatch = 0; pScanReq->returnUniqueResults = 0; pScanReq->returnFreshResults = SIR_BG_SCAN_PURGE_RESUTLS|SIR_BG_SCAN_RETURN_FRESH_RESULTS; pScanReq->channelList.numChannels = 1; pScanReq->channelList.channelNumber[0] = 6; pScanReq->uIEFieldLen = 0; pScanReq->uIEFieldOffset = sizeof(tSirSmeScanReq); pScanReq->sessionId = 0; msg.type = eWNI_SME_SCAN_REQ; msg.bodyptr = pScanReq; msg.bodyval = 0; p += log_sprintf( pMac,p, "sendSmeScanReq: limPostMsgApi(eWNI_SME_SCAN_REQ) \n"); limPostMsgApi(pMac, &msg); return p; } static char *sendSmeDisAssocReq(tpAniSirGlobal pMac, char *p,tANI_U32 arg1 ,tANI_U32 arg2) { tpDphHashNode pStaDs; tSirMsgQ msg; tSirSmeDisassocReq *pDisAssocReq; tpPESession psessionEntry; //arg1 - assocId //arg2 - sessionId if( arg1 < 1 ) { p += log_sprintf( pMac,p,"Invalid session OR Assoc ID \n"); 
return p; } if((psessionEntry = peFindSessionBySessionId(pMac,(tANI_U8)arg2) )== NULL) { p += log_sprintf( pMac,p,"Session does not exist for given session Id \n"); return p; } pStaDs = dphGetHashEntry(pMac, (tANI_U16)arg1, &psessionEntry->dph.dphHashTable); if(NULL == pStaDs) { p += log_sprintf( pMac,p, "Could not find station with assocId = %d\n", arg1); return p; } pDisAssocReq = vos_mem_malloc(sizeof(tSirSmeDisassocReq)); if (NULL == pDisAssocReq) { p += log_sprintf( pMac,p,"sendSmeDisAssocReq: AllocateMemory() failed \n"); return p; } if( ( (psessionEntry->limSystemRole == eLIM_STA_ROLE) || (psessionEntry ->limSystemRole == eLIM_BT_AMP_STA_ROLE) ) && (psessionEntry->statypeForBss == STA_ENTRY_PEER)) { sirCopyMacAddr(pDisAssocReq->bssId,psessionEntry->bssId); sirCopyMacAddr(pDisAssocReq->peerMacAddr,psessionEntry->bssId); } if((psessionEntry->limSystemRole == eLIM_BT_AMP_AP_ROLE) || (psessionEntry->limSystemRole == eLIM_AP_ROLE) ) { sirCopyMacAddr(pDisAssocReq->peerMacAddr,pStaDs->staAddr); sirCopyMacAddr(pDisAssocReq->bssId,psessionEntry->bssId); } pDisAssocReq->messageType = eWNI_SME_DISASSOC_REQ; pDisAssocReq->length = sizeof(tSirSmeDisassocReq); pDisAssocReq->reasonCode = eSIR_MAC_UNSPEC_FAILURE_REASON; pDisAssocReq->sessionId = 0; pDisAssocReq->transactionId = 0; msg.type = eWNI_SME_DISASSOC_REQ; msg.bodyptr = pDisAssocReq; msg.bodyval = 0; p += log_sprintf( pMac,p, "sendSmeDisAssocReq: limPostMsgApi(eWNI_SME_DISASSOC_REQ) \n"); limPostMsgApi(pMac, &msg); return p; } static char *sendSmeStartBssReq(tpAniSirGlobal pMac, char *p,tANI_U32 arg1) { tSirMsgQ msg; tSirSmeStartBssReq *pStartBssReq; unsigned char *pBuf; ePhyChanBondState cbMode; tSirNwType nwType; p += log_sprintf( pMac,p, "sendSmeStartBssReq: Preparing eWNI_SME_START_BSS_REQ message\n"); if(arg1 > 2) { p += log_sprintf( pMac,p,"Invalid Argument1 \n"); return p; } pStartBssReq = vos_mem_malloc(sizeof(tSirSmeStartBssReq)); if (NULL == pStartBssReq) { p += log_sprintf( pMac,p,"sendSmeStartBssReq: 
AllocateMemory() failed \n"); return p; } pStartBssReq->messageType = eWNI_SME_START_BSS_REQ; pStartBssReq->length = 29; // 0x1d if(arg1 == 0) //BTAMP STATION { pStartBssReq->bssType = eSIR_BTAMP_STA_MODE; pStartBssReq->ssId.length = 5; vos_mem_copy((void *) &pStartBssReq->ssId.ssId, (void *)"BTSTA", 5); } else if(arg1 == 1) //BTAMP AP { pStartBssReq->bssType = eSIR_BTAMP_AP_MODE; pStartBssReq->ssId.length = 4; vos_mem_copy((void *) &pStartBssReq->ssId.ssId, (void *)"BTAP", 4); } else //IBSS { pStartBssReq->bssType = eSIR_IBSS_MODE; pStartBssReq->ssId.length = 4; vos_mem_copy((void *) &pStartBssReq->ssId.ssId, (void *)"Ibss", 4); } // Filling in channel ID 6 pBuf = &(pStartBssReq->ssId.ssId[pStartBssReq->ssId.length]); *pBuf = 6; pBuf++; // Filling in CB mode cbMode = PHY_SINGLE_CHANNEL_CENTERED; vos_mem_copy(pBuf, (tANI_U8 *)&cbMode, sizeof(ePhyChanBondState)); pBuf += sizeof(ePhyChanBondState); // Filling in RSN IE Length to zero vos_mem_set(pBuf, sizeof(tANI_U16), 0); //tSirRSNie->length pBuf += sizeof(tANI_U16); // Filling in NW Type nwType = eSIR_11G_NW_TYPE; vos_mem_copy(pBuf, (tANI_U8 *)&nwType, sizeof(tSirNwType)); pBuf += sizeof(tSirNwType); /* ---- To be filled by LIM later ---- pStartBssReq->operationalRateSet pStartBssReq->extendedRateSet pStartBssReq->dot11mode pStartBssReq->bssId pStartBssReq->selfMacAddr pStartBssReq->beaconInterval pStartBssReq->sessionId = 0; pStartBssReq->transactionId = 0; * ------------------------------------ */ msg.type = eWNI_SME_START_BSS_REQ; msg.bodyptr = pStartBssReq; msg.bodyval = 0; p += log_sprintf( pMac,p, "sendSmeStartBssReq: limPostMsgApi(eWNI_SME_START_BSS_REQ) \n"); limPostMsgApi(pMac, &msg); return p; } static char *sendSmeStopBssReq(tpAniSirGlobal pMac, char *p, tANI_U32 sessionId) { tSirMsgQ msg; tSirSmeStopBssReq stopBssReq, *pStopBssReq; tANI_U16 msgLen = 0; tpPESession psessionEntry; psessionEntry = peFindSessionBySessionId(pMac, (tANI_U8)sessionId); if ( psessionEntry == NULL ) { limLog(pMac, LOGP, 
FL("Session entry does not exist for given sessionID \n")); return p; } p += log_sprintf( pMac,p, "sendSmeStopBssReq: Preparing eWNI_SME_STOP_BSS_REQ message\n"); pStopBssReq = (tSirSmeStopBssReq *) &stopBssReq; pStopBssReq = vos_mem_malloc(sizeof(tSirSmeStopBssReq)); if (NULL == pStopBssReq) { p += log_sprintf( pMac,p,"sendSmeStopBssReq: AllocateMemory() failed \n"); return p; } pStopBssReq->messageType = eWNI_SME_STOP_BSS_REQ; msgLen += sizeof(tANI_U32); // msgType + length pStopBssReq->reasonCode = eSIR_SME_SUCCESS; msgLen += sizeof(tSirResultCodes); vos_mem_copy((void *) &pStopBssReq->bssId, (void *)psessionEntry->bssId, 6); msgLen += sizeof(tSirMacAddr); pStopBssReq->sessionId = (tANI_U8)sessionId; msgLen += sizeof(tANI_U8); pStopBssReq->transactionId = 0; msgLen += sizeof(tANI_U16); pStopBssReq->length = msgLen; msg.type = eWNI_SME_STOP_BSS_REQ; msg.bodyptr = pStopBssReq; msg.bodyval = 0; p += log_sprintf( pMac,p, "sendSmeStopBssReq: limPostMsgApi(eWNI_SME_STOP_BSS_REQ) \n"); limPostMsgApi(pMac, &msg); return p; } static char *sendSmeJoinReq(tpAniSirGlobal pMac, char *p) { tSirMsgQ msg; tSirSmeJoinReq *pJoinReq; unsigned char *pBuf; tANI_U16 msgLen = 307; tANI_U8 msgDump[307] = { 0x06, 0x12, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x01, 0x00, 0xDE, 0xAD, 0xBA, 0xEF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x64, 0x00, 0x21, 0x04, 0x02, 0x00, 0x00, 0x00, 0x01, 0x1E, 0x00, 0x06, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0xA8, 0x85, 0x4F, 0x7A, 0x00, 0x06, 0x41, 0x6E, 0x69, 0x4E, 0x65, 0x74, 0x01, 0x04, 0x82, 0x84, 0x8B, 0x96, 0x03, 0x01, 0x06, 0x07, 0x06, 0x55, 0x53, 0x49, 0x01, 0x0E, 0x1E, 0x2A, 0x01, 0x00, 0x32, 0x08, 0x0C, 0x12, 0x18, 0x24, 0x30, 0x48, 0x60, 0x6C, 0x2D, 0x1A, 0xEE, 0x11, 0x03, 0xFF, 0xFF, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3D, 0x16, 0x06, 0x07, 0x11, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xDD, 0x18, 0x00, 0x50, 0xF2, 0x02, 0x01, 0x01, 0x01, 0x00, 0x03, 0xA4, 0x00, 0x00, 0x27, 0xA4, 0x00, 0x00, 0x42, 0x43, 0x5E, 0x00, 0x62, 0x32, 0x2F, 0x00, 0xDD, 0x14, 0x00, 0x0A, 0xF5, 0x00, 0x03, 0x01, 0x03, 0x05, 0x0A, 0x02, 0x80, 0xC0, 0x12, 0x06, 0xFF, 0xFF, 0xFF, 0xFF, 0xB6, 0x0D, 0xDD, 0x6E, 0x00, 0x50, 0xF2, 0x04, 0x10, 0x4A, 0x00, 0x01, 0x10, 0x10, 0x44, 0x00, 0x01, 0x01, 0x10, 0x3B, 0x00, 0x01, 0x03, 0x10, 0x47, 0x00, 0x10, 0xDB, 0xC6, 0x77, 0x28, 0xB9, 0xF3, 0xD8, 0x58, 0x86, 0xFF, 0xFC, 0x6B, 0xB6, 0xB9, 0x27, 0x79, 0x10, 0x21, 0x00, 0x08, 0x51, 0x75, 0x61, 0x6C, 0x63, 0x6F, 0x6D, 0x6D, 0x10, 0x23, 0x00, 0x07, 0x57, 0x46, 0x52, 0x34, 0x30, 0x33, 0x31, 0x10, 0x24, 0x00, 0x06, 0x4D, 0x4E, 0x31, 0x32, 0x33, 0x34, 0x10, 0x42, 0x00, 0x06, 0x53, 0x4E, 0x31, 0x32, 0x33, 0x34, 0x10, 0x54, 0x00, 0x08, 0x00, 0x06, 0x00, 0x50, 0xF2, 0x04, 0x00, 0x01, 0x10, 0x11, 0x00, 0x06, 0x31, 0x31, 0x6E, 0x2D, 0x41, 0x50, 0x10, 0x08, 0x00, 0x02, 0x01, 0x8E }; pJoinReq = vos_mem_malloc(msgLen); if (NULL == pJoinReq) { p += log_sprintf( pMac,p,"sendSmeJoinReq: AllocateMemory() failed \n"); return p; } pBuf = (unsigned char *)pJoinReq; vos_mem_copy(pBuf, (tANI_U8 *)msgDump, msgLen); msg.type = eWNI_SME_JOIN_REQ; msg.bodyptr = pJoinReq; msg.bodyval = 0; limPostMsgApi(pMac, &msg); return p; } static char *printSessionInfo(tpAniSirGlobal pMac, char *p) { tpPESession psessionEntry = &pMac->lim.gpSession[0]; tANI_U8 i; p += log_sprintf( pMac, p, "Dump PE Session \n"); for(i=0; i < pMac->lim.maxBssId; i++) { if( pMac->lim.gpSession[i].valid ) { psessionEntry = &pMac->lim.gpSession[i]; p += log_sprintf( pMac,p, "*****************************************\n"); p += log_sprintf( pMac,p, " PE Session [%d] \n", i); p += log_sprintf( pMac,p, "available: %d \n", psessionEntry->available); p += log_sprintf( pMac,p, "peSessionId: %d, smeSessionId: %d, transactionId: %d \n", psessionEntry->peSessionId, 
psessionEntry->smeSessionId, psessionEntry->smeSessionId); p += log_sprintf( pMac,p, "bssId: %02X:%02X:%02X:%02X:%02X:%02X \n", psessionEntry->bssId[0], psessionEntry->bssId[1], psessionEntry->bssId[2], psessionEntry->bssId[3], psessionEntry->bssId[4], psessionEntry->bssId[5]); p += log_sprintf( pMac,p, "selfMacAddr: %02X:%02X:%02X:%02X:%02X:%02X \n", psessionEntry->selfMacAddr[0], psessionEntry->selfMacAddr[1], psessionEntry->selfMacAddr[2], psessionEntry->selfMacAddr[3], psessionEntry->selfMacAddr[4], psessionEntry->selfMacAddr[5]); p += log_sprintf( pMac,p, "bssIdx: %d \n", psessionEntry->bssIdx); p += log_sprintf( pMac,p, "valid: %d \n", psessionEntry->valid); p += log_sprintf( pMac,p, "limMlmState: (%d) %s ", psessionEntry->limMlmState, limMlmStateStr(psessionEntry->limMlmState) ); p += log_sprintf( pMac,p, "limPrevMlmState: (%d) %s ", psessionEntry->limPrevMlmState, limMlmStateStr(psessionEntry->limMlmState) ); p += log_sprintf( pMac,p, "limSmeState: (%d) %s ", psessionEntry->limSmeState, limSmeStateStr(psessionEntry->limSmeState) ); p += log_sprintf( pMac,p, "limPrevSmeState: (%d) %s ", psessionEntry->limPrevSmeState, limSmeStateStr(psessionEntry->limPrevSmeState) ); p += log_sprintf( pMac,p, "limSystemRole: (%d) %s \n", psessionEntry->limSystemRole, getRole(psessionEntry->limSystemRole) ); p += log_sprintf( pMac,p, "bssType: (%d) %s \n", psessionEntry->bssType, limBssTypeStr(psessionEntry->bssType)); p += log_sprintf( pMac,p, "operMode: %d \n", psessionEntry->operMode); p += log_sprintf( pMac,p, "dot11mode: %d \n", psessionEntry->dot11mode); p += log_sprintf( pMac,p, "htCapability: %d \n", psessionEntry->htCapability); p += log_sprintf( pMac,p, "limRFBand: %d \n", psessionEntry->limRFBand); p += log_sprintf( pMac,p, "limIbssActive: %d \n", psessionEntry->limIbssActive); p += log_sprintf( pMac,p, "limCurrentAuthType: %d \n", psessionEntry->limCurrentAuthType); p += log_sprintf( pMac,p, "limCurrentBssCaps: %d \n", psessionEntry->limCurrentBssCaps); p += 
log_sprintf( pMac,p, "limCurrentBssQosCaps: %d \n", psessionEntry->limCurrentBssQosCaps); p += log_sprintf( pMac,p, "limCurrentBssPropCap: %d \n", psessionEntry->limCurrentBssPropCap); p += log_sprintf( pMac,p, "limSentCapsChangeNtf: %d \n", psessionEntry->limSentCapsChangeNtf); p += log_sprintf( pMac,p, "LimAID: %d \n", psessionEntry->limAID); p += log_sprintf( pMac,p, "ReassocbssId: %02X:%02X:%02X:%02X:%02X:%02X \n", psessionEntry->limReAssocbssId[0], psessionEntry->limReAssocbssId[1], psessionEntry->limReAssocbssId[2], psessionEntry->limReAssocbssId[3], psessionEntry->limReAssocbssId[4], psessionEntry->limReAssocbssId[5]); p += log_sprintf( pMac,p, "limReassocChannelId: %d \n", psessionEntry->limReassocChannelId); p += log_sprintf( pMac,p, "limReassocBssCaps: %d \n", psessionEntry->limReassocBssCaps); p += log_sprintf( pMac,p, "limReassocBssQosCaps: %d \n", psessionEntry->limReassocBssQosCaps); p += log_sprintf( pMac,p, "limReassocBssPropCap: %d \n", psessionEntry->limReassocBssPropCap); p += log_sprintf( pMac,p, "********************************************\n"); } } return p; } void limSetEdcaBcastACMFlag(tpAniSirGlobal pMac, tANI_U32 ac, tANI_U32 acmFlag) { tpPESession psessionEntry = &pMac->lim.gpSession[0]; //TBD-RAJESH HOW TO GET sessionEntry????? psessionEntry->gLimEdcaParamsBC[ac].aci.acm = (tANI_U8)acmFlag; psessionEntry->gLimEdcaParamSetCount++; schSetFixedBeaconFields(pMac,psessionEntry); } static char * limDumpEdcaParams(tpAniSirGlobal pMac, char *p) { tANI_U8 i = 0; tpPESession psessionEntry = &pMac->lim.gpSession[0]; //TBD-RAJESH HOW TO GET sessionEntry????? p += log_sprintf( pMac,p, "EDCA parameter set count = %d\n", psessionEntry->gLimEdcaParamSetCount); p += log_sprintf( pMac,p, "Broadcast parameters\n"); p += log_sprintf( pMac,p, "AC\tACI\tACM\tAIFSN\tCWMax\tCWMin\tTxopLimit\t\n"); for(i = 0; i < MAX_NUM_AC; i++) { //right now I am just interested in ACM bit. this can be extended for all other EDCA paramters. 
p += log_sprintf( pMac,p, "%d\t%d\t%d\t%d\t%d\t%d\t%d\n", i, psessionEntry->gLimEdcaParamsBC[i].aci.aci, psessionEntry->gLimEdcaParamsBC[i].aci.acm, psessionEntry->gLimEdcaParamsBC[i].aci.aifsn, psessionEntry->gLimEdcaParamsBC[i].cw.max, psessionEntry->gLimEdcaParamsBC[i].cw.min, psessionEntry->gLimEdcaParamsBC[i].txoplimit); } p += log_sprintf( pMac,p, "\nLocal parameters\n"); p += log_sprintf( pMac,p, "AC\tACI\tACM\tAIFSN\tCWMax\tCWMin\tTxopLimit\t\n"); for(i = 0; i < MAX_NUM_AC; i++) { //right now I am just interested in ACM bit. this can be extended for all other EDCA paramters. p += log_sprintf( pMac,p, "%d\t%d\t%d\t%d\t%d\t%d\t%d\n", i, psessionEntry->gLimEdcaParams[i].aci.aci, psessionEntry->gLimEdcaParams[i].aci.acm, psessionEntry->gLimEdcaParams[i].aci.aifsn, psessionEntry->gLimEdcaParams[i].cw.max, psessionEntry->gLimEdcaParams[i].cw.min, psessionEntry->gLimEdcaParams[i].txoplimit); } return p; } static char* limDumpTspecEntry(tpAniSirGlobal pMac, char *p, tANI_U32 tspecEntryNo) { tpLimTspecInfo pTspecList; if(tspecEntryNo >= LIM_NUM_TSPEC_MAX) { p += log_sprintf( pMac,p, "Tspec Entry no. %d is out of allowed range(0 .. 
%d)\n", tspecEntryNo, (LIM_NUM_TSPEC_MAX - 1)); return p; } pTspecList = &pMac->lim.tspecInfo[tspecEntryNo]; if (pTspecList->inuse) p += log_sprintf( pMac,p, "Entry %d is VALID\n", tspecEntryNo); else { p += log_sprintf( pMac,p, "Entry %d is UNUSED\n", tspecEntryNo); return p; } p += log_sprintf( pMac,p, "\tSta %0x:%0x:%0x:%0x:%0x:%0x, AID %d, Index %d\n", pTspecList->staAddr[0], pTspecList->staAddr[1], pTspecList->staAddr[2], pTspecList->staAddr[3], pTspecList->staAddr[4], pTspecList->staAddr[5], pTspecList->assocId, pTspecList->idx); p += log_sprintf( pMac,p, "\tType %d, Length %d, ackPolicy %d, userPrio %d, accessPolicy = %d, Dir %d, tsid %d\n", pTspecList->tspec.type, pTspecList->tspec.length, pTspecList->tspec.tsinfo.traffic.ackPolicy, pTspecList->tspec.tsinfo.traffic.userPrio, pTspecList->tspec.tsinfo.traffic.accessPolicy, pTspecList->tspec.tsinfo.traffic.direction, pTspecList->tspec.tsinfo.traffic.tsid); p += log_sprintf( pMac,p, "\tPsb %d, Agg %d, TrafficType %d, schedule %d; msduSz: nom %d, max %d\n", pTspecList->tspec.tsinfo.traffic.psb, pTspecList->tspec.tsinfo.traffic.aggregation, pTspecList->tspec.tsinfo.traffic.trafficType, pTspecList->tspec.tsinfo.schedule.schedule, pTspecList->tspec.nomMsduSz, pTspecList->tspec.maxMsduSz); p += log_sprintf( pMac,p, "\tSvcInt: Min %d, Max %d; dataRate: Min %d, mean %d, peak %d\n", pTspecList->tspec.minSvcInterval, pTspecList->tspec.maxSvcInterval, pTspecList->tspec.minDataRate, pTspecList->tspec.meanDataRate, pTspecList->tspec.peakDataRate); p += log_sprintf( pMac,p, "\tmaxBurstSz %d, delayBound %d, minPhyRate %d, surplusBw %d, mediumTime %d\n", pTspecList->tspec.maxBurstSz, pTspecList->tspec.delayBound, pTspecList->tspec.minPhyRate, pTspecList->tspec.surplusBw, pTspecList->tspec.mediumTime); return p; } static char* dumpTspecTableSummary(tpAniSirGlobal pMac, tpLimTspecInfo pTspecList, char *p, int ctspec) { p += log_sprintf( pMac, p, "%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\n", ctspec, pTspecList->idx, 
pTspecList->assocId, pTspecList->tspec.tsinfo.traffic.ackPolicy, pTspecList->tspec.tsinfo.traffic.userPrio, pTspecList->tspec.tsinfo.traffic.psb, pTspecList->tspec.tsinfo.traffic.aggregation, pTspecList->tspec.tsinfo.traffic.accessPolicy, pTspecList->tspec.tsinfo.traffic.direction, pTspecList->tspec.tsinfo.traffic.tsid, pTspecList->tspec.tsinfo.traffic.trafficType); return p; } static char* limDumpDphTableSummary(tpAniSirGlobal pMac,char *p) { tANI_U8 i, j; p += log_sprintf( pMac,p, "DPH Table dump\n"); for(j=0; j < pMac->lim.maxBssId; j++) { /* Find first free room in session table */ if(pMac->lim.gpSession[j].valid) { p += log_sprintf( pMac,p, "aid staId bssid encPol qosMode wme 11e wsm staaddr\n"); for(i = 0; i < pMac->lim.gpSession[j].dph.dphHashTable.size; i++) { if (pMac->lim.gpSession[j].dph.dphHashTable.pDphNodeArray[i].added) { p += log_sprintf( pMac,p, "%d %d %d %d %d %d %d %d %x:%x:%x:%x:%x:%x\n", pMac->lim.gpSession[j].dph.dphHashTable.pDphNodeArray[i].assocId, pMac->lim.gpSession[j].dph.dphHashTable.pDphNodeArray[i].staIndex, pMac->lim.gpSession[j].dph.dphHashTable.pDphNodeArray[i].bssId, pMac->lim.gpSession[j].dph.dphHashTable.pDphNodeArray[i].encPolicy, pMac->lim.gpSession[j].dph.dphHashTable.pDphNodeArray[i].qosMode, pMac->lim.gpSession[j].dph.dphHashTable.pDphNodeArray[i].wmeEnabled, pMac->lim.gpSession[j].dph.dphHashTable.pDphNodeArray[i].lleEnabled, pMac->lim.gpSession[j].dph.dphHashTable.pDphNodeArray[i].wsmEnabled, pMac->lim.gpSession[j].dph.dphHashTable.pDphNodeArray[i].staAuthenticated, pMac->lim.gpSession[j].dph.dphHashTable.pDphNodeArray[i].staAddr[0], pMac->lim.gpSession[j].dph.dphHashTable.pDphNodeArray[i].staAddr[1], pMac->lim.gpSession[j].dph.dphHashTable.pDphNodeArray[i].staAddr[2], pMac->lim.gpSession[j].dph.dphHashTable.pDphNodeArray[i].staAddr[3], pMac->lim.gpSession[j].dph.dphHashTable.pDphNodeArray[i].staAddr[4], pMac->lim.gpSession[j].dph.dphHashTable.pDphNodeArray[i].staAddr[5]); } } } } return p; } // add the specified tspec to 
the tspec list static char* limDumpTsecTable( tpAniSirGlobal pMac, char* p) { int ctspec; tpLimTspecInfo pTspecList = &pMac->lim.tspecInfo[0]; p += log_sprintf( pMac,p, "=======LIM TSPEC TABLE DUMP\n"); p += log_sprintf( pMac,p, "Num\tIdx\tAID\tAckPol\tUP\tPSB\tAgg\tAccessPol\tDir\tTSID\ttraffic\n"); for (ctspec = 0; ctspec < LIM_NUM_TSPEC_MAX; ctspec++, pTspecList++) { if (pTspecList->inuse) p = dumpTspecTableSummary(pMac, pTspecList, p, ctspec); } return p; } static char * dump_lim_tspec_table( tpAniSirGlobal pMac, tANI_U32 arg1, tANI_U32 arg2, tANI_U32 arg3, tANI_U32 arg4, char *p) { (void) arg1; (void) arg2; (void) arg3; (void) arg4; p = limDumpTsecTable(pMac, p); return p; } static char * dump_lim_tspec_entry( tpAniSirGlobal pMac, tANI_U32 arg1, tANI_U32 arg2, tANI_U32 arg3, tANI_U32 arg4, char *p) { (void) arg2; (void) arg3; (void) arg4; p = limDumpTspecEntry(pMac, p, arg1); return p; } static char * dump_lim_dph_table_summary( tpAniSirGlobal pMac, tANI_U32 arg1, tANI_U32 arg2, tANI_U32 arg3, tANI_U32 arg4, char *p) { (void) arg2; (void) arg3; (void) arg4; p = limDumpDphTableSummary(pMac, p); return p; } static char * dump_lim_link_monitor_stats( tpAniSirGlobal pMac, tANI_U32 arg1, tANI_U32 arg2, tANI_U32 arg3, tANI_U32 arg4, char *p) { tANI_U32 ind, val; (void) arg2; (void) arg3; (void) arg4; p += log_sprintf( pMac,p, "\n ----- LIM Heart Beat Stats ----- \n"); p += log_sprintf( pMac,p, "No. of HeartBeat Failures in LinkEst State = %d\n", pMac->lim.gLimHBfailureCntInLinkEstState); p += log_sprintf( pMac,p, "No. of Probe Failures after HB failed = %d\n", pMac->lim.gLimProbeFailureAfterHBfailedCnt); p += log_sprintf( pMac,p, "No. 
of HeartBeat Failures in Other States = %d\n", pMac->lim.gLimHBfailureCntInOtherStates); if (wlan_cfgGetInt(pMac, WNI_CFG_HEART_BEAT_THRESHOLD, &val) == eSIR_SUCCESS) p += log_sprintf( pMac,p, "Cfg HeartBeat Threshold = %d\n", val); p += log_sprintf( pMac,p, "# Beacons Rcvd in HB interval # of times\n"); for (ind = 1; ind < MAX_NO_BEACONS_PER_HEART_BEAT_INTERVAL; ind++) { p += log_sprintf( pMac,p, "\t\t\t\t\t\t\t\t%2d\t\t\t\t\t\t\t\t\t\t\t%8d\n", ind, pMac->lim.gLimHeartBeatBeaconStats[ind]); } p += log_sprintf( pMac,p, "\t\t\t\t\t\t\t\t%2d>\t\t\t\t\t\t\t\t\t\t%8d\n", MAX_NO_BEACONS_PER_HEART_BEAT_INTERVAL-1, pMac->lim.gLimHeartBeatBeaconStats[0]); if (arg1 != 0) { for (ind = 0; ind < MAX_NO_BEACONS_PER_HEART_BEAT_INTERVAL; ind++) pMac->lim.gLimHeartBeatBeaconStats[ind] = 0; pMac->lim.gLimHBfailureCntInLinkEstState = 0; pMac->lim.gLimProbeFailureAfterHBfailedCnt = 0; pMac->lim.gLimHBfailureCntInOtherStates = 0; p += log_sprintf( pMac,p, "\nReset HeartBeat Statistics\n"); } return p; } static char * dump_lim_edca_params( tpAniSirGlobal pMac, tANI_U32 arg1, tANI_U32 arg2, tANI_U32 arg3, tANI_U32 arg4, char *p) { (void) arg1; (void) arg2; (void) arg3; (void) arg4; p = limDumpEdcaParams(pMac, p); return p; } static char * dump_lim_acm_set( tpAniSirGlobal pMac, tANI_U32 arg1, tANI_U32 arg2, tANI_U32 arg3, tANI_U32 arg4, char *p) { (void) arg3; (void) arg4; limSetEdcaBcastACMFlag(pMac, arg1 /*ac(0..3)*/, arg2 /*(acmFlag = 1 to set ACM*/); return p; } static char * dump_lim_bgscan_toggle( tpAniSirGlobal pMac, tANI_U32 arg1, tANI_U32 arg2, tANI_U32 arg3, tANI_U32 arg4, char *p) { (void) arg2; (void) arg3; (void) arg4; pMac->lim.gLimForceBackgroundScanDisable = (arg1 == 0) ? 1 : 0; p += log_sprintf( pMac,p, "Bgnd scan is now %s\n", (pMac->lim.gLimForceBackgroundScanDisable) ? 
"Disabled" : "On"); return p; } static char * dump_lim_linkmonitor_toggle( tpAniSirGlobal pMac, tANI_U32 arg1, tANI_U32 arg2, tANI_U32 arg3, tANI_U32 arg4, char *p) { (void) arg2; (void) arg3; (void) arg4; pMac->sys.gSysEnableLinkMonitorMode = (arg1 == 0) ? 0 : 1; p += log_sprintf( pMac,p, "LinkMonitor mode enable = %s\n", (pMac->sys.gSysEnableLinkMonitorMode) ? "On" : "Off"); return p; } static char * dump_lim_proberesp_toggle( tpAniSirGlobal pMac, tANI_U32 arg1, tANI_U32 arg2, tANI_U32 arg3, tANI_U32 arg4, char *p) { (void) arg2; (void) arg3; (void) arg4; pMac->lim.gLimProbeRespDisableFlag = (arg1 == 0) ? 0 : 1; p += log_sprintf( pMac,p, "ProbeResponse mode disable = %s\n", (pMac->lim.gLimProbeRespDisableFlag) ? "On" : "Off"); return p; } static char * dump_lim_add_sta( tpAniSirGlobal pMac, tANI_U32 arg1, tANI_U32 arg2, tANI_U32 arg3, tANI_U32 arg4, char *p) { #ifdef FIXME_GEN6 tpDphHashNode pStaDs; tpPESession psessionEntry = &pMac->lim.gpSession[0]; //TBD-RAJESH HOW TO GET sessionEntry????? 
tSirMacAddr staMac = {0}; tANI_U16 peerIdx; if(arg2 > 5) goto addStaFail; peerIdx = limAssignPeerIdx(pMac, psessionEntry); pStaDs = dphGetHashEntry(pMac, peerIdx); if(NULL == pStaDs) { staMac[5] = (tANI_U8) arg1; pStaDs = dphAddHashEntry(pMac, staMac, peerIdx, &psessionEntry->dph.dphHashTable); if(NULL == pStaDs) goto addStaFail; pStaDs->staType = STA_ENTRY_PEER; switch(arg2) { //11b station case 0: { pStaDs->mlmStaContext.htCapability = 0; pStaDs->erpEnabled = 0; p += log_sprintf( pMac,p, "11b"); } break; //11g station case 1: { pStaDs->mlmStaContext.htCapability = 0; pStaDs->erpEnabled = 1; p += log_sprintf( pMac,p, "11g"); } break; //ht20 station non-GF case 2: { pStaDs->mlmStaContext.htCapability = 1; pStaDs->erpEnabled = 1; pStaDs->htSupportedChannelWidthSet = 0; pStaDs->htGreenfield = 0; p += log_sprintf( pMac,p, "HT20 non-GF"); } break; //ht20 station GF case 3: { pStaDs->mlmStaContext.htCapability = 1; pStaDs->erpEnabled = 1; pStaDs->htSupportedChannelWidthSet = 0; pStaDs->htGreenfield = 1; p += log_sprintf( pMac,p, "HT20 GF"); } break; //ht40 station non-GF case 4: { pStaDs->mlmStaContext.htCapability = 1; pStaDs->erpEnabled = 1; pStaDs->htSupportedChannelWidthSet = 1; pStaDs->htGreenfield = 0; p += log_sprintf( pMac,p, "HT40 non-GF"); } break; //ht40 station GF case 5: { pStaDs->mlmStaContext.htCapability = 1; pStaDs->erpEnabled = 1; pStaDs->htSupportedChannelWidthSet = 1; pStaDs->htGreenfield = 1; p += log_sprintf( pMac,p, "HT40 GF"); } break; default: { p += log_sprintf( pMac,p, "arg2 not in range [0..3]. 
Station not added.\n"); goto addStaFail; } break; } pStaDs->added = 1; p += log_sprintf( pMac,p, " station with mac address 00:00:00:00:00:%x added.\n", (tANI_U8)arg1); limAddSta(pMac, pStaDs,psessionEntry); } else { addStaFail: p += log_sprintf( pMac,p, "Could not add station\n"); p += log_sprintf( pMac,p, "arg1: 6th byte of the station MAC address\n"); p += log_sprintf( pMac,p, "arg2[0..5] : station type as described below\n"); p += log_sprintf( pMac,p, "\t 0: 11b, 1: 11g, 2: HT20 non-GF, 3: HT20 GF, 4: HT40 non-GF, 5: HT40 GF\n"); } #endif return p; } static char * dump_lim_del_sta( tpAniSirGlobal pMac, tANI_U32 arg1, tANI_U32 arg2, tANI_U32 arg3, tANI_U32 arg4, char *p) { tpDphHashNode pStaDs; tLimMlmDisassocInd mlmDisassocInd; tpPESession psessionEntry; tANI_U8 reasonCode = eSIR_MAC_DISASSOC_DUE_TO_INACTIVITY_REASON; if((psessionEntry = peFindSessionBySessionId(pMac,(tANI_U8)arg2) )== NULL) { p += log_sprintf( pMac,p,"Session does not exist for given session Id \n"); return p; } pStaDs = dphGetHashEntry(pMac, (tANI_U16)arg1, &psessionEntry->dph.dphHashTable); if(NULL == pStaDs) { p += log_sprintf( pMac,p, "Could not find station with assocId = %d\n", arg1); return p; } if (pStaDs->mlmStaContext.mlmState != eLIM_MLM_LINK_ESTABLISHED_STATE) { p += log_sprintf( pMac,p, "received Disassoc frame from peer that is in state %X \n", pStaDs->mlmStaContext.mlmState); return p; } pStaDs->mlmStaContext.cleanupTrigger = eLIM_PEER_ENTITY_DISASSOC; pStaDs->mlmStaContext.disassocReason = (tSirMacReasonCodes) reasonCode; // Issue Disassoc Indication to SME. 
vos_mem_copy((tANI_U8 *) &mlmDisassocInd.peerMacAddr, (tANI_U8 *) pStaDs->staAddr, sizeof(tSirMacAddr)); mlmDisassocInd.reasonCode = reasonCode; mlmDisassocInd.disassocTrigger = eLIM_PEER_ENTITY_DISASSOC; mlmDisassocInd.sessionId = psessionEntry->peSessionId; limPostSmeMessage(pMac, LIM_MLM_DISASSOC_IND, (tANI_U32 *) &mlmDisassocInd); // Receive path cleanup limCleanupRxPath(pMac, pStaDs,psessionEntry); return p; } static char * set_lim_prot_cfg( tpAniSirGlobal pMac, tANI_U32 arg1, tANI_U32 arg2, tANI_U32 arg3, tANI_U32 arg4, char *p) { /********************************** * Protection Enable * *LOWER byte for associated stations *UPPER byte for overlapping stations. *11g ==> protection from 11g *11b ==> protection from 11b *each byte will have the following info *bit7 bit6 bit5 bit4 bit3 bit2 bit1 bit0 *reserved reserved RIFS Lsig n-GF ht20 11g 11b ********************************** WNI_CFG_PROTECTION_ENABLED I 4 9 V RW NP RESTART LIM 0 0xff 0xff V RW NP RESTART LIM 0 0xffff 0xffff #ENUM FROM_llB 0 #ENUM FROM_llG 1 #ENUM HT_20 2 #ENUM NON_GF 3 #ENUM LSIG_TXOP 4 #ENUM RIFS 5 #ENUM OLBC_FROM_llB 8 #ENUM OLBC_FROM_llG 9 #ENUM OLBC_HT20 10 #ENUM OLBC_NON_GF 11 #ENUM OLBC_LSIG_TXOP 12 #ENUM OLBC_RIFS 13 ******************************************/ if(1 == arg1) dump_cfg_set(pMac, WNI_CFG_PROTECTION_ENABLED, 0xff, arg3, arg4, p); else if(2 == arg1) dump_cfg_set(pMac, WNI_CFG_PROTECTION_ENABLED, arg2 & 0xff, arg3, arg4, p); else { p += log_sprintf( pMac,p, "To set protection config:\n"); p += log_sprintf( pMac,p, "arg1: operation type(1 -> set to Default 0xff, 2-> set to a arg2, else print help)\n"); } return p; } static char * dump_lim_set_protection_control( tpAniSirGlobal pMac, tANI_U32 arg1, tANI_U32 arg2, tANI_U32 arg3, tANI_U32 arg4, char *p) { dump_cfg_set(pMac, WNI_CFG_FORCE_POLICY_PROTECTION, arg1, arg2, arg3, p); limSetCfgProtection(pMac, NULL); return p; } static char * dump_lim_send_SM_Power_Mode( tpAniSirGlobal pMac, tANI_U32 arg1, tANI_U32 arg2, tANI_U32 
arg3, tANI_U32 arg4, char *p) { tSirMsgQ msg; tpSirMbMsg pMBMsg; tSirMacHTMIMOPowerSaveState state; p += log_sprintf( pMac,p, "%s: Verifying the Arguments\n", __func__); if ((arg1 > 3) || (arg1 == 2)) { p += log_sprintf( pMac,p, "Invalid Argument , enter one of the valid states\n"); return p; } state = (tSirMacHTMIMOPowerSaveState) arg1; pMBMsg = vos_mem_malloc(WNI_CFG_MB_HDR_LEN + sizeof(tSirMacHTMIMOPowerSaveState)); if(NULL == pMBMsg) { p += log_sprintf( pMac,p, "pMBMsg is NULL\n"); return p; } pMBMsg->type = eWNI_PMC_SMPS_STATE_IND; pMBMsg->msgLen = (tANI_U16)(WNI_CFG_MB_HDR_LEN + sizeof(tSirMacHTMIMOPowerSaveState)); vos_mem_copy(pMBMsg->data, &state, sizeof(tSirMacHTMIMOPowerSaveState)); msg.type = eWNI_PMC_SMPS_STATE_IND; msg.bodyptr = pMBMsg; msg.bodyval = 0; if (limPostMsgApi(pMac, &msg) != TX_SUCCESS) { p += log_sprintf( pMac,p, "Updating the SMPower Request has failed \n"); vos_mem_free(pMBMsg); } else { p += log_sprintf( pMac,p, "Updating the SMPower Request is Done \n"); } return p; } static char * dump_lim_addba_req( tpAniSirGlobal pMac, tANI_U32 arg1, tANI_U32 arg2, tANI_U32 arg3, tANI_U32 arg4, char *p) { tSirRetStatus status; tpDphHashNode pSta; tpPESession psessionEntry = &pMac->lim.gpSession[0]; //TBD-RAJESH HOW TO GET sessionEntry????? (void) arg4; // Get DPH Sta entry for this ASSOC ID pSta = dphGetHashEntry( pMac, (tANI_U16) arg1, &psessionEntry->dph.dphHashTable); if( NULL == pSta ) { p += log_sprintf( pMac, p, "\n%s: Could not find entry in DPH table for assocId = %d\n", __func__, arg1 ); } else { status = limPostMlmAddBAReq( pMac, pSta, (tANI_U8) arg2, (tANI_U16) arg3,psessionEntry); p += log_sprintf( pMac, p, "\n%s: Attempted to send an ADDBA Req to STA Index %d, for TID %d. 
Send Status = %s\n", __func__, pSta->staIndex, arg2, limResultCodeStr( status )); } return p; } static char * dump_lim_delba_req( tpAniSirGlobal pMac, tANI_U32 arg1, tANI_U32 arg2, tANI_U32 arg3, tANI_U32 arg4, char *p) { tSirRetStatus status; tpDphHashNode pSta; tpPESession psessionEntry = &pMac->lim.gpSession[0]; //TBD-RAJESH HOW TO GET sessionEntry????? // Get DPH Sta entry for this ASSOC ID pSta = dphGetHashEntry( pMac, (tANI_U16) arg1, &psessionEntry->dph.dphHashTable ); if( NULL == pSta ) { p += log_sprintf( pMac, p, "\n%s: Could not find entry in DPH table for assocId = %d\n", __func__, arg1 ); } else { status = limPostMlmDelBAReq( pMac, pSta, (tANI_U8) arg2, (tANI_U8) arg3, (tANI_U16) arg4 ,psessionEntry); p += log_sprintf( pMac, p, "\n%s: Attempted to send a DELBA Ind to STA Index %d, " "as the BA \"%s\" for TID %d, with Reason code %d. " "Send Status = %s\n", __func__, pSta->staIndex, (arg2 == 1)? "Initiator": "Recipient", arg3, // TID arg4, // Reason Code limResultCodeStr( status )); } return p; } static char * dump_lim_ba_timeout( tpAniSirGlobal pMac, tANI_U32 arg1, tANI_U32 arg2, tANI_U32 arg3, tANI_U32 arg4, char *p) { /* FIXME: NO HAL IN UMAC for PRIMA */ p += log_sprintf( pMac, p, "\n%s: Attempted to trigger a BA Timeout Ind to STA Index %d, for TID %d, Direction %d\n", __func__, arg1, // STA index arg2, // TID arg3 ); // BA Direction return p; } static char * dump_lim_list_active_ba( tpAniSirGlobal pMac, tANI_U32 arg1, tANI_U32 arg2, tANI_U32 arg3, tANI_U32 arg4, char *p) { tANI_U32 i; tpDphHashNode pSta; //TBD-RAJESH HOW TO GET sessionEntry????? 
tpPESession psessionEntry = &pMac->lim.gpSession[0]; //TBD-RAJESH (void) arg2; (void) arg3; (void) arg4; // Get DPH Sta entry for this ASSOC ID pSta = dphGetHashEntry( pMac, (tANI_U16) arg1, &psessionEntry->dph.dphHashTable); if( NULL == pSta ) { p += log_sprintf( pMac, p, "\n%s: Could not find entry in DPH table for assocId = %d\n", __func__, arg1 ); } else { p += log_sprintf( pMac, p, "\nList of Active BA sessions for STA Index %d with Assoc ID %d\n", pSta->staIndex, arg1 ); p += log_sprintf( pMac, p, "TID\tRxBA\tTxBA\tRxBufferSize\tTxBufferSize\tRxBATimeout\tTxBATimeout\n"); for( i = 0; i < STACFG_MAX_TC; i ++ ) p += log_sprintf( pMac, p, "%d\t%d\t%d\t%d\t%d\t%d\t%d\n", i, // TID pSta->tcCfg[i].fUseBARx, pSta->tcCfg[i].fUseBATx, pSta->tcCfg[i].rxBufSize, pSta->tcCfg[i].txBufSize, pSta->tcCfg[i].tuRxBAWaitTimeout, pSta->tcCfg[i].tuTxBAWaitTimeout ); } return p; } static char * dump_lim_AddBA_DeclineStat( tpAniSirGlobal pMac, tANI_U32 arg1, tANI_U32 arg2, tANI_U32 arg3, tANI_U32 arg4, char *p) { int Tid, Enable=(arg1 & 0x1); tANI_U8 val; if (arg1 > 1) { log_sprintf( pMac,p, "%s:Invalid Value is entered for Enable/Disable \n", __func__ ); arg1 &= 1; } val = pMac->lim.gAddBA_Declined; if (arg2 > 7) { log_sprintf( pMac,p, "%s:Invalid Value is entered for Tid \n", __func__ ); Tid = arg2 & 0x7; } else Tid = arg2; if ( Enable) val |= Enable << Tid; else val &= ~(0x1 << Tid); if (cfgSetInt(pMac, (tANI_U16)WNI_CFG_ADDBA_REQ_DECLINE, (tANI_U32) val) != eSIR_SUCCESS) log_sprintf( pMac,p, "%s:Config Set for ADDBA REQ Decline has failed \n", __func__ ); log_sprintf( pMac,p, "%s:Decline value %d is being set for TID %d ,\n \tAddBA_Decline Cfg value is %d \n", __func__ , arg1, Tid, (int) val); return p; } static char * dump_lim_set_dot11_mode( tpAniSirGlobal pMac, tANI_U32 arg1, tANI_U32 arg2, tANI_U32 arg3, tANI_U32 arg4, char *p) { tpPESession psessionEntry =&pMac->lim.gpSession[0]; //TBD-RAJESH HOW TO GET sessionEntry????? 
    // write the new mode into CFG; output is appended via the passed cursor p
    dump_cfg_set(pMac, WNI_CFG_DOT11_MODE, arg1, arg2, arg3, p);
    // AP / IBSS beaconing roles must regenerate the beacon template
    if ( (limGetSystemRole(psessionEntry) == eLIM_AP_ROLE) ||
         (limGetSystemRole(psessionEntry) == eLIM_STA_IN_IBSS_ROLE))
        schSetFixedBeaconFields(pMac,psessionEntry);
    p += log_sprintf( pMac,p, "The Dot11 Mode is set to %s",
        limDot11ModeStr(pMac, (tANI_U8)psessionEntry->dot11mode));
    return p;
}

/*
 * logDump handler (table entry 349): change the channel-bonding (CB) mode
 * of the PE session identified by arg1.
 *   arg1 = session id, arg2 = secondary channel offset (0 disables CB)
 * Rejects sessions that are not HT-capable.  Returns the advanced cursor p.
 */
static char*
dump_lim_update_cb_Mode(tpAniSirGlobal pMac, tANI_U32 arg1, tANI_U32 arg2, tANI_U32 arg3, tANI_U32 arg4, char *p)
{
    tANI_U32 localPwrConstraint;
    tpPESession psessionEntry = peFindSessionBySessionId(pMac, arg1);

    if (psessionEntry == NULL)
    {
        p += log_sprintf( pMac, p, "Invalid sessionId: %d \n ", arg1);
        return p;
    }
    if ( !psessionEntry->htCapability )
    {
        // CB is an HT feature; refuse for legacy sessions
        p += log_sprintf( pMac,p,
            "Error: Dot11 mode is non-HT, can not change the CB mode.\n");
        return p;
    }

    // any non-zero offset implies 40 MHz operation
    psessionEntry->htSupportedChannelWidthSet = arg2?1:0;
    psessionEntry->htRecommendedTxWidthSet =
        psessionEntry->htSupportedChannelWidthSet;
    psessionEntry->htSecondaryChannelOffset = arg2;

    if(eSIR_SUCCESS != cfgSetInt(pMac, WNI_CFG_CHANNEL_BONDING_MODE,
            arg2 ? WNI_CFG_CHANNEL_BONDING_MODE_ENABLE
                 : WNI_CFG_CHANNEL_BONDING_MODE_DISABLE))
        p += log_sprintf(pMac,p,
            "cfgSetInt failed for WNI_CFG_CHANNEL_BONDING_MODE\n");

    // NOTE(review): wlan_cfgGetInt return value is not checked here;
    // localPwrConstraint would be used uninitialized on failure — confirm
    // the CFG item is always present.
    wlan_cfgGetInt(pMac, WNI_CFG_LOCAL_POWER_CONSTRAINT, &localPwrConstraint);

    // re-tune to the same primary channel with the new secondary offset
    limSendSwitchChnlParams(pMac, psessionEntry->currentOperChannel,
        psessionEntry->htSecondaryChannelOffset,
        (tPowerdBm) localPwrConstraint, psessionEntry->peSessionId);

    // beaconing roles must rebuild the beacon with the new HT info
    if ( (limGetSystemRole(psessionEntry) == eLIM_AP_ROLE) ||
         (limGetSystemRole(psessionEntry) == eLIM_STA_IN_IBSS_ROLE))
        schSetFixedBeaconFields(pMac,psessionEntry);
    return p;
}

/*
 * logDump handler (table entry 350): abort an ongoing scan.
 * Currently a no-op — the actual abort call is commented out.
 */
static char*
dump_lim_abort_scan(tpAniSirGlobal pMac, tANI_U32 arg1, tANI_U32 arg2, tANI_U32 arg3, tANI_U32 arg4, char *p)
{
    (void) arg1; (void) arg2; (void) arg3; (void) arg4;
    //csrScanAbortMacScan(pMac);
    return p;
}

/*
 * logDump handler (table entry 351): start (arg1 == 1) or stop (otherwise)
 * the LIM background-scan timer and update the associated enable/terminate
 * flags.  Returns p unchanged (no output is produced).
 */
static char*
dump_lim_start_stop_bg_scan(tpAniSirGlobal pMac, tANI_U32 arg1, tANI_U32 arg2, tANI_U32 arg3, tANI_U32 arg4, char *p)
{
    (void) arg2; (void) arg3; (void) arg4;

    // always stop the currently running timer first
    if (TX_TIMER_VALID(pMac->lim.limTimers.gLimBackgroundScanTimer))
    {
        limDeactivateAndChangeTimer(pMac, eLIM_BACKGROUND_SCAN_TIMER);
    }

    if(arg1 == 1)
    {
        if (tx_timer_activate(
            &pMac->lim.limTimers.gLimBackgroundScanTimer) != TX_SUCCESS)
        {
            // could not re-arm the timer: give up on background scanning
            pMac->lim.gLimBackgroundScanTerminate = TRUE;
        }
        else
        {
            pMac->lim.gLimBackgroundScanTerminate = FALSE;
            pMac->lim.gLimBackgroundScanDisable = false;
            pMac->lim.gLimForceBackgroundScanDisable = false;
        }
    }
    else
    {
        // disable background scanning entirely
        pMac->lim.gLimBackgroundScanTerminate = TRUE;
        pMac->lim.gLimBackgroundScanChannelId = 0;
        pMac->lim.gLimBackgroundScanDisable = true;
        pMac->lim.gLimForceBackgroundScanDisable = true;
    }
    return p;
}

/*
 * logDump handler (table entry 352): request PE statistics.
 *   arg1 = stats category (1..5 -> statsMask below), arg2 = staId
 * Allocates an eWNI_SME_GET_STATISTICS_REQ message and posts it to SME.
 */
static char*
dump_lim_get_pe_statistics(tpAniSirGlobal pMac, tANI_U32 arg1, tANI_U32 arg2, tANI_U32 arg3, tANI_U32 arg4, char *p)
{
    tpAniGetPEStatsReq pReq;
    tANI_U32 statsMask;

    (void) arg2; (void) arg3; (void) arg4;

    switch(arg1)
    {
        case 1:
            statsMask = PE_SUMMARY_STATS_INFO;
            break;
        case 2:
            statsMask = PE_GLOBAL_CLASS_A_STATS_INFO;
            break;
        case 3:
            statsMask = PE_GLOBAL_CLASS_B_STATS_INFO;
            break;
        case 4:
            statsMask = PE_GLOBAL_CLASS_C_STATS_INFO;
            break;
        case 5:
            statsMask = PE_PER_STA_STATS_INFO;
            break;
        default:
            // unknown category: produce no output, post no request
            return p;
    }

    pReq = vos_mem_malloc(sizeof(tAniGetPEStatsReq));
    if (NULL == pReq)
    {
        p += log_sprintf( pMac,p, "Error: Unable to allocate memory.\n");
        return p;
    }
    vos_mem_set(pReq, sizeof(*pReq), 0);
    pReq->msgType = eWNI_SME_GET_STATISTICS_REQ;
    pReq->statsMask = statsMask;
    // NOTE(review): arg2 is used as the staId even though it was cast to
    // (void) above — the (void) cast is stale, not a functional problem.
    pReq->staId = (tANI_U16)arg2;

    pMac->lim.gLimRspReqd = eANI_BOOLEAN_TRUE;
    // ownership of pReq transfers to the SME message queue here
    limPostSmeMessage(pMac, eWNI_SME_GET_STATISTICS_REQ, (tANI_U32 *) pReq);

    return p;
}

extern char* setLOGLevel( tpAniSirGlobal pMac, char *p, tANI_U32 module, tANI_U32 level );

/*
 * logDump handler (table entry 353): set the MAC log level.
 *   arg1 = MAC module id, arg2 = log level (forwarded to setLOGLevel)
 */
static char *
dump_lim_set_log_level( tpAniSirGlobal pMac, tANI_U32 arg1, tANI_U32 arg2, tANI_U32 arg3, tANI_U32 arg4, char *p)
{
    p = setLOGLevel(pMac, p, arg1, arg2);
    return p;
}

/*
 * logDump handler (table entry 321): set the VOS trace level.
 *   arg1 = VOS module, arg2 = VOS log level
 */
static char *
dump_lim_update_log_level( tpAniSirGlobal pMac, tANI_U32 arg1, tANI_U32 arg2, tANI_U32 arg3, tANI_U32 arg4, char *p)
{
    vos_trace_setLevel( arg1, arg2 );
    return p;
}

/* logDump handler (table entry 320): inject an SME scan request. */
static char *
dump_lim_scan_req_send( tpAniSirGlobal pMac, tANI_U32 arg1, tANI_U32 arg2, tANI_U32 arg3, tANI_U32 arg4, char *p)
{
    (void) arg1; (void) arg2; (void) arg3; (void) arg4;
    p = sendSmeScanReq(pMac, p);
    return p;
}

/*
 * logDump handler (table entry 355): inject an SME start-BSS request.
 * NOTE(review): arg1 is forwarded despite the stale (void) cast above.
 */
static char *
dump_lim_send_start_bss_req( tpAniSirGlobal pMac, tANI_U32 arg1, tANI_U32 arg2, tANI_U32 arg3, tANI_U32 arg4, char *p)
{
    (void) arg1; (void) arg2; (void) arg3; (void) arg4;
    p = sendSmeStartBssReq(pMac, p,arg1);
    return p;
}

/* logDump handler (table entry 359): inject an SME join request. */
static char *
dump_lim_send_join_req( tpAniSirGlobal pMac, tANI_U32 arg1, tANI_U32 arg2, tANI_U32 arg3, tANI_U32 arg4, char *p)
{
    (void) arg1; (void) arg2; (void) arg3; (void) arg4;
    p = sendSmeJoinReq(pMac, p);
    return p;
}

/*
 * logDump handler (table entry 357): inject an SME disassociation request.
 * NOTE(review): arg1/arg2 are forwarded despite the stale (void) casts.
 */
static char *
dump_lim_send_disassoc_req( tpAniSirGlobal pMac, tANI_U32 arg1, tANI_U32 arg2, tANI_U32 arg3, tANI_U32 arg4, char *p)
{
    (void) arg1; (void) arg2; (void) arg3; (void) arg4;
    p = sendSmeDisAssocReq(pMac, p, arg1 ,arg2);
    return p;
}

/* logDump handler (table entry 358): inject an SME stop-BSS request for
 * the session id in arg1. */
static char *
dump_lim_stop_bss_req( tpAniSirGlobal pMac, tANI_U32 arg1,
    tANI_U32 arg2, tANI_U32 arg3, tANI_U32 arg4, char *p)
{
    (void) arg2; (void) arg3; (void) arg4;
    p = sendSmeStopBssReq(pMac, p, arg1);
    return p;
}

/* logDump handler (table entry 356): print PE session information. */
static char *
dump_lim_session_print( tpAniSirGlobal pMac, tANI_U32 arg1, tANI_U32 arg2, tANI_U32 arg3, tANI_U32 arg4, char *p)
{
    (void) arg1; (void) arg2; (void) arg3; (void) arg4;
    p = printSessionInfo(pMac, p);
    return p;
}

/*
 * logDump handler (table entry 307): trigger a CSR reassociation on the
 * session given by arg1, using the session's current profile fields.
 * Takes the SME global lock around the CSR calls.
 */
static char *
dump_lim_sme_reassoc_req( tpAniSirGlobal pMac, tANI_U32 arg1, tANI_U32 arg2, tANI_U32 arg3, tANI_U32 arg4, char *p)
{
    tANI_U32 sessionId = arg1;
    tCsrRoamModifyProfileFields modifyProfileFields;
    tANI_U32 roamId;

    (void) arg2; (void) arg3; (void) arg4;

    if( CSR_IS_SESSION_VALID( pMac, sessionId ) )
    {
        if(HAL_STATUS_SUCCESS(sme_AcquireGlobalLock( &pMac->sme )))
        {
            csrGetModifyProfileFields(pMac, sessionId, &modifyProfileFields);
            csrReassoc( pMac, sessionId, &modifyProfileFields, &roamId, 0);
            sme_ReleaseGlobalLock( &pMac->sme );
        }
    }
    else
    {
        p += log_sprintf( pMac,p, "Invalid session = %d\n", sessionId);
    }
    return p;
}

/* logDump handler (table entry 308): dump 11h data — currently a no-op. */
static char *
dump_lim_dot11h_stats( tpAniSirGlobal pMac, tANI_U32 arg1, tANI_U32 arg2, tANI_U32 arg3, tANI_U32 arg4, char *p)
{
    return p;
}

/*
 * logDump handler (table entry 309): enable (arg1 != 0) or disable the
 * system "learn mode" measurement flag and report the new state.
 */
static char *
dump_lim_enable_measurement( tpAniSirGlobal pMac, tANI_U32 arg1, tANI_U32 arg2, tANI_U32 arg3, tANI_U32 arg4, char *p)
{
    (void) arg2; (void) arg3; (void) arg4;

    if (arg1)
    {
        pMac->sys.gSysEnableLearnMode = eANI_BOOLEAN_TRUE;
        p += log_sprintf(pMac, p, "Measurement enabled\n");
    }
    else
    {
        pMac->sys.gSysEnableLearnMode = eANI_BOOLEAN_FALSE;
        p += log_sprintf(pMac, p, "Measurement disabled\n");
    }
    return p;
}

/*
 * logDump handler (table entry 310): toggle the Quiet IE.
 * The whole body is compiled out (#if 0), so this is currently a no-op.
 */
static char *
dump_lim_enable_quietIE( tpAniSirGlobal pMac, tANI_U32 arg1, tANI_U32 arg2, tANI_U32 arg3, tANI_U32 arg4, char *p)
{
    (void) arg2; (void) arg3; (void) arg4;
#if 0
    if (arg1)
    {
        pMac->lim.gLimSpecMgmt.fQuietEnabled = eANI_BOOLEAN_TRUE;
        p += log_sprintf(pMac, p, "QuietIE enabled\n");
    }
    else
    {
        pMac->lim.gLimSpecMgmt.fQuietEnabled = eANI_BOOLEAN_FALSE;
        p += log_sprintf(pMac, p, "QuietIE disabled\n");
    }
#endif
    return p;
}

static
char *
dump_lim_disable_enable_scan( tpAniSirGlobal pMac, tANI_U32 arg1, tANI_U32 arg2, tANI_U32 arg3, tANI_U32 arg4, char *p)
{
    /* logDump handler (table entry 311): arg1 != 0 disables scanning,
     * arg1 == 0 re-enables it; reports the new state. */
    (void) arg2; (void) arg3; (void) arg4;

    if (arg1)
    {
        pMac->lim.fScanDisabled = 1;
        p += log_sprintf(pMac, p, "Scan disabled\n");
    }
    else
    {
        pMac->lim.fScanDisabled = 0;
        p += log_sprintf(pMac, p, "scan enabled\n");
    }
    return p;
}

/* Post a SIR_LIM_MIN_CHANNEL_TIMEOUT message to LIM, which forces the
 * current scan dwell to finish. Helper for dump_lim_finishscan_send. */
static char *finishScan(tpAniSirGlobal pMac, char *p)
{
    tSirMsgQ msg;

    p += log_sprintf( pMac,p, "logDump finishScan \n");

    msg.type = SIR_LIM_MIN_CHANNEL_TIMEOUT;
    msg.bodyval = 0;
    msg.bodyptr = NULL;

    limPostMsgApi(pMac, &msg);
    return p;
}

/* logDump handler (table entry 300): dump LIM state/statistics; arg1 is
 * forwarded to dumpLim as the session id. */
static char *
dump_lim_info( tpAniSirGlobal pMac, tANI_U32 arg1, tANI_U32 arg2, tANI_U32 arg3, tANI_U32 arg4, char *p)
{
    (void) arg2; (void) arg3; (void) arg4;
    p = dumpLim( pMac, p, arg1);
    return p;
}

/* logDump handler (table entry 331): send "finish scan" to LIM. */
static char *
dump_lim_finishscan_send( tpAniSirGlobal pMac, tANI_U32 arg1, tANI_U32 arg2, tANI_U32 arg3, tANI_U32 arg4, char *p)
{
    (void) arg1; (void) arg2; (void) arg3; (void) arg4;
    p = finishScan(pMac, p);
    return p;
}

/* logDump handler (table entry 332): force a probe response from LIM. */
static char *
dump_lim_prb_rsp_send( tpAniSirGlobal pMac, tANI_U32 arg1, tANI_U32 arg2, tANI_U32 arg3, tANI_U32 arg4, char *p)
{
    (void) arg1; (void) arg2; (void) arg3; (void) arg4;
    p = testLimSendProbeRsp( pMac, p );
    return p;
}

/* logDump handler (table entry 333): trigger beacon generation in SCH. */
static char *
dump_sch_beacon_trigger( tpAniSirGlobal pMac, tANI_U32 arg1, tANI_U32 arg2, tANI_U32 arg3, tANI_U32 arg4, char *p)
{
    (void) arg1; (void) arg2; (void) arg3; (void) arg4;
    p = triggerBeaconGen(pMac, p);
    return p;
}

/*
 * logDump handler (table entry 354): set WNI_CFG_SCAN_IN_POWERSAVE to arg1.
 * NOTE(review): dump_cfg_set's return value is discarded here, matching the
 * other call site in this file — confirm it appends its output via p.
 */
static char*
dump_lim_set_scan_in_powersave( tpAniSirGlobal pMac, tANI_U32 arg1, tANI_U32 arg2, tANI_U32 arg3, tANI_U32 arg4, char *p)
{
    p += log_sprintf( pMac,p, "logDump set scan in powersave to %d \n", arg1);
    dump_cfg_set(pMac, WNI_CFG_SCAN_IN_POWERSAVE, arg1, arg2, arg3, p);
    return p;
}

#if defined WLAN_FEATURE_VOWIFI

/*
 * logDump handler (table entry 360): build and send an RRM action frame
 * on the session given by arg2; arg3 selects the report/request type.
 */
static char *
dump_lim_send_rrm_action( tpAniSirGlobal pMac, tANI_U32 arg1, tANI_U32 arg2, tANI_U32 arg3, tANI_U32 arg4, char *p)
{
    tpPESession psessionEntry;
    tSirMacRadioMeasureReport *pRRMReport =
vos_mem_malloc(4*sizeof(tSirMacRadioMeasureReport)); tANI_U8 num = (tANI_U8)(arg4 > 4 ? 4 : arg4); tANI_U8 i; if (!pRRMReport) { p += log_sprintf(pMac, p, "Unable to allocate memory to process command\n"); goto done; } if((psessionEntry = peFindSessionBySessionId(pMac,(tANI_U8)arg2) )== NULL) { p += log_sprintf( pMac,p,"Session does not exist for given session Id \n"); goto done; } switch (arg3) { case 0: /* send two reports with incapable bit set */ pRRMReport[0].type = 6; pRRMReport[1].type = 7; limSendRadioMeasureReportActionFrame( pMac, 1, 2, &pRRMReport[0], psessionEntry->bssId, psessionEntry ); break; case 1: for ( i = 0 ; i < num ; i++ ) { pRRMReport[i].type = 5; if ( i == 3 ) pRRMReport[i].refused = 1; else pRRMReport[i].refused = 0; pRRMReport[i].report.beaconReport.regClass = 32; pRRMReport[i].report.beaconReport.channel = i; pRRMReport[i].report.beaconReport.measDuration = 23; pRRMReport[i].report.beaconReport.phyType = i << 4; //some value. pRRMReport[i].report.beaconReport.bcnProbeRsp = 1; pRRMReport[i].report.beaconReport.rsni = 10; pRRMReport[i].report.beaconReport.rcpi = 40; pRRMReport[i].report.beaconReport.bssid[0] = 0x00; pRRMReport[i].report.beaconReport.bssid[1] = 0xAA; pRRMReport[i].report.beaconReport.bssid[2] = 0xBB; pRRMReport[i].report.beaconReport.bssid[3] = 0xCC; pRRMReport[i].report.beaconReport.bssid[4] = 0x00; pRRMReport[i].report.beaconReport.bssid[5] = 0x01 << i; pRRMReport[i].report.beaconReport.antennaId = 10; pRRMReport[i].report.beaconReport.parentTSF = 0x1234; pRRMReport[i].report.beaconReport.numIes = i * 10; { tANI_U8 j; for( j = 0; j < pRRMReport[i].report.beaconReport.numIes ; j++ ) { pRRMReport[i].report.beaconReport.Ies[j] = j + i; //Junk values. } } } limSendRadioMeasureReportActionFrame( pMac, 1, num, &pRRMReport[0], psessionEntry->bssId, psessionEntry ); break; case 2: //send Neighbor request. 
{ tSirMacNeighborReportReq neighbor; neighbor.dialogToken = 2; neighbor.ssid_present = (tANI_U8) arg4; neighbor.ssid.length = 5; vos_mem_copy(neighbor.ssid.ssId, "abcde", 5); limSendNeighborReportRequestFrame( pMac, &neighbor, psessionEntry->bssId, psessionEntry ); } break; case 3: //send Link measure report. { tSirMacLinkReport link; link.dialogToken = 4; link.txPower = 34; link.rxAntenna = 2; link.txAntenna = 1; link.rcpi = 9; link.rsni = 3; limSendLinkReportActionFrame( pMac, &link, psessionEntry->bssId, psessionEntry ); } break; default: break; } done: vos_mem_free(pRRMReport); return p; } static char * dump_lim_unpack_rrm_action( tpAniSirGlobal pMac, tANI_U32 arg1, tANI_U32 arg2, tANI_U32 arg3, tANI_U32 arg4, char *p) { tpPESession psessionEntry; tANI_U32 status; tANI_U8 size[] = { 0x2C, 0x2F, 0x25, 0x2C, 0x1C, 0x05 }; tANI_U8 pBody[][100] = { { /*Beacon Request 0*/ 0x05, 0x00, 0x01, 0x00, 0x00, //Measurement request IE 0x26, 0x25, 0x01, 0x00, //Beacon request type 0x05, //Beacon request starts here 0x0C, 0x01, 0x30, 0x00, 0x14, 0x00, 0x01, //BSSID 0xFF, 0xFF, 0xFF, 0xFF, 0xff, 0xFF, //SSID 0x00, 0x05, 0x57, 0x69, 0x46, 0x69, 0x31, //Reporting Condition 0x01, 0x02, 0x00, 0x00, //Reporting Detail 0x02, 0x01, 0x1, //Request IE 0x0A, 0x05, 0x00, 0x30, 0x46, 0x36, 0xDD }, { /*Beacon Request 1*/ 0x05, 0x00, 0x01, 0x00, 0x00, //Measurement request IE 0x26, 0x28, 0x01, 0x00, //Beacon request type 0x05, //Beacon request starts here 0x0C, 0xFF, 0x30, 0x00, 0x14, 0x00, 0x01, //BSSID 0xFF, 0xFF, 0xFF, 0xFF, 0xff, 0xFF, //SSID /* 0x00, 0x08, 0x35, 0x36, 0x37, 0x38, 0x39, 0x40, 0x41, 0x42, */ //Reporting Condition 0x01, 0x02, 0x00, 0x00, //Reporting Detail 0x02, 0x01, 0x1, //Request IE 0x0A, 0x05, 0x00, 0x30, 0x46, 0x36, 0xDD, //AP channel report 0x33, 0x03, 0x0C, 0x01, 0x06, 0x33, 0x03, 0x0C, 0x24, 0x30, }, { /*Beacon Request 2*/ 0x05, 0x00, 0x01, 0x00, 0x00, //Measurement request IE 0x26, 0x1E, 0x01, 0x00, //Beacon request type 0x05, //Beacon request starts here 0x0C, 
0x00, 0x30, 0x00, 0x14, 0x00, 0x02, //BSSID 0xFF, 0xFF, 0xFF, 0xFF, 0xff, 0xFF, //SSID 0x00, 0x05, 0x57, 0x69, 0x46, 0x69, 0x31, //0x00, 0x08, 0x41, 0x53, 0x54, 0x2D, 0x57, 0x41, 0x50, 0x49, //Reporting Condition 0x01, 0x02, 0x00, 0x00, //Reporting Detail 0x02, 0x01, 0x0 //Request IE }, { /*Beacon Request 3*/ 0x05, 0x00, 0x01, 0x00, 0x00, //Measurement request IE 0x26, 0x25, 0x01, 0x00, //Beacon request type 0x05, //Beacon request starts here 0x0C, 0x01, 0x30, 0x00, 0x69, 0x00, 0x00, //BSSID 0xFF, 0xFF, 0xFF, 0xFF, 0xff, 0xFF, //SSID 0x00, 0x05, 0x57, 0x69, 0x46, 0x69, 0x31, //Reporting Condition 0x01, 0x02, 0x00, 0x00, //Reporting Detail 0x02, 0x01, 0x1, //Request IE 0x0A, 0x05, 0x00, 0x30, 0x46, 0x36, 0xDD }, { /*Neighbor report*/ 0x05, 0x05, 0x01, //Measurement request IE 0x34, 0x17, //BSSID 0xFF, 0xFF, 0xFF, 0xFF, 0xff, 0xFF, //BSSID INFOo 0xED, 0x01, 0x00, 0x00, //Reg class, channel, Phy type 0x20, 0x01, 0x02, //TSF Info 0x01, 0x04, 0x02, 0x00, 0x60, 0x00, //Condensed country 0x02, 0x02, 0x62, 0x63 }, { /* Link measurement request */ 0x05, 0x02, 0x00, //Txpower used 0x00, //Max Tx Power 0x00 } }; if((psessionEntry = peFindSessionBySessionId(pMac,(tANI_U8)arg1) )== NULL) { p += log_sprintf( pMac,p,"Session does not exist for given session Id \n"); return p; } switch (arg2) { case 0: case 1: case 2: case 3: { tDot11fRadioMeasurementRequest *frm = vos_mem_malloc(sizeof(tDot11fRadioMeasurementRequest)); if (!frm) { p += log_sprintf(pMac, p, "Unable to allocate memory to process command\n"); break; } if( (status = dot11fUnpackRadioMeasurementRequest( pMac, &pBody[arg2][0], size[arg2], frm )) != 0 ) p += log_sprintf( pMac, p, "failed to unpack.....status = %x\n", status); else rrmProcessRadioMeasurementRequest( pMac, psessionEntry->bssId, frm, psessionEntry ); vos_mem_free(frm); } break; case 4: { tDot11fNeighborReportResponse *frm = vos_mem_malloc(sizeof(tDot11fNeighborReportResponse)); if (!frm) { p += log_sprintf(pMac, p, "Unable to allocate memory to process 
command\n"); break; } pBody[arg2][2] = (tANI_U8)arg3; //Dialog Token if( (status = dot11fUnpackNeighborReportResponse( pMac, &pBody[arg2][0], size[arg2], frm )) != 0 ) p += log_sprintf( pMac, p, "failed to unpack.....status = %x\n", status); else rrmProcessNeighborReportResponse( pMac, frm, psessionEntry ); vos_mem_free(frm); } break; case 5: { // FIXME. } break; case 6: { tPowerdBm localConstraint = (tPowerdBm) arg3; tPowerdBm maxTxPower = cfgGetRegulatoryMaxTransmitPower( pMac, psessionEntry->currentOperChannel ); maxTxPower = VOS_MIN( maxTxPower, maxTxPower-localConstraint ); if( maxTxPower != psessionEntry->maxTxPower ) { rrmSendSetMaxTxPowerReq( pMac, maxTxPower, psessionEntry ); psessionEntry->maxTxPower = maxTxPower; } } break; default: p += log_sprintf( pMac, p, "Invalid option" ); break; } return p; } #endif #ifdef WLAN_FEATURE_NEIGHBOR_ROAMING #ifdef RSSI_HACK /* This dump command is needed to set the RSSI values in TL while testing handoff. Handoff code was tested * using this dump command. 
Whatever the value gives as the first parameter will be considered as the average * RSSI by TL and invokes corresponding callback registered by the clients */ extern int dumpCmdRSSI; static char * dump_lim_set_tl_data_pkt_rssi( tpAniSirGlobal pMac, tANI_U32 arg1, tANI_U32 arg2, tANI_U32 arg3, tANI_U32 arg4, char *p) { dumpCmdRSSI = arg1; limLog(pMac, LOGE, FL("Setting TL data packet RSSI to %d"), dumpCmdRSSI); return p; } #endif #endif #if defined WLAN_FEATURE_VOWIFI_11R /* This command is used to trigger FT Preauthentication with the AP with BSSID below */ static char * dump_lim_ft_event( tpAniSirGlobal pMac, tANI_U32 arg1, tANI_U32 arg2, tANI_U32 arg3, tANI_U32 arg4, char *p) { static tANI_U8 macAddr[6] = {0x00, 0xde, 0xad, 0xaf, 0xaf, 0x04}; tpPESession psessionEntry; tSirMsgQ msg; tpSirFTPreAuthReq pftPreAuthReq; tANI_U16 auth_req_len = 0; tCsrRoamConnectedProfile Profile; csrRoamCopyConnectProfile(pMac, arg2, &Profile); if((psessionEntry = peFindSessionBySessionId(pMac,(tANI_U8)arg2) )== NULL) { p += log_sprintf( pMac, p,"Session does not exist usage: 363 <0> sessionid channel \n"); return p; } switch (arg1) { case 0: // Send Pre-auth event { /*----------------*/ p += log_sprintf( pMac,p, "Preparing Pre Auth Req message\n"); auth_req_len = sizeof(tSirFTPreAuthReq); pftPreAuthReq = vos_mem_malloc(auth_req_len); if (NULL == pftPreAuthReq) { p += log_sprintf( pMac,p,"Pre auth dump: AllocateMemory() failed \n"); return p; } pftPreAuthReq->pbssDescription = vos_mem_malloc(sizeof(Profile.pBssDesc->length)+ Profile.pBssDesc->length); pftPreAuthReq->messageType = eWNI_SME_FT_PRE_AUTH_REQ; pftPreAuthReq->length = auth_req_len + sizeof(Profile.pBssDesc->length) + Profile.pBssDesc->length; pftPreAuthReq->preAuthchannelNum = 6; vos_mem_copy((void *) &pftPreAuthReq->currbssId, (void *)psessionEntry->bssId, 6); vos_mem_copy((void *) &pftPreAuthReq->preAuthbssId, (void *)macAddr, 6); pftPreAuthReq->ft_ies_length = (tANI_U16)pMac->ft.ftSmeContext.auth_ft_ies_length; // Also 
setup the mac address in sme context. vos_mem_copy(pMac->ft.ftSmeContext.preAuthbssId, macAddr, 6); vos_mem_copy(pftPreAuthReq->ft_ies, pMac->ft.ftSmeContext.auth_ft_ies, pMac->ft.ftSmeContext.auth_ft_ies_length); vos_mem_copy(Profile.pBssDesc->bssId, macAddr, 6); p += log_sprintf( pMac,p, "\n ----- LIM Debug Information ----- \n"); p += log_sprintf( pMac, p, "%s: length = %d\n", __func__, (int)pMac->ft.ftSmeContext.auth_ft_ies_length); p += log_sprintf( pMac, p, "%s: length = %02x\n", __func__, (int)pMac->ft.ftSmeContext.auth_ft_ies[0]); p += log_sprintf( pMac, p, "%s: Auth Req %02x %02x %02x\n", __func__, pftPreAuthReq->ft_ies[0], pftPreAuthReq->ft_ies[1], pftPreAuthReq->ft_ies[2]); p += log_sprintf( pMac, p, "%s: Session %02x %02x %02x\n", __func__, psessionEntry->bssId[0], psessionEntry->bssId[1], psessionEntry->bssId[2]); p += log_sprintf( pMac, p, "%s: Session %02x %02x %02x %p\n", __func__, pftPreAuthReq->currbssId[0], pftPreAuthReq->currbssId[1], pftPreAuthReq->currbssId[2], pftPreAuthReq); Profile.pBssDesc->channelId = (tANI_U8)arg3; vos_mem_copy((void *)pftPreAuthReq->pbssDescription, (void *)Profile.pBssDesc, Profile.pBssDesc->length); msg.type = eWNI_SME_FT_PRE_AUTH_REQ; msg.bodyptr = pftPreAuthReq; msg.bodyval = 0; p += log_sprintf( pMac, p, "limPostMsgApi(eWNI_SME_FT_PRE_AUTH_REQ) \n"); limPostMsgApi(pMac, &msg); } break; default: break; } return p; } #endif static char * dump_lim_channel_switch_announcement( tpAniSirGlobal pMac, tANI_U32 arg1, tANI_U32 arg2, tANI_U32 arg3, tANI_U32 arg4, char *p) { tpPESession psessionEntry; tANI_U8 nMode = arg2; tANI_U8 nNewChannel = arg3; tANI_U8 nCount = arg4; tANI_U8 peer[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}; if((psessionEntry = peFindSessionBySessionId(pMac,(tANI_U8)arg1) )== NULL) { p += log_sprintf( pMac, p,"Session does not exist usage: 363 <0> sessionid channel \n"); VOS_TRACE(VOS_MODULE_ID_PE, VOS_TRACE_LEVEL_WARN,"Session Not found!!!!"); return p; } limSendChannelSwitchMgmtFrame( pMac, peer, nMode, 
nNewChannel, nCount, psessionEntry ); psessionEntry->gLimChannelSwitch.switchCount = nCount; psessionEntry->gLimSpecMgmt.dot11hChanSwState = eLIM_11H_CHANSW_RUNNING; psessionEntry->gLimChannelSwitch.switchMode = nMode; psessionEntry->gLimChannelSwitch.primaryChannel = nNewChannel; schSetFixedBeaconFields(pMac, psessionEntry); limSendBeaconInd(pMac, psessionEntry); return p; } #ifdef WLAN_FEATURE_11AC static char * dump_lim_vht_opmode_notification(tpAniSirGlobal pMac, tANI_U32 arg1,tANI_U32 arg2,tANI_U32 arg3, tANI_U32 arg4, char *p) { tANI_U8 peer[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}; tANI_U8 nMode = arg2; tpPESession psessionEntry; if((psessionEntry = peFindSessionBySessionId(pMac,(tANI_U8)arg1) )== NULL) { p += log_sprintf( pMac, p,"Session does not exist usage: 366 <0> sessionid channel \n"); return p; } limSendVHTOpmodeNotificationFrame(pMac, peer, nMode,psessionEntry); psessionEntry->gLimOperatingMode.present = 1; psessionEntry->gLimOperatingMode.chanWidth = nMode; psessionEntry->gLimOperatingMode.rxNSS = 0; psessionEntry->gLimOperatingMode.rxNSSType = 0; schSetFixedBeaconFields(pMac, psessionEntry); limSendBeaconInd(pMac, psessionEntry); return p; } static char * dump_lim_vht_channel_switch_notification(tpAniSirGlobal pMac, tANI_U32 arg1,tANI_U32 arg2,tANI_U32 arg3, tANI_U32 arg4, char *p) { tpPESession psessionEntry; tANI_U8 nChanWidth = arg2; tANI_U8 nNewChannel = arg3; tANI_U8 ncbMode = arg4; tANI_U8 peer[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}; if((psessionEntry = peFindSessionBySessionId(pMac,(tANI_U8)arg1) )== NULL) { p += log_sprintf( pMac, p,"Session does not exist usage: 367 <0> sessionid channel \n"); VOS_TRACE(VOS_MODULE_ID_PE, VOS_TRACE_LEVEL_WARN,"Session Not found!!!!"); return p; } limSendVHTChannelSwitchMgmtFrame( pMac, peer, nChanWidth, nNewChannel, (ncbMode+1), psessionEntry ); psessionEntry->gLimChannelSwitch.switchCount = 0; psessionEntry->gLimSpecMgmt.dot11hChanSwState = eLIM_11H_CHANSW_RUNNING; 
psessionEntry->gLimChannelSwitch.switchMode = 1; psessionEntry->gLimChannelSwitch.primaryChannel = nNewChannel; psessionEntry->gLimWiderBWChannelSwitch.newChanWidth = nChanWidth; psessionEntry->gLimWiderBWChannelSwitch.newCenterChanFreq0 = limGetCenterChannel(pMac,nNewChannel,(ncbMode+1),nChanWidth); psessionEntry->gLimWiderBWChannelSwitch.newCenterChanFreq1 = 0; schSetFixedBeaconFields(pMac, psessionEntry); limSendBeaconInd(pMac, psessionEntry); return p; } #endif static char * dump_lim_cancel_channel_switch_announcement( tpAniSirGlobal pMac, tANI_U32 arg1, tANI_U32 arg2, tANI_U32 arg3, tANI_U32 arg4, char *p) { tpPESession psessionEntry; if((psessionEntry = peFindSessionBySessionId(pMac,(tANI_U8)arg1) )== NULL) { p += log_sprintf( pMac, p,"Session does not exist usage: 363 <0> sessionid channel \n"); VOS_TRACE(VOS_MODULE_ID_PE, VOS_TRACE_LEVEL_WARN,"Session Not found!!!!"); return p; } psessionEntry->gLimChannelSwitch.switchCount = 0; psessionEntry->gLimSpecMgmt.dot11hChanSwState = eLIM_11H_CHANSW_INIT; psessionEntry->gLimChannelSwitch.switchMode = 0; psessionEntry->gLimChannelSwitch.primaryChannel = 0; schSetFixedBeaconFields(pMac, psessionEntry); limSendBeaconInd(pMac, psessionEntry); return p; } static char * dump_lim_mcc_policy_maker(tpAniSirGlobal pMac, tANI_U32 arg1,tANI_U32 arg2,tANI_U32 arg3, tANI_U32 arg4, char *p) { VOS_TRACE(VOS_MODULE_ID_PE, VOS_TRACE_LEVEL_FATAL, "dump_lim_mcc_policy_maker arg = %d",arg1); if(arg1 == 0) //Disable feature completely { WDA_TrafficStatsTimerActivate(FALSE); if (ccmCfgSetInt(pMac, WNI_CFG_ENABLE_MCC_ADAPTIVE_SCHED, FALSE, NULL, eANI_BOOLEAN_FALSE)==eHAL_STATUS_FAILURE) { limLog( pMac, LOGE, FL("Could not get WNI_CFG_ENABLE_MCC_ADAPTIVE_SCHED")); } } else if(arg1 == 1) //Enable feature { if (ccmCfgSetInt(pMac, WNI_CFG_ENABLE_MCC_ADAPTIVE_SCHED, TRUE, NULL, eANI_BOOLEAN_FALSE)==eHAL_STATUS_FAILURE) { limLog( pMac, LOGE, FL("Could not set WNI_CFG_ENABLE_MCC_ADAPTIVE_SCHED")); } } else if(arg1 == 2) //Enable feature and 
activate periodic timer { if (ccmCfgSetInt(pMac, WNI_CFG_ENABLE_MCC_ADAPTIVE_SCHED, TRUE, NULL, eANI_BOOLEAN_FALSE)==eHAL_STATUS_FAILURE) { limLog( pMac, LOGE, FL("Could not set WNI_CFG_ENABLE_MCC_ADAPTIVE_SCHED")); } WDA_TrafficStatsTimerActivate(TRUE); } else if(arg1 == 3) //Enable only stats collection - Used for unit testing { VOS_TRACE(VOS_MODULE_ID_PE, VOS_TRACE_LEVEL_FATAL, "Enabling Traffic Stats in DTS"); WDI_DS_ActivateTrafficStats(); } else if(arg1 == 4) //Send current stats snapshot to Riva -- Used for unit testing { v_VOID_t * pVosContext = vos_get_global_context(VOS_MODULE_ID_WDA, NULL); tWDA_CbContext *pWDA = vos_get_context(VOS_MODULE_ID_WDA, pVosContext); ccmCfgSetInt(pMac, WNI_CFG_ENABLE_MCC_ADAPTIVE_SCHED, TRUE, NULL, eANI_BOOLEAN_FALSE); if(pWDA != NULL) { WDA_TimerTrafficStatsInd(pWDA); } WDA_TrafficStatsTimerActivate(FALSE); ccmCfgSetInt(pMac, WNI_CFG_ENABLE_MCC_ADAPTIVE_SCHED, FALSE,NULL, eANI_BOOLEAN_FALSE); } else if (arg1 == 5) //Change the periodicity of TX stats timer { v_VOID_t * pVosContext = vos_get_global_context(VOS_MODULE_ID_WDA, NULL); tWDA_CbContext *pWDA = vos_get_context(VOS_MODULE_ID_WDA, pVosContext); if (pWDA != NULL && tx_timer_change(&pWDA->wdaTimers.trafficStatsTimer, arg2/10, arg2/10) != TX_SUCCESS) { limLog(pMac, LOGP, FL("Disable timer before changing timeout value")); } } return p; } #ifdef WLANTL_DEBUG /* API to print number of pkts received based on rate index */ /* arg1 = station Id */ /* arg2 = BOOLEAN value to either or not flush the counters */ static char * dump_lim_get_pkts_rcvd_per_rate_idx( tpAniSirGlobal pMac, tANI_U32 arg1, tANI_U32 arg2, tANI_U32 arg3, tANI_U32 arg4, char *p) { /* if anything other than 1, then we need not flush the counters */ if( arg2 != 1) arg2 = FALSE; WLANTLPrintPktsRcvdPerRateIdx(pMac->roam.gVosContext, (tANI_U8)arg1, (v_BOOL_t)arg2); return p; } /* API to print number of pkts received based on rssi */ /* arg1 = station Id */ /* arg2 = BOOLEAN value to either or not flush the 
counters */ static char * dump_lim_get_pkts_rcvd_per_rssi_values( tpAniSirGlobal pMac, tANI_U32 arg1, tANI_U32 arg2, tANI_U32 arg3, tANI_U32 arg4, char *p) { /* if anything other than 1, then we need not flush the counters */ if( arg2 != 1) arg2 = FALSE; WLANTLPrintPktsRcvdPerRssi(pMac->roam.gVosContext, (tANI_U8)arg1, (v_BOOL_t)arg2); return p; } #endif static char * dump_set_max_probe_req(tpAniSirGlobal pMac, tANI_U32 arg1, tANI_U32 arg2, tANI_U32 arg3, tANI_U32 arg4, char *p) { if ((arg1 <= 0) || (arg1 > 4)){ limLog(pMac, LOGE, FL("invalid number. valid range 1 - 4 \n")); return p; } pMac->lim.maxProbe = arg1; return p; } /* API to fill Rate Info based on mac efficiency * arg 1: mac efficiency to be used to calculate mac thorughput for a given rate index * arg 2: starting rateIndex to apply the macEfficiency to * arg 3: ending rateIndex to apply the macEfficiency to */ static char * dump_limRateInfoBasedOnMacEff(tpAniSirGlobal pMac, tANI_U32 arg1, tANI_U32 arg2, tANI_U32 arg3, tANI_U32 arg4, char *p) { limLog(pMac, LOGE, FL("arg1 %u, arg2 %u, arg3 %u"), arg1, arg2, arg3); WDTS_FillRateInfo((tANI_U8)(arg1), (tANI_U16)(arg2), (tANI_U16)(arg3)); return p; } static tDumpFuncEntry limMenuDumpTable[] = { {0, "PE (300-499)", NULL}, {300, "LIM: Dump state(s)/statistics <session id>", dump_lim_info}, {301, "PE.LIM: dump TSPEC Table", dump_lim_tspec_table}, {302, "PE.LIM: dump specified TSPEC entry (id)", dump_lim_tspec_entry}, {303, "PE.LIM: dump EDCA params", dump_lim_edca_params}, {304, "PE.LIM: dump DPH table summary", dump_lim_dph_table_summary}, {305, "PE.LIM: dump link monitor stats", dump_lim_link_monitor_stats}, {306, "PE.LIM:dump Set the BAR Decline stat(arg1= 1/0 (enable/disable) arg2 =TID", dump_lim_AddBA_DeclineStat}, {307, "PE: LIM: dump CSR Send ReAssocReq", dump_lim_sme_reassoc_req}, {308, "PE:LIM: dump all 11H related data", dump_lim_dot11h_stats}, {309, "PE:LIM: dump to enable Measurement on AP", dump_lim_enable_measurement}, {310, "PE:LIM: dump to 
enable QuietIE on AP", dump_lim_enable_quietIE}, {311, "PE:LIM: disable/enable scan 1(disable)", dump_lim_disable_enable_scan}, {320, "PE.LIM: send sme scan request", dump_lim_scan_req_send}, /*FIXME_GEN6*/ /* This dump command is more of generic dump cmd and hence it should * be moved to logDump.c */ {321, "PE:LIM: Set Log Level <VOS Module> <VOS Log Level>", dump_lim_update_log_level}, {331, "PE.LIM: Send finish scan to LIM", dump_lim_finishscan_send}, {332, "PE.LIM: force probe rsp send from LIM", dump_lim_prb_rsp_send}, {333, "PE.SCH: Trigger to generate a beacon", dump_sch_beacon_trigger}, {335, "PE.LIM: set ACM flag (0..3)", dump_lim_acm_set}, {336, "PE.LIM: Send an ADDBA Req to peer MAC arg1=aid,arg2=tid, arg3=ssn", dump_lim_addba_req}, {337, "PE.LIM: Send a DELBA Ind to peer MAC arg1=aid,arg2=recipient(0)/initiator(1),arg3=tid,arg4=reasonCode", dump_lim_delba_req}, {338, "PE.LIM: Trigger a BA timeout for STA index", dump_lim_ba_timeout}, {339, "PE.LIM: List active BA session(s) for AssocID", dump_lim_list_active_ba}, {340, "PE.LIM: Set background scan flag (0-disable, 1-enable)",dump_lim_bgscan_toggle}, {341, "PE.LIM: Set link monitoring mode", dump_lim_linkmonitor_toggle}, {342, "PE.LIM: AddSta <6th byte of station Mac>", dump_lim_add_sta}, {343, "PE.LIM: DelSta <aid>", dump_lim_del_sta}, {344, "PE.LIM: Set probe respond flag", dump_lim_proberesp_toggle}, {345, "PE.LIM: set protection config bitmap", set_lim_prot_cfg}, {346, "PE:LIM: Set the Dot11 Mode", dump_lim_set_dot11_mode}, {347, "PE:Enable or Disable Protection", dump_lim_set_protection_control}, {348, "PE:LIM: Send SM Power Mode Action frame", dump_lim_send_SM_Power_Mode}, {349, "PE: LIM: Change CB Mode <session id> <sec chnl offset>",dump_lim_update_cb_Mode}, {350, "PE: LIM: abort scan", dump_lim_abort_scan}, {351, "PE: LIM: Start stop BG scan", dump_lim_start_stop_bg_scan}, {352, "PE: LIM: PE statistics <scanmask>", dump_lim_get_pe_statistics}, {353, "PE: LIM: Set MAC log level <Mac Module ID> 
<Log Level>", dump_lim_set_log_level}, {354, "PE: LIM: Set Scan in Power Save <0-disable, 1-enable>", dump_lim_set_scan_in_powersave}, {355, "PE.LIM: send sme start BSS request", dump_lim_send_start_bss_req}, {356, "PE.LIM: dump pesession info ", dump_lim_session_print}, {357, "PE.LIM: send DisAssocRequest", dump_lim_send_disassoc_req}, {358, "PE.LIM: send sme stop bss request <session ID>", dump_lim_stop_bss_req}, {359, "PE.LIM: send sme join request", dump_lim_send_join_req}, #if defined WLAN_FEATURE_VOWIFI {360, "PE.LIM: send an RRM action frame", dump_lim_send_rrm_action}, {361, "PE.LIM: unpack an RRM action frame", dump_lim_unpack_rrm_action}, #endif #ifdef WLAN_FEATURE_NEIGHBOR_ROAMING #ifdef RSSI_HACK {362, "TL Set current RSSI", dump_lim_set_tl_data_pkt_rssi}, #endif #endif #ifdef WLAN_FEATURE_VOWIFI_11R {363, "PE.LIM: trigger pre auth/reassoc event", dump_lim_ft_event}, #endif {364, "PE.LIM: Send a channel switch announcement", dump_lim_channel_switch_announcement}, {365, "PE.LIM: Cancel channel switch announcement", dump_lim_cancel_channel_switch_announcement}, #ifdef WLAN_FEATURE_11AC {366, "PE.LIM: Send a VHT OPMode Action Frame", dump_lim_vht_opmode_notification}, {367, "PE.LIM: Send a VHT Channel Switch Announcement", dump_lim_vht_channel_switch_notification}, {368, "PE.LIM: MCC Policy Maker", dump_lim_mcc_policy_maker}, #endif #ifdef WLANTL_DEBUG {369, "PE.LIM: pkts/rateIdx: iwpriv wlan0 dump 368 <staId> <boolean to flush counter>", dump_lim_get_pkts_rcvd_per_rate_idx}, {370, "PE.LIM: pkts/rssi: : iwpriv wlan0 dump 369 <staId> <boolean to flush counter>", dump_lim_get_pkts_rcvd_per_rssi_values}, #endif {374, "PE.LIM: MAS RX stats MAC eff <MAC eff in percentage>", dump_limRateInfoBasedOnMacEff}, {376, "PE.LIM: max number of probe per scan", dump_set_max_probe_req }, }; void limDumpInit(tpAniSirGlobal pMac) { logDumpRegisterTable( pMac, &limMenuDumpTable[0], sizeof(limMenuDumpTable)/sizeof(limMenuDumpTable[0]) ); } #endif //#if defined(ANI_LOGDUMP)
gpl-2.0
mythos234/SimplKernel-LL-G925F
drivers/mtd/maps/plat-ram.c
2158
6443
/* drivers/mtd/maps/plat-ram.c
 *
 * (c) 2004-2005 Simtec Electronics
 *	http://www.simtec.co.uk/products/SWLINUX/
 *	Ben Dooks <ben@simtec.co.uk>
 *
 * Generic platform device based RAM map
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
*/

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/platform_device.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/plat-ram.h>

#include <asm/io.h>

/* private structure for each mtd platform ram device created */

struct platram_info {
	struct device		*dev;	/* owning platform device's dev */
	struct mtd_info		*mtd;	/* mtd probed over the mapping */
	struct map_info		 map;	/* chip-map description */
	struct resource		*area;	/* requested memory region */
	struct platdata_mtd_ram	*pdata;	/* board-supplied platform data */
};

/* to_platram_info()
 *
 * device private data to struct platram_info conversion
*/

static inline struct platram_info *to_platram_info(struct platform_device *dev)
{
	return (struct platram_info *)platform_get_drvdata(dev);
}

/* platram_setrw
 *
 * call the platform device's set rw/ro control
 *
 * to = 0 => read-only
 *    = 1 => read-write
 *
 * No-op when the board supplied no platform data or no set_rw hook.
*/

static inline void platram_setrw(struct platram_info *info, int to)
{
	if (info->pdata == NULL)
		return;

	if (info->pdata->set_rw != NULL)
		(info->pdata->set_rw)(info->dev, to);
}

/* 
platram_remove * * called to remove the device from the driver's control */ static int platram_remove(struct platform_device *pdev) { struct platram_info *info = to_platram_info(pdev); platform_set_drvdata(pdev, NULL); dev_dbg(&pdev->dev, "removing device\n"); if (info == NULL) return 0; if (info->mtd) { mtd_device_unregister(info->mtd); map_destroy(info->mtd); } /* ensure ram is left read-only */ platram_setrw(info, PLATRAM_RO); /* release resources */ if (info->area) { release_resource(info->area); kfree(info->area); } if (info->map.virt != NULL) iounmap(info->map.virt); kfree(info); return 0; } /* platram_probe * * called from device drive system when a device matching our * driver is found. */ static int platram_probe(struct platform_device *pdev) { struct platdata_mtd_ram *pdata; struct platram_info *info; struct resource *res; int err = 0; dev_dbg(&pdev->dev, "probe entered\n"); if (pdev->dev.platform_data == NULL) { dev_err(&pdev->dev, "no platform data supplied\n"); err = -ENOENT; goto exit_error; } pdata = pdev->dev.platform_data; info = kzalloc(sizeof(*info), GFP_KERNEL); if (info == NULL) { dev_err(&pdev->dev, "no memory for flash info\n"); err = -ENOMEM; goto exit_error; } platform_set_drvdata(pdev, info); info->dev = &pdev->dev; info->pdata = pdata; /* get the resource for the memory mapping */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (res == NULL) { dev_err(&pdev->dev, "no memory resource specified\n"); err = -ENOENT; goto exit_free; } dev_dbg(&pdev->dev, "got platform resource %p (0x%llx)\n", res, (unsigned long long)res->start); /* setup map parameters */ info->map.phys = res->start; info->map.size = resource_size(res); info->map.name = pdata->mapname != NULL ? 
(char *)pdata->mapname : (char *)pdev->name; info->map.bankwidth = pdata->bankwidth; /* register our usage of the memory area */ info->area = request_mem_region(res->start, info->map.size, pdev->name); if (info->area == NULL) { dev_err(&pdev->dev, "failed to request memory region\n"); err = -EIO; goto exit_free; } /* remap the memory area */ info->map.virt = ioremap(res->start, info->map.size); dev_dbg(&pdev->dev, "virt %p, %lu bytes\n", info->map.virt, info->map.size); if (info->map.virt == NULL) { dev_err(&pdev->dev, "failed to ioremap() region\n"); err = -EIO; goto exit_free; } simple_map_init(&info->map); dev_dbg(&pdev->dev, "initialised map, probing for mtd\n"); /* probe for the right mtd map driver * supplied by the platform_data struct */ if (pdata->map_probes) { const char * const *map_probes = pdata->map_probes; for ( ; !info->mtd && *map_probes; map_probes++) info->mtd = do_map_probe(*map_probes , &info->map); } /* fallback to map_ram */ else info->mtd = do_map_probe("map_ram", &info->map); if (info->mtd == NULL) { dev_err(&pdev->dev, "failed to probe for map_ram\n"); err = -ENOMEM; goto exit_free; } info->mtd->owner = THIS_MODULE; info->mtd->dev.parent = &pdev->dev; platram_setrw(info, PLATRAM_RW); /* check to see if there are any available partitions, or whether * to add this device whole */ err = mtd_device_parse_register(info->mtd, pdata->probes, NULL, pdata->partitions, pdata->nr_partitions); if (!err) dev_info(&pdev->dev, "registered mtd device\n"); if (pdata->nr_partitions) { /* add the whole device. 
*/ err = mtd_device_register(info->mtd, NULL, 0); if (err) { dev_err(&pdev->dev, "failed to register the entire device\n"); } } return err; exit_free: platram_remove(pdev); exit_error: return err; } /* device driver info */ /* work with hotplug and coldplug */ MODULE_ALIAS("platform:mtd-ram"); static struct platform_driver platram_driver = { .probe = platram_probe, .remove = platram_remove, .driver = { .name = "mtd-ram", .owner = THIS_MODULE, }, }; /* module init/exit */ static int __init platram_init(void) { printk("Generic platform RAM MTD, (c) 2004 Simtec Electronics\n"); return platform_driver_register(&platram_driver); } static void __exit platram_exit(void) { platform_driver_unregister(&platram_driver); } module_init(platram_init); module_exit(platram_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>"); MODULE_DESCRIPTION("MTD platform RAM map driver");
gpl-2.0
oppo-source/R7f-4.4-kernel-source
drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
2158
13965
/* * Copyright 2012 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: Ben Skeggs */ #include <core/client.h> #include <core/enum.h> #include <core/engctx.h> #include <core/object.h> #include <subdev/fb.h> #include <subdev/bios.h> struct nv50_fb_priv { struct nouveau_fb base; struct page *r100c08_page; dma_addr_t r100c08; }; static int types[0x80] = { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 2, 2, 2, 2, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 2, 2, 2, 2, 2, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 2, 2, 2, 2, 1, 0, 2, 0, 1, 0, 2, 0, 1, 1, 2, 2, 1, 1, 0, 0 }; static bool nv50_fb_memtype_valid(struct nouveau_fb *pfb, u32 memtype) { return types[(memtype & 0xff00) >> 8] != 0; } static u32 nv50_fb_vram_rblock(struct nouveau_fb *pfb) { int i, parts, colbits, rowbitsa, rowbitsb, banks; u64 rowsize, predicted; u32 r0, r4, rt, ru, rblock_size; r0 = nv_rd32(pfb, 0x100200); r4 = nv_rd32(pfb, 0x100204); rt = nv_rd32(pfb, 0x100250); ru = nv_rd32(pfb, 0x001540); nv_debug(pfb, "memcfg 0x%08x 0x%08x 0x%08x 0x%08x\n", r0, r4, rt, ru); for (i = 0, parts = 0; i < 8; i++) { if (ru & (0x00010000 << i)) parts++; } colbits = (r4 & 0x0000f000) >> 12; rowbitsa = ((r4 & 0x000f0000) >> 16) + 8; rowbitsb = ((r4 & 0x00f00000) >> 20) + 8; banks = 1 << (((r4 & 0x03000000) >> 24) + 2); rowsize = parts * banks * (1 << colbits) * 8; predicted = rowsize << rowbitsa; if (r0 & 0x00000004) predicted += rowsize << rowbitsb; if (predicted != pfb->ram.size) { nv_warn(pfb, "memory controller reports %d MiB VRAM\n", (u32)(pfb->ram.size >> 20)); } rblock_size = rowsize; if (rt & 1) rblock_size *= 3; nv_debug(pfb, "rblock %d bytes\n", rblock_size); return rblock_size; } static int nv50_fb_vram_init(struct nouveau_fb *pfb) { struct nouveau_device *device = nv_device(pfb); struct nouveau_bios *bios = nouveau_bios(device); const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */ const u32 
rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */ u32 size, tags = 0; int ret; pfb->ram.size = nv_rd32(pfb, 0x10020c); pfb->ram.size = (pfb->ram.size & 0xffffff00) | ((pfb->ram.size & 0x000000ff) << 32); size = (pfb->ram.size >> 12) - rsvd_head - rsvd_tail; switch (device->chipset) { case 0xaa: case 0xac: case 0xaf: /* IGPs, no reordering, no real VRAM */ ret = nouveau_mm_init(&pfb->vram, rsvd_head, size, 1); if (ret) return ret; pfb->ram.type = NV_MEM_TYPE_STOLEN; pfb->ram.stolen = (u64)nv_rd32(pfb, 0x100e10) << 12; break; default: switch (nv_rd32(pfb, 0x100714) & 0x00000007) { case 0: pfb->ram.type = NV_MEM_TYPE_DDR1; break; case 1: if (nouveau_fb_bios_memtype(bios) == NV_MEM_TYPE_DDR3) pfb->ram.type = NV_MEM_TYPE_DDR3; else pfb->ram.type = NV_MEM_TYPE_DDR2; break; case 2: pfb->ram.type = NV_MEM_TYPE_GDDR3; break; case 3: pfb->ram.type = NV_MEM_TYPE_GDDR4; break; case 4: pfb->ram.type = NV_MEM_TYPE_GDDR5; break; default: break; } ret = nouveau_mm_init(&pfb->vram, rsvd_head, size, nv50_fb_vram_rblock(pfb) >> 12); if (ret) return ret; pfb->ram.ranks = (nv_rd32(pfb, 0x100200) & 0x4) ? 2 : 1; tags = nv_rd32(pfb, 0x100320); break; } return tags; } static int nv50_fb_vram_new(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin, u32 memtype, struct nouveau_mem **pmem) { struct nv50_fb_priv *priv = (void *)pfb; struct nouveau_mm *heap = &priv->base.vram; struct nouveau_mm *tags = &priv->base.tags; struct nouveau_mm_node *r; struct nouveau_mem *mem; int comp = (memtype & 0x300) >> 8; int type = (memtype & 0x07f); int back = (memtype & 0x800); int min, max, ret; max = (size >> 12); min = ncmin ? 
(ncmin >> 12) : max; align >>= 12; mem = kzalloc(sizeof(*mem), GFP_KERNEL); if (!mem) return -ENOMEM; mutex_lock(&pfb->base.mutex); if (comp) { if (align == 16) { int n = (max >> 4) * comp; ret = nouveau_mm_head(tags, 1, n, n, 1, &mem->tag); if (ret) mem->tag = NULL; } if (unlikely(!mem->tag)) comp = 0; } INIT_LIST_HEAD(&mem->regions); mem->memtype = (comp << 7) | type; mem->size = max; type = types[type]; do { if (back) ret = nouveau_mm_tail(heap, type, max, min, align, &r); else ret = nouveau_mm_head(heap, type, max, min, align, &r); if (ret) { mutex_unlock(&pfb->base.mutex); pfb->ram.put(pfb, &mem); return ret; } list_add_tail(&r->rl_entry, &mem->regions); max -= r->length; } while (max); mutex_unlock(&pfb->base.mutex); r = list_first_entry(&mem->regions, struct nouveau_mm_node, rl_entry); mem->offset = (u64)r->offset << 12; *pmem = mem; return 0; } void nv50_fb_vram_del(struct nouveau_fb *pfb, struct nouveau_mem **pmem) { struct nv50_fb_priv *priv = (void *)pfb; struct nouveau_mm_node *this; struct nouveau_mem *mem; mem = *pmem; *pmem = NULL; if (unlikely(mem == NULL)) return; mutex_lock(&pfb->base.mutex); while (!list_empty(&mem->regions)) { this = list_first_entry(&mem->regions, typeof(*this), rl_entry); list_del(&this->rl_entry); nouveau_mm_free(&priv->base.vram, &this); } nouveau_mm_free(&priv->base.tags, &mem->tag); mutex_unlock(&pfb->base.mutex); kfree(mem); } static const struct nouveau_enum vm_dispatch_subclients[] = { { 0x00000000, "GRCTX", NULL }, { 0x00000001, "NOTIFY", NULL }, { 0x00000002, "QUERY", NULL }, { 0x00000003, "COND", NULL }, { 0x00000004, "M2M_IN", NULL }, { 0x00000005, "M2M_OUT", NULL }, { 0x00000006, "M2M_NOTIFY", NULL }, {} }; static const struct nouveau_enum vm_ccache_subclients[] = { { 0x00000000, "CB", NULL }, { 0x00000001, "TIC", NULL }, { 0x00000002, "TSC", NULL }, {} }; static const struct nouveau_enum vm_prop_subclients[] = { { 0x00000000, "RT0", NULL }, { 0x00000001, "RT1", NULL }, { 0x00000002, "RT2", NULL }, { 0x00000003, 
"RT3", NULL }, { 0x00000004, "RT4", NULL }, { 0x00000005, "RT5", NULL }, { 0x00000006, "RT6", NULL }, { 0x00000007, "RT7", NULL }, { 0x00000008, "ZETA", NULL }, { 0x00000009, "LOCAL", NULL }, { 0x0000000a, "GLOBAL", NULL }, { 0x0000000b, "STACK", NULL }, { 0x0000000c, "DST2D", NULL }, {} }; static const struct nouveau_enum vm_pfifo_subclients[] = { { 0x00000000, "PUSHBUF", NULL }, { 0x00000001, "SEMAPHORE", NULL }, {} }; static const struct nouveau_enum vm_bar_subclients[] = { { 0x00000000, "FB", NULL }, { 0x00000001, "IN", NULL }, {} }; static const struct nouveau_enum vm_client[] = { { 0x00000000, "STRMOUT", NULL }, { 0x00000003, "DISPATCH", vm_dispatch_subclients }, { 0x00000004, "PFIFO_WRITE", NULL }, { 0x00000005, "CCACHE", vm_ccache_subclients }, { 0x00000006, "PPPP", NULL }, { 0x00000007, "CLIPID", NULL }, { 0x00000008, "PFIFO_READ", NULL }, { 0x00000009, "VFETCH", NULL }, { 0x0000000a, "TEXTURE", NULL }, { 0x0000000b, "PROP", vm_prop_subclients }, { 0x0000000c, "PVP", NULL }, { 0x0000000d, "PBSP", NULL }, { 0x0000000e, "PCRYPT", NULL }, { 0x0000000f, "PCOUNTER", NULL }, { 0x00000011, "PDAEMON", NULL }, {} }; static const struct nouveau_enum vm_engine[] = { { 0x00000000, "PGRAPH", NULL, NVDEV_ENGINE_GR }, { 0x00000001, "PVP", NULL, NVDEV_ENGINE_VP }, { 0x00000004, "PEEPHOLE", NULL }, { 0x00000005, "PFIFO", vm_pfifo_subclients, NVDEV_ENGINE_FIFO }, { 0x00000006, "BAR", vm_bar_subclients }, { 0x00000008, "PPPP", NULL, NVDEV_ENGINE_PPP }, { 0x00000008, "PMPEG", NULL, NVDEV_ENGINE_MPEG }, { 0x00000009, "PBSP", NULL, NVDEV_ENGINE_BSP }, { 0x0000000a, "PCRYPT", NULL, NVDEV_ENGINE_CRYPT }, { 0x0000000b, "PCOUNTER", NULL }, { 0x0000000c, "SEMAPHORE_BG", NULL }, { 0x0000000d, "PCOPY", NULL, NVDEV_ENGINE_COPY0 }, { 0x0000000e, "PDAEMON", NULL }, {} }; static const struct nouveau_enum vm_fault[] = { { 0x00000000, "PT_NOT_PRESENT", NULL }, { 0x00000001, "PT_TOO_SHORT", NULL }, { 0x00000002, "PAGE_NOT_PRESENT", NULL }, { 0x00000003, "PAGE_SYSTEM_ONLY", NULL }, { 
0x00000004, "PAGE_READ_ONLY", NULL }, { 0x00000006, "NULL_DMAOBJ", NULL }, { 0x00000007, "WRONG_MEMTYPE", NULL }, { 0x0000000b, "VRAM_LIMIT", NULL }, { 0x0000000f, "DMAOBJ_LIMIT", NULL }, {} }; static void nv50_fb_intr(struct nouveau_subdev *subdev) { struct nouveau_device *device = nv_device(subdev); struct nouveau_engine *engine; struct nv50_fb_priv *priv = (void *)subdev; const struct nouveau_enum *en, *cl; struct nouveau_object *engctx = NULL; u32 trap[6], idx, chan; u8 st0, st1, st2, st3; int i; idx = nv_rd32(priv, 0x100c90); if (!(idx & 0x80000000)) return; idx &= 0x00ffffff; for (i = 0; i < 6; i++) { nv_wr32(priv, 0x100c90, idx | i << 24); trap[i] = nv_rd32(priv, 0x100c94); } nv_wr32(priv, 0x100c90, idx | 0x80000000); /* decode status bits into something more useful */ if (device->chipset < 0xa3 || device->chipset == 0xaa || device->chipset == 0xac) { st0 = (trap[0] & 0x0000000f) >> 0; st1 = (trap[0] & 0x000000f0) >> 4; st2 = (trap[0] & 0x00000f00) >> 8; st3 = (trap[0] & 0x0000f000) >> 12; } else { st0 = (trap[0] & 0x000000ff) >> 0; st1 = (trap[0] & 0x0000ff00) >> 8; st2 = (trap[0] & 0x00ff0000) >> 16; st3 = (trap[0] & 0xff000000) >> 24; } chan = (trap[2] << 16) | trap[1]; en = nouveau_enum_find(vm_engine, st0); if (en && en->data2) { const struct nouveau_enum *orig_en = en; while (en->name && en->value == st0 && en->data2) { engine = nouveau_engine(subdev, en->data2); if (engine) { engctx = nouveau_engctx_get(engine, chan); if (engctx) break; } en++; } if (!engctx) en = orig_en; } nv_error(priv, "trapped %s at 0x%02x%04x%04x on channel 0x%08x [%s] ", (trap[5] & 0x00000100) ? 
"read" : "write", trap[5] & 0xff, trap[4] & 0xffff, trap[3] & 0xffff, chan, nouveau_client_name(engctx)); nouveau_engctx_put(engctx); if (en) pr_cont("%s/", en->name); else pr_cont("%02x/", st0); cl = nouveau_enum_find(vm_client, st2); if (cl) pr_cont("%s/", cl->name); else pr_cont("%02x/", st2); if (cl && cl->data) cl = nouveau_enum_find(cl->data, st3); else if (en && en->data) cl = nouveau_enum_find(en->data, st3); else cl = NULL; if (cl) pr_cont("%s", cl->name); else pr_cont("%02x", st3); pr_cont(" reason: "); en = nouveau_enum_find(vm_fault, st1); if (en) pr_cont("%s\n", en->name); else pr_cont("0x%08x\n", st1); } static int nv50_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nouveau_oclass *oclass, void *data, u32 size, struct nouveau_object **pobject) { struct nouveau_device *device = nv_device(parent); struct nv50_fb_priv *priv; int ret; ret = nouveau_fb_create(parent, engine, oclass, &priv); *pobject = nv_object(priv); if (ret) return ret; priv->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO); if (priv->r100c08_page) { priv->r100c08 = pci_map_page(device->pdev, priv->r100c08_page, 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); if (pci_dma_mapping_error(device->pdev, priv->r100c08)) nv_warn(priv, "failed 0x100c08 page map\n"); } else { nv_warn(priv, "failed 0x100c08 page alloc\n"); } priv->base.memtype_valid = nv50_fb_memtype_valid; priv->base.ram.init = nv50_fb_vram_init; priv->base.ram.get = nv50_fb_vram_new; priv->base.ram.put = nv50_fb_vram_del; nv_subdev(priv)->intr = nv50_fb_intr; return nouveau_fb_preinit(&priv->base); } static void nv50_fb_dtor(struct nouveau_object *object) { struct nouveau_device *device = nv_device(object); struct nv50_fb_priv *priv = (void *)object; if (priv->r100c08_page) { pci_unmap_page(device->pdev, priv->r100c08, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); __free_page(priv->r100c08_page); } nouveau_fb_destroy(&priv->base); } static int nv50_fb_init(struct nouveau_object *object) { struct nouveau_device *device 
= nv_device(object); struct nv50_fb_priv *priv = (void *)object; int ret; ret = nouveau_fb_init(&priv->base); if (ret) return ret; /* Not a clue what this is exactly. Without pointing it at a * scratch page, VRAM->GART blits with M2MF (as in DDX DFS) * cause IOMMU "read from address 0" errors (rh#561267) */ nv_wr32(priv, 0x100c08, priv->r100c08 >> 8); /* This is needed to get meaningful information from 100c90 * on traps. No idea what these values mean exactly. */ switch (device->chipset) { case 0x50: nv_wr32(priv, 0x100c90, 0x000707ff); break; case 0xa3: case 0xa5: case 0xa8: nv_wr32(priv, 0x100c90, 0x000d0fff); break; case 0xaf: nv_wr32(priv, 0x100c90, 0x089d1fff); break; default: nv_wr32(priv, 0x100c90, 0x001d07ff); break; } return 0; } struct nouveau_oclass nv50_fb_oclass = { .handle = NV_SUBDEV(FB, 0x50), .ofuncs = &(struct nouveau_ofuncs) { .ctor = nv50_fb_ctor, .dtor = nv50_fb_dtor, .init = nv50_fb_init, .fini = _nouveau_fb_fini, }, };
gpl-2.0
jakwu/linux-imx
fs/ext4/bitmap.c
2670
2909
/*
 *  linux/fs/ext4/bitmap.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 */

#include <linux/buffer_head.h>
#include <linux/jbd2.h>
#include "ext4.h"

/* Number of zero (free) bits within the first @numchars bytes of @bitmap. */
unsigned int ext4_count_free(char *bitmap, unsigned int numchars)
{
	unsigned int used = memweight(bitmap, numchars);

	return numchars * BITS_PER_BYTE - used;
}

/*
 * Check the inode bitmap checksum stored in the group descriptor against
 * one freshly computed over the bitmap buffer.  Returns 1 on match (or
 * when the metadata_csum feature is absent), 0 on mismatch.
 */
int ext4_inode_bitmap_csum_verify(struct super_block *sb, ext4_group_t group,
				  struct ext4_group_desc *gdp,
				  struct buffer_head *bh, int sz)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	__u32 stored, computed;

	if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
					EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
		return 1;

	stored = le16_to_cpu(gdp->bg_inode_bitmap_csum_lo);
	computed = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)bh->b_data, sz);
	if (sbi->s_desc_size >= EXT4_BG_INODE_BITMAP_CSUM_HI_END) {
		__u32 hi = le16_to_cpu(gdp->bg_inode_bitmap_csum_hi);

		stored |= hi << 16;
	} else {
		/* descriptor only holds 16 checksum bits; compare low half */
		computed &= 0xFFFF;
	}

	return stored == computed;
}

/*
 * Recompute the inode bitmap checksum over @bh and store it in the group
 * descriptor (low 16 bits always, high 16 bits when the descriptor is
 * large enough).  No-op without the metadata_csum feature.
 */
void ext4_inode_bitmap_csum_set(struct super_block *sb, ext4_group_t group,
				struct ext4_group_desc *gdp,
				struct buffer_head *bh, int sz)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	__u32 csum;

	if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
					EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
		return;

	csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)bh->b_data, sz);
	gdp->bg_inode_bitmap_csum_lo = cpu_to_le16(csum & 0xFFFF);
	if (sbi->s_desc_size >= EXT4_BG_INODE_BITMAP_CSUM_HI_END)
		gdp->bg_inode_bitmap_csum_hi = cpu_to_le16(csum >> 16);
}

/*
 * As ext4_inode_bitmap_csum_verify(), but for the block bitmap; the
 * checksummed length is derived from the cluster count of the group.
 */
int ext4_block_bitmap_csum_verify(struct super_block *sb, ext4_group_t group,
				  struct ext4_group_desc *gdp,
				  struct buffer_head *bh)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	int len = EXT4_CLUSTERS_PER_GROUP(sb) / 8;
	__u32 stored, computed;

	if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
					EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
		return 1;

	stored = le16_to_cpu(gdp->bg_block_bitmap_csum_lo);
	computed = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)bh->b_data, len);
	if (sbi->s_desc_size >= EXT4_BG_BLOCK_BITMAP_CSUM_HI_END) {
		__u32 hi = le16_to_cpu(gdp->bg_block_bitmap_csum_hi);

		stored |= hi << 16;
	} else {
		computed &= 0xFFFF;
	}

	return stored == computed;
}

/*
 * As ext4_inode_bitmap_csum_set(), but for the block bitmap.
 */
void ext4_block_bitmap_csum_set(struct super_block *sb, ext4_group_t group,
				struct ext4_group_desc *gdp,
				struct buffer_head *bh)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	int len = EXT4_CLUSTERS_PER_GROUP(sb) / 8;
	__u32 csum;

	if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
					EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
		return;

	csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)bh->b_data, len);
	gdp->bg_block_bitmap_csum_lo = cpu_to_le16(csum & 0xFFFF);
	if (sbi->s_desc_size >= EXT4_BG_BLOCK_BITMAP_CSUM_HI_END)
		gdp->bg_block_bitmap_csum_hi = cpu_to_le16(csum >> 16);
}
gpl-2.0
SlimRoms/kernel_samsung_espresso10
drivers/staging/tidspbridge/pmgr/cod.c
3182
14323
/* * cod.c * * DSP-BIOS Bridge driver support functions for TI OMAP processors. * * This module implements DSP code management for the DSP/BIOS Bridge * environment. It is mostly a thin wrapper. * * This module provides an interface for loading both static and * dynamic code objects onto DSP systems. * * Copyright (C) 2005-2006 Texas Instruments, Inc. * * This package is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. */ #include <linux/types.h> /* ----------------------------------- Host OS */ #include <dspbridge/host_os.h> #include <linux/fs.h> #include <linux/uaccess.h> /* ----------------------------------- DSP/BIOS Bridge */ #include <dspbridge/dbdefs.h> /* ----------------------------------- Trace & Debug */ #include <dspbridge/dbc.h> /* ----------------------------------- Platform Manager */ /* Include appropriate loader header file */ #include <dspbridge/dbll.h> /* ----------------------------------- This */ #include <dspbridge/cod.h> /* * ======== cod_manager ======== */ struct cod_manager { struct dbll_tar_obj *target; struct dbll_library_obj *base_lib; bool loaded; /* Base library loaded? 
*/ u32 entry; struct dbll_fxns fxns; struct dbll_attrs attrs; char sz_zl_file[COD_MAXPATHLENGTH]; }; /* * ======== cod_libraryobj ======== */ struct cod_libraryobj { struct dbll_library_obj *dbll_lib; struct cod_manager *cod_mgr; }; static u32 refs = 0L; static struct dbll_fxns ldr_fxns = { (dbll_close_fxn) dbll_close, (dbll_create_fxn) dbll_create, (dbll_delete_fxn) dbll_delete, (dbll_exit_fxn) dbll_exit, (dbll_get_attrs_fxn) dbll_get_attrs, (dbll_get_addr_fxn) dbll_get_addr, (dbll_get_c_addr_fxn) dbll_get_c_addr, (dbll_get_sect_fxn) dbll_get_sect, (dbll_init_fxn) dbll_init, (dbll_load_fxn) dbll_load, (dbll_open_fxn) dbll_open, (dbll_read_sect_fxn) dbll_read_sect, (dbll_unload_fxn) dbll_unload, }; static bool no_op(void); /* * File operations (originally were under kfile.c) */ static s32 cod_f_close(struct file *filp) { /* Check for valid handle */ if (!filp) return -EFAULT; filp_close(filp, NULL); /* we can't use 0 here */ return 0; } static struct file *cod_f_open(const char *psz_file_name, const char *sz_mode) { mm_segment_t fs; struct file *filp; fs = get_fs(); set_fs(get_ds()); /* ignore given mode and open file as read-only */ filp = filp_open(psz_file_name, O_RDONLY, 0); if (IS_ERR(filp)) filp = NULL; set_fs(fs); return filp; } static s32 cod_f_read(void __user *pbuffer, s32 size, s32 count, struct file *filp) { /* check for valid file handle */ if (!filp) return -EFAULT; if ((size > 0) && (count > 0) && pbuffer) { u32 dw_bytes_read; mm_segment_t fs; /* read from file */ fs = get_fs(); set_fs(get_ds()); dw_bytes_read = filp->f_op->read(filp, pbuffer, size * count, &(filp->f_pos)); set_fs(fs); if (!dw_bytes_read) return -EBADF; return dw_bytes_read / size; } return -EINVAL; } static s32 cod_f_seek(struct file *filp, s32 offset, s32 origin) { loff_t dw_cur_pos; /* check for valid file handle */ if (!filp) return -EFAULT; /* based on the origin flag, move the internal pointer */ dw_cur_pos = filp->f_op->llseek(filp, offset, origin); if ((s32) dw_cur_pos < 0) 
return -EPERM; /* we can't use 0 here */ return 0; } static s32 cod_f_tell(struct file *filp) { loff_t dw_cur_pos; if (!filp) return -EFAULT; /* Get current position */ dw_cur_pos = filp->f_op->llseek(filp, 0, SEEK_CUR); if ((s32) dw_cur_pos < 0) return -EPERM; return dw_cur_pos; } /* * ======== cod_close ======== */ void cod_close(struct cod_libraryobj *lib) { struct cod_manager *hmgr; DBC_REQUIRE(refs > 0); DBC_REQUIRE(lib != NULL); DBC_REQUIRE(lib->cod_mgr); hmgr = lib->cod_mgr; hmgr->fxns.close_fxn(lib->dbll_lib); kfree(lib); } /* * ======== cod_create ======== * Purpose: * Create an object to manage code on a DSP system. * This object can be used to load an initial program image with * arguments that can later be expanded with * dynamically loaded object files. * */ int cod_create(struct cod_manager **mgr, char *str_zl_file) { struct cod_manager *mgr_new; struct dbll_attrs zl_attrs; int status = 0; DBC_REQUIRE(refs > 0); DBC_REQUIRE(mgr != NULL); /* assume failure */ *mgr = NULL; mgr_new = kzalloc(sizeof(struct cod_manager), GFP_KERNEL); if (mgr_new == NULL) return -ENOMEM; /* Set up loader functions */ mgr_new->fxns = ldr_fxns; /* initialize the ZL module */ mgr_new->fxns.init_fxn(); zl_attrs.alloc = (dbll_alloc_fxn) no_op; zl_attrs.free = (dbll_free_fxn) no_op; zl_attrs.fread = (dbll_read_fxn) cod_f_read; zl_attrs.fseek = (dbll_seek_fxn) cod_f_seek; zl_attrs.ftell = (dbll_tell_fxn) cod_f_tell; zl_attrs.fclose = (dbll_f_close_fxn) cod_f_close; zl_attrs.fopen = (dbll_f_open_fxn) cod_f_open; zl_attrs.sym_lookup = NULL; zl_attrs.base_image = true; zl_attrs.log_write = NULL; zl_attrs.log_write_handle = NULL; zl_attrs.write = NULL; zl_attrs.rmm_handle = NULL; zl_attrs.input_params = NULL; zl_attrs.sym_handle = NULL; zl_attrs.sym_arg = NULL; mgr_new->attrs = zl_attrs; status = mgr_new->fxns.create_fxn(&mgr_new->target, &zl_attrs); if (status) { cod_delete(mgr_new); return -ESPIPE; } /* return the new manager */ *mgr = mgr_new; return 0; } /* * ======== cod_delete 
======== * Purpose: * Delete a code manager object. */ void cod_delete(struct cod_manager *cod_mgr_obj) { DBC_REQUIRE(refs > 0); DBC_REQUIRE(cod_mgr_obj); if (cod_mgr_obj->base_lib) { if (cod_mgr_obj->loaded) cod_mgr_obj->fxns.unload_fxn(cod_mgr_obj->base_lib, &cod_mgr_obj->attrs); cod_mgr_obj->fxns.close_fxn(cod_mgr_obj->base_lib); } if (cod_mgr_obj->target) { cod_mgr_obj->fxns.delete_fxn(cod_mgr_obj->target); cod_mgr_obj->fxns.exit_fxn(); } kfree(cod_mgr_obj); } /* * ======== cod_exit ======== * Purpose: * Discontinue usage of the COD module. * */ void cod_exit(void) { DBC_REQUIRE(refs > 0); refs--; DBC_ENSURE(refs >= 0); } /* * ======== cod_get_base_lib ======== * Purpose: * Get handle to the base image DBL library. */ int cod_get_base_lib(struct cod_manager *cod_mgr_obj, struct dbll_library_obj **plib) { int status = 0; DBC_REQUIRE(refs > 0); DBC_REQUIRE(cod_mgr_obj); DBC_REQUIRE(plib != NULL); *plib = (struct dbll_library_obj *)cod_mgr_obj->base_lib; return status; } /* * ======== cod_get_base_name ======== */ int cod_get_base_name(struct cod_manager *cod_mgr_obj, char *sz_name, u32 usize) { int status = 0; DBC_REQUIRE(refs > 0); DBC_REQUIRE(cod_mgr_obj); DBC_REQUIRE(sz_name != NULL); if (usize <= COD_MAXPATHLENGTH) strncpy(sz_name, cod_mgr_obj->sz_zl_file, usize); else status = -EPERM; return status; } /* * ======== cod_get_entry ======== * Purpose: * Retrieve the entry point of a loaded DSP program image * */ int cod_get_entry(struct cod_manager *cod_mgr_obj, u32 *entry_pt) { DBC_REQUIRE(refs > 0); DBC_REQUIRE(cod_mgr_obj); DBC_REQUIRE(entry_pt != NULL); *entry_pt = cod_mgr_obj->entry; return 0; } /* * ======== cod_get_loader ======== * Purpose: * Get handle to the DBLL loader. 
*/ int cod_get_loader(struct cod_manager *cod_mgr_obj, struct dbll_tar_obj **loader) { int status = 0; DBC_REQUIRE(refs > 0); DBC_REQUIRE(cod_mgr_obj); DBC_REQUIRE(loader != NULL); *loader = (struct dbll_tar_obj *)cod_mgr_obj->target; return status; } /* * ======== cod_get_section ======== * Purpose: * Retrieve the starting address and length of a section in the COFF file * given the section name. */ int cod_get_section(struct cod_libraryobj *lib, char *str_sect, u32 *addr, u32 *len) { struct cod_manager *cod_mgr_obj; int status = 0; DBC_REQUIRE(refs > 0); DBC_REQUIRE(lib != NULL); DBC_REQUIRE(lib->cod_mgr); DBC_REQUIRE(str_sect != NULL); DBC_REQUIRE(addr != NULL); DBC_REQUIRE(len != NULL); *addr = 0; *len = 0; if (lib != NULL) { cod_mgr_obj = lib->cod_mgr; status = cod_mgr_obj->fxns.get_sect_fxn(lib->dbll_lib, str_sect, addr, len); } else { status = -ESPIPE; } DBC_ENSURE(!status || ((*addr == 0) && (*len == 0))); return status; } /* * ======== cod_get_sym_value ======== * Purpose: * Retrieve the value for the specified symbol. The symbol is first * searched for literally and then, if not found, searched for as a * C symbol. * */ int cod_get_sym_value(struct cod_manager *cod_mgr_obj, char *str_sym, u32 *pul_value) { struct dbll_sym_val *dbll_sym; DBC_REQUIRE(refs > 0); DBC_REQUIRE(cod_mgr_obj); DBC_REQUIRE(str_sym != NULL); DBC_REQUIRE(pul_value != NULL); dev_dbg(bridge, "%s: cod_mgr_obj: %p str_sym: %s pul_value: %p\n", __func__, cod_mgr_obj, str_sym, pul_value); if (cod_mgr_obj->base_lib) { if (!cod_mgr_obj->fxns. get_addr_fxn(cod_mgr_obj->base_lib, str_sym, &dbll_sym)) { if (!cod_mgr_obj->fxns. get_c_addr_fxn(cod_mgr_obj->base_lib, str_sym, &dbll_sym)) return -ESPIPE; } } else { return -ESPIPE; } *pul_value = dbll_sym->value; return 0; } /* * ======== cod_init ======== * Purpose: * Initialize the COD module's private state. 
 * */
bool cod_init(void)
{
	/* Bump the module reference count; always reports success.
	 * The DBC_* macros are design-by-contract checks that verify the
	 * count stays consistent around the increment. */
	bool ret = true;

	DBC_REQUIRE(refs >= 0);

	if (ret)
		refs++;

	DBC_ENSURE((ret && refs > 0) || (!ret && refs >= 0));

	return ret;
}

/*
 * ======== cod_load_base ========
 * Purpose:
 * Load the initial program image, optionally with command-line arguments,
 * on the DSP system managed by the supplied handle. The program to be
 * loaded must be the first element of the args array and must be a fully
 * qualified pathname.
 * Details:
 * if num_argc doesn't match the number of arguments in the args array, the
 * args array is searched for a NULL terminating entry, and argc is
 * recalculated to reflect this. In this way, we can support NULL
 * terminating args arrays, if num_argc is very large.
 *
 * Returns 0 on success or the loader's error code; on failure the base
 * library is closed and cod_mgr_obj->base_lib is reset to NULL.
 */
int cod_load_base(struct cod_manager *cod_mgr_obj, u32 num_argc, char *args[],
		  cod_writefxn pfn_write, void *arb, char *envp[])
{
	dbll_flags flags;
	struct dbll_attrs save_attrs;
	struct dbll_attrs new_attrs;
	int status;
	u32 i;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(cod_mgr_obj);
	DBC_REQUIRE(num_argc > 0);
	DBC_REQUIRE(args != NULL);
	DBC_REQUIRE(args[0] != NULL);
	DBC_REQUIRE(pfn_write != NULL);
	DBC_REQUIRE(cod_mgr_obj->base_lib != NULL);

	/*
	 * Make sure every argv[] stated in argc has a value, or change argc to
	 * reflect true number in NULL terminated argv array.
	 */
	for (i = 0; i < num_argc; i++) {
		if (args[i] == NULL) {
			num_argc = i;
			break;
		}
	}

	/* set the write function for this operation */
	cod_mgr_obj->fxns.get_attrs_fxn(cod_mgr_obj->target, &save_attrs);

	new_attrs = save_attrs;
	new_attrs.write = (dbll_write_fxn) pfn_write;
	new_attrs.input_params = arb;
	/* Loader allocation callbacks are stubbed out with no_op: the image
	 * is written straight through pfn_write, so nothing is allocated. */
	new_attrs.alloc = (dbll_alloc_fxn) no_op;
	new_attrs.free = (dbll_free_fxn) no_op;
	new_attrs.log_write = NULL;
	new_attrs.log_write_handle = NULL;

	/* Load the image */
	flags = DBLL_CODE | DBLL_DATA | DBLL_SYMB;
	status = cod_mgr_obj->fxns.load_fxn(cod_mgr_obj->base_lib, flags,
					    &new_attrs, &cod_mgr_obj->entry);
	/* On failure, close the library before dropping our reference so the
	 * manager is left in a consistent "no base image" state. */
	if (status)
		cod_mgr_obj->fxns.close_fxn(cod_mgr_obj->base_lib);

	if (!status)
		cod_mgr_obj->loaded = true;
	else
		cod_mgr_obj->base_lib = NULL;

	return status;
}

/*
 * ======== cod_open ========
 * Open library for reading sections.
 *
 * On success *lib_obj holds a newly allocated cod_libraryobj owned by the
 * caller; on any failure *lib_obj is left NULL.
 */
int cod_open(struct cod_manager *hmgr, char *sz_coff_path, u32 flags,
	     struct cod_libraryobj **lib_obj)
{
	int status = 0;
	struct cod_libraryobj *lib = NULL;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(hmgr);
	DBC_REQUIRE(sz_coff_path != NULL);
	DBC_REQUIRE(flags == COD_NOLOAD || flags == COD_SYMB);
	DBC_REQUIRE(lib_obj != NULL);

	*lib_obj = NULL;

	lib = kzalloc(sizeof(struct cod_libraryobj), GFP_KERNEL);
	if (lib == NULL)
		status = -ENOMEM;

	if (!status) {
		lib->cod_mgr = hmgr;
		status = hmgr->fxns.open_fxn(hmgr->target, sz_coff_path, flags,
					     &lib->dbll_lib);
		if (!status)
			*lib_obj = lib;
	}

	/* NOTE(review): if open_fxn fails, lib is not freed here — presumably
	 * reclaimed elsewhere or a leak; confirm against the full file. */
	if (status)
		pr_err("%s: error status 0x%x, sz_coff_path: %s flags: 0x%x\n",
		       __func__, status, sz_coff_path, flags);

	return status;
}

/*
 * ======== cod_open_base ========
 * Purpose:
 * Open base image for reading sections.
 */
int cod_open_base(struct cod_manager *hmgr, char *sz_coff_path,
		  dbll_flags flags)
{
	int status = 0;
	struct dbll_library_obj *lib;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(hmgr);
	DBC_REQUIRE(sz_coff_path != NULL);

	/* if we previously opened a base image, close it now */
	if (hmgr->base_lib) {
		if (hmgr->loaded) {
			hmgr->fxns.unload_fxn(hmgr->base_lib, &hmgr->attrs);
			hmgr->loaded = false;
		}
		hmgr->fxns.close_fxn(hmgr->base_lib);
		hmgr->base_lib = NULL;
	}
	status = hmgr->fxns.open_fxn(hmgr->target, sz_coff_path, flags, &lib);
	if (!status) {
		/* hang onto the library for subsequent sym table usage */
		hmgr->base_lib = lib;
		/* Explicit NUL-termination compensates for strncpy's
		 * no-termination-on-truncation behavior. */
		strncpy(hmgr->sz_zl_file, sz_coff_path, COD_MAXPATHLENGTH - 1);
		hmgr->sz_zl_file[COD_MAXPATHLENGTH - 1] = '\0';
	}

	if (status)
		pr_err("%s: error status 0x%x sz_coff_path: %s\n", __func__,
		       status, sz_coff_path);

	return status;
}

/*
 * ======== cod_read_section ========
 * Purpose:
 * Retrieve the content of a code section given the section name.
 */
int cod_read_section(struct cod_libraryobj *lib, char *str_sect,
		     char *str_content, u32 content_size)
{
	int status = 0;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(lib != NULL);
	DBC_REQUIRE(lib->cod_mgr);
	DBC_REQUIRE(str_sect != NULL);
	DBC_REQUIRE(str_content != NULL);

	/* Runtime NULL check is kept even though DBC_REQUIRE asserts the
	 * same thing: the DBC macros may compile out in production builds. */
	if (lib != NULL)
		status =
		    lib->cod_mgr->fxns.read_sect_fxn(lib->dbll_lib, str_sect,
						     str_content, content_size);
	else
		status = -ESPIPE;

	return status;
}

/*
 * ======== no_op ========
 * Purpose:
 * No Operation.
 *
 * Used as a stub alloc/free callback for the dynamic loader (see
 * cod_load_base), where it always reports success.
 */
static bool no_op(void)
{
	return true;
}
gpl-2.0
brymaster5000/Elite_darksLide_kernel
arch/powerpc/lib/copyuser_power7_vmx.c
4462
1571
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2011
 *
 * Authors: Sukadev Bhattiprolu <sukadev@linux.vnet.ibm.com>
 *          Anton Blanchard <anton@au.ibm.com>
 */
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <asm/switch_to.h>

/*
 * Try to set up the VMX (AltiVec) unit for an accelerated user copy.
 * Returns 1 when VMX may be used, 0 when the caller must fall back to
 * the normal non-VMX copy (e.g. when running in interrupt context,
 * where saving/restoring the VMX state is not possible).
 */
int enter_vmx_copy(void)
{
	if (in_interrupt())
		return 0;

	/* This acts as preempt_disable() as well and will make
	 * enable_kernel_altivec(). We need to disable page faults
	 * as they can call schedule and thus make us lose the VMX
	 * context. So on page faults, we just fail which will cause
	 * a fallback to the normal non-vmx copy.
	 */
	/* Ordering matters: page faults (and thus preemption) must be
	 * disabled before the kernel AltiVec state is claimed. */
	pagefault_disable();
	enable_kernel_altivec();

	return 1;
}

/*
 * This function must return 0 because we tail call optimise when calling
 * from __copy_tofrom_user_power7 which returns 0 on success.
 */
int exit_vmx_copy(void)
{
	pagefault_enable();
	return 0;
}
gpl-2.0
ghbhaha/furnace-bacon
drivers/net/ethernet/i825xx/ether1.c
4974
27611
/* * linux/drivers/acorn/net/ether1.c * * Copyright (C) 1996-2000 Russell King * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Acorn ether1 driver (82586 chip) for Acorn machines * * We basically keep two queues in the cards memory - one for transmit * and one for receive. Each has a head and a tail. The head is where * we/the chip adds packets to be transmitted/received, and the tail * is where the transmitter has got to/where the receiver will stop. * Both of these queues are circular, and since the chip is running * all the time, we have to be careful when we modify the pointers etc * so that the buffer memory contents is valid all the time. * * Change log: * 1.00 RMK Released * 1.01 RMK 19/03/1996 Transfers the last odd byte onto/off of the card now. * 1.02 RMK 25/05/1997 Added code to restart RU if it goes not ready * 1.03 RMK 14/09/1997 Cleaned up the handling of a reset during the TX interrupt. * Should prevent lockup. * 1.04 RMK 17/09/1997 Added more info when initialsation of chip goes wrong. * TDR now only reports failure when chip reports non-zero * TDR time-distance. 
* 1.05 RMK 31/12/1997 Removed calls to dev_tint for 2.1 * 1.06 RMK 10/02/2000 Updated for 2.3.43 * 1.07 RMK 13/05/2000 Updated for 2.3.99-pre8 */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/fcntl.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/in.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/device.h> #include <linux/init.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/bitops.h> #include <asm/io.h> #include <asm/dma.h> #include <asm/ecard.h> #define __ETHER1_C #include "ether1.h" static unsigned int net_debug = NET_DEBUG; #define BUFFER_SIZE 0x10000 #define TX_AREA_START 0x00100 #define TX_AREA_END 0x05000 #define RX_AREA_START 0x05000 #define RX_AREA_END 0x0fc00 static int ether1_open(struct net_device *dev); static int ether1_sendpacket(struct sk_buff *skb, struct net_device *dev); static irqreturn_t ether1_interrupt(int irq, void *dev_id); static int ether1_close(struct net_device *dev); static void ether1_setmulticastlist(struct net_device *dev); static void ether1_timeout(struct net_device *dev); /* ------------------------------------------------------------------------- */ static char version[] __devinitdata = "ether1 ethernet driver (c) 2000 Russell King v1.07\n"; #define BUS_16 16 #define BUS_8 8 /* ------------------------------------------------------------------------- */ #define DISABLEIRQS 1 #define NORMALIRQS 0 #define ether1_readw(dev, addr, type, offset, svflgs) ether1_inw_p (dev, addr + (int)(&((type *)0)->offset), svflgs) #define ether1_writew(dev, val, addr, type, offset, svflgs) ether1_outw_p (dev, val, addr + (int)(&((type *)0)->offset), svflgs) static inline unsigned short ether1_inw_p (struct net_device *dev, int addr, int svflgs) { unsigned long flags; unsigned short ret; if (svflgs) local_irq_save (flags); writeb(addr >> 12, REG_PAGE); ret = readw(ETHER1_RAM + 
((addr & 4095) << 1)); if (svflgs) local_irq_restore (flags); return ret; } static inline void ether1_outw_p (struct net_device *dev, unsigned short val, int addr, int svflgs) { unsigned long flags; if (svflgs) local_irq_save (flags); writeb(addr >> 12, REG_PAGE); writew(val, ETHER1_RAM + ((addr & 4095) << 1)); if (svflgs) local_irq_restore (flags); } /* * Some inline assembler to allow fast transfers on to/off of the card. * Since this driver depends on some features presented by the ARM * specific architecture, and that you can't configure this driver * without specifiing ARM mode, this is not a problem. * * This routine is essentially an optimised memcpy from the card's * onboard RAM to kernel memory. */ static void ether1_writebuffer (struct net_device *dev, void *data, unsigned int start, unsigned int length) { unsigned int page, thislen, offset; void __iomem *addr; offset = start & 4095; page = start >> 12; addr = ETHER1_RAM + (offset << 1); if (offset + length > 4096) thislen = 4096 - offset; else thislen = length; do { int used; writeb(page, REG_PAGE); length -= thislen; __asm__ __volatile__( "subs %3, %3, #2\n\ bmi 2f\n\ 1: ldr %0, [%1], #2\n\ mov %0, %0, lsl #16\n\ orr %0, %0, %0, lsr #16\n\ str %0, [%2], #4\n\ subs %3, %3, #2\n\ bmi 2f\n\ ldr %0, [%1], #2\n\ mov %0, %0, lsl #16\n\ orr %0, %0, %0, lsr #16\n\ str %0, [%2], #4\n\ subs %3, %3, #2\n\ bmi 2f\n\ ldr %0, [%1], #2\n\ mov %0, %0, lsl #16\n\ orr %0, %0, %0, lsr #16\n\ str %0, [%2], #4\n\ subs %3, %3, #2\n\ bmi 2f\n\ ldr %0, [%1], #2\n\ mov %0, %0, lsl #16\n\ orr %0, %0, %0, lsr #16\n\ str %0, [%2], #4\n\ subs %3, %3, #2\n\ bpl 1b\n\ 2: adds %3, %3, #1\n\ ldreqb %0, [%1]\n\ streqb %0, [%2]" : "=&r" (used), "=&r" (data) : "r" (addr), "r" (thislen), "1" (data)); addr = ETHER1_RAM; thislen = length; if (thislen > 4096) thislen = 4096; page++; } while (thislen); } static void ether1_readbuffer (struct net_device *dev, void *data, unsigned int start, unsigned int length) { unsigned int page, thislen, 
offset; void __iomem *addr; offset = start & 4095; page = start >> 12; addr = ETHER1_RAM + (offset << 1); if (offset + length > 4096) thislen = 4096 - offset; else thislen = length; do { int used; writeb(page, REG_PAGE); length -= thislen; __asm__ __volatile__( "subs %3, %3, #2\n\ bmi 2f\n\ 1: ldr %0, [%2], #4\n\ strb %0, [%1], #1\n\ mov %0, %0, lsr #8\n\ strb %0, [%1], #1\n\ subs %3, %3, #2\n\ bmi 2f\n\ ldr %0, [%2], #4\n\ strb %0, [%1], #1\n\ mov %0, %0, lsr #8\n\ strb %0, [%1], #1\n\ subs %3, %3, #2\n\ bmi 2f\n\ ldr %0, [%2], #4\n\ strb %0, [%1], #1\n\ mov %0, %0, lsr #8\n\ strb %0, [%1], #1\n\ subs %3, %3, #2\n\ bmi 2f\n\ ldr %0, [%2], #4\n\ strb %0, [%1], #1\n\ mov %0, %0, lsr #8\n\ strb %0, [%1], #1\n\ subs %3, %3, #2\n\ bpl 1b\n\ 2: adds %3, %3, #1\n\ ldreqb %0, [%2]\n\ streqb %0, [%1]" : "=&r" (used), "=&r" (data) : "r" (addr), "r" (thislen), "1" (data)); addr = ETHER1_RAM; thislen = length; if (thislen > 4096) thislen = 4096; page++; } while (thislen); } static int __devinit ether1_ramtest(struct net_device *dev, unsigned char byte) { unsigned char *buffer = kmalloc (BUFFER_SIZE, GFP_KERNEL); int i, ret = BUFFER_SIZE; int max_errors = 15; int bad = -1; int bad_start = 0; if (!buffer) return 1; memset (buffer, byte, BUFFER_SIZE); ether1_writebuffer (dev, buffer, 0, BUFFER_SIZE); memset (buffer, byte ^ 0xff, BUFFER_SIZE); ether1_readbuffer (dev, buffer, 0, BUFFER_SIZE); for (i = 0; i < BUFFER_SIZE; i++) { if (buffer[i] != byte) { if (max_errors >= 0 && bad != buffer[i]) { if (bad != -1) printk ("\n"); printk (KERN_CRIT "%s: RAM failed with (%02X instead of %02X) at 0x%04X", dev->name, buffer[i], byte, i); ret = -ENODEV; max_errors --; bad = buffer[i]; bad_start = i; } } else { if (bad != -1) { if (bad_start == i - 1) printk ("\n"); else printk (" - 0x%04X\n", i - 1); bad = -1; } } } if (bad != -1) printk (" - 0x%04X\n", BUFFER_SIZE); kfree (buffer); return ret; } static int ether1_reset (struct net_device *dev) { writeb(CTRL_RST|CTRL_ACK, REG_CONTROL); 
return BUS_16; } static int __devinit ether1_init_2(struct net_device *dev) { int i; dev->mem_start = 0; i = ether1_ramtest (dev, 0x5a); if (i > 0) i = ether1_ramtest (dev, 0x1e); if (i <= 0) return -ENODEV; dev->mem_end = i; return 0; } /* * These are the structures that are loaded into the ether RAM card to * initialise the 82586 */ /* at 0x0100 */ #define NOP_ADDR (TX_AREA_START) #define NOP_SIZE (0x06) static nop_t init_nop = { 0, CMD_NOP, NOP_ADDR }; /* at 0x003a */ #define TDR_ADDR (0x003a) #define TDR_SIZE (0x08) static tdr_t init_tdr = { 0, CMD_TDR | CMD_INTR, NOP_ADDR, 0 }; /* at 0x002e */ #define MC_ADDR (0x002e) #define MC_SIZE (0x0c) static mc_t init_mc = { 0, CMD_SETMULTICAST, TDR_ADDR, 0, { { 0, } } }; /* at 0x0022 */ #define SA_ADDR (0x0022) #define SA_SIZE (0x0c) static sa_t init_sa = { 0, CMD_SETADDRESS, MC_ADDR, { 0, } }; /* at 0x0010 */ #define CFG_ADDR (0x0010) #define CFG_SIZE (0x12) static cfg_t init_cfg = { 0, CMD_CONFIG, SA_ADDR, 8, 8, CFG8_SRDY, CFG9_PREAMB8 | CFG9_ADDRLENBUF | CFG9_ADDRLEN(6), 0, 0x60, 0, CFG13_RETRY(15) | CFG13_SLOTH(2), 0, }; /* at 0x0000 */ #define SCB_ADDR (0x0000) #define SCB_SIZE (0x10) static scb_t init_scb = { 0, SCB_CMDACKRNR | SCB_CMDACKCNA | SCB_CMDACKFR | SCB_CMDACKCX, CFG_ADDR, RX_AREA_START, 0, 0, 0, 0 }; /* at 0xffee */ #define ISCP_ADDR (0xffee) #define ISCP_SIZE (0x08) static iscp_t init_iscp = { 1, SCB_ADDR, 0x0000, 0x0000 }; /* at 0xfff6 */ #define SCP_ADDR (0xfff6) #define SCP_SIZE (0x0a) static scp_t init_scp = { SCP_SY_16BBUS, { 0, 0 }, ISCP_ADDR, 0 }; #define RFD_SIZE (0x16) static rfd_t init_rfd = { 0, 0, 0, 0, { 0, }, { 0, }, 0 }; #define RBD_SIZE (0x0a) static rbd_t init_rbd = { 0, 0, 0, 0, ETH_FRAME_LEN + 8 }; #define TX_SIZE (0x08) #define TBD_SIZE (0x08) static int ether1_init_for_open (struct net_device *dev) { int i, status, addr, next, next2; int failures = 0; unsigned long timeout; writeb(CTRL_RST|CTRL_ACK, REG_CONTROL); for (i = 0; i < 6; i++) init_sa.sa_addr[i] = dev->dev_addr[i]; /* load 
data structures into ether1 RAM */ ether1_writebuffer (dev, &init_scp, SCP_ADDR, SCP_SIZE); ether1_writebuffer (dev, &init_iscp, ISCP_ADDR, ISCP_SIZE); ether1_writebuffer (dev, &init_scb, SCB_ADDR, SCB_SIZE); ether1_writebuffer (dev, &init_cfg, CFG_ADDR, CFG_SIZE); ether1_writebuffer (dev, &init_sa, SA_ADDR, SA_SIZE); ether1_writebuffer (dev, &init_mc, MC_ADDR, MC_SIZE); ether1_writebuffer (dev, &init_tdr, TDR_ADDR, TDR_SIZE); ether1_writebuffer (dev, &init_nop, NOP_ADDR, NOP_SIZE); if (ether1_readw(dev, CFG_ADDR, cfg_t, cfg_command, NORMALIRQS) != CMD_CONFIG) { printk (KERN_ERR "%s: detected either RAM fault or compiler bug\n", dev->name); return 1; } /* * setup circularly linked list of { rfd, rbd, buffer }, with * all rfds circularly linked, rbds circularly linked. * First rfd is linked to scp, first rbd is linked to first * rfd. Last rbd has a suspend command. */ addr = RX_AREA_START; do { next = addr + RFD_SIZE + RBD_SIZE + ETH_FRAME_LEN + 10; next2 = next + RFD_SIZE + RBD_SIZE + ETH_FRAME_LEN + 10; if (next2 >= RX_AREA_END) { next = RX_AREA_START; init_rfd.rfd_command = RFD_CMDEL | RFD_CMDSUSPEND; priv(dev)->rx_tail = addr; } else init_rfd.rfd_command = 0; if (addr == RX_AREA_START) init_rfd.rfd_rbdoffset = addr + RFD_SIZE; else init_rfd.rfd_rbdoffset = 0; init_rfd.rfd_link = next; init_rbd.rbd_link = next + RFD_SIZE; init_rbd.rbd_bufl = addr + RFD_SIZE + RBD_SIZE; ether1_writebuffer (dev, &init_rfd, addr, RFD_SIZE); ether1_writebuffer (dev, &init_rbd, addr + RFD_SIZE, RBD_SIZE); addr = next; } while (next2 < RX_AREA_END); priv(dev)->tx_link = NOP_ADDR; priv(dev)->tx_head = NOP_ADDR + NOP_SIZE; priv(dev)->tx_tail = TDR_ADDR; priv(dev)->rx_head = RX_AREA_START; /* release reset & give 586 a prod */ priv(dev)->resetting = 1; priv(dev)->initialising = 1; writeb(CTRL_RST, REG_CONTROL); writeb(0, REG_CONTROL); writeb(CTRL_CA, REG_CONTROL); /* 586 should now unset iscp.busy */ timeout = jiffies + HZ/2; while (ether1_readw(dev, ISCP_ADDR, iscp_t, iscp_busy, 
DISABLEIRQS) == 1) { if (time_after(jiffies, timeout)) { printk (KERN_WARNING "%s: can't initialise 82586: iscp is busy\n", dev->name); return 1; } } /* check status of commands that we issued */ timeout += HZ/10; while (((status = ether1_readw(dev, CFG_ADDR, cfg_t, cfg_status, DISABLEIRQS)) & STAT_COMPLETE) == 0) { if (time_after(jiffies, timeout)) break; } if ((status & (STAT_COMPLETE | STAT_OK)) != (STAT_COMPLETE | STAT_OK)) { printk (KERN_WARNING "%s: can't initialise 82586: config status %04X\n", dev->name, status); printk (KERN_DEBUG "%s: SCB=[STS=%04X CMD=%04X CBL=%04X RFA=%04X]\n", dev->name, ether1_readw(dev, SCB_ADDR, scb_t, scb_status, NORMALIRQS), ether1_readw(dev, SCB_ADDR, scb_t, scb_command, NORMALIRQS), ether1_readw(dev, SCB_ADDR, scb_t, scb_cbl_offset, NORMALIRQS), ether1_readw(dev, SCB_ADDR, scb_t, scb_rfa_offset, NORMALIRQS)); failures += 1; } timeout += HZ/10; while (((status = ether1_readw(dev, SA_ADDR, sa_t, sa_status, DISABLEIRQS)) & STAT_COMPLETE) == 0) { if (time_after(jiffies, timeout)) break; } if ((status & (STAT_COMPLETE | STAT_OK)) != (STAT_COMPLETE | STAT_OK)) { printk (KERN_WARNING "%s: can't initialise 82586: set address status %04X\n", dev->name, status); printk (KERN_DEBUG "%s: SCB=[STS=%04X CMD=%04X CBL=%04X RFA=%04X]\n", dev->name, ether1_readw(dev, SCB_ADDR, scb_t, scb_status, NORMALIRQS), ether1_readw(dev, SCB_ADDR, scb_t, scb_command, NORMALIRQS), ether1_readw(dev, SCB_ADDR, scb_t, scb_cbl_offset, NORMALIRQS), ether1_readw(dev, SCB_ADDR, scb_t, scb_rfa_offset, NORMALIRQS)); failures += 1; } timeout += HZ/10; while (((status = ether1_readw(dev, MC_ADDR, mc_t, mc_status, DISABLEIRQS)) & STAT_COMPLETE) == 0) { if (time_after(jiffies, timeout)) break; } if ((status & (STAT_COMPLETE | STAT_OK)) != (STAT_COMPLETE | STAT_OK)) { printk (KERN_WARNING "%s: can't initialise 82586: set multicast status %04X\n", dev->name, status); printk (KERN_DEBUG "%s: SCB=[STS=%04X CMD=%04X CBL=%04X RFA=%04X]\n", dev->name, ether1_readw(dev, SCB_ADDR, 
scb_t, scb_status, NORMALIRQS), ether1_readw(dev, SCB_ADDR, scb_t, scb_command, NORMALIRQS), ether1_readw(dev, SCB_ADDR, scb_t, scb_cbl_offset, NORMALIRQS), ether1_readw(dev, SCB_ADDR, scb_t, scb_rfa_offset, NORMALIRQS)); failures += 1; } timeout += HZ; while (((status = ether1_readw(dev, TDR_ADDR, tdr_t, tdr_status, DISABLEIRQS)) & STAT_COMPLETE) == 0) { if (time_after(jiffies, timeout)) break; } if ((status & (STAT_COMPLETE | STAT_OK)) != (STAT_COMPLETE | STAT_OK)) { printk (KERN_WARNING "%s: can't tdr (ignored)\n", dev->name); printk (KERN_DEBUG "%s: SCB=[STS=%04X CMD=%04X CBL=%04X RFA=%04X]\n", dev->name, ether1_readw(dev, SCB_ADDR, scb_t, scb_status, NORMALIRQS), ether1_readw(dev, SCB_ADDR, scb_t, scb_command, NORMALIRQS), ether1_readw(dev, SCB_ADDR, scb_t, scb_cbl_offset, NORMALIRQS), ether1_readw(dev, SCB_ADDR, scb_t, scb_rfa_offset, NORMALIRQS)); } else { status = ether1_readw(dev, TDR_ADDR, tdr_t, tdr_result, DISABLEIRQS); if (status & TDR_XCVRPROB) printk (KERN_WARNING "%s: i/f failed tdr: transceiver problem\n", dev->name); else if ((status & (TDR_SHORT|TDR_OPEN)) && (status & TDR_TIME)) { #ifdef FANCY printk (KERN_WARNING "%s: i/f failed tdr: cable %s %d.%d us away\n", dev->name, status & TDR_SHORT ? "short" : "open", (status & TDR_TIME) / 10, (status & TDR_TIME) % 10); #else printk (KERN_WARNING "%s: i/f failed tdr: cable %s %d clks away\n", dev->name, status & TDR_SHORT ? "short" : "open", (status & TDR_TIME)); #endif } } if (failures) ether1_reset (dev); return failures ? 
1 : 0; } /* ------------------------------------------------------------------------- */ static int ether1_txalloc (struct net_device *dev, int size) { int start, tail; size = (size + 1) & ~1; tail = priv(dev)->tx_tail; if (priv(dev)->tx_head + size > TX_AREA_END) { if (tail > priv(dev)->tx_head) return -1; start = TX_AREA_START; if (start + size > tail) return -1; priv(dev)->tx_head = start + size; } else { if (priv(dev)->tx_head < tail && (priv(dev)->tx_head + size) > tail) return -1; start = priv(dev)->tx_head; priv(dev)->tx_head += size; } return start; } static int ether1_open (struct net_device *dev) { if (!is_valid_ether_addr(dev->dev_addr)) { printk(KERN_WARNING "%s: invalid ethernet MAC address\n", dev->name); return -EINVAL; } if (request_irq(dev->irq, ether1_interrupt, 0, "ether1", dev)) return -EAGAIN; if (ether1_init_for_open (dev)) { free_irq (dev->irq, dev); return -EAGAIN; } netif_start_queue(dev); return 0; } static void ether1_timeout(struct net_device *dev) { printk(KERN_WARNING "%s: transmit timeout, network cable problem?\n", dev->name); printk(KERN_WARNING "%s: resetting device\n", dev->name); ether1_reset (dev); if (ether1_init_for_open (dev)) printk (KERN_ERR "%s: unable to restart interface\n", dev->name); dev->stats.tx_errors++; netif_wake_queue(dev); } static int ether1_sendpacket (struct sk_buff *skb, struct net_device *dev) { int tmp, tst, nopaddr, txaddr, tbdaddr, dataddr; unsigned long flags; tx_t tx; tbd_t tbd; nop_t nop; if (priv(dev)->restart) { printk(KERN_WARNING "%s: resetting device\n", dev->name); ether1_reset(dev); if (ether1_init_for_open(dev)) printk(KERN_ERR "%s: unable to restart interface\n", dev->name); else priv(dev)->restart = 0; } if (skb->len < ETH_ZLEN) { if (skb_padto(skb, ETH_ZLEN)) goto out; } /* * insert packet followed by a nop */ txaddr = ether1_txalloc (dev, TX_SIZE); tbdaddr = ether1_txalloc (dev, TBD_SIZE); dataddr = ether1_txalloc (dev, skb->len); nopaddr = ether1_txalloc (dev, NOP_SIZE); tx.tx_status = 
0; tx.tx_command = CMD_TX | CMD_INTR; tx.tx_link = nopaddr; tx.tx_tbdoffset = tbdaddr; tbd.tbd_opts = TBD_EOL | skb->len; tbd.tbd_link = I82586_NULL; tbd.tbd_bufl = dataddr; tbd.tbd_bufh = 0; nop.nop_status = 0; nop.nop_command = CMD_NOP; nop.nop_link = nopaddr; local_irq_save(flags); ether1_writebuffer (dev, &tx, txaddr, TX_SIZE); ether1_writebuffer (dev, &tbd, tbdaddr, TBD_SIZE); ether1_writebuffer (dev, skb->data, dataddr, skb->len); ether1_writebuffer (dev, &nop, nopaddr, NOP_SIZE); tmp = priv(dev)->tx_link; priv(dev)->tx_link = nopaddr; /* now reset the previous nop pointer */ ether1_writew(dev, txaddr, tmp, nop_t, nop_link, NORMALIRQS); local_irq_restore(flags); /* handle transmit */ /* check to see if we have room for a full sized ether frame */ tmp = priv(dev)->tx_head; tst = ether1_txalloc (dev, TX_SIZE + TBD_SIZE + NOP_SIZE + ETH_FRAME_LEN); priv(dev)->tx_head = tmp; dev_kfree_skb (skb); if (tst == -1) netif_stop_queue(dev); out: return NETDEV_TX_OK; } static void ether1_xmit_done (struct net_device *dev) { nop_t nop; int caddr, tst; caddr = priv(dev)->tx_tail; again: ether1_readbuffer (dev, &nop, caddr, NOP_SIZE); switch (nop.nop_command & CMD_MASK) { case CMD_TDR: /* special case */ if (ether1_readw(dev, SCB_ADDR, scb_t, scb_cbl_offset, NORMALIRQS) != (unsigned short)I82586_NULL) { ether1_writew(dev, SCB_CMDCUCSTART | SCB_CMDRXSTART, SCB_ADDR, scb_t, scb_command, NORMALIRQS); writeb(CTRL_CA, REG_CONTROL); } priv(dev)->tx_tail = NOP_ADDR; return; case CMD_NOP: if (nop.nop_link == caddr) { if (priv(dev)->initialising == 0) printk (KERN_WARNING "%s: strange command complete with no tx command!\n", dev->name); else priv(dev)->initialising = 0; return; } if (caddr == nop.nop_link) return; caddr = nop.nop_link; goto again; case CMD_TX: if (nop.nop_status & STAT_COMPLETE) break; printk (KERN_ERR "%s: strange command complete without completed command\n", dev->name); priv(dev)->restart = 1; return; default: printk (KERN_WARNING "%s: strange command %d complete! 
(offset %04X)", dev->name, nop.nop_command & CMD_MASK, caddr); priv(dev)->restart = 1; return; } while (nop.nop_status & STAT_COMPLETE) { if (nop.nop_status & STAT_OK) { dev->stats.tx_packets++; dev->stats.collisions += (nop.nop_status & STAT_COLLISIONS); } else { dev->stats.tx_errors++; if (nop.nop_status & STAT_COLLAFTERTX) dev->stats.collisions++; if (nop.nop_status & STAT_NOCARRIER) dev->stats.tx_carrier_errors++; if (nop.nop_status & STAT_TXLOSTCTS) printk (KERN_WARNING "%s: cts lost\n", dev->name); if (nop.nop_status & STAT_TXSLOWDMA) dev->stats.tx_fifo_errors++; if (nop.nop_status & STAT_COLLEXCESSIVE) dev->stats.collisions += 16; } if (nop.nop_link == caddr) { printk (KERN_ERR "%s: tx buffer chaining error: tx command points to itself\n", dev->name); break; } caddr = nop.nop_link; ether1_readbuffer (dev, &nop, caddr, NOP_SIZE); if ((nop.nop_command & CMD_MASK) != CMD_NOP) { printk (KERN_ERR "%s: tx buffer chaining error: no nop after tx command\n", dev->name); break; } if (caddr == nop.nop_link) break; caddr = nop.nop_link; ether1_readbuffer (dev, &nop, caddr, NOP_SIZE); if ((nop.nop_command & CMD_MASK) != CMD_TX) { printk (KERN_ERR "%s: tx buffer chaining error: no tx command after nop\n", dev->name); break; } } priv(dev)->tx_tail = caddr; caddr = priv(dev)->tx_head; tst = ether1_txalloc (dev, TX_SIZE + TBD_SIZE + NOP_SIZE + ETH_FRAME_LEN); priv(dev)->tx_head = caddr; if (tst != -1) netif_wake_queue(dev); } static void ether1_recv_done (struct net_device *dev) { int status; int nexttail, rbdaddr; rbd_t rbd; do { status = ether1_readw(dev, priv(dev)->rx_head, rfd_t, rfd_status, NORMALIRQS); if ((status & RFD_COMPLETE) == 0) break; rbdaddr = ether1_readw(dev, priv(dev)->rx_head, rfd_t, rfd_rbdoffset, NORMALIRQS); ether1_readbuffer (dev, &rbd, rbdaddr, RBD_SIZE); if ((rbd.rbd_status & (RBD_EOF | RBD_ACNTVALID)) == (RBD_EOF | RBD_ACNTVALID)) { int length = rbd.rbd_status & RBD_ACNT; struct sk_buff *skb; length = (length + 1) & ~1; skb = netdev_alloc_skb(dev, 
length + 2); if (skb) { skb_reserve (skb, 2); ether1_readbuffer (dev, skb_put (skb, length), rbd.rbd_bufl, length); skb->protocol = eth_type_trans (skb, dev); netif_rx (skb); dev->stats.rx_packets++; } else dev->stats.rx_dropped++; } else { printk(KERN_WARNING "%s: %s\n", dev->name, (rbd.rbd_status & RBD_EOF) ? "oversized packet" : "acnt not valid"); dev->stats.rx_dropped++; } nexttail = ether1_readw(dev, priv(dev)->rx_tail, rfd_t, rfd_link, NORMALIRQS); /* nexttail should be rx_head */ if (nexttail != priv(dev)->rx_head) printk(KERN_ERR "%s: receiver buffer chaining error (%04X != %04X)\n", dev->name, nexttail, priv(dev)->rx_head); ether1_writew(dev, RFD_CMDEL | RFD_CMDSUSPEND, nexttail, rfd_t, rfd_command, NORMALIRQS); ether1_writew(dev, 0, priv(dev)->rx_tail, rfd_t, rfd_command, NORMALIRQS); ether1_writew(dev, 0, priv(dev)->rx_tail, rfd_t, rfd_status, NORMALIRQS); ether1_writew(dev, 0, priv(dev)->rx_tail, rfd_t, rfd_rbdoffset, NORMALIRQS); priv(dev)->rx_tail = nexttail; priv(dev)->rx_head = ether1_readw(dev, priv(dev)->rx_head, rfd_t, rfd_link, NORMALIRQS); } while (1); } static irqreturn_t ether1_interrupt (int irq, void *dev_id) { struct net_device *dev = (struct net_device *)dev_id; int status; status = ether1_readw(dev, SCB_ADDR, scb_t, scb_status, NORMALIRQS); if (status) { ether1_writew(dev, status & (SCB_STRNR | SCB_STCNA | SCB_STFR | SCB_STCX), SCB_ADDR, scb_t, scb_command, NORMALIRQS); writeb(CTRL_CA | CTRL_ACK, REG_CONTROL); if (status & SCB_STCX) { ether1_xmit_done (dev); } if (status & SCB_STCNA) { if (priv(dev)->resetting == 0) printk (KERN_WARNING "%s: CU went not ready ???\n", dev->name); else priv(dev)->resetting += 1; if (ether1_readw(dev, SCB_ADDR, scb_t, scb_cbl_offset, NORMALIRQS) != (unsigned short)I82586_NULL) { ether1_writew(dev, SCB_CMDCUCSTART, SCB_ADDR, scb_t, scb_command, NORMALIRQS); writeb(CTRL_CA, REG_CONTROL); } if (priv(dev)->resetting == 2) priv(dev)->resetting = 0; } if (status & SCB_STFR) { ether1_recv_done (dev); } if (status 
& SCB_STRNR) { if (ether1_readw(dev, SCB_ADDR, scb_t, scb_status, NORMALIRQS) & SCB_STRXSUSP) { printk (KERN_WARNING "%s: RU went not ready: RU suspended\n", dev->name); ether1_writew(dev, SCB_CMDRXRESUME, SCB_ADDR, scb_t, scb_command, NORMALIRQS); writeb(CTRL_CA, REG_CONTROL); dev->stats.rx_dropped++; /* we suspended due to lack of buffer space */ } else printk(KERN_WARNING "%s: RU went not ready: %04X\n", dev->name, ether1_readw(dev, SCB_ADDR, scb_t, scb_status, NORMALIRQS)); printk (KERN_WARNING "RU ptr = %04X\n", ether1_readw(dev, SCB_ADDR, scb_t, scb_rfa_offset, NORMALIRQS)); } } else writeb(CTRL_ACK, REG_CONTROL); return IRQ_HANDLED; } static int ether1_close (struct net_device *dev) { ether1_reset (dev); free_irq(dev->irq, dev); return 0; } /* * Set or clear the multicast filter for this adaptor. * num_addrs == -1 Promiscuous mode, receive all packets. * num_addrs == 0 Normal mode, clear multicast list. * num_addrs > 0 Multicast mode, receive normal and MC packets, and do * best-effort filtering. 
 */
static void ether1_setmulticastlist (struct net_device *dev)
{
	/* Intentionally empty: multicast filtering is not implemented for
	 * this adaptor (the comment above documents the intended modes). */
}

/* ------------------------------------------------------------------------- */

/* Print the driver version banner once, gated on net_debug. */
static void __devinit ether1_banner(void)
{
	static unsigned int version_printed = 0;

	if (net_debug && version_printed++ == 0)
		printk(KERN_INFO "%s", version);
}

static const struct net_device_ops ether1_netdev_ops = {
	.ndo_open		= ether1_open,
	.ndo_stop		= ether1_close,
	.ndo_start_xmit		= ether1_sendpacket,
	.ndo_set_rx_mode	= ether1_setmulticastlist,
	.ndo_tx_timeout		= ether1_timeout,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
};

/*
 * Probe one ether1 expansion card: claim its resources, map its I/O
 * space, reset the 82586, read the MAC address from the card's ID PROM
 * and register the net device.  Uses goto-based cleanup so each failure
 * releases exactly what was acquired before it.
 */
static int __devinit
ether1_probe(struct expansion_card *ec, const struct ecard_id *id)
{
	struct net_device *dev;
	int i, ret = 0;

	ether1_banner();

	ret = ecard_request_resources(ec);
	if (ret)
		goto out;

	dev = alloc_etherdev(sizeof(struct ether1_priv));
	if (!dev) {
		ret = -ENOMEM;
		goto release;
	}

	SET_NETDEV_DEV(dev, &ec->dev);

	dev->irq = ec->irq;
	priv(dev)->base = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0);
	if (!priv(dev)->base) {
		ret = -ENOMEM;
		goto free;
	}

	/* ether1_reset() reports the bus width (BUS_16) or 0 on failure. */
	if ((priv(dev)->bus_type = ether1_reset(dev)) == 0) {
		ret = -ENODEV;
		goto free;
	}

	/* MAC address bytes live every 4th byte in the ID PROM window. */
	for (i = 0; i < 6; i++)
		dev->dev_addr[i] = readb(IDPROM_ADDRESS + (i << 2));

	if (ether1_init_2(dev)) {
		ret = -ENODEV;
		goto free;
	}

	dev->netdev_ops = &ether1_netdev_ops;
	/* NOTE(review): 5 * HZ / 100 is a 50 ms watchdog — unusually short;
	 * presumably intentional for this hardware, but verify. */
	dev->watchdog_timeo = 5 * HZ / 100;

	ret = register_netdev(dev);
	if (ret)
		goto free;

	printk(KERN_INFO "%s: ether1 in slot %d, %pM\n",
		dev->name, ec->slot_no, dev->dev_addr);

	ecard_set_drvdata(ec, dev);
	return 0;

 free:
	free_netdev(dev);
 release:
	ecard_release_resources(ec);
 out:
	return ret;
}

/* Tear down one card: unregister and free the net device, then release
 * the expansion-card resources claimed in probe. */
static void __devexit ether1_remove(struct expansion_card *ec)
{
	struct net_device *dev = ecard_get_drvdata(ec);

	ecard_set_drvdata(ec, NULL);

	unregister_netdev(dev);
	free_netdev(dev);
	ecard_release_resources(ec);
}

/* Cards this driver binds to; the 0xffff/0xffff pair terminates the list. */
static const struct ecard_id ether1_ids[] = {
	{ MANU_ACORN, PROD_ACORN_ETHER1 },
	{ 0xffff, 0xffff }
};

static struct ecard_driver ether1_driver = {
	.probe		= ether1_probe,
	.remove		= __devexit_p(ether1_remove),
	.id_table	= ether1_ids,
	.drv = {
		.name	= "ether1",
	},
};

static int __init ether1_init(void)
{
	return ecard_register_driver(&ether1_driver);
}

static void __exit ether1_exit(void)
{
	ecard_remove_driver(&ether1_driver);
}

module_init(ether1_init);
module_exit(ether1_exit);

MODULE_LICENSE("GPL");
gpl-2.0
farchanrifai/lineage
drivers/pnp/pnpbios/core.c
4974
14486
/*
 * pnpbios -- PnP BIOS driver
 *
 * This driver provides access to Plug-'n'-Play services provided by
 * the PnP BIOS firmware, described in the following documents:
 *   Plug and Play BIOS Specification, Version 1.0A, 5 May 1994
 *   Plug and Play BIOS Clarification Paper, 6 October 1994
 *   Compaq Computer Corporation, Phoenix Technologies Ltd., Intel Corp.
 *
 * Originally (C) 1998 Christian Schmidt <schmidt@digadd.de>
 * Modifications (C) 1998 Tom Lees <tom@lpsg.demon.co.uk>
 * Minor reorganizations by David Hinds <dahinds@users.sourceforge.net>
 * Further modifications (C) 2001, 2002 by:
 *   Alan Cox <alan@redhat.com>
 *   Thomas Hood
 *   Brian Gerst <bgerst@didntduck.org>
 *
 * Ported to the PnP Layer and several additional improvements (C) 2002
 * by Adam Belay <ambx1@neo.rr.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/* Change Log
 *
 * Adam Belay - <ambx1@neo.rr.com> - March 16, 2003
 * rev 1.01	Only call pnp_bios_dev_node_info once
 *		Added pnpbios_print_status
 *		Added several new error messages and info messages
 *		Added pnpbios_interface_attach_device
 *		integrated core and proc init system
 *		Introduced PNPMODE flags
 *		Removed some useless includes
 */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/pnp.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/spinlock.h>
#include <linux/dmi.h>
#include <linux/delay.h>
#include <linux/acpi.h>
#include <linux/freezer.h>
#include <linux/kthread.h>

#include <asm/page.h>
#include <asm/desc.h>
#include <asm/byteorder.h>

#include "../base.h"
#include "pnpbios.h"

/*
 *
 * PnP BIOS INTERFACE
 *
 */

static union pnp_bios_install_struct *pnp_bios_install = NULL;

/* True once a PnP BIOS installation structure has been located. */
int pnp_bios_present(void)
{
	return (pnp_bios_install != NULL);
}

struct pnp_dev_node_info node_info;

/*
 *
 * DOCKING FUNCTIONS
 *
 */

#ifdef CONFIG_HOTPLUG

static struct completion unload_sem;

/*
 * Notify userspace of a dock/undock event by spawning /sbin/pnpbios
 * with an ACTION= / DOCK= environment describing the station.
 * (Much of this belongs in a shared routine somewhere)
 */
static int pnp_dock_event(int dock, struct pnp_docking_station_info *info)
{
	char *argv[3], **envp, *buf, *scratch;
	int i = 0, value;

	if (!(envp = kcalloc(20, sizeof(char *), GFP_KERNEL)))
		return -ENOMEM;
	if (!(buf = kzalloc(256, GFP_KERNEL))) {
		kfree(envp);
		return -ENOMEM;
	}

	/* FIXME: if there are actual users of this, it should be
	 * integrated into the driver core and use the usual infrastructure
	 * like sysfs and uevents */
	argv[0] = "/sbin/pnpbios";
	argv[1] = "dock";
	argv[2] = NULL;

	/* minimal command environment */
	envp[i++] = "HOME=/";
	envp[i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";

#ifdef DEBUG
	/* hint that policy agent should enter no-stdout debug mode */
	envp[i++] = "DEBUG=kernel";
#endif
	/* extensible set of named bus-specific parameters,
	 * supporting multiple driver selection algorithms.
	 */
	scratch = buf;

	/* action:  add, remove */
	envp[i++] = scratch;
	scratch += sprintf(scratch, "ACTION=%s", dock ? "add" : "remove") + 1;

	/* Report the ident for the dock */
	envp[i++] = scratch;
	scratch += sprintf(scratch, "DOCK=%x/%x/%x",
			   info->location_id, info->serial,
			   info->capabilities);
	envp[i] = NULL;

	/* NOTE(review): the helper's return value is captured in 'value'
	 * but deliberately not propagated — the event is best-effort. */
	value = call_usermodehelper(argv [0], argv, envp, UMH_WAIT_EXEC);
	kfree(buf);
	kfree(envp);
	return 0;
}

/*
 * Poll the PnP docking at regular intervals
 */
static int pnp_dock_thread(void *unused)
{
	static struct pnp_docking_station_info now;
	int docked = -1, d = 0;	/* docked == -1: state not yet known */

	set_freezable();
	while (1) {
		int status;

		/*
		 * Poll every 2 seconds
		 */
		msleep_interruptible(2000);

		if (try_to_freeze())
			continue;

		status = pnp_bios_dock_station_info(&now);

		switch (status) {
			/*
			 * No dock to manage
			 */
		case PNP_FUNCTION_NOT_SUPPORTED:
			complete_and_exit(&unload_sem, 0);
		case PNP_SYSTEM_NOT_DOCKED:
			d = 0;
			break;
		case PNP_SUCCESS:
			d = 1;
			break;
		default:
			pnpbios_print_status("pnp_dock_thread", status);
			continue;
		}
		/* Only fire an event on a state transition, and only record
		 * the new state once the event was delivered. */
		if (d != docked) {
			if (pnp_dock_event(d, &now) == 0) {
				docked = d;
#if 0
				printk(KERN_INFO
				       "PnPBIOS: Docking station %stached\n",
				       docked ? "at" : "de");
#endif
			}
		}
	}
	complete_and_exit(&unload_sem, 0);
}

#endif   /* CONFIG_HOTPLUG */

/* Read the device's current resource set from the BIOS dev node and
 * mirror it into the pnp_dev.  Fails with -EPERM for static devices. */
static int pnpbios_get_resources(struct pnp_dev *dev)
{
	u8 nodenum = dev->number;
	struct pnp_bios_node *node;

	if (!pnpbios_is_dynamic(dev))
		return -EPERM;

	pnp_dbg(&dev->dev, "get resources\n");
	node = kzalloc(node_info.max_node_size, GFP_KERNEL);
	if (!node)
		return -1;	/* NOTE(review): -ENOMEM would be consistent
				 * with pnpbios_disable_resources below */
	if (pnp_bios_get_dev_node(&nodenum, (char)PNPMODE_DYNAMIC, node)) {
		kfree(node);
		return -ENODEV;
	}
	pnpbios_read_resources_from_node(dev, node);
	dev->active = pnp_is_active(dev);
	kfree(node);
	return 0;
}

/* Write the pnp_dev's resources back into its BIOS dev node via
 * pnp_bios_set_dev_node.  Read-modify-write: the node is fetched first
 * so unrelated fields are preserved. */
static int pnpbios_set_resources(struct pnp_dev *dev)
{
	u8 nodenum = dev->number;
	struct pnp_bios_node *node;
	int ret;

	if (!pnpbios_is_dynamic(dev))
		return -EPERM;

	pnp_dbg(&dev->dev, "set resources\n");
	node = kzalloc(node_info.max_node_size, GFP_KERNEL);
	if (!node)
		return -1;	/* NOTE(review): see -ENOMEM note above */
	if (pnp_bios_get_dev_node(&nodenum, (char)PNPMODE_DYNAMIC, node)) {
		kfree(node);
		return -ENODEV;
	}
	if (pnpbios_write_resources_to_node(dev, node) < 0) {
		kfree(node);
		return -1;
	}
	ret = pnp_bios_set_dev_node(node->handle, (char)PNPMODE_DYNAMIC, node);
	kfree(node);
	/* BIOS returns a positive status code on failure; normalize it. */
	if (ret > 0)
		ret = -1;
	return ret;
}

/* Zero the payload of every resource tag in a node's data stream,
 * walking the small-tag/large-tag encoding until the end tag (0x0f). */
static void pnpbios_zero_data_stream(struct pnp_bios_node *node)
{
	unsigned char *p = (char *)node->data;
	unsigned char *end = (char *)(node->data + node->size);
	unsigned int len;
	int i;

	while ((char *)p < (char *)end) {
		if (p[0] & 0x80) {	/* large tag: 16-bit little-endian length */
			len = (p[2] << 8) | p[1];
			p += 3;
		} else {
			/* small tag: item 0x0f is the end tag */
			if (((p[0] >> 3) & 0x0f) == 0x0f)
				return;
			len = p[0] & 0x07;
			p += 1;
		}
		for (i = 0; i < len; i++)
			p[i] = 0;
		p += len;
	}
	printk(KERN_ERR
	       "PnPBIOS: Resource structure did not contain an end tag.\n");
}

/* Disable a device by clearing its resource data and writing the node
 * back.  Refused for devices flagged PNPBIOS_NO_DISABLE or static ones. */
static int pnpbios_disable_resources(struct pnp_dev *dev)
{
	struct pnp_bios_node *node;
	u8 nodenum = dev->number;
	int ret;

	if (dev->flags & PNPBIOS_NO_DISABLE || !pnpbios_is_dynamic(dev))
		return -EPERM;

	node = kzalloc(node_info.max_node_size, GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	if (pnp_bios_get_dev_node(&nodenum, (char)PNPMODE_DYNAMIC, node)) {
kfree(node); return -ENODEV; } pnpbios_zero_data_stream(node); ret = pnp_bios_set_dev_node(dev->number, (char)PNPMODE_DYNAMIC, node); kfree(node); if (ret > 0) ret = -1; return ret; } /* PnP Layer support */ struct pnp_protocol pnpbios_protocol = { .name = "Plug and Play BIOS", .get = pnpbios_get_resources, .set = pnpbios_set_resources, .disable = pnpbios_disable_resources, }; static int __init insert_device(struct pnp_bios_node *node) { struct list_head *pos; struct pnp_dev *dev; char id[8]; /* check if the device is already added */ list_for_each(pos, &pnpbios_protocol.devices) { dev = list_entry(pos, struct pnp_dev, protocol_list); if (dev->number == node->handle) return -1; } pnp_eisa_id_to_string(node->eisa_id & PNP_EISA_ID_MASK, id); dev = pnp_alloc_dev(&pnpbios_protocol, node->handle, id); if (!dev) return -1; pnpbios_parse_data_stream(dev, node); dev->active = pnp_is_active(dev); dev->flags = node->flags; if (!(dev->flags & PNPBIOS_NO_CONFIG)) dev->capabilities |= PNP_CONFIGURABLE; if (!(dev->flags & PNPBIOS_NO_DISABLE) && pnpbios_is_dynamic(dev)) dev->capabilities |= PNP_DISABLE; dev->capabilities |= PNP_READ; if (pnpbios_is_dynamic(dev)) dev->capabilities |= PNP_WRITE; if (dev->flags & PNPBIOS_REMOVABLE) dev->capabilities |= PNP_REMOVABLE; /* clear out the damaged flags */ if (!dev->active) pnp_init_resources(dev); pnp_add_device(dev); pnpbios_interface_attach_device(node); return 0; } static void __init build_devlist(void) { u8 nodenum; unsigned int nodes_got = 0; unsigned int devs = 0; struct pnp_bios_node *node; node = kzalloc(node_info.max_node_size, GFP_KERNEL); if (!node) return; for (nodenum = 0; nodenum < 0xff;) { u8 thisnodenum = nodenum; /* eventually we will want to use PNPMODE_STATIC here but for now * dynamic will help us catch buggy bioses to add to the blacklist. 
*/ if (!pnpbios_dont_use_current_config) { if (pnp_bios_get_dev_node (&nodenum, (char)PNPMODE_DYNAMIC, node)) break; } else { if (pnp_bios_get_dev_node (&nodenum, (char)PNPMODE_STATIC, node)) break; } nodes_got++; if (insert_device(node) == 0) devs++; if (nodenum <= thisnodenum) { printk(KERN_ERR "PnPBIOS: build_devlist: Node number 0x%x is out of sequence following node 0x%x. Aborting.\n", (unsigned int)nodenum, (unsigned int)thisnodenum); break; } } kfree(node); printk(KERN_INFO "PnPBIOS: %i node%s reported by PnP BIOS; %i recorded by driver\n", nodes_got, nodes_got != 1 ? "s" : "", devs); } /* * * INIT AND EXIT * */ static int pnpbios_disabled; int pnpbios_dont_use_current_config; static int __init pnpbios_setup(char *str) { int invert; while ((str != NULL) && (*str != '\0')) { if (strncmp(str, "off", 3) == 0) pnpbios_disabled = 1; if (strncmp(str, "on", 2) == 0) pnpbios_disabled = 0; invert = (strncmp(str, "no-", 3) == 0); if (invert) str += 3; if (strncmp(str, "curr", 4) == 0) pnpbios_dont_use_current_config = invert; str = strchr(str, ','); if (str != NULL) str += strspn(str, ", \t"); } return 1; } __setup("pnpbios=", pnpbios_setup); /* PnP BIOS signature: "$PnP" */ #define PNP_SIGNATURE (('$' << 0) + ('P' << 8) + ('n' << 16) + ('P' << 24)) static int __init pnpbios_probe_system(void) { union pnp_bios_install_struct *check; u8 sum; int length, i; printk(KERN_INFO "PnPBIOS: Scanning system for PnP BIOS support...\n"); /* * Search the defined area (0xf0000-0xffff0) for a valid PnP BIOS * structure and, if one is found, sets up the selectors and * entry points */ for (check = (union pnp_bios_install_struct *)__va(0xf0000); check < (union pnp_bios_install_struct *)__va(0xffff0); check = (void *)check + 16) { if (check->fields.signature != PNP_SIGNATURE) continue; printk(KERN_INFO "PnPBIOS: Found PnP BIOS installation structure at 0x%p\n", check); length = check->fields.length; if (!length) { printk(KERN_ERR "PnPBIOS: installation structure is invalid, 
skipping\n"); continue; } for (sum = 0, i = 0; i < length; i++) sum += check->chars[i]; if (sum) { printk(KERN_ERR "PnPBIOS: installation structure is corrupted, skipping\n"); continue; } if (check->fields.version < 0x10) { printk(KERN_WARNING "PnPBIOS: PnP BIOS version %d.%d is not supported\n", check->fields.version >> 4, check->fields.version & 15); continue; } printk(KERN_INFO "PnPBIOS: PnP BIOS version %d.%d, entry 0x%x:0x%x, dseg 0x%x\n", check->fields.version >> 4, check->fields.version & 15, check->fields.pm16cseg, check->fields.pm16offset, check->fields.pm16dseg); pnp_bios_install = check; return 1; } printk(KERN_INFO "PnPBIOS: PnP BIOS support was not detected.\n"); return 0; } static int __init exploding_pnp_bios(const struct dmi_system_id *d) { printk(KERN_WARNING "%s detected. Disabling PnPBIOS\n", d->ident); return 0; } static struct dmi_system_id pnpbios_dmi_table[] __initdata = { { /* PnPBIOS GPF on boot */ .callback = exploding_pnp_bios, .ident = "Higraded P14H", .matches = { DMI_MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."), DMI_MATCH(DMI_BIOS_VERSION, "07.00T"), DMI_MATCH(DMI_SYS_VENDOR, "Higraded"), DMI_MATCH(DMI_PRODUCT_NAME, "P14H"), }, }, { /* PnPBIOS GPF on boot */ .callback = exploding_pnp_bios, .ident = "ASUS P4P800", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer Inc."), DMI_MATCH(DMI_BOARD_NAME, "P4P800"), }, }, {} }; static int __init pnpbios_init(void) { int ret; #if defined(CONFIG_PPC) if (check_legacy_ioport(PNPBIOS_BASE)) return -ENODEV; #endif if (pnpbios_disabled || dmi_check_system(pnpbios_dmi_table) || paravirt_enabled()) { printk(KERN_INFO "PnPBIOS: Disabled\n"); return -ENODEV; } #ifdef CONFIG_PNPACPI if (!acpi_disabled && !pnpacpi_disabled) { pnpbios_disabled = 1; printk(KERN_INFO "PnPBIOS: Disabled by ACPI PNP\n"); return -ENODEV; } #endif /* CONFIG_ACPI */ /* scan the system for pnpbios support */ if (!pnpbios_probe_system()) return -ENODEV; /* make preparations for bios calls */ 
pnpbios_calls_init(pnp_bios_install); /* read the node info */ ret = pnp_bios_dev_node_info(&node_info); if (ret) { printk(KERN_ERR "PnPBIOS: Unable to get node info. Aborting.\n"); return ret; } /* register with the pnp layer */ ret = pnp_register_protocol(&pnpbios_protocol); if (ret) { printk(KERN_ERR "PnPBIOS: Unable to register driver. Aborting.\n"); return ret; } /* start the proc interface */ ret = pnpbios_proc_init(); if (ret) printk(KERN_ERR "PnPBIOS: Failed to create proc interface.\n"); /* scan for pnpbios devices */ build_devlist(); pnp_platform_devices = 1; return 0; } fs_initcall(pnpbios_init); static int __init pnpbios_thread_init(void) { #if defined(CONFIG_PPC) if (check_legacy_ioport(PNPBIOS_BASE)) return 0; #endif if (pnpbios_disabled) return 0; #ifdef CONFIG_HOTPLUG { struct task_struct *task; init_completion(&unload_sem); task = kthread_run(pnp_dock_thread, NULL, "kpnpbiosd"); if (IS_ERR(task)) return PTR_ERR(task); } #endif return 0; } /* Start the kernel thread later: */ module_init(pnpbios_thread_init); EXPORT_SYMBOL(pnpbios_protocol);
gpl-2.0
klabit87/SCH-I545_NA_LL_VZW
net/phonet/pn_netlink.c
6254
7553
/* * File: pn_netlink.c * * Phonet netlink interface * * Copyright (C) 2008 Nokia Corporation. * * Contact: Remi Denis-Courmont <remi.denis-courmont@nokia.com> * Original author: Sakari Ailus <sakari.ailus@nokia.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA * 02110-1301 USA */ #include <linux/kernel.h> #include <linux/netlink.h> #include <linux/phonet.h> #include <linux/slab.h> #include <net/sock.h> #include <net/phonet/pn_dev.h> /* Device address handling */ static int fill_addr(struct sk_buff *skb, struct net_device *dev, u8 addr, u32 pid, u32 seq, int event); void phonet_address_notify(int event, struct net_device *dev, u8 addr) { struct sk_buff *skb; int err = -ENOBUFS; skb = nlmsg_new(NLMSG_ALIGN(sizeof(struct ifaddrmsg)) + nla_total_size(1), GFP_KERNEL); if (skb == NULL) goto errout; err = fill_addr(skb, dev, addr, 0, 0, event); if (err < 0) { WARN_ON(err == -EMSGSIZE); kfree_skb(skb); goto errout; } rtnl_notify(skb, dev_net(dev), 0, RTNLGRP_PHONET_IFADDR, NULL, GFP_KERNEL); return; errout: rtnl_set_sk_err(dev_net(dev), RTNLGRP_PHONET_IFADDR, err); } static const struct nla_policy ifa_phonet_policy[IFA_MAX+1] = { [IFA_LOCAL] = { .type = NLA_U8 }, }; static int addr_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *attr) { struct net *net = sock_net(skb->sk); struct nlattr *tb[IFA_MAX+1]; struct net_device *dev; struct ifaddrmsg *ifm; int err; u8 pnaddr; if (!capable(CAP_SYS_ADMIN)) 
return -EPERM; ASSERT_RTNL(); err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_phonet_policy); if (err < 0) return err; ifm = nlmsg_data(nlh); if (tb[IFA_LOCAL] == NULL) return -EINVAL; pnaddr = nla_get_u8(tb[IFA_LOCAL]); if (pnaddr & 3) /* Phonet addresses only have 6 high-order bits */ return -EINVAL; dev = __dev_get_by_index(net, ifm->ifa_index); if (dev == NULL) return -ENODEV; if (nlh->nlmsg_type == RTM_NEWADDR) err = phonet_address_add(dev, pnaddr); else err = phonet_address_del(dev, pnaddr); if (!err) phonet_address_notify(nlh->nlmsg_type, dev, pnaddr); return err; } static int fill_addr(struct sk_buff *skb, struct net_device *dev, u8 addr, u32 pid, u32 seq, int event) { struct ifaddrmsg *ifm; struct nlmsghdr *nlh; nlh = nlmsg_put(skb, pid, seq, event, sizeof(*ifm), 0); if (nlh == NULL) return -EMSGSIZE; ifm = nlmsg_data(nlh); ifm->ifa_family = AF_PHONET; ifm->ifa_prefixlen = 0; ifm->ifa_flags = IFA_F_PERMANENT; ifm->ifa_scope = RT_SCOPE_LINK; ifm->ifa_index = dev->ifindex; NLA_PUT_U8(skb, IFA_LOCAL, addr); return nlmsg_end(skb, nlh); nla_put_failure: nlmsg_cancel(skb, nlh); return -EMSGSIZE; } static int getaddr_dumpit(struct sk_buff *skb, struct netlink_callback *cb) { struct phonet_device_list *pndevs; struct phonet_device *pnd; int dev_idx = 0, dev_start_idx = cb->args[0]; int addr_idx = 0, addr_start_idx = cb->args[1]; pndevs = phonet_device_list(sock_net(skb->sk)); rcu_read_lock(); list_for_each_entry_rcu(pnd, &pndevs->list, list) { u8 addr; if (dev_idx > dev_start_idx) addr_start_idx = 0; if (dev_idx++ < dev_start_idx) continue; addr_idx = 0; for_each_set_bit(addr, pnd->addrs, 64) { if (addr_idx++ < addr_start_idx) continue; if (fill_addr(skb, pnd->netdev, addr << 2, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, RTM_NEWADDR) < 0) goto out; } } out: rcu_read_unlock(); cb->args[0] = dev_idx; cb->args[1] = addr_idx; return skb->len; } /* Routes handling */ static int fill_route(struct sk_buff *skb, struct net_device *dev, u8 dst, u32 pid, u32 seq, 
int event) { struct rtmsg *rtm; struct nlmsghdr *nlh; nlh = nlmsg_put(skb, pid, seq, event, sizeof(*rtm), 0); if (nlh == NULL) return -EMSGSIZE; rtm = nlmsg_data(nlh); rtm->rtm_family = AF_PHONET; rtm->rtm_dst_len = 6; rtm->rtm_src_len = 0; rtm->rtm_tos = 0; rtm->rtm_table = RT_TABLE_MAIN; rtm->rtm_protocol = RTPROT_STATIC; rtm->rtm_scope = RT_SCOPE_UNIVERSE; rtm->rtm_type = RTN_UNICAST; rtm->rtm_flags = 0; NLA_PUT_U8(skb, RTA_DST, dst); NLA_PUT_U32(skb, RTA_OIF, dev->ifindex); return nlmsg_end(skb, nlh); nla_put_failure: nlmsg_cancel(skb, nlh); return -EMSGSIZE; } void rtm_phonet_notify(int event, struct net_device *dev, u8 dst) { struct sk_buff *skb; int err = -ENOBUFS; skb = nlmsg_new(NLMSG_ALIGN(sizeof(struct ifaddrmsg)) + nla_total_size(1) + nla_total_size(4), GFP_KERNEL); if (skb == NULL) goto errout; err = fill_route(skb, dev, dst, 0, 0, event); if (err < 0) { WARN_ON(err == -EMSGSIZE); kfree_skb(skb); goto errout; } rtnl_notify(skb, dev_net(dev), 0, RTNLGRP_PHONET_ROUTE, NULL, GFP_KERNEL); return; errout: rtnl_set_sk_err(dev_net(dev), RTNLGRP_PHONET_ROUTE, err); } static const struct nla_policy rtm_phonet_policy[RTA_MAX+1] = { [RTA_DST] = { .type = NLA_U8 }, [RTA_OIF] = { .type = NLA_U32 }, }; static int route_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *attr) { struct net *net = sock_net(skb->sk); struct nlattr *tb[RTA_MAX+1]; struct net_device *dev; struct rtmsg *rtm; int err; u8 dst; if (!capable(CAP_SYS_ADMIN)) return -EPERM; ASSERT_RTNL(); err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_phonet_policy); if (err < 0) return err; rtm = nlmsg_data(nlh); if (rtm->rtm_table != RT_TABLE_MAIN || rtm->rtm_type != RTN_UNICAST) return -EINVAL; if (tb[RTA_DST] == NULL || tb[RTA_OIF] == NULL) return -EINVAL; dst = nla_get_u8(tb[RTA_DST]); if (dst & 3) /* Phonet addresses only have 6 high-order bits */ return -EINVAL; dev = __dev_get_by_index(net, nla_get_u32(tb[RTA_OIF])); if (dev == NULL) return -ENODEV; if (nlh->nlmsg_type == RTM_NEWROUTE) err = 
phonet_route_add(dev, dst); else err = phonet_route_del(dev, dst); if (!err) rtm_phonet_notify(nlh->nlmsg_type, dev, dst); return err; } static int route_dumpit(struct sk_buff *skb, struct netlink_callback *cb) { struct net *net = sock_net(skb->sk); u8 addr, addr_idx = 0, addr_start_idx = cb->args[0]; rcu_read_lock(); for (addr = 0; addr < 64; addr++) { struct net_device *dev; dev = phonet_route_get_rcu(net, addr << 2); if (!dev) continue; if (addr_idx++ < addr_start_idx) continue; if (fill_route(skb, dev, addr << 2, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, RTM_NEWROUTE)) goto out; } out: rcu_read_unlock(); cb->args[0] = addr_idx; cb->args[1] = 0; return skb->len; } int __init phonet_netlink_register(void) { int err = __rtnl_register(PF_PHONET, RTM_NEWADDR, addr_doit, NULL, NULL); if (err) return err; /* Further __rtnl_register() cannot fail */ __rtnl_register(PF_PHONET, RTM_DELADDR, addr_doit, NULL, NULL); __rtnl_register(PF_PHONET, RTM_GETADDR, NULL, getaddr_dumpit, NULL); __rtnl_register(PF_PHONET, RTM_NEWROUTE, route_doit, NULL, NULL); __rtnl_register(PF_PHONET, RTM_DELROUTE, route_doit, NULL, NULL); __rtnl_register(PF_PHONET, RTM_GETROUTE, NULL, route_dumpit, NULL); return 0; }
gpl-2.0
franciscofranco/Grouper-3.1.10
drivers/video/console/fbcon_cw.c
8046
10778
/* * linux/drivers/video/console/fbcon_ud.c -- Software Rotation - 90 degrees * * Copyright (C) 2005 Antonino Daplas <adaplas @pol.net> * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive for * more details. */ #include <linux/module.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/fb.h> #include <linux/vt_kern.h> #include <linux/console.h> #include <asm/types.h> #include "fbcon.h" #include "fbcon_rotate.h" /* * Rotation 90 degrees */ static void cw_update_attr(u8 *dst, u8 *src, int attribute, struct vc_data *vc) { int i, j, offset = (vc->vc_font.height < 10) ? 1 : 2; int width = (vc->vc_font.height + 7) >> 3; u8 c, t = 0, msk = ~(0xff >> offset); for (i = 0; i < vc->vc_font.width; i++) { for (j = 0; j < width; j++) { c = *src; if (attribute & FBCON_ATTRIBUTE_UNDERLINE && !j) c |= msk; if (attribute & FBCON_ATTRIBUTE_BOLD && i) c |= *(src-width); if (attribute & FBCON_ATTRIBUTE_REVERSE) c = ~c; src++; *dst++ = c; t = c; } } } static void cw_bmove(struct vc_data *vc, struct fb_info *info, int sy, int sx, int dy, int dx, int height, int width) { struct fbcon_ops *ops = info->fbcon_par; struct fb_copyarea area; u32 vxres = GETVXRES(ops->p->scrollmode, info); area.sx = vxres - ((sy + height) * vc->vc_font.height); area.sy = sx * vc->vc_font.width; area.dx = vxres - ((dy + height) * vc->vc_font.height); area.dy = dx * vc->vc_font.width; area.width = height * vc->vc_font.height; area.height = width * vc->vc_font.width; info->fbops->fb_copyarea(info, &area); } static void cw_clear(struct vc_data *vc, struct fb_info *info, int sy, int sx, int height, int width) { struct fbcon_ops *ops = info->fbcon_par; struct fb_fillrect region; int bgshift = (vc->vc_hi_font_mask) ? 
13 : 12; u32 vxres = GETVXRES(ops->p->scrollmode, info); region.color = attr_bgcol_ec(bgshift,vc,info); region.dx = vxres - ((sy + height) * vc->vc_font.height); region.dy = sx * vc->vc_font.width; region.height = width * vc->vc_font.width; region.width = height * vc->vc_font.height; region.rop = ROP_COPY; info->fbops->fb_fillrect(info, &region); } static inline void cw_putcs_aligned(struct vc_data *vc, struct fb_info *info, const u16 *s, u32 attr, u32 cnt, u32 d_pitch, u32 s_pitch, u32 cellsize, struct fb_image *image, u8 *buf, u8 *dst) { struct fbcon_ops *ops = info->fbcon_par; u16 charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff; u32 idx = (vc->vc_font.height + 7) >> 3; u8 *src; while (cnt--) { src = ops->fontbuffer + (scr_readw(s++) & charmask)*cellsize; if (attr) { cw_update_attr(buf, src, attr, vc); src = buf; } if (likely(idx == 1)) __fb_pad_aligned_buffer(dst, d_pitch, src, idx, vc->vc_font.width); else fb_pad_aligned_buffer(dst, d_pitch, src, idx, vc->vc_font.width); dst += d_pitch * vc->vc_font.width; } info->fbops->fb_imageblit(info, image); } static void cw_putcs(struct vc_data *vc, struct fb_info *info, const unsigned short *s, int count, int yy, int xx, int fg, int bg) { struct fb_image image; struct fbcon_ops *ops = info->fbcon_par; u32 width = (vc->vc_font.height + 7)/8; u32 cellsize = width * vc->vc_font.width; u32 maxcnt = info->pixmap.size/cellsize; u32 scan_align = info->pixmap.scan_align - 1; u32 buf_align = info->pixmap.buf_align - 1; u32 cnt, pitch, size; u32 attribute = get_attribute(info, scr_readw(s)); u8 *dst, *buf = NULL; u32 vxres = GETVXRES(ops->p->scrollmode, info); if (!ops->fontbuffer) return; image.fg_color = fg; image.bg_color = bg; image.dx = vxres - ((yy + 1) * vc->vc_font.height); image.dy = xx * vc->vc_font.width; image.width = vc->vc_font.height; image.depth = 1; if (attribute) { buf = kmalloc(cellsize, GFP_KERNEL); if (!buf) return; } while (count) { if (count > maxcnt) cnt = maxcnt; else cnt = count; image.height = 
vc->vc_font.width * cnt; pitch = ((image.width + 7) >> 3) + scan_align; pitch &= ~scan_align; size = pitch * image.height + buf_align; size &= ~buf_align; dst = fb_get_buffer_offset(info, &info->pixmap, size); image.data = dst; cw_putcs_aligned(vc, info, s, attribute, cnt, pitch, width, cellsize, &image, buf, dst); image.dy += image.height; count -= cnt; s += cnt; } /* buf is always NULL except when in monochrome mode, so in this case it's a gain to check buf against NULL even though kfree() handles NULL pointers just fine */ if (unlikely(buf)) kfree(buf); } static void cw_clear_margins(struct vc_data *vc, struct fb_info *info, int bottom_only) { unsigned int cw = vc->vc_font.width; unsigned int ch = vc->vc_font.height; unsigned int rw = info->var.yres - (vc->vc_cols*cw); unsigned int bh = info->var.xres - (vc->vc_rows*ch); unsigned int rs = info->var.yres - rw; struct fb_fillrect region; int bgshift = (vc->vc_hi_font_mask) ? 13 : 12; region.color = attr_bgcol_ec(bgshift,vc,info); region.rop = ROP_COPY; if (rw && !bottom_only) { region.dx = 0; region.dy = info->var.yoffset + rs; region.height = rw; region.width = info->var.xres_virtual; info->fbops->fb_fillrect(info, &region); } if (bh) { region.dx = info->var.xoffset; region.dy = info->var.yoffset; region.height = info->var.yres; region.width = bh; info->fbops->fb_fillrect(info, &region); } } static void cw_cursor(struct vc_data *vc, struct fb_info *info, int mode, int softback_lines, int fg, int bg) { struct fb_cursor cursor; struct fbcon_ops *ops = info->fbcon_par; unsigned short charmask = vc->vc_hi_font_mask ? 
0x1ff : 0xff; int w = (vc->vc_font.height + 7) >> 3, c; int y = real_y(ops->p, vc->vc_y); int attribute, use_sw = (vc->vc_cursor_type & 0x10); int err = 1, dx, dy; char *src; u32 vxres = GETVXRES(ops->p->scrollmode, info); if (!ops->fontbuffer) return; cursor.set = 0; if (softback_lines) { if (y + softback_lines >= vc->vc_rows) { mode = CM_ERASE; ops->cursor_flash = 0; return; } else y += softback_lines; } c = scr_readw((u16 *) vc->vc_pos); attribute = get_attribute(info, c); src = ops->fontbuffer + ((c & charmask) * (w * vc->vc_font.width)); if (ops->cursor_state.image.data != src || ops->cursor_reset) { ops->cursor_state.image.data = src; cursor.set |= FB_CUR_SETIMAGE; } if (attribute) { u8 *dst; dst = kmalloc(w * vc->vc_font.width, GFP_ATOMIC); if (!dst) return; kfree(ops->cursor_data); ops->cursor_data = dst; cw_update_attr(dst, src, attribute, vc); src = dst; } if (ops->cursor_state.image.fg_color != fg || ops->cursor_state.image.bg_color != bg || ops->cursor_reset) { ops->cursor_state.image.fg_color = fg; ops->cursor_state.image.bg_color = bg; cursor.set |= FB_CUR_SETCMAP; } if (ops->cursor_state.image.height != vc->vc_font.width || ops->cursor_state.image.width != vc->vc_font.height || ops->cursor_reset) { ops->cursor_state.image.height = vc->vc_font.width; ops->cursor_state.image.width = vc->vc_font.height; cursor.set |= FB_CUR_SETSIZE; } dx = vxres - ((y * vc->vc_font.height) + vc->vc_font.height); dy = vc->vc_x * vc->vc_font.width; if (ops->cursor_state.image.dx != dx || ops->cursor_state.image.dy != dy || ops->cursor_reset) { ops->cursor_state.image.dx = dx; ops->cursor_state.image.dy = dy; cursor.set |= FB_CUR_SETPOS; } if (ops->cursor_state.hot.x || ops->cursor_state.hot.y || ops->cursor_reset) { ops->cursor_state.hot.x = cursor.hot.y = 0; cursor.set |= FB_CUR_SETHOT; } if (cursor.set & FB_CUR_SETSIZE || vc->vc_cursor_type != ops->p->cursor_shape || ops->cursor_state.mask == NULL || ops->cursor_reset) { char *tmp, *mask = kmalloc(w*vc->vc_font.width, 
GFP_ATOMIC); int cur_height, size, i = 0; int width = (vc->vc_font.width + 7)/8; if (!mask) return; tmp = kmalloc(width * vc->vc_font.height, GFP_ATOMIC); if (!tmp) { kfree(mask); return; } kfree(ops->cursor_state.mask); ops->cursor_state.mask = mask; ops->p->cursor_shape = vc->vc_cursor_type; cursor.set |= FB_CUR_SETSHAPE; switch (ops->p->cursor_shape & CUR_HWMASK) { case CUR_NONE: cur_height = 0; break; case CUR_UNDERLINE: cur_height = (vc->vc_font.height < 10) ? 1 : 2; break; case CUR_LOWER_THIRD: cur_height = vc->vc_font.height/3; break; case CUR_LOWER_HALF: cur_height = vc->vc_font.height >> 1; break; case CUR_TWO_THIRDS: cur_height = (vc->vc_font.height << 1)/3; break; case CUR_BLOCK: default: cur_height = vc->vc_font.height; break; } size = (vc->vc_font.height - cur_height) * width; while (size--) tmp[i++] = 0; size = cur_height * width; while (size--) tmp[i++] = 0xff; memset(mask, 0, w * vc->vc_font.width); rotate_cw(tmp, mask, vc->vc_font.width, vc->vc_font.height); kfree(tmp); } switch (mode) { case CM_ERASE: ops->cursor_state.enable = 0; break; case CM_DRAW: case CM_MOVE: default: ops->cursor_state.enable = (use_sw) ? 
0 : 1; break; } cursor.image.data = src; cursor.image.fg_color = ops->cursor_state.image.fg_color; cursor.image.bg_color = ops->cursor_state.image.bg_color; cursor.image.dx = ops->cursor_state.image.dx; cursor.image.dy = ops->cursor_state.image.dy; cursor.image.height = ops->cursor_state.image.height; cursor.image.width = ops->cursor_state.image.width; cursor.hot.x = ops->cursor_state.hot.x; cursor.hot.y = ops->cursor_state.hot.y; cursor.mask = ops->cursor_state.mask; cursor.enable = ops->cursor_state.enable; cursor.image.depth = 1; cursor.rop = ROP_XOR; if (info->fbops->fb_cursor) err = info->fbops->fb_cursor(info, &cursor); if (err) soft_cursor(info, &cursor); ops->cursor_reset = 0; } static int cw_update_start(struct fb_info *info) { struct fbcon_ops *ops = info->fbcon_par; u32 vxres = GETVXRES(ops->p->scrollmode, info); u32 xoffset; int err; xoffset = vxres - (info->var.xres + ops->var.yoffset); ops->var.yoffset = ops->var.xoffset; ops->var.xoffset = xoffset; err = fb_pan_display(info, &ops->var); ops->var.xoffset = info->var.xoffset; ops->var.yoffset = info->var.yoffset; ops->var.vmode = info->var.vmode; return err; } void fbcon_rotate_cw(struct fbcon_ops *ops) { ops->bmove = cw_bmove; ops->clear = cw_clear; ops->putcs = cw_putcs; ops->clear_margins = cw_clear_margins; ops->cursor = cw_cursor; ops->update_start = cw_update_start; } EXPORT_SYMBOL(fbcon_rotate_cw); MODULE_AUTHOR("Antonino Daplas <adaplas@pol.net>"); MODULE_DESCRIPTION("Console Rotation (90 degrees) Support"); MODULE_LICENSE("GPL");
gpl-2.0
somcom3x/android_kernel_samsung_msm8660-caf
drivers/net/wireless/ipw2x00/libipw_geo.c
9326
5482
/****************************************************************************** Copyright(c) 2005 Intel Corporation. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of version 2 of the GNU General Public License as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. The full GNU General Public License is included in this distribution in the file called LICENSE. Contact Information: Intel Linux Wireless <ilw@linux.intel.com> Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 ******************************************************************************/ #include <linux/compiler.h> #include <linux/errno.h> #include <linux/if_arp.h> #include <linux/in6.h> #include <linux/in.h> #include <linux/ip.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/proc_fs.h> #include <linux/skbuff.h> #include <linux/tcp.h> #include <linux/types.h> #include <linux/wireless.h> #include <linux/etherdevice.h> #include <asm/uaccess.h> #include "libipw.h" int libipw_is_valid_channel(struct libipw_device *ieee, u8 channel) { int i; /* Driver needs to initialize the geography map before using * these helper functions */ if (ieee->geo.bg_channels == 0 && ieee->geo.a_channels == 0) return 0; if (ieee->freq_band & LIBIPW_24GHZ_BAND) for (i = 0; i < ieee->geo.bg_channels; i++) /* NOTE: If G mode is currently supported but * this is a B only channel, we don't see it * as valid. 
*/ if ((ieee->geo.bg[i].channel == channel) && !(ieee->geo.bg[i].flags & LIBIPW_CH_INVALID) && (!(ieee->mode & IEEE_G) || !(ieee->geo.bg[i].flags & LIBIPW_CH_B_ONLY))) return LIBIPW_24GHZ_BAND; if (ieee->freq_band & LIBIPW_52GHZ_BAND) for (i = 0; i < ieee->geo.a_channels; i++) if ((ieee->geo.a[i].channel == channel) && !(ieee->geo.a[i].flags & LIBIPW_CH_INVALID)) return LIBIPW_52GHZ_BAND; return 0; } int libipw_channel_to_index(struct libipw_device *ieee, u8 channel) { int i; /* Driver needs to initialize the geography map before using * these helper functions */ if (ieee->geo.bg_channels == 0 && ieee->geo.a_channels == 0) return -1; if (ieee->freq_band & LIBIPW_24GHZ_BAND) for (i = 0; i < ieee->geo.bg_channels; i++) if (ieee->geo.bg[i].channel == channel) return i; if (ieee->freq_band & LIBIPW_52GHZ_BAND) for (i = 0; i < ieee->geo.a_channels; i++) if (ieee->geo.a[i].channel == channel) return i; return -1; } u32 libipw_channel_to_freq(struct libipw_device * ieee, u8 channel) { const struct libipw_channel * ch; /* Driver needs to initialize the geography map before using * these helper functions */ if (ieee->geo.bg_channels == 0 && ieee->geo.a_channels == 0) return 0; ch = libipw_get_channel(ieee, channel); if (!ch->channel) return 0; return ch->freq; } u8 libipw_freq_to_channel(struct libipw_device * ieee, u32 freq) { int i; /* Driver needs to initialize the geography map before using * these helper functions */ if (ieee->geo.bg_channels == 0 && ieee->geo.a_channels == 0) return 0; freq /= 100000; if (ieee->freq_band & LIBIPW_24GHZ_BAND) for (i = 0; i < ieee->geo.bg_channels; i++) if (ieee->geo.bg[i].freq == freq) return ieee->geo.bg[i].channel; if (ieee->freq_band & LIBIPW_52GHZ_BAND) for (i = 0; i < ieee->geo.a_channels; i++) if (ieee->geo.a[i].freq == freq) return ieee->geo.a[i].channel; return 0; } int libipw_set_geo(struct libipw_device *ieee, const struct libipw_geo *geo) { memcpy(ieee->geo.name, geo->name, 3); ieee->geo.name[3] = '\0'; ieee->geo.bg_channels 
= geo->bg_channels; ieee->geo.a_channels = geo->a_channels; memcpy(ieee->geo.bg, geo->bg, geo->bg_channels * sizeof(struct libipw_channel)); memcpy(ieee->geo.a, geo->a, ieee->geo.a_channels * sizeof(struct libipw_channel)); return 0; } const struct libipw_geo *libipw_get_geo(struct libipw_device *ieee) { return &ieee->geo; } u8 libipw_get_channel_flags(struct libipw_device * ieee, u8 channel) { int index = libipw_channel_to_index(ieee, channel); if (index == -1) return LIBIPW_CH_INVALID; if (channel <= LIBIPW_24GHZ_CHANNELS) return ieee->geo.bg[index].flags; return ieee->geo.a[index].flags; } static const struct libipw_channel bad_channel = { .channel = 0, .flags = LIBIPW_CH_INVALID, .max_power = 0, }; const struct libipw_channel *libipw_get_channel(struct libipw_device *ieee, u8 channel) { int index = libipw_channel_to_index(ieee, channel); if (index == -1) return &bad_channel; if (channel <= LIBIPW_24GHZ_CHANNELS) return &ieee->geo.bg[index]; return &ieee->geo.a[index]; } EXPORT_SYMBOL(libipw_get_channel); EXPORT_SYMBOL(libipw_get_channel_flags); EXPORT_SYMBOL(libipw_is_valid_channel); EXPORT_SYMBOL(libipw_freq_to_channel); EXPORT_SYMBOL(libipw_channel_to_freq); EXPORT_SYMBOL(libipw_channel_to_index); EXPORT_SYMBOL(libipw_set_geo); EXPORT_SYMBOL(libipw_get_geo);
gpl-2.0
yuzaipiaofei/android_kernel_cyanogen_msm8916
arch/x86/oprofile/op_model_ppro.c
9326
6048
/* * @file op_model_ppro.h * Family 6 perfmon and architectural perfmon MSR operations * * @remark Copyright 2002 OProfile authors * @remark Copyright 2008 Intel Corporation * @remark Read the file COPYING * * @author John Levon * @author Philippe Elie * @author Graydon Hoare * @author Andi Kleen * @author Robert Richter <robert.richter@amd.com> */ #include <linux/oprofile.h> #include <linux/slab.h> #include <asm/ptrace.h> #include <asm/msr.h> #include <asm/apic.h> #include <asm/nmi.h> #include "op_x86_model.h" #include "op_counter.h" static int num_counters = 2; static int counter_width = 32; #define MSR_PPRO_EVENTSEL_RESERVED ((0xFFFFFFFFULL<<32)|(1ULL<<21)) static u64 reset_value[OP_MAX_COUNTER]; static void ppro_shutdown(struct op_msrs const * const msrs) { int i; for (i = 0; i < num_counters; ++i) { if (!msrs->counters[i].addr) continue; release_perfctr_nmi(MSR_P6_PERFCTR0 + i); release_evntsel_nmi(MSR_P6_EVNTSEL0 + i); } } static int ppro_fill_in_addresses(struct op_msrs * const msrs) { int i; for (i = 0; i < num_counters; i++) { if (!reserve_perfctr_nmi(MSR_P6_PERFCTR0 + i)) goto fail; if (!reserve_evntsel_nmi(MSR_P6_EVNTSEL0 + i)) { release_perfctr_nmi(MSR_P6_PERFCTR0 + i); goto fail; } /* both registers must be reserved */ msrs->counters[i].addr = MSR_P6_PERFCTR0 + i; msrs->controls[i].addr = MSR_P6_EVNTSEL0 + i; continue; fail: if (!counter_config[i].enabled) continue; op_x86_warn_reserved(i); ppro_shutdown(msrs); return -EBUSY; } return 0; } static void ppro_setup_ctrs(struct op_x86_model_spec const *model, struct op_msrs const * const msrs) { u64 val; int i; if (cpu_has_arch_perfmon) { union cpuid10_eax eax; eax.full = cpuid_eax(0xa); /* * For Core2 (family 6, model 15), don't reset the * counter width: */ if (!(eax.split.version_id == 0 && __this_cpu_read(cpu_info.x86) == 6 && __this_cpu_read(cpu_info.x86_model) == 15)) { if (counter_width < eax.split.bit_width) counter_width = eax.split.bit_width; } } /* clear all counters */ for (i = 0; i < 
num_counters; ++i) { if (!msrs->controls[i].addr) continue; rdmsrl(msrs->controls[i].addr, val); if (val & ARCH_PERFMON_EVENTSEL_ENABLE) op_x86_warn_in_use(i); val &= model->reserved; wrmsrl(msrs->controls[i].addr, val); /* * avoid a false detection of ctr overflows in NMI * * handler */ wrmsrl(msrs->counters[i].addr, -1LL); } /* enable active counters */ for (i = 0; i < num_counters; ++i) { if (counter_config[i].enabled && msrs->counters[i].addr) { reset_value[i] = counter_config[i].count; wrmsrl(msrs->counters[i].addr, -reset_value[i]); rdmsrl(msrs->controls[i].addr, val); val &= model->reserved; val |= op_x86_get_ctrl(model, &counter_config[i]); wrmsrl(msrs->controls[i].addr, val); } else { reset_value[i] = 0; } } } static int ppro_check_ctrs(struct pt_regs * const regs, struct op_msrs const * const msrs) { u64 val; int i; for (i = 0; i < num_counters; ++i) { if (!reset_value[i]) continue; rdmsrl(msrs->counters[i].addr, val); if (val & (1ULL << (counter_width - 1))) continue; oprofile_add_sample(regs, i); wrmsrl(msrs->counters[i].addr, -reset_value[i]); } /* Only P6 based Pentium M need to re-unmask the apic vector but it * doesn't hurt other P6 variant */ apic_write(APIC_LVTPC, apic_read(APIC_LVTPC) & ~APIC_LVT_MASKED); /* We can't work out if we really handled an interrupt. We * might have caught a *second* counter just after overflowing * the interrupt for this counter then arrives * and we don't find a counter that's overflowed, so we * would return 0 and get dazed + confused. Instead we always * assume we found an overflow. This sucks. 
*/ return 1; } static void ppro_start(struct op_msrs const * const msrs) { u64 val; int i; for (i = 0; i < num_counters; ++i) { if (reset_value[i]) { rdmsrl(msrs->controls[i].addr, val); val |= ARCH_PERFMON_EVENTSEL_ENABLE; wrmsrl(msrs->controls[i].addr, val); } } } static void ppro_stop(struct op_msrs const * const msrs) { u64 val; int i; for (i = 0; i < num_counters; ++i) { if (!reset_value[i]) continue; rdmsrl(msrs->controls[i].addr, val); val &= ~ARCH_PERFMON_EVENTSEL_ENABLE; wrmsrl(msrs->controls[i].addr, val); } } struct op_x86_model_spec op_ppro_spec = { .num_counters = 2, .num_controls = 2, .reserved = MSR_PPRO_EVENTSEL_RESERVED, .fill_in_addresses = &ppro_fill_in_addresses, .setup_ctrs = &ppro_setup_ctrs, .check_ctrs = &ppro_check_ctrs, .start = &ppro_start, .stop = &ppro_stop, .shutdown = &ppro_shutdown }; /* * Architectural performance monitoring. * * Newer Intel CPUs (Core1+) have support for architectural * events described in CPUID 0xA. See the IA32 SDM Vol3b.18 for details. * The advantage of this is that it can be done without knowing about * the specific CPU. */ static void arch_perfmon_setup_counters(void) { union cpuid10_eax eax; eax.full = cpuid_eax(0xa); /* Workaround for BIOS bugs in 6/15. 
Taken from perfmon2 */ if (eax.split.version_id == 0 && __this_cpu_read(cpu_info.x86) == 6 && __this_cpu_read(cpu_info.x86_model) == 15) { eax.split.version_id = 2; eax.split.num_counters = 2; eax.split.bit_width = 40; } num_counters = min((int)eax.split.num_counters, OP_MAX_COUNTER); op_arch_perfmon_spec.num_counters = num_counters; op_arch_perfmon_spec.num_controls = num_counters; } static int arch_perfmon_init(struct oprofile_operations *ignore) { arch_perfmon_setup_counters(); return 0; } struct op_x86_model_spec op_arch_perfmon_spec = { .reserved = MSR_PPRO_EVENTSEL_RESERVED, .init = &arch_perfmon_init, /* num_counters/num_controls filled in at runtime */ .fill_in_addresses = &ppro_fill_in_addresses, /* user space does the cpuid check for available events */ .setup_ctrs = &ppro_setup_ctrs, .check_ctrs = &ppro_check_ctrs, .start = &ppro_start, .stop = &ppro_stop, .shutdown = &ppro_shutdown };
gpl-2.0
Ki113R/android_kernel_samsung_golden
drivers/mtd/maps/nettel.c
11886
11672
/****************************************************************************/ /* * nettel.c -- mappings for NETtel/SecureEdge/SnapGear (x86) boards. * * (C) Copyright 2000-2001, Greg Ungerer (gerg@snapgear.com) * (C) Copyright 2001-2002, SnapGear (www.snapgear.com) */ /****************************************************************************/ #include <linux/module.h> #include <linux/init.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/mtd/mtd.h> #include <linux/mtd/map.h> #include <linux/mtd/partitions.h> #include <linux/mtd/cfi.h> #include <linux/reboot.h> #include <linux/err.h> #include <linux/kdev_t.h> #include <linux/root_dev.h> #include <asm/io.h> /****************************************************************************/ #define INTEL_BUSWIDTH 1 #define AMD_WINDOW_MAXSIZE 0x00200000 #define AMD_BUSWIDTH 1 /* * PAR masks and shifts, assuming 64K pages. */ #define SC520_PAR_ADDR_MASK 0x00003fff #define SC520_PAR_ADDR_SHIFT 16 #define SC520_PAR_TO_ADDR(par) \ (((par)&SC520_PAR_ADDR_MASK) << SC520_PAR_ADDR_SHIFT) #define SC520_PAR_SIZE_MASK 0x01ffc000 #define SC520_PAR_SIZE_SHIFT 2 #define SC520_PAR_TO_SIZE(par) \ ((((par)&SC520_PAR_SIZE_MASK) << SC520_PAR_SIZE_SHIFT) + (64*1024)) #define SC520_PAR(cs, addr, size) \ ((cs) | \ ((((size)-(64*1024)) >> SC520_PAR_SIZE_SHIFT) & SC520_PAR_SIZE_MASK) | \ (((addr) >> SC520_PAR_ADDR_SHIFT) & SC520_PAR_ADDR_MASK)) #define SC520_PAR_BOOTCS 0x8a000000 #define SC520_PAR_ROMCS1 0xaa000000 #define SC520_PAR_ROMCS2 0xca000000 /* Cache disabled, 64K page */ static void *nettel_mmcrp = NULL; #ifdef CONFIG_MTD_CFI_INTELEXT static struct mtd_info *intel_mtd; #endif static struct mtd_info *amd_mtd; /****************************************************************************/ /****************************************************************************/ #ifdef CONFIG_MTD_CFI_INTELEXT static struct map_info nettel_intel_map = { .name = "SnapGear Intel", .size = 0, .bankwidth = INTEL_BUSWIDTH, }; static 
struct mtd_partition nettel_intel_partitions[] = { { .name = "SnapGear kernel", .offset = 0, .size = 0x000e0000 }, { .name = "SnapGear filesystem", .offset = 0x00100000, }, { .name = "SnapGear config", .offset = 0x000e0000, .size = 0x00020000 }, { .name = "SnapGear Intel", .offset = 0 }, { .name = "SnapGear BIOS Config", .offset = 0x007e0000, .size = 0x00020000 }, { .name = "SnapGear BIOS", .offset = 0x007e0000, .size = 0x00020000 }, }; #endif static struct map_info nettel_amd_map = { .name = "SnapGear AMD", .size = AMD_WINDOW_MAXSIZE, .bankwidth = AMD_BUSWIDTH, }; static struct mtd_partition nettel_amd_partitions[] = { { .name = "SnapGear BIOS config", .offset = 0x000e0000, .size = 0x00010000 }, { .name = "SnapGear BIOS", .offset = 0x000f0000, .size = 0x00010000 }, { .name = "SnapGear AMD", .offset = 0 }, { .name = "SnapGear high BIOS", .offset = 0x001f0000, .size = 0x00010000 } }; #define NUM_AMD_PARTITIONS ARRAY_SIZE(nettel_amd_partitions) /****************************************************************************/ #ifdef CONFIG_MTD_CFI_INTELEXT /* * Set the Intel flash back to read mode since some old boot * loaders don't. 
*/ static int nettel_reboot_notifier(struct notifier_block *nb, unsigned long val, void *v) { struct cfi_private *cfi = nettel_intel_map.fldrv_priv; unsigned long b; /* Make sure all FLASH chips are put back into read mode */ for (b = 0; (b < nettel_intel_partitions[3].size); b += 0x100000) { cfi_send_gen_cmd(0xff, 0x55, b, &nettel_intel_map, cfi, cfi->device_type, NULL); } return(NOTIFY_OK); } static struct notifier_block nettel_notifier_block = { nettel_reboot_notifier, NULL, 0 }; #endif /****************************************************************************/ static int __init nettel_init(void) { volatile unsigned long *amdpar; unsigned long amdaddr, maxsize; int num_amd_partitions=0; #ifdef CONFIG_MTD_CFI_INTELEXT volatile unsigned long *intel0par, *intel1par; unsigned long orig_bootcspar, orig_romcs1par; unsigned long intel0addr, intel0size; unsigned long intel1addr, intel1size; int intelboot, intel0cs, intel1cs; int num_intel_partitions; #endif int rc = 0; nettel_mmcrp = (void *) ioremap_nocache(0xfffef000, 4096); if (nettel_mmcrp == NULL) { printk("SNAPGEAR: failed to disable MMCR cache??\n"); return(-EIO); } /* Set CPU clock to be 33.000MHz */ *((unsigned char *) (nettel_mmcrp + 0xc64)) = 0x01; amdpar = (volatile unsigned long *) (nettel_mmcrp + 0xc4); #ifdef CONFIG_MTD_CFI_INTELEXT intelboot = 0; intel0cs = SC520_PAR_ROMCS1; intel0par = (volatile unsigned long *) (nettel_mmcrp + 0xc0); intel1cs = SC520_PAR_ROMCS2; intel1par = (volatile unsigned long *) (nettel_mmcrp + 0xbc); /* * Save the CS settings then ensure ROMCS1 and ROMCS2 are off, * otherwise they might clash with where we try to map BOOTCS. */ orig_bootcspar = *amdpar; orig_romcs1par = *intel0par; *intel0par = 0; *intel1par = 0; #endif /* * The first thing to do is determine if we have a separate * boot FLASH device. Typically this is a small (1 to 2MB) * AMD FLASH part. It seems that device size is about the * only way to tell if this is the case... 
*/ amdaddr = 0x20000000; maxsize = AMD_WINDOW_MAXSIZE; *amdpar = SC520_PAR(SC520_PAR_BOOTCS, amdaddr, maxsize); __asm__ ("wbinvd"); nettel_amd_map.phys = amdaddr; nettel_amd_map.virt = ioremap_nocache(amdaddr, maxsize); if (!nettel_amd_map.virt) { printk("SNAPGEAR: failed to ioremap() BOOTCS\n"); iounmap(nettel_mmcrp); return(-EIO); } simple_map_init(&nettel_amd_map); if ((amd_mtd = do_map_probe("jedec_probe", &nettel_amd_map))) { printk(KERN_NOTICE "SNAPGEAR: AMD flash device size = %dK\n", (int)(amd_mtd->size>>10)); amd_mtd->owner = THIS_MODULE; /* The high BIOS partition is only present for 2MB units */ num_amd_partitions = NUM_AMD_PARTITIONS; if (amd_mtd->size < AMD_WINDOW_MAXSIZE) num_amd_partitions--; /* Don't add the partition until after the primary INTEL's */ #ifdef CONFIG_MTD_CFI_INTELEXT /* * Map the Intel flash into memory after the AMD * It has to start on a multiple of maxsize. */ maxsize = SC520_PAR_TO_SIZE(orig_romcs1par); if (maxsize < (32 * 1024 * 1024)) maxsize = (32 * 1024 * 1024); intel0addr = amdaddr + maxsize; #endif } else { #ifdef CONFIG_MTD_CFI_INTELEXT /* INTEL boot FLASH */ intelboot++; if (!orig_romcs1par) { intel0cs = SC520_PAR_BOOTCS; intel0par = (volatile unsigned long *) (nettel_mmcrp + 0xc4); intel1cs = SC520_PAR_ROMCS1; intel1par = (volatile unsigned long *) (nettel_mmcrp + 0xc0); intel0addr = SC520_PAR_TO_ADDR(orig_bootcspar); maxsize = SC520_PAR_TO_SIZE(orig_bootcspar); } else { /* Kernel base is on ROMCS1, not BOOTCS */ intel0cs = SC520_PAR_ROMCS1; intel0par = (volatile unsigned long *) (nettel_mmcrp + 0xc0); intel1cs = SC520_PAR_BOOTCS; intel1par = (volatile unsigned long *) (nettel_mmcrp + 0xc4); intel0addr = SC520_PAR_TO_ADDR(orig_romcs1par); maxsize = SC520_PAR_TO_SIZE(orig_romcs1par); } /* Destroy useless AMD MTD mapping */ amd_mtd = NULL; iounmap(nettel_amd_map.virt); nettel_amd_map.virt = NULL; #else /* Only AMD flash supported */ rc = -ENXIO; goto out_unmap2; #endif } #ifdef CONFIG_MTD_CFI_INTELEXT /* * We have 
determined the INTEL FLASH configuration, so lets * go ahead and probe for them now. */ /* Set PAR to the maximum size */ if (maxsize < (32 * 1024 * 1024)) maxsize = (32 * 1024 * 1024); *intel0par = SC520_PAR(intel0cs, intel0addr, maxsize); /* Turn other PAR off so the first probe doesn't find it */ *intel1par = 0; /* Probe for the size of the first Intel flash */ nettel_intel_map.size = maxsize; nettel_intel_map.phys = intel0addr; nettel_intel_map.virt = ioremap_nocache(intel0addr, maxsize); if (!nettel_intel_map.virt) { printk("SNAPGEAR: failed to ioremap() ROMCS1\n"); rc = -EIO; goto out_unmap2; } simple_map_init(&nettel_intel_map); intel_mtd = do_map_probe("cfi_probe", &nettel_intel_map); if (!intel_mtd) { rc = -ENXIO; goto out_unmap1; } /* Set PAR to the detected size */ intel0size = intel_mtd->size; *intel0par = SC520_PAR(intel0cs, intel0addr, intel0size); /* * Map second Intel FLASH right after first. Set its size to the * same maxsize used for the first Intel FLASH. */ intel1addr = intel0addr + intel0size; *intel1par = SC520_PAR(intel1cs, intel1addr, maxsize); __asm__ ("wbinvd"); maxsize += intel0size; /* Delete the old map and probe again to do both chips */ map_destroy(intel_mtd); intel_mtd = NULL; iounmap(nettel_intel_map.virt); nettel_intel_map.size = maxsize; nettel_intel_map.virt = ioremap_nocache(intel0addr, maxsize); if (!nettel_intel_map.virt) { printk("SNAPGEAR: failed to ioremap() ROMCS1/2\n"); rc = -EIO; goto out_unmap2; } intel_mtd = do_map_probe("cfi_probe", &nettel_intel_map); if (! 
intel_mtd) { rc = -ENXIO; goto out_unmap1; } intel1size = intel_mtd->size - intel0size; if (intel1size > 0) { *intel1par = SC520_PAR(intel1cs, intel1addr, intel1size); __asm__ ("wbinvd"); } else { *intel1par = 0; } printk(KERN_NOTICE "SNAPGEAR: Intel flash device size = %lldKiB\n", (unsigned long long)(intel_mtd->size >> 10)); intel_mtd->owner = THIS_MODULE; num_intel_partitions = ARRAY_SIZE(nettel_intel_partitions); if (intelboot) { /* * Adjust offset and size of last boot partition. * Must allow for BIOS region at end of FLASH. */ nettel_intel_partitions[1].size = (intel0size + intel1size) - (1024*1024 + intel_mtd->erasesize); nettel_intel_partitions[3].size = intel0size + intel1size; nettel_intel_partitions[4].offset = (intel0size + intel1size) - intel_mtd->erasesize; nettel_intel_partitions[4].size = intel_mtd->erasesize; nettel_intel_partitions[5].offset = nettel_intel_partitions[4].offset; nettel_intel_partitions[5].size = nettel_intel_partitions[4].size; } else { /* No BIOS regions when AMD boot */ num_intel_partitions -= 2; } rc = mtd_device_register(intel_mtd, nettel_intel_partitions, num_intel_partitions); #endif if (amd_mtd) { rc = mtd_device_register(amd_mtd, nettel_amd_partitions, num_amd_partitions); } #ifdef CONFIG_MTD_CFI_INTELEXT register_reboot_notifier(&nettel_notifier_block); #endif return(rc); #ifdef CONFIG_MTD_CFI_INTELEXT out_unmap1: iounmap(nettel_intel_map.virt); #endif out_unmap2: iounmap(nettel_mmcrp); iounmap(nettel_amd_map.virt); return(rc); } /****************************************************************************/ static void __exit nettel_cleanup(void) { #ifdef CONFIG_MTD_CFI_INTELEXT unregister_reboot_notifier(&nettel_notifier_block); #endif if (amd_mtd) { mtd_device_unregister(amd_mtd); map_destroy(amd_mtd); } if (nettel_mmcrp) { iounmap(nettel_mmcrp); nettel_mmcrp = NULL; } if (nettel_amd_map.virt) { iounmap(nettel_amd_map.virt); nettel_amd_map.virt = NULL; } #ifdef CONFIG_MTD_CFI_INTELEXT if (intel_mtd) { 
mtd_device_unregister(intel_mtd); map_destroy(intel_mtd); } if (nettel_intel_map.virt) { iounmap(nettel_intel_map.virt); nettel_intel_map.virt = NULL; } #endif } /****************************************************************************/ module_init(nettel_init); module_exit(nettel_cleanup); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Greg Ungerer <gerg@snapgear.com>"); MODULE_DESCRIPTION("SnapGear/SecureEdge FLASH support"); /****************************************************************************/
gpl-2.0
mefody1971/android_kernel_samsung_SM-G355H_ANF1
mm/mprotect.c
111
10437
/* * mm/mprotect.c * * (C) Copyright 1994 Linus Torvalds * (C) Copyright 2002 Christoph Hellwig * * Address space accounting code <alan@lxorguk.ukuu.org.uk> * (C) Copyright 2002 Red Hat Inc, All Rights Reserved */ #include <linux/mm.h> #include <linux/hugetlb.h> #include <linux/shm.h> #include <linux/mman.h> #include <linux/fs.h> #include <linux/highmem.h> #include <linux/security.h> #include <linux/mempolicy.h> #include <linux/personality.h> #include <linux/syscalls.h> #include <linux/swap.h> #include <linux/swapops.h> #include <linux/mmu_notifier.h> #include <linux/migrate.h> #include <linux/perf_event.h> #include <asm/uaccess.h> #include <asm/pgtable.h> #include <asm/cacheflush.h> #include <asm/tlbflush.h> #ifndef pgprot_modify static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot) { return newprot; } #endif static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, unsigned long end, pgprot_t newprot, int dirty_accountable, int prot_numa, bool *ret_all_same_node) { struct mm_struct *mm = vma->vm_mm; pte_t *pte, oldpte; spinlock_t *ptl; unsigned long pages = 0; bool all_same_node = true; int last_nid = -1; pte = pte_offset_map_lock(mm, pmd, addr, &ptl); arch_enter_lazy_mmu_mode(); do { oldpte = *pte; if (pte_present(oldpte)) { pte_t ptent; bool updated = false; ptent = ptep_modify_prot_start(mm, addr, pte); if (!prot_numa) { ptent = pte_modify(ptent, newprot); updated = true; } else { struct page *page; page = vm_normal_page(vma, addr, oldpte); if (page) { int this_nid = page_to_nid(page); if (last_nid == -1) last_nid = this_nid; if (last_nid != this_nid) all_same_node = false; /* only check non-shared pages */ if (!pte_numa(oldpte) && page_mapcount(page) == 1) { ptent = pte_mknuma(ptent); updated = true; } } } /* * Avoid taking write faults for pages we know to be * dirty. 
*/ if (dirty_accountable && pte_dirty(ptent)) { ptent = pte_mkwrite(ptent); updated = true; } if (updated) pages++; ptep_modify_prot_commit(mm, addr, pte, ptent); } else if (IS_ENABLED(CONFIG_MIGRATION) && !pte_file(oldpte)) { swp_entry_t entry = pte_to_swp_entry(oldpte); if (is_write_migration_entry(entry)) { /* * A protection check is difficult so * just be safe and disable write */ make_migration_entry_read(&entry); set_pte_at(mm, addr, pte, swp_entry_to_pte(entry)); } pages++; } } while (pte++, addr += PAGE_SIZE, addr != end); arch_leave_lazy_mmu_mode(); pte_unmap_unlock(pte - 1, ptl); *ret_all_same_node = all_same_node; return pages; } #ifdef CONFIG_NUMA_BALANCING static inline void change_pmd_protnuma(struct mm_struct *mm, unsigned long addr, pmd_t *pmd) { spin_lock(&mm->page_table_lock); set_pmd_at(mm, addr & PMD_MASK, pmd, pmd_mknuma(*pmd)); spin_unlock(&mm->page_table_lock); } #else static inline void change_pmd_protnuma(struct mm_struct *mm, unsigned long addr, pmd_t *pmd) { BUG(); } #endif /* CONFIG_NUMA_BALANCING */ static inline unsigned long change_pmd_range(struct vm_area_struct *vma, pud_t *pud, unsigned long addr, unsigned long end, pgprot_t newprot, int dirty_accountable, int prot_numa) { pmd_t *pmd; unsigned long next; unsigned long pages = 0; bool all_same_node; pmd = pmd_offset(pud, addr); do { next = pmd_addr_end(addr, end); if (pmd_trans_huge(*pmd)) { if (next - addr != HPAGE_PMD_SIZE) split_huge_page_pmd(vma, addr, pmd); else if (change_huge_pmd(vma, pmd, addr, newprot, prot_numa)) { pages += HPAGE_PMD_NR; continue; } /* fall through */ } if (pmd_none_or_clear_bad(pmd)) continue; pages += change_pte_range(vma, pmd, addr, next, newprot, dirty_accountable, prot_numa, &all_same_node); /* * If we are changing protections for NUMA hinting faults then * set pmd_numa if the examined pages were all on the same * node. 
This allows a regular PMD to be handled as one fault * and effectively batches the taking of the PTL */ if (prot_numa && all_same_node) change_pmd_protnuma(vma->vm_mm, addr, pmd); } while (pmd++, addr = next, addr != end); return pages; } static inline unsigned long change_pud_range(struct vm_area_struct *vma, pgd_t *pgd, unsigned long addr, unsigned long end, pgprot_t newprot, int dirty_accountable, int prot_numa) { pud_t *pud; unsigned long next; unsigned long pages = 0; pud = pud_offset(pgd, addr); do { next = pud_addr_end(addr, end); if (pud_none_or_clear_bad(pud)) continue; pages += change_pmd_range(vma, pud, addr, next, newprot, dirty_accountable, prot_numa); } while (pud++, addr = next, addr != end); return pages; } static unsigned long change_protection_range(struct vm_area_struct *vma, unsigned long addr, unsigned long end, pgprot_t newprot, int dirty_accountable, int prot_numa) { struct mm_struct *mm = vma->vm_mm; pgd_t *pgd; unsigned long next; unsigned long start = addr; unsigned long pages = 0; BUG_ON(addr >= end); pgd = pgd_offset(mm, addr); flush_cache_range(vma, addr, end); do { next = pgd_addr_end(addr, end); if (pgd_none_or_clear_bad(pgd)) continue; pages += change_pud_range(vma, pgd, addr, next, newprot, dirty_accountable, prot_numa); } while (pgd++, addr = next, addr != end); /* Only flush the TLB if we actually modified any entries: */ if (pages) flush_tlb_range(vma, start, end); return pages; } unsigned long change_protection(struct vm_area_struct *vma, unsigned long start, unsigned long end, pgprot_t newprot, int dirty_accountable, int prot_numa) { struct mm_struct *mm = vma->vm_mm; unsigned long pages; mmu_notifier_invalidate_range_start(mm, start, end); if (is_vm_hugetlb_page(vma)) pages = hugetlb_change_protection(vma, start, end, newprot); else pages = change_protection_range(vma, start, end, newprot, dirty_accountable, prot_numa); mmu_notifier_invalidate_range_end(mm, start, end); return pages; } int mprotect_fixup(struct vm_area_struct 
*vma, struct vm_area_struct **pprev, unsigned long start, unsigned long end, unsigned long newflags) { struct mm_struct *mm = vma->vm_mm; unsigned long oldflags = vma->vm_flags; long nrpages = (end - start) >> PAGE_SHIFT; unsigned long charged = 0; pgoff_t pgoff; int error; int dirty_accountable = 0; if (newflags == oldflags) { *pprev = vma; return 0; } /* * If we make a private mapping writable we increase our commit; * but (without finer accounting) cannot reduce our commit if we * make it unwritable again. hugetlb mapping were accounted for * even if read-only so there is no need to account for them here */ if (newflags & VM_WRITE) { if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_HUGETLB| VM_SHARED|VM_NORESERVE))) { charged = nrpages; if (security_vm_enough_memory_mm(mm, charged)) return -ENOMEM; newflags |= VM_ACCOUNT; } } /* * First try to merge with previous and/or next vma. */ pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT); *pprev = vma_merge(mm, *pprev, start, end, newflags, vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma), vma_get_anon_name(vma)); if (*pprev) { vma = *pprev; goto success; } *pprev = vma; if (start != vma->vm_start) { error = split_vma(mm, vma, start, 1); if (error) goto fail; } if (end != vma->vm_end) { error = split_vma(mm, vma, end, 0); if (error) goto fail; } success: /* * vm_flags and vm_page_prot are protected by the mmap_sem * held in write mode. 
*/ vma->vm_flags = newflags; vma->vm_page_prot = pgprot_modify(vma->vm_page_prot, vm_get_page_prot(newflags)); if (vma_wants_writenotify(vma)) { vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED); dirty_accountable = 1; } change_protection(vma, start, end, vma->vm_page_prot, dirty_accountable, 0); vm_stat_account(mm, oldflags, vma->vm_file, -nrpages); vm_stat_account(mm, newflags, vma->vm_file, nrpages); perf_event_mmap(vma); return 0; fail: vm_unacct_memory(charged); return error; } SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len, unsigned long, prot) { unsigned long vm_flags, nstart, end, tmp, reqprot; struct vm_area_struct *vma, *prev; int error = -EINVAL; const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP); prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP); if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */ return -EINVAL; if (start & ~PAGE_MASK) return -EINVAL; if (!len) return 0; len = PAGE_ALIGN(len); end = start + len; if (end <= start) return -ENOMEM; if (!arch_validate_prot(prot)) return -EINVAL; reqprot = prot; /* * Does the application expect PROT_READ to imply PROT_EXEC: */ if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC)) prot |= PROT_EXEC; vm_flags = calc_vm_prot_bits(prot); down_write(&current->mm->mmap_sem); vma = find_vma(current->mm, start); error = -ENOMEM; if (!vma) goto out; prev = vma->vm_prev; if (unlikely(grows & PROT_GROWSDOWN)) { if (vma->vm_start >= end) goto out; start = vma->vm_start; error = -EINVAL; if (!(vma->vm_flags & VM_GROWSDOWN)) goto out; } else { if (vma->vm_start > start) goto out; if (unlikely(grows & PROT_GROWSUP)) { end = vma->vm_end; error = -EINVAL; if (!(vma->vm_flags & VM_GROWSUP)) goto out; } } if (start > vma->vm_start) prev = vma; for (nstart = start ; ; ) { unsigned long newflags; /* Here we know that vma->vm_start <= nstart < vma->vm_end. 
*/ newflags = vm_flags; newflags |= (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC)); /* newflags >> 4 shift VM_MAY% in place of VM_% */ if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) { error = -EACCES; goto out; } error = security_file_mprotect(vma, reqprot, prot); if (error) goto out; tmp = vma->vm_end; if (tmp > end) tmp = end; error = mprotect_fixup(vma, &prev, nstart, tmp, newflags); if (error) goto out; nstart = tmp; if (nstart < prev->vm_end) nstart = prev->vm_end; if (nstart >= end) goto out; vma = prev->vm_next; if (!vma || vma->vm_start != nstart) { error = -ENOMEM; goto out; } } out: up_write(&current->mm->mmap_sem); return error; }
gpl-2.0
psndna88/AGNI-pureSTOCK
drivers/net/wireless/bcmdhd/bcmevent.c
367
5351
/* * bcmevent read-only data shared by kernel or app layers * * Copyright (C) 1999-2014, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you * under the terms of the GNU General Public License version 2 (the "GPL"), * available at http://www.broadcom.com/licenses/GPLv2.php, with the * following added to such license: * * As a special exception, the copyright holders of this software give you * permission to link this software with independent modules, and to copy and * distribute the resulting executable under terms of your choice, provided that * you also meet, for each linked independent module, the terms and conditions of * the license of that module. An independent module is a module which is not * derived from this software. The special exception does not apply to any * modifications of the software. * * Notwithstanding the above, under no circumstances may you combine this * software in any way with any other Broadcom software provided under a license * other than the GPL, without Broadcom's express prior written consent. 
* $Id: bcmevent.c 440870 2013-12-04 05:23:45Z $ */ #include <typedefs.h> #include <bcmutils.h> #include <proto/ethernet.h> #include <proto/bcmeth.h> #include <proto/bcmevent.h> /* Use the actual name for event tracing */ #define BCMEVENT_NAME(_event) {(_event), #_event} const bcmevent_name_t bcmevent_names[] = { BCMEVENT_NAME(WLC_E_SET_SSID), BCMEVENT_NAME(WLC_E_JOIN), BCMEVENT_NAME(WLC_E_START), BCMEVENT_NAME(WLC_E_AUTH), BCMEVENT_NAME(WLC_E_AUTH_IND), BCMEVENT_NAME(WLC_E_DEAUTH), BCMEVENT_NAME(WLC_E_DEAUTH_IND), BCMEVENT_NAME(WLC_E_ASSOC), BCMEVENT_NAME(WLC_E_ASSOC_IND), BCMEVENT_NAME(WLC_E_REASSOC), BCMEVENT_NAME(WLC_E_REASSOC_IND), BCMEVENT_NAME(WLC_E_DISASSOC), BCMEVENT_NAME(WLC_E_DISASSOC_IND), BCMEVENT_NAME(WLC_E_QUIET_START), BCMEVENT_NAME(WLC_E_QUIET_END), BCMEVENT_NAME(WLC_E_BEACON_RX), BCMEVENT_NAME(WLC_E_LINK), BCMEVENT_NAME(WLC_E_MIC_ERROR), BCMEVENT_NAME(WLC_E_NDIS_LINK), BCMEVENT_NAME(WLC_E_ROAM), BCMEVENT_NAME(WLC_E_TXFAIL), BCMEVENT_NAME(WLC_E_PMKID_CACHE), BCMEVENT_NAME(WLC_E_RETROGRADE_TSF), BCMEVENT_NAME(WLC_E_PRUNE), BCMEVENT_NAME(WLC_E_AUTOAUTH), BCMEVENT_NAME(WLC_E_EAPOL_MSG), BCMEVENT_NAME(WLC_E_SCAN_COMPLETE), BCMEVENT_NAME(WLC_E_ADDTS_IND), BCMEVENT_NAME(WLC_E_DELTS_IND), BCMEVENT_NAME(WLC_E_BCNSENT_IND), BCMEVENT_NAME(WLC_E_BCNRX_MSG), BCMEVENT_NAME(WLC_E_BCNLOST_MSG), BCMEVENT_NAME(WLC_E_ROAM_PREP), BCMEVENT_NAME(WLC_E_PFN_NET_FOUND), BCMEVENT_NAME(WLC_E_PFN_NET_LOST), #if defined(IBSS_PEER_DISCOVERY_EVENT) BCMEVENT_NAME(WLC_E_IBSS_ASSOC), #endif /* defined(IBSS_PEER_DISCOVERY_EVENT) */ BCMEVENT_NAME(WLC_E_RADIO), BCMEVENT_NAME(WLC_E_PSM_WATCHDOG), BCMEVENT_NAME(WLC_E_PROBREQ_MSG), BCMEVENT_NAME(WLC_E_SCAN_CONFIRM_IND), BCMEVENT_NAME(WLC_E_PSK_SUP), BCMEVENT_NAME(WLC_E_COUNTRY_CODE_CHANGED), BCMEVENT_NAME(WLC_E_EXCEEDED_MEDIUM_TIME), BCMEVENT_NAME(WLC_E_ICV_ERROR), BCMEVENT_NAME(WLC_E_UNICAST_DECODE_ERROR), BCMEVENT_NAME(WLC_E_MULTICAST_DECODE_ERROR), BCMEVENT_NAME(WLC_E_TRACE), #ifdef WLBTAMP BCMEVENT_NAME(WLC_E_BTA_HCI_EVENT), #endif 
BCMEVENT_NAME(WLC_E_IF), #ifdef WLP2P BCMEVENT_NAME(WLC_E_P2P_DISC_LISTEN_COMPLETE), #endif BCMEVENT_NAME(WLC_E_RSSI), BCMEVENT_NAME(WLC_E_PFN_SCAN_COMPLETE), BCMEVENT_NAME(WLC_E_EXTLOG_MSG), #ifdef WIFI_ACT_FRAME BCMEVENT_NAME(WLC_E_ACTION_FRAME), BCMEVENT_NAME(WLC_E_ACTION_FRAME_RX), BCMEVENT_NAME(WLC_E_ACTION_FRAME_COMPLETE), #endif #ifdef BCMWAPI_WAI BCMEVENT_NAME(WLC_E_WAI_STA_EVENT), BCMEVENT_NAME(WLC_E_WAI_MSG), #endif /* BCMWAPI_WAI */ BCMEVENT_NAME(WLC_E_ESCAN_RESULT), BCMEVENT_NAME(WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE), #ifdef WLP2P BCMEVENT_NAME(WLC_E_PROBRESP_MSG), BCMEVENT_NAME(WLC_E_P2P_PROBREQ_MSG), #endif #ifdef PROP_TXSTATUS BCMEVENT_NAME(WLC_E_FIFO_CREDIT_MAP), #endif BCMEVENT_NAME(WLC_E_WAKE_EVENT), BCMEVENT_NAME(WLC_E_DCS_REQUEST), BCMEVENT_NAME(WLC_E_RM_COMPLETE), #ifdef WLMEDIA_HTSF BCMEVENT_NAME(WLC_E_HTSFSYNC), #endif BCMEVENT_NAME(WLC_E_OVERLAY_REQ), BCMEVENT_NAME(WLC_E_CSA_COMPLETE_IND), BCMEVENT_NAME(WLC_E_EXCESS_PM_WAKE_EVENT), BCMEVENT_NAME(WLC_E_PFN_SCAN_NONE), BCMEVENT_NAME(WLC_E_PFN_SCAN_ALLGONE), #ifdef SOFTAP BCMEVENT_NAME(WLC_E_GTK_PLUMBED), #endif BCMEVENT_NAME(WLC_E_ASSOC_REQ_IE), BCMEVENT_NAME(WLC_E_ASSOC_RESP_IE), BCMEVENT_NAME(WLC_E_ACTION_FRAME_RX_NDIS), BCMEVENT_NAME(WLC_E_BEACON_FRAME_RX), #ifdef WLTDLS BCMEVENT_NAME(WLC_E_TDLS_PEER_EVENT), #endif /* WLTDLS */ BCMEVENT_NAME(WLC_E_NATIVE), #ifdef WLPKTDLYSTAT BCMEVENT_NAME(WLC_E_PKTDELAY_IND), #endif /* WLPKTDLYSTAT */ BCMEVENT_NAME(WLC_E_SERVICE_FOUND), BCMEVENT_NAME(WLC_E_GAS_FRAGMENT_RX), BCMEVENT_NAME(WLC_E_GAS_COMPLETE), BCMEVENT_NAME(WLC_E_P2PO_ADD_DEVICE), BCMEVENT_NAME(WLC_E_P2PO_DEL_DEVICE), #ifdef WLWNM BCMEVENT_NAME(WLC_E_WNM_STA_SLEEP), #endif /* WLWNM */ #if defined(WL_PROXDETECT) BCMEVENT_NAME(WLC_E_PROXD), #endif BCMEVENT_NAME(WLC_E_CCA_CHAN_QUAL), BCMEVENT_NAME(WLC_E_BSSID), #ifdef PROP_TXSTATUS BCMEVENT_NAME(WLC_E_BCMC_CREDIT_SUPPORT), #endif BCMEVENT_NAME(WLC_E_TXFAIL_THRESH), #ifdef WLAIBSS BCMEVENT_NAME(WLC_E_AIBSS_TXFAIL), #endif /* WLAIBSS */ }; const 
int bcmevent_names_size = ARRAYSIZE(bcmevent_names);
gpl-2.0
bndmag/linux
tools/testing/selftests/timers/nanosleep.c
623
4147
/* Make sure timers don't return early * by: john stultz (johnstul@us.ibm.com) * John Stultz (john.stultz@linaro.org) * (C) Copyright IBM 2012 * (C) Copyright Linaro 2013 2015 * Licensed under the GPLv2 * * To build: * $ gcc nanosleep.c -o nanosleep -lrt * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <stdio.h> #include <stdlib.h> #include <time.h> #include <sys/time.h> #include <sys/timex.h> #include <string.h> #include <signal.h> #ifdef KTEST #include "../kselftest.h" #else static inline int ksft_exit_pass(void) { exit(0); } static inline int ksft_exit_fail(void) { exit(1); } #endif #define NSEC_PER_SEC 1000000000ULL #define CLOCK_REALTIME 0 #define CLOCK_MONOTONIC 1 #define CLOCK_PROCESS_CPUTIME_ID 2 #define CLOCK_THREAD_CPUTIME_ID 3 #define CLOCK_MONOTONIC_RAW 4 #define CLOCK_REALTIME_COARSE 5 #define CLOCK_MONOTONIC_COARSE 6 #define CLOCK_BOOTTIME 7 #define CLOCK_REALTIME_ALARM 8 #define CLOCK_BOOTTIME_ALARM 9 #define CLOCK_HWSPECIFIC 10 #define CLOCK_TAI 11 #define NR_CLOCKIDS 12 #define UNSUPPORTED 0xf00f char *clockstring(int clockid) { switch (clockid) { case CLOCK_REALTIME: return "CLOCK_REALTIME"; case CLOCK_MONOTONIC: return "CLOCK_MONOTONIC"; case CLOCK_PROCESS_CPUTIME_ID: return "CLOCK_PROCESS_CPUTIME_ID"; case CLOCK_THREAD_CPUTIME_ID: return "CLOCK_THREAD_CPUTIME_ID"; case CLOCK_MONOTONIC_RAW: return "CLOCK_MONOTONIC_RAW"; case CLOCK_REALTIME_COARSE: return "CLOCK_REALTIME_COARSE"; case CLOCK_MONOTONIC_COARSE: return "CLOCK_MONOTONIC_COARSE"; case CLOCK_BOOTTIME: return "CLOCK_BOOTTIME"; case 
CLOCK_REALTIME_ALARM: return "CLOCK_REALTIME_ALARM"; case CLOCK_BOOTTIME_ALARM: return "CLOCK_BOOTTIME_ALARM"; case CLOCK_TAI: return "CLOCK_TAI"; }; return "UNKNOWN_CLOCKID"; } /* returns 1 if a <= b, 0 otherwise */ static inline int in_order(struct timespec a, struct timespec b) { if (a.tv_sec < b.tv_sec) return 1; if (a.tv_sec > b.tv_sec) return 0; if (a.tv_nsec > b.tv_nsec) return 0; return 1; } struct timespec timespec_add(struct timespec ts, unsigned long long ns) { ts.tv_nsec += ns; while (ts.tv_nsec >= NSEC_PER_SEC) { ts.tv_nsec -= NSEC_PER_SEC; ts.tv_sec++; } return ts; } int nanosleep_test(int clockid, long long ns) { struct timespec now, target, rel; /* First check abs time */ if (clock_gettime(clockid, &now)) return UNSUPPORTED; target = timespec_add(now, ns); if (clock_nanosleep(clockid, TIMER_ABSTIME, &target, NULL)) return UNSUPPORTED; clock_gettime(clockid, &now); if (!in_order(target, now)) return -1; /* Second check reltime */ clock_gettime(clockid, &now); rel.tv_sec = 0; rel.tv_nsec = 0; rel = timespec_add(rel, ns); target = timespec_add(now, ns); clock_nanosleep(clockid, 0, &rel, NULL); clock_gettime(clockid, &now); if (!in_order(target, now)) return -1; return 0; } int main(int argc, char **argv) { long long length; int clockid, ret; for (clockid = CLOCK_REALTIME; clockid < NR_CLOCKIDS; clockid++) { /* Skip cputime clockids since nanosleep won't increment cputime */ if (clockid == CLOCK_PROCESS_CPUTIME_ID || clockid == CLOCK_THREAD_CPUTIME_ID || clockid == CLOCK_HWSPECIFIC) continue; printf("Nanosleep %-31s ", clockstring(clockid)); length = 10; while (length <= (NSEC_PER_SEC * 10)) { ret = nanosleep_test(clockid, length); if (ret == UNSUPPORTED) { printf("[UNSUPPORTED]\n"); goto next; } if (ret < 0) { printf("[FAILED]\n"); return ksft_exit_fail(); } length *= 100; } printf("[OK]\n"); next: ret = 0; } return ksft_exit_pass(); }
gpl-2.0
wujichang/linux
drivers/net/ethernet/sfc/nic.c
1647
15422
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2013 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/cpu_rmap.h>
#include "net_driver.h"
#include "bitfield.h"
#include "efx.h"
#include "nic.h"
#include "ef10_regs.h"
#include "farch_regs.h"
#include "io.h"
#include "workarounds.h"

/**************************************************************************
 *
 * Generic buffer handling
 * These buffers are used for interrupt status, MAC stats, etc.
 *
 **************************************************************************/

/* Allocate a zeroed DMA-coherent buffer of @len bytes and record its
 * address/length in @buffer.  Returns 0 or -ENOMEM.
 */
int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
			 unsigned int len, gfp_t gfp_flags)
{
	buffer->addr = dma_zalloc_coherent(&efx->pci_dev->dev, len,
					   &buffer->dma_addr, gfp_flags);
	if (!buffer->addr)
		return -ENOMEM;
	buffer->len = len;
	return 0;
}

/* Free a buffer from efx_nic_alloc_buffer().  Safe to call on a buffer
 * that was never allocated (addr == NULL), and idempotent because addr
 * is cleared after freeing.
 */
void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer)
{
	if (buffer->addr) {
		dma_free_coherent(&efx->pci_dev->dev, buffer->len,
				  buffer->addr, buffer->dma_addr);
		buffer->addr = NULL;
	}
}

/* Check whether an event is present in the eventq at the current
 * read pointer.  Only useful for self-test.
 */
bool efx_nic_event_present(struct efx_channel *channel)
{
	return efx_event_present(efx_event(channel, channel->eventq_read_ptr));
}

/* Self-test: arm the "which CPU saw the test event" sentinel to -1 and
 * ask the NIC type to generate a test event.  The write barrier orders
 * the sentinel store before the event is triggered.
 */
void efx_nic_event_test_start(struct efx_channel *channel)
{
	channel->event_test_cpu = -1;
	smp_wmb();
	channel->efx->type->ev_test_generate(channel);
}

/* Self-test: same pattern as above but for a test interrupt. */
void efx_nic_irq_test_start(struct efx_nic *efx)
{
	efx->last_irq_cpu = -1;
	smp_wmb();
	efx->type->irq_test_generate(efx);
}

/* Hook interrupt handler(s)
 * Try MSI and then legacy interrupts.
 */
int efx_nic_init_interrupt(struct efx_nic *efx)
{
	struct efx_channel *channel;
	unsigned int n_irqs;
	int rc;

	if (!EFX_INT_MODE_USE_MSI(efx)) {
		rc = request_irq(efx->legacy_irq,
				 efx->type->irq_handle_legacy, IRQF_SHARED,
				 efx->name, efx);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "failed to hook legacy IRQ %d\n",
				  efx->pci_dev->irq);
			goto fail1;
		}
		return 0;
	}

#ifdef CONFIG_RFS_ACCEL
	/* With MSI-X, allocate the CPU reverse-map used by accelerated RFS
	 * to steer flows to the CPU handling each RX channel's IRQ.
	 */
	if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
		efx->net_dev->rx_cpu_rmap =
			alloc_irq_cpu_rmap(efx->n_rx_channels);
		if (!efx->net_dev->rx_cpu_rmap) {
			rc = -ENOMEM;
			goto fail1;
		}
	}
#endif

	/* Hook MSI or MSI-X interrupt */
	n_irqs = 0;
	efx_for_each_channel(channel, efx) {
		rc = request_irq(channel->irq, efx->type->irq_handle_msi,
				 IRQF_PROBE_SHARED, /* Not shared */
				 efx->msi_context[channel->channel].name,
				 &efx->msi_context[channel->channel]);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "failed to hook IRQ %d\n", channel->irq);
			goto fail2;
		}
		++n_irqs;

#ifdef CONFIG_RFS_ACCEL
		if (efx->interrupt_mode == EFX_INT_MODE_MSIX &&
		    channel->channel < efx->n_rx_channels) {
			rc = irq_cpu_rmap_add(efx->net_dev->rx_cpu_rmap,
					      channel->irq);
			if (rc)
				goto fail2;
		}
#endif
	}

	return 0;

 fail2:
#ifdef CONFIG_RFS_ACCEL
	free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
	efx->net_dev->rx_cpu_rmap = NULL;
#endif
	/* Unwind only the n_irqs IRQs that were successfully requested. */
	efx_for_each_channel(channel, efx) {
		if (n_irqs-- == 0)
			break;
		free_irq(channel->irq, &efx->msi_context[channel->channel]);
	}
 fail1:
	return rc;
}

/* Release the IRQs and RFS reverse-map acquired by
 * efx_nic_init_interrupt().
 */
void efx_nic_fini_interrupt(struct efx_nic *efx)
{
	struct efx_channel *channel;

#ifdef CONFIG_RFS_ACCEL
	free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
	efx->net_dev->rx_cpu_rmap = NULL;
#endif

	if (EFX_INT_MODE_USE_MSI(efx)) {
		/* Disable MSI/MSI-X interrupts */
		efx_for_each_channel(channel, efx)
			free_irq(channel->irq,
				 &efx->msi_context[channel->channel]);
	} else {
		/* Disable legacy interrupt */
		free_irq(efx->legacy_irq, efx);
	}
}

/* Register dump */

#define REGISTER_REVISION_FA	1
#define REGISTER_REVISION_FB	2
#define REGISTER_REVISION_FC	3
#define REGISTER_REVISION_FZ	3	/* last Falcon arch revision */
#define REGISTER_REVISION_ED	4
#define REGISTER_REVISION_EZ	4	/* latest EF10 revision */

/* One dumpable register: its offset and the inclusive revision range in
 * which it exists.  Packed into bitfields to keep the tables small.
 */
struct efx_nic_reg {
	u32 offset:24;
	u32 min_revision:3, max_revision:3;
};

/* Build an efx_nic_reg entry by pasting together the register-name macro
 * (e.g. FR_AZ_<name>) and the revision-range constants above.
 */
#define REGISTER(name, arch, min_rev, max_rev) {			\
	arch ## R_ ## min_rev ## max_rev ## _ ## name,			\
	REGISTER_REVISION_ ## arch ## min_rev,				\
	REGISTER_REVISION_ ## arch ## max_rev				\
}
#define REGISTER_AA(name) REGISTER(name, F, A, A)
#define REGISTER_AB(name) REGISTER(name, F, A, B)
#define REGISTER_AZ(name) REGISTER(name, F, A, Z)
#define REGISTER_BB(name) REGISTER(name, F, B, B)
#define REGISTER_BZ(name) REGISTER(name, F, B, Z)
#define REGISTER_CZ(name) REGISTER(name, F, C, Z)
#define REGISTER_DZ(name) REGISTER(name, E, D, Z)

static const struct efx_nic_reg efx_nic_regs[] = {
	REGISTER_AZ(ADR_REGION),
	REGISTER_AZ(INT_EN_KER),
	REGISTER_BZ(INT_EN_CHAR),
	REGISTER_AZ(INT_ADR_KER),
	REGISTER_BZ(INT_ADR_CHAR),
	/* INT_ACK_KER is WO */
	/* INT_ISR0 is RC */
	REGISTER_AZ(HW_INIT),
	REGISTER_CZ(USR_EV_CFG),
	REGISTER_AB(EE_SPI_HCMD),
	REGISTER_AB(EE_SPI_HADR),
	REGISTER_AB(EE_SPI_HDATA),
	REGISTER_AB(EE_BASE_PAGE),
	REGISTER_AB(EE_VPD_CFG0),
	/* EE_VPD_SW_CNTL and EE_VPD_SW_DATA are not used */
	/* PMBX_DBG_IADDR and PBMX_DBG_IDATA are indirect */
	/* PCIE_CORE_INDIRECT is indirect */
	REGISTER_AB(NIC_STAT),
	REGISTER_AB(GPIO_CTL),
	REGISTER_AB(GLB_CTL),
	/* FATAL_INTR_KER and FATAL_INTR_CHAR are partly RC */
	REGISTER_BZ(DP_CTRL),
	REGISTER_AZ(MEM_STAT),
	REGISTER_AZ(CS_DEBUG),
	REGISTER_AZ(ALTERA_BUILD),
	REGISTER_AZ(CSR_SPARE),
	REGISTER_AB(PCIE_SD_CTL0123),
	REGISTER_AB(PCIE_SD_CTL45),
	REGISTER_AB(PCIE_PCS_CTL_STAT),
	/* DEBUG_DATA_OUT is not used */
	/* DRV_EV is WO */
	REGISTER_AZ(EVQ_CTL),
	REGISTER_AZ(EVQ_CNT1),
	REGISTER_AZ(EVQ_CNT2),
	REGISTER_AZ(BUF_TBL_CFG),
	REGISTER_AZ(SRM_RX_DC_CFG),
	REGISTER_AZ(SRM_TX_DC_CFG),
	REGISTER_AZ(SRM_CFG),
	/* BUF_TBL_UPD is WO */
	REGISTER_AZ(SRM_UPD_EVQ),
	REGISTER_AZ(SRAM_PARITY),
	REGISTER_AZ(RX_CFG),
	REGISTER_BZ(RX_FILTER_CTL),
	/* RX_FLUSH_DESCQ is WO */
	REGISTER_AZ(RX_DC_CFG),
	REGISTER_AZ(RX_DC_PF_WM),
	REGISTER_BZ(RX_RSS_TKEY),
	/* RX_NODESC_DROP is RC */
	REGISTER_AA(RX_SELF_RST),
	/* RX_DEBUG, RX_PUSH_DROP are not used */
	REGISTER_CZ(RX_RSS_IPV6_REG1),
	REGISTER_CZ(RX_RSS_IPV6_REG2),
	REGISTER_CZ(RX_RSS_IPV6_REG3),
	/* TX_FLUSH_DESCQ is WO */
	REGISTER_AZ(TX_DC_CFG),
	REGISTER_AA(TX_CHKSM_CFG),
	REGISTER_AZ(TX_CFG),
	/* TX_PUSH_DROP is not used */
	REGISTER_AZ(TX_RESERVED),
	REGISTER_BZ(TX_PACE),
	/* TX_PACE_DROP_QID is RC */
	REGISTER_BB(TX_VLAN),
	REGISTER_BZ(TX_IPFIL_PORTEN),
	REGISTER_AB(MD_TXD),
	REGISTER_AB(MD_RXD),
	REGISTER_AB(MD_CS),
	REGISTER_AB(MD_PHY_ADR),
	REGISTER_AB(MD_ID),
	/* MD_STAT is RC */
	REGISTER_AB(MAC_STAT_DMA),
	REGISTER_AB(MAC_CTRL),
	REGISTER_BB(GEN_MODE),
	REGISTER_AB(MAC_MC_HASH_REG0),
	REGISTER_AB(MAC_MC_HASH_REG1),
	REGISTER_AB(GM_CFG1),
	REGISTER_AB(GM_CFG2),
	/* GM_IPG and GM_HD are not used */
	REGISTER_AB(GM_MAX_FLEN),
	/* GM_TEST is not used */
	REGISTER_AB(GM_ADR1),
	REGISTER_AB(GM_ADR2),
	REGISTER_AB(GMF_CFG0),
	REGISTER_AB(GMF_CFG1),
	REGISTER_AB(GMF_CFG2),
	REGISTER_AB(GMF_CFG3),
	REGISTER_AB(GMF_CFG4),
	REGISTER_AB(GMF_CFG5),
	REGISTER_BB(TX_SRC_MAC_CTL),
	REGISTER_AB(XM_ADR_LO),
	REGISTER_AB(XM_ADR_HI),
	REGISTER_AB(XM_GLB_CFG),
	REGISTER_AB(XM_TX_CFG),
	REGISTER_AB(XM_RX_CFG),
	REGISTER_AB(XM_MGT_INT_MASK),
	REGISTER_AB(XM_FC),
	REGISTER_AB(XM_PAUSE_TIME),
	REGISTER_AB(XM_TX_PARAM),
	REGISTER_AB(XM_RX_PARAM),
	/* XM_MGT_INT_MSK (note no 'A') is RC */
	REGISTER_AB(XX_PWR_RST),
	REGISTER_AB(XX_SD_CTL),
	REGISTER_AB(XX_TXDRV_CTL),
	/* XX_PRBS_CTL, XX_PRBS_CHK and XX_PRBS_ERR are not used */
	/* XX_CORE_STAT is partly RC */
	REGISTER_DZ(BIU_HW_REV_ID),
	REGISTER_DZ(MC_DB_LWRD),
	REGISTER_DZ(MC_DB_HWRD),
};

/* A dumpable register table: like efx_nic_reg plus the byte stride
 * between rows and the number of rows to read.
 */
struct efx_nic_reg_table {
	u32 offset:24;
	u32 min_revision:3, max_revision:3;
	u32 step:6, rows:21;
};

#define REGISTER_TABLE_DIMENSIONS(_, offset, arch, min_rev, max_rev, step, rows) { \
	offset,								\
	REGISTER_REVISION_ ## arch ## min_rev,				\
	REGISTER_REVISION_ ## arch ## max_rev,				\
	step, rows							\
}
#define REGISTER_TABLE(name, arch, min_rev, max_rev)			\
	REGISTER_TABLE_DIMENSIONS(					\
		name, arch ## R_ ## min_rev ## max_rev ## _ ## name,	\
		arch, min_rev, max_rev,					\
		arch ## R_ ## min_rev ## max_rev ## _ ## name ## _STEP,	\
		arch ## R_ ## min_rev ## max_rev ## _ ## name ## _ROWS)
#define REGISTER_TABLE_AA(name) REGISTER_TABLE(name, F, A, A)
#define REGISTER_TABLE_AZ(name) REGISTER_TABLE(name, F, A, Z)
#define REGISTER_TABLE_BB(name) REGISTER_TABLE(name, F, B, B)
#define REGISTER_TABLE_BZ(name) REGISTER_TABLE(name, F, B, Z)
/* Tables whose row count differs between revisions B and C..Z get two
 * entries sharing the same offset and step.
 */
#define REGISTER_TABLE_BB_CZ(name)					\
	REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, F, B, B,	\
				  FR_BZ_ ## name ## _STEP,		\
				  FR_BB_ ## name ## _ROWS),		\
	REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, F, C, Z,	\
				  FR_BZ_ ## name ## _STEP,		\
				  FR_CZ_ ## name ## _ROWS)
#define REGISTER_TABLE_CZ(name) REGISTER_TABLE(name, F, C, Z)
#define REGISTER_TABLE_DZ(name) REGISTER_TABLE(name, E, D, Z)

static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
	/* DRIVER is not used */
	/* EVQ_RPTR, TIMER_COMMAND, USR_EV and {RX,TX}_DESC_UPD are WO */
	REGISTER_TABLE_BB(TX_IPFIL_TBL),
	REGISTER_TABLE_BB(TX_SRC_MAC_TBL),
	REGISTER_TABLE_AA(RX_DESC_PTR_TBL_KER),
	REGISTER_TABLE_BB_CZ(RX_DESC_PTR_TBL),
	REGISTER_TABLE_AA(TX_DESC_PTR_TBL_KER),
	REGISTER_TABLE_BB_CZ(TX_DESC_PTR_TBL),
	REGISTER_TABLE_AA(EVQ_PTR_TBL_KER),
	REGISTER_TABLE_BB_CZ(EVQ_PTR_TBL),
	/* We can't reasonably read all of the buffer table (up to 8MB!).
	 * However this driver will only use a few entries.  Reading
	 * 1K entries allows for some expansion of queue count and
	 * size before we need to change the version. */
	REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL_KER, FR_AA_BUF_FULL_TBL_KER,
				  F, A, A, 8, 1024),
	REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL, FR_BZ_BUF_FULL_TBL,
				  F, B, Z, 8, 1024),
	REGISTER_TABLE_CZ(RX_MAC_FILTER_TBL0),
	REGISTER_TABLE_BB_CZ(TIMER_TBL),
	REGISTER_TABLE_BB_CZ(TX_PACE_TBL),
	REGISTER_TABLE_BZ(RX_INDIRECTION_TBL),
	/* TX_FILTER_TBL0 is huge and not used by this driver */
	REGISTER_TABLE_CZ(TX_MAC_FILTER_TBL0),
	REGISTER_TABLE_CZ(MC_TREG_SMEM),
	/* MSIX_PBA_TABLE is not mapped */
	/* SRM_DBG is not mapped (and is redundant with BUF_FLL_TBL) */
	REGISTER_TABLE_BZ(RX_FILTER_TBL0),
	REGISTER_TABLE_DZ(BIU_MC_SFT_STATUS),
};

/* Size in bytes of the buffer efx_nic_get_regs() will fill for this
 * NIC revision: one oword per applicable register plus, for each
 * applicable table, rows * min(step, 16) bytes.
 */
size_t efx_nic_get_regs_len(struct efx_nic *efx)
{
	const struct efx_nic_reg *reg;
	const struct efx_nic_reg_table *table;
	size_t len = 0;

	for (reg = efx_nic_regs;
	     reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
	     reg++)
		if (efx->type->revision >= reg->min_revision &&
		    efx->type->revision <= reg->max_revision)
			len += sizeof(efx_oword_t);

	for (table = efx_nic_reg_tables;
	     table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
	     table++)
		if (efx->type->revision >= table->min_revision &&
		    efx->type->revision <= table->max_revision)
			len += table->rows * min_t(size_t, table->step, 16);

	return len;
}

/* Dump all registers and tables applicable to this revision into @buf,
 * which must be at least efx_nic_get_regs_len() bytes.  The same
 * selection logic as get_regs_len keeps the two in sync.
 */
void efx_nic_get_regs(struct efx_nic *efx, void *buf)
{
	const struct efx_nic_reg *reg;
	const struct efx_nic_reg_table *table;

	for (reg = efx_nic_regs;
	     reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
	     reg++) {
		if (efx->type->revision >= reg->min_revision &&
		    efx->type->revision <= reg->max_revision) {
			efx_reado(efx, (efx_oword_t *)buf, reg->offset);
			buf += sizeof(efx_oword_t);
		}
	}

	for (table = efx_nic_reg_tables;
	     table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
	     table++) {
		size_t size, i;

		if (!(efx->type->revision >= table->min_revision &&
		      efx->type->revision <= table->max_revision))
			continue;

		size = min_t(size_t, table->step, 16);

		/* Pick the read accessor matching the row stride. */
		for (i = 0; i < table->rows; i++) {
			switch (table->step) {
			case 4: /* 32-bit SRAM */
				efx_readd(efx, buf, table->offset + 4 * i);
				break;
			case 8: /* 64-bit SRAM */
				efx_sram_readq(efx,
					       efx->membase + table->offset,
					       buf, i);
				break;
			case 16: /* 128-bit-readable register */
				efx_reado_table(efx, buf, table->offset, i);
				break;
			case 32: /* 128-bit register, interleaved */
				efx_reado_table(efx, buf, table->offset, 2 * i);
				break;
			default:
				WARN_ON(1);
				return;
			}
			buf += size;
		}
	}
}

/**
 * efx_nic_describe_stats - Describe supported statistics for ethtool
 * @desc: Array of &struct efx_hw_stat_desc describing the statistics
 * @count: Length of the @desc array
 * @mask: Bitmask of which elements of @desc are enabled
 * @names: Buffer to copy names to, or %NULL.  The names are copied
 *	starting at intervals of %ETH_GSTRING_LEN bytes.
 *
 * Returns the number of visible statistics, i.e. the number of set
 * bits in the first @count bits of @mask for which a name is defined.
 */
size_t efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
			      const unsigned long *mask, u8 *names)
{
	size_t visible = 0;
	size_t index;

	for_each_set_bit(index, mask, count) {
		if (desc[index].name) {
			if (names) {
				strlcpy(names, desc[index].name,
					ETH_GSTRING_LEN);
				names += ETH_GSTRING_LEN;
			}
			++visible;
		}
	}

	return visible;
}

/**
 * efx_nic_update_stats - Convert statistics DMA buffer to array of u64
 * @desc: Array of &struct efx_hw_stat_desc describing the DMA buffer
 *	layout.  DMA widths of 0, 16, 32 and 64 are supported; where
 *	the width is specified as 0 the corresponding element of
 *	@stats is not updated.
 * @count: Length of the @desc array
 * @mask: Bitmask of which elements of @desc are enabled
 * @stats: Buffer to update with the converted statistics.  The length
 *	of this array must be at least @count.
 * @dma_buf: DMA buffer containing hardware statistics
 * @accumulate: If set, the converted values will be added rather than
 *	directly stored to the corresponding elements of @stats
 */
void efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count,
			  const unsigned long *mask,
			  u64 *stats, const void *dma_buf, bool accumulate)
{
	size_t index;

	for_each_set_bit(index, mask, count) {
		if (desc[index].dma_width) {
			const void *addr = dma_buf + desc[index].offset;
			u64 val;

			/* Hardware counters are little-endian in the DMA
			 * buffer; widen to u64 per the descriptor's width.
			 */
			switch (desc[index].dma_width) {
			case 16:
				val = le16_to_cpup((__le16 *)addr);
				break;
			case 32:
				val = le32_to_cpup((__le32 *)addr);
				break;
			case 64:
				val = le64_to_cpup((__le64 *)addr);
				break;
			default:
				WARN_ON(1);
				val = 0;
				break;
			}

			if (accumulate)
				stats[index] += val;
			else
				stats[index] = val;
		}
	}
}

/* Adjust the cumulative RX no-descriptor-drop count so that drops that
 * occurred while the interface was down are excluded from the value
 * reported through *rx_nodesc_drops.
 */
void efx_nic_fix_nodesc_drop_stat(struct efx_nic *efx, u64 *rx_nodesc_drops)
{
	/* if down, or this is the first update after coming up */
	if (!(efx->net_dev->flags & IFF_UP) || !efx->rx_nodesc_drops_prev_state)
		efx->rx_nodesc_drops_while_down +=
			*rx_nodesc_drops - efx->rx_nodesc_drops_total;
	efx->rx_nodesc_drops_total = *rx_nodesc_drops;
	efx->rx_nodesc_drops_prev_state = !!(efx->net_dev->flags & IFF_UP);
	*rx_nodesc_drops -= efx->rx_nodesc_drops_while_down;
}
gpl-2.0
bagnz0r/GT-I8160_Kernel
fs/sysfs/dir.c
1903
22717
/* * fs/sysfs/dir.c - sysfs core and dir operation implementation * * Copyright (c) 2001-3 Patrick Mochel * Copyright (c) 2007 SUSE Linux Products GmbH * Copyright (c) 2007 Tejun Heo <teheo@suse.de> * * This file is released under the GPLv2. * * Please see Documentation/filesystems/sysfs.txt for more information. */ #undef DEBUG #include <linux/fs.h> #include <linux/mount.h> #include <linux/module.h> #include <linux/kobject.h> #include <linux/namei.h> #include <linux/idr.h> #include <linux/completion.h> #include <linux/mutex.h> #include <linux/slab.h> #include <linux/security.h> #include "sysfs.h" DEFINE_MUTEX(sysfs_mutex); DEFINE_SPINLOCK(sysfs_assoc_lock); static DEFINE_SPINLOCK(sysfs_ino_lock); static DEFINE_IDA(sysfs_ino_ida); /** * sysfs_link_sibling - link sysfs_dirent into sibling list * @sd: sysfs_dirent of interest * * Link @sd into its sibling list which starts from * sd->s_parent->s_dir.children. * * Locking: * mutex_lock(sysfs_mutex) */ static void sysfs_link_sibling(struct sysfs_dirent *sd) { struct sysfs_dirent *parent_sd = sd->s_parent; struct sysfs_dirent **pos; BUG_ON(sd->s_sibling); /* Store directory entries in order by ino. This allows * readdir to properly restart without having to add a * cursor into the s_dir.children list. */ for (pos = &parent_sd->s_dir.children; *pos; pos = &(*pos)->s_sibling) { if (sd->s_ino < (*pos)->s_ino) break; } sd->s_sibling = *pos; *pos = sd; } /** * sysfs_unlink_sibling - unlink sysfs_dirent from sibling list * @sd: sysfs_dirent of interest * * Unlink @sd from its sibling list which starts from * sd->s_parent->s_dir.children. 
* * Locking: * mutex_lock(sysfs_mutex) */ static void sysfs_unlink_sibling(struct sysfs_dirent *sd) { struct sysfs_dirent **pos; for (pos = &sd->s_parent->s_dir.children; *pos; pos = &(*pos)->s_sibling) { if (*pos == sd) { *pos = sd->s_sibling; sd->s_sibling = NULL; break; } } } /** * sysfs_get_active - get an active reference to sysfs_dirent * @sd: sysfs_dirent to get an active reference to * * Get an active reference of @sd. This function is noop if @sd * is NULL. * * RETURNS: * Pointer to @sd on success, NULL on failure. */ struct sysfs_dirent *sysfs_get_active(struct sysfs_dirent *sd) { if (unlikely(!sd)) return NULL; while (1) { int v, t; v = atomic_read(&sd->s_active); if (unlikely(v < 0)) return NULL; t = atomic_cmpxchg(&sd->s_active, v, v + 1); if (likely(t == v)) { rwsem_acquire_read(&sd->dep_map, 0, 1, _RET_IP_); return sd; } if (t < 0) return NULL; cpu_relax(); } } /** * sysfs_put_active - put an active reference to sysfs_dirent * @sd: sysfs_dirent to put an active reference to * * Put an active reference to @sd. This function is noop if @sd * is NULL. */ void sysfs_put_active(struct sysfs_dirent *sd) { struct completion *cmpl; int v; if (unlikely(!sd)) return; rwsem_release(&sd->dep_map, 1, _RET_IP_); v = atomic_dec_return(&sd->s_active); if (likely(v != SD_DEACTIVATED_BIAS)) return; /* atomic_dec_return() is a mb(), we'll always see the updated * sd->s_sibling. */ cmpl = (void *)sd->s_sibling; complete(cmpl); } /** * sysfs_deactivate - deactivate sysfs_dirent * @sd: sysfs_dirent to deactivate * * Deny new active references and drain existing ones. */ static void sysfs_deactivate(struct sysfs_dirent *sd) { DECLARE_COMPLETION_ONSTACK(wait); int v; BUG_ON(sd->s_sibling || !(sd->s_flags & SYSFS_FLAG_REMOVED)); if (!(sysfs_type(sd) & SYSFS_ACTIVE_REF)) return; sd->s_sibling = (void *)&wait; rwsem_acquire(&sd->dep_map, 0, 0, _RET_IP_); /* atomic_add_return() is a mb(), put_active() will always see * the updated sd->s_sibling. 
*/ v = atomic_add_return(SD_DEACTIVATED_BIAS, &sd->s_active); if (v != SD_DEACTIVATED_BIAS) { lock_contended(&sd->dep_map, _RET_IP_); wait_for_completion(&wait); } sd->s_sibling = NULL; lock_acquired(&sd->dep_map, _RET_IP_); rwsem_release(&sd->dep_map, 1, _RET_IP_); } static int sysfs_alloc_ino(ino_t *pino) { int ino, rc; retry: spin_lock(&sysfs_ino_lock); rc = ida_get_new_above(&sysfs_ino_ida, 2, &ino); spin_unlock(&sysfs_ino_lock); if (rc == -EAGAIN) { if (ida_pre_get(&sysfs_ino_ida, GFP_KERNEL)) goto retry; rc = -ENOMEM; } *pino = ino; return rc; } static void sysfs_free_ino(ino_t ino) { spin_lock(&sysfs_ino_lock); ida_remove(&sysfs_ino_ida, ino); spin_unlock(&sysfs_ino_lock); } void release_sysfs_dirent(struct sysfs_dirent * sd) { struct sysfs_dirent *parent_sd; repeat: /* Moving/renaming is always done while holding reference. * sd->s_parent won't change beneath us. */ parent_sd = sd->s_parent; if (sysfs_type(sd) == SYSFS_KOBJ_LINK) sysfs_put(sd->s_symlink.target_sd); if (sysfs_type(sd) & SYSFS_COPY_NAME) kfree(sd->s_name); if (sd->s_iattr && sd->s_iattr->ia_secdata) security_release_secctx(sd->s_iattr->ia_secdata, sd->s_iattr->ia_secdata_len); kfree(sd->s_iattr); sysfs_free_ino(sd->s_ino); kmem_cache_free(sysfs_dir_cachep, sd); sd = parent_sd; if (sd && atomic_dec_and_test(&sd->s_count)) goto repeat; } static int sysfs_dentry_delete(const struct dentry *dentry) { struct sysfs_dirent *sd = dentry->d_fsdata; return !!(sd->s_flags & SYSFS_FLAG_REMOVED); } static int sysfs_dentry_revalidate(struct dentry *dentry, struct nameidata *nd) { struct sysfs_dirent *sd; int is_dir; if (nd->flags & LOOKUP_RCU) return -ECHILD; sd = dentry->d_fsdata; mutex_lock(&sysfs_mutex); /* The sysfs dirent has been deleted */ if (sd->s_flags & SYSFS_FLAG_REMOVED) goto out_bad; /* The sysfs dirent has been moved? 
*/ if (dentry->d_parent->d_fsdata != sd->s_parent) goto out_bad; /* The sysfs dirent has been renamed */ if (strcmp(dentry->d_name.name, sd->s_name) != 0) goto out_bad; mutex_unlock(&sysfs_mutex); out_valid: return 1; out_bad: /* Remove the dentry from the dcache hashes. * If this is a deleted dentry we use d_drop instead of d_delete * so sysfs doesn't need to cope with negative dentries. * * If this is a dentry that has simply been renamed we * use d_drop to remove it from the dcache lookup on its * old parent. If this dentry persists later when a lookup * is performed at its new name the dentry will be readded * to the dcache hashes. */ is_dir = (sysfs_type(sd) == SYSFS_DIR); mutex_unlock(&sysfs_mutex); if (is_dir) { /* If we have submounts we must allow the vfs caches * to lie about the state of the filesystem to prevent * leaks and other nasty things. */ if (have_submounts(dentry)) goto out_valid; shrink_dcache_parent(dentry); } d_drop(dentry); return 0; } static void sysfs_dentry_iput(struct dentry *dentry, struct inode *inode) { struct sysfs_dirent * sd = dentry->d_fsdata; sysfs_put(sd); iput(inode); } static const struct dentry_operations sysfs_dentry_ops = { .d_revalidate = sysfs_dentry_revalidate, .d_delete = sysfs_dentry_delete, .d_iput = sysfs_dentry_iput, }; struct sysfs_dirent *sysfs_new_dirent(const char *name, umode_t mode, int type) { char *dup_name = NULL; struct sysfs_dirent *sd; if (type & SYSFS_COPY_NAME) { name = dup_name = kstrdup(name, GFP_KERNEL); if (!name) return NULL; } sd = kmem_cache_zalloc(sysfs_dir_cachep, GFP_KERNEL); if (!sd) goto err_out1; if (sysfs_alloc_ino(&sd->s_ino)) goto err_out2; atomic_set(&sd->s_count, 1); atomic_set(&sd->s_active, 0); sd->s_name = name; sd->s_mode = mode; sd->s_flags = type; return sd; err_out2: kmem_cache_free(sysfs_dir_cachep, sd); err_out1: kfree(dup_name); return NULL; } /** * sysfs_addrm_start - prepare for sysfs_dirent add/remove * @acxt: pointer to sysfs_addrm_cxt to be used * @parent_sd: parent 
sysfs_dirent * * This function is called when the caller is about to add or * remove sysfs_dirent under @parent_sd. This function acquires * sysfs_mutex. @acxt is used to keep and pass context to * other addrm functions. * * LOCKING: * Kernel thread context (may sleep). sysfs_mutex is locked on * return. */ void sysfs_addrm_start(struct sysfs_addrm_cxt *acxt, struct sysfs_dirent *parent_sd) { memset(acxt, 0, sizeof(*acxt)); acxt->parent_sd = parent_sd; mutex_lock(&sysfs_mutex); } /** * __sysfs_add_one - add sysfs_dirent to parent without warning * @acxt: addrm context to use * @sd: sysfs_dirent to be added * * Get @acxt->parent_sd and set sd->s_parent to it and increment * nlink of parent inode if @sd is a directory and link into the * children list of the parent. * * This function should be called between calls to * sysfs_addrm_start() and sysfs_addrm_finish() and should be * passed the same @acxt as passed to sysfs_addrm_start(). * * LOCKING: * Determined by sysfs_addrm_start(). * * RETURNS: * 0 on success, -EEXIST if entry with the given name already * exists. */ int __sysfs_add_one(struct sysfs_addrm_cxt *acxt, struct sysfs_dirent *sd) { struct sysfs_inode_attrs *ps_iattr; if (sysfs_find_dirent(acxt->parent_sd, sd->s_ns, sd->s_name)) return -EEXIST; sd->s_parent = sysfs_get(acxt->parent_sd); sysfs_link_sibling(sd); /* Update timestamps on the parent */ ps_iattr = acxt->parent_sd->s_iattr; if (ps_iattr) { struct iattr *ps_iattrs = &ps_iattr->ia_iattr; ps_iattrs->ia_ctime = ps_iattrs->ia_mtime = CURRENT_TIME; } return 0; } /** * sysfs_pathname - return full path to sysfs dirent * @sd: sysfs_dirent whose path we want * @path: caller allocated buffer * * Gives the name "/" to the sysfs_root entry; any path returned * is relative to wherever sysfs is mounted. 
* * XXX: does no error checking on @path size */ static char *sysfs_pathname(struct sysfs_dirent *sd, char *path) { if (sd->s_parent) { sysfs_pathname(sd->s_parent, path); strcat(path, "/"); } strcat(path, sd->s_name); return path; } /** * sysfs_add_one - add sysfs_dirent to parent * @acxt: addrm context to use * @sd: sysfs_dirent to be added * * Get @acxt->parent_sd and set sd->s_parent to it and increment * nlink of parent inode if @sd is a directory and link into the * children list of the parent. * * This function should be called between calls to * sysfs_addrm_start() and sysfs_addrm_finish() and should be * passed the same @acxt as passed to sysfs_addrm_start(). * * LOCKING: * Determined by sysfs_addrm_start(). * * RETURNS: * 0 on success, -EEXIST if entry with the given name already * exists. */ int sysfs_add_one(struct sysfs_addrm_cxt *acxt, struct sysfs_dirent *sd) { int ret; ret = __sysfs_add_one(acxt, sd); if (ret == -EEXIST) { char *path = kzalloc(PATH_MAX, GFP_KERNEL); WARN(1, KERN_WARNING "sysfs: cannot create duplicate filename '%s'\n", (path == NULL) ? sd->s_name : strcat(strcat(sysfs_pathname(acxt->parent_sd, path), "/"), sd->s_name)); kfree(path); } return ret; } /** * sysfs_remove_one - remove sysfs_dirent from parent * @acxt: addrm context to use * @sd: sysfs_dirent to be removed * * Mark @sd removed and drop nlink of parent inode if @sd is a * directory. @sd is unlinked from the children list. * * This function should be called between calls to * sysfs_addrm_start() and sysfs_addrm_finish() and should be * passed the same @acxt as passed to sysfs_addrm_start(). * * LOCKING: * Determined by sysfs_addrm_start(). 
*/ void sysfs_remove_one(struct sysfs_addrm_cxt *acxt, struct sysfs_dirent *sd) { struct sysfs_inode_attrs *ps_iattr; BUG_ON(sd->s_flags & SYSFS_FLAG_REMOVED); sysfs_unlink_sibling(sd); /* Update timestamps on the parent */ ps_iattr = acxt->parent_sd->s_iattr; if (ps_iattr) { struct iattr *ps_iattrs = &ps_iattr->ia_iattr; ps_iattrs->ia_ctime = ps_iattrs->ia_mtime = CURRENT_TIME; } sd->s_flags |= SYSFS_FLAG_REMOVED; sd->s_sibling = acxt->removed; acxt->removed = sd; } /** * sysfs_addrm_finish - finish up sysfs_dirent add/remove * @acxt: addrm context to finish up * * Finish up sysfs_dirent add/remove. Resources acquired by * sysfs_addrm_start() are released and removed sysfs_dirents are * cleaned up. * * LOCKING: * sysfs_mutex is released. */ void sysfs_addrm_finish(struct sysfs_addrm_cxt *acxt) { /* release resources acquired by sysfs_addrm_start() */ mutex_unlock(&sysfs_mutex); /* kill removed sysfs_dirents */ while (acxt->removed) { struct sysfs_dirent *sd = acxt->removed; acxt->removed = sd->s_sibling; sd->s_sibling = NULL; sysfs_deactivate(sd); unmap_bin_file(sd); sysfs_put(sd); } } /** * sysfs_find_dirent - find sysfs_dirent with the given name * @parent_sd: sysfs_dirent to search under * @name: name to look for * * Look for sysfs_dirent with name @name under @parent_sd. * * LOCKING: * mutex_lock(sysfs_mutex) * * RETURNS: * Pointer to sysfs_dirent if found, NULL if not. */ struct sysfs_dirent *sysfs_find_dirent(struct sysfs_dirent *parent_sd, const void *ns, const unsigned char *name) { struct sysfs_dirent *sd; for (sd = parent_sd->s_dir.children; sd; sd = sd->s_sibling) { if (ns && sd->s_ns && (sd->s_ns != ns)) continue; if (!strcmp(sd->s_name, name)) return sd; } return NULL; } /** * sysfs_get_dirent - find and get sysfs_dirent with the given name * @parent_sd: sysfs_dirent to search under * @name: name to look for * * Look for sysfs_dirent with name @name under @parent_sd and get * it if found. * * LOCKING: * Kernel thread context (may sleep). 
Grabs sysfs_mutex. * * RETURNS: * Pointer to sysfs_dirent if found, NULL if not. */ struct sysfs_dirent *sysfs_get_dirent(struct sysfs_dirent *parent_sd, const void *ns, const unsigned char *name) { struct sysfs_dirent *sd; mutex_lock(&sysfs_mutex); sd = sysfs_find_dirent(parent_sd, ns, name); sysfs_get(sd); mutex_unlock(&sysfs_mutex); return sd; } EXPORT_SYMBOL_GPL(sysfs_get_dirent); static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd, enum kobj_ns_type type, const void *ns, const char *name, struct sysfs_dirent **p_sd) { umode_t mode = S_IFDIR| S_IRWXU | S_IRUGO | S_IXUGO; struct sysfs_addrm_cxt acxt; struct sysfs_dirent *sd; int rc; /* allocate */ sd = sysfs_new_dirent(name, mode, SYSFS_DIR); if (!sd) return -ENOMEM; sd->s_flags |= (type << SYSFS_NS_TYPE_SHIFT); sd->s_ns = ns; sd->s_dir.kobj = kobj; /* link in */ sysfs_addrm_start(&acxt, parent_sd); rc = sysfs_add_one(&acxt, sd); sysfs_addrm_finish(&acxt); if (rc == 0) *p_sd = sd; else sysfs_put(sd); return rc; } int sysfs_create_subdir(struct kobject *kobj, const char *name, struct sysfs_dirent **p_sd) { return create_dir(kobj, kobj->sd, KOBJ_NS_TYPE_NONE, NULL, name, p_sd); } /** * sysfs_read_ns_type: return associated ns_type * @kobj: the kobject being queried * * Each kobject can be tagged with exactly one namespace type * (i.e. network or user). Return the ns_type associated with * this object if any */ static enum kobj_ns_type sysfs_read_ns_type(struct kobject *kobj) { const struct kobj_ns_type_operations *ops; enum kobj_ns_type type; ops = kobj_child_ns_ops(kobj); if (!ops) return KOBJ_NS_TYPE_NONE; type = ops->type; BUG_ON(type <= KOBJ_NS_TYPE_NONE); BUG_ON(type >= KOBJ_NS_TYPES); BUG_ON(!kobj_ns_type_registered(type)); return type; } /** * sysfs_create_dir - create a directory for an object. * @kobj: object we're creating directory for. 
*/ int sysfs_create_dir(struct kobject * kobj) { enum kobj_ns_type type; struct sysfs_dirent *parent_sd, *sd; const void *ns = NULL; int error = 0; BUG_ON(!kobj); if (kobj->parent) parent_sd = kobj->parent->sd; else parent_sd = &sysfs_root; if (sysfs_ns_type(parent_sd)) ns = kobj->ktype->namespace(kobj); type = sysfs_read_ns_type(kobj); error = create_dir(kobj, parent_sd, type, ns, kobject_name(kobj), &sd); if (!error) kobj->sd = sd; return error; } static struct dentry * sysfs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) { struct dentry *ret = NULL; struct dentry *parent = dentry->d_parent; struct sysfs_dirent *parent_sd = parent->d_fsdata; struct sysfs_dirent *sd; struct inode *inode; enum kobj_ns_type type; const void *ns; mutex_lock(&sysfs_mutex); type = sysfs_ns_type(parent_sd); ns = sysfs_info(dir->i_sb)->ns[type]; sd = sysfs_find_dirent(parent_sd, ns, dentry->d_name.name); /* no such entry */ if (!sd) { ret = ERR_PTR(-ENOENT); goto out_unlock; } /* attach dentry and inode */ inode = sysfs_get_inode(dir->i_sb, sd); if (!inode) { ret = ERR_PTR(-ENOMEM); goto out_unlock; } /* instantiate and hash dentry */ ret = d_find_alias(inode); if (!ret) { d_set_d_op(dentry, &sysfs_dentry_ops); dentry->d_fsdata = sysfs_get(sd); d_add(dentry, inode); } else { d_move(ret, dentry); iput(inode); } out_unlock: mutex_unlock(&sysfs_mutex); return ret; } const struct inode_operations sysfs_dir_inode_operations = { .lookup = sysfs_lookup, .permission = sysfs_permission, .setattr = sysfs_setattr, .getattr = sysfs_getattr, .setxattr = sysfs_setxattr, }; static void remove_dir(struct sysfs_dirent *sd) { struct sysfs_addrm_cxt acxt; sysfs_addrm_start(&acxt, sd->s_parent); sysfs_remove_one(&acxt, sd); sysfs_addrm_finish(&acxt); } void sysfs_remove_subdir(struct sysfs_dirent *sd) { remove_dir(sd); } static void __sysfs_remove_dir(struct sysfs_dirent *dir_sd) { struct sysfs_addrm_cxt acxt; struct sysfs_dirent **pos; if (!dir_sd) return; pr_debug("sysfs %s: 
removing dir\n", dir_sd->s_name); sysfs_addrm_start(&acxt, dir_sd); pos = &dir_sd->s_dir.children; while (*pos) { struct sysfs_dirent *sd = *pos; if (sysfs_type(sd) != SYSFS_DIR) sysfs_remove_one(&acxt, sd); else pos = &(*pos)->s_sibling; } sysfs_addrm_finish(&acxt); remove_dir(dir_sd); } /** * sysfs_remove_dir - remove an object's directory. * @kobj: object. * * The only thing special about this is that we remove any files in * the directory before we remove the directory, and we've inlined * what used to be sysfs_rmdir() below, instead of calling separately. */ void sysfs_remove_dir(struct kobject * kobj) { struct sysfs_dirent *sd = kobj->sd; spin_lock(&sysfs_assoc_lock); kobj->sd = NULL; spin_unlock(&sysfs_assoc_lock); __sysfs_remove_dir(sd); } int sysfs_rename(struct sysfs_dirent *sd, struct sysfs_dirent *new_parent_sd, const void *new_ns, const char *new_name) { const char *dup_name = NULL; int error; mutex_lock(&sysfs_mutex); error = 0; if ((sd->s_parent == new_parent_sd) && (sd->s_ns == new_ns) && (strcmp(sd->s_name, new_name) == 0)) goto out; /* nothing to rename */ error = -EEXIST; if (sysfs_find_dirent(new_parent_sd, new_ns, new_name)) goto out; /* rename sysfs_dirent */ if (strcmp(sd->s_name, new_name) != 0) { error = -ENOMEM; new_name = dup_name = kstrdup(new_name, GFP_KERNEL); if (!new_name) goto out; dup_name = sd->s_name; sd->s_name = new_name; } /* Remove from old parent's list and insert into new parent's list. 
*/ if (sd->s_parent != new_parent_sd) { sysfs_unlink_sibling(sd); sysfs_get(new_parent_sd); sysfs_put(sd->s_parent); sd->s_parent = new_parent_sd; sysfs_link_sibling(sd); } sd->s_ns = new_ns; error = 0; out: mutex_unlock(&sysfs_mutex); kfree(dup_name); return error; } int sysfs_rename_dir(struct kobject *kobj, const char *new_name) { struct sysfs_dirent *parent_sd = kobj->sd->s_parent; const void *new_ns = NULL; if (sysfs_ns_type(parent_sd)) new_ns = kobj->ktype->namespace(kobj); return sysfs_rename(kobj->sd, parent_sd, new_ns, new_name); } int sysfs_move_dir(struct kobject *kobj, struct kobject *new_parent_kobj) { struct sysfs_dirent *sd = kobj->sd; struct sysfs_dirent *new_parent_sd; const void *new_ns = NULL; BUG_ON(!sd->s_parent); if (sysfs_ns_type(sd->s_parent)) new_ns = kobj->ktype->namespace(kobj); new_parent_sd = new_parent_kobj && new_parent_kobj->sd ? new_parent_kobj->sd : &sysfs_root; return sysfs_rename(sd, new_parent_sd, new_ns, sd->s_name); } /* Relationship between s_mode and the DT_xxx types */ static inline unsigned char dt_type(struct sysfs_dirent *sd) { return (sd->s_mode >> 12) & 15; } static int sysfs_dir_release(struct inode *inode, struct file *filp) { sysfs_put(filp->private_data); return 0; } static struct sysfs_dirent *sysfs_dir_pos(const void *ns, struct sysfs_dirent *parent_sd, ino_t ino, struct sysfs_dirent *pos) { if (pos) { int valid = !(pos->s_flags & SYSFS_FLAG_REMOVED) && pos->s_parent == parent_sd && ino == pos->s_ino; sysfs_put(pos); if (!valid) pos = NULL; } if (!pos && (ino > 1) && (ino < INT_MAX)) { pos = parent_sd->s_dir.children; while (pos && (ino > pos->s_ino)) pos = pos->s_sibling; } while (pos && pos->s_ns && pos->s_ns != ns) pos = pos->s_sibling; return pos; } static struct sysfs_dirent *sysfs_dir_next_pos(const void *ns, struct sysfs_dirent *parent_sd, ino_t ino, struct sysfs_dirent *pos) { pos = sysfs_dir_pos(ns, parent_sd, ino, pos); if (pos) pos = pos->s_sibling; while (pos && pos->s_ns && pos->s_ns != ns) pos = 
pos->s_sibling; return pos; } static int sysfs_readdir(struct file * filp, void * dirent, filldir_t filldir) { struct dentry *dentry = filp->f_path.dentry; struct sysfs_dirent * parent_sd = dentry->d_fsdata; struct sysfs_dirent *pos = filp->private_data; enum kobj_ns_type type; const void *ns; ino_t ino; type = sysfs_ns_type(parent_sd); ns = sysfs_info(dentry->d_sb)->ns[type]; if (filp->f_pos == 0) { ino = parent_sd->s_ino; if (filldir(dirent, ".", 1, filp->f_pos, ino, DT_DIR) == 0) filp->f_pos++; } if (filp->f_pos == 1) { if (parent_sd->s_parent) ino = parent_sd->s_parent->s_ino; else ino = parent_sd->s_ino; if (filldir(dirent, "..", 2, filp->f_pos, ino, DT_DIR) == 0) filp->f_pos++; } mutex_lock(&sysfs_mutex); for (pos = sysfs_dir_pos(ns, parent_sd, filp->f_pos, pos); pos; pos = sysfs_dir_next_pos(ns, parent_sd, filp->f_pos, pos)) { const char * name; unsigned int type; int len, ret; name = pos->s_name; len = strlen(name); ino = pos->s_ino; type = dt_type(pos); filp->f_pos = ino; filp->private_data = sysfs_get(pos); mutex_unlock(&sysfs_mutex); ret = filldir(dirent, name, len, filp->f_pos, ino, type); mutex_lock(&sysfs_mutex); if (ret < 0) break; } mutex_unlock(&sysfs_mutex); if ((filp->f_pos > 1) && !pos) { /* EOF */ filp->f_pos = INT_MAX; filp->private_data = NULL; } return 0; } const struct file_operations sysfs_dir_operations = { .read = generic_read_dir, .readdir = sysfs_readdir, .release = sysfs_dir_release, .llseek = generic_file_llseek, };
gpl-2.0
chinghanyu/Cognet-RPi-linux
arch/mips/loongson/common/reset.c
2415
1550
/* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * Copyright (C) 2007 Lemote, Inc. & Institute of Computing Technology * Author: Fuxin Zhang, zhangfx@lemote.com * Copyright (C) 2009 Lemote, Inc. * Author: Zhangjin Wu, wuzhangjin@gmail.com */ #include <linux/init.h> #include <linux/pm.h> #include <asm/idle.h> #include <asm/reboot.h> #include <loongson.h> static inline void loongson_reboot(void) { #ifndef CONFIG_CPU_JUMP_WORKAROUNDS ((void (*)(void))ioremap_nocache(LOONGSON_BOOT_BASE, 4)) (); #else void (*func)(void); func = (void *)ioremap_nocache(LOONGSON_BOOT_BASE, 4); __asm__ __volatile__( " .set noat \n" " jr %[func] \n" " .set at \n" : /* No outputs */ : [func] "r" (func)); #endif } static void loongson_restart(char *command) { /* do preparation for reboot */ mach_prepare_reboot(); /* reboot via jumping to boot base address */ loongson_reboot(); } static void loongson_poweroff(void) { mach_prepare_shutdown(); unreachable(); } static void loongson_halt(void) { pr_notice("\n\n** You can safely turn off the power now **\n\n"); while (1) { if (cpu_wait) cpu_wait(); } } static int __init mips_reboot_setup(void) { _machine_restart = loongson_restart; _machine_halt = loongson_halt; pm_power_off = loongson_poweroff; return 0; } arch_initcall(mips_reboot_setup);
gpl-2.0
vmobi-d2vmu/android_kernel_samsung_d2vmu
arch/arm/mach-orion5x/edmini_v2-setup.c
2671
7280
/* * arch/arm/mach-orion5x/edmini_v2-setup.c * * LaCie Ethernet Disk mini V2 Setup * * Copyright (C) 2008 Christopher Moore <moore@free.fr> * Copyright (C) 2008 Albert Aribaud <albert.aribaud@free.fr> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ /* * TODO: add Orion USB device port init when kernel.org support is added. * TODO: add flash write support: see below. * TODO: add power-off support. * TODO: add I2C EEPROM support. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/pci.h> #include <linux/irq.h> #include <linux/mtd/physmap.h> #include <linux/mv643xx_eth.h> #include <linux/leds.h> #include <linux/gpio_keys.h> #include <linux/input.h> #include <linux/i2c.h> #include <linux/ata_platform.h> #include <linux/gpio.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/mach/pci.h> #include <mach/orion5x.h> #include "common.h" #include "mpp.h" /***************************************************************************** * EDMINI_V2 Info ****************************************************************************/ /* * 512KB NOR flash Device bus boot chip select */ #define EDMINI_V2_NOR_BOOT_BASE 0xfff80000 #define EDMINI_V2_NOR_BOOT_SIZE SZ_512K /***************************************************************************** * 512KB NOR Flash on BOOT Device ****************************************************************************/ /* * Currently the MTD code does not recognize the MX29LV400CBCT as a bottom * -type device. This could cause risks of accidentally erasing critical * flash sectors. We thus define a single, write-protected partition covering * the whole flash. 
* TODO: once the flash part TOP/BOTTOM detection issue is sorted out in the MTD * code, break this into at least three partitions: 'u-boot code', 'u-boot * environment' and 'whatever is left'. */ static struct mtd_partition edmini_v2_partitions[] = { { .name = "Full512kb", .size = 0x00080000, .offset = 0x00000000, .mask_flags = MTD_WRITEABLE, }, }; static struct physmap_flash_data edmini_v2_nor_flash_data = { .width = 1, .parts = edmini_v2_partitions, .nr_parts = ARRAY_SIZE(edmini_v2_partitions), }; static struct resource edmini_v2_nor_flash_resource = { .flags = IORESOURCE_MEM, .start = EDMINI_V2_NOR_BOOT_BASE, .end = EDMINI_V2_NOR_BOOT_BASE + EDMINI_V2_NOR_BOOT_SIZE - 1, }; static struct platform_device edmini_v2_nor_flash = { .name = "physmap-flash", .id = 0, .dev = { .platform_data = &edmini_v2_nor_flash_data, }, .num_resources = 1, .resource = &edmini_v2_nor_flash_resource, }; /***************************************************************************** * Ethernet ****************************************************************************/ static struct mv643xx_eth_platform_data edmini_v2_eth_data = { .phy_addr = 8, }; /***************************************************************************** * RTC 5C372a on I2C bus ****************************************************************************/ #define EDMINIV2_RTC_GPIO 3 static struct i2c_board_info __initdata edmini_v2_i2c_rtc = { I2C_BOARD_INFO("rs5c372a", 0x32), .irq = 0, }; /***************************************************************************** * Sata ****************************************************************************/ static struct mv_sata_platform_data edmini_v2_sata_data = { .n_ports = 2, }; /***************************************************************************** * GPIO LED (simple - doesn't use hardware blinking support) ****************************************************************************/ #define EDMINI_V2_GPIO_LED_POWER 16 static struct gpio_led edmini_v2_leds[] = { 
{ .name = "power:blue", .gpio = EDMINI_V2_GPIO_LED_POWER, .active_low = 1, }, }; static struct gpio_led_platform_data edmini_v2_led_data = { .num_leds = ARRAY_SIZE(edmini_v2_leds), .leds = edmini_v2_leds, }; static struct platform_device edmini_v2_gpio_leds = { .name = "leds-gpio", .id = -1, .dev = { .platform_data = &edmini_v2_led_data, }, }; /**************************************************************************** * GPIO key ****************************************************************************/ #define EDMINI_V2_GPIO_KEY_POWER 18 static struct gpio_keys_button edmini_v2_buttons[] = { { .code = KEY_POWER, .gpio = EDMINI_V2_GPIO_KEY_POWER, .desc = "Power Button", .active_low = 0, }, }; static struct gpio_keys_platform_data edmini_v2_button_data = { .buttons = edmini_v2_buttons, .nbuttons = ARRAY_SIZE(edmini_v2_buttons), }; static struct platform_device edmini_v2_gpio_buttons = { .name = "gpio-keys", .id = -1, .dev = { .platform_data = &edmini_v2_button_data, }, }; /***************************************************************************** * General Setup ****************************************************************************/ static unsigned int edminiv2_mpp_modes[] __initdata = { MPP0_UNUSED, MPP1_UNUSED, MPP2_UNUSED, MPP3_GPIO, /* RTC interrupt */ MPP4_UNUSED, MPP5_UNUSED, MPP6_UNUSED, MPP7_UNUSED, MPP8_UNUSED, MPP9_UNUSED, MPP10_UNUSED, MPP11_UNUSED, MPP12_SATA_LED, /* SATA 0 presence */ MPP13_SATA_LED, /* SATA 1 presence */ MPP14_SATA_LED, /* SATA 0 active */ MPP15_SATA_LED, /* SATA 1 active */ /* 16: Power LED control (0 = On, 1 = Off) */ MPP16_GPIO, /* 17: Power LED control select (0 = CPLD, 1 = GPIO16) */ MPP17_GPIO, /* 18: Power button status (0 = Released, 1 = Pressed) */ MPP18_GPIO, MPP19_UNUSED, 0, }; static void __init edmini_v2_init(void) { /* * Setup basic Orion functions. Need to be called early. */ orion5x_init(); orion5x_mpp_conf(edminiv2_mpp_modes); /* * Configure peripherals. 
*/ orion5x_ehci0_init(); orion5x_eth_init(&edmini_v2_eth_data); orion5x_i2c_init(); orion5x_sata_init(&edmini_v2_sata_data); orion5x_uart0_init(); orion5x_setup_dev_boot_win(EDMINI_V2_NOR_BOOT_BASE, EDMINI_V2_NOR_BOOT_SIZE); platform_device_register(&edmini_v2_nor_flash); platform_device_register(&edmini_v2_gpio_leds); platform_device_register(&edmini_v2_gpio_buttons); pr_notice("edmini_v2: USB device port, flash write and power-off " "are not yet supported.\n"); /* Get RTC IRQ and register the chip */ if (gpio_request(EDMINIV2_RTC_GPIO, "rtc") == 0) { if (gpio_direction_input(EDMINIV2_RTC_GPIO) == 0) edmini_v2_i2c_rtc.irq = gpio_to_irq(EDMINIV2_RTC_GPIO); else gpio_free(EDMINIV2_RTC_GPIO); } if (edmini_v2_i2c_rtc.irq == 0) pr_warning("edmini_v2: failed to get RTC IRQ\n"); i2c_register_board_info(0, &edmini_v2_i2c_rtc, 1); } /* Warning: LaCie use a wrong mach-type (0x20e=526) in their bootloader. */ MACHINE_START(EDMINI_V2, "LaCie Ethernet Disk mini V2") /* Maintainer: Christopher Moore <moore@free.fr> */ .boot_params = 0x00000100, .init_machine = edmini_v2_init, .map_io = orion5x_map_io, .init_early = orion5x_init_early, .init_irq = orion5x_init_irq, .timer = &orion5x_timer, .fixup = tag_fixup_mem32, MACHINE_END
gpl-2.0
hiikezoe/android_kernel_asus_tf300t
virt/kvm/eventfd.c
2927
15591
/* * kvm eventfd support - use eventfd objects to signal various KVM events * * Copyright 2009 Novell. All Rights Reserved. * Copyright 2010 Red Hat, Inc. and/or its affiliates. * * Author: * Gregory Haskins <ghaskins@novell.com> * * This file is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License * as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */ #include <linux/kvm_host.h> #include <linux/kvm.h> #include <linux/workqueue.h> #include <linux/syscalls.h> #include <linux/wait.h> #include <linux/poll.h> #include <linux/file.h> #include <linux/list.h> #include <linux/eventfd.h> #include <linux/kernel.h> #include <linux/slab.h> #include "iodev.h" /* * -------------------------------------------------------------------- * irqfd: Allows an fd to be used to inject an interrupt to the guest * * Credit goes to Avi Kivity for the original idea. 
* -------------------------------------------------------------------- */ struct _irqfd { /* Used for MSI fast-path */ struct kvm *kvm; wait_queue_t wait; /* Update side is protected by irqfds.lock */ struct kvm_kernel_irq_routing_entry __rcu *irq_entry; /* Used for level IRQ fast-path */ int gsi; struct work_struct inject; /* Used for setup/shutdown */ struct eventfd_ctx *eventfd; struct list_head list; poll_table pt; struct work_struct shutdown; }; static struct workqueue_struct *irqfd_cleanup_wq; static void irqfd_inject(struct work_struct *work) { struct _irqfd *irqfd = container_of(work, struct _irqfd, inject); struct kvm *kvm = irqfd->kvm; kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 1); kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 0); } /* * Race-free decouple logic (ordering is critical) */ static void irqfd_shutdown(struct work_struct *work) { struct _irqfd *irqfd = container_of(work, struct _irqfd, shutdown); u64 cnt; /* * Synchronize with the wait-queue and unhook ourselves to prevent * further events. */ eventfd_ctx_remove_wait_queue(irqfd->eventfd, &irqfd->wait, &cnt); /* * We know no new events will be scheduled at this point, so block * until all previously outstanding events have completed */ flush_work_sync(&irqfd->inject); /* * It is now safe to release the object's resources */ eventfd_ctx_put(irqfd->eventfd); kfree(irqfd); } /* assumes kvm->irqfds.lock is held */ static bool irqfd_is_active(struct _irqfd *irqfd) { return list_empty(&irqfd->list) ? 
false : true; } /* * Mark the irqfd as inactive and schedule it for removal * * assumes kvm->irqfds.lock is held */ static void irqfd_deactivate(struct _irqfd *irqfd) { BUG_ON(!irqfd_is_active(irqfd)); list_del_init(&irqfd->list); queue_work(irqfd_cleanup_wq, &irqfd->shutdown); } /* * Called with wqh->lock held and interrupts disabled */ static int irqfd_wakeup(wait_queue_t *wait, unsigned mode, int sync, void *key) { struct _irqfd *irqfd = container_of(wait, struct _irqfd, wait); unsigned long flags = (unsigned long)key; struct kvm_kernel_irq_routing_entry *irq; struct kvm *kvm = irqfd->kvm; if (flags & POLLIN) { rcu_read_lock(); irq = rcu_dereference(irqfd->irq_entry); /* An event has been signaled, inject an interrupt */ if (irq) kvm_set_msi(irq, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1); else schedule_work(&irqfd->inject); rcu_read_unlock(); } if (flags & POLLHUP) { /* The eventfd is closing, detach from KVM */ unsigned long flags; spin_lock_irqsave(&kvm->irqfds.lock, flags); /* * We must check if someone deactivated the irqfd before * we could acquire the irqfds.lock since the item is * deactivated from the KVM side before it is unhooked from * the wait-queue. If it is already deactivated, we can * simply return knowing the other side will cleanup for us. 
* We cannot race against the irqfd going away since the * other side is required to acquire wqh->lock, which we hold */ if (irqfd_is_active(irqfd)) irqfd_deactivate(irqfd); spin_unlock_irqrestore(&kvm->irqfds.lock, flags); } return 0; } static void irqfd_ptable_queue_proc(struct file *file, wait_queue_head_t *wqh, poll_table *pt) { struct _irqfd *irqfd = container_of(pt, struct _irqfd, pt); add_wait_queue(wqh, &irqfd->wait); } /* Must be called under irqfds.lock */ static void irqfd_update(struct kvm *kvm, struct _irqfd *irqfd, struct kvm_irq_routing_table *irq_rt) { struct kvm_kernel_irq_routing_entry *e; struct hlist_node *n; if (irqfd->gsi >= irq_rt->nr_rt_entries) { rcu_assign_pointer(irqfd->irq_entry, NULL); return; } hlist_for_each_entry(e, n, &irq_rt->map[irqfd->gsi], link) { /* Only fast-path MSI. */ if (e->type == KVM_IRQ_ROUTING_MSI) rcu_assign_pointer(irqfd->irq_entry, e); else rcu_assign_pointer(irqfd->irq_entry, NULL); } } static int kvm_irqfd_assign(struct kvm *kvm, int fd, int gsi) { struct kvm_irq_routing_table *irq_rt; struct _irqfd *irqfd, *tmp; struct file *file = NULL; struct eventfd_ctx *eventfd = NULL; int ret; unsigned int events; irqfd = kzalloc(sizeof(*irqfd), GFP_KERNEL); if (!irqfd) return -ENOMEM; irqfd->kvm = kvm; irqfd->gsi = gsi; INIT_LIST_HEAD(&irqfd->list); INIT_WORK(&irqfd->inject, irqfd_inject); INIT_WORK(&irqfd->shutdown, irqfd_shutdown); file = eventfd_fget(fd); if (IS_ERR(file)) { ret = PTR_ERR(file); goto fail; } eventfd = eventfd_ctx_fileget(file); if (IS_ERR(eventfd)) { ret = PTR_ERR(eventfd); goto fail; } irqfd->eventfd = eventfd; /* * Install our own custom wake-up handling so we are notified via * a callback whenever someone signals the underlying eventfd */ init_waitqueue_func_entry(&irqfd->wait, irqfd_wakeup); init_poll_funcptr(&irqfd->pt, irqfd_ptable_queue_proc); spin_lock_irq(&kvm->irqfds.lock); ret = 0; list_for_each_entry(tmp, &kvm->irqfds.items, list) { if (irqfd->eventfd != tmp->eventfd) continue; /* This fd is 
used for another irq already. */ ret = -EBUSY; spin_unlock_irq(&kvm->irqfds.lock); goto fail; } irq_rt = rcu_dereference_protected(kvm->irq_routing, lockdep_is_held(&kvm->irqfds.lock)); irqfd_update(kvm, irqfd, irq_rt); events = file->f_op->poll(file, &irqfd->pt); list_add_tail(&irqfd->list, &kvm->irqfds.items); /* * Check if there was an event already pending on the eventfd * before we registered, and trigger it as if we didn't miss it. */ if (events & POLLIN) schedule_work(&irqfd->inject); spin_unlock_irq(&kvm->irqfds.lock); /* * do not drop the file until the irqfd is fully initialized, otherwise * we might race against the POLLHUP */ fput(file); return 0; fail: if (eventfd && !IS_ERR(eventfd)) eventfd_ctx_put(eventfd); if (!IS_ERR(file)) fput(file); kfree(irqfd); return ret; } void kvm_eventfd_init(struct kvm *kvm) { spin_lock_init(&kvm->irqfds.lock); INIT_LIST_HEAD(&kvm->irqfds.items); INIT_LIST_HEAD(&kvm->ioeventfds); } /* * shutdown any irqfd's that match fd+gsi */ static int kvm_irqfd_deassign(struct kvm *kvm, int fd, int gsi) { struct _irqfd *irqfd, *tmp; struct eventfd_ctx *eventfd; eventfd = eventfd_ctx_fdget(fd); if (IS_ERR(eventfd)) return PTR_ERR(eventfd); spin_lock_irq(&kvm->irqfds.lock); list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list) { if (irqfd->eventfd == eventfd && irqfd->gsi == gsi) { /* * This rcu_assign_pointer is needed for when * another thread calls kvm_irq_routing_update before * we flush workqueue below (we synchronize with * kvm_irq_routing_update using irqfds.lock). * It is paired with synchronize_rcu done by caller * of that function. */ rcu_assign_pointer(irqfd->irq_entry, NULL); irqfd_deactivate(irqfd); } } spin_unlock_irq(&kvm->irqfds.lock); eventfd_ctx_put(eventfd); /* * Block until we know all outstanding shutdown jobs have completed * so that we guarantee there will not be any more interrupts on this * gsi once this deassign function returns. 
*/ flush_workqueue(irqfd_cleanup_wq); return 0; } int kvm_irqfd(struct kvm *kvm, int fd, int gsi, int flags) { if (flags & KVM_IRQFD_FLAG_DEASSIGN) return kvm_irqfd_deassign(kvm, fd, gsi); return kvm_irqfd_assign(kvm, fd, gsi); } /* * This function is called as the kvm VM fd is being released. Shutdown all * irqfds that still remain open */ void kvm_irqfd_release(struct kvm *kvm) { struct _irqfd *irqfd, *tmp; spin_lock_irq(&kvm->irqfds.lock); list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list) irqfd_deactivate(irqfd); spin_unlock_irq(&kvm->irqfds.lock); /* * Block until we know all outstanding shutdown jobs have completed * since we do not take a kvm* reference. */ flush_workqueue(irqfd_cleanup_wq); } /* * Change irq_routing and irqfd. * Caller must invoke synchronize_rcu afterwards. */ void kvm_irq_routing_update(struct kvm *kvm, struct kvm_irq_routing_table *irq_rt) { struct _irqfd *irqfd; spin_lock_irq(&kvm->irqfds.lock); rcu_assign_pointer(kvm->irq_routing, irq_rt); list_for_each_entry(irqfd, &kvm->irqfds.items, list) irqfd_update(kvm, irqfd, irq_rt); spin_unlock_irq(&kvm->irqfds.lock); } /* * create a host-wide workqueue for issuing deferred shutdown requests * aggregated from all vm* instances. We need our own isolated single-thread * queue to prevent deadlock against flushing the normal work-queue. */ static int __init irqfd_module_init(void) { irqfd_cleanup_wq = create_singlethread_workqueue("kvm-irqfd-cleanup"); if (!irqfd_cleanup_wq) return -ENOMEM; return 0; } static void __exit irqfd_module_exit(void) { destroy_workqueue(irqfd_cleanup_wq); } module_init(irqfd_module_init); module_exit(irqfd_module_exit); /* * -------------------------------------------------------------------- * ioeventfd: translate a PIO/MMIO memory write to an eventfd signal. * * userspace can register a PIO/MMIO address with an eventfd for receiving * notification when the memory has been touched. 
* -------------------------------------------------------------------- */ struct _ioeventfd { struct list_head list; u64 addr; int length; struct eventfd_ctx *eventfd; u64 datamatch; struct kvm_io_device dev; bool wildcard; }; static inline struct _ioeventfd * to_ioeventfd(struct kvm_io_device *dev) { return container_of(dev, struct _ioeventfd, dev); } static void ioeventfd_release(struct _ioeventfd *p) { eventfd_ctx_put(p->eventfd); list_del(&p->list); kfree(p); } static bool ioeventfd_in_range(struct _ioeventfd *p, gpa_t addr, int len, const void *val) { u64 _val; if (!(addr == p->addr && len == p->length)) /* address-range must be precise for a hit */ return false; if (p->wildcard) /* all else equal, wildcard is always a hit */ return true; /* otherwise, we have to actually compare the data */ BUG_ON(!IS_ALIGNED((unsigned long)val, len)); switch (len) { case 1: _val = *(u8 *)val; break; case 2: _val = *(u16 *)val; break; case 4: _val = *(u32 *)val; break; case 8: _val = *(u64 *)val; break; default: return false; } return _val == p->datamatch ? true : false; } /* MMIO/PIO writes trigger an event if the addr/val match */ static int ioeventfd_write(struct kvm_io_device *this, gpa_t addr, int len, const void *val) { struct _ioeventfd *p = to_ioeventfd(this); if (!ioeventfd_in_range(p, addr, len, val)) return -EOPNOTSUPP; eventfd_signal(p->eventfd, 1); return 0; } /* * This function is called as KVM is completely shutting down. 
We do not * need to worry about locking just nuke anything we have as quickly as possible */ static void ioeventfd_destructor(struct kvm_io_device *this) { struct _ioeventfd *p = to_ioeventfd(this); ioeventfd_release(p); } static const struct kvm_io_device_ops ioeventfd_ops = { .write = ioeventfd_write, .destructor = ioeventfd_destructor, }; /* assumes kvm->slots_lock held */ static bool ioeventfd_check_collision(struct kvm *kvm, struct _ioeventfd *p) { struct _ioeventfd *_p; list_for_each_entry(_p, &kvm->ioeventfds, list) if (_p->addr == p->addr && _p->length == p->length && (_p->wildcard || p->wildcard || _p->datamatch == p->datamatch)) return true; return false; } static int kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) { int pio = args->flags & KVM_IOEVENTFD_FLAG_PIO; enum kvm_bus bus_idx = pio ? KVM_PIO_BUS : KVM_MMIO_BUS; struct _ioeventfd *p; struct eventfd_ctx *eventfd; int ret; /* must be natural-word sized */ switch (args->len) { case 1: case 2: case 4: case 8: break; default: return -EINVAL; } /* check for range overflow */ if (args->addr + args->len < args->addr) return -EINVAL; /* check for extra flags that we don't understand */ if (args->flags & ~KVM_IOEVENTFD_VALID_FLAG_MASK) return -EINVAL; eventfd = eventfd_ctx_fdget(args->fd); if (IS_ERR(eventfd)) return PTR_ERR(eventfd); p = kzalloc(sizeof(*p), GFP_KERNEL); if (!p) { ret = -ENOMEM; goto fail; } INIT_LIST_HEAD(&p->list); p->addr = args->addr; p->length = args->len; p->eventfd = eventfd; /* The datamatch feature is optional, otherwise this is a wildcard */ if (args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH) p->datamatch = args->datamatch; else p->wildcard = true; mutex_lock(&kvm->slots_lock); /* Verify that there isn't a match already */ if (ioeventfd_check_collision(kvm, p)) { ret = -EEXIST; goto unlock_fail; } kvm_iodevice_init(&p->dev, &ioeventfd_ops); ret = kvm_io_bus_register_dev(kvm, bus_idx, &p->dev); if (ret < 0) goto unlock_fail; list_add_tail(&p->list, &kvm->ioeventfds); 
mutex_unlock(&kvm->slots_lock); return 0; unlock_fail: mutex_unlock(&kvm->slots_lock); fail: kfree(p); eventfd_ctx_put(eventfd); return ret; } static int kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) { int pio = args->flags & KVM_IOEVENTFD_FLAG_PIO; enum kvm_bus bus_idx = pio ? KVM_PIO_BUS : KVM_MMIO_BUS; struct _ioeventfd *p, *tmp; struct eventfd_ctx *eventfd; int ret = -ENOENT; eventfd = eventfd_ctx_fdget(args->fd); if (IS_ERR(eventfd)) return PTR_ERR(eventfd); mutex_lock(&kvm->slots_lock); list_for_each_entry_safe(p, tmp, &kvm->ioeventfds, list) { bool wildcard = !(args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH); if (p->eventfd != eventfd || p->addr != args->addr || p->length != args->len || p->wildcard != wildcard) continue; if (!p->wildcard && p->datamatch != args->datamatch) continue; kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev); ioeventfd_release(p); ret = 0; break; } mutex_unlock(&kvm->slots_lock); eventfd_ctx_put(eventfd); return ret; } int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) { if (args->flags & KVM_IOEVENTFD_FLAG_DEASSIGN) return kvm_deassign_ioeventfd(kvm, args); return kvm_assign_ioeventfd(kvm, args); }
gpl-2.0
klquicksall/Galaxy-Nexus-4.2
net/ipv6/xfrm6_output.c
2927
2582
/* * xfrm6_output.c - Common IPsec encapsulation code for IPv6. * Copyright (C) 2002 USAGI/WIDE Project * Copyright (c) 2004 Herbert Xu <herbert@gondor.apana.org.au> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/if_ether.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/skbuff.h> #include <linux/icmpv6.h> #include <linux/netfilter_ipv6.h> #include <net/dst.h> #include <net/ipv6.h> #include <net/ip6_route.h> #include <net/xfrm.h> int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb, u8 **prevhdr) { return ip6_find_1stfragopt(skb, prevhdr); } EXPORT_SYMBOL(xfrm6_find_1stfragopt); static int xfrm6_tunnel_check_size(struct sk_buff *skb) { int mtu, ret = 0; struct dst_entry *dst = skb_dst(skb); mtu = dst_mtu(dst); if (mtu < IPV6_MIN_MTU) mtu = IPV6_MIN_MTU; if (!skb->local_df && skb->len > mtu) { skb->dev = dst->dev; icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); ret = -EMSGSIZE; } return ret; } int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb) { int err; err = xfrm6_tunnel_check_size(skb); if (err) return err; XFRM_MODE_SKB_CB(skb)->protocol = ipv6_hdr(skb)->nexthdr; return xfrm6_extract_header(skb); } int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb) { int err; err = xfrm_inner_extract_output(x, skb); if (err) return err; memset(IP6CB(skb), 0, sizeof(*IP6CB(skb))); #ifdef CONFIG_NETFILTER IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED; #endif skb->protocol = htons(ETH_P_IPV6); skb->local_df = 1; return x->outer_mode->output2(x, skb); } EXPORT_SYMBOL(xfrm6_prepare_output); int xfrm6_output_finish(struct sk_buff *skb) { #ifdef CONFIG_NETFILTER IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED; #endif skb->protocol = htons(ETH_P_IPV6); return xfrm_output(skb); } static int 
__xfrm6_output(struct sk_buff *skb) { struct dst_entry *dst = skb_dst(skb); struct xfrm_state *x = dst->xfrm; if ((x && x->props.mode == XFRM_MODE_TUNNEL) && ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) || dst_allfrag(skb_dst(skb)))) { return ip6_fragment(skb, x->outer_mode->afinfo->output_finish); } return x->outer_mode->afinfo->output_finish(skb); } int xfrm6_output(struct sk_buff *skb) { return NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING, skb, NULL, skb_dst(skb)->dev, __xfrm6_output); }
gpl-2.0
manumanfred/kernel_omap
net/ipv6/xfrm6_output.c
2927
2582
/* * xfrm6_output.c - Common IPsec encapsulation code for IPv6. * Copyright (C) 2002 USAGI/WIDE Project * Copyright (c) 2004 Herbert Xu <herbert@gondor.apana.org.au> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/if_ether.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/skbuff.h> #include <linux/icmpv6.h> #include <linux/netfilter_ipv6.h> #include <net/dst.h> #include <net/ipv6.h> #include <net/ip6_route.h> #include <net/xfrm.h> int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb, u8 **prevhdr) { return ip6_find_1stfragopt(skb, prevhdr); } EXPORT_SYMBOL(xfrm6_find_1stfragopt); static int xfrm6_tunnel_check_size(struct sk_buff *skb) { int mtu, ret = 0; struct dst_entry *dst = skb_dst(skb); mtu = dst_mtu(dst); if (mtu < IPV6_MIN_MTU) mtu = IPV6_MIN_MTU; if (!skb->local_df && skb->len > mtu) { skb->dev = dst->dev; icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); ret = -EMSGSIZE; } return ret; } int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb) { int err; err = xfrm6_tunnel_check_size(skb); if (err) return err; XFRM_MODE_SKB_CB(skb)->protocol = ipv6_hdr(skb)->nexthdr; return xfrm6_extract_header(skb); } int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb) { int err; err = xfrm_inner_extract_output(x, skb); if (err) return err; memset(IP6CB(skb), 0, sizeof(*IP6CB(skb))); #ifdef CONFIG_NETFILTER IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED; #endif skb->protocol = htons(ETH_P_IPV6); skb->local_df = 1; return x->outer_mode->output2(x, skb); } EXPORT_SYMBOL(xfrm6_prepare_output); int xfrm6_output_finish(struct sk_buff *skb) { #ifdef CONFIG_NETFILTER IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED; #endif skb->protocol = htons(ETH_P_IPV6); return xfrm_output(skb); } static int 
__xfrm6_output(struct sk_buff *skb) { struct dst_entry *dst = skb_dst(skb); struct xfrm_state *x = dst->xfrm; if ((x && x->props.mode == XFRM_MODE_TUNNEL) && ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) || dst_allfrag(skb_dst(skb)))) { return ip6_fragment(skb, x->outer_mode->afinfo->output_finish); } return x->outer_mode->afinfo->output_finish(skb); } int xfrm6_output(struct sk_buff *skb) { return NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING, skb, NULL, skb_dst(skb)->dev, __xfrm6_output); }
gpl-2.0
NoelMacwan/SXDNanhu
drivers/watchdog/softdog.c
3183
7694
/* * SoftDog 0.07: A Software Watchdog Device * * (c) Copyright 1996 Alan Cox <alan@lxorguk.ukuu.org.uk>, * All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Neither Alan Cox nor CymruNet Ltd. admit liability nor provide * warranty for any of this software. This material is provided * "AS-IS" and at no charge. * * (c) Copyright 1995 Alan Cox <alan@lxorguk.ukuu.org.uk> * * Software only watchdog driver. Unlike its big brother the WDT501P * driver this won't always recover a failed machine. * * 03/96: Angelo Haritsis <ah@doc.ic.ac.uk> : * Modularised. * Added soft_margin; use upon insmod to change the timer delay. * NB: uses same minor as wdt (WATCHDOG_MINOR); we could use separate * minors. * * 19980911 Alan Cox * Made SMP safe for 2.3.x * * 20011127 Joel Becker (jlbec@evilplan.org> * Added soft_noboot; Allows testing the softdog trigger without * requiring a recompile. * Added WDIOC_GETTIMEOUT and WDIOC_SETTIMOUT. * * 20020530 Joel Becker <joel.becker@oracle.com> * Added Matt Domsch's nowayout module option. */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/types.h> #include <linux/timer.h> #include <linux/miscdevice.h> #include <linux/watchdog.h> #include <linux/fs.h> #include <linux/notifier.h> #include <linux/reboot.h> #include <linux/init.h> #include <linux/jiffies.h> #include <linux/uaccess.h> #include <linux/kernel.h> #define PFX "SoftDog: " #define TIMER_MARGIN 60 /* Default is 60 seconds */ static int soft_margin = TIMER_MARGIN; /* in seconds */ module_param(soft_margin, int, 0); MODULE_PARM_DESC(soft_margin, "Watchdog soft_margin in seconds. 
(0 < soft_margin < 65536, default=" __MODULE_STRING(TIMER_MARGIN) ")"); static int nowayout = WATCHDOG_NOWAYOUT; module_param(nowayout, int, 0); MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); #ifdef ONLY_TESTING static int soft_noboot = 1; #else static int soft_noboot = 0; #endif /* ONLY_TESTING */ module_param(soft_noboot, int, 0); MODULE_PARM_DESC(soft_noboot, "Softdog action, set to 1 to ignore reboots, 0 to reboot " "(default depends on ONLY_TESTING)"); static int soft_panic; module_param(soft_panic, int, 0); MODULE_PARM_DESC(soft_panic, "Softdog action, set to 1 to panic, 0 to reboot (default=0)"); /* * Our timer */ static void watchdog_fire(unsigned long); static struct timer_list watchdog_ticktock = TIMER_INITIALIZER(watchdog_fire, 0, 0); static unsigned long driver_open, orphan_timer; static char expect_close; /* * If the timer expires.. */ static void watchdog_fire(unsigned long data) { if (test_and_clear_bit(0, &orphan_timer)) module_put(THIS_MODULE); if (soft_noboot) printk(KERN_CRIT PFX "Triggered - Reboot ignored.\n"); else if (soft_panic) { printk(KERN_CRIT PFX "Initiating panic.\n"); panic("Software Watchdog Timer expired."); } else { printk(KERN_CRIT PFX "Initiating system reboot.\n"); emergency_restart(); printk(KERN_CRIT PFX "Reboot didn't ?????\n"); } } /* * Softdog operations */ static int softdog_keepalive(void) { mod_timer(&watchdog_ticktock, jiffies+(soft_margin*HZ)); return 0; } static int softdog_stop(void) { del_timer(&watchdog_ticktock); return 0; } static int softdog_set_heartbeat(int t) { if ((t < 0x0001) || (t > 0xFFFF)) return -EINVAL; soft_margin = t; return 0; } /* * /dev/watchdog handling */ static int softdog_open(struct inode *inode, struct file *file) { if (test_and_set_bit(0, &driver_open)) return -EBUSY; if (!test_and_clear_bit(0, &orphan_timer)) __module_get(THIS_MODULE); /* * Activate timer */ softdog_keepalive(); return nonseekable_open(inode, file); } 
static int softdog_release(struct inode *inode, struct file *file) { /* * Shut off the timer. * Lock it in if it's a module and we set nowayout */ if (expect_close == 42) { softdog_stop(); module_put(THIS_MODULE); } else { printk(KERN_CRIT PFX "Unexpected close, not stopping watchdog!\n"); set_bit(0, &orphan_timer); softdog_keepalive(); } clear_bit(0, &driver_open); expect_close = 0; return 0; } static ssize_t softdog_write(struct file *file, const char __user *data, size_t len, loff_t *ppos) { /* * Refresh the timer. */ if (len) { if (!nowayout) { size_t i; /* In case it was set long ago */ expect_close = 0; for (i = 0; i != len; i++) { char c; if (get_user(c, data + i)) return -EFAULT; if (c == 'V') expect_close = 42; } } softdog_keepalive(); } return len; } static long softdog_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { void __user *argp = (void __user *)arg; int __user *p = argp; int new_margin; static const struct watchdog_info ident = { .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE, .firmware_version = 0, .identity = "Software Watchdog", }; switch (cmd) { case WDIOC_GETSUPPORT: return copy_to_user(argp, &ident, sizeof(ident)) ? 
-EFAULT : 0; case WDIOC_GETSTATUS: case WDIOC_GETBOOTSTATUS: return put_user(0, p); case WDIOC_KEEPALIVE: softdog_keepalive(); return 0; case WDIOC_SETTIMEOUT: if (get_user(new_margin, p)) return -EFAULT; if (softdog_set_heartbeat(new_margin)) return -EINVAL; softdog_keepalive(); /* Fall */ case WDIOC_GETTIMEOUT: return put_user(soft_margin, p); default: return -ENOTTY; } } /* * Notifier for system down */ static int softdog_notify_sys(struct notifier_block *this, unsigned long code, void *unused) { if (code == SYS_DOWN || code == SYS_HALT) /* Turn the WDT off */ softdog_stop(); return NOTIFY_DONE; } /* * Kernel Interfaces */ static const struct file_operations softdog_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .write = softdog_write, .unlocked_ioctl = softdog_ioctl, .open = softdog_open, .release = softdog_release, }; static struct miscdevice softdog_miscdev = { .minor = WATCHDOG_MINOR, .name = "watchdog", .fops = &softdog_fops, }; static struct notifier_block softdog_notifier = { .notifier_call = softdog_notify_sys, }; static char banner[] __initdata = KERN_INFO "Software Watchdog Timer: 0.07 " "initialized. 
soft_noboot=%d soft_margin=%d sec soft_panic=%d " "(nowayout= %d)\n"; static int __init watchdog_init(void) { int ret; /* Check that the soft_margin value is within it's range; if not reset to the default */ if (softdog_set_heartbeat(soft_margin)) { softdog_set_heartbeat(TIMER_MARGIN); printk(KERN_INFO PFX "soft_margin must be 0 < soft_margin < 65536, using %d\n", TIMER_MARGIN); } ret = register_reboot_notifier(&softdog_notifier); if (ret) { printk(KERN_ERR PFX "cannot register reboot notifier (err=%d)\n", ret); return ret; } ret = misc_register(&softdog_miscdev); if (ret) { printk(KERN_ERR PFX "cannot register miscdev on minor=%d (err=%d)\n", WATCHDOG_MINOR, ret); unregister_reboot_notifier(&softdog_notifier); return ret; } printk(banner, soft_noboot, soft_margin, soft_panic, nowayout); return 0; } static void __exit watchdog_exit(void) { misc_deregister(&softdog_miscdev); unregister_reboot_notifier(&softdog_notifier); } module_init(watchdog_init); module_exit(watchdog_exit); MODULE_AUTHOR("Alan Cox"); MODULE_DESCRIPTION("Software Watchdog Device Driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
gpl-2.0
rdesfo/kernel
fs/afs/callback.c
4463
11600
/* * Copyright (c) 2002, 2007 Red Hat, Inc. All rights reserved. * * This software may be freely redistributed under the terms of the * GNU General Public License. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * Authors: David Woodhouse <dwmw2@infradead.org> * David Howells <dhowells@redhat.com> * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/circ_buf.h> #include <linux/sched.h> #include "internal.h" #if 0 unsigned afs_vnode_update_timeout = 10; #endif /* 0 */ #define afs_breakring_space(server) \ CIRC_SPACE((server)->cb_break_head, (server)->cb_break_tail, \ ARRAY_SIZE((server)->cb_break)) //static void afs_callback_updater(struct work_struct *); static struct workqueue_struct *afs_callback_update_worker; /* * allow the fileserver to request callback state (re-)initialisation */ void afs_init_callback_state(struct afs_server *server) { struct afs_vnode *vnode; _enter("{%p}", server); spin_lock(&server->cb_lock); /* kill all the promises on record from this server */ while (!RB_EMPTY_ROOT(&server->cb_promises)) { vnode = rb_entry(server->cb_promises.rb_node, struct afs_vnode, cb_promise); _debug("UNPROMISE { vid=%x:%u uq=%u}", vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique); rb_erase(&vnode->cb_promise, &server->cb_promises); vnode->cb_promised = false; } spin_unlock(&server->cb_lock); _leave(""); } /* * handle the data invalidation side of a callback being broken */ void afs_broken_callback_work(struct work_struct *work) { struct afs_vnode *vnode = container_of(work, struct afs_vnode, cb_broken_work); _enter(""); if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) return; /* we're only interested in dealing with a broken callback on *this* * vnode and only if no-one else has dealt with it yet */ if (!mutex_trylock(&vnode->validate_lock)) return; /* someone else is 
dealing with it */ if (test_bit(AFS_VNODE_CB_BROKEN, &vnode->flags)) { if (S_ISDIR(vnode->vfs_inode.i_mode)) afs_clear_permits(vnode); if (afs_vnode_fetch_status(vnode, NULL, NULL) < 0) goto out; if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) goto out; /* if the vnode's data version number changed then its contents * are different */ if (test_and_clear_bit(AFS_VNODE_ZAP_DATA, &vnode->flags)) afs_zap_data(vnode); } out: mutex_unlock(&vnode->validate_lock); /* avoid the potential race whereby the mutex_trylock() in this * function happens again between the clear_bit() and the * mutex_unlock() */ if (test_bit(AFS_VNODE_CB_BROKEN, &vnode->flags)) { _debug("requeue"); queue_work(afs_callback_update_worker, &vnode->cb_broken_work); } _leave(""); } /* * actually break a callback */ static void afs_break_callback(struct afs_server *server, struct afs_vnode *vnode) { _enter(""); set_bit(AFS_VNODE_CB_BROKEN, &vnode->flags); if (vnode->cb_promised) { spin_lock(&vnode->lock); _debug("break callback"); spin_lock(&server->cb_lock); if (vnode->cb_promised) { rb_erase(&vnode->cb_promise, &server->cb_promises); vnode->cb_promised = false; } spin_unlock(&server->cb_lock); queue_work(afs_callback_update_worker, &vnode->cb_broken_work); if (list_empty(&vnode->granted_locks) && !list_empty(&vnode->pending_locks)) afs_lock_may_be_available(vnode); spin_unlock(&vnode->lock); } } /* * allow the fileserver to explicitly break one callback * - happens when * - the backing file is changed * - a lock is released */ static void afs_break_one_callback(struct afs_server *server, struct afs_fid *fid) { struct afs_vnode *vnode; struct rb_node *p; _debug("find"); spin_lock(&server->fs_lock); p = server->fs_vnodes.rb_node; while (p) { vnode = rb_entry(p, struct afs_vnode, server_rb); if (fid->vid < vnode->fid.vid) p = p->rb_left; else if (fid->vid > vnode->fid.vid) p = p->rb_right; else if (fid->vnode < vnode->fid.vnode) p = p->rb_left; else if (fid->vnode > vnode->fid.vnode) p = p->rb_right; else 
if (fid->unique < vnode->fid.unique) p = p->rb_left; else if (fid->unique > vnode->fid.unique) p = p->rb_right; else goto found; } /* not found so we just ignore it (it may have moved to another * server) */ not_available: _debug("not avail"); spin_unlock(&server->fs_lock); _leave(""); return; found: _debug("found"); ASSERTCMP(server, ==, vnode->server); if (!igrab(AFS_VNODE_TO_I(vnode))) goto not_available; spin_unlock(&server->fs_lock); afs_break_callback(server, vnode); iput(&vnode->vfs_inode); _leave(""); } /* * allow the fileserver to break callback promises */ void afs_break_callbacks(struct afs_server *server, size_t count, struct afs_callback callbacks[]) { _enter("%p,%zu,", server, count); ASSERT(server != NULL); ASSERTCMP(count, <=, AFSCBMAX); for (; count > 0; callbacks++, count--) { _debug("- Fid { vl=%08x n=%u u=%u } CB { v=%u x=%u t=%u }", callbacks->fid.vid, callbacks->fid.vnode, callbacks->fid.unique, callbacks->version, callbacks->expiry, callbacks->type ); afs_break_one_callback(server, &callbacks->fid); } _leave(""); return; } /* * record the callback for breaking * - the caller must hold server->cb_lock */ static void afs_do_give_up_callback(struct afs_server *server, struct afs_vnode *vnode) { struct afs_callback *cb; _enter("%p,%p", server, vnode); cb = &server->cb_break[server->cb_break_head]; cb->fid = vnode->fid; cb->version = vnode->cb_version; cb->expiry = vnode->cb_expiry; cb->type = vnode->cb_type; smp_wmb(); server->cb_break_head = (server->cb_break_head + 1) & (ARRAY_SIZE(server->cb_break) - 1); /* defer the breaking of callbacks to try and collect as many as * possible to ship in one operation */ switch (atomic_inc_return(&server->cb_break_n)) { case 1 ... 
AFSCBMAX - 1: queue_delayed_work(afs_callback_update_worker, &server->cb_break_work, HZ * 2); break; case AFSCBMAX: afs_flush_callback_breaks(server); break; default: break; } ASSERT(server->cb_promises.rb_node != NULL); rb_erase(&vnode->cb_promise, &server->cb_promises); vnode->cb_promised = false; _leave(""); } /* * discard the callback on a deleted item */ void afs_discard_callback_on_delete(struct afs_vnode *vnode) { struct afs_server *server = vnode->server; _enter("%d", vnode->cb_promised); if (!vnode->cb_promised) { _leave(" [not promised]"); return; } ASSERT(server != NULL); spin_lock(&server->cb_lock); if (vnode->cb_promised) { ASSERT(server->cb_promises.rb_node != NULL); rb_erase(&vnode->cb_promise, &server->cb_promises); vnode->cb_promised = false; } spin_unlock(&server->cb_lock); _leave(""); } /* * give up the callback registered for a vnode on the file server when the * inode is being cleared */ void afs_give_up_callback(struct afs_vnode *vnode) { struct afs_server *server = vnode->server; DECLARE_WAITQUEUE(myself, current); _enter("%d", vnode->cb_promised); _debug("GIVE UP INODE %p", &vnode->vfs_inode); if (!vnode->cb_promised) { _leave(" [not promised]"); return; } ASSERT(server != NULL); spin_lock(&server->cb_lock); if (vnode->cb_promised && afs_breakring_space(server) == 0) { add_wait_queue(&server->cb_break_waitq, &myself); for (;;) { set_current_state(TASK_UNINTERRUPTIBLE); if (!vnode->cb_promised || afs_breakring_space(server) != 0) break; spin_unlock(&server->cb_lock); schedule(); spin_lock(&server->cb_lock); } remove_wait_queue(&server->cb_break_waitq, &myself); __set_current_state(TASK_RUNNING); } /* of course, it's always possible for the server to break this vnode's * callback first... 
*/ if (vnode->cb_promised) afs_do_give_up_callback(server, vnode); spin_unlock(&server->cb_lock); _leave(""); } /* * dispatch a deferred give up callbacks operation */ void afs_dispatch_give_up_callbacks(struct work_struct *work) { struct afs_server *server = container_of(work, struct afs_server, cb_break_work.work); _enter(""); /* tell the fileserver to discard the callback promises it has * - in the event of ENOMEM or some other error, we just forget that we * had callbacks entirely, and the server will call us later to break * them */ afs_fs_give_up_callbacks(server, &afs_async_call); } /* * flush the outstanding callback breaks on a server */ void afs_flush_callback_breaks(struct afs_server *server) { mod_delayed_work(afs_callback_update_worker, &server->cb_break_work, 0); } #if 0 /* * update a bunch of callbacks */ static void afs_callback_updater(struct work_struct *work) { struct afs_server *server; struct afs_vnode *vnode, *xvnode; time_t now; long timeout; int ret; server = container_of(work, struct afs_server, updater); _enter(""); now = get_seconds(); /* find the first vnode to update */ spin_lock(&server->cb_lock); for (;;) { if (RB_EMPTY_ROOT(&server->cb_promises)) { spin_unlock(&server->cb_lock); _leave(" [nothing]"); return; } vnode = rb_entry(rb_first(&server->cb_promises), struct afs_vnode, cb_promise); if (atomic_read(&vnode->usage) > 0) break; rb_erase(&vnode->cb_promise, &server->cb_promises); vnode->cb_promised = false; } timeout = vnode->update_at - now; if (timeout > 0) { queue_delayed_work(afs_vnode_update_worker, &afs_vnode_update, timeout * HZ); spin_unlock(&server->cb_lock); _leave(" [nothing]"); return; } list_del_init(&vnode->update); atomic_inc(&vnode->usage); spin_unlock(&server->cb_lock); /* we can now perform the update */ _debug("update %s", vnode->vldb.name); vnode->state = AFS_VL_UPDATING; vnode->upd_rej_cnt = 0; vnode->upd_busy_cnt = 0; ret = afs_vnode_update_record(vl, &vldb); switch (ret) { case 0: afs_vnode_apply_update(vl, 
&vldb); vnode->state = AFS_VL_UPDATING; break; case -ENOMEDIUM: vnode->state = AFS_VL_VOLUME_DELETED; break; default: vnode->state = AFS_VL_UNCERTAIN; break; } /* and then reschedule */ _debug("reschedule"); vnode->update_at = get_seconds() + afs_vnode_update_timeout; spin_lock(&server->cb_lock); if (!list_empty(&server->cb_promises)) { /* next update in 10 minutes, but wait at least 1 second more * than the newest record already queued so that we don't spam * the VL server suddenly with lots of requests */ xvnode = list_entry(server->cb_promises.prev, struct afs_vnode, update); if (vnode->update_at <= xvnode->update_at) vnode->update_at = xvnode->update_at + 1; xvnode = list_entry(server->cb_promises.next, struct afs_vnode, update); timeout = xvnode->update_at - now; if (timeout < 0) timeout = 0; } else { timeout = afs_vnode_update_timeout; } list_add_tail(&vnode->update, &server->cb_promises); _debug("timeout %ld", timeout); queue_delayed_work(afs_vnode_update_worker, &afs_vnode_update, timeout * HZ); spin_unlock(&server->cb_lock); afs_put_vnode(vl); } #endif /* * initialise the callback update process */ int __init afs_callback_update_init(void) { afs_callback_update_worker = create_singlethread_workqueue("kafs_callbackd"); return afs_callback_update_worker ? 0 : -ENOMEM; } /* * shut down the callback update process */ void afs_callback_update_kill(void) { destroy_workqueue(afs_callback_update_worker); }
gpl-2.0
1119553797/linux-sunxi
drivers/power/bq27x00_battery.c
4719
21821
/* * BQ27x00 battery driver * * Copyright (C) 2008 Rodolfo Giometti <giometti@linux.it> * Copyright (C) 2008 Eurotech S.p.A. <info@eurotech.it> * Copyright (C) 2010-2011 Lars-Peter Clausen <lars@metafoo.de> * Copyright (C) 2011 Pali Rohár <pali.rohar@gmail.com> * * Based on a previous work by Copyright (C) 2008 Texas Instruments, Inc. * * This package is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. * */ /* * Datasheets: * http://focus.ti.com/docs/prod/folders/print/bq27000.html * http://focus.ti.com/docs/prod/folders/print/bq27500.html */ #include <linux/module.h> #include <linux/param.h> #include <linux/jiffies.h> #include <linux/workqueue.h> #include <linux/delay.h> #include <linux/platform_device.h> #include <linux/power_supply.h> #include <linux/idr.h> #include <linux/i2c.h> #include <linux/slab.h> #include <asm/unaligned.h> #include <linux/power/bq27x00_battery.h> #define DRIVER_VERSION "1.2.0" #define BQ27x00_REG_TEMP 0x06 #define BQ27x00_REG_VOLT 0x08 #define BQ27x00_REG_AI 0x14 #define BQ27x00_REG_FLAGS 0x0A #define BQ27x00_REG_TTE 0x16 #define BQ27x00_REG_TTF 0x18 #define BQ27x00_REG_TTECP 0x26 #define BQ27x00_REG_NAC 0x0C /* Nominal available capacity */ #define BQ27x00_REG_LMD 0x12 /* Last measured discharge */ #define BQ27x00_REG_CYCT 0x2A /* Cycle count total */ #define BQ27x00_REG_AE 0x22 /* Available energy */ #define BQ27000_REG_RSOC 0x0B /* Relative State-of-Charge */ #define BQ27000_REG_ILMD 0x76 /* Initial last measured discharge */ #define BQ27000_FLAG_EDVF BIT(0) /* Final End-of-Discharge-Voltage flag */ #define BQ27000_FLAG_EDV1 BIT(1) /* First End-of-Discharge-Voltage flag */ #define BQ27000_FLAG_CI BIT(4) /* Capacity 
Inaccurate flag */ #define BQ27000_FLAG_FC BIT(5) #define BQ27000_FLAG_CHGS BIT(7) /* Charge state flag */ #define BQ27500_REG_SOC 0x2C #define BQ27500_REG_DCAP 0x3C /* Design capacity */ #define BQ27500_FLAG_DSC BIT(0) #define BQ27500_FLAG_SOCF BIT(1) /* State-of-Charge threshold final */ #define BQ27500_FLAG_SOC1 BIT(2) /* State-of-Charge threshold 1 */ #define BQ27500_FLAG_FC BIT(9) #define BQ27000_RS 20 /* Resistor sense */ struct bq27x00_device_info; struct bq27x00_access_methods { int (*read)(struct bq27x00_device_info *di, u8 reg, bool single); }; enum bq27x00_chip { BQ27000, BQ27500 }; struct bq27x00_reg_cache { int temperature; int time_to_empty; int time_to_empty_avg; int time_to_full; int charge_full; int cycle_count; int capacity; int energy; int flags; }; struct bq27x00_device_info { struct device *dev; int id; enum bq27x00_chip chip; struct bq27x00_reg_cache cache; int charge_design_full; unsigned long last_update; struct delayed_work work; struct power_supply bat; struct bq27x00_access_methods bus; struct mutex lock; }; static enum power_supply_property bq27x00_battery_props[] = { POWER_SUPPLY_PROP_STATUS, POWER_SUPPLY_PROP_PRESENT, POWER_SUPPLY_PROP_VOLTAGE_NOW, POWER_SUPPLY_PROP_CURRENT_NOW, POWER_SUPPLY_PROP_CAPACITY, POWER_SUPPLY_PROP_CAPACITY_LEVEL, POWER_SUPPLY_PROP_TEMP, POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW, POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG, POWER_SUPPLY_PROP_TIME_TO_FULL_NOW, POWER_SUPPLY_PROP_TECHNOLOGY, POWER_SUPPLY_PROP_CHARGE_FULL, POWER_SUPPLY_PROP_CHARGE_NOW, POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN, POWER_SUPPLY_PROP_CYCLE_COUNT, POWER_SUPPLY_PROP_ENERGY_NOW, }; static unsigned int poll_interval = 360; module_param(poll_interval, uint, 0644); MODULE_PARM_DESC(poll_interval, "battery poll interval in seconds - " \ "0 disables polling"); /* * Common code for BQ27x00 devices */ static inline int bq27x00_read(struct bq27x00_device_info *di, u8 reg, bool single) { return di->bus.read(di, reg, single); } /* * Return the battery Relative 
State-of-Charge * Or < 0 if something fails. */ static int bq27x00_battery_read_rsoc(struct bq27x00_device_info *di) { int rsoc; if (di->chip == BQ27500) rsoc = bq27x00_read(di, BQ27500_REG_SOC, false); else rsoc = bq27x00_read(di, BQ27000_REG_RSOC, true); if (rsoc < 0) dev_dbg(di->dev, "error reading relative State-of-Charge\n"); return rsoc; } /* * Return a battery charge value in µAh * Or < 0 if something fails. */ static int bq27x00_battery_read_charge(struct bq27x00_device_info *di, u8 reg) { int charge; charge = bq27x00_read(di, reg, false); if (charge < 0) { dev_dbg(di->dev, "error reading charge register %02x: %d\n", reg, charge); return charge; } if (di->chip == BQ27500) charge *= 1000; else charge = charge * 3570 / BQ27000_RS; return charge; } /* * Return the battery Nominal available capaciy in µAh * Or < 0 if something fails. */ static inline int bq27x00_battery_read_nac(struct bq27x00_device_info *di) { return bq27x00_battery_read_charge(di, BQ27x00_REG_NAC); } /* * Return the battery Last measured discharge in µAh * Or < 0 if something fails. */ static inline int bq27x00_battery_read_lmd(struct bq27x00_device_info *di) { return bq27x00_battery_read_charge(di, BQ27x00_REG_LMD); } /* * Return the battery Initial last measured discharge in µAh * Or < 0 if something fails. */ static int bq27x00_battery_read_ilmd(struct bq27x00_device_info *di) { int ilmd; if (di->chip == BQ27500) ilmd = bq27x00_read(di, BQ27500_REG_DCAP, false); else ilmd = bq27x00_read(di, BQ27000_REG_ILMD, true); if (ilmd < 0) { dev_dbg(di->dev, "error reading initial last measured discharge\n"); return ilmd; } if (di->chip == BQ27500) ilmd *= 1000; else ilmd = ilmd * 256 * 3570 / BQ27000_RS; return ilmd; } /* * Return the battery Available energy in µWh * Or < 0 if something fails. 
*/ static int bq27x00_battery_read_energy(struct bq27x00_device_info *di) { int ae; ae = bq27x00_read(di, BQ27x00_REG_AE, false); if (ae < 0) { dev_dbg(di->dev, "error reading available energy\n"); return ae; } if (di->chip == BQ27500) ae *= 1000; else ae = ae * 29200 / BQ27000_RS; return ae; } /* * Return the battery temperature in tenths of degree Celsius * Or < 0 if something fails. */ static int bq27x00_battery_read_temperature(struct bq27x00_device_info *di) { int temp; temp = bq27x00_read(di, BQ27x00_REG_TEMP, false); if (temp < 0) { dev_err(di->dev, "error reading temperature\n"); return temp; } if (di->chip == BQ27500) temp -= 2731; else temp = ((temp * 5) - 5463) / 2; return temp; } /* * Return the battery Cycle count total * Or < 0 if something fails. */ static int bq27x00_battery_read_cyct(struct bq27x00_device_info *di) { int cyct; cyct = bq27x00_read(di, BQ27x00_REG_CYCT, false); if (cyct < 0) dev_err(di->dev, "error reading cycle count total\n"); return cyct; } /* * Read a time register. * Return < 0 if something fails. */ static int bq27x00_battery_read_time(struct bq27x00_device_info *di, u8 reg) { int tval; tval = bq27x00_read(di, reg, false); if (tval < 0) { dev_dbg(di->dev, "error reading time register %02x: %d\n", reg, tval); return tval; } if (tval == 65535) return -ENODATA; return tval * 60; } static void bq27x00_update(struct bq27x00_device_info *di) { struct bq27x00_reg_cache cache = {0, }; bool is_bq27500 = di->chip == BQ27500; cache.flags = bq27x00_read(di, BQ27x00_REG_FLAGS, !is_bq27500); if (cache.flags >= 0) { if (!is_bq27500 && (cache.flags & BQ27000_FLAG_CI)) { dev_info(di->dev, "battery is not calibrated! 
ignoring capacity values\n"); cache.capacity = -ENODATA; cache.energy = -ENODATA; cache.time_to_empty = -ENODATA; cache.time_to_empty_avg = -ENODATA; cache.time_to_full = -ENODATA; cache.charge_full = -ENODATA; } else { cache.capacity = bq27x00_battery_read_rsoc(di); cache.energy = bq27x00_battery_read_energy(di); cache.time_to_empty = bq27x00_battery_read_time(di, BQ27x00_REG_TTE); cache.time_to_empty_avg = bq27x00_battery_read_time(di, BQ27x00_REG_TTECP); cache.time_to_full = bq27x00_battery_read_time(di, BQ27x00_REG_TTF); cache.charge_full = bq27x00_battery_read_lmd(di); } cache.temperature = bq27x00_battery_read_temperature(di); cache.cycle_count = bq27x00_battery_read_cyct(di); /* We only have to read charge design full once */ if (di->charge_design_full <= 0) di->charge_design_full = bq27x00_battery_read_ilmd(di); } if (memcmp(&di->cache, &cache, sizeof(cache)) != 0) { di->cache = cache; power_supply_changed(&di->bat); } di->last_update = jiffies; } static void bq27x00_battery_poll(struct work_struct *work) { struct bq27x00_device_info *di = container_of(work, struct bq27x00_device_info, work.work); bq27x00_update(di); if (poll_interval > 0) { /* The timer does not have to be accurate. */ set_timer_slack(&di->work.timer, poll_interval * HZ / 4); schedule_delayed_work(&di->work, poll_interval * HZ); } } /* * Return the battery average current in µA * Note that current can be negative signed as well * Or 0 if something fails. 
*/ static int bq27x00_battery_current(struct bq27x00_device_info *di, union power_supply_propval *val) { int curr; int flags; curr = bq27x00_read(di, BQ27x00_REG_AI, false); if (curr < 0) { dev_err(di->dev, "error reading current\n"); return curr; } if (di->chip == BQ27500) { /* bq27500 returns signed value */ val->intval = (int)((s16)curr) * 1000; } else { flags = bq27x00_read(di, BQ27x00_REG_FLAGS, false); if (flags & BQ27000_FLAG_CHGS) { dev_dbg(di->dev, "negative current!\n"); curr = -curr; } val->intval = curr * 3570 / BQ27000_RS; } return 0; } static int bq27x00_battery_status(struct bq27x00_device_info *di, union power_supply_propval *val) { int status; if (di->chip == BQ27500) { if (di->cache.flags & BQ27500_FLAG_FC) status = POWER_SUPPLY_STATUS_FULL; else if (di->cache.flags & BQ27500_FLAG_DSC) status = POWER_SUPPLY_STATUS_DISCHARGING; else status = POWER_SUPPLY_STATUS_CHARGING; } else { if (di->cache.flags & BQ27000_FLAG_FC) status = POWER_SUPPLY_STATUS_FULL; else if (di->cache.flags & BQ27000_FLAG_CHGS) status = POWER_SUPPLY_STATUS_CHARGING; else if (power_supply_am_i_supplied(&di->bat)) status = POWER_SUPPLY_STATUS_NOT_CHARGING; else status = POWER_SUPPLY_STATUS_DISCHARGING; } val->intval = status; return 0; } static int bq27x00_battery_capacity_level(struct bq27x00_device_info *di, union power_supply_propval *val) { int level; if (di->chip == BQ27500) { if (di->cache.flags & BQ27500_FLAG_FC) level = POWER_SUPPLY_CAPACITY_LEVEL_FULL; else if (di->cache.flags & BQ27500_FLAG_SOC1) level = POWER_SUPPLY_CAPACITY_LEVEL_LOW; else if (di->cache.flags & BQ27500_FLAG_SOCF) level = POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL; else level = POWER_SUPPLY_CAPACITY_LEVEL_NORMAL; } else { if (di->cache.flags & BQ27000_FLAG_FC) level = POWER_SUPPLY_CAPACITY_LEVEL_FULL; else if (di->cache.flags & BQ27000_FLAG_EDV1) level = POWER_SUPPLY_CAPACITY_LEVEL_LOW; else if (di->cache.flags & BQ27000_FLAG_EDVF) level = POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL; else level = 
POWER_SUPPLY_CAPACITY_LEVEL_NORMAL; } val->intval = level; return 0; } /* * Return the battery Voltage in milivolts * Or < 0 if something fails. */ static int bq27x00_battery_voltage(struct bq27x00_device_info *di, union power_supply_propval *val) { int volt; volt = bq27x00_read(di, BQ27x00_REG_VOLT, false); if (volt < 0) { dev_err(di->dev, "error reading voltage\n"); return volt; } val->intval = volt * 1000; return 0; } static int bq27x00_simple_value(int value, union power_supply_propval *val) { if (value < 0) return value; val->intval = value; return 0; } #define to_bq27x00_device_info(x) container_of((x), \ struct bq27x00_device_info, bat); static int bq27x00_battery_get_property(struct power_supply *psy, enum power_supply_property psp, union power_supply_propval *val) { int ret = 0; struct bq27x00_device_info *di = to_bq27x00_device_info(psy); mutex_lock(&di->lock); if (time_is_before_jiffies(di->last_update + 5 * HZ)) { cancel_delayed_work_sync(&di->work); bq27x00_battery_poll(&di->work.work); } mutex_unlock(&di->lock); if (psp != POWER_SUPPLY_PROP_PRESENT && di->cache.flags < 0) return -ENODEV; switch (psp) { case POWER_SUPPLY_PROP_STATUS: ret = bq27x00_battery_status(di, val); break; case POWER_SUPPLY_PROP_VOLTAGE_NOW: ret = bq27x00_battery_voltage(di, val); break; case POWER_SUPPLY_PROP_PRESENT: val->intval = di->cache.flags < 0 ? 
0 : 1; break; case POWER_SUPPLY_PROP_CURRENT_NOW: ret = bq27x00_battery_current(di, val); break; case POWER_SUPPLY_PROP_CAPACITY: ret = bq27x00_simple_value(di->cache.capacity, val); break; case POWER_SUPPLY_PROP_CAPACITY_LEVEL: ret = bq27x00_battery_capacity_level(di, val); break; case POWER_SUPPLY_PROP_TEMP: ret = bq27x00_simple_value(di->cache.temperature, val); break; case POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW: ret = bq27x00_simple_value(di->cache.time_to_empty, val); break; case POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG: ret = bq27x00_simple_value(di->cache.time_to_empty_avg, val); break; case POWER_SUPPLY_PROP_TIME_TO_FULL_NOW: ret = bq27x00_simple_value(di->cache.time_to_full, val); break; case POWER_SUPPLY_PROP_TECHNOLOGY: val->intval = POWER_SUPPLY_TECHNOLOGY_LION; break; case POWER_SUPPLY_PROP_CHARGE_NOW: ret = bq27x00_simple_value(bq27x00_battery_read_nac(di), val); break; case POWER_SUPPLY_PROP_CHARGE_FULL: ret = bq27x00_simple_value(di->cache.charge_full, val); break; case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN: ret = bq27x00_simple_value(di->charge_design_full, val); break; case POWER_SUPPLY_PROP_CYCLE_COUNT: ret = bq27x00_simple_value(di->cache.cycle_count, val); break; case POWER_SUPPLY_PROP_ENERGY_NOW: ret = bq27x00_simple_value(di->cache.energy, val); break; default: return -EINVAL; } return ret; } static void bq27x00_external_power_changed(struct power_supply *psy) { struct bq27x00_device_info *di = to_bq27x00_device_info(psy); cancel_delayed_work_sync(&di->work); schedule_delayed_work(&di->work, 0); } static int bq27x00_powersupply_init(struct bq27x00_device_info *di) { int ret; di->bat.type = POWER_SUPPLY_TYPE_BATTERY; di->bat.properties = bq27x00_battery_props; di->bat.num_properties = ARRAY_SIZE(bq27x00_battery_props); di->bat.get_property = bq27x00_battery_get_property; di->bat.external_power_changed = bq27x00_external_power_changed; INIT_DELAYED_WORK(&di->work, bq27x00_battery_poll); mutex_init(&di->lock); ret = power_supply_register(di->dev, 
&di->bat); if (ret) { dev_err(di->dev, "failed to register battery: %d\n", ret); return ret; } dev_info(di->dev, "support ver. %s enabled\n", DRIVER_VERSION); bq27x00_update(di); return 0; } static void bq27x00_powersupply_unregister(struct bq27x00_device_info *di) { /* * power_supply_unregister call bq27x00_battery_get_property which * call bq27x00_battery_poll. * Make sure that bq27x00_battery_poll will not call * schedule_delayed_work again after unregister (which cause OOPS). */ poll_interval = 0; cancel_delayed_work_sync(&di->work); power_supply_unregister(&di->bat); mutex_destroy(&di->lock); } /* i2c specific code */ #ifdef CONFIG_BATTERY_BQ27X00_I2C /* If the system has several batteries we need a different name for each * of them... */ static DEFINE_IDR(battery_id); static DEFINE_MUTEX(battery_mutex); static int bq27x00_read_i2c(struct bq27x00_device_info *di, u8 reg, bool single) { struct i2c_client *client = to_i2c_client(di->dev); struct i2c_msg msg[2]; unsigned char data[2]; int ret; if (!client->adapter) return -ENODEV; msg[0].addr = client->addr; msg[0].flags = 0; msg[0].buf = &reg; msg[0].len = sizeof(reg); msg[1].addr = client->addr; msg[1].flags = I2C_M_RD; msg[1].buf = data; if (single) msg[1].len = 1; else msg[1].len = 2; ret = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg)); if (ret < 0) return ret; if (!single) ret = get_unaligned_le16(data); else ret = data[0]; return ret; } static int bq27x00_battery_probe(struct i2c_client *client, const struct i2c_device_id *id) { char *name; struct bq27x00_device_info *di; int num; int retval = 0; /* Get new ID for the new battery device */ retval = idr_pre_get(&battery_id, GFP_KERNEL); if (retval == 0) return -ENOMEM; mutex_lock(&battery_mutex); retval = idr_get_new(&battery_id, client, &num); mutex_unlock(&battery_mutex); if (retval < 0) return retval; name = kasprintf(GFP_KERNEL, "%s-%d", id->name, num); if (!name) { dev_err(&client->dev, "failed to allocate device name\n"); retval = -ENOMEM; goto 
batt_failed_1; } di = kzalloc(sizeof(*di), GFP_KERNEL); if (!di) { dev_err(&client->dev, "failed to allocate device info data\n"); retval = -ENOMEM; goto batt_failed_2; } di->id = num; di->dev = &client->dev; di->chip = id->driver_data; di->bat.name = name; di->bus.read = &bq27x00_read_i2c; if (bq27x00_powersupply_init(di)) goto batt_failed_3; i2c_set_clientdata(client, di); return 0; batt_failed_3: kfree(di); batt_failed_2: kfree(name); batt_failed_1: mutex_lock(&battery_mutex); idr_remove(&battery_id, num); mutex_unlock(&battery_mutex); return retval; } static int bq27x00_battery_remove(struct i2c_client *client) { struct bq27x00_device_info *di = i2c_get_clientdata(client); bq27x00_powersupply_unregister(di); kfree(di->bat.name); mutex_lock(&battery_mutex); idr_remove(&battery_id, di->id); mutex_unlock(&battery_mutex); kfree(di); return 0; } static const struct i2c_device_id bq27x00_id[] = { { "bq27200", BQ27000 }, /* bq27200 is same as bq27000, but with i2c */ { "bq27500", BQ27500 }, {}, }; MODULE_DEVICE_TABLE(i2c, bq27x00_id); static struct i2c_driver bq27x00_battery_driver = { .driver = { .name = "bq27x00-battery", }, .probe = bq27x00_battery_probe, .remove = bq27x00_battery_remove, .id_table = bq27x00_id, }; static inline int bq27x00_battery_i2c_init(void) { int ret = i2c_add_driver(&bq27x00_battery_driver); if (ret) printk(KERN_ERR "Unable to register BQ27x00 i2c driver\n"); return ret; } static inline void bq27x00_battery_i2c_exit(void) { i2c_del_driver(&bq27x00_battery_driver); } #else static inline int bq27x00_battery_i2c_init(void) { return 0; } static inline void bq27x00_battery_i2c_exit(void) {}; #endif /* platform specific code */ #ifdef CONFIG_BATTERY_BQ27X00_PLATFORM static int bq27000_read_platform(struct bq27x00_device_info *di, u8 reg, bool single) { struct device *dev = di->dev; struct bq27000_platform_data *pdata = dev->platform_data; unsigned int timeout = 3; int upper, lower; int temp; if (!single) { /* Make sure the value has not changed in 
between reading the * lower and the upper part */ upper = pdata->read(dev, reg + 1); do { temp = upper; if (upper < 0) return upper; lower = pdata->read(dev, reg); if (lower < 0) return lower; upper = pdata->read(dev, reg + 1); } while (temp != upper && --timeout); if (timeout == 0) return -EIO; return (upper << 8) | lower; } return pdata->read(dev, reg); } static int __devinit bq27000_battery_probe(struct platform_device *pdev) { struct bq27x00_device_info *di; struct bq27000_platform_data *pdata = pdev->dev.platform_data; int ret; if (!pdata) { dev_err(&pdev->dev, "no platform_data supplied\n"); return -EINVAL; } if (!pdata->read) { dev_err(&pdev->dev, "no hdq read callback supplied\n"); return -EINVAL; } di = kzalloc(sizeof(*di), GFP_KERNEL); if (!di) { dev_err(&pdev->dev, "failed to allocate device info data\n"); return -ENOMEM; } platform_set_drvdata(pdev, di); di->dev = &pdev->dev; di->chip = BQ27000; di->bat.name = pdata->name ?: dev_name(&pdev->dev); di->bus.read = &bq27000_read_platform; ret = bq27x00_powersupply_init(di); if (ret) goto err_free; return 0; err_free: platform_set_drvdata(pdev, NULL); kfree(di); return ret; } static int __devexit bq27000_battery_remove(struct platform_device *pdev) { struct bq27x00_device_info *di = platform_get_drvdata(pdev); bq27x00_powersupply_unregister(di); platform_set_drvdata(pdev, NULL); kfree(di); return 0; } static struct platform_driver bq27000_battery_driver = { .probe = bq27000_battery_probe, .remove = __devexit_p(bq27000_battery_remove), .driver = { .name = "bq27000-battery", .owner = THIS_MODULE, }, }; static inline int bq27x00_battery_platform_init(void) { int ret = platform_driver_register(&bq27000_battery_driver); if (ret) printk(KERN_ERR "Unable to register BQ27000 platform driver\n"); return ret; } static inline void bq27x00_battery_platform_exit(void) { platform_driver_unregister(&bq27000_battery_driver); } #else static inline int bq27x00_battery_platform_init(void) { return 0; } static inline void 
bq27x00_battery_platform_exit(void) {}; #endif /* * Module stuff */ static int __init bq27x00_battery_init(void) { int ret; ret = bq27x00_battery_i2c_init(); if (ret) return ret; ret = bq27x00_battery_platform_init(); if (ret) bq27x00_battery_i2c_exit(); return ret; } module_init(bq27x00_battery_init); static void __exit bq27x00_battery_exit(void) { bq27x00_battery_platform_exit(); bq27x00_battery_i2c_exit(); } module_exit(bq27x00_battery_exit); MODULE_AUTHOR("Rodolfo Giometti <giometti@linux.it>"); MODULE_DESCRIPTION("BQ27x00 battery monitor driver"); MODULE_LICENSE("GPL");
gpl-2.0
MoKee/android_kernel_htc_dlx
net/sched/act_api.c
4719
24047
/* * net/sched/act_api.c Packet action API. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Author: Jamal Hadi Salim * * */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/skbuff.h> #include <linux/init.h> #include <linux/kmod.h> #include <linux/err.h> #include <linux/module.h> #include <net/net_namespace.h> #include <net/sock.h> #include <net/sch_generic.h> #include <net/act_api.h> #include <net/netlink.h> void tcf_hash_destroy(struct tcf_common *p, struct tcf_hashinfo *hinfo) { unsigned int h = tcf_hash(p->tcfc_index, hinfo->hmask); struct tcf_common **p1p; for (p1p = &hinfo->htab[h]; *p1p; p1p = &(*p1p)->tcfc_next) { if (*p1p == p) { write_lock_bh(hinfo->lock); *p1p = p->tcfc_next; write_unlock_bh(hinfo->lock); gen_kill_estimator(&p->tcfc_bstats, &p->tcfc_rate_est); /* * gen_estimator est_timer() might access p->tcfc_lock * or bstats, wait a RCU grace period before freeing p */ kfree_rcu(p, tcfc_rcu); return; } } WARN_ON(1); } EXPORT_SYMBOL(tcf_hash_destroy); int tcf_hash_release(struct tcf_common *p, int bind, struct tcf_hashinfo *hinfo) { int ret = 0; if (p) { if (bind) p->tcfc_bindcnt--; p->tcfc_refcnt--; if (p->tcfc_bindcnt <= 0 && p->tcfc_refcnt <= 0) { tcf_hash_destroy(p, hinfo); ret = 1; } } return ret; } EXPORT_SYMBOL(tcf_hash_release); static int tcf_dump_walker(struct sk_buff *skb, struct netlink_callback *cb, struct tc_action *a, struct tcf_hashinfo *hinfo) { struct tcf_common *p; int err = 0, index = -1, i = 0, s_i = 0, n_i = 0; struct nlattr *nest; read_lock_bh(hinfo->lock); s_i = cb->args[0]; for (i = 0; i < (hinfo->hmask + 1); i++) { p = hinfo->htab[tcf_hash(i, hinfo->hmask)]; for (; p; p = p->tcfc_next) { index++; if (index < s_i) continue; a->priv = 
p; a->order = n_i; nest = nla_nest_start(skb, a->order); if (nest == NULL) goto nla_put_failure; err = tcf_action_dump_1(skb, a, 0, 0); if (err < 0) { index--; nlmsg_trim(skb, nest); goto done; } nla_nest_end(skb, nest); n_i++; if (n_i >= TCA_ACT_MAX_PRIO) goto done; } } done: read_unlock_bh(hinfo->lock); if (n_i) cb->args[0] += n_i; return n_i; nla_put_failure: nla_nest_cancel(skb, nest); goto done; } static int tcf_del_walker(struct sk_buff *skb, struct tc_action *a, struct tcf_hashinfo *hinfo) { struct tcf_common *p, *s_p; struct nlattr *nest; int i = 0, n_i = 0; nest = nla_nest_start(skb, a->order); if (nest == NULL) goto nla_put_failure; NLA_PUT_STRING(skb, TCA_KIND, a->ops->kind); for (i = 0; i < (hinfo->hmask + 1); i++) { p = hinfo->htab[tcf_hash(i, hinfo->hmask)]; while (p != NULL) { s_p = p->tcfc_next; if (ACT_P_DELETED == tcf_hash_release(p, 0, hinfo)) module_put(a->ops->owner); n_i++; p = s_p; } } NLA_PUT_U32(skb, TCA_FCNT, n_i); nla_nest_end(skb, nest); return n_i; nla_put_failure: nla_nest_cancel(skb, nest); return -EINVAL; } int tcf_generic_walker(struct sk_buff *skb, struct netlink_callback *cb, int type, struct tc_action *a) { struct tcf_hashinfo *hinfo = a->ops->hinfo; if (type == RTM_DELACTION) { return tcf_del_walker(skb, a, hinfo); } else if (type == RTM_GETACTION) { return tcf_dump_walker(skb, cb, a, hinfo); } else { WARN(1, "tcf_generic_walker: unknown action %d\n", type); return -EINVAL; } } EXPORT_SYMBOL(tcf_generic_walker); struct tcf_common *tcf_hash_lookup(u32 index, struct tcf_hashinfo *hinfo) { struct tcf_common *p; read_lock_bh(hinfo->lock); for (p = hinfo->htab[tcf_hash(index, hinfo->hmask)]; p; p = p->tcfc_next) { if (p->tcfc_index == index) break; } read_unlock_bh(hinfo->lock); return p; } EXPORT_SYMBOL(tcf_hash_lookup); u32 tcf_hash_new_index(u32 *idx_gen, struct tcf_hashinfo *hinfo) { u32 val = *idx_gen; do { if (++val == 0) val = 1; } while (tcf_hash_lookup(val, hinfo)); return (*idx_gen = val); } 
EXPORT_SYMBOL(tcf_hash_new_index); int tcf_hash_search(struct tc_action *a, u32 index) { struct tcf_hashinfo *hinfo = a->ops->hinfo; struct tcf_common *p = tcf_hash_lookup(index, hinfo); if (p) { a->priv = p; return 1; } return 0; } EXPORT_SYMBOL(tcf_hash_search); struct tcf_common *tcf_hash_check(u32 index, struct tc_action *a, int bind, struct tcf_hashinfo *hinfo) { struct tcf_common *p = NULL; if (index && (p = tcf_hash_lookup(index, hinfo)) != NULL) { if (bind) p->tcfc_bindcnt++; p->tcfc_refcnt++; a->priv = p; } return p; } EXPORT_SYMBOL(tcf_hash_check); struct tcf_common *tcf_hash_create(u32 index, struct nlattr *est, struct tc_action *a, int size, int bind, u32 *idx_gen, struct tcf_hashinfo *hinfo) { struct tcf_common *p = kzalloc(size, GFP_KERNEL); if (unlikely(!p)) return ERR_PTR(-ENOMEM); p->tcfc_refcnt = 1; if (bind) p->tcfc_bindcnt = 1; spin_lock_init(&p->tcfc_lock); p->tcfc_index = index ? index : tcf_hash_new_index(idx_gen, hinfo); p->tcfc_tm.install = jiffies; p->tcfc_tm.lastuse = jiffies; if (est) { int err = gen_new_estimator(&p->tcfc_bstats, &p->tcfc_rate_est, &p->tcfc_lock, est); if (err) { kfree(p); return ERR_PTR(err); } } a->priv = (void *) p; return p; } EXPORT_SYMBOL(tcf_hash_create); void tcf_hash_insert(struct tcf_common *p, struct tcf_hashinfo *hinfo) { unsigned int h = tcf_hash(p->tcfc_index, hinfo->hmask); write_lock_bh(hinfo->lock); p->tcfc_next = hinfo->htab[h]; hinfo->htab[h] = p; write_unlock_bh(hinfo->lock); } EXPORT_SYMBOL(tcf_hash_insert); static struct tc_action_ops *act_base = NULL; static DEFINE_RWLOCK(act_mod_lock); int tcf_register_action(struct tc_action_ops *act) { struct tc_action_ops *a, **ap; write_lock(&act_mod_lock); for (ap = &act_base; (a = *ap) != NULL; ap = &a->next) { if (act->type == a->type || (strcmp(act->kind, a->kind) == 0)) { write_unlock(&act_mod_lock); return -EEXIST; } } act->next = NULL; *ap = act; write_unlock(&act_mod_lock); return 0; } EXPORT_SYMBOL(tcf_register_action); int 
tcf_unregister_action(struct tc_action_ops *act) { struct tc_action_ops *a, **ap; int err = -ENOENT; write_lock(&act_mod_lock); for (ap = &act_base; (a = *ap) != NULL; ap = &a->next) if (a == act) break; if (a) { *ap = a->next; a->next = NULL; err = 0; } write_unlock(&act_mod_lock); return err; } EXPORT_SYMBOL(tcf_unregister_action); /* lookup by name */ static struct tc_action_ops *tc_lookup_action_n(char *kind) { struct tc_action_ops *a = NULL; if (kind) { read_lock(&act_mod_lock); for (a = act_base; a; a = a->next) { if (strcmp(kind, a->kind) == 0) { if (!try_module_get(a->owner)) { read_unlock(&act_mod_lock); return NULL; } break; } } read_unlock(&act_mod_lock); } return a; } /* lookup by nlattr */ static struct tc_action_ops *tc_lookup_action(struct nlattr *kind) { struct tc_action_ops *a = NULL; if (kind) { read_lock(&act_mod_lock); for (a = act_base; a; a = a->next) { if (nla_strcmp(kind, a->kind) == 0) { if (!try_module_get(a->owner)) { read_unlock(&act_mod_lock); return NULL; } break; } } read_unlock(&act_mod_lock); } return a; } #if 0 /* lookup by id */ static struct tc_action_ops *tc_lookup_action_id(u32 type) { struct tc_action_ops *a = NULL; if (type) { read_lock(&act_mod_lock); for (a = act_base; a; a = a->next) { if (a->type == type) { if (!try_module_get(a->owner)) { read_unlock(&act_mod_lock); return NULL; } break; } } read_unlock(&act_mod_lock); } return a; } #endif int tcf_action_exec(struct sk_buff *skb, const struct tc_action *act, struct tcf_result *res) { const struct tc_action *a; int ret = -1; if (skb->tc_verd & TC_NCLS) { skb->tc_verd = CLR_TC_NCLS(skb->tc_verd); ret = TC_ACT_OK; goto exec_done; } while ((a = act) != NULL) { repeat: if (a->ops && a->ops->act) { ret = a->ops->act(skb, a, res); if (TC_MUNGED & skb->tc_verd) { /* copied already, allow trampling */ skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd); skb->tc_verd = CLR_TC_MUNGED(skb->tc_verd); } if (ret == TC_ACT_REPEAT) goto repeat; /* we need a ttl - JHS */ if (ret != TC_ACT_PIPE) 
goto exec_done; } act = a->next; } exec_done: return ret; } EXPORT_SYMBOL(tcf_action_exec); void tcf_action_destroy(struct tc_action *act, int bind) { struct tc_action *a; for (a = act; a; a = act) { if (a->ops && a->ops->cleanup) { if (a->ops->cleanup(a, bind) == ACT_P_DELETED) module_put(a->ops->owner); act = act->next; kfree(a); } else { /*FIXME: Remove later - catch insertion bugs*/ WARN(1, "tcf_action_destroy: BUG? destroying NULL ops\n"); act = act->next; kfree(a); } } } int tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int bind, int ref) { int err = -EINVAL; if (a->ops == NULL || a->ops->dump == NULL) return err; return a->ops->dump(skb, a, bind, ref); } int tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref) { int err = -EINVAL; unsigned char *b = skb_tail_pointer(skb); struct nlattr *nest; if (a->ops == NULL || a->ops->dump == NULL) return err; NLA_PUT_STRING(skb, TCA_KIND, a->ops->kind); if (tcf_action_copy_stats(skb, a, 0)) goto nla_put_failure; nest = nla_nest_start(skb, TCA_OPTIONS); if (nest == NULL) goto nla_put_failure; err = tcf_action_dump_old(skb, a, bind, ref); if (err > 0) { nla_nest_end(skb, nest); return err; } nla_put_failure: nlmsg_trim(skb, b); return -1; } EXPORT_SYMBOL(tcf_action_dump_1); int tcf_action_dump(struct sk_buff *skb, struct tc_action *act, int bind, int ref) { struct tc_action *a; int err = -EINVAL; struct nlattr *nest; while ((a = act) != NULL) { act = a->next; nest = nla_nest_start(skb, a->order); if (nest == NULL) goto nla_put_failure; err = tcf_action_dump_1(skb, a, bind, ref); if (err < 0) goto errout; nla_nest_end(skb, nest); } return 0; nla_put_failure: err = -EINVAL; errout: nla_nest_cancel(skb, nest); return err; } struct tc_action *tcf_action_init_1(struct nlattr *nla, struct nlattr *est, char *name, int ovr, int bind) { struct tc_action *a; struct tc_action_ops *a_o; char act_name[IFNAMSIZ]; struct nlattr *tb[TCA_ACT_MAX + 1]; struct nlattr *kind; int err; if (name == 
NULL) { err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL); if (err < 0) goto err_out; err = -EINVAL; kind = tb[TCA_ACT_KIND]; if (kind == NULL) goto err_out; if (nla_strlcpy(act_name, kind, IFNAMSIZ) >= IFNAMSIZ) goto err_out; } else { err = -EINVAL; if (strlcpy(act_name, name, IFNAMSIZ) >= IFNAMSIZ) goto err_out; } a_o = tc_lookup_action_n(act_name); if (a_o == NULL) { #ifdef CONFIG_MODULES rtnl_unlock(); request_module("act_%s", act_name); rtnl_lock(); a_o = tc_lookup_action_n(act_name); /* We dropped the RTNL semaphore in order to * perform the module load. So, even if we * succeeded in loading the module we have to * tell the caller to replay the request. We * indicate this using -EAGAIN. */ if (a_o != NULL) { err = -EAGAIN; goto err_mod; } #endif err = -ENOENT; goto err_out; } err = -ENOMEM; a = kzalloc(sizeof(*a), GFP_KERNEL); if (a == NULL) goto err_mod; /* backward compatibility for policer */ if (name == NULL) err = a_o->init(tb[TCA_ACT_OPTIONS], est, a, ovr, bind); else err = a_o->init(nla, est, a, ovr, bind); if (err < 0) goto err_free; /* module count goes up only when brand new policy is created * if it exists and is only bound to in a_o->init() then * ACT_P_CREATED is not returned (a zero is). 
*/ if (err != ACT_P_CREATED) module_put(a_o->owner); a->ops = a_o; return a; err_free: kfree(a); err_mod: module_put(a_o->owner); err_out: return ERR_PTR(err); } struct tc_action *tcf_action_init(struct nlattr *nla, struct nlattr *est, char *name, int ovr, int bind) { struct nlattr *tb[TCA_ACT_MAX_PRIO + 1]; struct tc_action *head = NULL, *act, *act_prev = NULL; int err; int i; err = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL); if (err < 0) return ERR_PTR(err); for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) { act = tcf_action_init_1(tb[i], est, name, ovr, bind); if (IS_ERR(act)) goto err; act->order = i; if (head == NULL) head = act; else act_prev->next = act; act_prev = act; } return head; err: if (head != NULL) tcf_action_destroy(head, bind); return act; } int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *a, int compat_mode) { int err = 0; struct gnet_dump d; struct tcf_act_hdr *h = a->priv; if (h == NULL) goto errout; /* compat_mode being true specifies a call that is supposed * to add additional backward compatibility statistic TLVs. 
*/ if (compat_mode) { if (a->type == TCA_OLD_COMPAT) err = gnet_stats_start_copy_compat(skb, 0, TCA_STATS, TCA_XSTATS, &h->tcf_lock, &d); else return 0; } else err = gnet_stats_start_copy(skb, TCA_ACT_STATS, &h->tcf_lock, &d); if (err < 0) goto errout; if (a->ops != NULL && a->ops->get_stats != NULL) if (a->ops->get_stats(skb, a) < 0) goto errout; if (gnet_stats_copy_basic(&d, &h->tcf_bstats) < 0 || gnet_stats_copy_rate_est(&d, &h->tcf_bstats, &h->tcf_rate_est) < 0 || gnet_stats_copy_queue(&d, &h->tcf_qstats) < 0) goto errout; if (gnet_stats_finish_copy(&d) < 0) goto errout; return 0; errout: return -1; } static int tca_get_fill(struct sk_buff *skb, struct tc_action *a, u32 pid, u32 seq, u16 flags, int event, int bind, int ref) { struct tcamsg *t; struct nlmsghdr *nlh; unsigned char *b = skb_tail_pointer(skb); struct nlattr *nest; nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*t), flags); t = NLMSG_DATA(nlh); t->tca_family = AF_UNSPEC; t->tca__pad1 = 0; t->tca__pad2 = 0; nest = nla_nest_start(skb, TCA_ACT_TAB); if (nest == NULL) goto nla_put_failure; if (tcf_action_dump(skb, a, bind, ref) < 0) goto nla_put_failure; nla_nest_end(skb, nest); nlh->nlmsg_len = skb_tail_pointer(skb) - b; return skb->len; nla_put_failure: nlmsg_failure: nlmsg_trim(skb, b); return -1; } static int act_get_notify(struct net *net, u32 pid, struct nlmsghdr *n, struct tc_action *a, int event) { struct sk_buff *skb; skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); if (!skb) return -ENOBUFS; if (tca_get_fill(skb, a, pid, n->nlmsg_seq, 0, event, 0, 0) <= 0) { kfree_skb(skb); return -EINVAL; } return rtnl_unicast(skb, net, pid); } static struct tc_action * tcf_action_get_1(struct nlattr *nla, struct nlmsghdr *n, u32 pid) { struct nlattr *tb[TCA_ACT_MAX + 1]; struct tc_action *a; int index; int err; err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL); if (err < 0) goto err_out; err = -EINVAL; if (tb[TCA_ACT_INDEX] == NULL || nla_len(tb[TCA_ACT_INDEX]) < sizeof(index)) goto err_out; index = 
nla_get_u32(tb[TCA_ACT_INDEX]); err = -ENOMEM; a = kzalloc(sizeof(struct tc_action), GFP_KERNEL); if (a == NULL) goto err_out; err = -EINVAL; a->ops = tc_lookup_action(tb[TCA_ACT_KIND]); if (a->ops == NULL) goto err_free; if (a->ops->lookup == NULL) goto err_mod; err = -ENOENT; if (a->ops->lookup(a, index) == 0) goto err_mod; module_put(a->ops->owner); return a; err_mod: module_put(a->ops->owner); err_free: kfree(a); err_out: return ERR_PTR(err); } static void cleanup_a(struct tc_action *act) { struct tc_action *a; for (a = act; a; a = act) { act = a->next; kfree(a); } } static struct tc_action *create_a(int i) { struct tc_action *act; act = kzalloc(sizeof(*act), GFP_KERNEL); if (act == NULL) { pr_debug("create_a: failed to alloc!\n"); return NULL; } act->order = i; return act; } static int tca_action_flush(struct net *net, struct nlattr *nla, struct nlmsghdr *n, u32 pid) { struct sk_buff *skb; unsigned char *b; struct nlmsghdr *nlh; struct tcamsg *t; struct netlink_callback dcb; struct nlattr *nest; struct nlattr *tb[TCA_ACT_MAX + 1]; struct nlattr *kind; struct tc_action *a = create_a(0); int err = -ENOMEM; if (a == NULL) { pr_debug("tca_action_flush: couldnt create tc_action\n"); return err; } skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); if (!skb) { pr_debug("tca_action_flush: failed skb alloc\n"); kfree(a); return err; } b = skb_tail_pointer(skb); err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL); if (err < 0) goto err_out; err = -EINVAL; kind = tb[TCA_ACT_KIND]; a->ops = tc_lookup_action(kind); if (a->ops == NULL) goto err_out; nlh = NLMSG_PUT(skb, pid, n->nlmsg_seq, RTM_DELACTION, sizeof(*t)); t = NLMSG_DATA(nlh); t->tca_family = AF_UNSPEC; t->tca__pad1 = 0; t->tca__pad2 = 0; nest = nla_nest_start(skb, TCA_ACT_TAB); if (nest == NULL) goto nla_put_failure; err = a->ops->walk(skb, &dcb, RTM_DELACTION, a); if (err < 0) goto nla_put_failure; if (err == 0) goto noflush_out; nla_nest_end(skb, nest); nlh->nlmsg_len = skb_tail_pointer(skb) - b; nlh->nlmsg_flags |= 
NLM_F_ROOT; module_put(a->ops->owner); kfree(a); err = rtnetlink_send(skb, net, pid, RTNLGRP_TC, n->nlmsg_flags & NLM_F_ECHO); if (err > 0) return 0; return err; nla_put_failure: nlmsg_failure: module_put(a->ops->owner); err_out: noflush_out: kfree_skb(skb); kfree(a); return err; } static int tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n, u32 pid, int event) { int i, ret; struct nlattr *tb[TCA_ACT_MAX_PRIO + 1]; struct tc_action *head = NULL, *act, *act_prev = NULL; ret = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL); if (ret < 0) return ret; if (event == RTM_DELACTION && n->nlmsg_flags & NLM_F_ROOT) { if (tb[1] != NULL) return tca_action_flush(net, tb[1], n, pid); else return -EINVAL; } for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) { act = tcf_action_get_1(tb[i], n, pid); if (IS_ERR(act)) { ret = PTR_ERR(act); goto err; } act->order = i; if (head == NULL) head = act; else act_prev->next = act; act_prev = act; } if (event == RTM_GETACTION) ret = act_get_notify(net, pid, n, head, event); else { /* delete */ struct sk_buff *skb; skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); if (!skb) { ret = -ENOBUFS; goto err; } if (tca_get_fill(skb, head, pid, n->nlmsg_seq, 0, event, 0, 1) <= 0) { kfree_skb(skb); ret = -EINVAL; goto err; } /* now do the delete */ tcf_action_destroy(head, 0); ret = rtnetlink_send(skb, net, pid, RTNLGRP_TC, n->nlmsg_flags & NLM_F_ECHO); if (ret > 0) return 0; return ret; } err: cleanup_a(head); return ret; } static int tcf_add_notify(struct net *net, struct tc_action *a, u32 pid, u32 seq, int event, u16 flags) { struct tcamsg *t; struct nlmsghdr *nlh; struct sk_buff *skb; struct nlattr *nest; unsigned char *b; int err = 0; skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); if (!skb) return -ENOBUFS; b = skb_tail_pointer(skb); nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*t), flags); t = NLMSG_DATA(nlh); t->tca_family = AF_UNSPEC; t->tca__pad1 = 0; t->tca__pad2 = 0; nest = nla_nest_start(skb, TCA_ACT_TAB); if (nest == NULL) 
goto nla_put_failure; if (tcf_action_dump(skb, a, 0, 0) < 0) goto nla_put_failure; nla_nest_end(skb, nest); nlh->nlmsg_len = skb_tail_pointer(skb) - b; NETLINK_CB(skb).dst_group = RTNLGRP_TC; err = rtnetlink_send(skb, net, pid, RTNLGRP_TC, flags & NLM_F_ECHO); if (err > 0) err = 0; return err; nla_put_failure: nlmsg_failure: kfree_skb(skb); return -1; } static int tcf_action_add(struct net *net, struct nlattr *nla, struct nlmsghdr *n, u32 pid, int ovr) { int ret = 0; struct tc_action *act; struct tc_action *a; u32 seq = n->nlmsg_seq; act = tcf_action_init(nla, NULL, NULL, ovr, 0); if (act == NULL) goto done; if (IS_ERR(act)) { ret = PTR_ERR(act); goto done; } /* dump then free all the actions after update; inserted policy * stays intact */ ret = tcf_add_notify(net, act, pid, seq, RTM_NEWACTION, n->nlmsg_flags); for (a = act; a; a = act) { act = a->next; kfree(a); } done: return ret; } static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n, void *arg) { struct net *net = sock_net(skb->sk); struct nlattr *tca[TCA_ACT_MAX + 1]; u32 pid = skb ? 
NETLINK_CB(skb).pid : 0; int ret = 0, ovr = 0; ret = nlmsg_parse(n, sizeof(struct tcamsg), tca, TCA_ACT_MAX, NULL); if (ret < 0) return ret; if (tca[TCA_ACT_TAB] == NULL) { pr_notice("tc_ctl_action: received NO action attribs\n"); return -EINVAL; } /* n->nlmsg_flags & NLM_F_CREATE */ switch (n->nlmsg_type) { case RTM_NEWACTION: /* we are going to assume all other flags * imply create only if it doesn't exist * Note that CREATE | EXCL implies that * but since we want avoid ambiguity (eg when flags * is zero) then just set this */ if (n->nlmsg_flags & NLM_F_REPLACE) ovr = 1; replay: ret = tcf_action_add(net, tca[TCA_ACT_TAB], n, pid, ovr); if (ret == -EAGAIN) goto replay; break; case RTM_DELACTION: ret = tca_action_gd(net, tca[TCA_ACT_TAB], n, pid, RTM_DELACTION); break; case RTM_GETACTION: ret = tca_action_gd(net, tca[TCA_ACT_TAB], n, pid, RTM_GETACTION); break; default: BUG(); } return ret; } static struct nlattr * find_dump_kind(const struct nlmsghdr *n) { struct nlattr *tb1, *tb2[TCA_ACT_MAX + 1]; struct nlattr *tb[TCA_ACT_MAX_PRIO + 1]; struct nlattr *nla[TCAA_MAX + 1]; struct nlattr *kind; if (nlmsg_parse(n, sizeof(struct tcamsg), nla, TCAA_MAX, NULL) < 0) return NULL; tb1 = nla[TCA_ACT_TAB]; if (tb1 == NULL) return NULL; if (nla_parse(tb, TCA_ACT_MAX_PRIO, nla_data(tb1), NLMSG_ALIGN(nla_len(tb1)), NULL) < 0) return NULL; if (tb[1] == NULL) return NULL; if (nla_parse(tb2, TCA_ACT_MAX, nla_data(tb[1]), nla_len(tb[1]), NULL) < 0) return NULL; kind = tb2[TCA_ACT_KIND]; return kind; } static int tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb) { struct nlmsghdr *nlh; unsigned char *b = skb_tail_pointer(skb); struct nlattr *nest; struct tc_action_ops *a_o; struct tc_action a; int ret = 0; struct tcamsg *t = (struct tcamsg *) NLMSG_DATA(cb->nlh); struct nlattr *kind = find_dump_kind(cb->nlh); if (kind == NULL) { pr_info("tc_dump_action: action bad kind\n"); return 0; } a_o = tc_lookup_action(kind); if (a_o == NULL) return 0; memset(&a, 0, 
sizeof(struct tc_action)); a.ops = a_o; if (a_o->walk == NULL) { WARN(1, "tc_dump_action: %s !capable of dumping table\n", a_o->kind); goto nla_put_failure; } nlh = NLMSG_PUT(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, cb->nlh->nlmsg_type, sizeof(*t)); t = NLMSG_DATA(nlh); t->tca_family = AF_UNSPEC; t->tca__pad1 = 0; t->tca__pad2 = 0; nest = nla_nest_start(skb, TCA_ACT_TAB); if (nest == NULL) goto nla_put_failure; ret = a_o->walk(skb, cb, RTM_GETACTION, &a); if (ret < 0) goto nla_put_failure; if (ret > 0) { nla_nest_end(skb, nest); ret = skb->len; } else nla_nest_cancel(skb, nest); nlh->nlmsg_len = skb_tail_pointer(skb) - b; if (NETLINK_CB(cb->skb).pid && ret) nlh->nlmsg_flags |= NLM_F_MULTI; module_put(a_o->owner); return skb->len; nla_put_failure: nlmsg_failure: module_put(a_o->owner); nlmsg_trim(skb, b); return skb->len; } static int __init tc_action_init(void) { rtnl_register(PF_UNSPEC, RTM_NEWACTION, tc_ctl_action, NULL, NULL); rtnl_register(PF_UNSPEC, RTM_DELACTION, tc_ctl_action, NULL, NULL); rtnl_register(PF_UNSPEC, RTM_GETACTION, tc_ctl_action, tc_dump_action, NULL); return 0; } subsys_initcall(tc_action_init);
gpl-2.0
morisbartyno/android_kernel_samsung_millet1
arch/x86/platform/uv/uv_time.c
4975
10455
/*
 * SGI RTC clock/timer routines.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Copyright (c) 2009 Silicon Graphics, Inc.  All Rights Reserved.
 * Copyright (c) Dimitri Sivanich
 */
#include <linux/clockchips.h>
#include <linux/slab.h>

#include <asm/uv/uv_mmrs.h>
#include <asm/uv/uv_hub.h>
#include <asm/uv/bios.h>
#include <asm/uv/uv.h>
#include <asm/apic.h>
#include <asm/cpu.h>

#define RTC_NAME		"sgi_rtc"

static cycle_t uv_read_rtc(struct clocksource *cs);
static int uv_rtc_next_event(unsigned long, struct clock_event_device *);
static void uv_rtc_timer_setup(enum clock_event_mode,
			       struct clock_event_device *);

/* The UV RTC exposed as a free-running clocksource. */
static struct clocksource clocksource_uv = {
	.name		= RTC_NAME,
	.rating		= 299,
	.read		= uv_read_rtc,
	.mask		= (cycle_t)UVH_RTC_REAL_TIME_CLOCK_MASK,
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};

/* Template copied into each cpu's clockevent device. */
static struct clock_event_device clock_event_device_uv = {
	.name		= RTC_NAME,
	.features	= CLOCK_EVT_FEAT_ONESHOT,
	.shift		= 20,
	.rating		= 400,
	.irq		= -1,
	.set_next_event	= uv_rtc_next_event,
	.set_mode	= uv_rtc_timer_setup,
	.event_handler	= NULL,
};

static DEFINE_PER_CPU(struct clock_event_device, cpu_ced);

/* There is one of these allocated per node */
struct uv_rtc_timer_head {
	spinlock_t	lock;
	/* next cpu waiting for timer, local node relative: */
	int		next_cpu;
	/* number of cpus on this node: */
	int		ncpus;
	struct {
		int	lcpu;		/* systemwide logical cpu number */
		u64	expires;	/* next timer expiration for this cpu */
	} cpu[1];
};

/*
 * Access to uv_rtc_timer_head via blade id.
 */
static struct uv_rtc_timer_head		**blade_info __read_mostly;

static int				uv_rtc_evt_enable;

/*
 * Hardware interface routines
 */

/* Send IPIs to another node */
static void uv_rtc_send_IPI(int cpu)
{
	unsigned long apicid, val;
	int pnode;

	apicid = cpu_physical_id(cpu);
	pnode = uv_apicid_to_pnode(apicid);
	apicid |= uv_apicid_hibits;
	val = (1UL << UVH_IPI_INT_SEND_SHFT) |
	      (apicid << UVH_IPI_INT_APIC_ID_SHFT) |
	      (X86_PLATFORM_IPI_VECTOR << UVH_IPI_INT_VECTOR_SHFT);

	uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
}

/* Check for an RTC interrupt pending */
static int uv_intr_pending(int pnode)
{
	if (is_uv1_hub())
		return uv_read_global_mmr64(pnode, UVH_EVENT_OCCURRED0) &
			UV1H_EVENT_OCCURRED0_RTC1_MASK;
	else
		return uv_read_global_mmr64(pnode, UV2H_EVENT_OCCURRED2) &
			UV2H_EVENT_OCCURRED2_RTC_1_MASK;
}

/* Setup interrupt and return non-zero if early expiration occurred. */
static int uv_setup_intr(int cpu, u64 expires)
{
	u64 val;
	unsigned long apicid = cpu_physical_id(cpu) | uv_apicid_hibits;
	int pnode = uv_cpu_to_pnode(cpu);

	uv_write_global_mmr64(pnode, UVH_RTC1_INT_CONFIG,
			      UVH_RTC1_INT_CONFIG_M_MASK);
	uv_write_global_mmr64(pnode, UVH_INT_CMPB, -1L);

	if (is_uv1_hub())
		uv_write_global_mmr64(pnode, UVH_EVENT_OCCURRED0_ALIAS,
				      UV1H_EVENT_OCCURRED0_RTC1_MASK);
	else
		uv_write_global_mmr64(pnode, UV2H_EVENT_OCCURRED2_ALIAS,
				      UV2H_EVENT_OCCURRED2_RTC_1_MASK);

	val = (X86_PLATFORM_IPI_VECTOR << UVH_RTC1_INT_CONFIG_VECTOR_SHFT) |
		((u64)apicid << UVH_RTC1_INT_CONFIG_APIC_ID_SHFT);

	/* Set configuration */
	uv_write_global_mmr64(pnode, UVH_RTC1_INT_CONFIG, val);
	/* Initialize comparator value */
	uv_write_global_mmr64(pnode, UVH_INT_CMPB, expires);

	if (uv_read_rtc(NULL) <= expires)
		return 0;

	return !uv_intr_pending(pnode);
}

/*
 * Per-cpu timer tracking routines
 */

static __init void uv_rtc_deallocate_timers(void)
{
	int bid;

	for_each_possible_blade(bid) {
		kfree(blade_info[bid]);
	}
	kfree(blade_info);
}

/* Allocate per-node list of cpu timer expiration times. */
static __init int uv_rtc_allocate_timers(void)
{
	int cpu;

	/*
	 * kzalloc so that blade slots never touched below stay NULL,
	 * which uv_rtc_deallocate_timers() relies on (kfree(NULL) is ok).
	 */
	blade_info = kzalloc(uv_possible_blades * sizeof(void *), GFP_KERNEL);
	if (!blade_info)
		return -ENOMEM;

	for_each_present_cpu(cpu) {
		int nid = cpu_to_node(cpu);
		int bid = uv_cpu_to_blade_id(cpu);
		int bcpu = uv_cpu_hub_info(cpu)->blade_processor_id;
		struct uv_rtc_timer_head *head = blade_info[bid];

		if (!head) {
			int c;

			head = kmalloc_node(sizeof(struct uv_rtc_timer_head) +
				(uv_blade_nr_possible_cpus(bid) *
					2 * sizeof(u64)),
				GFP_KERNEL, nid);
			if (!head) {
				uv_rtc_deallocate_timers();
				return -ENOMEM;
			}
			spin_lock_init(&head->lock);
			head->ncpus = uv_blade_nr_possible_cpus(bid);
			head->next_cpu = -1;
			/*
			 * Mark every slot idle up front.  The head is sized
			 * for all *possible* cpus on the blade, but the loop
			 * below only writes the slots of *present* cpus.
			 * uv_rtc_find_next_timer() scans all ncpus entries,
			 * so an uninitialized 'expires' would look like a
			 * pending timer on a cpu that was never armed.
			 */
			for (c = 0; c < head->ncpus; c++) {
				head->cpu[c].lcpu = -1;
				head->cpu[c].expires = ULLONG_MAX;
			}
			blade_info[bid] = head;
		}

		head->cpu[bcpu].lcpu = cpu;
		head->cpu[bcpu].expires = ULLONG_MAX;
	}

	return 0;
}

/* Find and set the next expiring timer.  Caller must hold head->lock. */
static void uv_rtc_find_next_timer(struct uv_rtc_timer_head *head, int pnode)
{
	u64 lowest = ULLONG_MAX;
	int c, bcpu = -1;

	head->next_cpu = -1;
	for (c = 0; c < head->ncpus; c++) {
		u64 exp = head->cpu[c].expires;
		if (exp < lowest) {
			bcpu = c;
			lowest = exp;
		}
	}
	if (bcpu >= 0) {
		head->next_cpu = bcpu;
		c = head->cpu[bcpu].lcpu;
		if (uv_setup_intr(c, lowest))
			/* If we didn't set it up in time, trigger */
			uv_rtc_send_IPI(c);
	} else {
		uv_write_global_mmr64(pnode, UVH_RTC1_INT_CONFIG,
				      UVH_RTC1_INT_CONFIG_M_MASK);
	}
}

/*
 * Set expiration time for current cpu.
 *
 * Returns 1 if we missed the expiration time.
 */
static int uv_rtc_set_timer(int cpu, u64 expires)
{
	int pnode = uv_cpu_to_pnode(cpu);
	int bid = uv_cpu_to_blade_id(cpu);
	struct uv_rtc_timer_head *head = blade_info[bid];
	int bcpu = uv_cpu_hub_info(cpu)->blade_processor_id;
	u64 *t = &head->cpu[bcpu].expires;
	unsigned long flags;
	int next_cpu;

	spin_lock_irqsave(&head->lock, flags);

	next_cpu = head->next_cpu;
	*t = expires;

	/* Will this one be next to go off? */
	if (next_cpu < 0 || bcpu == next_cpu ||
			expires < head->cpu[next_cpu].expires) {
		head->next_cpu = bcpu;
		if (uv_setup_intr(cpu, expires)) {
			/* Already expired: rearm for the next pending cpu. */
			*t = ULLONG_MAX;
			uv_rtc_find_next_timer(head, pnode);
			spin_unlock_irqrestore(&head->lock, flags);
			return -ETIME;
		}
	}

	spin_unlock_irqrestore(&head->lock, flags);
	return 0;
}

/*
 * Unset expiration time for current cpu.
 *
 * Returns 1 if this timer was pending.
 */
static int uv_rtc_unset_timer(int cpu, int force)
{
	int pnode = uv_cpu_to_pnode(cpu);
	int bid = uv_cpu_to_blade_id(cpu);
	struct uv_rtc_timer_head *head = blade_info[bid];
	int bcpu = uv_cpu_hub_info(cpu)->blade_processor_id;
	u64 *t = &head->cpu[bcpu].expires;
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&head->lock, flags);

	if ((head->next_cpu == bcpu && uv_read_rtc(NULL) >= *t) || force)
		rc = 1;

	if (rc) {
		*t = ULLONG_MAX;
		/* Was the hardware setup for this timer? */
		if (head->next_cpu == bcpu)
			uv_rtc_find_next_timer(head, pnode);
	}

	spin_unlock_irqrestore(&head->lock, flags);

	return rc;
}

/*
 * Kernel interface routines.
 */

/*
 * Read the RTC.
 *
 * Starting with HUB rev 2.0, the UV RTC register is replicated across all
 * cachelines of it's own page.  This allows faster simultaneous reads
 * from a given socket.
 */
static cycle_t uv_read_rtc(struct clocksource *cs)
{
	unsigned long offset;

	if (uv_get_min_hub_revision_id() == 1)
		offset = 0;
	else
		offset = (uv_blade_processor_id() * L1_CACHE_BYTES) % PAGE_SIZE;

	return (cycle_t)uv_read_local_mmr(UVH_RTC | offset);
}

/*
 * Program the next event, relative to now
 */
static int uv_rtc_next_event(unsigned long delta,
			     struct clock_event_device *ced)
{
	int ced_cpu = cpumask_first(ced->cpumask);

	return uv_rtc_set_timer(ced_cpu, delta + uv_read_rtc(NULL));
}

/*
 * Setup the RTC timer in oneshot mode
 */
static void uv_rtc_timer_setup(enum clock_event_mode mode,
			       struct clock_event_device *evt)
{
	int ced_cpu = cpumask_first(evt->cpumask);

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
	case CLOCK_EVT_MODE_ONESHOT:
	case CLOCK_EVT_MODE_RESUME:
		/* Nothing to do here yet */
		break;
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		uv_rtc_unset_timer(ced_cpu, 1);
		break;
	}
}

static void uv_rtc_interrupt(void)
{
	int cpu = smp_processor_id();
	struct clock_event_device *ced = &per_cpu(cpu_ced, cpu);

	if (!ced || !ced->event_handler)
		return;

	if (uv_rtc_unset_timer(cpu, 0) != 1)
		return;

	ced->event_handler(ced);
}

static int __init uv_enable_evt_rtc(char *str)
{
	uv_rtc_evt_enable = 1;

	return 1;
}
__setup("uvrtcevt", uv_enable_evt_rtc);

static __init void uv_rtc_register_clockevents(struct work_struct *dummy)
{
	struct clock_event_device *ced = &__get_cpu_var(cpu_ced);

	*ced = clock_event_device_uv;
	ced->cpumask = cpumask_of(smp_processor_id());
	clockevents_register_device(ced);
}

static __init int uv_rtc_setup_clock(void)
{
	int rc;

	if (!is_uv_system())
		return -ENODEV;

	rc = clocksource_register_hz(&clocksource_uv, sn_rtc_cycles_per_second);
	if (rc)
		printk(KERN_INFO "UV RTC clocksource failed rc %d\n", rc);
	else
		printk(KERN_INFO "UV RTC clocksource registered freq %lu MHz\n",
			sn_rtc_cycles_per_second/(unsigned long)1E6);

	if (rc || !uv_rtc_evt_enable || x86_platform_ipi_callback)
		return rc;

	/* Setup and register clockevents */
	rc = uv_rtc_allocate_timers();
	if (rc)
		goto error;

	x86_platform_ipi_callback = uv_rtc_interrupt;

	clock_event_device_uv.mult = div_sc(sn_rtc_cycles_per_second,
				NSEC_PER_SEC, clock_event_device_uv.shift);

	clock_event_device_uv.min_delta_ns = NSEC_PER_SEC /
						sn_rtc_cycles_per_second;

	clock_event_device_uv.max_delta_ns = clocksource_uv.mask *
				(NSEC_PER_SEC / sn_rtc_cycles_per_second);

	rc = schedule_on_each_cpu(uv_rtc_register_clockevents);
	if (rc) {
		x86_platform_ipi_callback = NULL;
		uv_rtc_deallocate_timers();
		goto error;
	}

	printk(KERN_INFO "UV RTC clockevents registered\n");

	return 0;

error:
	clocksource_unregister(&clocksource_uv);
	printk(KERN_INFO "UV RTC clockevents failed rc %d\n", rc);

	return rc;
}
arch_initcall(uv_rtc_setup_clock);
gpl-2.0
kogone/android_kernel_oneplus_msm8974
arch/powerpc/perf/power4-pmu.c
7279
17303
/* * Performance counter support for POWER4 (GP) and POWER4+ (GQ) processors. * * Copyright 2009 Paul Mackerras, IBM Corporation. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/kernel.h> #include <linux/perf_event.h> #include <linux/string.h> #include <asm/reg.h> #include <asm/cputable.h> /* * Bits in event code for POWER4 */ #define PM_PMC_SH 12 /* PMC number (1-based) for direct events */ #define PM_PMC_MSK 0xf #define PM_UNIT_SH 8 /* TTMMUX number and setting - unit select */ #define PM_UNIT_MSK 0xf #define PM_LOWER_SH 6 #define PM_LOWER_MSK 1 #define PM_LOWER_MSKS 0x40 #define PM_BYTE_SH 4 /* Byte number of event bus to use */ #define PM_BYTE_MSK 3 #define PM_PMCSEL_MSK 7 /* * Unit code values */ #define PM_FPU 1 #define PM_ISU1 2 #define PM_IFU 3 #define PM_IDU0 4 #define PM_ISU1_ALT 6 #define PM_ISU2 7 #define PM_IFU_ALT 8 #define PM_LSU0 9 #define PM_LSU1 0xc #define PM_GPS 0xf /* * Bits in MMCR0 for POWER4 */ #define MMCR0_PMC1SEL_SH 8 #define MMCR0_PMC2SEL_SH 1 #define MMCR_PMCSEL_MSK 0x1f /* * Bits in MMCR1 for POWER4 */ #define MMCR1_TTM0SEL_SH 62 #define MMCR1_TTC0SEL_SH 61 #define MMCR1_TTM1SEL_SH 59 #define MMCR1_TTC1SEL_SH 58 #define MMCR1_TTM2SEL_SH 56 #define MMCR1_TTC2SEL_SH 55 #define MMCR1_TTM3SEL_SH 53 #define MMCR1_TTC3SEL_SH 52 #define MMCR1_TTMSEL_MSK 3 #define MMCR1_TD_CP_DBG0SEL_SH 50 #define MMCR1_TD_CP_DBG1SEL_SH 48 #define MMCR1_TD_CP_DBG2SEL_SH 46 #define MMCR1_TD_CP_DBG3SEL_SH 44 #define MMCR1_DEBUG0SEL_SH 43 #define MMCR1_DEBUG1SEL_SH 42 #define MMCR1_DEBUG2SEL_SH 41 #define MMCR1_DEBUG3SEL_SH 40 #define MMCR1_PMC1_ADDER_SEL_SH 39 #define MMCR1_PMC2_ADDER_SEL_SH 38 #define MMCR1_PMC6_ADDER_SEL_SH 37 #define MMCR1_PMC5_ADDER_SEL_SH 36 #define MMCR1_PMC8_ADDER_SEL_SH 35 #define MMCR1_PMC7_ADDER_SEL_SH 34 #define 
MMCR1_PMC3_ADDER_SEL_SH 33 #define MMCR1_PMC4_ADDER_SEL_SH 32 #define MMCR1_PMC3SEL_SH 27 #define MMCR1_PMC4SEL_SH 22 #define MMCR1_PMC5SEL_SH 17 #define MMCR1_PMC6SEL_SH 12 #define MMCR1_PMC7SEL_SH 7 #define MMCR1_PMC8SEL_SH 2 /* note bit 0 is in MMCRA for GP */ static short mmcr1_adder_bits[8] = { MMCR1_PMC1_ADDER_SEL_SH, MMCR1_PMC2_ADDER_SEL_SH, MMCR1_PMC3_ADDER_SEL_SH, MMCR1_PMC4_ADDER_SEL_SH, MMCR1_PMC5_ADDER_SEL_SH, MMCR1_PMC6_ADDER_SEL_SH, MMCR1_PMC7_ADDER_SEL_SH, MMCR1_PMC8_ADDER_SEL_SH }; /* * Bits in MMCRA */ #define MMCRA_PMC8SEL0_SH 17 /* PMC8SEL bit 0 for GP */ /* * Layout of constraint bits: * 6666555555555544444444443333333333222222222211111111110000000000 * 3210987654321098765432109876543210987654321098765432109876543210 * |[ >[ >[ >|||[ >[ >< >< >< >< ><><><><><><><><> * | UC1 UC2 UC3 ||| PS1 PS2 B0 B1 B2 B3 P1P2P3P4P5P6P7P8 * \SMPL ||\TTC3SEL * |\TTC_IFU_SEL * \TTM2SEL0 * * SMPL - SAMPLE_ENABLE constraint * 56: SAMPLE_ENABLE value 0x0100_0000_0000_0000 * * UC1 - unit constraint 1: can't have all three of FPU/ISU1/IDU0|ISU2 * 55: UC1 error 0x0080_0000_0000_0000 * 54: FPU events needed 0x0040_0000_0000_0000 * 53: ISU1 events needed 0x0020_0000_0000_0000 * 52: IDU0|ISU2 events needed 0x0010_0000_0000_0000 * * UC2 - unit constraint 2: can't have all three of FPU/IFU/LSU0 * 51: UC2 error 0x0008_0000_0000_0000 * 50: FPU events needed 0x0004_0000_0000_0000 * 49: IFU events needed 0x0002_0000_0000_0000 * 48: LSU0 events needed 0x0001_0000_0000_0000 * * UC3 - unit constraint 3: can't have all four of LSU0/IFU/IDU0|ISU2/ISU1 * 47: UC3 error 0x8000_0000_0000 * 46: LSU0 events needed 0x4000_0000_0000 * 45: IFU events needed 0x2000_0000_0000 * 44: IDU0|ISU2 events needed 0x1000_0000_0000 * 43: ISU1 events needed 0x0800_0000_0000 * * TTM2SEL0 * 42: 0 = IDU0 events needed * 1 = ISU2 events needed 0x0400_0000_0000 * * TTC_IFU_SEL * 41: 0 = IFU.U events needed * 1 = IFU.L events needed 0x0200_0000_0000 * * TTC3SEL * 40: 0 = LSU1.U events needed * 1 = LSU1.L events 
needed 0x0100_0000_0000 * * PS1 * 39: PS1 error 0x0080_0000_0000 * 36-38: count of events needing PMC1/2/5/6 0x0070_0000_0000 * * PS2 * 35: PS2 error 0x0008_0000_0000 * 32-34: count of events needing PMC3/4/7/8 0x0007_0000_0000 * * B0 * 28-31: Byte 0 event source 0xf000_0000 * 1 = FPU * 2 = ISU1 * 3 = IFU * 4 = IDU0 * 7 = ISU2 * 9 = LSU0 * c = LSU1 * f = GPS * * B1, B2, B3 * 24-27, 20-23, 16-19: Byte 1, 2, 3 event sources * * P8 * 15: P8 error 0x8000 * 14-15: Count of events needing PMC8 * * P1..P7 * 0-13: Count of events needing PMC1..PMC7 * * Note: this doesn't allow events using IFU.U to be combined with events * using IFU.L, though that is feasible (using TTM0 and TTM2). However * there are no listed events for IFU.L (they are debug events not * verified for performance monitoring) so this shouldn't cause a * problem. */ static struct unitinfo { unsigned long value, mask; int unit; int lowerbit; } p4_unitinfo[16] = { [PM_FPU] = { 0x44000000000000ul, 0x88000000000000ul, PM_FPU, 0 }, [PM_ISU1] = { 0x20080000000000ul, 0x88000000000000ul, PM_ISU1, 0 }, [PM_ISU1_ALT] = { 0x20080000000000ul, 0x88000000000000ul, PM_ISU1, 0 }, [PM_IFU] = { 0x02200000000000ul, 0x08820000000000ul, PM_IFU, 41 }, [PM_IFU_ALT] = { 0x02200000000000ul, 0x08820000000000ul, PM_IFU, 41 }, [PM_IDU0] = { 0x10100000000000ul, 0x80840000000000ul, PM_IDU0, 1 }, [PM_ISU2] = { 0x10140000000000ul, 0x80840000000000ul, PM_ISU2, 0 }, [PM_LSU0] = { 0x01400000000000ul, 0x08800000000000ul, PM_LSU0, 0 }, [PM_LSU1] = { 0x00000000000000ul, 0x00010000000000ul, PM_LSU1, 40 }, [PM_GPS] = { 0x00000000000000ul, 0x00000000000000ul, PM_GPS, 0 } }; static unsigned char direct_marked_event[8] = { (1<<2) | (1<<3), /* PMC1: PM_MRK_GRP_DISP, PM_MRK_ST_CMPL */ (1<<3) | (1<<5), /* PMC2: PM_THRESH_TIMEO, PM_MRK_BRU_FIN */ (1<<3), /* PMC3: PM_MRK_ST_CMPL_INT */ (1<<4) | (1<<5), /* PMC4: PM_MRK_GRP_CMPL, PM_MRK_CRU_FIN */ (1<<4) | (1<<5), /* PMC5: PM_MRK_GRP_TIMEO */ (1<<3) | (1<<4) | (1<<5), /* PMC6: PM_MRK_ST_GPS, 
PM_MRK_FXU_FIN, PM_MRK_GRP_ISSUED */ (1<<4) | (1<<5), /* PMC7: PM_MRK_FPU_FIN, PM_MRK_INST_FIN */ (1<<4), /* PMC8: PM_MRK_LSU_FIN */ }; /* * Returns 1 if event counts things relating to marked instructions * and thus needs the MMCRA_SAMPLE_ENABLE bit set, or 0 if not. */ static int p4_marked_instr_event(u64 event) { int pmc, psel, unit, byte, bit; unsigned int mask; pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; psel = event & PM_PMCSEL_MSK; if (pmc) { if (direct_marked_event[pmc - 1] & (1 << psel)) return 1; if (psel == 0) /* add events */ bit = (pmc <= 4)? pmc - 1: 8 - pmc; else if (psel == 6) /* decode events */ bit = 4; else return 0; } else bit = psel; byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK; unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK; mask = 0; switch (unit) { case PM_LSU1: if (event & PM_LOWER_MSKS) mask = 1 << 28; /* byte 7 bit 4 */ else mask = 6 << 24; /* byte 3 bits 1 and 2 */ break; case PM_LSU0: /* byte 3, bit 3; byte 2 bits 0,2,3,4,5; byte 1 */ mask = 0x083dff00; } return (mask >> (byte * 8 + bit)) & 1; } static int p4_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp) { int pmc, byte, unit, lower, sh; unsigned long mask = 0, value = 0; int grp = -1; pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; if (pmc) { if (pmc > 8) return -1; sh = (pmc - 1) * 2; mask |= 2 << sh; value |= 1 << sh; grp = ((pmc - 1) >> 1) & 1; } unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK; byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK; if (unit) { lower = (event >> PM_LOWER_SH) & PM_LOWER_MSK; /* * Bus events on bytes 0 and 2 can be counted * on PMC1/2/5/6; bytes 1 and 3 on PMC3/4/7/8. 
*/ if (!pmc) grp = byte & 1; if (!p4_unitinfo[unit].unit) return -1; mask |= p4_unitinfo[unit].mask; value |= p4_unitinfo[unit].value; sh = p4_unitinfo[unit].lowerbit; if (sh > 1) value |= (unsigned long)lower << sh; else if (lower != sh) return -1; unit = p4_unitinfo[unit].unit; /* Set byte lane select field */ mask |= 0xfULL << (28 - 4 * byte); value |= (unsigned long)unit << (28 - 4 * byte); } if (grp == 0) { /* increment PMC1/2/5/6 field */ mask |= 0x8000000000ull; value |= 0x1000000000ull; } else { /* increment PMC3/4/7/8 field */ mask |= 0x800000000ull; value |= 0x100000000ull; } /* Marked instruction events need sample_enable set */ if (p4_marked_instr_event(event)) { mask |= 1ull << 56; value |= 1ull << 56; } /* PMCSEL=6 decode events on byte 2 need sample_enable clear */ if (pmc && (event & PM_PMCSEL_MSK) == 6 && byte == 2) mask |= 1ull << 56; *maskp = mask; *valp = value; return 0; } static unsigned int ppc_inst_cmpl[] = { 0x1001, 0x4001, 0x6001, 0x7001, 0x8001 }; static int p4_get_alternatives(u64 event, unsigned int flags, u64 alt[]) { int i, j, na; alt[0] = event; na = 1; /* 2 possibilities for PM_GRP_DISP_REJECT */ if (event == 0x8003 || event == 0x0224) { alt[1] = event ^ (0x8003 ^ 0x0224); return 2; } /* 2 possibilities for PM_ST_MISS_L1 */ if (event == 0x0c13 || event == 0x0c23) { alt[1] = event ^ (0x0c13 ^ 0x0c23); return 2; } /* several possibilities for PM_INST_CMPL */ for (i = 0; i < ARRAY_SIZE(ppc_inst_cmpl); ++i) { if (event == ppc_inst_cmpl[i]) { for (j = 0; j < ARRAY_SIZE(ppc_inst_cmpl); ++j) if (j != i) alt[na++] = ppc_inst_cmpl[j]; break; } } return na; } static int p4_compute_mmcr(u64 event[], int n_ev, unsigned int hwc[], unsigned long mmcr[]) { unsigned long mmcr0 = 0, mmcr1 = 0, mmcra = 0; unsigned int pmc, unit, byte, psel, lower; unsigned int ttm, grp; unsigned int pmc_inuse = 0; unsigned int pmc_grp_use[2]; unsigned char busbyte[4]; unsigned char unituse[16]; unsigned int unitlower = 0; int i; if (n_ev > 8) return -1; /* First pass 
to count resource use */ pmc_grp_use[0] = pmc_grp_use[1] = 0; memset(busbyte, 0, sizeof(busbyte)); memset(unituse, 0, sizeof(unituse)); for (i = 0; i < n_ev; ++i) { pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK; if (pmc) { if (pmc_inuse & (1 << (pmc - 1))) return -1; pmc_inuse |= 1 << (pmc - 1); /* count 1/2/5/6 vs 3/4/7/8 use */ ++pmc_grp_use[((pmc - 1) >> 1) & 1]; } unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK; byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK; lower = (event[i] >> PM_LOWER_SH) & PM_LOWER_MSK; if (unit) { if (!pmc) ++pmc_grp_use[byte & 1]; if (unit == 6 || unit == 8) /* map alt ISU1/IFU codes: 6->2, 8->3 */ unit = (unit >> 1) - 1; if (busbyte[byte] && busbyte[byte] != unit) return -1; busbyte[byte] = unit; lower <<= unit; if (unituse[unit] && lower != (unitlower & lower)) return -1; unituse[unit] = 1; unitlower |= lower; } } if (pmc_grp_use[0] > 4 || pmc_grp_use[1] > 4) return -1; /* * Assign resources and set multiplexer selects. * * Units 1,2,3 are on TTM0, 4,6,7 on TTM1, 8,10 on TTM2. * Each TTMx can only select one unit, but since * units 2 and 6 are both ISU1, and 3 and 8 are both IFU, * we have some choices. */ if (unituse[2] & (unituse[1] | (unituse[3] & unituse[9]))) { unituse[6] = 1; /* Move 2 to 6 */ unituse[2] = 0; } if (unituse[3] & (unituse[1] | unituse[2])) { unituse[8] = 1; /* Move 3 to 8 */ unituse[3] = 0; unitlower = (unitlower & ~8) | ((unitlower & 8) << 5); } /* Check only one unit per TTMx */ if (unituse[1] + unituse[2] + unituse[3] > 1 || unituse[4] + unituse[6] + unituse[7] > 1 || unituse[8] + unituse[9] > 1 || (unituse[5] | unituse[10] | unituse[11] | unituse[13] | unituse[14])) return -1; /* Set TTMxSEL fields. Note, units 1-3 => TTM0SEL codes 0-2 */ mmcr1 |= (unsigned long)(unituse[3] * 2 + unituse[2]) << MMCR1_TTM0SEL_SH; mmcr1 |= (unsigned long)(unituse[7] * 3 + unituse[6] * 2) << MMCR1_TTM1SEL_SH; mmcr1 |= (unsigned long)unituse[9] << MMCR1_TTM2SEL_SH; /* Set TTCxSEL fields. 
*/ if (unitlower & 0xe) mmcr1 |= 1ull << MMCR1_TTC0SEL_SH; if (unitlower & 0xf0) mmcr1 |= 1ull << MMCR1_TTC1SEL_SH; if (unitlower & 0xf00) mmcr1 |= 1ull << MMCR1_TTC2SEL_SH; if (unitlower & 0x7000) mmcr1 |= 1ull << MMCR1_TTC3SEL_SH; /* Set byte lane select fields. */ for (byte = 0; byte < 4; ++byte) { unit = busbyte[byte]; if (!unit) continue; if (unit == 0xf) { /* special case for GPS */ mmcr1 |= 1ull << (MMCR1_DEBUG0SEL_SH - byte); } else { if (!unituse[unit]) ttm = unit - 1; /* 2->1, 3->2 */ else ttm = unit >> 2; mmcr1 |= (unsigned long)ttm << (MMCR1_TD_CP_DBG0SEL_SH - 2 * byte); } } /* Second pass: assign PMCs, set PMCxSEL and PMCx_ADDER_SEL fields */ for (i = 0; i < n_ev; ++i) { pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK; unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK; byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK; psel = event[i] & PM_PMCSEL_MSK; if (!pmc) { /* Bus event or 00xxx direct event (off or cycles) */ if (unit) psel |= 0x10 | ((byte & 2) << 2); for (pmc = 0; pmc < 8; ++pmc) { if (pmc_inuse & (1 << pmc)) continue; grp = (pmc >> 1) & 1; if (unit) { if (grp == (byte & 1)) break; } else if (pmc_grp_use[grp] < 4) { ++pmc_grp_use[grp]; break; } } pmc_inuse |= 1 << pmc; } else { /* Direct event */ --pmc; if (psel == 0 && (byte & 2)) /* add events on higher-numbered bus */ mmcr1 |= 1ull << mmcr1_adder_bits[pmc]; else if (psel == 6 && byte == 3) /* seem to need to set sample_enable here */ mmcra |= MMCRA_SAMPLE_ENABLE; psel |= 8; } if (pmc <= 1) mmcr0 |= psel << (MMCR0_PMC1SEL_SH - 7 * pmc); else mmcr1 |= psel << (MMCR1_PMC3SEL_SH - 5 * (pmc - 2)); if (pmc == 7) /* PMC8 */ mmcra |= (psel & 1) << MMCRA_PMC8SEL0_SH; hwc[i] = pmc; if (p4_marked_instr_event(event[i])) mmcra |= MMCRA_SAMPLE_ENABLE; } if (pmc_inuse & 1) mmcr0 |= MMCR0_PMC1CE; if (pmc_inuse & 0xfe) mmcr0 |= MMCR0_PMCjCE; mmcra |= 0x2000; /* mark only one IOP per PPC instruction */ /* Return MMCRx values */ mmcr[0] = mmcr0; mmcr[1] = mmcr1; mmcr[2] = mmcra; return 0; } static void 
p4_disable_pmc(unsigned int pmc, unsigned long mmcr[]) { /* * Setting the PMCxSEL field to 0 disables PMC x. * (Note that pmc is 0-based here, not 1-based.) */ if (pmc <= 1) { mmcr[0] &= ~(0x1fUL << (MMCR0_PMC1SEL_SH - 7 * pmc)); } else { mmcr[1] &= ~(0x1fUL << (MMCR1_PMC3SEL_SH - 5 * (pmc - 2))); if (pmc == 7) mmcr[2] &= ~(1UL << MMCRA_PMC8SEL0_SH); } } static int p4_generic_events[] = { [PERF_COUNT_HW_CPU_CYCLES] = 7, [PERF_COUNT_HW_INSTRUCTIONS] = 0x1001, [PERF_COUNT_HW_CACHE_REFERENCES] = 0x8c10, /* PM_LD_REF_L1 */ [PERF_COUNT_HW_CACHE_MISSES] = 0x3c10, /* PM_LD_MISS_L1 */ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x330, /* PM_BR_ISSUED */ [PERF_COUNT_HW_BRANCH_MISSES] = 0x331, /* PM_BR_MPRED_CR */ }; #define C(x) PERF_COUNT_HW_CACHE_##x /* * Table of generalized cache-related events. * 0 means not supported, -1 means nonsensical, other values * are event codes. */ static int power4_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { [C(L1D)] = { /* RESULT_ACCESS RESULT_MISS */ [C(OP_READ)] = { 0x8c10, 0x3c10 }, [C(OP_WRITE)] = { 0x7c10, 0xc13 }, [C(OP_PREFETCH)] = { 0xc35, 0 }, }, [C(L1I)] = { /* RESULT_ACCESS RESULT_MISS */ [C(OP_READ)] = { 0, 0 }, [C(OP_WRITE)] = { -1, -1 }, [C(OP_PREFETCH)] = { 0, 0 }, }, [C(LL)] = { /* RESULT_ACCESS RESULT_MISS */ [C(OP_READ)] = { 0, 0 }, [C(OP_WRITE)] = { 0, 0 }, [C(OP_PREFETCH)] = { 0xc34, 0 }, }, [C(DTLB)] = { /* RESULT_ACCESS RESULT_MISS */ [C(OP_READ)] = { 0, 0x904 }, [C(OP_WRITE)] = { -1, -1 }, [C(OP_PREFETCH)] = { -1, -1 }, }, [C(ITLB)] = { /* RESULT_ACCESS RESULT_MISS */ [C(OP_READ)] = { 0, 0x900 }, [C(OP_WRITE)] = { -1, -1 }, [C(OP_PREFETCH)] = { -1, -1 }, }, [C(BPU)] = { /* RESULT_ACCESS RESULT_MISS */ [C(OP_READ)] = { 0x330, 0x331 }, [C(OP_WRITE)] = { -1, -1 }, [C(OP_PREFETCH)] = { -1, -1 }, }, [C(NODE)] = { /* RESULT_ACCESS RESULT_MISS */ [C(OP_READ)] = { -1, -1 }, [C(OP_WRITE)] = { -1, -1 }, [C(OP_PREFETCH)] = { -1, -1 }, }, }; static struct power_pmu power4_pmu = { .name = "POWER4/4+", .n_counter = 8, 
.max_alternatives = 5, .add_fields = 0x0000001100005555ul, .test_adder = 0x0011083300000000ul, .compute_mmcr = p4_compute_mmcr, .get_constraint = p4_get_constraint, .get_alternatives = p4_get_alternatives, .disable_pmc = p4_disable_pmc, .n_generic = ARRAY_SIZE(p4_generic_events), .generic_events = p4_generic_events, .cache_events = &power4_cache_events, .flags = PPMU_NO_SIPR | PPMU_NO_CONT_SAMPLING, }; static int __init init_power4_pmu(void) { if (!cur_cpu_spec->oprofile_cpu_type || strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power4")) return -ENODEV; return register_power_pmu(&power4_pmu); } early_initcall(init_power4_pmu);
gpl-2.0
MyAOSP/kernel_samsung_tuna
drivers/leds/ledtrig-sleep.c
8047
2179
/* drivers/leds/ledtrig-sleep.c
 *
 * Copyright (C) 2007 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/earlysuspend.h>
#include <linux/leds.h>
#include <linux/suspend.h>

static int ledtrig_sleep_pm_callback(struct notifier_block *nfb,
				     unsigned long action,
				     void *ignored);

/* LED trigger driven on at early-suspend / off at resume. */
DEFINE_LED_TRIGGER(ledtrig_sleep)

static struct notifier_block ledtrig_sleep_pm_notifier = {
	.notifier_call = ledtrig_sleep_pm_callback,
	.priority = 0,
};

/* Display going down: light the LED fully. */
static void ledtrig_sleep_early_suspend(struct early_suspend *h)
{
	led_trigger_event(ledtrig_sleep, LED_FULL);
}

/* Display coming back: extinguish the LED. */
static void ledtrig_sleep_early_resume(struct early_suspend *h)
{
	led_trigger_event(ledtrig_sleep, LED_OFF);
}

static struct early_suspend ledtrig_sleep_early_suspend_handler = {
	.suspend = ledtrig_sleep_early_suspend,
	.resume = ledtrig_sleep_early_resume,
};

/*
 * Mirror full system suspend/hibernate transitions on the LED:
 * off while entering, full once back up.
 */
static int ledtrig_sleep_pm_callback(struct notifier_block *nfb,
				     unsigned long action,
				     void *ignored)
{
	switch (action) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		led_trigger_event(ledtrig_sleep, LED_OFF);
		return NOTIFY_OK;
	case PM_POST_HIBERNATION:
	case PM_POST_SUSPEND:
		led_trigger_event(ledtrig_sleep, LED_FULL);
		return NOTIFY_OK;
	}

	return NOTIFY_DONE;
}

static int __init ledtrig_sleep_init(void)
{
	led_trigger_register_simple("sleep", &ledtrig_sleep);
	register_pm_notifier(&ledtrig_sleep_pm_notifier);
	register_early_suspend(&ledtrig_sleep_early_suspend_handler);
	return 0;
}

static void __exit ledtrig_sleep_exit(void)
{
	unregister_early_suspend(&ledtrig_sleep_early_suspend_handler);
	unregister_pm_notifier(&ledtrig_sleep_pm_notifier);
	led_trigger_unregister_simple(ledtrig_sleep);
}

module_init(ledtrig_sleep_init);
module_exit(ledtrig_sleep_exit);
gpl-2.0
bagnz0r/GT-I8160_Kernel
arch/s390/kernel/audit.c
13935
1686
/*
 * Audit syscall classification for s390.
 *
 * Builds the generic audit filter class tables and maps individual
 * syscall numbers onto audit classes; 31-bit compat syscalls are
 * dispatched to the s390-specific tables/classifier.
 */
#include <linux/init.h>
#include <linux/types.h>
#include <linux/audit.h>
#include <asm/unistd.h>
#include "audit.h"

/* Each table is the generic list terminated by ~0U. */
static unsigned dir_class[] = {
#include <asm-generic/audit_dir_write.h>
~0U
};

static unsigned read_class[] = {
#include <asm-generic/audit_read.h>
~0U
};

static unsigned write_class[] = {
#include <asm-generic/audit_write.h>
~0U
};

static unsigned chattr_class[] = {
#include <asm-generic/audit_change_attr.h>
~0U
};

static unsigned signal_class[] = {
#include <asm-generic/audit_signal.h>
~0U
};

/* Non-zero iff 'arch' is the 31-bit compat ABI. */
int audit_classify_arch(int arch)
{
#ifdef CONFIG_COMPAT
	if (arch == AUDIT_ARCH_S390)
		return 1;
#endif
	return 0;
}

/*
 * Map a syscall number to its audit class; compat syscalls go through
 * the s390-specific classifier.
 */
int audit_classify_syscall(int abi, unsigned syscall)
{
#ifdef CONFIG_COMPAT
	if (abi == AUDIT_ARCH_S390)
		return s390_classify_syscall(syscall);
#endif
	switch (syscall) {
	case __NR_open:
		return 2;
	case __NR_openat:
		return 3;
	case __NR_socketcall:
		return 4;
	case __NR_execve:
		return 5;
	default:
		return 0;
	}
}

/* Register the class tables (compat first, then native). */
static int __init audit_classes_init(void)
{
#ifdef CONFIG_COMPAT
	audit_register_class(AUDIT_CLASS_WRITE_32, s390_write_class);
	audit_register_class(AUDIT_CLASS_READ_32, s390_read_class);
	audit_register_class(AUDIT_CLASS_DIR_WRITE_32, s390_dir_class);
	audit_register_class(AUDIT_CLASS_CHATTR_32, s390_chattr_class);
	audit_register_class(AUDIT_CLASS_SIGNAL_32, s390_signal_class);
#endif
	audit_register_class(AUDIT_CLASS_WRITE, write_class);
	audit_register_class(AUDIT_CLASS_READ, read_class);
	audit_register_class(AUDIT_CLASS_DIR_WRITE, dir_class);
	audit_register_class(AUDIT_CLASS_CHATTR, chattr_class);
	audit_register_class(AUDIT_CLASS_SIGNAL, signal_class);
	return 0;
}

__initcall(audit_classes_init);
gpl-2.0
nerdyblonde/N80XX_Kernel
drivers/input/joystick/iforce/iforce-serio.c
14703
4470
/* * Copyright (c) 2000-2001 Vojtech Pavlik <vojtech@ucw.cz> * Copyright (c) 2001, 2007 Johann Deneux <johann.deneux@gmail.com> * * USB/RS232 I-Force joysticks and wheels. */ /* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Should you need to contact me, the author, you can do so either by * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail: * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic */ #include "iforce.h" void iforce_serial_xmit(struct iforce *iforce) { unsigned char cs; int i; unsigned long flags; if (test_and_set_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags)) { set_bit(IFORCE_XMIT_AGAIN, iforce->xmit_flags); return; } spin_lock_irqsave(&iforce->xmit_lock, flags); again: if (iforce->xmit.head == iforce->xmit.tail) { clear_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags); spin_unlock_irqrestore(&iforce->xmit_lock, flags); return; } cs = 0x2b; serio_write(iforce->serio, 0x2b); serio_write(iforce->serio, iforce->xmit.buf[iforce->xmit.tail]); cs ^= iforce->xmit.buf[iforce->xmit.tail]; XMIT_INC(iforce->xmit.tail, 1); for (i=iforce->xmit.buf[iforce->xmit.tail]; i >= 0; --i) { serio_write(iforce->serio, iforce->xmit.buf[iforce->xmit.tail]); cs ^= iforce->xmit.buf[iforce->xmit.tail]; XMIT_INC(iforce->xmit.tail, 1); } serio_write(iforce->serio, cs); if (test_and_clear_bit(IFORCE_XMIT_AGAIN, 
iforce->xmit_flags)) goto again; clear_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags); spin_unlock_irqrestore(&iforce->xmit_lock, flags); } static void iforce_serio_write_wakeup(struct serio *serio) { struct iforce *iforce = serio_get_drvdata(serio); iforce_serial_xmit(iforce); } static irqreturn_t iforce_serio_irq(struct serio *serio, unsigned char data, unsigned int flags) { struct iforce *iforce = serio_get_drvdata(serio); if (!iforce->pkt) { if (data == 0x2b) iforce->pkt = 1; goto out; } if (!iforce->id) { if (data > 3 && data != 0xff) iforce->pkt = 0; else iforce->id = data; goto out; } if (!iforce->len) { if (data > IFORCE_MAX_LENGTH) { iforce->pkt = 0; iforce->id = 0; } else { iforce->len = data; } goto out; } if (iforce->idx < iforce->len) { iforce->csum += iforce->data[iforce->idx++] = data; goto out; } if (iforce->idx == iforce->len) { iforce_process_packet(iforce, (iforce->id << 8) | iforce->idx, iforce->data); iforce->pkt = 0; iforce->id = 0; iforce->len = 0; iforce->idx = 0; iforce->csum = 0; } out: return IRQ_HANDLED; } static int iforce_serio_connect(struct serio *serio, struct serio_driver *drv) { struct iforce *iforce; int err; iforce = kzalloc(sizeof(struct iforce), GFP_KERNEL); if (!iforce) return -ENOMEM; iforce->bus = IFORCE_232; iforce->serio = serio; serio_set_drvdata(serio, iforce); err = serio_open(serio, drv); if (err) goto fail1; err = iforce_init_device(iforce); if (err) goto fail2; return 0; fail2: serio_close(serio); fail1: serio_set_drvdata(serio, NULL); kfree(iforce); return err; } static void iforce_serio_disconnect(struct serio *serio) { struct iforce *iforce = serio_get_drvdata(serio); input_unregister_device(iforce->dev); serio_close(serio); serio_set_drvdata(serio, NULL); kfree(iforce); } static struct serio_device_id iforce_serio_ids[] = { { .type = SERIO_RS232, .proto = SERIO_IFORCE, .id = SERIO_ANY, .extra = SERIO_ANY, }, { 0 } }; MODULE_DEVICE_TABLE(serio, iforce_serio_ids); struct serio_driver iforce_serio_drv = { .driver = { 
.name = "iforce", }, .description = "RS232 I-Force joysticks and wheels driver", .id_table = iforce_serio_ids, .write_wakeup = iforce_serio_write_wakeup, .interrupt = iforce_serio_irq, .connect = iforce_serio_connect, .disconnect = iforce_serio_disconnect, };
gpl-2.0
felipesanches/linux-media
drivers/media/i2c/vp27smpx.c
1136
5211
/* * vp27smpx - driver version 0.0.1 * * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl> * * Based on a tvaudio patch from Takahiro Adachi <tadachi@tadachi-net.com> * and Kazuhiko Kawakami <kazz-0@mail.goo.ne.jp> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/ioctl.h> #include <asm/uaccess.h> #include <linux/i2c.h> #include <linux/videodev2.h> #include <media/v4l2-device.h> MODULE_DESCRIPTION("vp27smpx driver"); MODULE_AUTHOR("Hans Verkuil"); MODULE_LICENSE("GPL"); /* ----------------------------------------------------------------------- */ struct vp27smpx_state { struct v4l2_subdev sd; int radio; u32 audmode; }; static inline struct vp27smpx_state *to_state(struct v4l2_subdev *sd) { return container_of(sd, struct vp27smpx_state, sd); } static void vp27smpx_set_audmode(struct v4l2_subdev *sd, u32 audmode) { struct vp27smpx_state *state = to_state(sd); struct i2c_client *client = v4l2_get_subdevdata(sd); u8 data[3] = { 0x00, 0x00, 0x04 }; switch (audmode) { case V4L2_TUNER_MODE_MONO: case V4L2_TUNER_MODE_LANG1: break; case V4L2_TUNER_MODE_STEREO: case V4L2_TUNER_MODE_LANG1_LANG2: data[1] = 0x01; break; case V4L2_TUNER_MODE_LANG2: data[1] = 0x02; break; } if (i2c_master_send(client, data, sizeof(data)) != sizeof(data)) v4l2_err(sd, "I/O error 
setting audmode\n"); else state->audmode = audmode; } static int vp27smpx_s_radio(struct v4l2_subdev *sd) { struct vp27smpx_state *state = to_state(sd); state->radio = 1; return 0; } static int vp27smpx_s_std(struct v4l2_subdev *sd, v4l2_std_id norm) { struct vp27smpx_state *state = to_state(sd); state->radio = 0; return 0; } static int vp27smpx_s_tuner(struct v4l2_subdev *sd, const struct v4l2_tuner *vt) { struct vp27smpx_state *state = to_state(sd); if (!state->radio) vp27smpx_set_audmode(sd, vt->audmode); return 0; } static int vp27smpx_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt) { struct vp27smpx_state *state = to_state(sd); if (state->radio) return 0; vt->audmode = state->audmode; vt->capability = V4L2_TUNER_CAP_STEREO | V4L2_TUNER_CAP_LANG1 | V4L2_TUNER_CAP_LANG2; vt->rxsubchans = V4L2_TUNER_SUB_MONO; return 0; } static int vp27smpx_log_status(struct v4l2_subdev *sd) { struct vp27smpx_state *state = to_state(sd); v4l2_info(sd, "Audio Mode: %u%s\n", state->audmode, state->radio ? 
" (Radio)" : ""); return 0; } /* ----------------------------------------------------------------------- */ static const struct v4l2_subdev_core_ops vp27smpx_core_ops = { .log_status = vp27smpx_log_status, }; static const struct v4l2_subdev_tuner_ops vp27smpx_tuner_ops = { .s_radio = vp27smpx_s_radio, .s_tuner = vp27smpx_s_tuner, .g_tuner = vp27smpx_g_tuner, }; static const struct v4l2_subdev_video_ops vp27smpx_video_ops = { .s_std = vp27smpx_s_std, }; static const struct v4l2_subdev_ops vp27smpx_ops = { .core = &vp27smpx_core_ops, .tuner = &vp27smpx_tuner_ops, .video = &vp27smpx_video_ops, }; /* ----------------------------------------------------------------------- */ /* i2c implementation */ /* * Generic i2c probe * concerning the addresses: i2c wants 7 bit (without the r/w bit), so '>>1' */ static int vp27smpx_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct vp27smpx_state *state; struct v4l2_subdev *sd; /* Check if the adapter supports the needed features */ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) return -EIO; v4l_info(client, "chip found @ 0x%x (%s)\n", client->addr << 1, client->adapter->name); state = devm_kzalloc(&client->dev, sizeof(*state), GFP_KERNEL); if (state == NULL) return -ENOMEM; sd = &state->sd; v4l2_i2c_subdev_init(sd, client, &vp27smpx_ops); state->audmode = V4L2_TUNER_MODE_STEREO; /* initialize vp27smpx */ vp27smpx_set_audmode(sd, state->audmode); return 0; } static int vp27smpx_remove(struct i2c_client *client) { struct v4l2_subdev *sd = i2c_get_clientdata(client); v4l2_device_unregister_subdev(sd); return 0; } /* ----------------------------------------------------------------------- */ static const struct i2c_device_id vp27smpx_id[] = { { "vp27smpx", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, vp27smpx_id); static struct i2c_driver vp27smpx_driver = { .driver = { .owner = THIS_MODULE, .name = "vp27smpx", }, .probe = vp27smpx_probe, .remove = vp27smpx_remove, .id_table = vp27smpx_id, }; 
module_i2c_driver(vp27smpx_driver);
gpl-2.0
clemsyn/Clemsyn-OC-kernel
arch/mips/kernel/irq-msc01.c
1648
4059
/* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * Copyright (c) 2004 MIPS Inc * Author: chris@mips.com * * Copyright (C) 2004, 06 Ralf Baechle <ralf@linux-mips.org> */ #include <linux/module.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/kernel_stat.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/msc01_ic.h> #include <asm/traps.h> static unsigned long _icctrl_msc; #define MSC01_IC_REG_BASE _icctrl_msc #define MSCIC_WRITE(reg, data) do { *(volatile u32 *)(reg) = data; } while (0) #define MSCIC_READ(reg, data) do { data = *(volatile u32 *)(reg); } while (0) static unsigned int irq_base; /* mask off an interrupt */ static inline void mask_msc_irq(unsigned int irq) { if (irq < (irq_base + 32)) MSCIC_WRITE(MSC01_IC_DISL, 1<<(irq - irq_base)); else MSCIC_WRITE(MSC01_IC_DISH, 1<<(irq - irq_base - 32)); } /* unmask an interrupt */ static inline void unmask_msc_irq(unsigned int irq) { if (irq < (irq_base + 32)) MSCIC_WRITE(MSC01_IC_ENAL, 1<<(irq - irq_base)); else MSCIC_WRITE(MSC01_IC_ENAH, 1<<(irq - irq_base - 32)); } /* * Masks and ACKs an IRQ */ static void level_mask_and_ack_msc_irq(unsigned int irq) { mask_msc_irq(irq); if (!cpu_has_veic) MSCIC_WRITE(MSC01_IC_EOI, 0); /* This actually needs to be a call into platform code */ smtc_im_ack_irq(irq); } /* * Masks and ACKs an IRQ */ static void edge_mask_and_ack_msc_irq(unsigned int irq) { mask_msc_irq(irq); if (!cpu_has_veic) MSCIC_WRITE(MSC01_IC_EOI, 0); else { u32 r; MSCIC_READ(MSC01_IC_SUP+irq*8, r); MSCIC_WRITE(MSC01_IC_SUP+irq*8, r | ~MSC01_IC_SUP_EDGE_BIT); MSCIC_WRITE(MSC01_IC_SUP+irq*8, r); } smtc_im_ack_irq(irq); } /* * End IRQ processing */ static void end_msc_irq(unsigned int irq) { if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) 
unmask_msc_irq(irq); } /* * Interrupt handler for interrupts coming from SOC-it. */ void ll_msc_irq(void) { unsigned int irq; /* read the interrupt vector register */ MSCIC_READ(MSC01_IC_VEC, irq); if (irq < 64) do_IRQ(irq + irq_base); else { /* Ignore spurious interrupt */ } } static void msc_bind_eic_interrupt(int irq, int set) { MSCIC_WRITE(MSC01_IC_RAMW, (irq<<MSC01_IC_RAMW_ADDR_SHF) | (set<<MSC01_IC_RAMW_DATA_SHF)); } static struct irq_chip msc_levelirq_type = { .name = "SOC-it-Level", .ack = level_mask_and_ack_msc_irq, .mask = mask_msc_irq, .mask_ack = level_mask_and_ack_msc_irq, .unmask = unmask_msc_irq, .eoi = unmask_msc_irq, .end = end_msc_irq, }; static struct irq_chip msc_edgeirq_type = { .name = "SOC-it-Edge", .ack = edge_mask_and_ack_msc_irq, .mask = mask_msc_irq, .mask_ack = edge_mask_and_ack_msc_irq, .unmask = unmask_msc_irq, .eoi = unmask_msc_irq, .end = end_msc_irq, }; void __init init_msc_irqs(unsigned long icubase, unsigned int irqbase, msc_irqmap_t *imp, int nirq) { _icctrl_msc = (unsigned long) ioremap(icubase, 0x40000); /* Reset interrupt controller - initialises all registers to 0 */ MSCIC_WRITE(MSC01_IC_RST, MSC01_IC_RST_RST_BIT); board_bind_eic_interrupt = &msc_bind_eic_interrupt; for (; nirq >= 0; nirq--, imp++) { int n = imp->im_irq; switch (imp->im_type) { case MSC01_IRQ_EDGE: set_irq_chip_and_handler_name(irqbase + n, &msc_edgeirq_type, handle_edge_irq, "edge"); if (cpu_has_veic) MSCIC_WRITE(MSC01_IC_SUP+n*8, MSC01_IC_SUP_EDGE_BIT); else MSCIC_WRITE(MSC01_IC_SUP+n*8, MSC01_IC_SUP_EDGE_BIT | imp->im_lvl); break; case MSC01_IRQ_LEVEL: set_irq_chip_and_handler_name(irqbase+n, &msc_levelirq_type, handle_level_irq, "level"); if (cpu_has_veic) MSCIC_WRITE(MSC01_IC_SUP+n*8, 0); else MSCIC_WRITE(MSC01_IC_SUP+n*8, imp->im_lvl); } } irq_base = irqbase; MSCIC_WRITE(MSC01_IC_GENA, MSC01_IC_GENA_GENA_BIT); /* Enable interrupt generation */ }
gpl-2.0
cosmicexplorer/linux
arch/sh/kernel/cpu/sh4a/clock-sh7722.c
1904
8231
/* * arch/sh/kernel/cpu/sh4a/clock-sh7722.c * * SH7722 clock framework support * * Copyright (C) 2009 Magnus Damm * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/io.h> #include <linux/clkdev.h> #include <linux/sh_clk.h> #include <asm/clock.h> #include <cpu/sh7722.h> /* SH7722 registers */ #define FRQCR 0xa4150000 #define VCLKCR 0xa4150004 #define SCLKACR 0xa4150008 #define SCLKBCR 0xa415000c #define IRDACLKCR 0xa4150018 #define PLLCR 0xa4150024 #define MSTPCR0 0xa4150030 #define MSTPCR1 0xa4150034 #define MSTPCR2 0xa4150038 #define DLLFRQ 0xa4150050 /* Fixed 32 KHz root clock for RTC and Power Management purposes */ static struct clk r_clk = { .rate = 32768, }; /* * Default rate for the root input clock, reset this with clk_set_rate() * from the platform code. 
*/ struct clk extal_clk = { .rate = 33333333, }; /* The dll block multiplies the 32khz r_clk, may be used instead of extal */ static unsigned long dll_recalc(struct clk *clk) { unsigned long mult; if (__raw_readl(PLLCR) & 0x1000) mult = __raw_readl(DLLFRQ); else mult = 0; return clk->parent->rate * mult; } static struct sh_clk_ops dll_clk_ops = { .recalc = dll_recalc, }; static struct clk dll_clk = { .ops = &dll_clk_ops, .parent = &r_clk, .flags = CLK_ENABLE_ON_INIT, }; static unsigned long pll_recalc(struct clk *clk) { unsigned long mult = 1; unsigned long div = 1; if (__raw_readl(PLLCR) & 0x4000) mult = (((__raw_readl(FRQCR) >> 24) & 0x1f) + 1); else div = 2; return (clk->parent->rate * mult) / div; } static struct sh_clk_ops pll_clk_ops = { .recalc = pll_recalc, }; static struct clk pll_clk = { .ops = &pll_clk_ops, .flags = CLK_ENABLE_ON_INIT, }; struct clk *main_clks[] = { &r_clk, &extal_clk, &dll_clk, &pll_clk, }; static int multipliers[] = { 1, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1 }; static int divisors[] = { 1, 3, 2, 5, 3, 4, 5, 6, 8, 10, 12, 16, 20 }; static struct clk_div_mult_table div4_div_mult_table = { .divisors = divisors, .nr_divisors = ARRAY_SIZE(divisors), .multipliers = multipliers, .nr_multipliers = ARRAY_SIZE(multipliers), }; static struct clk_div4_table div4_table = { .div_mult_table = &div4_div_mult_table, }; #define DIV4(_reg, _bit, _mask, _flags) \ SH_CLK_DIV4(&pll_clk, _reg, _bit, _mask, _flags) enum { DIV4_I, DIV4_U, DIV4_SH, DIV4_B, DIV4_B3, DIV4_P, DIV4_NR }; struct clk div4_clks[DIV4_NR] = { [DIV4_I] = DIV4(FRQCR, 20, 0x1fef, CLK_ENABLE_ON_INIT), [DIV4_U] = DIV4(FRQCR, 16, 0x1fff, CLK_ENABLE_ON_INIT), [DIV4_SH] = DIV4(FRQCR, 12, 0x1fff, CLK_ENABLE_ON_INIT), [DIV4_B] = DIV4(FRQCR, 8, 0x1fff, CLK_ENABLE_ON_INIT), [DIV4_B3] = DIV4(FRQCR, 4, 0x1fff, CLK_ENABLE_ON_INIT), [DIV4_P] = DIV4(FRQCR, 0, 0x1fff, 0), }; enum { DIV4_IRDA, DIV4_ENABLE_NR }; struct clk div4_enable_clks[DIV4_ENABLE_NR] = { [DIV4_IRDA] = DIV4(IRDACLKCR, 0, 0x1fff, 0), }; 
enum { DIV4_SIUA, DIV4_SIUB, DIV4_REPARENT_NR }; struct clk div4_reparent_clks[DIV4_REPARENT_NR] = { [DIV4_SIUA] = DIV4(SCLKACR, 0, 0x1fff, 0), [DIV4_SIUB] = DIV4(SCLKBCR, 0, 0x1fff, 0), }; enum { DIV6_V, DIV6_NR }; struct clk div6_clks[DIV6_NR] = { [DIV6_V] = SH_CLK_DIV6(&pll_clk, VCLKCR, 0), }; static struct clk mstp_clks[HWBLK_NR] = { [HWBLK_URAM] = SH_CLK_MSTP32(&div4_clks[DIV4_U], MSTPCR0, 28, CLK_ENABLE_ON_INIT), [HWBLK_XYMEM] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR0, 26, CLK_ENABLE_ON_INIT), [HWBLK_TMU] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 15, 0), [HWBLK_CMT] = SH_CLK_MSTP32(&r_clk, MSTPCR0, 14, 0), [HWBLK_RWDT] = SH_CLK_MSTP32(&r_clk, MSTPCR0, 13, 0), [HWBLK_FLCTL] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 10, 0), [HWBLK_SCIF0] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 7, 0), [HWBLK_SCIF1] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 6, 0), [HWBLK_SCIF2] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 5, 0), [HWBLK_IIC] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 9, 0), [HWBLK_RTC] = SH_CLK_MSTP32(&r_clk, MSTPCR1, 8, 0), [HWBLK_SDHI] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR2, 18, 0), [HWBLK_KEYSC] = SH_CLK_MSTP32(&r_clk, MSTPCR2, 14, 0), [HWBLK_USBF] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR2, 11, 0), [HWBLK_2DG] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 9, 0), [HWBLK_SIU] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 8, 0), [HWBLK_JPU] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 6, 0), [HWBLK_VOU] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 5, 0), [HWBLK_BEU] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 4, 0), [HWBLK_CEU] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 3, 0), [HWBLK_VEU] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 2, 0), [HWBLK_VPU] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 1, 0), [HWBLK_LCDC] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR2, 0, 0), }; static struct clk_lookup lookups[] = { /* main clocks */ CLKDEV_CON_ID("rclk", &r_clk), CLKDEV_CON_ID("extal", &extal_clk), CLKDEV_CON_ID("dll_clk", &dll_clk), 
CLKDEV_CON_ID("pll_clk", &pll_clk), /* DIV4 clocks */ CLKDEV_CON_ID("cpu_clk", &div4_clks[DIV4_I]), CLKDEV_CON_ID("umem_clk", &div4_clks[DIV4_U]), CLKDEV_CON_ID("shyway_clk", &div4_clks[DIV4_SH]), CLKDEV_CON_ID("bus_clk", &div4_clks[DIV4_B]), CLKDEV_CON_ID("b3_clk", &div4_clks[DIV4_B3]), CLKDEV_CON_ID("peripheral_clk", &div4_clks[DIV4_P]), CLKDEV_CON_ID("irda_clk", &div4_enable_clks[DIV4_IRDA]), CLKDEV_CON_ID("siua_clk", &div4_reparent_clks[DIV4_SIUA]), CLKDEV_CON_ID("siub_clk", &div4_reparent_clks[DIV4_SIUB]), /* DIV6 clocks */ CLKDEV_CON_ID("video_clk", &div6_clks[DIV6_V]), /* MSTP clocks */ CLKDEV_CON_ID("uram0", &mstp_clks[HWBLK_URAM]), CLKDEV_CON_ID("xymem0", &mstp_clks[HWBLK_XYMEM]), CLKDEV_ICK_ID("fck", "sh-tmu.0", &mstp_clks[HWBLK_TMU]), CLKDEV_ICK_ID("fck", "sh-cmt-32.0", &mstp_clks[HWBLK_CMT]), CLKDEV_DEV_ID("sh-wdt.0", &mstp_clks[HWBLK_RWDT]), CLKDEV_CON_ID("flctl0", &mstp_clks[HWBLK_FLCTL]), CLKDEV_DEV_ID("sh-sci.0", &mstp_clks[HWBLK_SCIF0]), CLKDEV_DEV_ID("sh-sci.1", &mstp_clks[HWBLK_SCIF1]), CLKDEV_DEV_ID("sh-sci.2", &mstp_clks[HWBLK_SCIF2]), CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[HWBLK_IIC]), CLKDEV_CON_ID("rtc0", &mstp_clks[HWBLK_RTC]), CLKDEV_DEV_ID("sh_mobile_sdhi.0", &mstp_clks[HWBLK_SDHI]), CLKDEV_DEV_ID("sh_keysc.0", &mstp_clks[HWBLK_KEYSC]), CLKDEV_CON_ID("usbf0", &mstp_clks[HWBLK_USBF]), CLKDEV_CON_ID("2dg0", &mstp_clks[HWBLK_2DG]), CLKDEV_DEV_ID("siu-pcm-audio", &mstp_clks[HWBLK_SIU]), CLKDEV_DEV_ID("sh-vou.0", &mstp_clks[HWBLK_VOU]), CLKDEV_CON_ID("jpu0", &mstp_clks[HWBLK_JPU]), CLKDEV_CON_ID("beu0", &mstp_clks[HWBLK_BEU]), CLKDEV_DEV_ID("sh_mobile_ceu.0", &mstp_clks[HWBLK_CEU]), CLKDEV_CON_ID("veu0", &mstp_clks[HWBLK_VEU]), CLKDEV_CON_ID("vpu0", &mstp_clks[HWBLK_VPU]), CLKDEV_DEV_ID("sh_mobile_lcdc_fb.0", &mstp_clks[HWBLK_LCDC]), }; int __init arch_clk_init(void) { int k, ret = 0; /* autodetect extal or dll configuration */ if (__raw_readl(PLLCR) & 0x1000) pll_clk.parent = &dll_clk; else pll_clk.parent = &extal_clk; for (k = 0; !ret 
&& (k < ARRAY_SIZE(main_clks)); k++) ret = clk_register(main_clks[k]); clkdev_add_table(lookups, ARRAY_SIZE(lookups)); if (!ret) ret = sh_clk_div4_register(div4_clks, DIV4_NR, &div4_table); if (!ret) ret = sh_clk_div4_enable_register(div4_enable_clks, DIV4_ENABLE_NR, &div4_table); if (!ret) ret = sh_clk_div4_reparent_register(div4_reparent_clks, DIV4_REPARENT_NR, &div4_table); if (!ret) ret = sh_clk_div6_register(div6_clks, DIV6_NR); if (!ret) ret = sh_clk_mstp_register(mstp_clks, HWBLK_NR); return ret; }
gpl-2.0
avisconti/prova
sound/soc/msm/mpq8064.c
2160
51005
/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/clk.h> #include <linux/delay.h> #include <linux/gpio.h> #include <linux/mfd/pm8xxx/pm8921.h> #include <linux/platform_device.h> #include <linux/gpio.h> #include <linux/mfd/pm8xxx/pm8921.h> #include <linux/slab.h> #include <sound/core.h> #include <sound/soc.h> #include <sound/soc-dapm.h> #include <sound/pcm.h> #include <sound/jack.h> #include <sound/pcm_params.h> #include <asm/mach-types.h> #include <mach/socinfo.h> #include "msm-pcm-routing.h" #include "../codecs/wcd9310.h" /* 8064 machine driver */ #define PM8921_MPP_BASE (PM8921_GPIO_BASE + PM8921_NR_GPIOS) #define PM8821_NR_MPPS (4) #define PM8821_MPP_BASE (PM8921_MPP_BASE + PM8921_NR_MPPS) #define PM8921_GPIO_BASE NR_GPIO_IRQS #define PM8921_GPIO_PM_TO_SYS(pm_gpio) (pm_gpio - 1 + PM8921_GPIO_BASE) #define GPIO_EXPANDER_IRQ_BASE (TABLA_INTERRUPT_BASE + \ NR_TABLA_IRQS) #define GPIO_EXPANDER_GPIO_BASE (PM8821_MPP_BASE + PM8821_NR_MPPS) #define GPIO_EPM_EXPANDER_BASE GPIO_EXPANDER_GPIO_BASE #define SX150X_EPM_NR_GPIOS 16 #define SX150X_EPM_NR_IRQS 8 #define SX150X_EXP1_GPIO_BASE (GPIO_EPM_EXPANDER_BASE + \ SX150X_EPM_NR_GPIOS) #define SX150X_EXP1_IRQ_BASE (GPIO_EXPANDER_IRQ_BASE + \ SX150X_EPM_NR_IRQS) #define SX150X_EXP1_NR_IRQS 16 #define SX150X_EXP1_NR_GPIOS 16 #define SX150X_EXP2_GPIO_BASE (SX150X_EXP1_GPIO_BASE + \ SX150X_EXP1_NR_GPIOS) #define SX150X_EXP2_IRQ_BASE (SX150X_EXP1_IRQ_BASE + SX150X_EXP1_NR_IRQS) #define SX150X_EXP2_NR_IRQS 8 #define SX150X_EXP2_NR_GPIOS 8 #define 
SX150X_EXP3_GPIO_BASE (SX150X_EXP2_GPIO_BASE + \ SX150X_EXP2_NR_GPIOS) #define SX150X_EXP3_IRQ_BASE (SX150X_EXP2_IRQ_BASE + SX150X_EXP2_NR_IRQS) #define SX150X_EXP3_NR_IRQS 8 #define SX150X_EXP3_NR_GPIOS 8 #define SX150X_EXP4_GPIO_BASE (SX150X_EXP3_GPIO_BASE + \ SX150X_EXP3_NR_GPIOS) #define SX150X_EXP4_IRQ_BASE (SX150X_EXP3_IRQ_BASE + SX150X_EXP3_NR_IRQS) #define SX150X_EXP4_NR_IRQS 16 #define SX150X_EXP4_NR_GPIOS 16 #define SX150X_GPIO(_expander, _pin) (SX150X_EXP##_expander##_GPIO_BASE + _pin) enum { SX150X_EPM, SX150X_EXP1, SX150X_EXP2, SX150X_EXP3, SX150X_EXP4, }; #define MPQ8064_SPK_ON 1 #define MPQ8064_SPK_OFF 0 #define MSM_SLIM_0_RX_MAX_CHANNELS 2 #define MSM_SLIM_0_TX_MAX_CHANNELS 4 #define BOTTOM_SPK_AMP_POS 0x1 #define BOTTOM_SPK_AMP_NEG 0x2 #define TOP_SPK_AMP_POS 0x4 #define TOP_SPK_AMP_NEG 0x8 #define GPIO_AUX_PCM_DOUT 43 #define GPIO_AUX_PCM_DIN 44 #define GPIO_AUX_PCM_SYNC 45 #define GPIO_AUX_PCM_CLK 46 #define TABLA_EXT_CLK_RATE 12288000 #define TABLA_MBHC_DEF_BUTTONS 8 #define TABLA_MBHC_DEF_RLOADS 5 #define GPIO_SEC_I2S_RX_SCK 47 #define GPIO_SEC_I2S_RX_WS 48 #define GPIO_SEC_I2S_RX_DOUT 49 #define GPIO_SEC_I2S_RX_MCLK 50 #define I2S_MCLK_RATE 12288000 #define GPIO_MI2S_WS 27 #define GPIO_MI2S_SCLK 28 #define GPIO_MI2S_DOUT3 29 #define GPIO_MI2S_DOUT2 30 #define GPIO_MI2S_DOUT1 31 #define GPIO_MI2S_DOUT0 32 #define GPIO_MI2S_MCLK 33 static struct clk *sec_i2s_rx_osr_clk; static struct clk *sec_i2s_rx_bit_clk; struct request_gpio { unsigned gpio_no; char *gpio_name; }; static struct request_gpio sec_i2s_rx_gpio[] = { { .gpio_no = GPIO_SEC_I2S_RX_MCLK, .gpio_name = "SEC_I2S_RX_MCLK", }, { .gpio_no = GPIO_SEC_I2S_RX_SCK, .gpio_name = "SEC_I2S_RX_SCK", }, { .gpio_no = GPIO_SEC_I2S_RX_WS, .gpio_name = "SEC_I2S_RX_WS", }, { .gpio_no = GPIO_SEC_I2S_RX_DOUT, .gpio_name = "SEC_I2S_RX_DOUT", }, }; static struct request_gpio mi2s_gpio[] = { { .gpio_no = GPIO_MI2S_WS, .gpio_name = "MI2S_WS", }, { .gpio_no = GPIO_MI2S_SCLK, .gpio_name = "MI2S_SCLK", }, { 
.gpio_no = GPIO_MI2S_DOUT3, .gpio_name = "MI2S_DOUT3", }, { .gpio_no = GPIO_MI2S_DOUT2, .gpio_name = "MI2S_DOUT2", }, { .gpio_no = GPIO_MI2S_DOUT1, .gpio_name = "MI2S_DOUT1", }, { .gpio_no = GPIO_MI2S_DOUT0, .gpio_name = "MI2S_DOUT0", }, { .gpio_no = GPIO_MI2S_MCLK, .gpio_name = "MI2S_MCLK", }, }; static struct clk *mi2s_bit_clk; static u32 top_spk_pamp_gpio = PM8921_GPIO_PM_TO_SYS(18); static u32 bottom_spk_pamp_gpio = PM8921_GPIO_PM_TO_SYS(19); static int msm_spk_control; static int msm_ext_bottom_spk_pamp; static int msm_ext_top_spk_pamp; static int msm_slim_0_rx_ch = 1; static int msm_slim_0_tx_ch = 1; static int msm_hdmi_rx_ch = 8; static int mi2s_rate_variable; static int hdmi_rate_variable; static struct clk *codec_clk; static int clk_users; static struct snd_soc_jack hs_jack; static struct snd_soc_jack button_jack; static int detect_dtv_platform; static int msm_enable_codec_ext_clk(struct snd_soc_codec *codec, int enable, bool dapm); static struct tabla_mbhc_config mbhc_cfg = { .headset_jack = &hs_jack, .button_jack = &button_jack, .read_fw_bin = false, .calibration = NULL, .micbias = TABLA_MICBIAS2, .mclk_cb_fn = msm_enable_codec_ext_clk, .mclk_rate = TABLA_EXT_CLK_RATE, .gpio = 0, /* MBHC GPIO is not configured */ .gpio_irq = 0, .gpio_level_insert = 1, }; static void msm_enable_ext_spk_amp_gpio(u32 spk_amp_gpio) { int ret = 0; struct pm_gpio param = { .direction = PM_GPIO_DIR_OUT, .output_buffer = PM_GPIO_OUT_BUF_CMOS, .output_value = 1, .pull = PM_GPIO_PULL_NO, .vin_sel = PM_GPIO_VIN_S4, .out_strength = PM_GPIO_STRENGTH_MED, . 
function = PM_GPIO_FUNC_NORMAL, }; if (spk_amp_gpio == bottom_spk_pamp_gpio) { ret = gpio_request(bottom_spk_pamp_gpio, "BOTTOM_SPK_AMP"); if (ret) { pr_err("%s: Error requesting BOTTOM SPK AMP GPIO %u\n", __func__, bottom_spk_pamp_gpio); return; } ret = pm8xxx_gpio_config(bottom_spk_pamp_gpio, &param); if (ret) pr_err("%s: Failed to configure Bottom Spk Ampl" " gpio %u\n", __func__, bottom_spk_pamp_gpio); else { pr_debug("%s: enable Bottom spkr amp gpio\n", __func__); gpio_direction_output(bottom_spk_pamp_gpio, 1); } } else if (spk_amp_gpio == top_spk_pamp_gpio) { ret = gpio_request(top_spk_pamp_gpio, "TOP_SPK_AMP"); if (ret) { pr_err("%s: Error requesting GPIO %d\n", __func__, top_spk_pamp_gpio); return; } ret = pm8xxx_gpio_config(top_spk_pamp_gpio, &param); if (ret) pr_err("%s: Failed to configure Top Spk Ampl" " gpio %u\n", __func__, top_spk_pamp_gpio); else { pr_debug("%s: enable Top spkr amp gpio\n", __func__); gpio_direction_output(top_spk_pamp_gpio, 1); } } else { pr_err("%s: ERROR : Invalid External Speaker Ampl GPIO." " gpio = %u\n", __func__, spk_amp_gpio); return; } } static void msm_ext_spk_power_amp_on(u32 spk) { if (spk & (BOTTOM_SPK_AMP_POS | BOTTOM_SPK_AMP_NEG)) { if ((msm_ext_bottom_spk_pamp & BOTTOM_SPK_AMP_POS) && (msm_ext_bottom_spk_pamp & BOTTOM_SPK_AMP_NEG)) { pr_debug("%s() External Bottom Speaker Ampl already " "turned on. spk = 0x%08x\n", __func__, spk); return; } msm_ext_bottom_spk_pamp |= spk; if ((msm_ext_bottom_spk_pamp & BOTTOM_SPK_AMP_POS) && (msm_ext_bottom_spk_pamp & BOTTOM_SPK_AMP_NEG)) { msm_enable_ext_spk_amp_gpio(bottom_spk_pamp_gpio); pr_debug("%s: slepping 4 ms after turning on external " " Bottom Speaker Ampl\n", __func__); usleep_range(4000, 4000); } } else if (spk & (TOP_SPK_AMP_POS | TOP_SPK_AMP_NEG)) { if ((msm_ext_top_spk_pamp & TOP_SPK_AMP_POS) && (msm_ext_top_spk_pamp & TOP_SPK_AMP_NEG)) { pr_debug("%s() External Top Speaker Ampl already" "turned on. 
spk = 0x%08x\n", __func__, spk); return; } msm_ext_top_spk_pamp |= spk; if ((msm_ext_top_spk_pamp & TOP_SPK_AMP_POS) && (msm_ext_top_spk_pamp & TOP_SPK_AMP_NEG)) { msm_enable_ext_spk_amp_gpio(top_spk_pamp_gpio); pr_debug("%s: sleeping 4 ms after turning on " " external Top Speaker Ampl\n", __func__); usleep_range(4000, 4000); } } else { pr_err("%s: ERROR : Invalid External Speaker Ampl. spk = 0x%08x\n", __func__, spk); return; } } static void msm_ext_spk_power_amp_off(u32 spk) { if (spk & (BOTTOM_SPK_AMP_POS | BOTTOM_SPK_AMP_NEG)) { if (!msm_ext_bottom_spk_pamp) return; gpio_direction_output(bottom_spk_pamp_gpio, 0); gpio_free(bottom_spk_pamp_gpio); msm_ext_bottom_spk_pamp = 0; pr_debug("%s: sleeping 4 ms after turning off external Bottom" " Speaker Ampl\n", __func__); usleep_range(4000, 4000); } else if (spk & (TOP_SPK_AMP_POS | TOP_SPK_AMP_NEG)) { if (!msm_ext_top_spk_pamp) return; gpio_direction_output(top_spk_pamp_gpio, 0); gpio_free(top_spk_pamp_gpio); msm_ext_top_spk_pamp = 0; pr_debug("%s: sleeping 4 ms after turning off external Top" " Spkaker Ampl\n", __func__); usleep_range(4000, 4000); } else { pr_err("%s: ERROR : Invalid Ext Spk Ampl. 
spk = 0x%08x\n", __func__, spk); return; } } static void msm_ext_control(struct snd_soc_codec *codec) { struct snd_soc_dapm_context *dapm = &codec->dapm; pr_debug("%s: msm_spk_control = %d", __func__, msm_spk_control); if (msm_spk_control == MPQ8064_SPK_ON) { snd_soc_dapm_enable_pin(dapm, "Ext Spk Bottom Pos"); snd_soc_dapm_enable_pin(dapm, "Ext Spk Bottom Neg"); snd_soc_dapm_enable_pin(dapm, "Ext Spk Top Pos"); snd_soc_dapm_enable_pin(dapm, "Ext Spk Top Neg"); } else { snd_soc_dapm_disable_pin(dapm, "Ext Spk Bottom Pos"); snd_soc_dapm_disable_pin(dapm, "Ext Spk Bottom Neg"); snd_soc_dapm_disable_pin(dapm, "Ext Spk Top Pos"); snd_soc_dapm_disable_pin(dapm, "Ext Spk Top Neg"); } snd_soc_dapm_sync(dapm); } static int msm_get_spk(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { pr_debug("%s: msm_spk_control = %d", __func__, msm_spk_control); ucontrol->value.integer.value[0] = msm_spk_control; return 0; } static int msm_set_spk(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); pr_debug("%s()\n", __func__); if (msm_spk_control == ucontrol->value.integer.value[0]) return 0; msm_spk_control = ucontrol->value.integer.value[0]; msm_ext_control(codec); return 1; } static int msm_spkramp_event(struct snd_soc_dapm_widget *w, struct snd_kcontrol *k, int event) { pr_debug("%s() %x\n", __func__, SND_SOC_DAPM_EVENT_ON(event)); if (SND_SOC_DAPM_EVENT_ON(event)) { if (!strncmp(w->name, "Ext Spk Bottom Pos", 18)) msm_ext_spk_power_amp_on(BOTTOM_SPK_AMP_POS); else if (!strncmp(w->name, "Ext Spk Bottom Neg", 18)) msm_ext_spk_power_amp_on(BOTTOM_SPK_AMP_NEG); else if (!strncmp(w->name, "Ext Spk Top Pos", 15)) msm_ext_spk_power_amp_on(TOP_SPK_AMP_POS); else if (!strncmp(w->name, "Ext Spk Top Neg", 15)) msm_ext_spk_power_amp_on(TOP_SPK_AMP_NEG); else { pr_err("%s() Invalid Speaker Widget = %s\n", __func__, w->name); return -EINVAL; } } else { if (!strncmp(w->name, "Ext Spk Bottom Pos", 18)) 
msm_ext_spk_power_amp_off(BOTTOM_SPK_AMP_POS); else if (!strncmp(w->name, "Ext Spk Bottom Neg", 18)) msm_ext_spk_power_amp_off(BOTTOM_SPK_AMP_NEG); else if (!strncmp(w->name, "Ext Spk Top Pos", 15)) msm_ext_spk_power_amp_off(TOP_SPK_AMP_POS); else if (!strncmp(w->name, "Ext Spk Top Neg", 15)) msm_ext_spk_power_amp_off(TOP_SPK_AMP_NEG); else { pr_err("%s() Invalid Speaker Widget = %s\n", __func__, w->name); return -EINVAL; } } return 0; } static int msm_enable_codec_ext_clk(struct snd_soc_codec *codec, int enable, bool dapm) { pr_debug("%s: enable = %d\n", __func__, enable); if (enable) { clk_users++; pr_debug("%s: clk_users = %d\n", __func__, clk_users); if (clk_users != 1) return 0; if (codec_clk) { clk_set_rate(codec_clk, TABLA_EXT_CLK_RATE); clk_prepare_enable(codec_clk); tabla_mclk_enable(codec, 1, dapm); } else { pr_err("%s: Error setting Tabla MCLK\n", __func__); clk_users--; return -EINVAL; } } else { pr_debug("%s: clk_users = %d\n", __func__, clk_users); if (clk_users == 0) return 0; clk_users--; if (!clk_users) { pr_debug("%s: disabling MCLK. clk_users = %d\n", __func__, clk_users); clk_disable_unprepare(codec_clk); tabla_mclk_enable(codec, 0, dapm); } } return 0; } static int msm_mclk_event(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event) { pr_debug("%s: event = %d\n", __func__, event); switch (event) { case SND_SOC_DAPM_PRE_PMU: clk_users++; pr_debug("%s: clk_users = %d\n", __func__, clk_users); if (clk_users != 1) return 0; if (codec_clk) { clk_set_rate(codec_clk, 12288000); clk_prepare_enable(codec_clk); tabla_mclk_enable(w->codec, 1, true); } else { pr_err("%s: Error setting Tabla MCLK\n", __func__); clk_users--; return -EINVAL; } break; case SND_SOC_DAPM_POST_PMD: pr_debug("%s: clk_users = %d\n", __func__, clk_users); if (clk_users == 0) return 0; clk_users--; if (!clk_users) { pr_debug("%s: disabling MCLK. 
clk_users = %d\n", __func__, clk_users); clk_disable_unprepare(codec_clk); tabla_mclk_enable(w->codec, 0, true); } break; } return 0; } static const struct snd_soc_dapm_widget msm_dapm_widgets[] = { SND_SOC_DAPM_SUPPLY("MCLK", SND_SOC_NOPM, 0, 0, msm_mclk_event, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD), SND_SOC_DAPM_SPK("Ext Spk Bottom Pos", msm_spkramp_event), SND_SOC_DAPM_SPK("Ext Spk Bottom Neg", msm_spkramp_event), SND_SOC_DAPM_SPK("Ext Spk Top Pos", msm_spkramp_event), SND_SOC_DAPM_SPK("Ext Spk Top Neg", msm_spkramp_event), SND_SOC_DAPM_MIC("Handset Mic", NULL), SND_SOC_DAPM_MIC("Headset Mic", NULL), SND_SOC_DAPM_MIC("ANCRight Headset Mic", NULL), SND_SOC_DAPM_MIC("ANCLeft Headset Mic", NULL), }; static const struct snd_soc_dapm_route common_audio_map[] = { {"RX_BIAS", NULL, "MCLK"}, {"LDO_H", NULL, "MCLK"}, /* Speaker path */ {"Ext Spk Bottom Pos", NULL, "LINEOUT1"}, {"Ext Spk Bottom Neg", NULL, "LINEOUT3"}, {"Ext Spk Top Pos", NULL, "LINEOUT2"}, {"Ext Spk Top Neg", NULL, "LINEOUT4"}, /* Microphone path */ {"AMIC1", NULL, "MIC BIAS1 Internal1"}, {"MIC BIAS1 Internal1", NULL, "Handset Mic"}, {"AMIC2", NULL, "MIC BIAS2 External"}, {"MIC BIAS2 External", NULL, "Headset Mic"}, /** * AMIC3 and AMIC4 inputs are connected to ANC microphones * These mics are biased differently on CDP and FLUID * routing entries below are based on bias arrangement * on FLUID. 
*/ {"AMIC3", NULL, "MIC BIAS3 Internal1"}, {"MIC BIAS3 Internal1", NULL, "ANCRight Headset Mic"}, {"AMIC4", NULL, "MIC BIAS1 Internal2"}, {"MIC BIAS1 Internal2", NULL, "ANCLeft Headset Mic"}, {"HEADPHONE", NULL, "LDO_H"}, }; static const char *spk_function[] = {"Off", "On"}; static const char *slim0_rx_ch_text[] = {"One", "Two"}; static const char *slim0_tx_ch_text[] = {"One", "Two", "Three", "Four"}; static const char * const hdmi_rx_ch_text[] = {"Two", "Three", "Four", "Five", "Six", "Seven", "Eight"}; static const char * const mi2s_rate[] = {"Default", "Variable"}; static const char * const hdmi_rate[] = {"Default", "Variable"}; static const struct soc_enum msm_enum[] = { SOC_ENUM_SINGLE_EXT(2, spk_function), SOC_ENUM_SINGLE_EXT(2, slim0_rx_ch_text), SOC_ENUM_SINGLE_EXT(4, slim0_tx_ch_text), SOC_ENUM_SINGLE_EXT(7, hdmi_rx_ch_text), SOC_ENUM_SINGLE_EXT(2, mi2s_rate), SOC_ENUM_SINGLE_EXT(2, hdmi_rate), }; static int msm_slim_0_rx_ch_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { pr_debug("%s: msm_slim_0_rx_ch = %d\n", __func__, msm_slim_0_rx_ch); ucontrol->value.integer.value[0] = msm_slim_0_rx_ch - 1; return 0; } static int msm_slim_0_rx_ch_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { msm_slim_0_rx_ch = ucontrol->value.integer.value[0] + 1; pr_debug("%s: msm_slim_0_rx_ch = %d\n", __func__, msm_slim_0_rx_ch); return 1; } static int msm_slim_0_tx_ch_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { pr_debug("%s: msm_slim_0_tx_ch = %d\n", __func__, msm_slim_0_tx_ch); ucontrol->value.integer.value[0] = msm_slim_0_tx_ch - 1; return 0; } static int msm_slim_0_tx_ch_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { msm_slim_0_tx_ch = ucontrol->value.integer.value[0] + 1; pr_debug("%s: msm_slim_0_tx_ch = %d\n", __func__, msm_slim_0_tx_ch); return 1; } static int msm_hdmi_rx_ch_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { pr_debug("%s: 
msm_hdmi_rx_ch = %d\n", __func__, msm_hdmi_rx_ch); ucontrol->value.integer.value[0] = msm_hdmi_rx_ch - 2; return 0; } static int msm_hdmi_rx_ch_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { msm_hdmi_rx_ch = ucontrol->value.integer.value[0] + 2; pr_debug("%s: msm_hdmi_rx_ch = %d\n", __func__, msm_hdmi_rx_ch); return 1; } static int msm_mi2s_rate_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { mi2s_rate_variable = ucontrol->value.integer.value[0]; pr_debug("%s: mi2s_rate_variable = %d\n", __func__, mi2s_rate_variable); return 0; } static int msm_mi2s_rate_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { ucontrol->value.integer.value[0] = mi2s_rate_variable; return 0; } static int msm_hdmi_rate_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { hdmi_rate_variable = ucontrol->value.integer.value[0]; pr_debug("%s: hdmi_rate_variable = %d\n", __func__, hdmi_rate_variable); return 0; } static int msm_hdmi_rate_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { ucontrol->value.integer.value[0] = hdmi_rate_variable; return 0; } static const struct snd_kcontrol_new tabla_msm_controls[] = { SOC_ENUM_EXT("Speaker Function", msm_enum[0], msm_get_spk, msm_set_spk), SOC_ENUM_EXT("SLIM_0_RX Channels", msm_enum[1], msm_slim_0_rx_ch_get, msm_slim_0_rx_ch_put), SOC_ENUM_EXT("SLIM_0_TX Channels", msm_enum[2], msm_slim_0_tx_ch_get, msm_slim_0_tx_ch_put), SOC_ENUM_EXT("HDMI_RX Channels", msm_enum[3], msm_hdmi_rx_ch_get, msm_hdmi_rx_ch_put), SOC_ENUM_EXT("SEC RX Rate", msm_enum[4], msm_mi2s_rate_get, msm_mi2s_rate_put), SOC_ENUM_EXT("HDMI RX Rate", msm_enum[5], msm_hdmi_rate_get, msm_hdmi_rate_put), }; static void *def_tabla_mbhc_cal(void) { void *tabla_cal; struct tabla_mbhc_btn_detect_cfg *btn_cfg; u16 *btn_low, *btn_high; u8 *n_ready, *n_cic, *gain; tabla_cal = kzalloc(TABLA_MBHC_CAL_SIZE(TABLA_MBHC_DEF_BUTTONS, TABLA_MBHC_DEF_RLOADS), GFP_KERNEL); if 
(!tabla_cal) { pr_err("%s: out of memory\n", __func__); return NULL; } #define S(X, Y) ((TABLA_MBHC_CAL_GENERAL_PTR(tabla_cal)->X) = (Y)) S(t_ldoh, 100); S(t_bg_fast_settle, 100); S(t_shutdown_plug_rem, 255); S(mbhc_nsa, 4); S(mbhc_navg, 4); #undef S #define S(X, Y) ((TABLA_MBHC_CAL_PLUG_DET_PTR(tabla_cal)->X) = (Y)) S(mic_current, TABLA_PID_MIC_5_UA); S(hph_current, TABLA_PID_MIC_5_UA); S(t_mic_pid, 100); S(t_ins_complete, 250); S(t_ins_retry, 200); #undef S #define S(X, Y) ((TABLA_MBHC_CAL_PLUG_TYPE_PTR(tabla_cal)->X) = (Y)) S(v_no_mic, 30); S(v_hs_max, 2400); #undef S #define S(X, Y) ((TABLA_MBHC_CAL_BTN_DET_PTR(tabla_cal)->X) = (Y)) S(c[0], 62); S(c[1], 124); S(nc, 1); S(n_meas, 3); S(mbhc_nsc, 11); S(n_btn_meas, 1); S(n_btn_con, 2); S(num_btn, TABLA_MBHC_DEF_BUTTONS); S(v_btn_press_delta_sta, 100); S(v_btn_press_delta_cic, 50); #undef S btn_cfg = TABLA_MBHC_CAL_BTN_DET_PTR(tabla_cal); btn_low = tabla_mbhc_cal_btn_det_mp(btn_cfg, TABLA_BTN_DET_V_BTN_LOW); btn_high = tabla_mbhc_cal_btn_det_mp(btn_cfg, TABLA_BTN_DET_V_BTN_HIGH); btn_low[0] = -50; btn_high[0] = 20; btn_low[1] = 21; btn_high[1] = 62; btn_low[2] = 62; btn_high[2] = 104; btn_low[3] = 105; btn_high[3] = 143; btn_low[4] = 144; btn_high[4] = 181; btn_low[5] = 182; btn_high[5] = 218; btn_low[6] = 219; btn_high[6] = 254; btn_low[7] = 255; btn_high[7] = 330; n_ready = tabla_mbhc_cal_btn_det_mp(btn_cfg, TABLA_BTN_DET_N_READY); n_ready[0] = 80; n_ready[1] = 68; n_cic = tabla_mbhc_cal_btn_det_mp(btn_cfg, TABLA_BTN_DET_N_CIC); n_cic[0] = 60; n_cic[1] = 47; gain = tabla_mbhc_cal_btn_det_mp(btn_cfg, TABLA_BTN_DET_GAIN); gain[0] = 11; gain[1] = 9; return tabla_cal; } static int msm_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_dai *codec_dai = rtd->codec_dai; struct snd_soc_dai *cpu_dai = rtd->cpu_dai; int ret = 0; unsigned int rx_ch[SLIM_MAX_RX_PORTS], tx_ch[SLIM_MAX_TX_PORTS]; unsigned int rx_ch_cnt = 
0, tx_ch_cnt = 0; pr_debug("%s: ch=%d\n", __func__, msm_slim_0_rx_ch); if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { ret = snd_soc_dai_get_channel_map(codec_dai, &tx_ch_cnt, tx_ch, &rx_ch_cnt , rx_ch); if (ret < 0) { pr_err("%s: failed to get codec chan map\n", __func__); goto end; } ret = snd_soc_dai_set_channel_map(cpu_dai, 0, 0, msm_slim_0_rx_ch, rx_ch); if (ret < 0) { pr_err("%s: failed to set cpu chan map\n", __func__); goto end; } } else { ret = snd_soc_dai_get_channel_map(codec_dai, &tx_ch_cnt, tx_ch, &rx_ch_cnt , rx_ch); if (ret < 0) { pr_err("%s: failed to get codec chan map\n", __func__); goto end; } ret = snd_soc_dai_set_channel_map(cpu_dai, msm_slim_0_tx_ch, tx_ch, 0 , 0); if (ret < 0) { pr_err("%s: failed to set cpu chan map\n", __func__); goto end; } } end: return ret; } static int mpq_dtv_amp_power_up(void) { int ret; pr_debug("%s()\n", __func__); ret = gpio_request(SX150X_GPIO(1, 14), "DTV AMP Sleep"); if (ret) { pr_err("%s: DTV AMP Sleep GPIO request returns %d\n", __func__, ret); return ret; } ret = gpio_direction_output(SX150X_GPIO(1, 14), 0); if (ret) { pr_err("%s: DTV AMP Sleep GPIO set output returns %d\n", __func__, ret); return ret; } ret = gpio_request(SX150X_GPIO(1, 13), "DTV AMP Mute"); if (ret) { pr_err("%s: DTV AMP Mute GPIO request returns %d\n", __func__, ret); return ret; } ret = gpio_direction_output(SX150X_GPIO(1, 13), 0); if (ret) { pr_err("%s: DTV AMP Mute GPIO set output returns %d\n", __func__, ret); return ret; } return ret; } static int mpq_dtv_amp_power_down(void) { int ret; pr_debug("%s()\n", __func__); ret = gpio_direction_output(SX150X_GPIO(1, 14), 1); if (ret) { pr_err("%s: DTV AMP Sleep GPIO set output failed\n", __func__); return ret; } gpio_free(SX150X_GPIO(1, 14)); ret = gpio_direction_output(SX150X_GPIO(1, 13), 1); if (ret) { pr_err("%s: DTV AMP Mute GPIO set output failed\n", __func__); return ret; } gpio_free(SX150X_GPIO(1, 13)); return ret; } static int msm_audrx_init(struct snd_soc_pcm_runtime *rtd) { 
int err; struct snd_soc_codec *codec = rtd->codec; struct snd_soc_dapm_context *dapm = &codec->dapm; struct snd_soc_dai *cpu_dai = rtd->cpu_dai; struct snd_soc_dai *codec_dai = rtd->codec_dai; unsigned int rx_ch[TABLA_RX_MAX] = {138, 139, 140, 141, 142, 143, 144}; unsigned int tx_ch[TABLA_TX_MAX] = {128, 129, 130, 131, 132, 133, 134, 135, 136, 137}; pr_debug("%s(), dev_name%s\n", __func__, dev_name(cpu_dai->dev)); snd_soc_dapm_new_controls(dapm, msm_dapm_widgets, ARRAY_SIZE(msm_dapm_widgets)); snd_soc_dapm_add_routes(dapm, common_audio_map, ARRAY_SIZE(common_audio_map)); snd_soc_dapm_enable_pin(dapm, "Ext Spk Bottom Pos"); snd_soc_dapm_enable_pin(dapm, "Ext Spk Bottom Neg"); snd_soc_dapm_enable_pin(dapm, "Ext Spk Top Pos"); snd_soc_dapm_enable_pin(dapm, "Ext Spk Top Neg"); snd_soc_dapm_sync(dapm); err = snd_soc_jack_new(codec, "Headset Jack", (SND_JACK_HEADSET | SND_JACK_OC_HPHL | SND_JACK_OC_HPHR), &hs_jack); if (err) { pr_err("failed to create new jack\n"); return err; } err = snd_soc_jack_new(codec, "Button Jack", TABLA_JACK_BUTTON_MASK, &button_jack); if (err) { pr_err("failed to create new jack\n"); return err; } codec_clk = clk_get(cpu_dai->dev, "osr_clk"); err = tabla_hs_detect(codec, &mbhc_cfg); snd_soc_dai_set_channel_map(codec_dai, ARRAY_SIZE(tx_ch), tx_ch, ARRAY_SIZE(rx_ch), rx_ch); if (detect_dtv_platform) { err = gpio_request(SX150X_GPIO(1, 11), "DTV AMP Gain0"); if (err) { pr_err("%s: DTV AMP Gain0 request returns %d\n", __func__, err); return err; } err = gpio_direction_output(SX150X_GPIO(1, 11), 0); if (err) { pr_err("%s: DTV AMP Gain0 set output returns %d\n", __func__, err); return err; } gpio_free(SX150X_GPIO(1, 11)); err = gpio_request(SX150X_GPIO(1, 12), "DTV AMP Gain1"); if (err) { pr_err("%s: DTV AMP Gain0 request returns %d\n", __func__, err); return err; } err = gpio_direction_output(SX150X_GPIO(1, 12), 0); if (err) { pr_err("%s: DTV AMP Gain1 set output returns %d\n", __func__, err); return err; } gpio_free(SX150X_GPIO(1, 12)); err = 
gpio_request(SX150X_GPIO(1, 15), "DTV AMP Status"); if (err) { pr_err("%s: DTV AMP Status request returns %d\n", __func__, err); return err; } err = gpio_direction_input(SX150X_GPIO(1, 15)); if (err) { pr_err("%s: DTV AMP Status set output returns %d\n", __func__, err); return err; } err = mpq_dtv_amp_power_down(); if (err) { pr_err("%s: DTV AMP Status set output returns %d\n", __func__, err); return err; } } return err; } static int msm_slim_0_rx_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd, struct snd_pcm_hw_params *params) { struct snd_interval *rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE); struct snd_interval *channels = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS); pr_debug("%s()\n", __func__); rate->min = rate->max = 48000; channels->min = channels->max = msm_slim_0_rx_ch; return 0; } static int msm_slim_0_tx_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd, struct snd_pcm_hw_params *params) { struct snd_interval *rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE); struct snd_interval *channels = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS); pr_debug("%s()\n", __func__); rate->min = rate->max = 48000; channels->min = channels->max = msm_slim_0_tx_ch; return 0; } static int mpq8064_proxy_be_params_fixup(struct snd_soc_pcm_runtime *rtd, struct snd_pcm_hw_params *params) { struct snd_interval *rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE); struct snd_interval *channels = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS); pr_debug("%s ()\n", __func__); rate->min = rate->max = 48000; channels->min = channels->max = 2; return 0; } static int msm_be_i2s_hw_params_fixup(struct snd_soc_pcm_runtime *rtd, struct snd_pcm_hw_params *params) { struct snd_interval *rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE); struct snd_interval *channels = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS); pr_debug("%s mi2s_rate_variable = %d\n", __func__, mi2s_rate_variable); /*Configure the sample rate as 
48000 KHz for the LPCM playback*/ if (!mi2s_rate_variable) rate->min = rate->max = 48000; channels->min = channels->max = 2; return 0; } static int msm_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd, struct snd_pcm_hw_params *params) { struct snd_interval *rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE); pr_debug("%s()\n", __func__); rate->min = rate->max = 48000; return 0; } static int msm_hdmi_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd, struct snd_pcm_hw_params *params) { struct snd_interval *rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE); struct snd_interval *channels = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS); pr_debug("%s channels->min %u channels->max %u ()\n", __func__, channels->min, channels->max); /*Configure the sample rate as 48000 KHz for the LPCM playback*/ if (!hdmi_rate_variable) rate->min = rate->max = 48000; channels->min = channels->max = msm_hdmi_rx_ch; return 0; } static int msm_mi2s_free_gpios(void) { int i; for (i = 0; i < ARRAY_SIZE(mi2s_gpio); i++) gpio_free(mi2s_gpio[i].gpio_no); return 0; } static void msm_mi2s_shutdown(struct snd_pcm_substream *substream) { if (mi2s_bit_clk) { clk_disable_unprepare(mi2s_bit_clk); clk_put(mi2s_bit_clk); mi2s_bit_clk = NULL; } } static int configure_mi2s_gpio(void) { int rtn; int i; int j; for (i = 0; i < ARRAY_SIZE(mi2s_gpio); i++) { rtn = gpio_request(mi2s_gpio[i].gpio_no, mi2s_gpio[i].gpio_name); pr_debug("%s: gpio = %d, gpio name = %s, rtn = %d\n", __func__, mi2s_gpio[i].gpio_no, mi2s_gpio[i].gpio_name, rtn); if (rtn) { pr_err("%s: Failed to request gpio %d\n", __func__, mi2s_gpio[i].gpio_no); for (j = i; j >= 0; j--) gpio_free(mi2s_gpio[j].gpio_no); goto err; } } err: return rtn; } static int msm_mi2s_startup(struct snd_pcm_substream *substream) { int ret = 0; struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_dai *cpu_dai = rtd->cpu_dai; mi2s_bit_clk = clk_get(cpu_dai->dev, "bit_clk"); if (IS_ERR(mi2s_bit_clk)) return 
PTR_ERR(mi2s_bit_clk); clk_set_rate(mi2s_bit_clk, 0); ret = clk_prepare_enable(mi2s_bit_clk); if (IS_ERR_VALUE(ret)) { pr_err("Unable to enable mi2s_bit_clk\n"); clk_put(mi2s_bit_clk); return ret; } ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_CBM_CFM); if (IS_ERR_VALUE(ret)) pr_err("set format for CPU dai failed\n"); return ret; } static int mpq8064_auxpcm_be_params_fixup(struct snd_soc_pcm_runtime *rtd, struct snd_pcm_hw_params *params) { struct snd_interval *rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE); struct snd_interval *channels = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS); /* PCM only supports mono output with 8khz sample rate */ rate->min = rate->max = 8000; channels->min = channels->max = 1; return 0; } static int mpq8064_aux_pcm_get_gpios(void) { int ret = 0; pr_debug("%s\n", __func__); ret = gpio_request(GPIO_AUX_PCM_DOUT, "AUX PCM DOUT"); if (ret < 0) { pr_err("%s: Failed to request gpio(%d): AUX PCM DOUT", __func__, GPIO_AUX_PCM_DOUT); goto fail_dout; } ret = gpio_request(GPIO_AUX_PCM_DIN, "AUX PCM DIN"); if (ret < 0) { pr_err("%s: Failed to request gpio(%d): AUX PCM DIN", __func__, GPIO_AUX_PCM_DIN); goto fail_din; } ret = gpio_request(GPIO_AUX_PCM_SYNC, "AUX PCM SYNC"); if (ret < 0) { pr_err("%s: Failed to request gpio(%d): AUX PCM SYNC", __func__, GPIO_AUX_PCM_SYNC); goto fail_sync; } ret = gpio_request(GPIO_AUX_PCM_CLK, "AUX PCM CLK"); if (ret < 0) { pr_err("%s: Failed to request gpio(%d): AUX PCM CLK", __func__, GPIO_AUX_PCM_CLK); goto fail_clk; } return 0; fail_clk: gpio_free(GPIO_AUX_PCM_SYNC); fail_sync: gpio_free(GPIO_AUX_PCM_DIN); fail_din: gpio_free(GPIO_AUX_PCM_DOUT); fail_dout: return ret; } static int mpq8064_aux_pcm_free_gpios(void) { gpio_free(GPIO_AUX_PCM_DIN); gpio_free(GPIO_AUX_PCM_DOUT); gpio_free(GPIO_AUX_PCM_SYNC); gpio_free(GPIO_AUX_PCM_CLK); return 0; } static int msm_startup(struct snd_pcm_substream *substream) { pr_debug("%s(): substream = %s stream = %d\n", __func__, substream->name, 
substream->stream); if (detect_dtv_platform) mpq_dtv_amp_power_up(); return 0; } static void msm_shutdown(struct snd_pcm_substream *substream) { pr_debug("%s(): substream = %s stream = %d\n", __func__, substream->name, substream->stream); if (detect_dtv_platform) mpq_dtv_amp_power_down(); } static int mpq8064_auxpcm_startup(struct snd_pcm_substream *substream) { int ret = 0; pr_debug("%s(): substream = %s\n", __func__, substream->name); ret = mpq8064_aux_pcm_get_gpios(); if (ret < 0) { pr_err("%s: Aux PCM GPIO request failed\n", __func__); return -EINVAL; } return 0; } static void mpq8064_auxpcm_shutdown(struct snd_pcm_substream *substream) { pr_debug("%s(): substream = %s\n", __func__, substream->name); mpq8064_aux_pcm_free_gpios(); } static struct snd_soc_ops msm_be_ops = { .startup = msm_startup, .hw_params = msm_hw_params, .shutdown = msm_shutdown, }; static struct snd_soc_ops mpq8064_auxpcm_be_ops = { .startup = mpq8064_auxpcm_startup, .shutdown = mpq8064_auxpcm_shutdown, }; static int mpq8064_sec_i2s_rx_free_gpios(void) { int i; for (i = 0; i < ARRAY_SIZE(sec_i2s_rx_gpio); i++) gpio_free(sec_i2s_rx_gpio[i].gpio_no); return 0; } static int mpq8064_sec_i2s_rx_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { int rate = params_rate(params); int bit_clk_set = 0; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { switch (params_format(params)) { case SNDRV_PCM_FORMAT_S16_LE: bit_clk_set = I2S_MCLK_RATE/(rate * 2 * 16); clk_set_rate(sec_i2s_rx_bit_clk, bit_clk_set); break; case SNDRV_PCM_FORMAT_S24_LE: bit_clk_set = I2S_MCLK_RATE/(rate * 2 * 24); clk_set_rate(sec_i2s_rx_bit_clk, bit_clk_set); break; default: pr_err("wrong format\n"); break; } } return 0; } static void mpq8064_sec_i2s_rx_shutdown(struct snd_pcm_substream *substream) { if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { if (sec_i2s_rx_bit_clk) { clk_disable_unprepare(sec_i2s_rx_bit_clk); clk_put(sec_i2s_rx_bit_clk); sec_i2s_rx_bit_clk = NULL; } if 
(sec_i2s_rx_osr_clk) { clk_disable_unprepare(sec_i2s_rx_osr_clk); clk_put(sec_i2s_rx_osr_clk); sec_i2s_rx_osr_clk = NULL; } } pr_info("%s(): substream = %s stream = %d\n", __func__, substream->name, substream->stream); } static int configure_sec_i2s_rx_gpio(void) { int rtn; int i; int j; for (i = 0; i < ARRAY_SIZE(sec_i2s_rx_gpio); i++) { rtn = gpio_request(sec_i2s_rx_gpio[i].gpio_no, sec_i2s_rx_gpio[i].gpio_name); pr_debug("%s: gpio = %d, gpio name = %s, rtn = %d\n", __func__, sec_i2s_rx_gpio[i].gpio_no, sec_i2s_rx_gpio[i].gpio_name, rtn); if (rtn) { pr_err("%s: Failed to request gpio %d\n", __func__, sec_i2s_rx_gpio[i].gpio_no); for (j = i; j >= 0; j--) gpio_free(sec_i2s_rx_gpio[j].gpio_no); goto err; } } err: return rtn; } static int mpq8064_sec_i2s_rx_startup(struct snd_pcm_substream *substream) { int ret = 0; struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_dai *cpu_dai = rtd->cpu_dai; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { sec_i2s_rx_osr_clk = clk_get(cpu_dai->dev, "osr_clk"); if (IS_ERR(sec_i2s_rx_osr_clk)) { pr_err("Failed to get sec_i2s_rx_osr_clk\n"); return PTR_ERR(sec_i2s_rx_osr_clk); } clk_set_rate(sec_i2s_rx_osr_clk, I2S_MCLK_RATE); clk_prepare_enable(sec_i2s_rx_osr_clk); sec_i2s_rx_bit_clk = clk_get(cpu_dai->dev, "bit_clk"); if (IS_ERR(sec_i2s_rx_bit_clk)) { pr_err("Failed to get sec i2s osr_clk\n"); clk_disable_unprepare(sec_i2s_rx_osr_clk); clk_put(sec_i2s_rx_osr_clk); return PTR_ERR(sec_i2s_rx_bit_clk); } clk_set_rate(sec_i2s_rx_bit_clk, 1); ret = clk_prepare_enable(sec_i2s_rx_bit_clk); if (ret != 0) { pr_err("Unable to enable sec i2s rx_bit_clk\n"); clk_put(sec_i2s_rx_bit_clk); clk_disable_unprepare(sec_i2s_rx_osr_clk); clk_put(sec_i2s_rx_osr_clk); return ret; } ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_CBS_CFS); if (ret < 0) pr_err("set format for codec dai failed\n"); } pr_debug("%s: ret = %d\n", __func__, ret); pr_info("%s(): substream = %s stream = %d\n", __func__, substream->name, 
substream->stream); return ret; } static struct snd_soc_ops mpq8064_sec_i2s_rx_be_ops = { .startup = mpq8064_sec_i2s_rx_startup, .shutdown = mpq8064_sec_i2s_rx_shutdown, .hw_params = mpq8064_sec_i2s_rx_hw_params, }; static struct snd_soc_ops msm_mi2s_tx_be_ops = { .startup = msm_mi2s_startup, .shutdown = msm_mi2s_shutdown, }; /* Digital audio interface glue - connects codec <---> CPU */ static struct snd_soc_dai_link msm_dai[] = { /* FrontEnd DAI Links */ { .name = "MSM8960 Media1", .stream_name = "MultiMedia1", .cpu_dai_name = "MultiMedia1", .platform_name = "msm-pcm-dsp", .dynamic = 1, .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST}, .codec_dai_name = "snd-soc-dummy-dai", .codec_name = "snd-soc-dummy", .ignore_suspend = 1, .ignore_pmdown_time = 1, /* this dainlink has playback support */ .be_id = MSM_FRONTEND_DAI_MULTIMEDIA1 }, { .name = "MSM8960 Media2", .stream_name = "MultiMedia2", .cpu_dai_name = "MultiMedia2", .platform_name = "msm-multi-ch-pcm-dsp", .dynamic = 1, .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST}, .codec_dai_name = "snd-soc-dummy-dai", .codec_name = "snd-soc-dummy", .ignore_suspend = 1, .ignore_pmdown_time = 1, /* this dainlink has playback support */ .be_id = MSM_FRONTEND_DAI_MULTIMEDIA2, }, { .name = "Circuit-Switch Voice", .stream_name = "CS-Voice", .cpu_dai_name = "CS-VOICE", .platform_name = "msm-pcm-voice", .dynamic = 1, .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST}, .no_host_mode = SND_SOC_DAI_LINK_NO_HOST, .ignore_suspend = 1, .ignore_pmdown_time = 1, /* this dainlink has playback support */ .codec_dai_name = "snd-soc-dummy-dai", .codec_name = "snd-soc-dummy", .be_id = MSM_FRONTEND_DAI_CS_VOICE, }, { .name = "MSM VoIP", .stream_name = "VoIP", .cpu_dai_name = "VoIP", .platform_name = "msm-voip-dsp", .dynamic = 1, .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST}, .codec_dai_name = "snd-soc-dummy-dai", .codec_name = "snd-soc-dummy", .ignore_suspend = 1, 
.ignore_pmdown_time = 1, /* this dainlink has playback support */ .be_id = MSM_FRONTEND_DAI_VOIP, }, { .name = "MSM8960 Media3", .stream_name = "MultiMedia3", .cpu_dai_name = "MultiMedia3", .platform_name = "msm-multi-ch-pcm-dsp", .dynamic = 1, .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST}, .codec_dai_name = "snd-soc-dummy-dai", .codec_name = "snd-soc-dummy", .ignore_suspend = 1, .ignore_pmdown_time = 1, /* this dainlink has playback support */ .be_id = MSM_FRONTEND_DAI_MULTIMEDIA3, }, /* Hostless PMC purpose */ { .name = "SLIMBUS_0 Hostless", .stream_name = "SLIMBUS_0 Hostless", .cpu_dai_name = "SLIMBUS0_HOSTLESS", .platform_name = "msm-pcm-hostless", .dynamic = 1, .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST}, .no_host_mode = SND_SOC_DAI_LINK_NO_HOST, .ignore_suspend = 1, .ignore_pmdown_time = 1, /* this dainlink has playback support */ .codec_dai_name = "snd-soc-dummy-dai", .codec_name = "snd-soc-dummy", }, { .name = "INT_FM Hostless", .stream_name = "INT_FM Hostless", .cpu_dai_name = "INT_FM_HOSTLESS", .platform_name = "msm-pcm-hostless", .dynamic = 1, .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST}, .no_host_mode = SND_SOC_DAI_LINK_NO_HOST, .ignore_suspend = 1, .ignore_pmdown_time = 1, /* this dainlink has playback support */ .codec_dai_name = "snd-soc-dummy-dai", .codec_name = "snd-soc-dummy", }, { .name = "MSM AFE-PCM RX", .stream_name = "AFE-PROXY RX", .cpu_dai_name = "msm-dai-q6.241", .codec_name = "msm-stub-codec.1", .codec_dai_name = "msm-stub-rx", .platform_name = "msm-pcm-afe", .ignore_suspend = 1, .ignore_pmdown_time = 1, /* this dainlink has playback support */ }, { .name = "MSM AFE-PCM TX", .stream_name = "AFE-PROXY TX", .cpu_dai_name = "msm-dai-q6.240", .codec_name = "msm-stub-codec.1", .codec_dai_name = "msm-stub-tx", .platform_name = "msm-pcm-afe", .ignore_suspend = 1, }, { .name = "MSM8960 Compr1", .stream_name = "COMPR1", .cpu_dai_name = "MultiMedia4", .platform_name = "msm-compr-dsp", 
.dynamic = 1, .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST}, .codec_dai_name = "snd-soc-dummy-dai", .codec_name = "snd-soc-dummy", .ignore_suspend = 1, .ignore_pmdown_time = 1, /* this dainlink has playback support */ .be_id = MSM_FRONTEND_DAI_MULTIMEDIA4, }, { .name = "Voice Stub", .stream_name = "Voice Stub", .cpu_dai_name = "VOICE_STUB", .platform_name = "msm-pcm-hostless", .dynamic = 1, .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST}, .no_host_mode = SND_SOC_DAI_LINK_NO_HOST, .ignore_suspend = 1, .ignore_pmdown_time = 1, /* this dainlink has playback support */ .codec_dai_name = "snd-soc-dummy-dai", .codec_name = "snd-soc-dummy", }, /* HDMI Hostless */ { .name = "HDMI_RX_HOSTLESS", .stream_name = "HDMI_RX_HOSTLESS", .cpu_dai_name = "HDMI_HOSTLESS", .platform_name = "msm-pcm-hostless", .dynamic = 1, .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST}, .no_host_mode = SND_SOC_DAI_LINK_NO_HOST, .ignore_suspend = 1, .ignore_pmdown_time = 1, /* this dainlink has playback support */ .codec_dai_name = "snd-soc-dummy-dai", .codec_name = "snd-soc-dummy", }, /* MI2S TX Hostless */ { .name = "MI2S_TX Hostless", .stream_name = "MI2S_TX Hostless", .cpu_dai_name = "MI2S_TX_HOSTLESS", .platform_name = "msm-pcm-hostless", .dynamic = 1, .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST}, .no_host_mode = SND_SOC_DAI_LINK_NO_HOST, .ignore_suspend = 1, .codec_dai_name = "snd-soc-dummy-dai", .codec_name = "snd-soc-dummy", }, /* Secondary I2S RX Hostless */ { .name = "SEC_I2S_RX Hostless", .stream_name = "SEC_I2S_RX Hostless", .cpu_dai_name = "SEC_I2S_RX_HOSTLESS", .platform_name = "msm-pcm-hostless", .dynamic = 1, .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST}, .no_host_mode = SND_SOC_DAI_LINK_NO_HOST, .ignore_suspend = 1, .ignore_pmdown_time = 1, /* this dainlink has playback support */ .codec_dai_name = "snd-soc-dummy-dai", .codec_name = "snd-soc-dummy", }, { .name = "MSM8960 Media5", 
.stream_name = "MultiMedia5", .cpu_dai_name = "MultiMedia5", .platform_name = "msm-multi-ch-pcm-dsp", .dynamic = 1, .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST}, .codec_dai_name = "snd-soc-dummy-dai", .codec_name = "snd-soc-dummy", .ignore_suspend = 1, .ignore_pmdown_time = 1, /* this dailink has playback support */ .be_id = MSM_FRONTEND_DAI_MULTIMEDIA5 }, { .name = "MSM8960 Media6", .stream_name = "MultiMedia6", .cpu_dai_name = "MultiMedia6", .platform_name = "msm-multi-ch-pcm-dsp", .dynamic = 1, .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST}, .codec_dai_name = "snd-soc-dummy-dai", .codec_name = "snd-soc-dummy", .ignore_suspend = 1, .ignore_pmdown_time = 1, /* this dailink has playback support */ .be_id = MSM_FRONTEND_DAI_MULTIMEDIA6 }, { .name = "MSM8960 Compr2", .stream_name = "COMPR2", .cpu_dai_name = "MultiMedia7", .platform_name = "msm-compr-dsp", .dynamic = 1, .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST}, .codec_dai_name = "snd-soc-dummy-dai", .codec_name = "snd-soc-dummy", .ignore_suspend = 1, .ignore_pmdown_time = 1, /* this dailink has playback support */ .be_id = MSM_FRONTEND_DAI_MULTIMEDIA7, }, { .name = "MSM8960 Compr3", .stream_name = "COMPR3", .cpu_dai_name = "MultiMedia8", .platform_name = "msm-compr-dsp", .dynamic = 1, .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST}, .codec_dai_name = "snd-soc-dummy-dai", .codec_name = "snd-soc-dummy", .ignore_suspend = 1, .ignore_pmdown_time = 1, /* this dailink has playback support */ .be_id = MSM_FRONTEND_DAI_MULTIMEDIA8, }, { .name = "AUXPCM Hostless", .stream_name = "AUXPCM Hostless", .cpu_dai_name = "AUXPCM_HOSTLESS", .platform_name = "msm-pcm-hostless", .dynamic = 1, .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST}, .no_host_mode = SND_SOC_DAI_LINK_NO_HOST, .ignore_suspend = 1, .ignore_pmdown_time = 1, /* dainlink has playback support */ .codec_dai_name = "snd-soc-dummy-dai", .codec_name = "snd-soc-dummy", }, { 
.name = "MSM8960 Pseudo", .stream_name = "Pseudo", .cpu_dai_name = "Pseudo", .dynamic = 1, .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST}, .codec_dai_name = "snd-soc-dummy-dai", .codec_name = "snd-soc-dummy", .ignore_suspend = 1, .ignore_pmdown_time = 1, .be_id = MSM_FRONTEND_DAI_PSEUDO, }, /* Backend DAI Links */ { .name = LPASS_BE_SLIMBUS_0_RX, .stream_name = "Slimbus Playback", .cpu_dai_name = "msm-dai-q6.16384", .platform_name = "msm-pcm-routing", .codec_name = "tabla_codec", .codec_dai_name = "tabla_rx1", .no_pcm = 1, .be_id = MSM_BACKEND_DAI_SLIMBUS_0_RX, .init = &msm_audrx_init, .be_hw_params_fixup = msm_slim_0_rx_be_hw_params_fixup, .ops = &msm_be_ops, .ignore_pmdown_time = 1, /* this dainlink has playback support */ }, { .name = LPASS_BE_SLIMBUS_0_TX, .stream_name = "Slimbus Capture", .cpu_dai_name = "msm-dai-q6.16385", .platform_name = "msm-pcm-routing", .codec_name = "tabla_codec", .codec_dai_name = "tabla_tx1", .no_pcm = 1, .be_id = MSM_BACKEND_DAI_SLIMBUS_0_TX, .be_hw_params_fixup = msm_slim_0_tx_be_hw_params_fixup, .ops = &msm_be_ops, }, { .name = LPASS_BE_SEC_I2S_RX, .stream_name = "Secondary I2S Playback", .cpu_dai_name = "msm-dai-q6.4", .platform_name = "msm-pcm-routing", .codec_name = "cs8427-spdif.5-0014", .codec_dai_name = "spdif_rx", .no_pcm = 1, .be_id = MSM_BACKEND_DAI_SEC_I2S_RX, .be_hw_params_fixup = msm_be_i2s_hw_params_fixup, .ops = &mpq8064_sec_i2s_rx_be_ops, .ignore_pmdown_time = 1, /* this dainlink has playback support */ }, { .name = LPASS_BE_INT_FM_RX, .stream_name = "Internal FM Playback", .cpu_dai_name = "msm-dai-q6.12292", .platform_name = "msm-pcm-routing", .codec_name = "msm-stub-codec.1", .codec_dai_name = "msm-stub-rx", .no_pcm = 1, .be_id = MSM_BACKEND_DAI_INT_FM_RX, .be_hw_params_fixup = msm_be_hw_params_fixup, .ignore_pmdown_time = 1, /* this dainlink has playback support */ }, { .name = LPASS_BE_INT_FM_TX, .stream_name = "Internal FM Capture", .cpu_dai_name = "msm-dai-q6.12293", .platform_name = 
"msm-pcm-routing", .codec_name = "msm-stub-codec.1", .codec_dai_name = "msm-stub-tx", .no_pcm = 1, .be_id = MSM_BACKEND_DAI_INT_FM_TX, .be_hw_params_fixup = msm_be_hw_params_fixup, }, /* HDMI BACK END DAI Link */ { .name = LPASS_BE_HDMI, .stream_name = "HDMI Playback", .cpu_dai_name = "msm-dai-q6-hdmi.8", .platform_name = "msm-pcm-routing", .codec_name = "msm-stub-codec.1", .codec_dai_name = "msm-stub-rx", .no_pcm = 1, .be_id = MSM_BACKEND_DAI_HDMI_RX, .be_hw_params_fixup = msm_hdmi_be_hw_params_fixup, .ignore_pmdown_time = 1, /* this dainlink has playback support */ }, { .name = LPASS_BE_MI2S_TX, .stream_name = "MI2S Capture", .cpu_dai_name = "msm-dai-q6-mi2s", .platform_name = "msm-pcm-routing", .codec_name = "msm-stub-codec.1", .codec_dai_name = "msm-stub-tx", .no_pcm = 1, .be_id = MSM_BACKEND_DAI_MI2S_TX, .be_hw_params_fixup = msm_be_hw_params_fixup, .ops = &msm_mi2s_tx_be_ops, }, /* Backend AFE DAI Links */ { .name = LPASS_BE_AFE_PCM_RX, .stream_name = "AFE Playback", .cpu_dai_name = "msm-dai-q6.224", .platform_name = "msm-pcm-routing", .codec_name = "msm-stub-codec.1", .codec_dai_name = "msm-stub-rx", .no_pcm = 1, .be_id = MSM_BACKEND_DAI_AFE_PCM_RX, .be_hw_params_fixup = mpq8064_proxy_be_params_fixup, .ignore_pmdown_time = 1, /* this dainlink has playback support */ }, { .name = LPASS_BE_AFE_PCM_TX, .stream_name = "AFE Capture", .cpu_dai_name = "msm-dai-q6.225", .platform_name = "msm-pcm-routing", .codec_name = "msm-stub-codec.1", .codec_dai_name = "msm-stub-tx", .no_pcm = 1, .be_hw_params_fixup = mpq8064_proxy_be_params_fixup, .be_id = MSM_BACKEND_DAI_AFE_PCM_TX, }, /* AUX PCM Backend DAI Links */ { .name = LPASS_BE_AUXPCM_RX, .stream_name = "AUX PCM Playback", .cpu_dai_name = "msm-dai-q6.2", .platform_name = "msm-pcm-routing", .codec_name = "msm-stub-codec.1", .codec_dai_name = "msm-stub-rx", .no_pcm = 1, .be_id = MSM_BACKEND_DAI_AUXPCM_RX, .be_hw_params_fixup = mpq8064_auxpcm_be_params_fixup, .ops = &mpq8064_auxpcm_be_ops, .ignore_pmdown_time = 1, }, { 
.name = LPASS_BE_AUXPCM_TX, .stream_name = "AUX PCM Capture", .cpu_dai_name = "msm-dai-q6.3", .platform_name = "msm-pcm-routing", .codec_name = "msm-stub-codec.1", .codec_dai_name = "msm-stub-tx", .no_pcm = 1, .be_id = MSM_BACKEND_DAI_AUXPCM_TX, .be_hw_params_fixup = mpq8064_auxpcm_be_params_fixup, }, { .name = LPASS_BE_PSEUDO, .stream_name = "PSEUDO Playback", .cpu_dai_name = "msm-dai-q6.32769", .platform_name = "msm-pcm-routing", .codec_name = "snd-soc-dummy", .codec_dai_name = "snd-soc-dummy-dai", .no_pcm = 1, .be_id = MSM_BACKEND_DAI_PSEUDO_PORT, .be_hw_params_fixup = msm_be_hw_params_fixup, .ignore_pmdown_time = 1, }, }; static struct snd_soc_card snd_soc_card_msm = { .name = "mpq8064-tabla-snd-card", .dai_link = msm_dai, .num_links = ARRAY_SIZE(msm_dai), .controls = tabla_msm_controls, .num_controls = ARRAY_SIZE(tabla_msm_controls), }; static struct platform_device *msm_snd_device; static int __init msm_audio_init(void) { int ret; if (socinfo_get_id() != 130) { pr_err("%s: Not the right machine type\n", __func__); return -ENODEV; } if (socinfo_get_pmic_model() == PMIC_MODEL_PM8917) bottom_spk_pamp_gpio = PM8921_GPIO_PM_TO_SYS(16); if (machine_is_mpq8064_dtv()) detect_dtv_platform = 1; pr_info("MPQ8064: detect_dtv_platform is %d\n", detect_dtv_platform); mbhc_cfg.calibration = def_tabla_mbhc_cal(); if (!mbhc_cfg.calibration) { pr_err("Calibration data allocation failed\n"); return -ENOMEM; } msm_snd_device = platform_device_alloc("soc-audio", 0); if (!msm_snd_device) { pr_err("Platform device allocation failed\n"); kfree(mbhc_cfg.calibration); return -ENOMEM; } platform_set_drvdata(msm_snd_device, &snd_soc_card_msm); ret = platform_device_add(msm_snd_device); if (ret) { platform_device_put(msm_snd_device); kfree(mbhc_cfg.calibration); return ret; } configure_sec_i2s_rx_gpio(); configure_mi2s_gpio(); return ret; } module_init(msm_audio_init); static void __exit msm_audio_exit(void) { if (socinfo_get_id() != 130) { pr_err("%s: Not the right machine type\n", 
__func__); return ; } mpq8064_sec_i2s_rx_free_gpios(); msm_mi2s_free_gpios(); platform_device_unregister(msm_snd_device); kfree(mbhc_cfg.calibration); } module_exit(msm_audio_exit); MODULE_DESCRIPTION("ALSA SoC mpq8064"); MODULE_LICENSE("GPL v2");
gpl-2.0
djvoleur/test
drivers/net/ethernet/emulex/benet/be_roce.c
2160
4901
/*
 * Copyright (C) 2005 - 2013 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/module.h>

#include "be.h"
#include "be_cmds.h"

/* The single registered RoCE driver; NULL until be_roce_register_driver()
 * succeeds. All access is serialized by be_adapter_list_lock. */
static struct ocrdma_driver *ocrdma_drv;
/* All RoCE-capable be2net adapters currently known to this glue layer. */
static LIST_HEAD(be_adapter_list);
/* Protects be_adapter_list, ocrdma_drv, and every ocrdma_drv callback
 * invocation below. */
static DEFINE_MUTEX(be_adapter_list_lock);

/* Hand one adapter to the registered RoCE driver by filling a
 * be_dev_info descriptor (BARs, doorbells, MAC, interrupt vectors) and
 * calling its ->add() hook. Caller must hold be_adapter_list_lock.
 * No-op when no RoCE driver is registered. */
static void _be_roce_dev_add(struct be_adapter *adapter)
{
	struct be_dev_info dev_info;
	int i, num_vec;
	struct pci_dev *pdev = adapter->pdev;

	if (!ocrdma_drv)
		return;
	if (pdev->device == OC_DEVICE_ID5) {
		/* only msix is supported on these devices */
		if (!msix_enabled(adapter))
			return;
		/* DPP region address and length */
		dev_info.dpp_unmapped_addr = pci_resource_start(pdev, 2);
		dev_info.dpp_unmapped_len = pci_resource_len(pdev, 2);
	} else {
		dev_info.dpp_unmapped_addr = 0;
		dev_info.dpp_unmapped_len = 0;
	}
	dev_info.pdev = adapter->pdev;
	dev_info.db = adapter->db;
	dev_info.unmapped_db = adapter->roce_db.io_addr;
	dev_info.db_page_size = adapter->roce_db.size;
	dev_info.db_total_size = adapter->roce_db.total_size;
	dev_info.netdev = adapter->netdev;
	memcpy(dev_info.mac_addr, adapter->netdev->dev_addr, ETH_ALEN);
	dev_info.dev_family = adapter->sli_family;
	if (msix_enabled(adapter)) {
		/* provide all the vectors, so that EQ creation response
		 * can decide which one to use.
		 */
		num_vec = adapter->num_msix_vec + adapter->num_msix_roce_vec;
		dev_info.intr_mode = BE_INTERRUPT_MODE_MSIX;
		dev_info.msix.num_vectors = min(num_vec, MAX_ROCE_MSIX_VECTORS);
		/* provide start index of the vector,
		 * so in case of linear usage,
		 * it can use the base as starting point.
		 */
		dev_info.msix.start_vector = adapter->num_evt_qs;
		/* NOTE(review): the list is populated from index 0 of
		 * msix_entries while start_vector points past the NIC EQs —
		 * presumably the RoCE driver indexes relative to
		 * start_vector; confirm against ocrdma's EQ setup. */
		for (i = 0; i < dev_info.msix.num_vectors; i++) {
			dev_info.msix.vector_list[i] =
				adapter->msix_entries[i].vector;
		}
	} else {
		dev_info.msix.num_vectors = 0;
		dev_info.intr_mode = BE_INTERRUPT_MODE_INTX;
	}
	adapter->ocrdma_dev = ocrdma_drv->add(&dev_info);
}

/* Called when a RoCE-capable adapter is probed: track it on
 * be_adapter_list and, if a RoCE driver is already registered, attach it
 * immediately. */
void be_roce_dev_add(struct be_adapter *adapter)
{
	if (be_roce_supported(adapter)) {
		INIT_LIST_HEAD(&adapter->entry);
		mutex_lock(&be_adapter_list_lock);
		list_add_tail(&adapter->entry, &be_adapter_list);

		/* invoke add() routine of roce driver only if
		 * valid driver registered with add method and add() is not yet
		 * invoked on a given adapter.
		 */
		_be_roce_dev_add(adapter);
		mutex_unlock(&be_adapter_list_lock);
	}
}

/* Detach one adapter from the RoCE driver via its ->remove() hook and
 * clear the back-pointer. Caller must hold be_adapter_list_lock. */
void _be_roce_dev_remove(struct be_adapter *adapter)
{
	if (ocrdma_drv && ocrdma_drv->remove && adapter->ocrdma_dev)
		ocrdma_drv->remove(adapter->ocrdma_dev);
	adapter->ocrdma_dev = NULL;
}

/* Called on adapter teardown: detach from the RoCE driver and drop the
 * adapter from be_adapter_list. */
void be_roce_dev_remove(struct be_adapter *adapter)
{
	if (be_roce_supported(adapter)) {
		mutex_lock(&be_adapter_list_lock);
		_be_roce_dev_remove(adapter);
		list_del(&adapter->entry);
		mutex_unlock(&be_adapter_list_lock);
	}
}

/* Notify the RoCE driver of link-up (state 0 = BE_DEV_UP, by the
 * convention visible in _be_roce_dev_close below). Caller must hold
 * be_adapter_list_lock. */
void _be_roce_dev_open(struct be_adapter *adapter)
{
	if (ocrdma_drv && adapter->ocrdma_dev &&
	    ocrdma_drv->state_change_handler)
		ocrdma_drv->state_change_handler(adapter->ocrdma_dev, 0);
}

/* Locked wrapper: propagate netdev open to the RoCE driver. */
void be_roce_dev_open(struct be_adapter *adapter)
{
	if (be_roce_supported(adapter)) {
		mutex_lock(&be_adapter_list_lock);
		_be_roce_dev_open(adapter);
		mutex_unlock(&be_adapter_list_lock);
	}
}

/* Notify the RoCE driver of link-down (state 1). Caller must hold
 * be_adapter_list_lock. */
void _be_roce_dev_close(struct be_adapter *adapter)
{
	if (ocrdma_drv && adapter->ocrdma_dev &&
	    ocrdma_drv->state_change_handler)
		ocrdma_drv->state_change_handler(adapter->ocrdma_dev, 1);
}

/* Locked wrapper: propagate netdev close to the RoCE driver. */
void be_roce_dev_close(struct be_adapter *adapter)
{
	if (be_roce_supported(adapter)) {
		mutex_lock(&be_adapter_list_lock);
		_be_roce_dev_close(adapter);
		mutex_unlock(&be_adapter_list_lock);
	}
}

/* Entry point for the ocrdma module: register the (single) RoCE driver
 * and attach it to every adapter already discovered, replaying an "open"
 * for interfaces that are currently running and operationally up.
 * Returns -EINVAL if a driver is already registered. */
int be_roce_register_driver(struct ocrdma_driver *drv)
{
	struct be_adapter *dev;

	mutex_lock(&be_adapter_list_lock);
	if (ocrdma_drv) {
		mutex_unlock(&be_adapter_list_lock);
		return -EINVAL;
	}
	ocrdma_drv = drv;
	list_for_each_entry(dev, &be_adapter_list, entry) {
		struct net_device *netdev;
		_be_roce_dev_add(dev);
		netdev = dev->netdev;
		if (netif_running(netdev) && netif_oper_up(netdev))
			_be_roce_dev_open(dev);
	}
	mutex_unlock(&be_adapter_list_lock);
	return 0;
}
EXPORT_SYMBOL(be_roce_register_driver);

/* Entry point for ocrdma module unload: detach every attached adapter,
 * then forget the driver. Adapters stay on be_adapter_list so a later
 * re-registration can re-attach them. */
void be_roce_unregister_driver(struct ocrdma_driver *drv)
{
	struct be_adapter *dev;

	mutex_lock(&be_adapter_list_lock);
	list_for_each_entry(dev, &be_adapter_list, entry) {
		if (dev->ocrdma_dev)
			_be_roce_dev_remove(dev);
	}
	ocrdma_drv = NULL;
	mutex_unlock(&be_adapter_list_lock);
}
EXPORT_SYMBOL(be_roce_unregister_driver);
gpl-2.0
morogoku/MoRoKernel-I9300-4.4.4
drivers/ssb/sprom.c
2416
5355
/*
 * Sonics Silicon Backplane
 * Common SPROM support routines
 *
 * Copyright (C) 2005-2008 Michael Buesch <mb@bu3sch.de>
 * Copyright (C) 2005 Martin Langer <martin-langer@gmx.de>
 * Copyright (C) 2005 Stefano Brivio <st3@riseup.net>
 * Copyright (C) 2005 Danny van Dyk <kugelfang@gentoo.org>
 * Copyright (C) 2005 Andreas Jaggi <andreas.jaggi@waterwave.ch>
 *
 * Licensed under the GNU/GPL. See COPYING for details.
 */

#include "ssb_private.h"

#include <linux/ctype.h>
#include <linux/slab.h>


/*
 * Optional architecture-provided callback that supplies SPROM contents
 * when no valid SPROM can be read from the hardware itself.
 * Registered via ssb_arch_register_fallback_sprom().
 */
static int(*get_fallback_sprom)(struct ssb_bus *dev, struct ssb_sprom *out);


/* sprom2hex - Dump the SPROM image as an uppercase hex string into @buf.
 *
 * Each 16-bit SPROM word is byte-swapped and printed as four hex digits,
 * followed by a trailing newline.
 *
 * NOTE(review): the "buf_len - pos - 1" size argument reserves one byte
 * more than snprintf() strictly needs, and the returned count is pos + 1
 * (i.e. it includes the terminating NUL). This matches the historical
 * upstream behavior that the sysfs show path depends on; do not "fix"
 * either without auditing the callers of ssb_attr_sprom_show().
 */
static int sprom2hex(const u16 *sprom, char *buf, size_t buf_len,
		     size_t sprom_size_words)
{
	int i, pos = 0;

	for (i = 0; i < sprom_size_words; i++)
		pos += snprintf(buf + pos, buf_len - pos - 1,
				"%04X", swab16(sprom[i]) & 0xFFFF);
	pos += snprintf(buf + pos, buf_len - pos - 1, "\n");

	return pos + 1;
}

/* hex2sprom - Parse a hex dump (as produced by sprom2hex) back into
 * SPROM words.
 *
 * Trailing whitespace/NULs are stripped; after that the dump length must
 * be exactly four hex digits per SPROM word, or -EINVAL is returned.
 * Each parsed 16-bit value is byte-swapped back before being stored.
 *
 * Returns 0 on success or a negative errno on malformed input.
 */
static int hex2sprom(u16 *sprom, const char *dump, size_t len,
		     size_t sprom_size_words)
{
	char c, tmp[5] = { 0 };	/* 4 hex digits + NUL for one word */
	int err, cnt = 0;
	unsigned long parsed;

	/* Strip whitespace at the end. */
	while (len) {
		c = dump[len - 1];
		if (!isspace(c) && c != '\0')
			break;
		len--;
	}
	/* Length must match exactly. */
	if (len != sprom_size_words * 4)
		return -EINVAL;

	while (cnt < sprom_size_words) {
		memcpy(tmp, dump, 4);
		dump += 4;
		/* kstrtoul() replaces the deprecated strict_strtoul();
		 * identical signature and strict-parse semantics. */
		err = kstrtoul(tmp, 16, &parsed);
		if (err)
			return err;
		sprom[cnt++] = swab16((u16)parsed);
	}

	return 0;
}

/* Common sprom device-attribute show-handler */
ssize_t ssb_attr_sprom_show(struct ssb_bus *bus, char *buf,
			    int (*sprom_read)(struct ssb_bus *bus, u16 *sprom))
{
	u16 *sprom;
	int err = -ENOMEM;
	ssize_t count = 0;
	size_t sprom_size_words = bus->sprom_size;

	sprom = kcalloc(sprom_size_words, sizeof(u16), GFP_KERNEL);
	if (!sprom)
		goto out;

	/* Use interruptible locking, as the SPROM write might
	 * be holding the lock for several seconds. So allow userspace
	 * to cancel operation. */
	err = -ERESTARTSYS;
	if (mutex_lock_interruptible(&bus->sprom_mutex))
		goto out_kfree;
	err = sprom_read(bus, sprom);
	mutex_unlock(&bus->sprom_mutex);

	if (!err)
		count = sprom2hex(sprom, buf, PAGE_SIZE, sprom_size_words);

out_kfree:
	kfree(sprom);
out:
	return err ? err : count;
}

/* Common sprom device-attribute store-handler */
ssize_t ssb_attr_sprom_store(struct ssb_bus *bus,
			     const char *buf, size_t count,
			     int (*sprom_check_crc)(const u16 *sprom, size_t size),
			     int (*sprom_write)(struct ssb_bus *bus, const u16 *sprom))
{
	u16 *sprom;
	int res = 0, err = -ENOMEM;
	size_t sprom_size_words = bus->sprom_size;
	struct ssb_freeze_context freeze;

	/* Consistency: use the local sprom_size_words (== bus->sprom_size)
	 * throughout, as the rest of this function already does. */
	sprom = kcalloc(sprom_size_words, sizeof(u16), GFP_KERNEL);
	if (!sprom)
		goto out;
	err = hex2sprom(sprom, buf, count, sprom_size_words);
	if (err) {
		err = -EINVAL;
		goto out_kfree;
	}
	err = sprom_check_crc(sprom, sprom_size_words);
	if (err) {
		err = -EINVAL;
		goto out_kfree;
	}

	/* Use interruptible locking, as the SPROM write might
	 * be holding the lock for several seconds. So allow userspace
	 * to cancel operation. */
	err = -ERESTARTSYS;
	if (mutex_lock_interruptible(&bus->sprom_mutex))
		goto out_kfree;
	/* Devices must be quiesced while the SPROM is rewritten. */
	err = ssb_devices_freeze(bus, &freeze);
	if (err) {
		ssb_printk(KERN_ERR PFX "SPROM write: Could not freeze all devices\n");
		goto out_unlock;
	}
	res = sprom_write(bus, sprom);
	err = ssb_devices_thaw(&freeze);
	if (err)
		ssb_printk(KERN_ERR PFX "SPROM write: Could not thaw all devices\n");
out_unlock:
	mutex_unlock(&bus->sprom_mutex);
out_kfree:
	kfree(sprom);
out:
	if (res)
		return res;
	return err ? err : count;
}

/**
 * ssb_arch_register_fallback_sprom - Registers a method providing a
 * fallback SPROM if no SPROM is found.
 *
 * @sprom_callback: The callback function.
 *
 * With this function the architecture implementation may register a
 * callback handler which fills the SPROM data structure. The fallback is
 * only used for PCI based SSB devices, where no valid SPROM can be found
 * in the shadow registers.
 *
 * This function is useful for weird architectures that have a half-assed
 * SSB device hardwired to their PCI bus.
 *
 * Note that it does only work with PCI attached SSB devices. PCMCIA
 * devices currently don't use this fallback.
 * Architectures must provide the SPROM for native SSB devices anyway, so
 * the fallback also isn't used for native devices.
 *
 * This function is available for architecture code, only. So it is not
 * exported.
 */
int ssb_arch_register_fallback_sprom(int (*sprom_callback)(struct ssb_bus *bus,
				     struct ssb_sprom *out))
{
	/* Only one fallback provider may register. */
	if (get_fallback_sprom)
		return -EEXIST;

	get_fallback_sprom = sprom_callback;

	return 0;
}

/* Fill @out from the registered fallback provider, if any.
 * Returns -ENOENT when no fallback has been registered. */
int ssb_fill_sprom_with_fallback(struct ssb_bus *bus, struct ssb_sprom *out)
{
	if (!get_fallback_sprom)
		return -ENOENT;

	return get_fallback_sprom(bus, out);
}

/* http://bcm-v4.sipsolutions.net/802.11/IsSpromAvailable */
bool ssb_is_sprom_available(struct ssb_bus *bus)
{
	/* status register only exists on chipcomon rev >= 11 and we need check
	   for >= 31 only */
	/* this routine differs from specs as we do not access SPROM directly
	   on PCMCIA */
	if (bus->bustype == SSB_BUSTYPE_PCI &&
	    bus->chipco.dev &&	/* can be unavailable! */
	    bus->chipco.dev->id.revision >= 31)
		return bus->chipco.capabilities & SSB_CHIPCO_CAP_SPROM;

	return true;
}
gpl-2.0
junkTzu/kernel-MB860
net/rds/ib_rdma.c
2928
20420
/* * Copyright (c) 2006 Oracle. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * */ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/rculist.h> #include "rds.h" #include "ib.h" #include "xlist.h" static DEFINE_PER_CPU(unsigned long, clean_list_grace); #define CLEAN_LIST_BUSY_BIT 0 /* * This is stored as mr->r_trans_private. 
*/ struct rds_ib_mr { struct rds_ib_device *device; struct rds_ib_mr_pool *pool; struct ib_fmr *fmr; struct xlist_head xlist; /* unmap_list is for freeing */ struct list_head unmap_list; unsigned int remap_count; struct scatterlist *sg; unsigned int sg_len; u64 *dma; int sg_dma_len; }; /* * Our own little FMR pool */ struct rds_ib_mr_pool { struct mutex flush_lock; /* serialize fmr invalidate */ struct delayed_work flush_worker; /* flush worker */ atomic_t item_count; /* total # of MRs */ atomic_t dirty_count; /* # dirty of MRs */ struct xlist_head drop_list; /* MRs that have reached their max_maps limit */ struct xlist_head free_list; /* unused MRs */ struct xlist_head clean_list; /* global unused & unamapped MRs */ wait_queue_head_t flush_wait; atomic_t free_pinned; /* memory pinned by free MRs */ unsigned long max_items; unsigned long max_items_soft; unsigned long max_free_pinned; struct ib_fmr_attr fmr_attr; }; static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool, int free_all, struct rds_ib_mr **); static void rds_ib_teardown_mr(struct rds_ib_mr *ibmr); static void rds_ib_mr_pool_flush_worker(struct work_struct *work); static struct rds_ib_device *rds_ib_get_device(__be32 ipaddr) { struct rds_ib_device *rds_ibdev; struct rds_ib_ipaddr *i_ipaddr; rcu_read_lock(); list_for_each_entry_rcu(rds_ibdev, &rds_ib_devices, list) { list_for_each_entry_rcu(i_ipaddr, &rds_ibdev->ipaddr_list, list) { if (i_ipaddr->ipaddr == ipaddr) { atomic_inc(&rds_ibdev->refcount); rcu_read_unlock(); return rds_ibdev; } } } rcu_read_unlock(); return NULL; } static int rds_ib_add_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr) { struct rds_ib_ipaddr *i_ipaddr; i_ipaddr = kmalloc(sizeof *i_ipaddr, GFP_KERNEL); if (!i_ipaddr) return -ENOMEM; i_ipaddr->ipaddr = ipaddr; spin_lock_irq(&rds_ibdev->spinlock); list_add_tail_rcu(&i_ipaddr->list, &rds_ibdev->ipaddr_list); spin_unlock_irq(&rds_ibdev->spinlock); return 0; } static void rds_ib_remove_ipaddr(struct rds_ib_device 
*rds_ibdev, __be32 ipaddr) { struct rds_ib_ipaddr *i_ipaddr; struct rds_ib_ipaddr *to_free = NULL; spin_lock_irq(&rds_ibdev->spinlock); list_for_each_entry_rcu(i_ipaddr, &rds_ibdev->ipaddr_list, list) { if (i_ipaddr->ipaddr == ipaddr) { list_del_rcu(&i_ipaddr->list); to_free = i_ipaddr; break; } } spin_unlock_irq(&rds_ibdev->spinlock); if (to_free) { synchronize_rcu(); kfree(to_free); } } int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr) { struct rds_ib_device *rds_ibdev_old; rds_ibdev_old = rds_ib_get_device(ipaddr); if (rds_ibdev_old) { rds_ib_remove_ipaddr(rds_ibdev_old, ipaddr); rds_ib_dev_put(rds_ibdev_old); } return rds_ib_add_ipaddr(rds_ibdev, ipaddr); } void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn) { struct rds_ib_connection *ic = conn->c_transport_data; /* conn was previously on the nodev_conns_list */ spin_lock_irq(&ib_nodev_conns_lock); BUG_ON(list_empty(&ib_nodev_conns)); BUG_ON(list_empty(&ic->ib_node)); list_del(&ic->ib_node); spin_lock(&rds_ibdev->spinlock); list_add_tail(&ic->ib_node, &rds_ibdev->conn_list); spin_unlock(&rds_ibdev->spinlock); spin_unlock_irq(&ib_nodev_conns_lock); ic->rds_ibdev = rds_ibdev; atomic_inc(&rds_ibdev->refcount); } void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn) { struct rds_ib_connection *ic = conn->c_transport_data; /* place conn on nodev_conns_list */ spin_lock(&ib_nodev_conns_lock); spin_lock_irq(&rds_ibdev->spinlock); BUG_ON(list_empty(&ic->ib_node)); list_del(&ic->ib_node); spin_unlock_irq(&rds_ibdev->spinlock); list_add_tail(&ic->ib_node, &ib_nodev_conns); spin_unlock(&ib_nodev_conns_lock); ic->rds_ibdev = NULL; rds_ib_dev_put(rds_ibdev); } void rds_ib_destroy_nodev_conns(void) { struct rds_ib_connection *ic, *_ic; LIST_HEAD(tmp_list); /* avoid calling conn_destroy with irqs off */ spin_lock_irq(&ib_nodev_conns_lock); list_splice(&ib_nodev_conns, &tmp_list); spin_unlock_irq(&ib_nodev_conns_lock); 
list_for_each_entry_safe(ic, _ic, &tmp_list, ib_node) rds_conn_destroy(ic->conn); } struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_ibdev) { struct rds_ib_mr_pool *pool; pool = kzalloc(sizeof(*pool), GFP_KERNEL); if (!pool) return ERR_PTR(-ENOMEM); INIT_XLIST_HEAD(&pool->free_list); INIT_XLIST_HEAD(&pool->drop_list); INIT_XLIST_HEAD(&pool->clean_list); mutex_init(&pool->flush_lock); init_waitqueue_head(&pool->flush_wait); INIT_DELAYED_WORK(&pool->flush_worker, rds_ib_mr_pool_flush_worker); pool->fmr_attr.max_pages = fmr_message_size; pool->fmr_attr.max_maps = rds_ibdev->fmr_max_remaps; pool->fmr_attr.page_shift = PAGE_SHIFT; pool->max_free_pinned = rds_ibdev->max_fmrs * fmr_message_size / 4; /* We never allow more than max_items MRs to be allocated. * When we exceed more than max_items_soft, we start freeing * items more aggressively. * Make sure that max_items > max_items_soft > max_items / 2 */ pool->max_items_soft = rds_ibdev->max_fmrs * 3 / 4; pool->max_items = rds_ibdev->max_fmrs; return pool; } void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev, struct rds_info_rdma_connection *iinfo) { struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool; iinfo->rdma_mr_max = pool->max_items; iinfo->rdma_mr_size = pool->fmr_attr.max_pages; } void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *pool) { cancel_delayed_work_sync(&pool->flush_worker); rds_ib_flush_mr_pool(pool, 1, NULL); WARN_ON(atomic_read(&pool->item_count)); WARN_ON(atomic_read(&pool->free_pinned)); kfree(pool); } static void refill_local(struct rds_ib_mr_pool *pool, struct xlist_head *xl, struct rds_ib_mr **ibmr_ret) { struct xlist_head *ibmr_xl; ibmr_xl = xlist_del_head_fast(xl); *ibmr_ret = list_entry(ibmr_xl, struct rds_ib_mr, xlist); } static inline struct rds_ib_mr *rds_ib_reuse_fmr(struct rds_ib_mr_pool *pool) { struct rds_ib_mr *ibmr = NULL; struct xlist_head *ret; unsigned long *flag; preempt_disable(); flag = &__get_cpu_var(clean_list_grace); set_bit(CLEAN_LIST_BUSY_BIT, flag); 
ret = xlist_del_head(&pool->clean_list); if (ret) ibmr = list_entry(ret, struct rds_ib_mr, xlist); clear_bit(CLEAN_LIST_BUSY_BIT, flag); preempt_enable(); return ibmr; } static inline void wait_clean_list_grace(void) { int cpu; unsigned long *flag; for_each_online_cpu(cpu) { flag = &per_cpu(clean_list_grace, cpu); while (test_bit(CLEAN_LIST_BUSY_BIT, flag)) cpu_relax(); } } static struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *rds_ibdev) { struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool; struct rds_ib_mr *ibmr = NULL; int err = 0, iter = 0; if (atomic_read(&pool->dirty_count) >= pool->max_items / 10) schedule_delayed_work(&pool->flush_worker, 10); while (1) { ibmr = rds_ib_reuse_fmr(pool); if (ibmr) return ibmr; /* No clean MRs - now we have the choice of either * allocating a fresh MR up to the limit imposed by the * driver, or flush any dirty unused MRs. * We try to avoid stalling in the send path if possible, * so we allocate as long as we're allowed to. * * We're fussy with enforcing the FMR limit, though. If the driver * tells us we can't use more than N fmrs, we shouldn't start * arguing with it */ if (atomic_inc_return(&pool->item_count) <= pool->max_items) break; atomic_dec(&pool->item_count); if (++iter > 2) { rds_ib_stats_inc(s_ib_rdma_mr_pool_depleted); return ERR_PTR(-EAGAIN); } /* We do have some empty MRs. Flush them out. 
*/ rds_ib_stats_inc(s_ib_rdma_mr_pool_wait); rds_ib_flush_mr_pool(pool, 0, &ibmr); if (ibmr) return ibmr; } ibmr = kzalloc_node(sizeof(*ibmr), GFP_KERNEL, rdsibdev_to_node(rds_ibdev)); if (!ibmr) { err = -ENOMEM; goto out_no_cigar; } memset(ibmr, 0, sizeof(*ibmr)); ibmr->fmr = ib_alloc_fmr(rds_ibdev->pd, (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_WRITE| IB_ACCESS_REMOTE_ATOMIC), &pool->fmr_attr); if (IS_ERR(ibmr->fmr)) { err = PTR_ERR(ibmr->fmr); ibmr->fmr = NULL; printk(KERN_WARNING "RDS/IB: ib_alloc_fmr failed (err=%d)\n", err); goto out_no_cigar; } rds_ib_stats_inc(s_ib_rdma_mr_alloc); return ibmr; out_no_cigar: if (ibmr) { if (ibmr->fmr) ib_dealloc_fmr(ibmr->fmr); kfree(ibmr); } atomic_dec(&pool->item_count); return ERR_PTR(err); } static int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev, struct rds_ib_mr *ibmr, struct scatterlist *sg, unsigned int nents) { struct ib_device *dev = rds_ibdev->dev; struct scatterlist *scat = sg; u64 io_addr = 0; u64 *dma_pages; u32 len; int page_cnt, sg_dma_len; int i, j; int ret; sg_dma_len = ib_dma_map_sg(dev, sg, nents, DMA_BIDIRECTIONAL); if (unlikely(!sg_dma_len)) { printk(KERN_WARNING "RDS/IB: dma_map_sg failed!\n"); return -EBUSY; } len = 0; page_cnt = 0; for (i = 0; i < sg_dma_len; ++i) { unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]); u64 dma_addr = ib_sg_dma_address(dev, &scat[i]); if (dma_addr & ~PAGE_MASK) { if (i > 0) return -EINVAL; else ++page_cnt; } if ((dma_addr + dma_len) & ~PAGE_MASK) { if (i < sg_dma_len - 1) return -EINVAL; else ++page_cnt; } len += dma_len; } page_cnt += len >> PAGE_SHIFT; if (page_cnt > fmr_message_size) return -EINVAL; dma_pages = kmalloc_node(sizeof(u64) * page_cnt, GFP_ATOMIC, rdsibdev_to_node(rds_ibdev)); if (!dma_pages) return -ENOMEM; page_cnt = 0; for (i = 0; i < sg_dma_len; ++i) { unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]); u64 dma_addr = ib_sg_dma_address(dev, &scat[i]); for (j = 0; j < dma_len; j += PAGE_SIZE) dma_pages[page_cnt++] = (dma_addr 
& PAGE_MASK) + j; } ret = ib_map_phys_fmr(ibmr->fmr, dma_pages, page_cnt, io_addr); if (ret) goto out; /* Success - we successfully remapped the MR, so we can * safely tear down the old mapping. */ rds_ib_teardown_mr(ibmr); ibmr->sg = scat; ibmr->sg_len = nents; ibmr->sg_dma_len = sg_dma_len; ibmr->remap_count++; rds_ib_stats_inc(s_ib_rdma_mr_used); ret = 0; out: kfree(dma_pages); return ret; } void rds_ib_sync_mr(void *trans_private, int direction) { struct rds_ib_mr *ibmr = trans_private; struct rds_ib_device *rds_ibdev = ibmr->device; switch (direction) { case DMA_FROM_DEVICE: ib_dma_sync_sg_for_cpu(rds_ibdev->dev, ibmr->sg, ibmr->sg_dma_len, DMA_BIDIRECTIONAL); break; case DMA_TO_DEVICE: ib_dma_sync_sg_for_device(rds_ibdev->dev, ibmr->sg, ibmr->sg_dma_len, DMA_BIDIRECTIONAL); break; } } static void __rds_ib_teardown_mr(struct rds_ib_mr *ibmr) { struct rds_ib_device *rds_ibdev = ibmr->device; if (ibmr->sg_dma_len) { ib_dma_unmap_sg(rds_ibdev->dev, ibmr->sg, ibmr->sg_len, DMA_BIDIRECTIONAL); ibmr->sg_dma_len = 0; } /* Release the s/g list */ if (ibmr->sg_len) { unsigned int i; for (i = 0; i < ibmr->sg_len; ++i) { struct page *page = sg_page(&ibmr->sg[i]); /* FIXME we need a way to tell a r/w MR * from a r/o MR */ BUG_ON(irqs_disabled()); set_page_dirty(page); put_page(page); } kfree(ibmr->sg); ibmr->sg = NULL; ibmr->sg_len = 0; } } static void rds_ib_teardown_mr(struct rds_ib_mr *ibmr) { unsigned int pinned = ibmr->sg_len; __rds_ib_teardown_mr(ibmr); if (pinned) { struct rds_ib_device *rds_ibdev = ibmr->device; struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool; atomic_sub(pinned, &pool->free_pinned); } } static inline unsigned int rds_ib_flush_goal(struct rds_ib_mr_pool *pool, int free_all) { unsigned int item_count; item_count = atomic_read(&pool->item_count); if (free_all) return item_count; return 0; } /* * given an xlist of mrs, put them all into the list_head for more processing */ static void xlist_append_to_list(struct xlist_head *xlist, struct list_head 
*list) { struct rds_ib_mr *ibmr; struct xlist_head splice; struct xlist_head *cur; struct xlist_head *next; splice.next = NULL; xlist_splice(xlist, &splice); cur = splice.next; while (cur) { next = cur->next; ibmr = list_entry(cur, struct rds_ib_mr, xlist); list_add_tail(&ibmr->unmap_list, list); cur = next; } } /* * this takes a list head of mrs and turns it into an xlist of clusters. * each cluster has an xlist of MR_CLUSTER_SIZE mrs that are ready for * reuse. */ static void list_append_to_xlist(struct rds_ib_mr_pool *pool, struct list_head *list, struct xlist_head *xlist, struct xlist_head **tail_ret) { struct rds_ib_mr *ibmr; struct xlist_head *cur_mr = xlist; struct xlist_head *tail_mr = NULL; list_for_each_entry(ibmr, list, unmap_list) { tail_mr = &ibmr->xlist; tail_mr->next = NULL; cur_mr->next = tail_mr; cur_mr = tail_mr; } *tail_ret = tail_mr; } /* * Flush our pool of MRs. * At a minimum, all currently unused MRs are unmapped. * If the number of MRs allocated exceeds the limit, we also try * to free as many MRs as needed to get back to this limit. 
*/ static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool, int free_all, struct rds_ib_mr **ibmr_ret) { struct rds_ib_mr *ibmr, *next; struct xlist_head clean_xlist; struct xlist_head *clean_tail; LIST_HEAD(unmap_list); LIST_HEAD(fmr_list); unsigned long unpinned = 0; unsigned int nfreed = 0, ncleaned = 0, free_goal; int ret = 0; rds_ib_stats_inc(s_ib_rdma_mr_pool_flush); if (ibmr_ret) { DEFINE_WAIT(wait); while(!mutex_trylock(&pool->flush_lock)) { ibmr = rds_ib_reuse_fmr(pool); if (ibmr) { *ibmr_ret = ibmr; finish_wait(&pool->flush_wait, &wait); goto out_nolock; } prepare_to_wait(&pool->flush_wait, &wait, TASK_UNINTERRUPTIBLE); if (xlist_empty(&pool->clean_list)) schedule(); ibmr = rds_ib_reuse_fmr(pool); if (ibmr) { *ibmr_ret = ibmr; finish_wait(&pool->flush_wait, &wait); goto out_nolock; } } finish_wait(&pool->flush_wait, &wait); } else mutex_lock(&pool->flush_lock); if (ibmr_ret) { ibmr = rds_ib_reuse_fmr(pool); if (ibmr) { *ibmr_ret = ibmr; goto out; } } /* Get the list of all MRs to be dropped. Ordering matters - * we want to put drop_list ahead of free_list. 
*/ xlist_append_to_list(&pool->drop_list, &unmap_list); xlist_append_to_list(&pool->free_list, &unmap_list); if (free_all) xlist_append_to_list(&pool->clean_list, &unmap_list); free_goal = rds_ib_flush_goal(pool, free_all); if (list_empty(&unmap_list)) goto out; /* String all ib_mr's onto one list and hand them to ib_unmap_fmr */ list_for_each_entry(ibmr, &unmap_list, unmap_list) list_add(&ibmr->fmr->list, &fmr_list); ret = ib_unmap_fmr(&fmr_list); if (ret) printk(KERN_WARNING "RDS/IB: ib_unmap_fmr failed (err=%d)\n", ret); /* Now we can destroy the DMA mapping and unpin any pages */ list_for_each_entry_safe(ibmr, next, &unmap_list, unmap_list) { unpinned += ibmr->sg_len; __rds_ib_teardown_mr(ibmr); if (nfreed < free_goal || ibmr->remap_count >= pool->fmr_attr.max_maps) { rds_ib_stats_inc(s_ib_rdma_mr_free); list_del(&ibmr->unmap_list); ib_dealloc_fmr(ibmr->fmr); kfree(ibmr); nfreed++; } ncleaned++; } if (!list_empty(&unmap_list)) { /* we have to make sure that none of the things we're about * to put on the clean list would race with other cpus trying * to pull items off. The xlist would explode if we managed to * remove something from the clean list and then add it back again * while another CPU was spinning on that same item in xlist_del_head. * * This is pretty unlikely, but just in case wait for an xlist grace period * here before adding anything back into the clean list. 
*/ wait_clean_list_grace(); list_append_to_xlist(pool, &unmap_list, &clean_xlist, &clean_tail); if (ibmr_ret) refill_local(pool, &clean_xlist, ibmr_ret); /* refill_local may have emptied our list */ if (!xlist_empty(&clean_xlist)) xlist_add(clean_xlist.next, clean_tail, &pool->clean_list); } atomic_sub(unpinned, &pool->free_pinned); atomic_sub(ncleaned, &pool->dirty_count); atomic_sub(nfreed, &pool->item_count); out: mutex_unlock(&pool->flush_lock); if (waitqueue_active(&pool->flush_wait)) wake_up(&pool->flush_wait); out_nolock: return ret; } static void rds_ib_mr_pool_flush_worker(struct work_struct *work) { struct rds_ib_mr_pool *pool = container_of(work, struct rds_ib_mr_pool, flush_worker.work); rds_ib_flush_mr_pool(pool, 0, NULL); } void rds_ib_free_mr(void *trans_private, int invalidate) { struct rds_ib_mr *ibmr = trans_private; struct rds_ib_device *rds_ibdev = ibmr->device; struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool; rdsdebug("RDS/IB: free_mr nents %u\n", ibmr->sg_len); /* Return it to the pool's free list */ if (ibmr->remap_count >= pool->fmr_attr.max_maps) xlist_add(&ibmr->xlist, &ibmr->xlist, &pool->drop_list); else xlist_add(&ibmr->xlist, &ibmr->xlist, &pool->free_list); atomic_add(ibmr->sg_len, &pool->free_pinned); atomic_inc(&pool->dirty_count); /* If we've pinned too many pages, request a flush */ if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned || atomic_read(&pool->dirty_count) >= pool->max_items / 10) schedule_delayed_work(&pool->flush_worker, 10); if (invalidate) { if (likely(!in_interrupt())) { rds_ib_flush_mr_pool(pool, 0, NULL); } else { /* We get here if the user created a MR marked * as use_once and invalidate at the same time. 
*/ schedule_delayed_work(&pool->flush_worker, 10); } } rds_ib_dev_put(rds_ibdev); } void rds_ib_flush_mrs(void) { struct rds_ib_device *rds_ibdev; down_read(&rds_ib_devices_lock); list_for_each_entry(rds_ibdev, &rds_ib_devices, list) { struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool; if (pool) rds_ib_flush_mr_pool(pool, 0, NULL); } up_read(&rds_ib_devices_lock); } void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents, struct rds_sock *rs, u32 *key_ret) { struct rds_ib_device *rds_ibdev; struct rds_ib_mr *ibmr = NULL; int ret; rds_ibdev = rds_ib_get_device(rs->rs_bound_addr); if (!rds_ibdev) { ret = -ENODEV; goto out; } if (!rds_ibdev->mr_pool) { ret = -ENODEV; goto out; } ibmr = rds_ib_alloc_fmr(rds_ibdev); if (IS_ERR(ibmr)) return ibmr; ret = rds_ib_map_fmr(rds_ibdev, ibmr, sg, nents); if (ret == 0) *key_ret = ibmr->fmr->rkey; else printk(KERN_WARNING "RDS/IB: map_fmr failed (errno=%d)\n", ret); ibmr->device = rds_ibdev; rds_ibdev = NULL; out: if (ret) { if (ibmr) rds_ib_free_mr(ibmr, 0); ibmr = ERR_PTR(ret); } if (rds_ibdev) rds_ib_dev_put(rds_ibdev); return ibmr; }
gpl-2.0
McBane87/Sony_Tablet_Z_LP.454_Kernel
kernel/irq/spurious.c
3184
8807
/* * linux/kernel/irq/spurious.c * * Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar * * This file contains spurious interrupt handling. */ #include <linux/jiffies.h> #include <linux/irq.h> #include <linux/module.h> #include <linux/kallsyms.h> #include <linux/interrupt.h> #include <linux/moduleparam.h> #include <linux/timer.h> #include "internals.h" static int irqfixup __read_mostly; #define POLL_SPURIOUS_IRQ_INTERVAL (HZ/10) static void poll_spurious_irqs(unsigned long dummy); static DEFINE_TIMER(poll_spurious_irq_timer, poll_spurious_irqs, 0, 0); static int irq_poll_cpu; static atomic_t irq_poll_active; /* * We wait here for a poller to finish. * * If the poll runs on this CPU, then we yell loudly and return * false. That will leave the interrupt line disabled in the worst * case, but it should never happen. * * We wait until the poller is done and then recheck disabled and * action (about to be disabled). Only if it's still active, we return * true and let the handler run. */ bool irq_wait_for_poll(struct irq_desc *desc) { if (WARN_ONCE(irq_poll_cpu == smp_processor_id(), "irq poll in progress on cpu %d for irq %d\n", smp_processor_id(), desc->irq_data.irq)) return false; #ifdef CONFIG_SMP do { raw_spin_unlock(&desc->lock); while (irqd_irq_inprogress(&desc->irq_data)) cpu_relax(); raw_spin_lock(&desc->lock); } while (irqd_irq_inprogress(&desc->irq_data)); /* Might have been disabled in meantime */ return !irqd_irq_disabled(&desc->irq_data) && desc->action; #else return false; #endif } /* * Recovery handler for misrouted interrupts. */ static int try_one_irq(int irq, struct irq_desc *desc, bool force) { irqreturn_t ret = IRQ_NONE; struct irqaction *action; raw_spin_lock(&desc->lock); /* PER_CPU and nested thread interrupts are never polled */ if (irq_settings_is_per_cpu(desc) || irq_settings_is_nested_thread(desc)) goto out; /* * Do not poll disabled interrupts unless the spurious * disabled poller asks explicitely. 
*/ if (irqd_irq_disabled(&desc->irq_data) && !force) goto out; /* * All handlers must agree on IRQF_SHARED, so we test just the * first. Check for action->next as well. */ action = desc->action; if (!action || !(action->flags & IRQF_SHARED) || (action->flags & __IRQF_TIMER) || (action->handler(irq, action->dev_id) == IRQ_HANDLED) || !action->next) goto out; /* Already running on another processor */ if (irqd_irq_inprogress(&desc->irq_data)) { /* * Already running: If it is shared get the other * CPU to go looking for our mystery interrupt too */ desc->istate |= IRQS_PENDING; goto out; } /* Mark it poll in progress */ desc->istate |= IRQS_POLL_INPROGRESS; do { if (handle_irq_event(desc) == IRQ_HANDLED) ret = IRQ_HANDLED; action = desc->action; } while ((desc->istate & IRQS_PENDING) && action); desc->istate &= ~IRQS_POLL_INPROGRESS; out: raw_spin_unlock(&desc->lock); return ret == IRQ_HANDLED; } static int misrouted_irq(int irq) { struct irq_desc *desc; int i, ok = 0; if (atomic_inc_return(&irq_poll_active) != 1) goto out; irq_poll_cpu = smp_processor_id(); for_each_irq_desc(i, desc) { if (!i) continue; if (i == irq) /* Already tried */ continue; if (try_one_irq(i, desc, false)) ok = 1; } out: atomic_dec(&irq_poll_active); /* So the caller can adjust the irq error counts */ return ok; } static void poll_spurious_irqs(unsigned long dummy) { struct irq_desc *desc; int i; if (atomic_inc_return(&irq_poll_active) != 1) goto out; irq_poll_cpu = smp_processor_id(); for_each_irq_desc(i, desc) { unsigned int state; if (!i) continue; /* Racy but it doesn't matter */ state = desc->istate; barrier(); if (!(state & IRQS_SPURIOUS_DISABLED)) continue; local_irq_disable(); try_one_irq(i, desc, true); local_irq_enable(); } out: atomic_dec(&irq_poll_active); mod_timer(&poll_spurious_irq_timer, jiffies + POLL_SPURIOUS_IRQ_INTERVAL); } static inline int bad_action_ret(irqreturn_t action_ret) { if (likely(action_ret <= (IRQ_HANDLED | IRQ_WAKE_THREAD))) return 0; return 1; } /* * If 
99,900 of the previous 100,000 interrupts have not been handled * then assume that the IRQ is stuck in some manner. Drop a diagnostic * and try to turn the IRQ off. * * (The other 100-of-100,000 interrupts may have been a correctly * functioning device sharing an IRQ with the failing one) */ static void __report_bad_irq(unsigned int irq, struct irq_desc *desc, irqreturn_t action_ret) { struct irqaction *action; unsigned long flags; if (bad_action_ret(action_ret)) { printk(KERN_ERR "irq event %d: bogus return value %x\n", irq, action_ret); } else { printk(KERN_ERR "irq %d: nobody cared (try booting with " "the \"irqpoll\" option)\n", irq); } dump_stack(); printk(KERN_ERR "handlers:\n"); /* * We need to take desc->lock here. note_interrupt() is called * w/o desc->lock held, but IRQ_PROGRESS set. We might race * with something else removing an action. It's ok to take * desc->lock here. See synchronize_irq(). */ raw_spin_lock_irqsave(&desc->lock, flags); action = desc->action; while (action) { printk(KERN_ERR "[<%p>] %pf", action->handler, action->handler); if (action->thread_fn) printk(KERN_CONT " threaded [<%p>] %pf", action->thread_fn, action->thread_fn); printk(KERN_CONT "\n"); action = action->next; } raw_spin_unlock_irqrestore(&desc->lock, flags); } static void report_bad_irq(unsigned int irq, struct irq_desc *desc, irqreturn_t action_ret) { static int count = 100; if (count > 0) { count--; __report_bad_irq(irq, desc, action_ret); } } static inline int try_misrouted_irq(unsigned int irq, struct irq_desc *desc, irqreturn_t action_ret) { struct irqaction *action; if (!irqfixup) return 0; /* We didn't actually handle the IRQ - see if it was misrouted? */ if (action_ret == IRQ_NONE) return 1; /* * But for 'irqfixup == 2' we also do it for handled interrupts if * they are marked as IRQF_IRQPOLL (or for irq zero, which is the * traditional PC timer interrupt.. 
Legacy) */ if (irqfixup < 2) return 0; if (!irq) return 1; /* * Since we don't get the descriptor lock, "action" can * change under us. We don't really care, but we don't * want to follow a NULL pointer. So tell the compiler to * just load it once by using a barrier. */ action = desc->action; barrier(); return action && (action->flags & IRQF_IRQPOLL); } void note_interrupt(unsigned int irq, struct irq_desc *desc, irqreturn_t action_ret) { if (desc->istate & IRQS_POLL_INPROGRESS) return; /* we get here again via the threaded handler */ if (action_ret == IRQ_WAKE_THREAD) return; if (bad_action_ret(action_ret)) { report_bad_irq(irq, desc, action_ret); return; } if (unlikely(action_ret == IRQ_NONE)) { /* * If we are seeing only the odd spurious IRQ caused by * bus asynchronicity then don't eventually trigger an error, * otherwise the counter becomes a doomsday timer for otherwise * working systems */ if (time_after(jiffies, desc->last_unhandled + HZ/10)) desc->irqs_unhandled = 1; else desc->irqs_unhandled++; desc->last_unhandled = jiffies; } if (unlikely(try_misrouted_irq(irq, desc, action_ret))) { int ok = misrouted_irq(irq); if (action_ret == IRQ_NONE) desc->irqs_unhandled -= ok; } desc->irq_count++; if (likely(desc->irq_count < 100000)) return; desc->irq_count = 0; if (unlikely(desc->irqs_unhandled > 99900)) { /* * The interrupt is stuck */ __report_bad_irq(irq, desc, action_ret); /* * Now kill the IRQ */ printk(KERN_EMERG "Disabling IRQ #%d\n", irq); desc->istate |= IRQS_SPURIOUS_DISABLED; desc->depth++; irq_disable(desc); mod_timer(&poll_spurious_irq_timer, jiffies + POLL_SPURIOUS_IRQ_INTERVAL); } desc->irqs_unhandled = 0; } bool noirqdebug __read_mostly; int noirqdebug_setup(char *str) { noirqdebug = 1; printk(KERN_INFO "IRQ lockup detection disabled\n"); return 1; } __setup("noirqdebug", noirqdebug_setup); module_param(noirqdebug, bool, 0644); MODULE_PARM_DESC(noirqdebug, "Disable irq lockup detection when true"); static int __init irqfixup_setup(char *str) { 
irqfixup = 1; printk(KERN_WARNING "Misrouted IRQ fixup support enabled.\n"); printk(KERN_WARNING "This may impact system performance.\n"); return 1; } __setup("irqfixup", irqfixup_setup); module_param(irqfixup, int, 0644); static int __init irqpoll_setup(char *str) { irqfixup = 2; printk(KERN_WARNING "Misrouted IRQ fixup and polling support " "enabled\n"); printk(KERN_WARNING "This may significantly impact system " "performance\n"); return 1; } __setup("irqpoll", irqpoll_setup);
gpl-2.0
grogg/platform_device_asus_flo-kernel_kernel
arch/x86/kernel/cpu/intel.c
4208
14699
#include <linux/init.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/bitops.h> #include <linux/smp.h> #include <linux/sched.h> #include <linux/thread_info.h> #include <linux/module.h> #include <linux/uaccess.h> #include <asm/processor.h> #include <asm/pgtable.h> #include <asm/msr.h> #include <asm/bugs.h> #include <asm/cpu.h> #ifdef CONFIG_X86_64 #include <linux/topology.h> #include <asm/numa_64.h> #endif #include "cpu.h" #ifdef CONFIG_X86_LOCAL_APIC #include <asm/mpspec.h> #include <asm/apic.h> #endif static void __cpuinit early_init_intel(struct cpuinfo_x86 *c) { u64 misc_enable; /* Unmask CPUID levels if masked: */ if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) { rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable); if (misc_enable & MSR_IA32_MISC_ENABLE_LIMIT_CPUID) { misc_enable &= ~MSR_IA32_MISC_ENABLE_LIMIT_CPUID; wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable); c->cpuid_level = cpuid_eax(0); get_cpu_cap(c); } } if ((c->x86 == 0xf && c->x86_model >= 0x03) || (c->x86 == 0x6 && c->x86_model >= 0x0e)) set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); if (c->x86 >= 6 && !cpu_has(c, X86_FEATURE_IA64)) { unsigned lower_word; wrmsr(MSR_IA32_UCODE_REV, 0, 0); /* Required by the SDM */ sync_core(); rdmsr(MSR_IA32_UCODE_REV, lower_word, c->microcode); } /* * Atom erratum AAE44/AAF40/AAG38/AAH41: * * A race condition between speculative fetches and invalidating * a large page. This is worked around in microcode, but we * need the microcode to have already been loaded... so if it is * not, recommend a BIOS update and disable large pages. 
*/ if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_mask <= 2 && c->microcode < 0x20e) { printk(KERN_WARNING "Atom PSE erratum detected, BIOS microcode update recommended\n"); clear_cpu_cap(c, X86_FEATURE_PSE); } #ifdef CONFIG_X86_64 set_cpu_cap(c, X86_FEATURE_SYSENTER32); #else /* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */ if (c->x86 == 15 && c->x86_cache_alignment == 64) c->x86_cache_alignment = 128; #endif /* CPUID workaround for 0F33/0F34 CPU */ if (c->x86 == 0xF && c->x86_model == 0x3 && (c->x86_mask == 0x3 || c->x86_mask == 0x4)) c->x86_phys_bits = 36; /* * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate * with P/T states and does not stop in deep C-states. * * It is also reliable across cores and sockets. (but not across * cabinets - we turn it off in that case explicitly.) */ if (c->x86_power & (1 << 8)) { set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC); if (!check_tsc_unstable()) sched_clock_stable = 1; } /* * There is a known erratum on Pentium III and Core Solo * and Core Duo CPUs. * " Page with PAT set to WC while associated MTRR is UC * may consolidate to UC " * Because of this erratum, it is better to stick with * setting WC in MTRR rather than using PAT on these CPUs. * * Enable PAT WC only on P4, Core 2 or later CPUs. */ if (c->x86 == 6 && c->x86_model < 15) clear_cpu_cap(c, X86_FEATURE_PAT); #ifdef CONFIG_KMEMCHECK /* * P4s have a "fast strings" feature which causes single- * stepping REP instructions to only generate a #DB on * cache-line boundaries. * * Ingo Molnar reported a Pentium D (model 6) and a Xeon * (model 2) with the same problem. 
*/ if (c->x86 == 15) { rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable); if (misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING) { printk(KERN_INFO "kmemcheck: Disabling fast string operations\n"); misc_enable &= ~MSR_IA32_MISC_ENABLE_FAST_STRING; wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable); } } #endif /* * If fast string is not enabled in IA32_MISC_ENABLE for any reason, * clear the fast string and enhanced fast string CPU capabilities. */ if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) { rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable); if (!(misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING)) { printk(KERN_INFO "Disabled fast string operations\n"); setup_clear_cpu_cap(X86_FEATURE_REP_GOOD); setup_clear_cpu_cap(X86_FEATURE_ERMS); } } } #ifdef CONFIG_X86_32 /* * Early probe support logic for ppro memory erratum #50 * * This is called before we do cpu ident work */ int __cpuinit ppro_with_ram_bug(void) { /* Uses data from early_cpu_detect now */ if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 1 && boot_cpu_data.x86_mask < 8) { printk(KERN_INFO "Pentium Pro with Errata#50 detected. Taking evasive action.\n"); return 1; } return 0; } #ifdef CONFIG_X86_F00F_BUG static void __cpuinit trap_init_f00f_bug(void) { __set_fixmap(FIX_F00F_IDT, __pa(&idt_table), PAGE_KERNEL_RO); /* * Update the IDT descriptor and reload the IDT so that * it uses the read-only mapped virtual address. */ idt_descr.address = fix_to_virt(FIX_F00F_IDT); load_idt(&idt_descr); } #endif static void __cpuinit intel_smp_check(struct cpuinfo_x86 *c) { /* calling is from identify_secondary_cpu() ? 
*/ if (!c->cpu_index) return; /* * Mask B, Pentium, but not Pentium MMX */ if (c->x86 == 5 && c->x86_mask >= 1 && c->x86_mask <= 4 && c->x86_model <= 3) { /* * Remember we have B step Pentia with bugs */ WARN_ONCE(1, "WARNING: SMP operation may be unreliable" "with B stepping processors.\n"); } } static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c) { unsigned long lo, hi; #ifdef CONFIG_X86_F00F_BUG /* * All current models of Pentium and Pentium with MMX technology CPUs * have the F0 0F bug, which lets nonprivileged users lock up the * system. * Note that the workaround only should be initialized once... */ c->f00f_bug = 0; if (!paravirt_enabled() && c->x86 == 5) { static int f00f_workaround_enabled; c->f00f_bug = 1; if (!f00f_workaround_enabled) { trap_init_f00f_bug(); printk(KERN_NOTICE "Intel Pentium with F0 0F bug - workaround enabled.\n"); f00f_workaround_enabled = 1; } } #endif /* * SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until * model 3 mask 3 */ if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633) clear_cpu_cap(c, X86_FEATURE_SEP); /* * P4 Xeon errata 037 workaround. * Hardware prefetcher may cause stale data to be loaded into the cache. */ if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) { rdmsr(MSR_IA32_MISC_ENABLE, lo, hi); if ((lo & MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE) == 0) { printk (KERN_INFO "CPU: C0 stepping P4 Xeon detected.\n"); printk (KERN_INFO "CPU: Disabling hardware prefetching (Errata 037)\n"); lo |= MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE; wrmsr(MSR_IA32_MISC_ENABLE, lo, hi); } } /* * See if we have a good local APIC by checking for buggy Pentia, * i.e. all B steppings and the C2 stepping of P54C when using their * integrated APIC (see 11AP erratum in "Pentium Processor * Specification Update"). 
*/ if (cpu_has_apic && (c->x86<<8 | c->x86_model<<4) == 0x520 && (c->x86_mask < 0x6 || c->x86_mask == 0xb)) set_cpu_cap(c, X86_FEATURE_11AP); #ifdef CONFIG_X86_INTEL_USERCOPY /* * Set up the preferred alignment for movsl bulk memory moves */ switch (c->x86) { case 4: /* 486: untested */ break; case 5: /* Old Pentia: untested */ break; case 6: /* PII/PIII only like movsl with 8-byte alignment */ movsl_mask.mask = 7; break; case 15: /* P4 is OK down to 8-byte alignment */ movsl_mask.mask = 7; break; } #endif #ifdef CONFIG_X86_NUMAQ numaq_tsc_disable(); #endif intel_smp_check(c); } #else static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c) { } #endif static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c) { #ifdef CONFIG_NUMA unsigned node; int cpu = smp_processor_id(); /* Don't do the funky fallback heuristics the AMD version employs for now. */ node = numa_cpu_node(cpu); if (node == NUMA_NO_NODE || !node_online(node)) { /* reuse the value from init_cpu_to_node() */ node = cpu_to_node(cpu); } numa_set_node(cpu, node); #endif } /* * find out the number of processor cores on the die */ static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c) { unsigned int eax, ebx, ecx, edx; if (c->cpuid_level < 4) return 1; /* Intel has a non-standard dependency on %ecx for this CPUID level. 
*/ cpuid_count(4, 0, &eax, &ebx, &ecx, &edx); if (eax & 0x1f) return (eax >> 26) + 1; else return 1; } static void __cpuinit detect_vmx_virtcap(struct cpuinfo_x86 *c) { /* Intel VMX MSR indicated features */ #define X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW 0x00200000 #define X86_VMX_FEATURE_PROC_CTLS_VNMI 0x00400000 #define X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS 0x80000000 #define X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC 0x00000001 #define X86_VMX_FEATURE_PROC_CTLS2_EPT 0x00000002 #define X86_VMX_FEATURE_PROC_CTLS2_VPID 0x00000020 u32 vmx_msr_low, vmx_msr_high, msr_ctl, msr_ctl2; clear_cpu_cap(c, X86_FEATURE_TPR_SHADOW); clear_cpu_cap(c, X86_FEATURE_VNMI); clear_cpu_cap(c, X86_FEATURE_FLEXPRIORITY); clear_cpu_cap(c, X86_FEATURE_EPT); clear_cpu_cap(c, X86_FEATURE_VPID); rdmsr(MSR_IA32_VMX_PROCBASED_CTLS, vmx_msr_low, vmx_msr_high); msr_ctl = vmx_msr_high | vmx_msr_low; if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW) set_cpu_cap(c, X86_FEATURE_TPR_SHADOW); if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_VNMI) set_cpu_cap(c, X86_FEATURE_VNMI); if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS) { rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2, vmx_msr_low, vmx_msr_high); msr_ctl2 = vmx_msr_high | vmx_msr_low; if ((msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC) && (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW)) set_cpu_cap(c, X86_FEATURE_FLEXPRIORITY); if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_EPT) set_cpu_cap(c, X86_FEATURE_EPT); if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VPID) set_cpu_cap(c, X86_FEATURE_VPID); } } static void __cpuinit init_intel(struct cpuinfo_x86 *c) { unsigned int l2 = 0; early_init_intel(c); intel_workarounds(c); /* * Detect the extended topology information if available. 
This * will reinitialise the initial_apicid which will be used * in init_intel_cacheinfo() */ detect_extended_topology(c); l2 = init_intel_cacheinfo(c); if (c->cpuid_level > 9) { unsigned eax = cpuid_eax(10); /* Check for version and the number of counters */ if ((eax & 0xff) && (((eax>>8) & 0xff) > 1)) set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON); } if (cpu_has_xmm2) set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC); if (cpu_has_ds) { unsigned int l1; rdmsr(MSR_IA32_MISC_ENABLE, l1, l2); if (!(l1 & (1<<11))) set_cpu_cap(c, X86_FEATURE_BTS); if (!(l1 & (1<<12))) set_cpu_cap(c, X86_FEATURE_PEBS); } if (c->x86 == 6 && c->x86_model == 29 && cpu_has_clflush) set_cpu_cap(c, X86_FEATURE_CLFLUSH_MONITOR); #ifdef CONFIG_X86_64 if (c->x86 == 15) c->x86_cache_alignment = c->x86_clflush_size * 2; if (c->x86 == 6) set_cpu_cap(c, X86_FEATURE_REP_GOOD); #else /* * Names for the Pentium II/Celeron processors * detectable only by also checking the cache size. * Dixon is NOT a Celeron. */ if (c->x86 == 6) { char *p = NULL; switch (c->x86_model) { case 5: if (l2 == 0) p = "Celeron (Covington)"; else if (l2 == 256) p = "Mobile Pentium II (Dixon)"; break; case 6: if (l2 == 128) p = "Celeron (Mendocino)"; else if (c->x86_mask == 0 || c->x86_mask == 5) p = "Celeron-A"; break; case 8: if (l2 == 128) p = "Celeron (Coppermine)"; break; } if (p) strcpy(c->x86_model_id, p); } if (c->x86 == 15) set_cpu_cap(c, X86_FEATURE_P4); if (c->x86 == 6) set_cpu_cap(c, X86_FEATURE_P3); #endif if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) { /* * let's use the legacy cpuid vector 0x1 and 0x4 for topology * detection. */ c->x86_max_cores = intel_num_cpu_cores(c); #ifdef CONFIG_X86_32 detect_ht(c); #endif } /* Work around errata */ srat_detect_node(c); if (cpu_has(c, X86_FEATURE_VMX)) detect_vmx_virtcap(c); /* * Initialize MSR_IA32_ENERGY_PERF_BIAS if BIOS did not. 
* x86_energy_perf_policy(8) is available to change it at run-time */ if (cpu_has(c, X86_FEATURE_EPB)) { u64 epb; rdmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb); if ((epb & 0xF) == ENERGY_PERF_BIAS_PERFORMANCE) { printk_once(KERN_WARNING "ENERGY_PERF_BIAS:" " Set to 'normal', was 'performance'\n" "ENERGY_PERF_BIAS: View and update with" " x86_energy_perf_policy(8)\n"); epb = (epb & ~0xF) | ENERGY_PERF_BIAS_NORMAL; wrmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb); } } } #ifdef CONFIG_X86_32 static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 *c, unsigned int size) { /* * Intel PIII Tualatin. This comes in two flavours. * One has 256kb of cache, the other 512. We have no way * to determine which, so we use a boottime override * for the 512kb model, and assume 256 otherwise. */ if ((c->x86 == 6) && (c->x86_model == 11) && (size == 0)) size = 256; return size; } #endif static const struct cpu_dev __cpuinitconst intel_cpu_dev = { .c_vendor = "Intel", .c_ident = { "GenuineIntel" }, #ifdef CONFIG_X86_32 .c_models = { { .vendor = X86_VENDOR_INTEL, .family = 4, .model_names = { [0] = "486 DX-25/33", [1] = "486 DX-50", [2] = "486 SX", [3] = "486 DX/2", [4] = "486 SL", [5] = "486 SX/2", [7] = "486 DX/2-WB", [8] = "486 DX/4", [9] = "486 DX/4-WB" } }, { .vendor = X86_VENDOR_INTEL, .family = 5, .model_names = { [0] = "Pentium 60/66 A-step", [1] = "Pentium 60/66", [2] = "Pentium 75 - 200", [3] = "OverDrive PODP5V83", [4] = "Pentium MMX", [7] = "Mobile Pentium 75 - 200", [8] = "Mobile Pentium MMX" } }, { .vendor = X86_VENDOR_INTEL, .family = 6, .model_names = { [0] = "Pentium Pro A-step", [1] = "Pentium Pro", [3] = "Pentium II (Klamath)", [4] = "Pentium II (Deschutes)", [5] = "Pentium II (Deschutes)", [6] = "Mobile Pentium II", [7] = "Pentium III (Katmai)", [8] = "Pentium III (Coppermine)", [10] = "Pentium III (Cascades)", [11] = "Pentium III (Tualatin)", } }, { .vendor = X86_VENDOR_INTEL, .family = 15, .model_names = { [0] = "Pentium 4 (Unknown)", [1] = "Pentium 4 (Willamette)", [2] = 
"Pentium 4 (Northwood)", [4] = "Pentium 4 (Foster)", [5] = "Pentium 4 (Foster)", } }, }, .c_size_cache = intel_size_cache, #endif .c_early_init = early_init_intel, .c_init = init_intel, .c_x86_vendor = X86_VENDOR_INTEL, }; cpu_dev_register(intel_cpu_dev);
gpl-2.0
Alexiis337/Test
arch/arm/mach-ux500/devices-db8500.c
4720
5924
/* * Copyright (C) ST-Ericsson SA 2010 * * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson * License terms: GNU General Public License (GPL) version 2 */ #include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/gpio.h> #include <linux/amba/bus.h> #include <linux/amba/pl022.h> #include <plat/ste_dma40.h> #include <mach/hardware.h> #include <mach/setup.h> #include "ste-dma40-db8500.h" static struct resource dma40_resources[] = { [0] = { .start = U8500_DMA_BASE, .end = U8500_DMA_BASE + SZ_4K - 1, .flags = IORESOURCE_MEM, .name = "base", }, [1] = { .start = U8500_DMA_LCPA_BASE, .end = U8500_DMA_LCPA_BASE + 2 * SZ_1K - 1, .flags = IORESOURCE_MEM, .name = "lcpa", }, [2] = { .start = IRQ_DB8500_DMA, .end = IRQ_DB8500_DMA, .flags = IORESOURCE_IRQ, } }; /* Default configuration for physcial memcpy */ struct stedma40_chan_cfg dma40_memcpy_conf_phy = { .mode = STEDMA40_MODE_PHYSICAL, .dir = STEDMA40_MEM_TO_MEM, .src_info.data_width = STEDMA40_BYTE_WIDTH, .src_info.psize = STEDMA40_PSIZE_PHY_1, .src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL, .dst_info.data_width = STEDMA40_BYTE_WIDTH, .dst_info.psize = STEDMA40_PSIZE_PHY_1, .dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL, }; /* Default configuration for logical memcpy */ struct stedma40_chan_cfg dma40_memcpy_conf_log = { .dir = STEDMA40_MEM_TO_MEM, .src_info.data_width = STEDMA40_BYTE_WIDTH, .src_info.psize = STEDMA40_PSIZE_LOG_1, .src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL, .dst_info.data_width = STEDMA40_BYTE_WIDTH, .dst_info.psize = STEDMA40_PSIZE_LOG_1, .dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL, }; /* * Mapping between destination event lines and physical device address. * The event line is tied to a device and therefore the address is constant. * When the address comes from a primecell it will be configured in runtime * and we set the address to -1 as a placeholder. 
*/ static const dma_addr_t dma40_tx_map[DB8500_DMA_NR_DEV] = { /* MUSB - these will be runtime-reconfigured */ [DB8500_DMA_DEV39_USB_OTG_OEP_8] = -1, [DB8500_DMA_DEV16_USB_OTG_OEP_7_15] = -1, [DB8500_DMA_DEV17_USB_OTG_OEP_6_14] = -1, [DB8500_DMA_DEV18_USB_OTG_OEP_5_13] = -1, [DB8500_DMA_DEV19_USB_OTG_OEP_4_12] = -1, [DB8500_DMA_DEV36_USB_OTG_OEP_3_11] = -1, [DB8500_DMA_DEV37_USB_OTG_OEP_2_10] = -1, [DB8500_DMA_DEV38_USB_OTG_OEP_1_9] = -1, /* PrimeCells - run-time configured */ [DB8500_DMA_DEV0_SPI0_TX] = -1, [DB8500_DMA_DEV1_SD_MMC0_TX] = -1, [DB8500_DMA_DEV2_SD_MMC1_TX] = -1, [DB8500_DMA_DEV3_SD_MMC2_TX] = -1, [DB8500_DMA_DEV8_SSP0_TX] = -1, [DB8500_DMA_DEV9_SSP1_TX] = -1, [DB8500_DMA_DEV11_UART2_TX] = -1, [DB8500_DMA_DEV12_UART1_TX] = -1, [DB8500_DMA_DEV13_UART0_TX] = -1, [DB8500_DMA_DEV28_SD_MM2_TX] = -1, [DB8500_DMA_DEV29_SD_MM0_TX] = -1, [DB8500_DMA_DEV32_SD_MM1_TX] = -1, [DB8500_DMA_DEV33_SPI2_TX] = -1, [DB8500_DMA_DEV35_SPI1_TX] = -1, [DB8500_DMA_DEV40_SPI3_TX] = -1, [DB8500_DMA_DEV41_SD_MM3_TX] = -1, [DB8500_DMA_DEV42_SD_MM4_TX] = -1, [DB8500_DMA_DEV43_SD_MM5_TX] = -1, [DB8500_DMA_DEV14_MSP2_TX] = U8500_MSP2_BASE + MSP_TX_RX_REG_OFFSET, [DB8500_DMA_DEV30_MSP1_TX] = U8500_MSP1_BASE + MSP_TX_RX_REG_OFFSET, [DB8500_DMA_DEV31_MSP0_TX_SLIM0_CH0_TX] = U8500_MSP0_BASE + MSP_TX_RX_REG_OFFSET, }; /* Mapping between source event lines and physical device address */ static const dma_addr_t dma40_rx_map[DB8500_DMA_NR_DEV] = { /* MUSB - these will be runtime-reconfigured */ [DB8500_DMA_DEV39_USB_OTG_IEP_8] = -1, [DB8500_DMA_DEV16_USB_OTG_IEP_7_15] = -1, [DB8500_DMA_DEV17_USB_OTG_IEP_6_14] = -1, [DB8500_DMA_DEV18_USB_OTG_IEP_5_13] = -1, [DB8500_DMA_DEV19_USB_OTG_IEP_4_12] = -1, [DB8500_DMA_DEV36_USB_OTG_IEP_3_11] = -1, [DB8500_DMA_DEV37_USB_OTG_IEP_2_10] = -1, [DB8500_DMA_DEV38_USB_OTG_IEP_1_9] = -1, /* PrimeCells */ [DB8500_DMA_DEV0_SPI0_RX] = -1, [DB8500_DMA_DEV1_SD_MMC0_RX] = -1, [DB8500_DMA_DEV2_SD_MMC1_RX] = -1, [DB8500_DMA_DEV3_SD_MMC2_RX] = -1, 
[DB8500_DMA_DEV8_SSP0_RX] = -1, [DB8500_DMA_DEV9_SSP1_RX] = -1, [DB8500_DMA_DEV11_UART2_RX] = -1, [DB8500_DMA_DEV12_UART1_RX] = -1, [DB8500_DMA_DEV13_UART0_RX] = -1, [DB8500_DMA_DEV28_SD_MM2_RX] = -1, [DB8500_DMA_DEV29_SD_MM0_RX] = -1, [DB8500_DMA_DEV32_SD_MM1_RX] = -1, [DB8500_DMA_DEV33_SPI2_RX] = -1, [DB8500_DMA_DEV35_SPI1_RX] = -1, [DB8500_DMA_DEV40_SPI3_RX] = -1, [DB8500_DMA_DEV41_SD_MM3_RX] = -1, [DB8500_DMA_DEV42_SD_MM4_RX] = -1, [DB8500_DMA_DEV43_SD_MM5_RX] = -1, [DB8500_DMA_DEV14_MSP2_RX] = U8500_MSP2_BASE + MSP_TX_RX_REG_OFFSET, [DB8500_DMA_DEV30_MSP3_RX] = U8500_MSP3_BASE + MSP_TX_RX_REG_OFFSET, [DB8500_DMA_DEV31_MSP0_RX_SLIM0_CH0_RX] = U8500_MSP0_BASE + MSP_TX_RX_REG_OFFSET, }; /* Reserved event lines for memcpy only */ static int dma40_memcpy_event[] = { DB8500_DMA_MEMCPY_TX_0, DB8500_DMA_MEMCPY_TX_1, DB8500_DMA_MEMCPY_TX_2, DB8500_DMA_MEMCPY_TX_3, DB8500_DMA_MEMCPY_TX_4, DB8500_DMA_MEMCPY_TX_5, }; static struct stedma40_platform_data dma40_plat_data = { .dev_len = DB8500_DMA_NR_DEV, .dev_rx = dma40_rx_map, .dev_tx = dma40_tx_map, .memcpy = dma40_memcpy_event, .memcpy_len = ARRAY_SIZE(dma40_memcpy_event), .memcpy_conf_phy = &dma40_memcpy_conf_phy, .memcpy_conf_log = &dma40_memcpy_conf_log, .disabled_channels = {-1}, }; struct platform_device u8500_dma40_device = { .dev = { .platform_data = &dma40_plat_data, }, .name = "dma40", .id = 0, .num_resources = ARRAY_SIZE(dma40_resources), .resource = dma40_resources }; struct resource keypad_resources[] = { [0] = { .start = U8500_SKE_BASE, .end = U8500_SKE_BASE + SZ_4K - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_DB8500_KB, .end = IRQ_DB8500_KB, .flags = IORESOURCE_IRQ, }, }; struct platform_device u8500_ske_keypad_device = { .name = "nmk-ske-keypad", .id = -1, .num_resources = ARRAY_SIZE(keypad_resources), .resource = keypad_resources, };
gpl-2.0
lordeko/Alucard-Kernel-jfltexx
drivers/uwb/reset.c
5232
11232
/* * Ultra Wide Band * UWB basic command support and radio reset * * Copyright (C) 2005-2006 Intel Corporation * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. * * * FIXME: * * - docs * * - Now we are serializing (using the uwb_dev->mutex) the command * execution; it should be parallelized as much as possible some * day. */ #include <linux/kernel.h> #include <linux/err.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/export.h> #include "uwb-internal.h" /** * Command result codes (WUSB1.0[T8-69]) */ static const char *__strerror[] = { "success", "failure", "hardware failure", "no more slots", "beacon is too large", "invalid parameter", "unsupported power level", "time out (wa) or invalid ie data (whci)", "beacon size exceeded", "cancelled", "invalid state", "invalid size", "ack not received", "no more asie notification", }; /** Return a string matching the given error code */ const char *uwb_rc_strerror(unsigned code) { if (code == 255) return "time out"; if (code >= ARRAY_SIZE(__strerror)) return "unknown error"; return __strerror[code]; } int uwb_rc_cmd_async(struct uwb_rc *rc, const char *cmd_name, struct uwb_rccb *cmd, size_t cmd_size, u8 expected_type, u16 expected_event, uwb_rc_cmd_cb_f cb, void *arg) { struct device *dev = &rc->uwb_dev.dev; struct uwb_rc_neh *neh; int needtofree = 0; int result; 
uwb_dev_lock(&rc->uwb_dev); /* Protect against rc->priv being removed */ if (rc->priv == NULL) { uwb_dev_unlock(&rc->uwb_dev); return -ESHUTDOWN; } if (rc->filter_cmd) { needtofree = rc->filter_cmd(rc, &cmd, &cmd_size); if (needtofree < 0 && needtofree != -ENOANO) { dev_err(dev, "%s: filter error: %d\n", cmd_name, needtofree); uwb_dev_unlock(&rc->uwb_dev); return needtofree; } } neh = uwb_rc_neh_add(rc, cmd, expected_type, expected_event, cb, arg); if (IS_ERR(neh)) { result = PTR_ERR(neh); goto out; } result = rc->cmd(rc, cmd, cmd_size); uwb_dev_unlock(&rc->uwb_dev); if (result < 0) uwb_rc_neh_rm(rc, neh); else uwb_rc_neh_arm(rc, neh); uwb_rc_neh_put(neh); out: if (needtofree == 1) kfree(cmd); return result < 0 ? result : 0; } EXPORT_SYMBOL_GPL(uwb_rc_cmd_async); struct uwb_rc_cmd_done_params { struct completion completion; struct uwb_rceb *reply; ssize_t reply_size; }; static void uwb_rc_cmd_done(struct uwb_rc *rc, void *arg, struct uwb_rceb *reply, ssize_t reply_size) { struct uwb_rc_cmd_done_params *p = (struct uwb_rc_cmd_done_params *)arg; if (reply_size > 0) { if (p->reply) reply_size = min(p->reply_size, reply_size); else p->reply = kmalloc(reply_size, GFP_ATOMIC); if (p->reply) memcpy(p->reply, reply, reply_size); else reply_size = -ENOMEM; } p->reply_size = reply_size; complete(&p->completion); } /** * Generic function for issuing commands to the Radio Control Interface * * @rc: UWB Radio Control descriptor * @cmd_name: Name of the command being issued (for error messages) * @cmd: Pointer to rccb structure containing the command; * normally you embed this structure as the first member of * the full command structure. * @cmd_size: Size of the whole command buffer pointed to by @cmd. * @reply: Pointer to where to store the reply * @reply_size: @reply's size * @expected_type: Expected type in the return event * @expected_event: Expected event code in the return event * @preply: Here a pointer to where the event data is received will * be stored. 
Once done with the data, free with kfree(). * * This function is generic; it works for commands that return a fixed * and known size or for commands that return a variable amount of data. * * If a buffer is provided, that is used, although it could be chopped * to the maximum size of the buffer. If the buffer is NULL, then one * be allocated in *preply with the whole contents of the reply. * * @rc needs to be referenced */ static ssize_t __uwb_rc_cmd(struct uwb_rc *rc, const char *cmd_name, struct uwb_rccb *cmd, size_t cmd_size, struct uwb_rceb *reply, size_t reply_size, u8 expected_type, u16 expected_event, struct uwb_rceb **preply) { ssize_t result = 0; struct device *dev = &rc->uwb_dev.dev; struct uwb_rc_cmd_done_params params; init_completion(&params.completion); params.reply = reply; params.reply_size = reply_size; result = uwb_rc_cmd_async(rc, cmd_name, cmd, cmd_size, expected_type, expected_event, uwb_rc_cmd_done, &params); if (result) return result; wait_for_completion(&params.completion); if (preply) *preply = params.reply; if (params.reply_size < 0) dev_err(dev, "%s: confirmation event 0x%02x/%04x/%02x " "reception failed: %d\n", cmd_name, expected_type, expected_event, cmd->bCommandContext, (int)params.reply_size); return params.reply_size; } /** * Generic function for issuing commands to the Radio Control Interface * * @rc: UWB Radio Control descriptor * @cmd_name: Name of the command being issued (for error messages) * @cmd: Pointer to rccb structure containing the command; * normally you embed this structure as the first member of * the full command structure. * @cmd_size: Size of the whole command buffer pointed to by @cmd. * @reply: Pointer to the beginning of the confirmation event * buffer. Normally bigger than an 'struct hwarc_rceb'. * You need to fill out reply->bEventType and reply->wEvent (in * cpu order) as the function will use them to verify the * confirmation event. 
* @reply_size: Size of the reply buffer * * The function checks that the length returned in the reply is at * least as big as @reply_size; if not, it will be deemed an error and * -EIO returned. * * @rc needs to be referenced */ ssize_t uwb_rc_cmd(struct uwb_rc *rc, const char *cmd_name, struct uwb_rccb *cmd, size_t cmd_size, struct uwb_rceb *reply, size_t reply_size) { struct device *dev = &rc->uwb_dev.dev; ssize_t result; result = __uwb_rc_cmd(rc, cmd_name, cmd, cmd_size, reply, reply_size, reply->bEventType, reply->wEvent, NULL); if (result > 0 && result < reply_size) { dev_err(dev, "%s: not enough data returned for decoding reply " "(%zu bytes received vs at least %zu needed)\n", cmd_name, result, reply_size); result = -EIO; } return result; } EXPORT_SYMBOL_GPL(uwb_rc_cmd); /** * Generic function for issuing commands to the Radio Control * Interface that return an unknown amount of data * * @rc: UWB Radio Control descriptor * @cmd_name: Name of the command being issued (for error messages) * @cmd: Pointer to rccb structure containing the command; * normally you embed this structure as the first member of * the full command structure. * @cmd_size: Size of the whole command buffer pointed to by @cmd. * @expected_type: Expected type in the return event * @expected_event: Expected event code in the return event * @preply: Here a pointer to where the event data is received will * be stored. Once done with the data, free with kfree(). * * The function checks that the length returned in the reply is at * least as big as a 'struct uwb_rceb *'; if not, it will be deemed an * error and -EIO returned. 
* * @rc needs to be referenced */ ssize_t uwb_rc_vcmd(struct uwb_rc *rc, const char *cmd_name, struct uwb_rccb *cmd, size_t cmd_size, u8 expected_type, u16 expected_event, struct uwb_rceb **preply) { return __uwb_rc_cmd(rc, cmd_name, cmd, cmd_size, NULL, 0, expected_type, expected_event, preply); } EXPORT_SYMBOL_GPL(uwb_rc_vcmd); /** * Reset a UWB Host Controller (and all radio settings) * * @rc: Host Controller descriptor * @returns: 0 if ok, < 0 errno code on error * * We put the command on kmalloc'ed memory as some arches cannot do * USB from the stack. The reply event is copied from an stage buffer, * so it can be in the stack. See WUSB1.0[8.6.2.4] for more details. */ int uwb_rc_reset(struct uwb_rc *rc) { int result = -ENOMEM; struct uwb_rc_evt_confirm reply; struct uwb_rccb *cmd; size_t cmd_size = sizeof(*cmd); mutex_lock(&rc->uwb_dev.mutex); cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); if (cmd == NULL) goto error_kzalloc; cmd->bCommandType = UWB_RC_CET_GENERAL; cmd->wCommand = cpu_to_le16(UWB_RC_CMD_RESET); reply.rceb.bEventType = UWB_RC_CET_GENERAL; reply.rceb.wEvent = UWB_RC_CMD_RESET; result = uwb_rc_cmd(rc, "RESET", cmd, cmd_size, &reply.rceb, sizeof(reply)); if (result < 0) goto error_cmd; if (reply.bResultCode != UWB_RC_RES_SUCCESS) { dev_err(&rc->uwb_dev.dev, "RESET: command execution failed: %s (%d)\n", uwb_rc_strerror(reply.bResultCode), reply.bResultCode); result = -EIO; } error_cmd: kfree(cmd); error_kzalloc: mutex_unlock(&rc->uwb_dev.mutex); return result; } int uwbd_msg_handle_reset(struct uwb_event *evt) { struct uwb_rc *rc = evt->rc; int ret; dev_info(&rc->uwb_dev.dev, "resetting radio controller\n"); ret = rc->reset(rc); if (ret < 0) { dev_err(&rc->uwb_dev.dev, "failed to reset hardware: %d\n", ret); goto error; } return 0; error: /* Nothing can be done except try the reset again. Wait a bit to avoid reset loops during probe() or remove(). 
*/ msleep(1000); uwb_rc_reset_all(rc); return ret; } /** * uwb_rc_reset_all - request a reset of the radio controller and PALs * @rc: the radio controller of the hardware device to be reset. * * The full hardware reset of the radio controller and all the PALs * will be scheduled. */ void uwb_rc_reset_all(struct uwb_rc *rc) { struct uwb_event *evt; evt = kzalloc(sizeof(struct uwb_event), GFP_ATOMIC); if (unlikely(evt == NULL)) return; evt->rc = __uwb_rc_get(rc); /* will be put by uwbd's uwbd_event_handle() */ evt->ts_jiffies = jiffies; evt->type = UWB_EVT_TYPE_MSG; evt->message = UWB_EVT_MSG_RESET; uwbd_event_queue(evt); } EXPORT_SYMBOL_GPL(uwb_rc_reset_all); void uwb_rc_pre_reset(struct uwb_rc *rc) { rc->stop(rc); uwbd_flush(rc); uwb_radio_reset_state(rc); uwb_rsv_remove_all(rc); } EXPORT_SYMBOL_GPL(uwb_rc_pre_reset); int uwb_rc_post_reset(struct uwb_rc *rc) { int ret; ret = rc->start(rc); if (ret) goto out; ret = uwb_rc_mac_addr_set(rc, &rc->uwb_dev.mac_addr); if (ret) goto out; ret = uwb_rc_dev_addr_set(rc, &rc->uwb_dev.dev_addr); if (ret) goto out; out: return ret; } EXPORT_SYMBOL_GPL(uwb_rc_post_reset);
gpl-2.0
AzraelsKiss/android_kernel_samsung_smdk4412
arch/arm/mach-bcmring/csp/chipc/chipcHw.c
8048
26628
/***************************************************************************** * Copyright 2003 - 2008 Broadcom Corporation. All rights reserved. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you * under the terms of the GNU General Public License version 2, available at * http://www.broadcom.com/licenses/GPLv2.php (the "GPL"). * * Notwithstanding the above, under no circumstances may you combine this * software in any way with any other Broadcom software provided under a * license other than the GPL, without Broadcom's express prior written * consent. *****************************************************************************/ /****************************************************************************/ /** * @file chipcHw.c * * @brief Low level Various CHIP clock controlling routines * * @note * * These routines provide basic clock controlling functionality only. */ /****************************************************************************/ /* ---- Include Files ---------------------------------------------------- */ #include <csp/errno.h> #include <csp/stdint.h> #include <csp/module.h> #include <mach/csp/chipcHw_def.h> #include <mach/csp/chipcHw_inline.h> #include <csp/reg.h> #include <csp/delay.h> /* ---- Private Constants and Types --------------------------------------- */ /* VPM alignment algorithm uses this */ #define MAX_PHASE_ADJUST_COUNT 0xFFFF /* Max number of times allowed to adjust the phase */ #define MAX_PHASE_ALIGN_ATTEMPTS 10 /* Max number of attempt to align the phase */ /* Local definition of clock type */ #define PLL_CLOCK 1 /* PLL Clock */ #define NON_PLL_CLOCK 2 /* Divider clock */ static int chipcHw_divide(int num, int denom) __attribute__ ((section(".aramtext"))); /****************************************************************************/ /** * @brief Set clock fequency for miscellaneous configurable clocks * * This function sets 
clock frequency * * @return Configured clock frequency in hertz * */ /****************************************************************************/ chipcHw_freq chipcHw_getClockFrequency(chipcHw_CLOCK_e clock /* [ IN ] Configurable clock */ ) { volatile uint32_t *pPLLReg = (uint32_t *) 0x0; volatile uint32_t *pClockCtrl = (uint32_t *) 0x0; volatile uint32_t *pDependentClock = (uint32_t *) 0x0; uint32_t vcoFreqPll1Hz = 0; /* Effective VCO frequency for PLL1 in Hz */ uint32_t vcoFreqPll2Hz = 0; /* Effective VCO frequency for PLL2 in Hz */ uint32_t dependentClockType = 0; uint32_t vcoHz = 0; /* Get VCO frequencies */ if ((pChipcHw->PLLPreDivider & chipcHw_REG_PLL_PREDIVIDER_NDIV_MODE_MASK) != chipcHw_REG_PLL_PREDIVIDER_NDIV_MODE_INTEGER) { uint64_t adjustFreq = 0; vcoFreqPll1Hz = chipcHw_XTAL_FREQ_Hz * chipcHw_divide(chipcHw_REG_PLL_PREDIVIDER_P1, chipcHw_REG_PLL_PREDIVIDER_P2) * ((pChipcHw->PLLPreDivider & chipcHw_REG_PLL_PREDIVIDER_NDIV_MASK) >> chipcHw_REG_PLL_PREDIVIDER_NDIV_SHIFT); /* Adjusted frequency due to chipcHw_REG_PLL_DIVIDER_NDIV_f_SS */ adjustFreq = (uint64_t) chipcHw_XTAL_FREQ_Hz * (uint64_t) chipcHw_REG_PLL_DIVIDER_NDIV_f_SS * chipcHw_divide(chipcHw_REG_PLL_PREDIVIDER_P1, (chipcHw_REG_PLL_PREDIVIDER_P2 * (uint64_t) chipcHw_REG_PLL_DIVIDER_FRAC)); vcoFreqPll1Hz += (uint32_t) adjustFreq; } else { vcoFreqPll1Hz = chipcHw_XTAL_FREQ_Hz * chipcHw_divide(chipcHw_REG_PLL_PREDIVIDER_P1, chipcHw_REG_PLL_PREDIVIDER_P2) * ((pChipcHw->PLLPreDivider & chipcHw_REG_PLL_PREDIVIDER_NDIV_MASK) >> chipcHw_REG_PLL_PREDIVIDER_NDIV_SHIFT); } vcoFreqPll2Hz = chipcHw_XTAL_FREQ_Hz * chipcHw_divide(chipcHw_REG_PLL_PREDIVIDER_P1, chipcHw_REG_PLL_PREDIVIDER_P2) * ((pChipcHw->PLLPreDivider2 & chipcHw_REG_PLL_PREDIVIDER_NDIV_MASK) >> chipcHw_REG_PLL_PREDIVIDER_NDIV_SHIFT); switch (clock) { case chipcHw_CLOCK_DDR: pPLLReg = &pChipcHw->DDRClock; vcoHz = vcoFreqPll1Hz; break; case chipcHw_CLOCK_ARM: pPLLReg = &pChipcHw->ARMClock; vcoHz = vcoFreqPll1Hz; break; case chipcHw_CLOCK_ESW: 
pPLLReg = &pChipcHw->ESWClock; vcoHz = vcoFreqPll1Hz; break; case chipcHw_CLOCK_VPM: pPLLReg = &pChipcHw->VPMClock; vcoHz = vcoFreqPll1Hz; break; case chipcHw_CLOCK_ESW125: pPLLReg = &pChipcHw->ESW125Clock; vcoHz = vcoFreqPll1Hz; break; case chipcHw_CLOCK_UART: pPLLReg = &pChipcHw->UARTClock; vcoHz = vcoFreqPll1Hz; break; case chipcHw_CLOCK_SDIO0: pPLLReg = &pChipcHw->SDIO0Clock; vcoHz = vcoFreqPll1Hz; break; case chipcHw_CLOCK_SDIO1: pPLLReg = &pChipcHw->SDIO1Clock; vcoHz = vcoFreqPll1Hz; break; case chipcHw_CLOCK_SPI: pPLLReg = &pChipcHw->SPIClock; vcoHz = vcoFreqPll1Hz; break; case chipcHw_CLOCK_ETM: pPLLReg = &pChipcHw->ETMClock; vcoHz = vcoFreqPll1Hz; break; case chipcHw_CLOCK_USB: pPLLReg = &pChipcHw->USBClock; vcoHz = vcoFreqPll2Hz; break; case chipcHw_CLOCK_LCD: pPLLReg = &pChipcHw->LCDClock; vcoHz = vcoFreqPll2Hz; break; case chipcHw_CLOCK_APM: pPLLReg = &pChipcHw->APMClock; vcoHz = vcoFreqPll2Hz; break; case chipcHw_CLOCK_BUS: pClockCtrl = &pChipcHw->ACLKClock; pDependentClock = &pChipcHw->ARMClock; vcoHz = vcoFreqPll1Hz; dependentClockType = PLL_CLOCK; break; case chipcHw_CLOCK_OTP: pClockCtrl = &pChipcHw->OTPClock; break; case chipcHw_CLOCK_I2C: pClockCtrl = &pChipcHw->I2CClock; break; case chipcHw_CLOCK_I2S0: pClockCtrl = &pChipcHw->I2S0Clock; break; case chipcHw_CLOCK_RTBUS: pClockCtrl = &pChipcHw->RTBUSClock; pDependentClock = &pChipcHw->ACLKClock; dependentClockType = NON_PLL_CLOCK; break; case chipcHw_CLOCK_APM100: pClockCtrl = &pChipcHw->APM100Clock; pDependentClock = &pChipcHw->APMClock; vcoHz = vcoFreqPll2Hz; dependentClockType = PLL_CLOCK; break; case chipcHw_CLOCK_TSC: pClockCtrl = &pChipcHw->TSCClock; break; case chipcHw_CLOCK_LED: pClockCtrl = &pChipcHw->LEDClock; break; case chipcHw_CLOCK_I2S1: pClockCtrl = &pChipcHw->I2S1Clock; break; } if (pPLLReg) { /* Obtain PLL clock frequency */ if (*pPLLReg & chipcHw_REG_PLL_CLOCK_BYPASS_SELECT) { /* Return crystal clock frequency when bypassed */ return chipcHw_XTAL_FREQ_Hz; } else if (clock == 
chipcHw_CLOCK_DDR) { /* DDR frequency is configured in PLLDivider register */ return chipcHw_divide (vcoHz, (((pChipcHw->PLLDivider & 0xFF000000) >> 24) ? ((pChipcHw->PLLDivider & 0xFF000000) >> 24) : 256)); } else { /* From chip revision number B0, LCD clock is internally divided by 2 */ if ((pPLLReg == &pChipcHw->LCDClock) && (chipcHw_getChipRevisionNumber() != chipcHw_REV_NUMBER_A0)) { vcoHz >>= 1; } /* Obtain PLL clock frequency using VCO dividers */ return chipcHw_divide(vcoHz, ((*pPLLReg & chipcHw_REG_PLL_CLOCK_MDIV_MASK) ? (*pPLLReg & chipcHw_REG_PLL_CLOCK_MDIV_MASK) : 256)); } } else if (pClockCtrl) { /* Obtain divider clock frequency */ uint32_t div; uint32_t freq = 0; if (*pClockCtrl & chipcHw_REG_DIV_CLOCK_BYPASS_SELECT) { /* Return crystal clock frequency when bypassed */ return chipcHw_XTAL_FREQ_Hz; } else if (pDependentClock) { /* Identify the dependent clock frequency */ switch (dependentClockType) { case PLL_CLOCK: if (*pDependentClock & chipcHw_REG_PLL_CLOCK_BYPASS_SELECT) { /* Use crystal clock frequency when dependent PLL clock is bypassed */ freq = chipcHw_XTAL_FREQ_Hz; } else { /* Obtain PLL clock frequency using VCO dividers */ div = *pDependentClock & chipcHw_REG_PLL_CLOCK_MDIV_MASK; freq = div ? chipcHw_divide(vcoHz, div) : 0; } break; case NON_PLL_CLOCK: if (pDependentClock == (uint32_t *) &pChipcHw->ACLKClock) { freq = chipcHw_getClockFrequency (chipcHw_CLOCK_BUS); } else { if (*pDependentClock & chipcHw_REG_DIV_CLOCK_BYPASS_SELECT) { /* Use crystal clock frequency when dependent divider clock is bypassed */ freq = chipcHw_XTAL_FREQ_Hz; } else { /* Obtain divider clock frequency using XTAL dividers */ div = *pDependentClock & chipcHw_REG_DIV_CLOCK_DIV_MASK; freq = chipcHw_divide (chipcHw_XTAL_FREQ_Hz, (div ? div : 256)); } } break; } } else { /* Dependent on crystal clock */ freq = chipcHw_XTAL_FREQ_Hz; } div = *pClockCtrl & chipcHw_REG_DIV_CLOCK_DIV_MASK; return chipcHw_divide(freq, (div ? 
div : 256)); } return 0; } /****************************************************************************/ /** * @brief Set clock fequency for miscellaneous configurable clocks * * This function sets clock frequency * * @return Configured clock frequency in Hz * */ /****************************************************************************/ chipcHw_freq chipcHw_setClockFrequency(chipcHw_CLOCK_e clock, /* [ IN ] Configurable clock */ uint32_t freq /* [ IN ] Clock frequency in Hz */ ) { volatile uint32_t *pPLLReg = (uint32_t *) 0x0; volatile uint32_t *pClockCtrl = (uint32_t *) 0x0; volatile uint32_t *pDependentClock = (uint32_t *) 0x0; uint32_t vcoFreqPll1Hz = 0; /* Effective VCO frequency for PLL1 in Hz */ uint32_t desVcoFreqPll1Hz = 0; /* Desired VCO frequency for PLL1 in Hz */ uint32_t vcoFreqPll2Hz = 0; /* Effective VCO frequency for PLL2 in Hz */ uint32_t dependentClockType = 0; uint32_t vcoHz = 0; uint32_t desVcoHz = 0; /* Get VCO frequencies */ if ((pChipcHw->PLLPreDivider & chipcHw_REG_PLL_PREDIVIDER_NDIV_MODE_MASK) != chipcHw_REG_PLL_PREDIVIDER_NDIV_MODE_INTEGER) { uint64_t adjustFreq = 0; vcoFreqPll1Hz = chipcHw_XTAL_FREQ_Hz * chipcHw_divide(chipcHw_REG_PLL_PREDIVIDER_P1, chipcHw_REG_PLL_PREDIVIDER_P2) * ((pChipcHw->PLLPreDivider & chipcHw_REG_PLL_PREDIVIDER_NDIV_MASK) >> chipcHw_REG_PLL_PREDIVIDER_NDIV_SHIFT); /* Adjusted frequency due to chipcHw_REG_PLL_DIVIDER_NDIV_f_SS */ adjustFreq = (uint64_t) chipcHw_XTAL_FREQ_Hz * (uint64_t) chipcHw_REG_PLL_DIVIDER_NDIV_f_SS * chipcHw_divide(chipcHw_REG_PLL_PREDIVIDER_P1, (chipcHw_REG_PLL_PREDIVIDER_P2 * (uint64_t) chipcHw_REG_PLL_DIVIDER_FRAC)); vcoFreqPll1Hz += (uint32_t) adjustFreq; /* Desired VCO frequency */ desVcoFreqPll1Hz = chipcHw_XTAL_FREQ_Hz * chipcHw_divide(chipcHw_REG_PLL_PREDIVIDER_P1, chipcHw_REG_PLL_PREDIVIDER_P2) * (((pChipcHw->PLLPreDivider & chipcHw_REG_PLL_PREDIVIDER_NDIV_MASK) >> chipcHw_REG_PLL_PREDIVIDER_NDIV_SHIFT) + 1); } else { vcoFreqPll1Hz = desVcoFreqPll1Hz = chipcHw_XTAL_FREQ_Hz * 
chipcHw_divide(chipcHw_REG_PLL_PREDIVIDER_P1, chipcHw_REG_PLL_PREDIVIDER_P2) * ((pChipcHw->PLLPreDivider & chipcHw_REG_PLL_PREDIVIDER_NDIV_MASK) >> chipcHw_REG_PLL_PREDIVIDER_NDIV_SHIFT); } vcoFreqPll2Hz = chipcHw_XTAL_FREQ_Hz * chipcHw_divide(chipcHw_REG_PLL_PREDIVIDER_P1, chipcHw_REG_PLL_PREDIVIDER_P2) * ((pChipcHw->PLLPreDivider2 & chipcHw_REG_PLL_PREDIVIDER_NDIV_MASK) >> chipcHw_REG_PLL_PREDIVIDER_NDIV_SHIFT); switch (clock) { case chipcHw_CLOCK_DDR: /* Configure the DDR_ctrl:BUS ratio settings */ { REG_LOCAL_IRQ_SAVE; /* Dvide DDR_phy by two to obtain DDR_ctrl clock */ pChipcHw->DDRClock = (pChipcHw->DDRClock & ~chipcHw_REG_PLL_CLOCK_TO_BUS_RATIO_MASK) | ((((freq / 2) / chipcHw_getClockFrequency(chipcHw_CLOCK_BUS)) - 1) << chipcHw_REG_PLL_CLOCK_TO_BUS_RATIO_SHIFT); REG_LOCAL_IRQ_RESTORE; } pPLLReg = &pChipcHw->DDRClock; vcoHz = vcoFreqPll1Hz; desVcoHz = desVcoFreqPll1Hz; break; case chipcHw_CLOCK_ARM: pPLLReg = &pChipcHw->ARMClock; vcoHz = vcoFreqPll1Hz; desVcoHz = desVcoFreqPll1Hz; break; case chipcHw_CLOCK_ESW: pPLLReg = &pChipcHw->ESWClock; vcoHz = vcoFreqPll1Hz; desVcoHz = desVcoFreqPll1Hz; break; case chipcHw_CLOCK_VPM: /* Configure the VPM:BUS ratio settings */ { REG_LOCAL_IRQ_SAVE; pChipcHw->VPMClock = (pChipcHw->VPMClock & ~chipcHw_REG_PLL_CLOCK_TO_BUS_RATIO_MASK) | ((chipcHw_divide (freq, chipcHw_getClockFrequency(chipcHw_CLOCK_BUS)) - 1) << chipcHw_REG_PLL_CLOCK_TO_BUS_RATIO_SHIFT); REG_LOCAL_IRQ_RESTORE; } pPLLReg = &pChipcHw->VPMClock; vcoHz = vcoFreqPll1Hz; desVcoHz = desVcoFreqPll1Hz; break; case chipcHw_CLOCK_ESW125: pPLLReg = &pChipcHw->ESW125Clock; vcoHz = vcoFreqPll1Hz; desVcoHz = desVcoFreqPll1Hz; break; case chipcHw_CLOCK_UART: pPLLReg = &pChipcHw->UARTClock; vcoHz = vcoFreqPll1Hz; desVcoHz = desVcoFreqPll1Hz; break; case chipcHw_CLOCK_SDIO0: pPLLReg = &pChipcHw->SDIO0Clock; vcoHz = vcoFreqPll1Hz; desVcoHz = desVcoFreqPll1Hz; break; case chipcHw_CLOCK_SDIO1: pPLLReg = &pChipcHw->SDIO1Clock; vcoHz = vcoFreqPll1Hz; desVcoHz = 
desVcoFreqPll1Hz; break; case chipcHw_CLOCK_SPI: pPLLReg = &pChipcHw->SPIClock; vcoHz = vcoFreqPll1Hz; desVcoHz = desVcoFreqPll1Hz; break; case chipcHw_CLOCK_ETM: pPLLReg = &pChipcHw->ETMClock; vcoHz = vcoFreqPll1Hz; desVcoHz = desVcoFreqPll1Hz; break; case chipcHw_CLOCK_USB: pPLLReg = &pChipcHw->USBClock; vcoHz = vcoFreqPll2Hz; desVcoHz = vcoFreqPll2Hz; break; case chipcHw_CLOCK_LCD: pPLLReg = &pChipcHw->LCDClock; vcoHz = vcoFreqPll2Hz; desVcoHz = vcoFreqPll2Hz; break; case chipcHw_CLOCK_APM: pPLLReg = &pChipcHw->APMClock; vcoHz = vcoFreqPll2Hz; desVcoHz = vcoFreqPll2Hz; break; case chipcHw_CLOCK_BUS: pClockCtrl = &pChipcHw->ACLKClock; pDependentClock = &pChipcHw->ARMClock; vcoHz = vcoFreqPll1Hz; desVcoHz = desVcoFreqPll1Hz; dependentClockType = PLL_CLOCK; break; case chipcHw_CLOCK_OTP: pClockCtrl = &pChipcHw->OTPClock; break; case chipcHw_CLOCK_I2C: pClockCtrl = &pChipcHw->I2CClock; break; case chipcHw_CLOCK_I2S0: pClockCtrl = &pChipcHw->I2S0Clock; break; case chipcHw_CLOCK_RTBUS: pClockCtrl = &pChipcHw->RTBUSClock; pDependentClock = &pChipcHw->ACLKClock; dependentClockType = NON_PLL_CLOCK; break; case chipcHw_CLOCK_APM100: pClockCtrl = &pChipcHw->APM100Clock; pDependentClock = &pChipcHw->APMClock; vcoHz = vcoFreqPll2Hz; desVcoHz = vcoFreqPll2Hz; dependentClockType = PLL_CLOCK; break; case chipcHw_CLOCK_TSC: pClockCtrl = &pChipcHw->TSCClock; break; case chipcHw_CLOCK_LED: pClockCtrl = &pChipcHw->LEDClock; break; case chipcHw_CLOCK_I2S1: pClockCtrl = &pChipcHw->I2S1Clock; break; } if (pPLLReg) { /* Select XTAL as bypass source */ reg32_modify_and(pPLLReg, ~chipcHw_REG_PLL_CLOCK_SOURCE_GPIO); reg32_modify_or(pPLLReg, chipcHw_REG_PLL_CLOCK_BYPASS_SELECT); /* For DDR settings use only the PLL divider clock */ if (pPLLReg == &pChipcHw->DDRClock) { /* Set M1DIV for PLL1, which controls the DDR clock */ reg32_write(&pChipcHw->PLLDivider, (pChipcHw->PLLDivider & 0x00FFFFFF) | ((chipcHw_REG_PLL_DIVIDER_MDIV (desVcoHz, freq)) << 24)); /* Calculate expected frequency */ 
freq = chipcHw_divide(vcoHz, (((pChipcHw->PLLDivider & 0xFF000000) >> 24) ? ((pChipcHw->PLLDivider & 0xFF000000) >> 24) : 256)); } else { /* From chip revision number B0, LCD clock is internally divided by 2 */ if ((pPLLReg == &pChipcHw->LCDClock) && (chipcHw_getChipRevisionNumber() != chipcHw_REV_NUMBER_A0)) { desVcoHz >>= 1; vcoHz >>= 1; } /* Set MDIV to change the frequency */ reg32_modify_and(pPLLReg, ~(chipcHw_REG_PLL_CLOCK_MDIV_MASK)); reg32_modify_or(pPLLReg, chipcHw_REG_PLL_DIVIDER_MDIV(desVcoHz, freq)); /* Calculate expected frequency */ freq = chipcHw_divide(vcoHz, ((*(pPLLReg) & chipcHw_REG_PLL_CLOCK_MDIV_MASK) ? (*(pPLLReg) & chipcHw_REG_PLL_CLOCK_MDIV_MASK) : 256)); } /* Wait for for atleast 200ns as per the protocol to change frequency */ udelay(1); /* Do not bypass */ reg32_modify_and(pPLLReg, ~chipcHw_REG_PLL_CLOCK_BYPASS_SELECT); /* Return the configured frequency */ return freq; } else if (pClockCtrl) { uint32_t divider = 0; /* Divider clock should not be bypassed */ reg32_modify_and(pClockCtrl, ~chipcHw_REG_DIV_CLOCK_BYPASS_SELECT); /* Identify the clock source */ if (pDependentClock) { switch (dependentClockType) { case PLL_CLOCK: divider = chipcHw_divide(chipcHw_divide (desVcoHz, (*pDependentClock & chipcHw_REG_PLL_CLOCK_MDIV_MASK)), freq); break; case NON_PLL_CLOCK: { uint32_t sourceClock = 0; if (pDependentClock == (uint32_t *) &pChipcHw->ACLKClock) { sourceClock = chipcHw_getClockFrequency (chipcHw_CLOCK_BUS); } else { uint32_t div = *pDependentClock & chipcHw_REG_DIV_CLOCK_DIV_MASK; sourceClock = chipcHw_divide (chipcHw_XTAL_FREQ_Hz, ((div) ? div : 256)); } divider = chipcHw_divide(sourceClock, freq); } break; } } else { divider = chipcHw_divide(chipcHw_XTAL_FREQ_Hz, freq); } if (divider) { REG_LOCAL_IRQ_SAVE; /* Set the divider to obtain the required frequency */ *pClockCtrl = (*pClockCtrl & (~chipcHw_REG_DIV_CLOCK_DIV_MASK)) | (((divider > 256) ? 
chipcHw_REG_DIV_CLOCK_DIV_256 : divider) & chipcHw_REG_DIV_CLOCK_DIV_MASK); REG_LOCAL_IRQ_RESTORE; return freq; } } return 0; } EXPORT_SYMBOL(chipcHw_setClockFrequency); /****************************************************************************/ /** * @brief Set VPM clock in sync with BUS clock for Chip Rev #A0 * * This function does the phase adjustment between VPM and BUS clock * * @return >= 0 : On success (# of adjustment required) * -1 : On failure * */ /****************************************************************************/ static int vpmPhaseAlignA0(void) { uint32_t phaseControl; uint32_t phaseValue; uint32_t prevPhaseComp; int iter = 0; int adjustCount = 0; int count = 0; for (iter = 0; (iter < MAX_PHASE_ALIGN_ATTEMPTS) && (adjustCount < MAX_PHASE_ADJUST_COUNT); iter++) { phaseControl = (pChipcHw->VPMClock & chipcHw_REG_PLL_CLOCK_PHASE_CONTROL_MASK) >> chipcHw_REG_PLL_CLOCK_PHASE_CONTROL_SHIFT; phaseValue = 0; prevPhaseComp = 0; /* Step 1: Look for falling PH_COMP transition */ /* Read the contents of VPM Clock resgister */ phaseValue = pChipcHw->VPMClock; do { /* Store previous value of phase comparator */ prevPhaseComp = phaseValue & chipcHw_REG_PLL_CLOCK_PHASE_COMP; /* Change the value of PH_CTRL. */ reg32_write(&pChipcHw->VPMClock, (pChipcHw->VPMClock & (~chipcHw_REG_PLL_CLOCK_PHASE_CONTROL_MASK)) | (phaseControl << chipcHw_REG_PLL_CLOCK_PHASE_CONTROL_SHIFT)); /* Wait atleast 20 ns */ udelay(1); /* Toggle the LOAD_CH after phase control is written. */ pChipcHw->VPMClock ^= chipcHw_REG_PLL_CLOCK_PHASE_UPDATE_ENABLE; /* Read the contents of VPM Clock resgister. */ phaseValue = pChipcHw->VPMClock; if ((phaseValue & chipcHw_REG_PLL_CLOCK_PHASE_COMP) == 0x0) { phaseControl = (0x3F & (phaseControl - 1)); } else { /* Increment to the Phase count value for next write, if Phase is not stable. 
*/ phaseControl = (0x3F & (phaseControl + 1)); } /* Count number of adjustment made */ adjustCount++; } while (((prevPhaseComp == (phaseValue & chipcHw_REG_PLL_CLOCK_PHASE_COMP)) || /* Look for a transition */ ((phaseValue & chipcHw_REG_PLL_CLOCK_PHASE_COMP) != 0x0)) && /* Look for a falling edge */ (adjustCount < MAX_PHASE_ADJUST_COUNT) /* Do not exceed the limit while trying */ ); if (adjustCount >= MAX_PHASE_ADJUST_COUNT) { /* Failed to align VPM phase after MAX_PHASE_ADJUST_COUNT tries */ return -1; } /* Step 2: Keep moving forward to make sure falling PH_COMP transition was valid */ for (count = 0; (count < 5) && ((phaseValue & chipcHw_REG_PLL_CLOCK_PHASE_COMP) == 0); count++) { phaseControl = (0x3F & (phaseControl + 1)); reg32_write(&pChipcHw->VPMClock, (pChipcHw->VPMClock & (~chipcHw_REG_PLL_CLOCK_PHASE_CONTROL_MASK)) | (phaseControl << chipcHw_REG_PLL_CLOCK_PHASE_CONTROL_SHIFT)); /* Wait atleast 20 ns */ udelay(1); /* Toggle the LOAD_CH after phase control is written. */ pChipcHw->VPMClock ^= chipcHw_REG_PLL_CLOCK_PHASE_UPDATE_ENABLE; phaseValue = pChipcHw->VPMClock; /* Count number of adjustment made */ adjustCount++; } if (adjustCount >= MAX_PHASE_ADJUST_COUNT) { /* Failed to align VPM phase after MAX_PHASE_ADJUST_COUNT tries */ return -1; } if (count != 5) { /* Detected false transition */ continue; } /* Step 3: Keep moving backward to make sure falling PH_COMP transition was stable */ for (count = 0; (count < 3) && ((phaseValue & chipcHw_REG_PLL_CLOCK_PHASE_COMP) == 0); count++) { phaseControl = (0x3F & (phaseControl - 1)); reg32_write(&pChipcHw->VPMClock, (pChipcHw->VPMClock & (~chipcHw_REG_PLL_CLOCK_PHASE_CONTROL_MASK)) | (phaseControl << chipcHw_REG_PLL_CLOCK_PHASE_CONTROL_SHIFT)); /* Wait atleast 20 ns */ udelay(1); /* Toggle the LOAD_CH after phase control is written. 
*/ pChipcHw->VPMClock ^= chipcHw_REG_PLL_CLOCK_PHASE_UPDATE_ENABLE; phaseValue = pChipcHw->VPMClock; /* Count number of adjustment made */ adjustCount++; } if (adjustCount >= MAX_PHASE_ADJUST_COUNT) { /* Failed to align VPM phase after MAX_PHASE_ADJUST_COUNT tries */ return -1; } if (count != 3) { /* Detected noisy transition */ continue; } /* Step 4: Keep moving backward before the original transition took place. */ for (count = 0; (count < 5); count++) { phaseControl = (0x3F & (phaseControl - 1)); reg32_write(&pChipcHw->VPMClock, (pChipcHw->VPMClock & (~chipcHw_REG_PLL_CLOCK_PHASE_CONTROL_MASK)) | (phaseControl << chipcHw_REG_PLL_CLOCK_PHASE_CONTROL_SHIFT)); /* Wait atleast 20 ns */ udelay(1); /* Toggle the LOAD_CH after phase control is written. */ pChipcHw->VPMClock ^= chipcHw_REG_PLL_CLOCK_PHASE_UPDATE_ENABLE; phaseValue = pChipcHw->VPMClock; /* Count number of adjustment made */ adjustCount++; } if (adjustCount >= MAX_PHASE_ADJUST_COUNT) { /* Failed to align VPM phase after MAX_PHASE_ADJUST_COUNT tries */ return -1; } if ((phaseValue & chipcHw_REG_PLL_CLOCK_PHASE_COMP) == 0) { /* Detected false transition */ continue; } /* Step 5: Re discover the valid transition */ do { /* Store previous value of phase comparator */ prevPhaseComp = phaseValue; /* Change the value of PH_CTRL. */ reg32_write(&pChipcHw->VPMClock, (pChipcHw->VPMClock & (~chipcHw_REG_PLL_CLOCK_PHASE_CONTROL_MASK)) | (phaseControl << chipcHw_REG_PLL_CLOCK_PHASE_CONTROL_SHIFT)); /* Wait atleast 20 ns */ udelay(1); /* Toggle the LOAD_CH after phase control is written. */ pChipcHw->VPMClock ^= chipcHw_REG_PLL_CLOCK_PHASE_UPDATE_ENABLE; /* Read the contents of VPM Clock resgister. */ phaseValue = pChipcHw->VPMClock; if ((phaseValue & chipcHw_REG_PLL_CLOCK_PHASE_COMP) == 0x0) { phaseControl = (0x3F & (phaseControl - 1)); } else { /* Increment to the Phase count value for next write, if Phase is not stable. 
*/ phaseControl = (0x3F & (phaseControl + 1)); } /* Count number of adjustment made */ adjustCount++; } while (((prevPhaseComp == (phaseValue & chipcHw_REG_PLL_CLOCK_PHASE_COMP)) || ((phaseValue & chipcHw_REG_PLL_CLOCK_PHASE_COMP) != 0x0)) && (adjustCount < MAX_PHASE_ADJUST_COUNT)); if (adjustCount >= MAX_PHASE_ADJUST_COUNT) { /* Failed to align VPM phase after MAX_PHASE_ADJUST_COUNT tries */ return -1; } else { /* Valid phase must have detected */ break; } } /* For VPM Phase should be perfectly aligned. */ phaseControl = (((pChipcHw->VPMClock >> chipcHw_REG_PLL_CLOCK_PHASE_CONTROL_SHIFT) - 1) & 0x3F); { REG_LOCAL_IRQ_SAVE; pChipcHw->VPMClock = (pChipcHw->VPMClock & ~chipcHw_REG_PLL_CLOCK_PHASE_CONTROL_MASK) | (phaseControl << chipcHw_REG_PLL_CLOCK_PHASE_CONTROL_SHIFT); /* Load new phase value */ pChipcHw->VPMClock ^= chipcHw_REG_PLL_CLOCK_PHASE_UPDATE_ENABLE; REG_LOCAL_IRQ_RESTORE; } /* Return the status */ return (int)adjustCount; } /****************************************************************************/ /** * @brief Set VPM clock in sync with BUS clock * * This function does the phase adjustment between VPM and BUS clock * * @return >= 0 : On success (# of adjustment required) * -1 : On failure * */ /****************************************************************************/ int chipcHw_vpmPhaseAlign(void) { if (chipcHw_getChipRevisionNumber() == chipcHw_REV_NUMBER_A0) { return vpmPhaseAlignA0(); } else { uint32_t phaseControl = chipcHw_getVpmPhaseControl(); uint32_t phaseValue = 0; int adjustCount = 0; /* Disable VPM access */ pChipcHw->Spare1 &= ~chipcHw_REG_SPARE1_VPM_BUS_ACCESS_ENABLE; /* Disable HW VPM phase alignment */ chipcHw_vpmHwPhaseAlignDisable(); /* Enable SW VPM phase alignment */ chipcHw_vpmSwPhaseAlignEnable(); /* Adjust VPM phase */ while (adjustCount < MAX_PHASE_ADJUST_COUNT) { phaseValue = chipcHw_getVpmHwPhaseAlignStatus(); /* Adjust phase control value */ if (phaseValue > 0xF) { /* Increment phase control value */ phaseControl++; } 
else if (phaseValue < 0xF) { /* Decrement phase control value */ phaseControl--; } else { /* Enable VPM access */ pChipcHw->Spare1 |= chipcHw_REG_SPARE1_VPM_BUS_ACCESS_ENABLE; /* Return adjust count */ return adjustCount; } /* Change the value of PH_CTRL. */ reg32_write(&pChipcHw->VPMClock, (pChipcHw->VPMClock & (~chipcHw_REG_PLL_CLOCK_PHASE_CONTROL_MASK)) | (phaseControl << chipcHw_REG_PLL_CLOCK_PHASE_CONTROL_SHIFT)); /* Wait atleast 20 ns */ udelay(1); /* Toggle the LOAD_CH after phase control is written. */ pChipcHw->VPMClock ^= chipcHw_REG_PLL_CLOCK_PHASE_UPDATE_ENABLE; /* Count adjustment */ adjustCount++; } } /* Disable VPM access */ pChipcHw->Spare1 &= ~chipcHw_REG_SPARE1_VPM_BUS_ACCESS_ENABLE; return -1; } /****************************************************************************/ /** * @brief Local Divide function * * This function does the divide * * @return divide value * */ /****************************************************************************/ static int chipcHw_divide(int num, int denom) { int r; int t = 1; /* Shift denom and t up to the largest value to optimize algorithm */ /* t contains the units of each divide */ while ((denom & 0x40000000) == 0) { /* fails if denom=0 */ denom = denom << 1; t = t << 1; } /* Initialize the result */ r = 0; do { /* Determine if there exists a positive remainder */ if ((num - denom) >= 0) { /* Accumlate t to the result and calculate a new remainder */ num = num - denom; r = r + t; } /* Continue to shift denom and shift t down to 0 */ denom = denom >> 1; t = t >> 1; } while (t != 0); return r; }
gpl-2.0
budi79/deka-kernel-msm7x30-3.0
arch/arm/plat-mxc/devices/platform-mxc_nand.c
8048
2388
/*
 * Copyright (C) 2009-2010 Pengutronix
 * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
 *
 * This program is free software; you can redistribute it and/or modify it under
 * the terms of the GNU General Public License version 2 as published by the
 * Free Software Foundation.
 */
#include <asm/sizes.h>
#include <mach/hardware.h>
#include <mach/devices-common.h>

/* Resource descriptor for the v1/v2 NFC: a single MMIO window + IRQ. */
#define imx_mxc_nand_data_entry_single(soc, _size)	\
	{						\
		.iobase = soc ## _NFC_BASE_ADDR,	\
		.iosize = _size,			\
		.irq = soc ## _INT_NFC			\
	}

/* v3 NFC (e.g. i.MX51) additionally has an AXI window; .id forced to -1. */
#define imx_mxc_nandv3_data_entry_single(soc, _size)	\
	{						\
		.id = -1,				\
		.iobase = soc ## _NFC_BASE_ADDR,	\
		.iosize = _size,			\
		.axibase = soc ## _NFC_AXI_BASE_ADDR,	\
		.irq = soc ## _INT_NFC			\
	}

#ifdef CONFIG_SOC_IMX21
const struct imx_mxc_nand_data imx21_mxc_nand_data __initconst =
	imx_mxc_nand_data_entry_single(MX21, SZ_4K);
#endif /* ifdef CONFIG_SOC_IMX21 */

#ifdef CONFIG_SOC_IMX25
const struct imx_mxc_nand_data imx25_mxc_nand_data __initconst =
	imx_mxc_nand_data_entry_single(MX25, SZ_8K);
#endif /* ifdef CONFIG_SOC_IMX25 */

#ifdef CONFIG_SOC_IMX27
const struct imx_mxc_nand_data imx27_mxc_nand_data __initconst =
	imx_mxc_nand_data_entry_single(MX27, SZ_4K);
#endif /* ifdef CONFIG_SOC_IMX27 */

#ifdef CONFIG_SOC_IMX31
const struct imx_mxc_nand_data imx31_mxc_nand_data __initconst =
	imx_mxc_nand_data_entry_single(MX31, SZ_4K);
#endif

#ifdef CONFIG_SOC_IMX35
const struct imx_mxc_nand_data imx35_mxc_nand_data __initconst =
	imx_mxc_nand_data_entry_single(MX35, SZ_8K);
#endif

#ifdef CONFIG_SOC_IMX51
const struct imx_mxc_nand_data imx51_mxc_nand_data __initconst =
	imx_mxc_nandv3_data_entry_single(MX51, SZ_16K);
#endif

/*
 * Register a "mxc_nand" platform device for the given SoC data.
 *
 * @data:  per-SoC address/IRQ description (one of the tables above)
 * @pdata: board-supplied NAND platform data, copied by
 *         imx_add_platform_device()
 *
 * Returns the platform_device from imx_add_platform_device() (an
 * ERR_PTR/NULL convention determined by that helper).
 */
struct platform_device *__init imx_add_mxc_nand(
		const struct imx_mxc_nand_data *data,
		const struct mxc_nand_platform_data *pdata)
{
	/* AXI has to come first, that's how the mxc_nand driver expect it */
	struct resource res[] = {
		{
			.start = data->axibase,
			/* NOTE(review): AXI window size hard-coded to 16K;
			 * matches the only v3 user (MX51) above. */
			.end = data->axibase + SZ_16K - 1,
			.flags = IORESOURCE_MEM,
		}, {
			.start = data->iobase,
			.end = data->iobase + data->iosize - 1,
			.flags = IORESOURCE_MEM,
		}, {
			.start = data->irq,
			.end = data->irq,
			.flags = IORESOURCE_IRQ,
		},
	};
	/* If there is no AXI window (axibase == 0), skip the first
	 * resource entry: "res + !data->axibase" advances past it and
	 * the count is reduced by one accordingly. */
	return imx_add_platform_device("mxc_nand", data->id,
			res + !data->axibase,
			ARRAY_SIZE(res) - !data->axibase,
			pdata, sizeof(*pdata));
}
gpl-2.0
sycolon/android_kernel_lge_g3
arch/sh/mm/mmap.c
9072
6288
/*
 * arch/sh/mm/mmap.c
 *
 * Copyright (C) 2008 - 2009  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <asm/page.h>
#include <asm/processor.h>

unsigned long shm_align_mask = PAGE_SIZE - 1;	/* Sane caches */
EXPORT_SYMBOL(shm_align_mask);

#ifdef CONFIG_MMU
/*
 * To avoid cache aliases, we map the shared page with same color.
 */
/* Round @addr up to the next boundary that shares the cache colour
 * implied by @pgoff (colour = low bits of the file offset). */
static inline unsigned long COLOUR_ALIGN(unsigned long addr,
					 unsigned long pgoff)
{
	unsigned long base = (addr + shm_align_mask) & ~shm_align_mask;
	unsigned long off = (pgoff << PAGE_SHIFT) & shm_align_mask;

	return base + off;
}

/* Like COLOUR_ALIGN() but rounds down, used by the top-down search.
 * The base - off case keeps the result <= addr when base + off would
 * overshoot. */
static inline unsigned long COLOUR_ALIGN_DOWN(unsigned long addr,
					      unsigned long pgoff)
{
	unsigned long base = addr & ~shm_align_mask;
	unsigned long off = (pgoff << PAGE_SHIFT) & shm_align_mask;

	if (base + off <= addr)
		return base + off;

	return base - off;
}

/*
 * Bottom-up unmapped-area search with cache-colour alignment for
 * shared/file mappings. Uses the legacy mm->free_area_cache /
 * mm->cached_hole_size hints to avoid rescanning from
 * TASK_UNMAPPED_BASE every time.
 *
 * Returns the chosen address, or -EINVAL/-ENOMEM on failure.
 */
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
	unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr;
	int do_colour_align;

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
			return -EINVAL;
		return addr;
	}

	if (unlikely(len > TASK_SIZE))
		return -ENOMEM;

	/* Colour-align anything backed by a file or shared. */
	do_colour_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_colour_align = 1;

	/* Honour an address hint if it leaves a usable gap. */
	if (addr) {
		if (do_colour_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	/* Start from the cache only if the largest known hole fits us. */
	if (len > mm->cached_hole_size) {
		start_addr = addr = mm->free_area_cache;
	} else {
		mm->cached_hole_size = 0;
		start_addr = addr = TASK_UNMAPPED_BASE;
	}

full_search:
	if (do_colour_align)
		addr = COLOUR_ALIGN(addr, pgoff);
	else
		addr = PAGE_ALIGN(mm->free_area_cache);

	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point:  (!vma || addr < vma->vm_end). */
		if (unlikely(TASK_SIZE - len < addr)) {
			/*
			 * Start a new search - just in case we missed
			 * some holes.
			 */
			if (start_addr != TASK_UNMAPPED_BASE) {
				start_addr = addr = TASK_UNMAPPED_BASE;
				mm->cached_hole_size = 0;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (likely(!vma || addr + len <= vma->vm_start)) {
			/*
			 * Remember the place where we stopped the search:
			 */
			mm->free_area_cache = addr + len;
			return addr;
		}
		/* Track the largest hole seen so far for future callers. */
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;

		addr = vma->vm_end;
		if (do_colour_align)
			addr = COLOUR_ALIGN(addr, pgoff);
	}
}

/*
 * Top-down variant: search downward from mm->mmap_base, falling back
 * to the bottom-up search above if nothing fits. Same colour-alignment
 * rules as arch_get_unmapped_area().
 */
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			  const unsigned long len, const unsigned long pgoff,
			  const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	int do_colour_align;

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
			return -EINVAL;
		return addr;
	}

	if (unlikely(len > TASK_SIZE))
		return -ENOMEM;

	do_colour_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_colour_align = 1;

	/* requesting a specific address */
	if (addr) {
		if (do_colour_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	/* check if free_area_cache is useful for us */
	if (len <= mm->cached_hole_size) {
		mm->cached_hole_size = 0;
		mm->free_area_cache = mm->mmap_base;
	}

	/* either no address requested or can't fit in requested address hole */
	addr = mm->free_area_cache;
	if (do_colour_align) {
		unsigned long base = COLOUR_ALIGN_DOWN(addr-len, pgoff);

		addr = base + len;
	}

	/* make sure it can fit in the remaining address space */
	if (likely(addr > len)) {
		vma = find_vma(mm, addr-len);
		if (!vma || addr <= vma->vm_start) {
			/* remember the address as a hint for next time */
			return (mm->free_area_cache = addr-len);
		}
	}

	if (unlikely(mm->mmap_base < len))
		goto bottomup;

	addr = mm->mmap_base-len;
	if (do_colour_align)
		addr = COLOUR_ALIGN_DOWN(addr, pgoff);

	do {
		/*
		 * Lookup failure means no vma is above this address,
		 * else if new region fits below vma->vm_start,
		 * return with success:
		 */
		vma = find_vma(mm, addr);
		if (likely(!vma || addr+len <= vma->vm_start)) {
			/* remember the address as a hint for next time */
			return (mm->free_area_cache = addr);
		}

		/* remember the largest hole we saw so far */
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;

		/* try just below the current vma->vm_start */
		addr = vma->vm_start-len;
		if (do_colour_align)
			addr = COLOUR_ALIGN_DOWN(addr, pgoff);
	} while (likely(len < vma->vm_start));

bottomup:
	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	mm->cached_hole_size = ~0UL;
	mm->free_area_cache = TASK_UNMAPPED_BASE;
	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
	/*
	 * Restore the topdown base:
	 */
	mm->free_area_cache = mm->mmap_base;
	mm->cached_hole_size = ~0UL;

	return addr;
}
#endif /* CONFIG_MMU */

/*
 * You really shouldn't be using read() or write() on /dev/mem.  This
 * might go away in the future.
 */
/* Reject physical ranges below the start of RAM or past high_memory. */
int valid_phys_addr_range(unsigned long addr, size_t count)
{
	if (addr < __MEMORY_START)
		return 0;
	if (addr + count > __pa(high_memory))
		return 0;

	return 1;
}

/* mmap of /dev/mem: no additional restriction on this architecture. */
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return 1;
}
gpl-2.0
boa19861105/android_443_KitKat_kernel_htc_dlxub1
drivers/ide/falconide.c
14704
3961
/* * Atari Falcon IDE Driver * * Created 12 Jul 1997 by Geert Uytterhoeven * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive for * more details. */ #include <linux/module.h> #include <linux/types.h> #include <linux/mm.h> #include <linux/interrupt.h> #include <linux/blkdev.h> #include <linux/ide.h> #include <linux/init.h> #include <asm/setup.h> #include <asm/atarihw.h> #include <asm/atariints.h> #include <asm/atari_stdma.h> #include <asm/ide.h> #define DRV_NAME "falconide" /* * Base of the IDE interface */ #define ATA_HD_BASE 0xfff00000 /* * Offsets from the above base */ #define ATA_HD_CONTROL 0x39 /* * falconide_intr_lock is used to obtain access to the IDE interrupt, * which is shared between several drivers. */ static int falconide_intr_lock; static void falconide_release_lock(void) { if (falconide_intr_lock == 0) { printk(KERN_ERR "%s: bug\n", __func__); return; } falconide_intr_lock = 0; stdma_release(); } static void falconide_get_lock(irq_handler_t handler, void *data) { if (falconide_intr_lock == 0) { if (in_interrupt() > 0) panic("Falcon IDE hasn't ST-DMA lock in interrupt"); stdma_lock(handler, data); falconide_intr_lock = 1; } } static void falconide_input_data(ide_drive_t *drive, struct ide_cmd *cmd, void *buf, unsigned int len) { unsigned long data_addr = drive->hwif->io_ports.data_addr; if (drive->media == ide_disk && cmd && (cmd->tf_flags & IDE_TFLAG_FS)) { __ide_mm_insw(data_addr, buf, (len + 1) / 2); return; } raw_insw_swapw((u16 *)data_addr, buf, (len + 1) / 2); } static void falconide_output_data(ide_drive_t *drive, struct ide_cmd *cmd, void *buf, unsigned int len) { unsigned long data_addr = drive->hwif->io_ports.data_addr; if (drive->media == ide_disk && cmd && (cmd->tf_flags & IDE_TFLAG_FS)) { __ide_mm_outsw(data_addr, buf, (len + 1) / 2); return; } raw_outsw_swapw((u16 *)data_addr, buf, (len + 1) / 2); } /* Atari has a byte-swapped IDE interface 
*/ static const struct ide_tp_ops falconide_tp_ops = { .exec_command = ide_exec_command, .read_status = ide_read_status, .read_altstatus = ide_read_altstatus, .write_devctl = ide_write_devctl, .dev_select = ide_dev_select, .tf_load = ide_tf_load, .tf_read = ide_tf_read, .input_data = falconide_input_data, .output_data = falconide_output_data, }; static const struct ide_port_info falconide_port_info = { .get_lock = falconide_get_lock, .release_lock = falconide_release_lock, .tp_ops = &falconide_tp_ops, .host_flags = IDE_HFLAG_MMIO | IDE_HFLAG_SERIALIZE | IDE_HFLAG_NO_DMA, .irq_flags = IRQF_SHARED, .chipset = ide_generic, }; static void __init falconide_setup_ports(struct ide_hw *hw) { int i; memset(hw, 0, sizeof(*hw)); hw->io_ports.data_addr = ATA_HD_BASE; for (i = 1; i < 8; i++) hw->io_ports_array[i] = ATA_HD_BASE + 1 + i * 4; hw->io_ports.ctl_addr = ATA_HD_BASE + ATA_HD_CONTROL; hw->irq = IRQ_MFP_IDE; } /* * Probe for a Falcon IDE interface */ static int __init falconide_init(void) { struct ide_host *host; struct ide_hw hw, *hws[] = { &hw }; int rc; if (!MACH_IS_ATARI || !ATARIHW_PRESENT(IDE)) return -ENODEV; printk(KERN_INFO "ide: Falcon IDE controller\n"); if (!request_mem_region(ATA_HD_BASE, 0x40, DRV_NAME)) { printk(KERN_ERR "%s: resources busy\n", DRV_NAME); return -EBUSY; } falconide_setup_ports(&hw); host = ide_host_alloc(&falconide_port_info, hws, 1); if (host == NULL) { rc = -ENOMEM; goto err; } falconide_get_lock(NULL, NULL); rc = ide_host_register(host, &falconide_port_info, hws); falconide_release_lock(); if (rc) goto err_free; return 0; err_free: ide_host_free(host); err: release_mem_region(ATA_HD_BASE, 0x40); return rc; } module_init(falconide_init); MODULE_LICENSE("GPL");
gpl-2.0
mayli/unionfs-2.6.32.y
drivers/char/vt_ioctl.c
113
40978
/* * linux/drivers/char/vt_ioctl.c * * Copyright (C) 1992 obz under the linux copyright * * Dynamic diacritical handling - aeb@cwi.nl - Dec 1993 * Dynamic keymap and string allocation - aeb@cwi.nl - May 1994 * Restrict VT switching via ioctl() - grif@cs.ucr.edu - Dec 1995 * Some code moved for less code duplication - Andi Kleen - Mar 1997 * Check put/get_user, cleanups - acme@conectiva.com.br - Jun 2001 */ #include <linux/types.h> #include <linux/errno.h> #include <linux/sched.h> #include <linux/tty.h> #include <linux/timer.h> #include <linux/kernel.h> #include <linux/compat.h> #include <linux/module.h> #include <linux/kd.h> #include <linux/vt.h> #include <linux/string.h> #include <linux/slab.h> #include <linux/major.h> #include <linux/fs.h> #include <linux/console.h> #include <linux/consolemap.h> #include <linux/signal.h> #include <linux/smp_lock.h> #include <linux/timex.h> #include <asm/io.h> #include <asm/uaccess.h> #include <linux/kbd_kern.h> #include <linux/vt_kern.h> #include <linux/kbd_diacr.h> #include <linux/selection.h> char vt_dont_switch; extern struct tty_driver *console_driver; #define VT_IS_IN_USE(i) (console_driver->ttys[i] && console_driver->ttys[i]->count) #define VT_BUSY(i) (VT_IS_IN_USE(i) || i == fg_console || vc_cons[i].d == sel_cons) /* * Console (vt and kd) routines, as defined by USL SVR4 manual, and by * experimentation and study of X386 SYSV handling. * * One point of difference: SYSV vt's are /dev/vtX, which X >= 0, and * /dev/console is a separate ttyp. Under Linux, /dev/tty0 is /dev/console, * and the vc start at /dev/ttyX, X >= 1. We maintain that here, so we will * always treat our set of vt as numbered 1..MAX_NR_CONSOLES (corresponding to * ttys 0..MAX_NR_CONSOLES-1). Explicitly naming VT 0 is illegal, but using * /dev/tty0 (fg_console) as a target is legal, since an implicit aliasing * to the current console is done by the main ioctl code. 
*/ #ifdef CONFIG_X86 #include <linux/syscalls.h> #endif static void complete_change_console(struct vc_data *vc); /* * User space VT_EVENT handlers */ struct vt_event_wait { struct list_head list; struct vt_event event; int done; }; static LIST_HEAD(vt_events); static DEFINE_SPINLOCK(vt_event_lock); static DECLARE_WAIT_QUEUE_HEAD(vt_event_waitqueue); /** * vt_event_post * @event: the event that occurred * @old: old console * @new: new console * * Post an VT event to interested VT handlers */ void vt_event_post(unsigned int event, unsigned int old, unsigned int new) { struct list_head *pos, *head; unsigned long flags; int wake = 0; spin_lock_irqsave(&vt_event_lock, flags); head = &vt_events; list_for_each(pos, head) { struct vt_event_wait *ve = list_entry(pos, struct vt_event_wait, list); if (!(ve->event.event & event)) continue; ve->event.event = event; /* kernel view is consoles 0..n-1, user space view is console 1..n with 0 meaning current, so we must bias */ ve->event.oldev = old + 1; ve->event.newev = new + 1; wake = 1; ve->done = 1; } spin_unlock_irqrestore(&vt_event_lock, flags); if (wake) wake_up_interruptible(&vt_event_waitqueue); } /** * vt_event_wait - wait for an event * @vw: our event * * Waits for an event to occur which completes our vt_event_wait * structure. On return the structure has wv->done set to 1 for success * or 0 if some event such as a signal ended the wait. 
*/ static void vt_event_wait(struct vt_event_wait *vw) { unsigned long flags; /* Prepare the event */ INIT_LIST_HEAD(&vw->list); vw->done = 0; /* Queue our event */ spin_lock_irqsave(&vt_event_lock, flags); list_add(&vw->list, &vt_events); spin_unlock_irqrestore(&vt_event_lock, flags); /* Wait for it to pass */ wait_event_interruptible(vt_event_waitqueue, vw->done); /* Dequeue it */ spin_lock_irqsave(&vt_event_lock, flags); list_del(&vw->list); spin_unlock_irqrestore(&vt_event_lock, flags); } /** * vt_event_wait_ioctl - event ioctl handler * @arg: argument to ioctl * * Implement the VT_WAITEVENT ioctl using the VT event interface */ static int vt_event_wait_ioctl(struct vt_event __user *event) { struct vt_event_wait vw; if (copy_from_user(&vw.event, event, sizeof(struct vt_event))) return -EFAULT; /* Highest supported event for now */ if (vw.event.event & ~VT_MAX_EVENT) return -EINVAL; vt_event_wait(&vw); /* If it occurred report it */ if (vw.done) { if (copy_to_user(event, &vw.event, sizeof(struct vt_event))) return -EFAULT; return 0; } return -EINTR; } /** * vt_waitactive - active console wait * @event: event code * @n: new console * * Helper for event waits. Used to implement the legacy * event waiting ioctls in terms of events */ int vt_waitactive(int n) { struct vt_event_wait vw; do { if (n == fg_console + 1) break; vw.event.event = VT_EVENT_SWITCH; vt_event_wait(&vw); if (vw.done == 0) return -EINTR; } while (vw.event.newev != n); return 0; } /* * these are the valid i/o ports we're allowed to change. 
they map all the * video ports */ #define GPFIRST 0x3b4 #define GPLAST 0x3df #define GPNUM (GPLAST - GPFIRST + 1) #define i (tmp.kb_index) #define s (tmp.kb_table) #define v (tmp.kb_value) static inline int do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_struct *kbd) { struct kbentry tmp; ushort *key_map, val, ov; if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry))) return -EFAULT; if (!capable(CAP_SYS_TTY_CONFIG)) perm = 0; switch (cmd) { case KDGKBENT: key_map = key_maps[s]; if (key_map) { val = U(key_map[i]); if (kbd->kbdmode != VC_UNICODE && KTYP(val) >= NR_TYPES) val = K_HOLE; } else val = (i ? K_HOLE : K_NOSUCHMAP); return put_user(val, &user_kbe->kb_value); case KDSKBENT: if (!perm) return -EPERM; if (!i && v == K_NOSUCHMAP) { /* deallocate map */ key_map = key_maps[s]; if (s && key_map) { key_maps[s] = NULL; if (key_map[0] == U(K_ALLOCATED)) { kfree(key_map); keymap_count--; } } break; } if (KTYP(v) < NR_TYPES) { if (KVAL(v) > max_vals[KTYP(v)]) return -EINVAL; } else if (kbd->kbdmode != VC_UNICODE) return -EINVAL; /* ++Geert: non-PC keyboards may generate keycode zero */ #if !defined(__mc68000__) && !defined(__powerpc__) /* assignment to entry 0 only tests validity of args */ if (!i) break; #endif if (!(key_map = key_maps[s])) { int j; if (keymap_count >= MAX_NR_OF_USER_KEYMAPS && !capable(CAP_SYS_RESOURCE)) return -EPERM; key_map = kmalloc(sizeof(plain_map), GFP_KERNEL); if (!key_map) return -ENOMEM; key_maps[s] = key_map; key_map[0] = U(K_ALLOCATED); for (j = 1; j < NR_KEYS; j++) key_map[j] = U(K_HOLE); keymap_count++; } ov = U(key_map[i]); if (v == ov) break; /* nothing to do */ /* * Attention Key. 
*/ if (((ov == K_SAK) || (v == K_SAK)) && !capable(CAP_SYS_ADMIN)) return -EPERM; key_map[i] = U(v); if (!s && (KTYP(ov) == KT_SHIFT || KTYP(v) == KT_SHIFT)) compute_shiftstate(); break; } return 0; } #undef i #undef s #undef v static inline int do_kbkeycode_ioctl(int cmd, struct kbkeycode __user *user_kbkc, int perm) { struct kbkeycode tmp; int kc = 0; if (copy_from_user(&tmp, user_kbkc, sizeof(struct kbkeycode))) return -EFAULT; switch (cmd) { case KDGETKEYCODE: kc = getkeycode(tmp.scancode); if (kc >= 0) kc = put_user(kc, &user_kbkc->keycode); break; case KDSETKEYCODE: if (!perm) return -EPERM; kc = setkeycode(tmp.scancode, tmp.keycode); break; } return kc; } static inline int do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm) { struct kbsentry *kbs; char *p; u_char *q; u_char __user *up; int sz; int delta; char *first_free, *fj, *fnw; int i, j, k; int ret; if (!capable(CAP_SYS_TTY_CONFIG)) perm = 0; kbs = kmalloc(sizeof(*kbs), GFP_KERNEL); if (!kbs) { ret = -ENOMEM; goto reterr; } /* we mostly copy too much here (512bytes), but who cares ;) */ if (copy_from_user(kbs, user_kdgkb, sizeof(struct kbsentry))) { ret = -EFAULT; goto reterr; } kbs->kb_string[sizeof(kbs->kb_string)-1] = '\0'; i = kbs->kb_func; switch (cmd) { case KDGKBSENT: sz = sizeof(kbs->kb_string) - 1; /* sz should have been a struct member */ up = user_kdgkb->kb_string; p = func_table[i]; if(p) for ( ; *p && sz; p++, sz--) if (put_user(*p, up++)) { ret = -EFAULT; goto reterr; } if (put_user('\0', up)) { ret = -EFAULT; goto reterr; } kfree(kbs); return ((p && *p) ? -EOVERFLOW : 0); case KDSKBSENT: if (!perm) { ret = -EPERM; goto reterr; } q = func_table[i]; first_free = funcbufptr + (funcbufsize - funcbufleft); for (j = i+1; j < MAX_NR_FUNC && !func_table[j]; j++) ; if (j < MAX_NR_FUNC) fj = func_table[j]; else fj = first_free; delta = (q ? 
-strlen(q) : 1) + strlen(kbs->kb_string); if (delta <= funcbufleft) { /* it fits in current buf */ if (j < MAX_NR_FUNC) { memmove(fj + delta, fj, first_free - fj); for (k = j; k < MAX_NR_FUNC; k++) if (func_table[k]) func_table[k] += delta; } if (!q) func_table[i] = fj; funcbufleft -= delta; } else { /* allocate a larger buffer */ sz = 256; while (sz < funcbufsize - funcbufleft + delta) sz <<= 1; fnw = kmalloc(sz, GFP_KERNEL); if(!fnw) { ret = -ENOMEM; goto reterr; } if (!q) func_table[i] = fj; if (fj > funcbufptr) memmove(fnw, funcbufptr, fj - funcbufptr); for (k = 0; k < j; k++) if (func_table[k]) func_table[k] = fnw + (func_table[k] - funcbufptr); if (first_free > fj) { memmove(fnw + (fj - funcbufptr) + delta, fj, first_free - fj); for (k = j; k < MAX_NR_FUNC; k++) if (func_table[k]) func_table[k] = fnw + (func_table[k] - funcbufptr) + delta; } if (funcbufptr != func_buf) kfree(funcbufptr); funcbufptr = fnw; funcbufleft = funcbufleft - delta + sz - funcbufsize; funcbufsize = sz; } strcpy(func_table[i], kbs->kb_string); break; } ret = 0; reterr: kfree(kbs); return ret; } static inline int do_fontx_ioctl(int cmd, struct consolefontdesc __user *user_cfd, int perm, struct console_font_op *op) { struct consolefontdesc cfdarg; int i; if (copy_from_user(&cfdarg, user_cfd, sizeof(struct consolefontdesc))) return -EFAULT; switch (cmd) { case PIO_FONTX: if (!perm) return -EPERM; op->op = KD_FONT_OP_SET; op->flags = KD_FONT_FLAG_OLD; op->width = 8; op->height = cfdarg.charheight; op->charcount = cfdarg.charcount; op->data = cfdarg.chardata; return con_font_op(vc_cons[fg_console].d, op); case GIO_FONTX: { op->op = KD_FONT_OP_GET; op->flags = KD_FONT_FLAG_OLD; op->width = 8; op->height = cfdarg.charheight; op->charcount = cfdarg.charcount; op->data = cfdarg.chardata; i = con_font_op(vc_cons[fg_console].d, op); if (i) return i; cfdarg.charheight = op->height; cfdarg.charcount = op->charcount; if (copy_to_user(user_cfd, &cfdarg, sizeof(struct consolefontdesc))) return -EFAULT; 
return 0; } } return -EINVAL; } static inline int do_unimap_ioctl(int cmd, struct unimapdesc __user *user_ud, int perm, struct vc_data *vc) { struct unimapdesc tmp; if (copy_from_user(&tmp, user_ud, sizeof tmp)) return -EFAULT; if (tmp.entries) if (!access_ok(VERIFY_WRITE, tmp.entries, tmp.entry_ct*sizeof(struct unipair))) return -EFAULT; switch (cmd) { case PIO_UNIMAP: if (!perm) return -EPERM; return con_set_unimap(vc, tmp.entry_ct, tmp.entries); case GIO_UNIMAP: if (!perm && fg_console != vc->vc_num) return -EPERM; return con_get_unimap(vc, tmp.entry_ct, &(user_ud->entry_ct), tmp.entries); } return 0; } /* * We handle the console-specific ioctl's here. We allow the * capability to modify any console, not just the fg_console. */ int vt_ioctl(struct tty_struct *tty, struct file * file, unsigned int cmd, unsigned long arg) { struct vc_data *vc = tty->driver_data; struct console_font_op op; /* used in multiple places here */ struct kbd_struct * kbd; unsigned int console; unsigned char ucval; unsigned int uival; void __user *up = (void __user *)arg; int i, perm; int ret = 0; console = vc->vc_num; lock_kernel(); if (!vc_cons_allocated(console)) { /* impossible? */ ret = -ENOIOCTLCMD; goto out; } /* * To have permissions to do most of the vt ioctls, we either have * to be the owner of the tty, or have CAP_SYS_TTY_CONFIG. */ perm = 0; if (current->signal->tty == tty || capable(CAP_SYS_TTY_CONFIG)) perm = 1; kbd = kbd_table + console; switch (cmd) { case TIOCLINUX: ret = tioclinux(tty, arg); break; case KIOCSOUND: if (!perm) goto eperm; /* FIXME: This is an old broken API but we need to keep it supported and somehow separate the historic advertised tick rate from any real one */ if (arg) arg = CLOCK_TICK_RATE / arg; kd_mksound(arg, 0); break; case KDMKTONE: if (!perm) goto eperm; { unsigned int ticks, count; /* * Generate the tone for the appropriate number of ticks. * If the time is zero, turn off sound ourselves. 
*/ ticks = HZ * ((arg >> 16) & 0xffff) / 1000; count = ticks ? (arg & 0xffff) : 0; /* FIXME: This is an old broken API but we need to keep it supported and somehow separate the historic advertised tick rate from any real one */ if (count) count = CLOCK_TICK_RATE / count; kd_mksound(count, ticks); break; } case KDGKBTYPE: /* * this is naive. */ ucval = KB_101; goto setchar; /* * These cannot be implemented on any machine that implements * ioperm() in user level (such as Alpha PCs) or not at all. * * XXX: you should never use these, just call ioperm directly.. */ #ifdef CONFIG_X86 case KDADDIO: case KDDELIO: /* * KDADDIO and KDDELIO may be able to add ports beyond what * we reject here, but to be safe... */ if (arg < GPFIRST || arg > GPLAST) { ret = -EINVAL; break; } ret = sys_ioperm(arg, 1, (cmd == KDADDIO)) ? -ENXIO : 0; break; case KDENABIO: case KDDISABIO: ret = sys_ioperm(GPFIRST, GPNUM, (cmd == KDENABIO)) ? -ENXIO : 0; break; #endif /* Linux m68k/i386 interface for setting the keyboard delay/repeat rate */ case KDKBDREP: { struct kbd_repeat kbrep; if (!capable(CAP_SYS_TTY_CONFIG)) goto eperm; if (copy_from_user(&kbrep, up, sizeof(struct kbd_repeat))) { ret = -EFAULT; break; } ret = kbd_rate(&kbrep); if (ret) break; if (copy_to_user(up, &kbrep, sizeof(struct kbd_repeat))) ret = -EFAULT; break; } case KDSETMODE: /* * currently, setting the mode from KD_TEXT to KD_GRAPHICS * doesn't do a whole lot. i'm not sure if it should do any * restoration of modes or what... * * XXX It should at least call into the driver, fbdev's definitely * need to restore their engine state. 
--BenH */ if (!perm) goto eperm; switch (arg) { case KD_GRAPHICS: break; case KD_TEXT0: case KD_TEXT1: arg = KD_TEXT; case KD_TEXT: break; default: ret = -EINVAL; goto out; } if (vc->vc_mode == (unsigned char) arg) break; vc->vc_mode = (unsigned char) arg; if (console != fg_console) break; /* * explicitly blank/unblank the screen if switching modes */ acquire_console_sem(); if (arg == KD_TEXT) do_unblank_screen(1); else do_blank_screen(1); release_console_sem(); break; case KDGETMODE: uival = vc->vc_mode; goto setint; case KDMAPDISP: case KDUNMAPDISP: /* * these work like a combination of mmap and KDENABIO. * this could be easily finished. */ ret = -EINVAL; break; case KDSKBMODE: if (!perm) goto eperm; switch(arg) { case K_RAW: kbd->kbdmode = VC_RAW; break; case K_MEDIUMRAW: kbd->kbdmode = VC_MEDIUMRAW; break; case K_XLATE: kbd->kbdmode = VC_XLATE; compute_shiftstate(); break; case K_UNICODE: kbd->kbdmode = VC_UNICODE; compute_shiftstate(); break; default: ret = -EINVAL; goto out; } tty_ldisc_flush(tty); break; case KDGKBMODE: uival = ((kbd->kbdmode == VC_RAW) ? K_RAW : (kbd->kbdmode == VC_MEDIUMRAW) ? K_MEDIUMRAW : (kbd->kbdmode == VC_UNICODE) ? K_UNICODE : K_XLATE); goto setint; /* this could be folded into KDSKBMODE, but for compatibility reasons it is not so easy to fold KDGKBMETA into KDGKBMODE */ case KDSKBMETA: switch(arg) { case K_METABIT: clr_vc_kbd_mode(kbd, VC_META); break; case K_ESCPREFIX: set_vc_kbd_mode(kbd, VC_META); break; default: ret = -EINVAL; } break; case KDGKBMETA: uival = (vc_kbd_mode(kbd, VC_META) ? 
K_ESCPREFIX : K_METABIT); setint: ret = put_user(uival, (int __user *)arg); break; case KDGETKEYCODE: case KDSETKEYCODE: if(!capable(CAP_SYS_TTY_CONFIG)) perm = 0; ret = do_kbkeycode_ioctl(cmd, up, perm); break; case KDGKBENT: case KDSKBENT: ret = do_kdsk_ioctl(cmd, up, perm, kbd); break; case KDGKBSENT: case KDSKBSENT: ret = do_kdgkb_ioctl(cmd, up, perm); break; case KDGKBDIACR: { struct kbdiacrs __user *a = up; struct kbdiacr diacr; int i; if (put_user(accent_table_size, &a->kb_cnt)) { ret = -EFAULT; break; } for (i = 0; i < accent_table_size; i++) { diacr.diacr = conv_uni_to_8bit(accent_table[i].diacr); diacr.base = conv_uni_to_8bit(accent_table[i].base); diacr.result = conv_uni_to_8bit(accent_table[i].result); if (copy_to_user(a->kbdiacr + i, &diacr, sizeof(struct kbdiacr))) { ret = -EFAULT; break; } } break; } case KDGKBDIACRUC: { struct kbdiacrsuc __user *a = up; if (put_user(accent_table_size, &a->kb_cnt)) ret = -EFAULT; else if (copy_to_user(a->kbdiacruc, accent_table, accent_table_size*sizeof(struct kbdiacruc))) ret = -EFAULT; break; } case KDSKBDIACR: { struct kbdiacrs __user *a = up; struct kbdiacr diacr; unsigned int ct; int i; if (!perm) goto eperm; if (get_user(ct,&a->kb_cnt)) { ret = -EFAULT; break; } if (ct >= MAX_DIACR) { ret = -EINVAL; break; } accent_table_size = ct; for (i = 0; i < ct; i++) { if (copy_from_user(&diacr, a->kbdiacr + i, sizeof(struct kbdiacr))) { ret = -EFAULT; break; } accent_table[i].diacr = conv_8bit_to_uni(diacr.diacr); accent_table[i].base = conv_8bit_to_uni(diacr.base); accent_table[i].result = conv_8bit_to_uni(diacr.result); } break; } case KDSKBDIACRUC: { struct kbdiacrsuc __user *a = up; unsigned int ct; if (!perm) goto eperm; if (get_user(ct,&a->kb_cnt)) { ret = -EFAULT; break; } if (ct >= MAX_DIACR) { ret = -EINVAL; break; } accent_table_size = ct; if (copy_from_user(accent_table, a->kbdiacruc, ct*sizeof(struct kbdiacruc))) ret = -EFAULT; break; } /* the ioctls below read/set the flags usually shown in the leds */ /* 
don't use them - they will go away without warning */ case KDGKBLED: ucval = kbd->ledflagstate | (kbd->default_ledflagstate << 4); goto setchar; case KDSKBLED: if (!perm) goto eperm; if (arg & ~0x77) { ret = -EINVAL; break; } kbd->ledflagstate = (arg & 7); kbd->default_ledflagstate = ((arg >> 4) & 7); set_leds(); break; /* the ioctls below only set the lights, not the functions */ /* for those, see KDGKBLED and KDSKBLED above */ case KDGETLED: ucval = getledstate(); setchar: ret = put_user(ucval, (char __user *)arg); break; case KDSETLED: if (!perm) goto eperm; setledstate(kbd, arg); break; /* * A process can indicate its willingness to accept signals * generated by pressing an appropriate key combination. * Thus, one can have a daemon that e.g. spawns a new console * upon a keypress and then changes to it. * See also the kbrequest field of inittab(5). */ case KDSIGACCEPT: { if (!perm || !capable(CAP_KILL)) goto eperm; if (!valid_signal(arg) || arg < 1 || arg == SIGKILL) ret = -EINVAL; else { spin_lock_irq(&vt_spawn_con.lock); put_pid(vt_spawn_con.pid); vt_spawn_con.pid = get_pid(task_pid(current)); vt_spawn_con.sig = arg; spin_unlock_irq(&vt_spawn_con.lock); } break; } case VT_SETMODE: { struct vt_mode tmp; if (!perm) goto eperm; if (copy_from_user(&tmp, up, sizeof(struct vt_mode))) { ret = -EFAULT; goto out; } if (tmp.mode != VT_AUTO && tmp.mode != VT_PROCESS) { ret = -EINVAL; goto out; } acquire_console_sem(); vc->vt_mode = tmp; /* the frsig is ignored, so we set it to 0 */ vc->vt_mode.frsig = 0; put_pid(vc->vt_pid); vc->vt_pid = get_pid(task_pid(current)); /* no switch is required -- saw@shade.msu.ru */ vc->vt_newvt = -1; release_console_sem(); break; } case VT_GETMODE: { struct vt_mode tmp; int rc; acquire_console_sem(); memcpy(&tmp, &vc->vt_mode, sizeof(struct vt_mode)); release_console_sem(); rc = copy_to_user(up, &tmp, sizeof(struct vt_mode)); if (rc) ret = -EFAULT; break; } /* * Returns global vt state. 
Note that VT 0 is always open, since * it's an alias for the current VT, and people can't use it here. * We cannot return state for more than 16 VTs, since v_state is short. */ case VT_GETSTATE: { struct vt_stat __user *vtstat = up; unsigned short state, mask; if (put_user(fg_console + 1, &vtstat->v_active)) ret = -EFAULT; else { state = 1; /* /dev/tty0 is always open */ for (i = 0, mask = 2; i < MAX_NR_CONSOLES && mask; ++i, mask <<= 1) if (VT_IS_IN_USE(i)) state |= mask; ret = put_user(state, &vtstat->v_state); } break; } /* * Returns the first available (non-opened) console. */ case VT_OPENQRY: for (i = 0; i < MAX_NR_CONSOLES; ++i) if (! VT_IS_IN_USE(i)) break; uival = i < MAX_NR_CONSOLES ? (i+1) : -1; goto setint; /* * ioctl(fd, VT_ACTIVATE, num) will cause us to switch to vt # num, * with num >= 1 (switches to vt 0, our console, are not allowed, just * to preserve sanity). */ case VT_ACTIVATE: if (!perm) goto eperm; if (arg == 0 || arg > MAX_NR_CONSOLES) ret = -ENXIO; else { arg--; acquire_console_sem(); ret = vc_allocate(arg); release_console_sem(); if (ret) break; set_console(arg); } break; case VT_SETACTIVATE: { struct vt_setactivate vsa; if (!perm) goto eperm; if (copy_from_user(&vsa, (struct vt_setactivate __user *)arg, sizeof(struct vt_setactivate))) { ret = -EFAULT; goto out; } if (vsa.console == 0 || vsa.console > MAX_NR_CONSOLES) ret = -ENXIO; else { vsa.console--; acquire_console_sem(); ret = vc_allocate(vsa.console); if (ret == 0) { struct vc_data *nvc; /* This is safe providing we don't drop the console sem between vc_allocate and finishing referencing nvc */ nvc = vc_cons[vsa.console].d; nvc->vt_mode = vsa.mode; nvc->vt_mode.frsig = 0; put_pid(nvc->vt_pid); nvc->vt_pid = get_pid(task_pid(current)); } release_console_sem(); if (ret) break; /* Commence switch and lock */ set_console(arg); } } /* * wait until the specified VT has been activated */ case VT_WAITACTIVE: if (!perm) goto eperm; if (arg == 0 || arg > MAX_NR_CONSOLES) ret = -ENXIO; else ret 
= vt_waitactive(arg); break; /* * If a vt is under process control, the kernel will not switch to it * immediately, but postpone the operation until the process calls this * ioctl, allowing the switch to complete. * * According to the X sources this is the behavior: * 0: pending switch-from not OK * 1: pending switch-from OK * 2: completed switch-to OK */ case VT_RELDISP: if (!perm) goto eperm; if (vc->vt_mode.mode != VT_PROCESS) { ret = -EINVAL; break; } /* * Switching-from response */ acquire_console_sem(); if (vc->vt_newvt >= 0) { if (arg == 0) /* * Switch disallowed, so forget we were trying * to do it. */ vc->vt_newvt = -1; else { /* * The current vt has been released, so * complete the switch. */ int newvt; newvt = vc->vt_newvt; vc->vt_newvt = -1; ret = vc_allocate(newvt); if (ret) { release_console_sem(); break; } /* * When we actually do the console switch, * make sure we are atomic with respect to * other console switches.. */ complete_change_console(vc_cons[newvt].d); } } else { /* * Switched-to response */ /* * If it's just an ACK, ignore it */ if (arg != VT_ACKACQ) ret = -EINVAL; } release_console_sem(); break; /* * Disallocate memory associated to VT (but leave VT1) */ case VT_DISALLOCATE: if (arg > MAX_NR_CONSOLES) { ret = -ENXIO; break; } if (arg == 0) { /* deallocate all unused consoles, but leave 0 */ acquire_console_sem(); for (i=1; i<MAX_NR_CONSOLES; i++) if (! 
VT_BUSY(i)) vc_deallocate(i); release_console_sem(); } else { /* deallocate a single console, if possible */ arg--; if (VT_BUSY(arg)) ret = -EBUSY; else if (arg) { /* leave 0 */ acquire_console_sem(); vc_deallocate(arg); release_console_sem(); } } break; case VT_RESIZE: { struct vt_sizes __user *vtsizes = up; struct vc_data *vc; ushort ll,cc; if (!perm) goto eperm; if (get_user(ll, &vtsizes->v_rows) || get_user(cc, &vtsizes->v_cols)) ret = -EFAULT; else { acquire_console_sem(); for (i = 0; i < MAX_NR_CONSOLES; i++) { vc = vc_cons[i].d; if (vc) { vc->vc_resize_user = 1; vc_resize(vc_cons[i].d, cc, ll); } } release_console_sem(); } break; } case VT_RESIZEX: { struct vt_consize __user *vtconsize = up; ushort ll,cc,vlin,clin,vcol,ccol; if (!perm) goto eperm; if (!access_ok(VERIFY_READ, vtconsize, sizeof(struct vt_consize))) { ret = -EFAULT; break; } /* FIXME: Should check the copies properly */ __get_user(ll, &vtconsize->v_rows); __get_user(cc, &vtconsize->v_cols); __get_user(vlin, &vtconsize->v_vlin); __get_user(clin, &vtconsize->v_clin); __get_user(vcol, &vtconsize->v_vcol); __get_user(ccol, &vtconsize->v_ccol); vlin = vlin ? 
vlin : vc->vc_scan_lines; if (clin) { if (ll) { if (ll != vlin/clin) { /* Parameters don't add up */ ret = -EINVAL; break; } } else ll = vlin/clin; } if (vcol && ccol) { if (cc) { if (cc != vcol/ccol) { ret = -EINVAL; break; } } else cc = vcol/ccol; } if (clin > 32) { ret = -EINVAL; break; } for (i = 0; i < MAX_NR_CONSOLES; i++) { if (!vc_cons[i].d) continue; acquire_console_sem(); if (vlin) vc_cons[i].d->vc_scan_lines = vlin; if (clin) vc_cons[i].d->vc_font.height = clin; vc_cons[i].d->vc_resize_user = 1; vc_resize(vc_cons[i].d, cc, ll); release_console_sem(); } break; } case PIO_FONT: { if (!perm) goto eperm; op.op = KD_FONT_OP_SET; op.flags = KD_FONT_FLAG_OLD | KD_FONT_FLAG_DONT_RECALC; /* Compatibility */ op.width = 8; op.height = 0; op.charcount = 256; op.data = up; ret = con_font_op(vc_cons[fg_console].d, &op); break; } case GIO_FONT: { op.op = KD_FONT_OP_GET; op.flags = KD_FONT_FLAG_OLD; op.width = 8; op.height = 32; op.charcount = 256; op.data = up; ret = con_font_op(vc_cons[fg_console].d, &op); break; } case PIO_CMAP: if (!perm) ret = -EPERM; else ret = con_set_cmap(up); break; case GIO_CMAP: ret = con_get_cmap(up); break; case PIO_FONTX: case GIO_FONTX: ret = do_fontx_ioctl(cmd, up, perm, &op); break; case PIO_FONTRESET: { if (!perm) goto eperm; #ifdef BROKEN_GRAPHICS_PROGRAMS /* With BROKEN_GRAPHICS_PROGRAMS defined, the default font is not saved. 
*/ ret = -ENOSYS; break; #else { op.op = KD_FONT_OP_SET_DEFAULT; op.data = NULL; ret = con_font_op(vc_cons[fg_console].d, &op); if (ret) break; con_set_default_unimap(vc_cons[fg_console].d); break; } #endif } case KDFONTOP: { if (copy_from_user(&op, up, sizeof(op))) { ret = -EFAULT; break; } if (!perm && op.op != KD_FONT_OP_GET) goto eperm; ret = con_font_op(vc, &op); if (ret) break; if (copy_to_user(up, &op, sizeof(op))) ret = -EFAULT; break; } case PIO_SCRNMAP: if (!perm) ret = -EPERM; else ret = con_set_trans_old(up); break; case GIO_SCRNMAP: ret = con_get_trans_old(up); break; case PIO_UNISCRNMAP: if (!perm) ret = -EPERM; else ret = con_set_trans_new(up); break; case GIO_UNISCRNMAP: ret = con_get_trans_new(up); break; case PIO_UNIMAPCLR: { struct unimapinit ui; if (!perm) goto eperm; ret = copy_from_user(&ui, up, sizeof(struct unimapinit)); if (!ret) con_clear_unimap(vc, &ui); break; } case PIO_UNIMAP: case GIO_UNIMAP: ret = do_unimap_ioctl(cmd, up, perm, vc); break; case VT_LOCKSWITCH: if (!capable(CAP_SYS_TTY_CONFIG)) goto eperm; vt_dont_switch = 1; break; case VT_UNLOCKSWITCH: if (!capable(CAP_SYS_TTY_CONFIG)) goto eperm; vt_dont_switch = 0; break; case VT_GETHIFONTMASK: ret = put_user(vc->vc_hi_font_mask, (unsigned short __user *)arg); break; case VT_WAITEVENT: ret = vt_event_wait_ioctl((struct vt_event __user *)arg); break; default: ret = -ENOIOCTLCMD; } out: unlock_kernel(); return ret; eperm: ret = -EPERM; goto out; } void reset_vc(struct vc_data *vc) { vc->vc_mode = KD_TEXT; kbd_table[vc->vc_num].kbdmode = default_utf8 ? 
VC_UNICODE : VC_XLATE; vc->vt_mode.mode = VT_AUTO; vc->vt_mode.waitv = 0; vc->vt_mode.relsig = 0; vc->vt_mode.acqsig = 0; vc->vt_mode.frsig = 0; put_pid(vc->vt_pid); vc->vt_pid = NULL; vc->vt_newvt = -1; if (!in_interrupt()) /* Via keyboard.c:SAK() - akpm */ reset_palette(vc); } void vc_SAK(struct work_struct *work) { struct vc *vc_con = container_of(work, struct vc, SAK_work); struct vc_data *vc; struct tty_struct *tty; acquire_console_sem(); vc = vc_con->d; if (vc) { tty = vc->vc_tty; /* * SAK should also work in all raw modes and reset * them properly. */ if (tty) __do_SAK(tty); reset_vc(vc); } release_console_sem(); } #ifdef CONFIG_COMPAT struct compat_consolefontdesc { unsigned short charcount; /* characters in font (256 or 512) */ unsigned short charheight; /* scan lines per character (1-32) */ compat_caddr_t chardata; /* font data in expanded form */ }; static inline int compat_fontx_ioctl(int cmd, struct compat_consolefontdesc __user *user_cfd, int perm, struct console_font_op *op) { struct compat_consolefontdesc cfdarg; int i; if (copy_from_user(&cfdarg, user_cfd, sizeof(struct compat_consolefontdesc))) return -EFAULT; switch (cmd) { case PIO_FONTX: if (!perm) return -EPERM; op->op = KD_FONT_OP_SET; op->flags = KD_FONT_FLAG_OLD; op->width = 8; op->height = cfdarg.charheight; op->charcount = cfdarg.charcount; op->data = compat_ptr(cfdarg.chardata); return con_font_op(vc_cons[fg_console].d, op); case GIO_FONTX: op->op = KD_FONT_OP_GET; op->flags = KD_FONT_FLAG_OLD; op->width = 8; op->height = cfdarg.charheight; op->charcount = cfdarg.charcount; op->data = compat_ptr(cfdarg.chardata); i = con_font_op(vc_cons[fg_console].d, op); if (i) return i; cfdarg.charheight = op->height; cfdarg.charcount = op->charcount; if (copy_to_user(user_cfd, &cfdarg, sizeof(struct compat_consolefontdesc))) return -EFAULT; return 0; } return -EINVAL; } struct compat_console_font_op { compat_uint_t op; /* operation code KD_FONT_OP_* */ compat_uint_t flags; /* KD_FONT_FLAG_* */ 
compat_uint_t width, height; /* font size */ compat_uint_t charcount; compat_caddr_t data; /* font data with height fixed to 32 */ }; static inline int compat_kdfontop_ioctl(struct compat_console_font_op __user *fontop, int perm, struct console_font_op *op, struct vc_data *vc) { int i; if (copy_from_user(op, fontop, sizeof(struct compat_console_font_op))) return -EFAULT; if (!perm && op->op != KD_FONT_OP_GET) return -EPERM; op->data = compat_ptr(((struct compat_console_font_op *)op)->data); op->flags |= KD_FONT_FLAG_OLD; i = con_font_op(vc, op); if (i) return i; ((struct compat_console_font_op *)op)->data = (unsigned long)op->data; if (copy_to_user(fontop, op, sizeof(struct compat_console_font_op))) return -EFAULT; return 0; } struct compat_unimapdesc { unsigned short entry_ct; compat_caddr_t entries; }; static inline int compat_unimap_ioctl(unsigned int cmd, struct compat_unimapdesc __user *user_ud, int perm, struct vc_data *vc) { struct compat_unimapdesc tmp; struct unipair __user *tmp_entries; if (copy_from_user(&tmp, user_ud, sizeof tmp)) return -EFAULT; tmp_entries = compat_ptr(tmp.entries); if (tmp_entries) if (!access_ok(VERIFY_WRITE, tmp_entries, tmp.entry_ct*sizeof(struct unipair))) return -EFAULT; switch (cmd) { case PIO_UNIMAP: if (!perm) return -EPERM; return con_set_unimap(vc, tmp.entry_ct, tmp_entries); case GIO_UNIMAP: if (!perm && fg_console != vc->vc_num) return -EPERM; return con_get_unimap(vc, tmp.entry_ct, &(user_ud->entry_ct), tmp_entries); } return 0; } long vt_compat_ioctl(struct tty_struct *tty, struct file * file, unsigned int cmd, unsigned long arg) { struct vc_data *vc = tty->driver_data; struct console_font_op op; /* used in multiple places here */ struct kbd_struct *kbd; unsigned int console; void __user *up = (void __user *)arg; int perm; int ret = 0; console = vc->vc_num; lock_kernel(); if (!vc_cons_allocated(console)) { /* impossible? 
*/ ret = -ENOIOCTLCMD; goto out; } /* * To have permissions to do most of the vt ioctls, we either have * to be the owner of the tty, or have CAP_SYS_TTY_CONFIG. */ perm = 0; if (current->signal->tty == tty || capable(CAP_SYS_TTY_CONFIG)) perm = 1; kbd = kbd_table + console; switch (cmd) { /* * these need special handlers for incompatible data structures */ case PIO_FONTX: case GIO_FONTX: ret = compat_fontx_ioctl(cmd, up, perm, &op); break; case KDFONTOP: ret = compat_kdfontop_ioctl(up, perm, &op, vc); break; case PIO_UNIMAP: case GIO_UNIMAP: ret = compat_unimap_ioctl(cmd, up, perm, vc); break; /* * all these treat 'arg' as an integer */ case KIOCSOUND: case KDMKTONE: #ifdef CONFIG_X86 case KDADDIO: case KDDELIO: #endif case KDSETMODE: case KDMAPDISP: case KDUNMAPDISP: case KDSKBMODE: case KDSKBMETA: case KDSKBLED: case KDSETLED: case KDSIGACCEPT: case VT_ACTIVATE: case VT_WAITACTIVE: case VT_RELDISP: case VT_DISALLOCATE: case VT_RESIZE: case VT_RESIZEX: goto fallback; /* * the rest has a compatible data structure behind arg, * but we have to convert it to a proper 64 bit pointer. */ default: arg = (unsigned long)compat_ptr(arg); goto fallback; } out: unlock_kernel(); return ret; fallback: unlock_kernel(); return vt_ioctl(tty, file, cmd, arg); } #endif /* CONFIG_COMPAT */ /* * Performs the back end of a vt switch. Called under the console * semaphore. */ static void complete_change_console(struct vc_data *vc) { unsigned char old_vc_mode; int old = fg_console; last_console = fg_console; /* * If we're switching, we could be going from KD_GRAPHICS to * KD_TEXT mode or vice versa, which means we need to blank or * unblank the screen later. */ old_vc_mode = vc_cons[fg_console].d->vc_mode; switch_screen(vc); /* * This can't appear below a successful kill_pid(). If it did, * then the *blank_screen operation could occur while X, having * received acqsig, is waking up on another processor. 
This * condition can lead to overlapping accesses to the VGA range * and the framebuffer (causing system lockups). * * To account for this we duplicate this code below only if the * controlling process is gone and we've called reset_vc. */ if (old_vc_mode != vc->vc_mode) { if (vc->vc_mode == KD_TEXT) do_unblank_screen(1); else do_blank_screen(1); } /* * If this new console is under process control, send it a signal * telling it that it has acquired. Also check if it has died and * clean up (similar to logic employed in change_console()) */ if (vc->vt_mode.mode == VT_PROCESS) { /* * Send the signal as privileged - kill_pid() will * tell us if the process has gone or something else * is awry */ if (kill_pid(vc->vt_pid, vc->vt_mode.acqsig, 1) != 0) { /* * The controlling process has died, so we revert back to * normal operation. In this case, we'll also change back * to KD_TEXT mode. I'm not sure if this is strictly correct * but it saves the agony when the X server dies and the screen * remains blanked due to KD_GRAPHICS! It would be nice to do * this outside of VT_PROCESS but there is no single process * to account for and tracking tty count may be undesirable. */ reset_vc(vc); if (old_vc_mode != vc->vc_mode) { if (vc->vc_mode == KD_TEXT) do_unblank_screen(1); else do_blank_screen(1); } } } /* * Wake anyone waiting for their VT to activate */ vt_event_post(VT_EVENT_SWITCH, old, vc->vc_num); return; } /* * Performs the front-end of a vt switch */ void change_console(struct vc_data *new_vc) { struct vc_data *vc; if (!new_vc || new_vc->vc_num == fg_console || vt_dont_switch) return; /* * If this vt is in process mode, then we need to handshake with * that process before switching. Essentially, we store where that * vt wants to switch to and wait for it to tell us when it's done * (via VT_RELDISP ioctl). * * We also check to see if the controlling process still exists. * If it doesn't, we reset this vt to auto mode and continue. 
* This is a cheap way to track process control. The worst thing * that can happen is: we send a signal to a process, it dies, and * the switch gets "lost" waiting for a response; hopefully, the * user will try again, we'll detect the process is gone (unless * the user waits just the right amount of time :-) and revert the * vt to auto control. */ vc = vc_cons[fg_console].d; if (vc->vt_mode.mode == VT_PROCESS) { /* * Send the signal as privileged - kill_pid() will * tell us if the process has gone or something else * is awry. * * We need to set vt_newvt *before* sending the signal or we * have a race. */ vc->vt_newvt = new_vc->vc_num; if (kill_pid(vc->vt_pid, vc->vt_mode.relsig, 1) == 0) { /* * It worked. Mark the vt to switch to and * return. The process needs to send us a * VT_RELDISP ioctl to complete the switch. */ return; } /* * The controlling process has died, so we revert back to * normal operation. In this case, we'll also change back * to KD_TEXT mode. I'm not sure if this is strictly correct * but it saves the agony when the X server dies and the screen * remains blanked due to KD_GRAPHICS! It would be nice to do * this outside of VT_PROCESS but there is no single process * to account for and tracking tty count may be undesirable. */ reset_vc(vc); /* * Fall through to normal (VT_AUTO) handling of the switch... */ } /* * Ignore all switches in KD_GRAPHICS+VT_AUTO mode */ if (vc->vc_mode == KD_GRAPHICS) return; complete_change_console(new_vc); } /* Perform a kernel triggered VT switch for suspend/resume */ static int disable_vt_switch; int vt_move_to_console(unsigned int vt, int alloc) { int prev; acquire_console_sem(); /* Graphics mode - up to X */ if (disable_vt_switch) { release_console_sem(); return 0; } prev = fg_console; if (alloc && vc_allocate(vt)) { /* we can't have a free VC for now. Too bad, * we don't want to mess the screen for now. 
*/ release_console_sem(); return -ENOSPC; } if (set_console(vt)) { /* * We're unable to switch to the SUSPEND_CONSOLE. * Let the calling function know so it can decide * what to do. */ release_console_sem(); return -EIO; } release_console_sem(); if (vt_waitactive(vt + 1)) { pr_debug("Suspend: Can't switch VCs."); return -EINTR; } return prev; } /* * Normally during a suspend, we allocate a new console and switch to it. * When we resume, we switch back to the original console. This switch * can be slow, so on systems where the framebuffer can handle restoration * of video registers anyways, there's little point in doing the console * switch. This function allows you to disable it by passing it '0'. */ void pm_set_vt_switch(int do_switch) { acquire_console_sem(); disable_vt_switch = !do_switch; release_console_sem(); } EXPORT_SYMBOL(pm_set_vt_switch);
gpl-2.0
sycolon/android_kernel_lge_g3
drivers/net/wireless/bcmdhd/src/wl/sys/wl_iw.c
113
96009
/* * Linux Wireless Extensions support * * Copyright (C) 1999-2014, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you * under the terms of the GNU General Public License version 2 (the "GPL"), * available at http://www.broadcom.com/licenses/GPLv2.php, with the * following added to such license: * * As a special exception, the copyright holders of this software give you * permission to link this software with independent modules, and to copy and * distribute the resulting executable under terms of your choice, provided that * you also meet, for each linked independent module, the terms and conditions of * the license of that module. An independent module is a module which is not * derived from this software. The special exception does not apply to any * modifications of the software. * * Notwithstanding the above, under no circumstances may you combine this * software in any way with any other Broadcom software provided under a license * other than the GPL, without Broadcom's express prior written consent. 
* * $Id: wl_iw.c 396420 2013-04-12 06:55:45Z $ */ #if defined(USE_IW) #define LINUX_PORT #include <typedefs.h> #include <linuxver.h> #include <osl.h> #include <bcmutils.h> #include <bcmendian.h> #include <proto/ethernet.h> #include <linux/if_arp.h> #include <asm/uaccess.h> typedef const struct si_pub si_t; #include <wlioctl.h> #include <wl_dbg.h> #include <wl_iw.h> #ifdef BCMWAPI_WPI /* these items should evetually go into wireless.h of the linux system headfile dir */ #ifndef IW_ENCODE_ALG_SM4 #define IW_ENCODE_ALG_SM4 0x20 #endif #ifndef IW_AUTH_WAPI_ENABLED #define IW_AUTH_WAPI_ENABLED 0x20 #endif #ifndef IW_AUTH_WAPI_VERSION_1 #define IW_AUTH_WAPI_VERSION_1 0x00000008 #endif #ifndef IW_AUTH_CIPHER_SMS4 #define IW_AUTH_CIPHER_SMS4 0x00000020 #endif #ifndef IW_AUTH_KEY_MGMT_WAPI_PSK #define IW_AUTH_KEY_MGMT_WAPI_PSK 4 #endif #ifndef IW_AUTH_KEY_MGMT_WAPI_CERT #define IW_AUTH_KEY_MGMT_WAPI_CERT 8 #endif #endif /* BCMWAPI_WPI */ /* Broadcom extensions to WEXT, linux upstream has obsoleted WEXT */ #ifndef IW_AUTH_KEY_MGMT_FT_802_1X #define IW_AUTH_KEY_MGMT_FT_802_1X 0x04 #endif #ifndef IW_AUTH_KEY_MGMT_FT_PSK #define IW_AUTH_KEY_MGMT_FT_PSK 0x08 #endif #ifndef IW_ENC_CAPA_FW_ROAM_ENABLE #define IW_ENC_CAPA_FW_ROAM_ENABLE 0x00000020 #endif /* FC9: wireless.h 2.6.25-14.fc9.i686 is missing these, even though WIRELESS_EXT is set to latest * version 22. */ #ifndef IW_ENCODE_ALG_PMK #define IW_ENCODE_ALG_PMK 4 #endif #ifndef IW_ENC_CAPA_4WAY_HANDSHAKE #define IW_ENC_CAPA_4WAY_HANDSHAKE 0x00000010 #endif /* End FC9. 
*/ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) #include <linux/rtnetlink.h> #endif #if defined(SOFTAP) struct net_device *ap_net_dev = NULL; tsk_ctl_t ap_eth_ctl; /* apsta AP netdev waiter thread */ #endif /* SOFTAP */ extern bool wl_iw_conn_status_str(uint32 event_type, uint32 status, uint32 reason, char* stringBuf, uint buflen); uint wl_msg_level = WL_ERROR_VAL; #define MAX_WLIW_IOCTL_LEN 1024 /* IOCTL swapping mode for Big Endian host with Little Endian dongle. Default to off */ #define htod32(i) i #define htod16(i) i #define dtoh32(i) i #define dtoh16(i) i #define htodchanspec(i) i #define dtohchanspec(i) i extern struct iw_statistics *dhd_get_wireless_stats(struct net_device *dev); extern int dhd_wait_pend8021x(struct net_device *dev); #if WIRELESS_EXT < 19 #define IW_IOCTL_IDX(cmd) ((cmd) - SIOCIWFIRST) #define IW_EVENT_IDX(cmd) ((cmd) - IWEVFIRST) #endif /* WIRELESS_EXT < 19 */ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)) #define DAEMONIZE(a) #elif ((LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)) && \ (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))) #define DAEMONIZE(a) daemonize(a); \ allow_signal(SIGKILL); \ allow_signal(SIGTERM); #else /* Linux 2.4 (w/o preemption patch) */ #define RAISE_RX_SOFTIRQ() \ cpu_raise_softirq(smp_processor_id(), NET_RX_SOFTIRQ) #define DAEMONIZE(a) daemonize(); \ do { if (a) \ strncpy(current->comm, a, MIN(sizeof(current->comm), (strlen(a) + 1))); \ } while (0); #endif /* LINUX_VERSION_CODE */ #define ISCAN_STATE_IDLE 0 #define ISCAN_STATE_SCANING 1 /* the buf lengh can be WLC_IOCTL_MAXLEN (8K) to reduce iteration */ #define WLC_IW_ISCAN_MAXLEN 2048 typedef struct iscan_buf { struct iscan_buf * next; char iscan_buf[WLC_IW_ISCAN_MAXLEN]; } iscan_buf_t; typedef struct iscan_info { struct net_device *dev; struct timer_list timer; uint32 timer_ms; uint32 timer_on; int iscan_state; iscan_buf_t * list_hdr; iscan_buf_t * list_cur; /* Thread to work on iscan */ long sysioc_pid; struct semaphore sysioc_sem; struct 
completion sysioc_exited; char ioctlbuf[WLC_IOCTL_SMLEN]; } iscan_info_t; iscan_info_t *g_iscan = NULL; static void wl_iw_timerfunc(ulong data); static void wl_iw_set_event_mask(struct net_device *dev); static int wl_iw_iscan(iscan_info_t *iscan, wlc_ssid_t *ssid, uint16 action); /* priv_link becomes netdev->priv and is the link between netdev and wlif struct */ typedef struct priv_link { wl_iw_t *wliw; } priv_link_t; /* dev to priv_link */ #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24)) #define WL_DEV_LINK(dev) (priv_link_t*)(dev->priv) #else #define WL_DEV_LINK(dev) (priv_link_t*)netdev_priv(dev) #endif /* dev to wl_iw_t */ #define IW_DEV_IF(dev) ((wl_iw_t*)(WL_DEV_LINK(dev))->wliw) static void swap_key_from_BE( wl_wsec_key_t *key ) { key->index = htod32(key->index); key->len = htod32(key->len); key->algo = htod32(key->algo); key->flags = htod32(key->flags); key->rxiv.hi = htod32(key->rxiv.hi); key->rxiv.lo = htod16(key->rxiv.lo); key->iv_initialized = htod32(key->iv_initialized); } static void swap_key_to_BE( wl_wsec_key_t *key ) { key->index = dtoh32(key->index); key->len = dtoh32(key->len); key->algo = dtoh32(key->algo); key->flags = dtoh32(key->flags); key->rxiv.hi = dtoh32(key->rxiv.hi); key->rxiv.lo = dtoh16(key->rxiv.lo); key->iv_initialized = dtoh32(key->iv_initialized); } static int dev_wlc_ioctl( struct net_device *dev, int cmd, void *arg, int len ) { struct ifreq ifr; wl_ioctl_t ioc; mm_segment_t fs; int ret; memset(&ioc, 0, sizeof(ioc)); ioc.cmd = cmd; ioc.buf = arg; ioc.len = len; strcpy(ifr.ifr_name, dev->name); ifr.ifr_data = (caddr_t) &ioc; #ifndef LINUX_HYBRID /* Causes an extraneous 'up'. If specific ioctls are failing due to device down, then we can investigate those ioctls. 
*/ dev_open(dev); #endif fs = get_fs(); set_fs(get_ds()); #if defined(WL_USE_NETDEV_OPS) ret = dev->netdev_ops->ndo_do_ioctl(dev, &ifr, SIOCDEVPRIVATE); #else ret = dev->do_ioctl(dev, &ifr, SIOCDEVPRIVATE); #endif set_fs(fs); return ret; } /* set named driver variable to int value and return error indication calling example: dev_wlc_intvar_set(dev, "arate", rate) */ static int dev_wlc_intvar_set( struct net_device *dev, char *name, int val) { char buf[WLC_IOCTL_SMLEN]; uint len; val = htod32(val); len = bcm_mkiovar(name, (char *)(&val), sizeof(val), buf, sizeof(buf)); ASSERT(len); return (dev_wlc_ioctl(dev, WLC_SET_VAR, buf, len)); } static int dev_iw_iovar_setbuf( struct net_device *dev, char *iovar, void *param, int paramlen, void *bufptr, int buflen) { int iolen; iolen = bcm_mkiovar(iovar, param, paramlen, bufptr, buflen); ASSERT(iolen); BCM_REFERENCE(iolen); return (dev_wlc_ioctl(dev, WLC_SET_VAR, bufptr, iolen)); } static int dev_iw_iovar_getbuf( struct net_device *dev, char *iovar, void *param, int paramlen, void *bufptr, int buflen) { int iolen; iolen = bcm_mkiovar(iovar, param, paramlen, bufptr, buflen); ASSERT(iolen); BCM_REFERENCE(iolen); return (dev_wlc_ioctl(dev, WLC_GET_VAR, bufptr, buflen)); } #if WIRELESS_EXT > 17 static int dev_wlc_bufvar_set( struct net_device *dev, char *name, char *buf, int len) { char *ioctlbuf; uint buflen; int error; ioctlbuf = kmalloc(MAX_WLIW_IOCTL_LEN, GFP_KERNEL); if (!ioctlbuf) return -ENOMEM; buflen = bcm_mkiovar(name, buf, len, ioctlbuf, MAX_WLIW_IOCTL_LEN); ASSERT(buflen); error = dev_wlc_ioctl(dev, WLC_SET_VAR, ioctlbuf, buflen); kfree(ioctlbuf); return error; } #endif /* WIRELESS_EXT > 17 */ /* get named driver variable to int value and return error indication calling example: dev_wlc_bufvar_get(dev, "arate", &rate) */ static int dev_wlc_bufvar_get( struct net_device *dev, char *name, char *buf, int buflen) { char *ioctlbuf; int error; uint len; ioctlbuf = kmalloc(MAX_WLIW_IOCTL_LEN, GFP_KERNEL); if (!ioctlbuf) 
return -ENOMEM; len = bcm_mkiovar(name, NULL, 0, ioctlbuf, MAX_WLIW_IOCTL_LEN); ASSERT(len); BCM_REFERENCE(len); error = dev_wlc_ioctl(dev, WLC_GET_VAR, (void *)ioctlbuf, MAX_WLIW_IOCTL_LEN); if (!error) bcopy(ioctlbuf, buf, buflen); kfree(ioctlbuf); return (error); } /* get named driver variable to int value and return error indication calling example: dev_wlc_intvar_get(dev, "arate", &rate) */ static int dev_wlc_intvar_get( struct net_device *dev, char *name, int *retval) { union { char buf[WLC_IOCTL_SMLEN]; int val; } var; int error; uint len; uint data_null; len = bcm_mkiovar(name, (char *)(&data_null), 0, (char *)(&var), sizeof(var.buf)); ASSERT(len); error = dev_wlc_ioctl(dev, WLC_GET_VAR, (void *)&var, len); *retval = dtoh32(var.val); return (error); } /* Maintain backward compatibility */ #if WIRELESS_EXT < 13 struct iw_request_info { __u16 cmd; /* Wireless Extension command */ __u16 flags; /* More to come ;-) */ }; typedef int (*iw_handler)(struct net_device *dev, struct iw_request_info *info, void *wrqu, char *extra); #endif /* WIRELESS_EXT < 13 */ #if WIRELESS_EXT > 12 static int wl_iw_set_leddc( struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra ) { int dc = *(int *)extra; int error; error = dev_wlc_intvar_set(dev, "leddc", dc); return error; } static int wl_iw_set_vlanmode( struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra ) { int mode = *(int *)extra; int error; mode = htod32(mode); error = dev_wlc_intvar_set(dev, "vlan_mode", mode); return error; } static int wl_iw_set_pm( struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra ) { int pm = *(int *)extra; int error; pm = htod32(pm); error = dev_wlc_ioctl(dev, WLC_SET_PM, &pm, sizeof(pm)); return error; } #if WIRELESS_EXT > 17 #endif /* WIRELESS_EXT > 17 */ #endif /* WIRELESS_EXT > 12 */ int wl_iw_send_priv_event( struct net_device *dev, char *flag ) { union iwreq_data wrqu; char 
extra[IW_CUSTOM_MAX + 1]; int cmd; cmd = IWEVCUSTOM; memset(&wrqu, 0, sizeof(wrqu)); if (strlen(flag) > sizeof(extra)) return -1; strcpy(extra, flag); wrqu.data.length = strlen(extra); wireless_send_event(dev, cmd, &wrqu, extra); WL_TRACE(("Send IWEVCUSTOM Event as %s\n", extra)); return 0; } static int wl_iw_config_commit( struct net_device *dev, struct iw_request_info *info, void *zwrq, char *extra ) { wlc_ssid_t ssid; int error; struct sockaddr bssid; WL_TRACE(("%s: SIOCSIWCOMMIT\n", dev->name)); if ((error = dev_wlc_ioctl(dev, WLC_GET_SSID, &ssid, sizeof(ssid)))) return error; ssid.SSID_len = dtoh32(ssid.SSID_len); if (!ssid.SSID_len) return 0; bzero(&bssid, sizeof(struct sockaddr)); if ((error = dev_wlc_ioctl(dev, WLC_REASSOC, &bssid, ETHER_ADDR_LEN))) { WL_ERROR(("%s: WLC_REASSOC failed (%d)\n", __FUNCTION__, error)); return error; } return 0; } static int wl_iw_get_name( struct net_device *dev, struct iw_request_info *info, union iwreq_data *cwrq, char *extra ) { int phytype, err; uint band[3]; char cap[5]; WL_TRACE(("%s: SIOCGIWNAME\n", dev->name)); cap[0] = 0; if ((err = dev_wlc_ioctl(dev, WLC_GET_PHYTYPE, &phytype, sizeof(phytype))) < 0) goto done; if ((err = dev_wlc_ioctl(dev, WLC_GET_BANDLIST, band, sizeof(band))) < 0) goto done; band[0] = dtoh32(band[0]); switch (phytype) { case WLC_PHY_TYPE_A: strcpy(cap, "a"); break; case WLC_PHY_TYPE_B: strcpy(cap, "b"); break; case WLC_PHY_TYPE_LP: case WLC_PHY_TYPE_G: if (band[0] >= 2) strcpy(cap, "abg"); else strcpy(cap, "bg"); break; case WLC_PHY_TYPE_N: if (band[0] >= 2) strcpy(cap, "abgn"); else strcpy(cap, "bgn"); break; } done: snprintf(cwrq->name, IFNAMSIZ, "IEEE 802.11%s", cap); return 0; } static int wl_iw_set_freq( struct net_device *dev, struct iw_request_info *info, struct iw_freq *fwrq, char *extra ) { int error, chan; uint sf = 0; WL_TRACE(("%s: SIOCSIWFREQ\n", dev->name)); /* Setting by channel number */ if (fwrq->e == 0 && fwrq->m < MAXCHANNEL) { chan = fwrq->m; } /* Setting by frequency */ else { 
/* Convert to MHz as best we can */ if (fwrq->e >= 6) { fwrq->e -= 6; while (fwrq->e--) fwrq->m *= 10; } else if (fwrq->e < 6) { while (fwrq->e++ < 6) fwrq->m /= 10; } /* handle 4.9GHz frequencies as Japan 4 GHz based channelization */ if (fwrq->m > 4000 && fwrq->m < 5000) sf = WF_CHAN_FACTOR_4_G; /* start factor for 4 GHz */ chan = wf_mhz2channel(fwrq->m, sf); } chan = htod32(chan); if ((error = dev_wlc_ioctl(dev, WLC_SET_CHANNEL, &chan, sizeof(chan)))) return error; /* -EINPROGRESS: Call commit handler */ return -EINPROGRESS; } static int wl_iw_get_freq( struct net_device *dev, struct iw_request_info *info, struct iw_freq *fwrq, char *extra ) { channel_info_t ci; int error; WL_TRACE(("%s: SIOCGIWFREQ\n", dev->name)); if ((error = dev_wlc_ioctl(dev, WLC_GET_CHANNEL, &ci, sizeof(ci)))) return error; /* Return radio channel in channel form */ fwrq->m = dtoh32(ci.hw_channel); fwrq->e = dtoh32(0); return 0; } static int wl_iw_set_mode( struct net_device *dev, struct iw_request_info *info, __u32 *uwrq, char *extra ) { int infra = 0, ap = 0, error = 0; WL_TRACE(("%s: SIOCSIWMODE\n", dev->name)); switch (*uwrq) { case IW_MODE_MASTER: infra = ap = 1; break; case IW_MODE_ADHOC: case IW_MODE_AUTO: break; case IW_MODE_INFRA: infra = 1; break; default: return -EINVAL; } infra = htod32(infra); ap = htod32(ap); if ((error = dev_wlc_ioctl(dev, WLC_SET_INFRA, &infra, sizeof(infra))) || (error = dev_wlc_ioctl(dev, WLC_SET_AP, &ap, sizeof(ap)))) return error; /* -EINPROGRESS: Call commit handler */ return -EINPROGRESS; } static int wl_iw_get_mode( struct net_device *dev, struct iw_request_info *info, __u32 *uwrq, char *extra ) { int error, infra = 0, ap = 0; WL_TRACE(("%s: SIOCGIWMODE\n", dev->name)); if ((error = dev_wlc_ioctl(dev, WLC_GET_INFRA, &infra, sizeof(infra))) || (error = dev_wlc_ioctl(dev, WLC_GET_AP, &ap, sizeof(ap)))) return error; infra = dtoh32(infra); ap = dtoh32(ap); *uwrq = infra ? ap ? 
IW_MODE_MASTER : IW_MODE_INFRA : IW_MODE_ADHOC; return 0; } static int wl_iw_get_range( struct net_device *dev, struct iw_request_info *info, struct iw_point *dwrq, char *extra ) { struct iw_range *range = (struct iw_range *) extra; static int channels[MAXCHANNEL+1]; wl_uint32_list_t *list = (wl_uint32_list_t *) channels; wl_rateset_t rateset; int error, i, k; uint sf, ch; int phytype; int bw_cap = 0, sgi_tx = 0, nmode = 0; channel_info_t ci; uint8 nrate_list2copy = 0; uint16 nrate_list[4][8] = { {13, 26, 39, 52, 78, 104, 117, 130}, {14, 29, 43, 58, 87, 116, 130, 144}, {27, 54, 81, 108, 162, 216, 243, 270}, {30, 60, 90, 120, 180, 240, 270, 300}}; int fbt_cap = 0; WL_TRACE(("%s: SIOCGIWRANGE\n", dev->name)); if (!extra) return -EINVAL; dwrq->length = sizeof(struct iw_range); memset(range, 0, sizeof(*range)); /* We don't use nwids */ range->min_nwid = range->max_nwid = 0; /* Set available channels/frequencies */ list->count = htod32(MAXCHANNEL); if ((error = dev_wlc_ioctl(dev, WLC_GET_VALID_CHANNELS, channels, sizeof(channels)))) return error; for (i = 0; i < dtoh32(list->count) && i < IW_MAX_FREQUENCIES; i++) { range->freq[i].i = dtoh32(list->element[i]); ch = dtoh32(list->element[i]); if (ch <= CH_MAX_2G_CHANNEL) sf = WF_CHAN_FACTOR_2_4_G; else sf = WF_CHAN_FACTOR_5_G; range->freq[i].m = wf_channel2mhz(ch, sf); range->freq[i].e = 6; } range->num_frequency = range->num_channels = i; /* Link quality (use NDIS cutoffs) */ range->max_qual.qual = 5; /* Signal level (use RSSI) */ range->max_qual.level = 0x100 - 200; /* -200 dBm */ /* Noise level (use noise) */ range->max_qual.noise = 0x100 - 200; /* -200 dBm */ /* Signal level threshold range (?) 
*/ range->sensitivity = 65535; #if WIRELESS_EXT > 11 /* Link quality (use NDIS cutoffs) */ range->avg_qual.qual = 3; /* Signal level (use RSSI) */ range->avg_qual.level = 0x100 + WL_IW_RSSI_GOOD; /* Noise level (use noise) */ range->avg_qual.noise = 0x100 - 75; /* -75 dBm */ #endif /* WIRELESS_EXT > 11 */ /* Set available bitrates */ if ((error = dev_wlc_ioctl(dev, WLC_GET_CURR_RATESET, &rateset, sizeof(rateset)))) return error; rateset.count = dtoh32(rateset.count); range->num_bitrates = rateset.count; for (i = 0; i < rateset.count && i < IW_MAX_BITRATES; i++) range->bitrate[i] = (rateset.rates[i] & 0x7f) * 500000; /* convert to bps */ if ((error = dev_wlc_intvar_get(dev, "nmode", &nmode))) return error; if ((error = dev_wlc_ioctl(dev, WLC_GET_PHYTYPE, &phytype, sizeof(phytype)))) return error; if (nmode == 1 && ((phytype == WLC_PHY_TYPE_SSN) || (phytype == WLC_PHY_TYPE_LCN) || (phytype == WLC_PHY_TYPE_LCN40))) { if ((error = dev_wlc_intvar_get(dev, "mimo_bw_cap", &bw_cap))) return error; if ((error = dev_wlc_intvar_get(dev, "sgi_tx", &sgi_tx))) return error; if ((error = dev_wlc_ioctl(dev, WLC_GET_CHANNEL, &ci, sizeof(channel_info_t)))) return error; ci.hw_channel = dtoh32(ci.hw_channel); if (bw_cap == 0 || (bw_cap == 2 && ci.hw_channel <= 14)) { if (sgi_tx == 0) nrate_list2copy = 0; else nrate_list2copy = 1; } if (bw_cap == 1 || (bw_cap == 2 && ci.hw_channel >= 36)) { if (sgi_tx == 0) nrate_list2copy = 2; else nrate_list2copy = 3; } range->num_bitrates += 8; ASSERT(range->num_bitrates < IW_MAX_BITRATES); for (k = 0; i < range->num_bitrates; k++, i++) { /* convert to bps */ range->bitrate[i] = (nrate_list[nrate_list2copy][k]) * 500000; } } /* Set an indication of the max TCP throughput * in bit/s that we can expect using this interface. * May be use for QoS stuff... 
Jean II */ if ((error = dev_wlc_ioctl(dev, WLC_GET_PHYTYPE, &i, sizeof(i)))) return error; i = dtoh32(i); if (i == WLC_PHY_TYPE_A) range->throughput = 24000000; /* 24 Mbits/s */ else range->throughput = 1500000; /* 1.5 Mbits/s */ /* RTS and fragmentation thresholds */ range->min_rts = 0; range->max_rts = 2347; range->min_frag = 256; range->max_frag = 2346; range->max_encoding_tokens = DOT11_MAX_DEFAULT_KEYS; range->num_encoding_sizes = 4; range->encoding_size[0] = WEP1_KEY_SIZE; range->encoding_size[1] = WEP128_KEY_SIZE; #if WIRELESS_EXT > 17 range->encoding_size[2] = TKIP_KEY_SIZE; #else range->encoding_size[2] = 0; #endif range->encoding_size[3] = AES_KEY_SIZE; /* Do not support power micro-management */ range->min_pmp = 0; range->max_pmp = 0; range->min_pmt = 0; range->max_pmt = 0; range->pmp_flags = 0; range->pm_capa = 0; /* Transmit Power - values are in mW */ range->num_txpower = 2; range->txpower[0] = 1; range->txpower[1] = 255; range->txpower_capa = IW_TXPOW_MWATT; #if WIRELESS_EXT > 10 range->we_version_compiled = WIRELESS_EXT; range->we_version_source = 19; /* Only support retry limits */ range->retry_capa = IW_RETRY_LIMIT; range->retry_flags = IW_RETRY_LIMIT; range->r_time_flags = 0; /* SRL and LRL limits */ range->min_retry = 1; range->max_retry = 255; /* Retry lifetime limits unsupported */ range->min_r_time = 0; range->max_r_time = 0; #endif /* WIRELESS_EXT > 10 */ #if WIRELESS_EXT > 17 range->enc_capa = IW_ENC_CAPA_WPA; range->enc_capa |= IW_ENC_CAPA_CIPHER_TKIP; range->enc_capa |= IW_ENC_CAPA_CIPHER_CCMP; range->enc_capa |= IW_ENC_CAPA_WPA2; /* Determine driver FBT capability. */ if (dev_wlc_intvar_get(dev, "fbt_cap", &fbt_cap) == 0) { if (fbt_cap == WLC_FBT_CAP_DRV_4WAY_AND_REASSOC) { /* Tell the host (e.g. 
wpa_supplicant) to let driver do the handshake */ range->enc_capa |= IW_ENC_CAPA_4WAY_HANDSHAKE; } } #ifdef BCMFW_ROAM_ENABLE_WEXT /* Advertise firmware roam capability to the external supplicant */ range->enc_capa |= IW_ENC_CAPA_FW_ROAM_ENABLE; #endif /* BCMFW_ROAM_ENABLE_WEXT */ /* Event capability (kernel) */ IW_EVENT_CAPA_SET_KERNEL(range->event_capa); /* Event capability (driver) */ IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWAP); IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWSCAN); IW_EVENT_CAPA_SET(range->event_capa, IWEVTXDROP); IW_EVENT_CAPA_SET(range->event_capa, IWEVMICHAELMICFAILURE); IW_EVENT_CAPA_SET(range->event_capa, IWEVASSOCREQIE); IW_EVENT_CAPA_SET(range->event_capa, IWEVASSOCRESPIE); IW_EVENT_CAPA_SET(range->event_capa, IWEVPMKIDCAND); #if WIRELESS_EXT >= 22 && defined(IW_SCAN_CAPA_ESSID) /* FC7 wireless.h defines EXT 22 but doesn't define scan_capa bits */ range->scan_capa = IW_SCAN_CAPA_ESSID; #endif #endif /* WIRELESS_EXT > 17 */ return 0; } static int rssi_to_qual(int rssi) { if (rssi <= WL_IW_RSSI_NO_SIGNAL) return 0; else if (rssi <= WL_IW_RSSI_VERY_LOW) return 1; else if (rssi <= WL_IW_RSSI_LOW) return 2; else if (rssi <= WL_IW_RSSI_GOOD) return 3; else if (rssi <= WL_IW_RSSI_VERY_GOOD) return 4; else return 5; } static int wl_iw_set_spy( struct net_device *dev, struct iw_request_info *info, struct iw_point *dwrq, char *extra ) { wl_iw_t *iw = IW_DEV_IF(dev); struct sockaddr *addr = (struct sockaddr *) extra; int i; WL_TRACE(("%s: SIOCSIWSPY\n", dev->name)); if (!extra) return -EINVAL; iw->spy_num = MIN(ARRAYSIZE(iw->spy_addr), dwrq->length); for (i = 0; i < iw->spy_num; i++) memcpy(&iw->spy_addr[i], addr[i].sa_data, ETHER_ADDR_LEN); memset(iw->spy_qual, 0, sizeof(iw->spy_qual)); return 0; } static int wl_iw_get_spy( struct net_device *dev, struct iw_request_info *info, struct iw_point *dwrq, char *extra ) { wl_iw_t *iw = IW_DEV_IF(dev); struct sockaddr *addr = (struct sockaddr *) extra; struct iw_quality *qual = (struct iw_quality *) 
&addr[iw->spy_num]; int i; WL_TRACE(("%s: SIOCGIWSPY\n", dev->name)); if (!extra) return -EINVAL; dwrq->length = iw->spy_num; for (i = 0; i < iw->spy_num; i++) { memcpy(addr[i].sa_data, &iw->spy_addr[i], ETHER_ADDR_LEN); addr[i].sa_family = AF_UNIX; memcpy(&qual[i], &iw->spy_qual[i], sizeof(struct iw_quality)); iw->spy_qual[i].updated = 0; } return 0; } static int wl_iw_set_wap( struct net_device *dev, struct iw_request_info *info, struct sockaddr *awrq, char *extra ) { int error = -EINVAL; WL_TRACE(("%s: SIOCSIWAP\n", dev->name)); if (awrq->sa_family != ARPHRD_ETHER) { WL_ERROR(("%s: Invalid Header...sa_family\n", __FUNCTION__)); return -EINVAL; } /* Ignore "auto" or "off" */ if (ETHER_ISBCAST(awrq->sa_data) || ETHER_ISNULLADDR(awrq->sa_data)) { scb_val_t scbval; bzero(&scbval, sizeof(scb_val_t)); if ((error = dev_wlc_ioctl(dev, WLC_DISASSOC, &scbval, sizeof(scb_val_t)))) { WL_ERROR(("%s: WLC_DISASSOC failed (%d).\n", __FUNCTION__, error)); } return 0; } /* WL_ASSOC(("Assoc to %s\n", bcm_ether_ntoa((struct ether_addr *)&(awrq->sa_data), * eabuf))); */ /* Reassociate to the specified AP */ if ((error = dev_wlc_ioctl(dev, WLC_REASSOC, awrq->sa_data, ETHER_ADDR_LEN))) { WL_ERROR(("%s: WLC_REASSOC failed (%d).\n", __FUNCTION__, error)); return error; } return 0; } static int wl_iw_get_wap( struct net_device *dev, struct iw_request_info *info, struct sockaddr *awrq, char *extra ) { WL_TRACE(("%s: SIOCGIWAP\n", dev->name)); awrq->sa_family = ARPHRD_ETHER; memset(awrq->sa_data, 0, ETHER_ADDR_LEN); /* Ignore error (may be down or disassociated) */ (void) dev_wlc_ioctl(dev, WLC_GET_BSSID, awrq->sa_data, ETHER_ADDR_LEN); return 0; } #if WIRELESS_EXT > 17 static int wl_iw_mlme( struct net_device *dev, struct iw_request_info *info, struct sockaddr *awrq, char *extra ) { struct iw_mlme *mlme; scb_val_t scbval; int error = -EINVAL; WL_TRACE(("%s: SIOCSIWMLME\n", dev->name)); mlme = (struct iw_mlme *)extra; if (mlme == NULL) { WL_ERROR(("Invalid ioctl data.\n")); return error; } 
	scbval.val = mlme->reason_code;
	bcopy(&mlme->addr.sa_data, &scbval.ea, ETHER_ADDR_LEN);

	if (mlme->cmd == IW_MLME_DISASSOC) {
		scbval.val = htod32(scbval.val);
		error = dev_wlc_ioctl(dev, WLC_DISASSOC, &scbval, sizeof(scb_val_t));
	}
	else if (mlme->cmd == IW_MLME_DEAUTH) {
		scbval.val = htod32(scbval.val);
		error = dev_wlc_ioctl(dev, WLC_SCB_DEAUTHENTICATE_FOR_REASON, &scbval,
			sizeof(scb_val_t));
	}
	else {
		WL_ERROR(("%s: Invalid ioctl data.\n", __FUNCTION__));
		return error;
	}

	return error;
}
#endif /* WIRELESS_EXT > 17 */

/* SIOCGIWAPLIST handler: fetch the driver's scan results and return one
 * sockaddr (BSSID) plus one iw_quality entry per infrastructure BSS, up to
 * IW_MAX_AP. The qual[] array is appended after the addresses in 'extra'. */
static int
wl_iw_get_aplist(
	struct net_device *dev,
	struct iw_request_info *info,
	struct iw_point *dwrq,
	char *extra
)
{
	wl_scan_results_t *list;
	struct sockaddr *addr = (struct sockaddr *) extra;
	struct iw_quality qual[IW_MAX_AP];
	wl_bss_info_t *bi = NULL;
	int error, i;
	uint buflen = dwrq->length;

	WL_TRACE(("%s: SIOCGIWAPLIST\n", dev->name));

	if (!extra)
		return -EINVAL;

	/* Get scan results (too large to put on the stack) */
	list = kmalloc(buflen, GFP_KERNEL);
	if (!list)
		return -ENOMEM;
	memset(list, 0, buflen);
	list->buflen = htod32(buflen);
	if ((error = dev_wlc_ioctl(dev, WLC_SCAN_RESULTS, list, buflen))) {
		WL_ERROR(("%d: Scan results error %d\n", __LINE__, error));
		kfree(list);
		return error;
	}
	list->buflen = dtoh32(list->buflen);
	list->version = dtoh32(list->version);
	list->count = dtoh32(list->count);
	ASSERT(list->version == WL_BSS_INFO_VERSION);

	/* bss_info records are variable-length; each entry starts bi->length
	 * bytes after the previous one */
	for (i = 0, dwrq->length = 0; i < list->count && dwrq->length < IW_MAX_AP; i++) {
		bi = bi ?
			(wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length)) : list->bss_info;
		ASSERT(((uintptr)bi + dtoh32(bi->length)) <= ((uintptr)list +
			buflen));

		/* Infrastructure only */
		if (!(dtoh16(bi->capability) & DOT11_CAP_ESS))
			continue;

		/* BSSID */
		memcpy(addr[dwrq->length].sa_data, &bi->BSSID, ETHER_ADDR_LEN);
		addr[dwrq->length].sa_family = ARPHRD_ETHER;
		qual[dwrq->length].qual = rssi_to_qual(dtoh16(bi->RSSI));
		qual[dwrq->length].level = 0x100 + dtoh16(bi->RSSI);
		qual[dwrq->length].noise = 0x100 + bi->phy_noise;

		/* Updated qual, level, and noise */
#if WIRELESS_EXT > 18
		qual[dwrq->length].updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;
#else
		qual[dwrq->length].updated = 7;
#endif /* WIRELESS_EXT > 18 */

		dwrq->length++;
	}

	kfree(list);

	if (dwrq->length) {
		memcpy(&addr[dwrq->length], qual, sizeof(struct iw_quality) * dwrq->length);
		/* Provided qual */
		dwrq->flags = 1;
	}

	return 0;
}

/* SIOCGIWAPLIST handler for incremental scan: walks the chained iscan result
 * buffers instead of issuing WLC_SCAN_RESULTS. Falls back to
 * wl_iw_get_aplist() when the iscan thread is not running.
 * NOTE(review): dwrq->length is reset for every buffer in the chain, so only
 * the last buffer's entries survive — confirm this matches intent. */
static int
wl_iw_iscan_get_aplist(
	struct net_device *dev,
	struct iw_request_info *info,
	struct iw_point *dwrq,
	char *extra
)
{
	wl_scan_results_t *list;
	iscan_buf_t * buf;
	iscan_info_t *iscan = g_iscan;

	struct sockaddr *addr = (struct sockaddr *) extra;
	struct iw_quality qual[IW_MAX_AP];
	wl_bss_info_t *bi = NULL;
	int i;

	WL_TRACE(("%s: SIOCGIWAPLIST\n", dev->name));

	if (!extra)
		return -EINVAL;

	if ((!iscan) || (iscan->sysioc_pid < 0)) {
		return wl_iw_get_aplist(dev, info, dwrq, extra);
	}

	buf = iscan->list_hdr;
	/* Get scan results (too large to put on the stack) */
	while (buf) {
		list = &((wl_iscan_results_t*)buf->iscan_buf)->results;
		ASSERT(list->version == WL_BSS_INFO_VERSION);

		bi = NULL;
		for (i = 0, dwrq->length = 0; i < list->count && dwrq->length < IW_MAX_AP; i++) {
			bi = bi ?
				(wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length)) : list->bss_info;
			ASSERT(((uintptr)bi + dtoh32(bi->length)) <= ((uintptr)list +
				WLC_IW_ISCAN_MAXLEN));

			/* Infrastructure only */
			if (!(dtoh16(bi->capability) & DOT11_CAP_ESS))
				continue;

			/* BSSID */
			memcpy(addr[dwrq->length].sa_data, &bi->BSSID, ETHER_ADDR_LEN);
			addr[dwrq->length].sa_family = ARPHRD_ETHER;
			qual[dwrq->length].qual = rssi_to_qual(dtoh16(bi->RSSI));
			qual[dwrq->length].level = 0x100 + dtoh16(bi->RSSI);
			qual[dwrq->length].noise = 0x100 + bi->phy_noise;

			/* Updated qual, level, and noise */
#if WIRELESS_EXT > 18
			qual[dwrq->length].updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;
#else
			qual[dwrq->length].updated = 7;
#endif /* WIRELESS_EXT > 18 */

			dwrq->length++;
		}
		buf = buf->next;
	}
	if (dwrq->length) {
		memcpy(&addr[dwrq->length], qual, sizeof(struct iw_quality) * dwrq->length);
		/* Provided qual */
		dwrq->flags = 1;
	}

	return 0;
}

#if WIRELESS_EXT > 13
/* SIOCSIWSCAN handler: kick off a (broadcast or directed) scan via WLC_SCAN.
 * Errors from the ioctl are ignored (a scan may already be in progress). */
static int
wl_iw_set_scan(
	struct net_device *dev,
	struct iw_request_info *info,
	union iwreq_data *wrqu,
	char *extra
)
{
	wlc_ssid_t ssid;

	WL_TRACE(("%s: SIOCSIWSCAN\n", dev->name));

	/* default Broadcast scan */
	memset(&ssid, 0, sizeof(ssid));

#if WIRELESS_EXT > 17
	/* check for given essid */
	if (wrqu->data.length == sizeof(struct iw_scan_req)) {
		if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
			struct iw_scan_req *req = (struct iw_scan_req *)extra;
			ssid.SSID_len = MIN(sizeof(ssid.SSID), req->essid_len);
			memcpy(ssid.SSID, req->essid, ssid.SSID_len);
			ssid.SSID_len = htod32(ssid.SSID_len);
		}
	}
#endif
	/* Ignore error (most likely scan in progress) */
	(void) dev_wlc_ioctl(dev, WLC_SCAN, &ssid, sizeof(ssid));

	return 0;
}

/* SIOCSIWSCAN handler for incremental scan: starts the iscan state machine
 * and (re)arms its polling timer. Falls back to wl_iw_set_scan() when the
 * iscan thread is not running. */
static int
wl_iw_iscan_set_scan(
	struct net_device *dev,
	struct iw_request_info *info,
	union iwreq_data *wrqu,
	char *extra
)
{
	wlc_ssid_t ssid;
	iscan_info_t *iscan = g_iscan;

	WL_TRACE(("%s: SIOCSIWSCAN\n", dev->name));

	/* use backup if our thread is not successful */
	if ((!iscan) || (iscan->sysioc_pid < 0)) {
		return wl_iw_set_scan(dev, info, wrqu, extra);
	}
	if (iscan->iscan_state == ISCAN_STATE_SCANING) {
		return 0;
	}

	/* default Broadcast scan */
	memset(&ssid, 0, sizeof(ssid));

#if WIRELESS_EXT > 17
	/* check for given essid */
	if (wrqu->data.length == sizeof(struct iw_scan_req)) {
		if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
			struct iw_scan_req *req = (struct iw_scan_req *)extra;
			ssid.SSID_len = MIN(sizeof(ssid.SSID), req->essid_len);
			memcpy(ssid.SSID, req->essid, ssid.SSID_len);
			ssid.SSID_len = htod32(ssid.SSID_len);
		}
	}
#endif

	iscan->list_cur = iscan->list_hdr;
	iscan->iscan_state = ISCAN_STATE_SCANING;

	wl_iw_set_event_mask(dev);
	wl_iw_iscan(iscan, &ssid, WL_SCAN_ACTION_START);

	iscan->timer.expires = jiffies + msecs_to_jiffies(iscan->timer_ms);
	add_timer(&iscan->timer);
	iscan->timer_on = 1;

	return 0;
}

#if WIRELESS_EXT > 17
/* Returns TRUE if *wpaie points at a WPA vendor IE (WPA_OUI, type 1).
 * On FALSE, advances *tlvs / *tlvs_len past this IE so the caller's
 * bcm_parse_tlvs() loop resumes at the next TLV.
 * NOTE(review): ie[1] is read before any bounds check against *tlvs_len —
 * relies on the caller having validated the TLV stream. */
static bool
ie_is_wpa_ie(uint8 **wpaie, uint8 **tlvs, int *tlvs_len)
{
	/* Is this body of this tlvs entry a WPA entry? If */
	/* not update the tlvs buffer pointer/length */
	uint8 *ie = *wpaie;

	/* If the contents match the WPA_OUI and type=1 */
	if ((ie[1] >= 6) &&
		!bcmp((const void *)&ie[2], (const void *)(WPA_OUI "\x01"), 4)) {
		return TRUE;
	}

	/* point to the next ie */
	ie += ie[1] + 2;
	/* calculate the length of the rest of the buffer */
	*tlvs_len -= (int)(ie - *tlvs);
	/* update the pointer to the start of the buffer */
	*tlvs = ie;
	return FALSE;
}

/* Returns TRUE if *wpsie points at a WPS vendor IE (WPA_OUI, type 4);
 * otherwise advances *tlvs / *tlvs_len past this IE (same contract as
 * ie_is_wpa_ie). */
static bool
ie_is_wps_ie(uint8 **wpsie, uint8 **tlvs, int *tlvs_len)
{
	/* Is this body of this tlvs entry a WPS entry? If */
	/* not update the tlvs buffer pointer/length */
	uint8 *ie = *wpsie;

	/* If the contents match the WPA_OUI and type=4 */
	if ((ie[1] >= 4) &&
		!bcmp((const void *)&ie[2], (const void *)(WPA_OUI "\x04"), 4)) {
		return TRUE;
	}

	/* point to the next ie */
	ie += ie[1] + 2;
	/* calculate the length of the rest of the buffer */
	*tlvs_len -= (int)(ie - *tlvs);
	/* update the pointer to the start of the buffer */
	*tlvs = ie;
	return FALSE;
}
#endif /* WIRELESS_EXT > 17 */

#ifdef BCMWAPI_WPI
/* Hex-encode 'len' bytes of 'data' into 'buf' (NUL-terminated, truncating
 * at buf_size); returns the number of characters written. */
static inline int _wpa_snprintf_hex(char *buf, size_t buf_size, const u8 *data,
	size_t len, int uppercase)
{
	size_t i;
	char *pos = buf, *end = buf + buf_size;
	int ret;
	if (buf_size == 0)
		return 0;
	for (i = 0; i < len; i++) {
		ret = snprintf(pos, end - pos, uppercase ? "%02X" : "%02x",
			data[i]);
		if (ret < 0 || ret >= end - pos) {
			end[-1] = '\0';
			return pos - buf;
		}
		pos += ret;
	}
	end[-1] = '\0';
	return pos - buf;
}

/**
 * wpa_snprintf_hex - Print data as a hex string into a buffer
 * @buf: Memory area to use as the output buffer
 * @buf_size: Maximum buffer size in bytes (should be at least 2 * len + 1)
 * @data: Data to be printed
 * @len: Length of data in bytes
 * Returns: Number of bytes written
 */
static int
wpa_snprintf_hex(char *buf, size_t buf_size, const u8 *data, size_t len)
{
	return _wpa_snprintf_hex(buf, buf_size, data, len, 0);
}
#endif /* BCMWAPI_WPI */

/* Append IWEVGENIE (and, for WAPI without WAPI_IE_USE_GENIE, IWEVCUSTOM)
 * events for the RSN, MDIE, WPS, WPA and WAPI IEs found in bi's IE list to
 * the event stream at *event_p (bounded by 'end'). */
static int
wl_iw_handle_scanresults_ies(char **event_p, char *end,
	struct iw_request_info *info, wl_bss_info_t *bi)
{
#if WIRELESS_EXT > 17
	struct iw_event	iwe;
	char *event;
#ifdef BCMWAPI_WPI
	char *buf;
	int custom_event_len;
#endif

	event = *event_p;
	if (bi->ie_length) {
		/* look for wpa/rsn ies in the ie list... */
		bcm_tlv_t *ie;
		uint8 *ptr = ((uint8 *)bi) + sizeof(wl_bss_info_t);
		int ptr_len = bi->ie_length;

		if ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_RSN_ID))) {
			iwe.cmd = IWEVGENIE;
			iwe.u.data.length = ie->len + 2;
			event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie);
		}
		ptr = ((uint8 *)bi) + sizeof(wl_bss_info_t);

		if ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_MDIE_ID))) {
			iwe.cmd = IWEVGENIE;
			iwe.u.data.length = ie->len + 2;
			event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie);
		}
		ptr = ((uint8 *)bi) + sizeof(wl_bss_info_t);

		while ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_WPA_ID))) {
			/* look for WPS IE */
			if (ie_is_wps_ie(((uint8 **)&ie), &ptr, &ptr_len)) {
				iwe.cmd = IWEVGENIE;
				iwe.u.data.length = ie->len + 2;
				event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie);
				break;
			}
		}

		ptr = ((uint8 *)bi) + sizeof(wl_bss_info_t);
		ptr_len = bi->ie_length;
		while ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_WPA_ID))) {
			if (ie_is_wpa_ie(((uint8 **)&ie), &ptr, &ptr_len)) {
				iwe.cmd = IWEVGENIE;
				iwe.u.data.length = ie->len + 2;
				event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie);
				break;
			}
		}

#ifdef BCMWAPI_WPI
		ptr = ((uint8 *)bi) + sizeof(wl_bss_info_t);
		ptr_len = bi->ie_length;

		while ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_WAPI_ID))) {
			WL_TRACE(("%s: found a WAPI IE...\n", __FUNCTION__));
#ifdef WAPI_IE_USE_GENIE
			iwe.cmd = IWEVGENIE;
			iwe.u.data.length = ie->len + 2;
			event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie);
#else /* using CUSTOM event */
			iwe.cmd = IWEVCUSTOM;
			custom_event_len = strlen("wapi_ie=") + 2*(ie->len + 2);
			iwe.u.data.length = custom_event_len;
			buf = kmalloc(custom_event_len+1, GFP_KERNEL);
			if (buf == NULL)
			{
				WL_ERROR(("malloc(%d) returned NULL...\n", custom_event_len));
				break;
			}
			/* "wapi_ie=" + hex(id) + hex(len) + hex(data) */
			memcpy(buf, "wapi_ie=", 8);
			wpa_snprintf_hex(buf + 8, 2+1, &(ie->id), 1);
			wpa_snprintf_hex(buf + 10, 2+1, &(ie->len), 1);
			wpa_snprintf_hex(buf + 12, 2*ie->len+1, ie->data, ie->len);
			event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, buf);
			kfree(buf);
#endif /* WAPI_IE_USE_GENIE */
			break;
		}
#endif /* BCMWAPI_WPI */
	*event_p = event;
	}
#endif /* WIRELESS_EXT > 17 */

	return 0;
}

/* SIOCGIWSCAN handler: fetch scan results via WLC_SCAN_RESULTS and encode
 * each BSS as a WEXT event stream (BSSID, SSID, mode, channel, quality,
 * IEs, encryption, rates) into 'extra'. Returns -EAGAIN while a scan is
 * still running. */
static int
wl_iw_get_scan(
	struct net_device *dev,
	struct iw_request_info *info,
	struct iw_point *dwrq,
	char *extra
)
{
	channel_info_t ci;
	wl_scan_results_t *list;
	struct iw_event	iwe;
	wl_bss_info_t *bi = NULL;
	int error, i, j;
	char *event = extra, *end = extra + dwrq->length, *value;
	uint buflen = dwrq->length;

	WL_TRACE(("%s: SIOCGIWSCAN\n", dev->name));

	if (!extra)
		return -EINVAL;

	/* Check for scan in progress */
	if ((error = dev_wlc_ioctl(dev, WLC_GET_CHANNEL, &ci, sizeof(ci))))
		return error;
	ci.scan_channel = dtoh32(ci.scan_channel);
	if (ci.scan_channel)
		return -EAGAIN;

	/* Get scan results (too large to put on the stack) */
	list = kmalloc(buflen, GFP_KERNEL);
	if (!list)
		return -ENOMEM;
	memset(list, 0, buflen);
	list->buflen = htod32(buflen);
	if ((error = dev_wlc_ioctl(dev, WLC_SCAN_RESULTS, list, buflen))) {
		kfree(list);
		return error;
	}
	list->buflen = dtoh32(list->buflen);
	list->version = dtoh32(list->version);
	list->count = dtoh32(list->count);

	ASSERT(list->version == WL_BSS_INFO_VERSION);

	for (i = 0; i < list->count && i < IW_MAX_AP; i++) {
		bi = bi ?
			(wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length)) : list->bss_info;
		ASSERT(((uintptr)bi + dtoh32(bi->length)) <= ((uintptr)list +
			buflen));

		/* First entry must be the BSSID */
		iwe.cmd = SIOCGIWAP;
		iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
		memcpy(iwe.u.ap_addr.sa_data, &bi->BSSID, ETHER_ADDR_LEN);
		event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_ADDR_LEN);

		/* SSID */
		iwe.u.data.length = dtoh32(bi->SSID_len);
		iwe.cmd = SIOCGIWESSID;
		iwe.u.data.flags = 1;
		event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, bi->SSID);

		/* Mode */
		if (dtoh16(bi->capability) & (DOT11_CAP_ESS | DOT11_CAP_IBSS)) {
			iwe.cmd = SIOCGIWMODE;
			if (dtoh16(bi->capability) & DOT11_CAP_ESS)
				iwe.u.mode = IW_MODE_INFRA;
			else
				iwe.u.mode = IW_MODE_ADHOC;
			event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_UINT_LEN);
		}

		/* Channel */
		iwe.cmd = SIOCGIWFREQ;
		iwe.u.freq.m = wf_channel2mhz(CHSPEC_CHANNEL(bi->chanspec),
			CHSPEC_CHANNEL(bi->chanspec) <= CH_MAX_2G_CHANNEL ?
			WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G);
		iwe.u.freq.e = 6;
		event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_FREQ_LEN);

		/* Channel quality */
		iwe.cmd = IWEVQUAL;
		iwe.u.qual.qual = rssi_to_qual(dtoh16(bi->RSSI));
		iwe.u.qual.level = 0x100 + dtoh16(bi->RSSI);
		iwe.u.qual.noise = 0x100 + bi->phy_noise;
		event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_QUAL_LEN);

		/* WPA, WPA2, WPS, WAPI IEs */
		wl_iw_handle_scanresults_ies(&event, end, info, bi);

		/* Encryption */
		iwe.cmd = SIOCGIWENCODE;
		if (dtoh16(bi->capability) & DOT11_CAP_PRIVACY)
			iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
		else
			iwe.u.data.flags = IW_ENCODE_DISABLED;
		iwe.u.data.length = 0;
		event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)event);

		/* Rates */
		if (bi->rateset.count) {
			value = event + IW_EV_LCP_LEN;
			iwe.cmd = SIOCGIWRATE;
			/* Those two flags are ignored... */
			iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0;
			for (j = 0; j < bi->rateset.count && j < IW_MAX_BITRATES; j++) {
				iwe.u.bitrate.value = (bi->rateset.rates[j] & 0x7f) * 500000;
				value = IWE_STREAM_ADD_VALUE(info, event, value, end, &iwe,
					IW_EV_PARAM_LEN);
			}
			event = value;
		}
	}

	kfree(list);

	dwrq->length = event - extra;
	dwrq->flags = 0;	/* todo */

	return 0;
}

/* SIOCGIWSCAN handler for incremental scan: same encoding as
 * wl_iw_get_scan(), but walks the chained iscan buffers and performs
 * explicit overflow checks (-E2BIG) before each per-BSS group of events. */
static int
wl_iw_iscan_get_scan(
	struct net_device *dev,
	struct iw_request_info *info,
	struct iw_point *dwrq,
	char *extra
)
{
	wl_scan_results_t *list;
	struct iw_event	iwe;
	wl_bss_info_t *bi = NULL;
	int ii, j;
	int apcnt;
	char *event = extra, *end = extra + dwrq->length, *value;
	iscan_info_t *iscan = g_iscan;
	iscan_buf_t * p_buf;

	WL_TRACE(("%s: SIOCGIWSCAN\n", dev->name));

	if (!extra)
		return -EINVAL;

	/* use backup if our thread is not successful */
	if ((!iscan) || (iscan->sysioc_pid < 0)) {
		return wl_iw_get_scan(dev, info, dwrq, extra);
	}

	/* Check for scan in progress */
	if (iscan->iscan_state == ISCAN_STATE_SCANING)
		return -EAGAIN;

	apcnt = 0;
	p_buf = iscan->list_hdr;
	/* Get scan results */
	while (p_buf != iscan->list_cur) {
		list = &((wl_iscan_results_t*)p_buf->iscan_buf)->results;

		if (list->version != WL_BSS_INFO_VERSION) {
			WL_ERROR(("list->version %d != WL_BSS_INFO_VERSION\n", list->version));
		}

		bi = NULL;
		for (ii = 0; ii < list->count && apcnt < IW_MAX_AP; apcnt++, ii++) {
			bi = bi ?
				(wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length)) : list->bss_info;
			ASSERT(((uintptr)bi + dtoh32(bi->length)) <= ((uintptr)list +
				WLC_IW_ISCAN_MAXLEN));

			/* overflow check cover fields before wpa IEs */
			if (event + ETHER_ADDR_LEN + bi->SSID_len + IW_EV_UINT_LEN + IW_EV_FREQ_LEN +
				IW_EV_QUAL_LEN >= end)
				return -E2BIG;
			/* First entry must be the BSSID */
			iwe.cmd = SIOCGIWAP;
			iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
			memcpy(iwe.u.ap_addr.sa_data, &bi->BSSID, ETHER_ADDR_LEN);
			event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_ADDR_LEN);

			/* SSID */
			iwe.u.data.length = dtoh32(bi->SSID_len);
			iwe.cmd = SIOCGIWESSID;
			iwe.u.data.flags = 1;
			event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, bi->SSID);

			/* Mode */
			if (dtoh16(bi->capability) & (DOT11_CAP_ESS | DOT11_CAP_IBSS)) {
				iwe.cmd = SIOCGIWMODE;
				if (dtoh16(bi->capability) & DOT11_CAP_ESS)
					iwe.u.mode = IW_MODE_INFRA;
				else
					iwe.u.mode = IW_MODE_ADHOC;
				event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_UINT_LEN);
			}

			/* Channel */
			iwe.cmd = SIOCGIWFREQ;
			iwe.u.freq.m = wf_channel2mhz(CHSPEC_CHANNEL(bi->chanspec),
				CHSPEC_CHANNEL(bi->chanspec) <= CH_MAX_2G_CHANNEL ?
				WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G);
			iwe.u.freq.e = 6;
			event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_FREQ_LEN);

			/* Channel quality */
			iwe.cmd = IWEVQUAL;
			iwe.u.qual.qual = rssi_to_qual(dtoh16(bi->RSSI));
			iwe.u.qual.level = 0x100 + dtoh16(bi->RSSI);
			iwe.u.qual.noise = 0x100 + bi->phy_noise;
			event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_QUAL_LEN);

			/* WPA, WPA2, WPS, WAPI IEs */
			wl_iw_handle_scanresults_ies(&event, end, info, bi);

			/* Encryption */
			iwe.cmd = SIOCGIWENCODE;
			if (dtoh16(bi->capability) & DOT11_CAP_PRIVACY)
				iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
			else
				iwe.u.data.flags = IW_ENCODE_DISABLED;
			iwe.u.data.length = 0;
			event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)event);

			/* Rates */
			if (bi->rateset.count <= sizeof(bi->rateset.rates)) {
				if (event + IW_MAX_BITRATES*IW_EV_PARAM_LEN >= end)
					return -E2BIG;

				value = event + IW_EV_LCP_LEN;
				iwe.cmd = SIOCGIWRATE;
				/* Those two flags are ignored... */
				iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0;
				for (j = 0; j < bi->rateset.count && j < IW_MAX_BITRATES; j++) {
					iwe.u.bitrate.value = (bi->rateset.rates[j] & 0x7f) * 500000;
					value = IWE_STREAM_ADD_VALUE(info, event, value, end, &iwe,
						IW_EV_PARAM_LEN);
				}
				event = value;
			}
		}
		p_buf = p_buf->next;
	} /* while (p_buf) */

	dwrq->length = event - extra;
	dwrq->flags = 0;	/* todo */

	return 0;
}
#endif /* WIRELESS_EXT > 13 */


/* SIOCSIWESSID handler: associate to the given SSID, or disassociate when
 * the essid is empty ("essid off"). */
static int
wl_iw_set_essid(
	struct net_device *dev,
	struct iw_request_info *info,
	struct iw_point *dwrq,
	char *extra
)
{
	wlc_ssid_t ssid;
	int error;

	WL_TRACE(("%s: SIOCSIWESSID\n", dev->name));

	/* default Broadcast SSID */
	memset(&ssid, 0, sizeof(ssid));
	if (dwrq->length && extra) {
#if WIRELESS_EXT > 20
		ssid.SSID_len = MIN(sizeof(ssid.SSID), dwrq->length);
#else
		ssid.SSID_len = MIN(sizeof(ssid.SSID), dwrq->length-1);
#endif
		memcpy(ssid.SSID, extra, ssid.SSID_len);
		ssid.SSID_len = htod32(ssid.SSID_len);

		if ((error = dev_wlc_ioctl(dev, WLC_SET_SSID, &ssid, sizeof(ssid))))
			return error;
	}
	/* If essid
null then it is "iwconfig <interface> essid off" command */ else { scb_val_t scbval; bzero(&scbval, sizeof(scb_val_t)); if ((error = dev_wlc_ioctl(dev, WLC_DISASSOC, &scbval, sizeof(scb_val_t)))) return error; } return 0; } static int wl_iw_get_essid( struct net_device *dev, struct iw_request_info *info, struct iw_point *dwrq, char *extra ) { wlc_ssid_t ssid; int error; WL_TRACE(("%s: SIOCGIWESSID\n", dev->name)); if (!extra) return -EINVAL; if ((error = dev_wlc_ioctl(dev, WLC_GET_SSID, &ssid, sizeof(ssid)))) { WL_ERROR(("Error getting the SSID\n")); return error; } ssid.SSID_len = dtoh32(ssid.SSID_len); /* Get the current SSID */ memcpy(extra, ssid.SSID, ssid.SSID_len); dwrq->length = ssid.SSID_len; dwrq->flags = 1; /* active */ return 0; } static int wl_iw_set_nick( struct net_device *dev, struct iw_request_info *info, struct iw_point *dwrq, char *extra ) { wl_iw_t *iw = IW_DEV_IF(dev); WL_TRACE(("%s: SIOCSIWNICKN\n", dev->name)); if (!extra) return -EINVAL; /* Check the size of the string */ if (dwrq->length > sizeof(iw->nickname)) return -E2BIG; memcpy(iw->nickname, extra, dwrq->length); iw->nickname[dwrq->length - 1] = '\0'; return 0; } static int wl_iw_get_nick( struct net_device *dev, struct iw_request_info *info, struct iw_point *dwrq, char *extra ) { wl_iw_t *iw = IW_DEV_IF(dev); WL_TRACE(("%s: SIOCGIWNICKN\n", dev->name)); if (!extra) return -EINVAL; strcpy(extra, iw->nickname); dwrq->length = strlen(extra) + 1; return 0; } static int wl_iw_set_rate( struct net_device *dev, struct iw_request_info *info, struct iw_param *vwrq, char *extra ) { wl_rateset_t rateset; int error, rate, i, error_bg, error_a; WL_TRACE(("%s: SIOCSIWRATE\n", dev->name)); /* Get current rateset */ if ((error = dev_wlc_ioctl(dev, WLC_GET_CURR_RATESET, &rateset, sizeof(rateset)))) return error; rateset.count = dtoh32(rateset.count); if (vwrq->value < 0) { /* Select maximum rate */ rate = rateset.rates[rateset.count - 1] & 0x7f; } else if (vwrq->value < rateset.count) { /* Select rate 
by rateset index */
		rate = rateset.rates[vwrq->value] & 0x7f;
	} else {
		/* Specified rate in bps */
		rate = vwrq->value / 500000;
	}

	if (vwrq->fixed) {
		/*
			Set rate override,
			Since the is a/b/g-blind, both a/bg_rate are enforced.
		*/
		error_bg = dev_wlc_intvar_set(dev, "bg_rate", rate);
		error_a = dev_wlc_intvar_set(dev, "a_rate", rate);

		if (error_bg && error_a)
			return (error_bg | error_a);
	} else {
		/*
			clear rate override
			Since the is a/b/g-blind, both a/bg_rate are enforced.
		*/
		/* 0 is for clearing rate override */
		error_bg = dev_wlc_intvar_set(dev, "bg_rate", 0);
		/* 0 is for clearing rate override */
		error_a = dev_wlc_intvar_set(dev, "a_rate", 0);

		if (error_bg && error_a)
			return (error_bg | error_a);

		/* Remove rates above selected rate */
		for (i = 0; i < rateset.count; i++)
			if ((rateset.rates[i] & 0x7f) > rate)
				break;
		rateset.count = htod32(i);

		/* Set current rateset */
		if ((error = dev_wlc_ioctl(dev, WLC_SET_RATESET, &rateset, sizeof(rateset))))
			return error;
	}

	return 0;
}

/* SIOCGIWRATE handler: report the current tx rate in bits/s. */
static int wl_iw_get_rate(
	struct net_device *dev,
	struct iw_request_info *info,
	struct iw_param *vwrq,
	char *extra
)
{
	int error, rate;

	WL_TRACE(("%s: SIOCGIWRATE\n", dev->name));

	/* Report the current tx rate */
	if ((error = dev_wlc_ioctl(dev, WLC_GET_RATE, &rate, sizeof(rate))))
		return error;

	rate = dtoh32(rate);
	vwrq->value = rate * 500000;

	return 0;
}

/* SIOCSIWRTS handler: set the RTS threshold ("disabled" maps to the
 * 802.11 default/maximum). */
static int
wl_iw_set_rts(
	struct net_device *dev,
	struct iw_request_info *info,
	struct iw_param *vwrq,
	char *extra
)
{
	int error, rts;

	WL_TRACE(("%s: SIOCSIWRTS\n", dev->name));

	if (vwrq->disabled)
		rts = DOT11_DEFAULT_RTS_LEN;
	else if (vwrq->value < 0 || vwrq->value > DOT11_DEFAULT_RTS_LEN)
		return -EINVAL;
	else
		rts = vwrq->value;

	if ((error = dev_wlc_intvar_set(dev, "rtsthresh", rts)))
		return error;

	return 0;
}

/* SIOCGIWRTS handler: report the RTS threshold. */
static int
wl_iw_get_rts(
	struct net_device *dev,
	struct iw_request_info *info,
	struct iw_param *vwrq,
	char *extra
)
{
	int error, rts;

	WL_TRACE(("%s: SIOCGIWRTS\n", dev->name));

	if ((error = dev_wlc_intvar_get(dev, "rtsthresh", &rts)))
		return error;

	vwrq->value = rts;
	vwrq->disabled = (rts >= DOT11_DEFAULT_RTS_LEN);
	vwrq->fixed = 1;

	return 0;
}

/* SIOCSIWFRAG handler: set the fragmentation threshold. */
static int
wl_iw_set_frag(
	struct net_device *dev,
	struct iw_request_info *info,
	struct iw_param *vwrq,
	char *extra
)
{
	int error, frag;

	WL_TRACE(("%s: SIOCSIWFRAG\n", dev->name));

	if (vwrq->disabled)
		frag = DOT11_DEFAULT_FRAG_LEN;
	else if (vwrq->value < 0 || vwrq->value > DOT11_DEFAULT_FRAG_LEN)
		return -EINVAL;
	else
		frag = vwrq->value;

	if ((error = dev_wlc_intvar_set(dev, "fragthresh", frag)))
		return error;

	return 0;
}

/* SIOCGIWFRAG handler: report the fragmentation threshold. */
static int
wl_iw_get_frag(
	struct net_device *dev,
	struct iw_request_info *info,
	struct iw_param *vwrq,
	char *extra
)
{
	int error, fragthreshold;

	WL_TRACE(("%s: SIOCGIWFRAG\n", dev->name));

	if ((error = dev_wlc_intvar_get(dev, "fragthresh", &fragthreshold)))
		return error;

	vwrq->value = fragthreshold;
	vwrq->disabled = (fragthreshold >= DOT11_DEFAULT_FRAG_LEN);
	vwrq->fixed = 1;

	return 0;
}

/* SIOCSIWTXPOW handler: toggle the software radio switch and, for mW
 * requests, program the qdBm tx power override. */
static int
wl_iw_set_txpow(
	struct net_device *dev,
	struct iw_request_info *info,
	struct iw_param *vwrq,
	char *extra
)
{
	int error, disable;
	uint16 txpwrmw;
	WL_TRACE(("%s: SIOCSIWTXPOW\n", dev->name));

	/* Make sure radio is off or on as far as software is concerned */
	disable = vwrq->disabled ? WL_RADIO_SW_DISABLE : 0;
	disable += WL_RADIO_SW_DISABLE << 16;

	disable = htod32(disable);
	if ((error = dev_wlc_ioctl(dev, WLC_SET_RADIO, &disable, sizeof(disable))))
		return error;

	/* If Radio is off, nothing more to do */
	if (disable & WL_RADIO_SW_DISABLE)
		return 0;

	/* Only handle mW */
	if (!(vwrq->flags & IW_TXPOW_MWATT))
		return -EINVAL;

	/* Value < 0 means just "on" or "off" */
	if (vwrq->value < 0)
		return 0;

	if (vwrq->value > 0xffff) txpwrmw = 0xffff;
	else txpwrmw = (uint16)vwrq->value;


	error = dev_wlc_intvar_set(dev, "qtxpower", (int)(bcm_mw_to_qdbm(txpwrmw)));
	return error;
}

/* SIOCGIWTXPOW handler: report tx power in mW and the radio-disable state. */
static int
wl_iw_get_txpow(
	struct net_device *dev,
	struct iw_request_info *info,
	struct iw_param *vwrq,
	char *extra
)
{
	int error, disable, txpwrdbm;
	uint8 result;

	WL_TRACE(("%s: SIOCGIWTXPOW\n", dev->name));

	if ((error = dev_wlc_ioctl(dev, WLC_GET_RADIO, &disable, sizeof(disable))) ||
	    (error = dev_wlc_intvar_get(dev, "qtxpower", &txpwrdbm)))
		return error;

	disable = dtoh32(disable);
	result = (uint8)(txpwrdbm & ~WL_TXPWR_OVERRIDE);
	vwrq->value = (int32)bcm_qdbm_to_mw(result);
	vwrq->fixed = 0;
	vwrq->disabled = (disable & (WL_RADIO_SW_DISABLE | WL_RADIO_HW_DISABLE)) ? 1 : 0;
	vwrq->flags = IW_TXPOW_MWATT;

	return 0;
}

#if WIRELESS_EXT > 10
/* SIOCSIWRETRY handler: set long (LRL) and/or short (SRL) retry limits
 * depending on the IW_RETRY_* flags; lifetime requests are rejected. */
static int
wl_iw_set_retry(
	struct net_device *dev,
	struct iw_request_info *info,
	struct iw_param *vwrq,
	char *extra
)
{
	int error, lrl, srl;

	WL_TRACE(("%s: SIOCSIWRETRY\n", dev->name));

	/* Do not handle "off" or "lifetime" */
	if (vwrq->disabled || (vwrq->flags & IW_RETRY_LIFETIME))
		return -EINVAL;

	/* Handle "[min|max] limit" */
	if (vwrq->flags & IW_RETRY_LIMIT) {
		/* "max limit" or just "limit" */
#if WIRELESS_EXT > 20
		if ((vwrq->flags & IW_RETRY_LONG) ||(vwrq->flags & IW_RETRY_MAX) ||
			!((vwrq->flags & IW_RETRY_SHORT) || (vwrq->flags & IW_RETRY_MIN))) {
#else
		if ((vwrq->flags & IW_RETRY_MAX) || !(vwrq->flags & IW_RETRY_MIN)) {
#endif /* WIRELESS_EXT > 20 */
			lrl = htod32(vwrq->value);
			if ((error = dev_wlc_ioctl(dev, WLC_SET_LRL, &lrl, sizeof(lrl))))
				return error;
		}

		/* "min limit" or just "limit" */
#if WIRELESS_EXT > 20
		if ((vwrq->flags & IW_RETRY_SHORT) ||(vwrq->flags & IW_RETRY_MIN) ||
			!((vwrq->flags & IW_RETRY_LONG) || (vwrq->flags & IW_RETRY_MAX))) {
#else
		if ((vwrq->flags & IW_RETRY_MIN) || !(vwrq->flags & IW_RETRY_MAX)) {
#endif /* WIRELESS_EXT > 20 */
			srl = htod32(vwrq->value);
			if ((error = dev_wlc_ioctl(dev, WLC_SET_SRL, &srl, sizeof(srl))))
				return error;
		}
	}

	return 0;
}

/* SIOCGIWRETRY handler: report the retry limits (min/short by default). */
static int
wl_iw_get_retry(
	struct net_device *dev,
	struct iw_request_info *info,
	struct iw_param *vwrq,
	char *extra
)
{
	int error, lrl, srl;

	WL_TRACE(("%s: SIOCGIWRETRY\n", dev->name));

	vwrq->disabled = 0;      /* Can't be disabled */

	/* Do not handle lifetime queries */
	if ((vwrq->flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME)
		return -EINVAL;

	/* Get retry limits */
	if ((error = dev_wlc_ioctl(dev, WLC_GET_LRL, &lrl, sizeof(lrl))) ||
	    (error = dev_wlc_ioctl(dev, WLC_GET_SRL, &srl, sizeof(srl))))
		return error;

	lrl = dtoh32(lrl);
	srl = dtoh32(srl);

	/* Note : by default, display the min retry number */
	if (vwrq->flags & IW_RETRY_MAX) {
		vwrq->flags = IW_RETRY_LIMIT | IW_RETRY_MAX;
		vwrq->value = lrl;
	} else {
		vwrq->flags = IW_RETRY_LIMIT;
		vwrq->value = srl;
		if (srl != lrl)
			vwrq->flags |= IW_RETRY_MIN;
	}

	return 0;
}
#endif /* WIRELESS_EXT > 10 */

/* SIOCSIWENCODE handler: enable/disable WEP, select the primary key index
 * and optionally install a WEP/TKIP/AES key; "restricted" selects shared-key
 * authentication. */
static int
wl_iw_set_encode(
	struct net_device *dev,
	struct iw_request_info *info,
	struct iw_point *dwrq,
	char *extra
)
{
	wl_wsec_key_t key;
	int error, val, wsec;

	WL_TRACE(("%s: SIOCSIWENCODE\n", dev->name));

	memset(&key, 0, sizeof(key));

	if ((dwrq->flags & IW_ENCODE_INDEX) == 0) {
		/* Find the current key */
		for (key.index = 0; key.index < DOT11_MAX_DEFAULT_KEYS; key.index++) {
			val = htod32(key.index);
			if ((error = dev_wlc_ioctl(dev, WLC_GET_KEY_PRIMARY, &val, sizeof(val))))
				return error;
			val = dtoh32(val);
			if (val)
				break;
		}
		/* Default to 0 */
		if (key.index == DOT11_MAX_DEFAULT_KEYS)
			key.index = 0;
	} else {
		key.index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
		if (key.index >= DOT11_MAX_DEFAULT_KEYS)
			return -EINVAL;
	}

	/* Interpret "off" to mean no encryption */
	wsec = (dwrq->flags & IW_ENCODE_DISABLED) ? 0 : WEP_ENABLED;

	if ((error = dev_wlc_intvar_set(dev, "wsec", wsec)))
		return error;

	/* Old API used to pass a NULL pointer instead of IW_ENCODE_NOKEY */
	if (!extra || !dwrq->length || (dwrq->flags & IW_ENCODE_NOKEY)) {
		/* Just select a new current key */
		val = htod32(key.index);
		if ((error = dev_wlc_ioctl(dev, WLC_SET_KEY_PRIMARY, &val, sizeof(val))))
			return error;
	} else {
		key.len = dwrq->length;

		if (dwrq->length > sizeof(key.data))
			return -EINVAL;

		memcpy(key.data, extra, dwrq->length);

		key.flags = WL_PRIMARY_KEY;
		switch (key.len) {
		case WEP1_KEY_SIZE:
			key.algo = CRYPTO_ALGO_WEP1;
			break;
		case WEP128_KEY_SIZE:
			key.algo = CRYPTO_ALGO_WEP128;
			break;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 14)
		case TKIP_KEY_SIZE:
			key.algo = CRYPTO_ALGO_TKIP;
			break;
#endif
		case AES_KEY_SIZE:
			key.algo = CRYPTO_ALGO_AES_CCM;
			break;
		default:
			return -EINVAL;
		}

		/* Set the new key/index */
		swap_key_from_BE(&key);
		if ((error = dev_wlc_ioctl(dev, WLC_SET_KEY, &key, sizeof(key))))
			return error;
	}

	/* Interpret "restricted" to mean shared key authentication */
	val = (dwrq->flags & IW_ENCODE_RESTRICTED) ? 1 : 0;
	val = htod32(val);
	if ((error = dev_wlc_ioctl(dev, WLC_SET_AUTH, &val, sizeof(val))))
		return error;

	return 0;
}

/* SIOCGIWENCODE handler: report current encryption state, key index and
 * (truncated) key material. */
static int
wl_iw_get_encode(
	struct net_device *dev,
	struct iw_request_info *info,
	struct iw_point *dwrq,
	char *extra
)
{
	wl_wsec_key_t key;
	int error, val, wsec, auth;

	WL_TRACE(("%s: SIOCGIWENCODE\n", dev->name));

	/* assure default values of zero for things we don't touch */
	bzero(&key, sizeof(wl_wsec_key_t));

	if ((dwrq->flags & IW_ENCODE_INDEX) == 0) {
		/* Find the current key */
		for (key.index = 0; key.index < DOT11_MAX_DEFAULT_KEYS; key.index++) {
			val = key.index;
			if ((error = dev_wlc_ioctl(dev, WLC_GET_KEY_PRIMARY, &val, sizeof(val))))
				return error;
			val = dtoh32(val);
			if (val)
				break;
		}
	} else
		key.index = (dwrq->flags & IW_ENCODE_INDEX) - 1;

	if (key.index >= DOT11_MAX_DEFAULT_KEYS)
		key.index = 0;

	/* Get info */

	if ((error = dev_wlc_ioctl(dev, WLC_GET_WSEC, &wsec, sizeof(wsec))) ||
	    (error = dev_wlc_ioctl(dev, WLC_GET_AUTH, &auth, sizeof(auth))))
		return error;

	swap_key_to_BE(&key);

	wsec = dtoh32(wsec);
	auth = dtoh32(auth);
	/* Get key length */
	dwrq->length = MIN(IW_ENCODING_TOKEN_MAX, key.len);

	/* Get flags */
	dwrq->flags = key.index + 1;
	if (!(wsec & (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED))) {
		/* Interpret "off" to mean no encryption */
		dwrq->flags |= IW_ENCODE_DISABLED;
	}
	if (auth) {
		/* Interpret "restricted" to mean shared key authentication */
		dwrq->flags |= IW_ENCODE_RESTRICTED;
	}

	/* Get key */
	if (dwrq->length && extra)
		memcpy(extra, key.data, dwrq->length);

	return 0;
}

/* SIOCSIWPOWER handler: map "disabled" to PM_OFF, otherwise PM_MAX. */
static int
wl_iw_set_power(
	struct net_device *dev,
	struct iw_request_info *info,
	struct iw_param *vwrq,
	char *extra
)
{
	int error, pm;

	WL_TRACE(("%s: SIOCSIWPOWER\n", dev->name));

	pm = vwrq->disabled ? PM_OFF : PM_MAX;

	pm = htod32(pm);
	if ((error = dev_wlc_ioctl(dev, WLC_SET_PM, &pm, sizeof(pm))))
		return error;

	return 0;
}

/* SIOCGIWPOWER handler: report the power-management state. */
static int
wl_iw_get_power(
	struct net_device *dev,
	struct iw_request_info *info,
	struct iw_param *vwrq,
	char *extra
)
{
	int error, pm;

	WL_TRACE(("%s: SIOCGIWPOWER\n", dev->name));

	if ((error = dev_wlc_ioctl(dev, WLC_GET_PM, &pm, sizeof(pm))))
		return error;

	pm = dtoh32(pm);
	vwrq->disabled = pm ? 0 : 1;
	vwrq->flags = IW_POWER_ALL_R;

	return 0;
}

#if WIRELESS_EXT > 17
/* SIOCSIWGENIE handler: push the supplicant's (WPA or, with BCMWAPI_WPI,
 * WAPI) IE down to the firmware. */
static int
wl_iw_set_wpaie(
	struct net_device *dev,
	struct iw_request_info *info,
	struct iw_point *iwp,
	char *extra
)
{
#if defined(BCMWAPI_WPI)
	uchar buf[WLC_IOCTL_SMLEN] = {0};
	uchar *p = buf;
	int wapi_ie_size;

	WL_TRACE(("%s: SIOCSIWGENIE\n", dev->name));

	if (extra[0] == DOT11_MNG_WAPI_ID)
	{
		wapi_ie_size = iwp->length;
		memcpy(p, extra, iwp->length);
		dev_wlc_bufvar_set(dev, "wapiie", buf, wapi_ie_size);
	}
	else
#endif
		dev_wlc_bufvar_set(dev, "wpaie", extra, iwp->length);

	return 0;
}

/* SIOCGIWGENIE handler: read back the WPA IE (fixed 64-byte request). */
static int
wl_iw_get_wpaie(
	struct net_device *dev,
	struct iw_request_info *info,
	struct iw_point *iwp,
	char *extra
)
{
	WL_TRACE(("%s: SIOCGIWGENIE\n", dev->name));
	iwp->length = 64;
	dev_wlc_bufvar_get(dev, "wpaie", extra, iwp->length);
	return 0;
}

/* SIOCSIWENCODEEXT handler: install/delete pairwise or group keys, change
 * the primary key index, or pass a PMK to the in-driver supplicant. */
static int
wl_iw_set_encodeext(
	struct net_device *dev,
	struct iw_request_info *info,
	struct iw_point *dwrq,
	char *extra
)
{
	wl_wsec_key_t key;
	int error;
	struct iw_encode_ext *iwe;

	WL_TRACE(("%s: SIOCSIWENCODEEXT\n", dev->name));

	memset(&key, 0, sizeof(key));
	iwe = (struct iw_encode_ext *)extra;

	/* disable encryption completely  */
	if (dwrq->flags & IW_ENCODE_DISABLED) {

	}

	/* get the key index */
	key.index = 0;
	if (dwrq->flags & IW_ENCODE_INDEX)
		key.index = (dwrq->flags & IW_ENCODE_INDEX) - 1;

	key.len = iwe->key_len;

	/* Instead of bcast for ea address for default wep keys, driver needs it to be Null */
	if (!ETHER_ISMULTI(iwe->addr.sa_data))
		bcopy((void *)&iwe->addr.sa_data, (char *)&key.ea, ETHER_ADDR_LEN);

	/* check for key index change */
	if (key.len == 0) {
		if (iwe->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) {
			WL_WSEC(("Changing the the primary Key to %d\n", key.index));
			/* change the key index .... */
			key.index = htod32(key.index);
			error = dev_wlc_ioctl(dev, WLC_SET_KEY_PRIMARY,
				&key.index, sizeof(key.index));
			if (error)
				return error;
		}
		/* key delete */
		else {
			swap_key_from_BE(&key);
			error = dev_wlc_ioctl(dev, WLC_SET_KEY, &key, sizeof(key));
			if (error)
				return error;
		}
	}
	/* This case is used to allow an external 802.1x supplicant
	 * to pass the PMK to the in-driver supplicant for use in
	 * the 4-way handshake.
	 */
	else if (iwe->alg == IW_ENCODE_ALG_PMK) {
		int j;
		wsec_pmk_t pmk;
		char keystring[WSEC_MAX_PSK_LEN + 1];
		char* charptr = keystring;
		uint len;

		/* copy the raw hex key to the appropriate format */
		for (j = 0; j < (WSEC_MAX_PSK_LEN / 2); j++) {
			sprintf(charptr, "%02x", iwe->key[j]);
			charptr += 2;
		}
		len = strlen(keystring);
		pmk.key_len = htod16(len);
		bcopy(keystring, pmk.key, len);
		pmk.flags = htod16(WSEC_PASSPHRASE);

		error = dev_wlc_ioctl(dev, WLC_SET_WSEC_PMK, &pmk, sizeof(pmk));
		if (error)
			return error;
	}

	else {
		if (iwe->key_len > sizeof(key.data))
			return -EINVAL;

		WL_WSEC(("Setting the key index %d\n", key.index));
		if (iwe->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) {
			WL_WSEC(("key is a Primary Key\n"));
			key.flags = WL_PRIMARY_KEY;
		}

		bcopy((void *)iwe->key, key.data, iwe->key_len);

		if (iwe->alg == IW_ENCODE_ALG_TKIP) {
			/* firmware expects the TKIP MIC keys swapped relative
			 * to the WEXT layout */
			uint8 keybuf[8];
			bcopy(&key.data[24], keybuf, sizeof(keybuf));
			bcopy(&key.data[16], &key.data[24], sizeof(keybuf));
			bcopy(keybuf, &key.data[16], sizeof(keybuf));
		}

		/* rx iv */
		if (iwe->ext_flags & IW_ENCODE_EXT_RX_SEQ_VALID) {
			uchar *ivptr;
			ivptr = (uchar *)iwe->rx_seq;
			key.rxiv.hi = (ivptr[5] << 24) | (ivptr[4] << 16) |
				(ivptr[3] << 8) | ivptr[2];
			key.rxiv.lo = (ivptr[1] << 8) | ivptr[0];
			key.iv_initialized = TRUE;
		}

		switch (iwe->alg) {
			case IW_ENCODE_ALG_NONE:
				key.algo = CRYPTO_ALGO_OFF;
				break;
			case IW_ENCODE_ALG_WEP:
				if (iwe->key_len == WEP1_KEY_SIZE)
					key.algo = CRYPTO_ALGO_WEP1;
				else
					key.algo =
CRYPTO_ALGO_WEP128; break; case IW_ENCODE_ALG_TKIP: key.algo = CRYPTO_ALGO_TKIP; break; case IW_ENCODE_ALG_CCMP: key.algo = CRYPTO_ALGO_AES_CCM; break; #ifdef BCMWAPI_WPI case IW_ENCODE_ALG_SM4: key.algo = CRYPTO_ALGO_SMS4; if (iwe->ext_flags & IW_ENCODE_EXT_GROUP_KEY) { key.flags &= ~WL_PRIMARY_KEY; } break; #endif default: break; } swap_key_from_BE(&key); dhd_wait_pend8021x(dev); error = dev_wlc_ioctl(dev, WLC_SET_KEY, &key, sizeof(key)); if (error) return error; } return 0; } #if WIRELESS_EXT > 17 struct { pmkid_list_t pmkids; pmkid_t foo[MAXPMKID-1]; } pmkid_list; static int wl_iw_set_pmksa( struct net_device *dev, struct iw_request_info *info, struct iw_param *vwrq, char *extra ) { struct iw_pmksa *iwpmksa; uint i; char eabuf[ETHER_ADDR_STR_LEN]; pmkid_t * pmkid_array = pmkid_list.pmkids.pmkid; WL_TRACE(("%s: SIOCSIWPMKSA\n", dev->name)); iwpmksa = (struct iw_pmksa *)extra; bzero((char *)eabuf, ETHER_ADDR_STR_LEN); if (iwpmksa->cmd == IW_PMKSA_FLUSH) { WL_TRACE(("wl_iw_set_pmksa - IW_PMKSA_FLUSH\n")); bzero((char *)&pmkid_list, sizeof(pmkid_list)); } if (iwpmksa->cmd == IW_PMKSA_REMOVE) { pmkid_list_t pmkid, *pmkidptr; pmkidptr = &pmkid; bcopy(&iwpmksa->bssid.sa_data[0], &pmkidptr->pmkid[0].BSSID, ETHER_ADDR_LEN); bcopy(&iwpmksa->pmkid[0], &pmkidptr->pmkid[0].PMKID, WPA2_PMKID_LEN); { uint j; WL_TRACE(("wl_iw_set_pmksa,IW_PMKSA_REMOVE - PMKID: %s = ", bcm_ether_ntoa(&pmkidptr->pmkid[0].BSSID, eabuf))); for (j = 0; j < WPA2_PMKID_LEN; j++) WL_TRACE(("%02x ", pmkidptr->pmkid[0].PMKID[j])); WL_TRACE(("\n")); } for (i = 0; i < pmkid_list.pmkids.npmkid; i++) if (!bcmp(&iwpmksa->bssid.sa_data[0], &pmkid_array[i].BSSID, ETHER_ADDR_LEN)) break; for (; i < pmkid_list.pmkids.npmkid; i++) { bcopy(&pmkid_array[i+1].BSSID, &pmkid_array[i].BSSID, ETHER_ADDR_LEN); bcopy(&pmkid_array[i+1].PMKID, &pmkid_array[i].PMKID, WPA2_PMKID_LEN); } pmkid_list.pmkids.npmkid--; } if (iwpmksa->cmd == IW_PMKSA_ADD) { bcopy(&iwpmksa->bssid.sa_data[0], 
&pmkid_array[pmkid_list.pmkids.npmkid].BSSID, ETHER_ADDR_LEN); bcopy(&iwpmksa->pmkid[0], &pmkid_array[pmkid_list.pmkids.npmkid].PMKID, WPA2_PMKID_LEN); { uint j; uint k; k = pmkid_list.pmkids.npmkid; BCM_REFERENCE(k); WL_TRACE(("wl_iw_set_pmksa,IW_PMKSA_ADD - PMKID: %s = ", bcm_ether_ntoa(&pmkid_array[k].BSSID, eabuf))); for (j = 0; j < WPA2_PMKID_LEN; j++) WL_TRACE(("%02x ", pmkid_array[k].PMKID[j])); WL_TRACE(("\n")); } pmkid_list.pmkids.npmkid++; } WL_TRACE(("PRINTING pmkid LIST - No of elements %d\n", pmkid_list.pmkids.npmkid)); for (i = 0; i < pmkid_list.pmkids.npmkid; i++) { uint j; WL_TRACE(("PMKID[%d]: %s = ", i, bcm_ether_ntoa(&pmkid_array[i].BSSID, eabuf))); for (j = 0; j < WPA2_PMKID_LEN; j++) WL_TRACE(("%02x ", pmkid_array[i].PMKID[j])); printf("\n"); } WL_TRACE(("\n")); dev_wlc_bufvar_set(dev, "pmkid_info", (char *)&pmkid_list, sizeof(pmkid_list)); return 0; } #endif /* WIRELESS_EXT > 17 */ static int wl_iw_get_encodeext( struct net_device *dev, struct iw_request_info *info, struct iw_param *vwrq, char *extra ) { WL_TRACE(("%s: SIOCGIWENCODEEXT\n", dev->name)); return 0; } static int wl_iw_set_wpaauth( struct net_device *dev, struct iw_request_info *info, struct iw_param *vwrq, char *extra ) { int error = 0; int paramid; int paramval; uint32 cipher_combined; int val = 0; wl_iw_t *iw = IW_DEV_IF(dev); WL_TRACE(("%s: SIOCSIWAUTH\n", dev->name)); paramid = vwrq->flags & IW_AUTH_INDEX; paramval = vwrq->value; WL_TRACE(("%s: SIOCSIWAUTH, paramid = 0x%0x, paramval = 0x%0x\n", dev->name, paramid, paramval)); switch (paramid) { case IW_AUTH_WPA_VERSION: /* supported wpa version disabled or wpa or wpa2 */ if (paramval & IW_AUTH_WPA_VERSION_DISABLED) val = WPA_AUTH_DISABLED; else if (paramval & (IW_AUTH_WPA_VERSION_WPA)) val = WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED; else if (paramval & IW_AUTH_WPA_VERSION_WPA2) val = WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED; #ifdef BCMWAPI_WPI else if (paramval & IW_AUTH_WAPI_VERSION_1) val = WAPI_AUTH_UNSPECIFIED; #endif 
WL_TRACE(("%s: %d: setting wpa_auth to 0x%0x\n", __FUNCTION__, __LINE__, val)); if ((error = dev_wlc_intvar_set(dev, "wpa_auth", val))) return error; break; case IW_AUTH_CIPHER_PAIRWISE: case IW_AUTH_CIPHER_GROUP: { int fbt_cap = 0; if (paramid == IW_AUTH_CIPHER_PAIRWISE) { iw->pwsec = paramval; } else { iw->gwsec = paramval; } if ((error = dev_wlc_intvar_get(dev, "wsec", &val))) return error; cipher_combined = iw->gwsec | iw->pwsec; val &= ~(WEP_ENABLED | TKIP_ENABLED | AES_ENABLED); if (cipher_combined & (IW_AUTH_CIPHER_WEP40 | IW_AUTH_CIPHER_WEP104)) val |= WEP_ENABLED; if (cipher_combined & IW_AUTH_CIPHER_TKIP) val |= TKIP_ENABLED; if (cipher_combined & IW_AUTH_CIPHER_CCMP) val |= AES_ENABLED; #ifdef BCMWAPI_WPI val &= ~SMS4_ENABLED; if (cipher_combined & IW_AUTH_CIPHER_SMS4) val |= SMS4_ENABLED; #endif if (iw->privacy_invoked && !val) { WL_WSEC(("%s: %s: 'Privacy invoked' TRUE but clearing wsec, assuming " "we're a WPS enrollee\n", dev->name, __FUNCTION__)); if ((error = dev_wlc_intvar_set(dev, "is_WPS_enrollee", TRUE))) { WL_WSEC(("Failed to set iovar is_WPS_enrollee\n")); return error; } } else if (val) { if ((error = dev_wlc_intvar_set(dev, "is_WPS_enrollee", FALSE))) { WL_WSEC(("Failed to clear iovar is_WPS_enrollee\n")); return error; } } if ((error = dev_wlc_intvar_set(dev, "wsec", val))) return error; /* Ensure in-dongle supplicant is turned on when FBT wants to do the 4-way * handshake. 
*/ if (dev_wlc_intvar_get(dev, "fbt_cap", &fbt_cap) == 0) { if (fbt_cap == WLC_FBT_CAP_DRV_4WAY_AND_REASSOC) { if ((paramid == IW_AUTH_CIPHER_PAIRWISE) && (val & AES_ENABLED)) { if ((error = dev_wlc_intvar_set(dev, "sup_wpa", 1))) return error; } else if (val == 0) { if ((error = dev_wlc_intvar_set(dev, "sup_wpa", 0))) return error; } } } break; } case IW_AUTH_KEY_MGMT: if ((error = dev_wlc_intvar_get(dev, "wpa_auth", &val))) return error; if (val & (WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED)) { if (paramval & (IW_AUTH_KEY_MGMT_FT_PSK | IW_AUTH_KEY_MGMT_PSK)) val = WPA_AUTH_PSK; else val = WPA_AUTH_UNSPECIFIED; if (paramval & (IW_AUTH_KEY_MGMT_FT_802_1X | IW_AUTH_KEY_MGMT_FT_PSK)) val |= WPA2_AUTH_FT; } else if (val & (WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED)) { if (paramval & (IW_AUTH_KEY_MGMT_FT_PSK | IW_AUTH_KEY_MGMT_PSK)) val = WPA2_AUTH_PSK; else val = WPA2_AUTH_UNSPECIFIED; if (paramval & (IW_AUTH_KEY_MGMT_FT_802_1X | IW_AUTH_KEY_MGMT_FT_PSK)) val |= WPA2_AUTH_FT; } #ifdef BCMWAPI_WPI if (paramval & (IW_AUTH_KEY_MGMT_WAPI_PSK | IW_AUTH_KEY_MGMT_WAPI_CERT)) val = WAPI_AUTH_UNSPECIFIED; #endif WL_TRACE(("%s: %d: setting wpa_auth to %d\n", __FUNCTION__, __LINE__, val)); if ((error = dev_wlc_intvar_set(dev, "wpa_auth", val))) return error; break; case IW_AUTH_TKIP_COUNTERMEASURES: dev_wlc_bufvar_set(dev, "tkip_countermeasures", (char *)&paramval, 1); break; case IW_AUTH_80211_AUTH_ALG: /* open shared */ WL_ERROR(("Setting the D11auth %d\n", paramval)); if (paramval & IW_AUTH_ALG_OPEN_SYSTEM) val = 0; else if (paramval & IW_AUTH_ALG_SHARED_KEY) val = 1; else error = 1; if (!error && (error = dev_wlc_intvar_set(dev, "auth", val))) return error; break; case IW_AUTH_WPA_ENABLED: if (paramval == 0) { val = 0; WL_TRACE(("%s: %d: setting wpa_auth to %d\n", __FUNCTION__, __LINE__, val)); error = dev_wlc_intvar_set(dev, "wpa_auth", val); return error; } else { /* If WPA is enabled, wpa_auth is set elsewhere */ } break; case IW_AUTH_DROP_UNENCRYPTED: dev_wlc_bufvar_set(dev, 
"wsec_restrict", (char *)&paramval, 1); break; case IW_AUTH_RX_UNENCRYPTED_EAPOL: dev_wlc_bufvar_set(dev, "rx_unencrypted_eapol", (char *)&paramval, 1); break; #if WIRELESS_EXT > 17 case IW_AUTH_ROAMING_CONTROL: WL_TRACE(("%s: IW_AUTH_ROAMING_CONTROL\n", __FUNCTION__)); /* driver control or user space app control */ break; case IW_AUTH_PRIVACY_INVOKED: { int wsec; if (paramval == 0) { iw->privacy_invoked = FALSE; if ((error = dev_wlc_intvar_set(dev, "is_WPS_enrollee", FALSE))) { WL_WSEC(("Failed to clear iovar is_WPS_enrollee\n")); return error; } } else { iw->privacy_invoked = TRUE; if ((error = dev_wlc_intvar_get(dev, "wsec", &wsec))) return error; if (!WSEC_ENABLED(wsec)) { /* if privacy is true, but wsec is false, we are a WPS enrollee */ if ((error = dev_wlc_intvar_set(dev, "is_WPS_enrollee", TRUE))) { WL_WSEC(("Failed to set iovar is_WPS_enrollee\n")); return error; } } else { if ((error = dev_wlc_intvar_set(dev, "is_WPS_enrollee", FALSE))) { WL_WSEC(("Failed to clear iovar is_WPS_enrollee\n")); return error; } } } break; } #endif /* WIRELESS_EXT > 17 */ #ifdef BCMWAPI_WPI case IW_AUTH_WAPI_ENABLED: if ((error = dev_wlc_intvar_get(dev, "wsec", &val))) return error; if (paramval) { val |= SMS4_ENABLED; if ((error = dev_wlc_intvar_set(dev, "wsec", val))) { WL_ERROR(("%s: setting wsec to 0x%0x returned error %d\n", __FUNCTION__, val, error)); return error; } if ((error = dev_wlc_intvar_set(dev, "wpa_auth", WAPI_AUTH_UNSPECIFIED))) { WL_ERROR(("%s: setting wpa_auth(%d) returned %d\n", __FUNCTION__, WAPI_AUTH_UNSPECIFIED, error)); return error; } } break; #endif /* BCMWAPI_WPI */ default: break; } return 0; } #define VAL_PSK(_val) (((_val) & WPA_AUTH_PSK) || ((_val) & WPA2_AUTH_PSK)) static int wl_iw_get_wpaauth( struct net_device *dev, struct iw_request_info *info, struct iw_param *vwrq, char *extra ) { int error; int paramid; int paramval = 0; int val; wl_iw_t *iw = IW_DEV_IF(dev); WL_TRACE(("%s: SIOCGIWAUTH\n", dev->name)); paramid = vwrq->flags & 
IW_AUTH_INDEX; switch (paramid) { case IW_AUTH_WPA_VERSION: /* supported wpa version disabled or wpa or wpa2 */ if ((error = dev_wlc_intvar_get(dev, "wpa_auth", &val))) return error; if (val & (WPA_AUTH_NONE | WPA_AUTH_DISABLED)) paramval = IW_AUTH_WPA_VERSION_DISABLED; else if (val & (WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED)) paramval = IW_AUTH_WPA_VERSION_WPA; else if (val & (WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED)) paramval = IW_AUTH_WPA_VERSION_WPA2; break; case IW_AUTH_CIPHER_PAIRWISE: paramval = iw->pwsec; break; case IW_AUTH_CIPHER_GROUP: paramval = iw->gwsec; break; case IW_AUTH_KEY_MGMT: /* psk, 1x */ if ((error = dev_wlc_intvar_get(dev, "wpa_auth", &val))) return error; if (VAL_PSK(val)) paramval = IW_AUTH_KEY_MGMT_PSK; else paramval = IW_AUTH_KEY_MGMT_802_1X; break; case IW_AUTH_TKIP_COUNTERMEASURES: dev_wlc_bufvar_get(dev, "tkip_countermeasures", (char *)&paramval, 1); break; case IW_AUTH_DROP_UNENCRYPTED: dev_wlc_bufvar_get(dev, "wsec_restrict", (char *)&paramval, 1); break; case IW_AUTH_RX_UNENCRYPTED_EAPOL: dev_wlc_bufvar_get(dev, "rx_unencrypted_eapol", (char *)&paramval, 1); break; case IW_AUTH_80211_AUTH_ALG: /* open, shared, leap */ if ((error = dev_wlc_intvar_get(dev, "auth", &val))) return error; if (!val) paramval = IW_AUTH_ALG_OPEN_SYSTEM; else paramval = IW_AUTH_ALG_SHARED_KEY; break; case IW_AUTH_WPA_ENABLED: if ((error = dev_wlc_intvar_get(dev, "wpa_auth", &val))) return error; if (val) paramval = TRUE; else paramval = FALSE; break; #if WIRELESS_EXT > 17 case IW_AUTH_ROAMING_CONTROL: WL_ERROR(("%s: IW_AUTH_ROAMING_CONTROL\n", __FUNCTION__)); /* driver control or user space app control */ break; case IW_AUTH_PRIVACY_INVOKED: paramval = iw->privacy_invoked; break; #endif /* WIRELESS_EXT > 17 */ } vwrq->value = paramval; return 0; } #endif /* WIRELESS_EXT > 17 */ static const iw_handler wl_iw_handler[] = { (iw_handler) wl_iw_config_commit, /* SIOCSIWCOMMIT */ (iw_handler) wl_iw_get_name, /* SIOCGIWNAME */ (iw_handler) NULL, /* SIOCSIWNWID */ 
(iw_handler) NULL, /* SIOCGIWNWID */ (iw_handler) wl_iw_set_freq, /* SIOCSIWFREQ */ (iw_handler) wl_iw_get_freq, /* SIOCGIWFREQ */ (iw_handler) wl_iw_set_mode, /* SIOCSIWMODE */ (iw_handler) wl_iw_get_mode, /* SIOCGIWMODE */ (iw_handler) NULL, /* SIOCSIWSENS */ (iw_handler) NULL, /* SIOCGIWSENS */ (iw_handler) NULL, /* SIOCSIWRANGE */ (iw_handler) wl_iw_get_range, /* SIOCGIWRANGE */ (iw_handler) NULL, /* SIOCSIWPRIV */ (iw_handler) NULL, /* SIOCGIWPRIV */ (iw_handler) NULL, /* SIOCSIWSTATS */ (iw_handler) NULL, /* SIOCGIWSTATS */ (iw_handler) wl_iw_set_spy, /* SIOCSIWSPY */ (iw_handler) wl_iw_get_spy, /* SIOCGIWSPY */ (iw_handler) NULL, /* -- hole -- */ (iw_handler) NULL, /* -- hole -- */ (iw_handler) wl_iw_set_wap, /* SIOCSIWAP */ (iw_handler) wl_iw_get_wap, /* SIOCGIWAP */ #if WIRELESS_EXT > 17 (iw_handler) wl_iw_mlme, /* SIOCSIWMLME */ #else (iw_handler) NULL, /* -- hole -- */ #endif (iw_handler) wl_iw_iscan_get_aplist, /* SIOCGIWAPLIST */ #if WIRELESS_EXT > 13 (iw_handler) wl_iw_iscan_set_scan, /* SIOCSIWSCAN */ (iw_handler) wl_iw_iscan_get_scan, /* SIOCGIWSCAN */ #else /* WIRELESS_EXT > 13 */ (iw_handler) NULL, /* SIOCSIWSCAN */ (iw_handler) NULL, /* SIOCGIWSCAN */ #endif /* WIRELESS_EXT > 13 */ (iw_handler) wl_iw_set_essid, /* SIOCSIWESSID */ (iw_handler) wl_iw_get_essid, /* SIOCGIWESSID */ (iw_handler) wl_iw_set_nick, /* SIOCSIWNICKN */ (iw_handler) wl_iw_get_nick, /* SIOCGIWNICKN */ (iw_handler) NULL, /* -- hole -- */ (iw_handler) NULL, /* -- hole -- */ (iw_handler) wl_iw_set_rate, /* SIOCSIWRATE */ (iw_handler) wl_iw_get_rate, /* SIOCGIWRATE */ (iw_handler) wl_iw_set_rts, /* SIOCSIWRTS */ (iw_handler) wl_iw_get_rts, /* SIOCGIWRTS */ (iw_handler) wl_iw_set_frag, /* SIOCSIWFRAG */ (iw_handler) wl_iw_get_frag, /* SIOCGIWFRAG */ (iw_handler) wl_iw_set_txpow, /* SIOCSIWTXPOW */ (iw_handler) wl_iw_get_txpow, /* SIOCGIWTXPOW */ #if WIRELESS_EXT > 10 (iw_handler) wl_iw_set_retry, /* SIOCSIWRETRY */ (iw_handler) wl_iw_get_retry, /* SIOCGIWRETRY */ #endif /* 
WIRELESS_EXT > 10 */ (iw_handler) wl_iw_set_encode, /* SIOCSIWENCODE */ (iw_handler) wl_iw_get_encode, /* SIOCGIWENCODE */ (iw_handler) wl_iw_set_power, /* SIOCSIWPOWER */ (iw_handler) wl_iw_get_power, /* SIOCGIWPOWER */ #if WIRELESS_EXT > 17 (iw_handler) NULL, /* -- hole -- */ (iw_handler) NULL, /* -- hole -- */ (iw_handler) wl_iw_set_wpaie, /* SIOCSIWGENIE */ (iw_handler) wl_iw_get_wpaie, /* SIOCGIWGENIE */ (iw_handler) wl_iw_set_wpaauth, /* SIOCSIWAUTH */ (iw_handler) wl_iw_get_wpaauth, /* SIOCGIWAUTH */ (iw_handler) wl_iw_set_encodeext, /* SIOCSIWENCODEEXT */ (iw_handler) wl_iw_get_encodeext, /* SIOCGIWENCODEEXT */ (iw_handler) wl_iw_set_pmksa, /* SIOCSIWPMKSA */ #endif /* WIRELESS_EXT > 17 */ }; #if WIRELESS_EXT > 12 enum { WL_IW_SET_LEDDC = SIOCIWFIRSTPRIV, WL_IW_SET_VLANMODE, WL_IW_SET_PM, #if WIRELESS_EXT > 17 #endif /* WIRELESS_EXT > 17 */ WL_IW_SET_LAST }; static iw_handler wl_iw_priv_handler[] = { wl_iw_set_leddc, wl_iw_set_vlanmode, wl_iw_set_pm, #if WIRELESS_EXT > 17 #endif /* WIRELESS_EXT > 17 */ NULL }; static struct iw_priv_args wl_iw_priv_args[] = { { WL_IW_SET_LEDDC, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "set_leddc" }, { WL_IW_SET_VLANMODE, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "set_vlanmode" }, { WL_IW_SET_PM, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "set_pm" }, #if WIRELESS_EXT > 17 #endif /* WIRELESS_EXT > 17 */ { 0, 0, 0, { 0 } } }; const struct iw_handler_def wl_iw_handler_def = { .num_standard = ARRAYSIZE(wl_iw_handler), .num_private = ARRAY_SIZE(wl_iw_priv_handler), .num_private_args = ARRAY_SIZE(wl_iw_priv_args), .standard = (iw_handler *) wl_iw_handler, .private = wl_iw_priv_handler, .private_args = wl_iw_priv_args, #if WIRELESS_EXT >= 19 get_wireless_stats: dhd_get_wireless_stats, #endif /* WIRELESS_EXT >= 19 */ }; #endif /* WIRELESS_EXT > 12 */ int wl_iw_ioctl( struct net_device *dev, struct ifreq *rq, int cmd ) { struct iwreq *wrq = (struct iwreq *) rq; struct iw_request_info info; iw_handler handler; char *extra = 
NULL; size_t token_size = 1; int max_tokens = 0, ret = 0; if (cmd < SIOCIWFIRST || IW_IOCTL_IDX(cmd) >= ARRAYSIZE(wl_iw_handler) || !(handler = wl_iw_handler[IW_IOCTL_IDX(cmd)])) return -EOPNOTSUPP; switch (cmd) { case SIOCSIWESSID: case SIOCGIWESSID: case SIOCSIWNICKN: case SIOCGIWNICKN: max_tokens = IW_ESSID_MAX_SIZE + 1; break; case SIOCSIWENCODE: case SIOCGIWENCODE: #if WIRELESS_EXT > 17 case SIOCSIWENCODEEXT: case SIOCGIWENCODEEXT: #endif max_tokens = IW_ENCODING_TOKEN_MAX; break; case SIOCGIWRANGE: max_tokens = sizeof(struct iw_range); break; case SIOCGIWAPLIST: token_size = sizeof(struct sockaddr) + sizeof(struct iw_quality); max_tokens = IW_MAX_AP; break; #if WIRELESS_EXT > 13 case SIOCGIWSCAN: if (g_iscan) max_tokens = wrq->u.data.length; else max_tokens = IW_SCAN_MAX_DATA; break; #endif /* WIRELESS_EXT > 13 */ case SIOCSIWSPY: token_size = sizeof(struct sockaddr); max_tokens = IW_MAX_SPY; break; case SIOCGIWSPY: token_size = sizeof(struct sockaddr) + sizeof(struct iw_quality); max_tokens = IW_MAX_SPY; break; default: break; } if (max_tokens && wrq->u.data.pointer) { if (wrq->u.data.length > max_tokens) return -E2BIG; if (!(extra = kmalloc(max_tokens * token_size, GFP_KERNEL))) return -ENOMEM; if (copy_from_user(extra, wrq->u.data.pointer, wrq->u.data.length * token_size)) { kfree(extra); return -EFAULT; } } info.cmd = cmd; info.flags = 0; ret = handler(dev, &info, &wrq->u, extra); if (extra) { if (copy_to_user(wrq->u.data.pointer, extra, wrq->u.data.length * token_size)) { kfree(extra); return -EFAULT; } kfree(extra); } return ret; } /* Convert a connection status event into a connection status string. * Returns TRUE if a matching connection status string was found. 
*/ bool wl_iw_conn_status_str(uint32 event_type, uint32 status, uint32 reason, char* stringBuf, uint buflen) { typedef struct conn_fail_event_map_t { uint32 inEvent; /* input: event type to match */ uint32 inStatus; /* input: event status code to match */ uint32 inReason; /* input: event reason code to match */ const char* outName; /* output: failure type */ const char* outCause; /* output: failure cause */ } conn_fail_event_map_t; /* Map of WLC_E events to connection failure strings */ # define WL_IW_DONT_CARE 9999 const conn_fail_event_map_t event_map [] = { /* inEvent inStatus inReason */ /* outName outCause */ {WLC_E_SET_SSID, WLC_E_STATUS_SUCCESS, WL_IW_DONT_CARE, "Conn", "Success"}, {WLC_E_SET_SSID, WLC_E_STATUS_NO_NETWORKS, WL_IW_DONT_CARE, "Conn", "NoNetworks"}, {WLC_E_SET_SSID, WLC_E_STATUS_FAIL, WL_IW_DONT_CARE, "Conn", "ConfigMismatch"}, {WLC_E_PRUNE, WL_IW_DONT_CARE, WLC_E_PRUNE_ENCR_MISMATCH, "Conn", "EncrypMismatch"}, {WLC_E_PRUNE, WL_IW_DONT_CARE, WLC_E_RSN_MISMATCH, "Conn", "RsnMismatch"}, {WLC_E_AUTH, WLC_E_STATUS_TIMEOUT, WL_IW_DONT_CARE, "Conn", "AuthTimeout"}, {WLC_E_AUTH, WLC_E_STATUS_FAIL, WL_IW_DONT_CARE, "Conn", "AuthFail"}, {WLC_E_AUTH, WLC_E_STATUS_NO_ACK, WL_IW_DONT_CARE, "Conn", "AuthNoAck"}, {WLC_E_REASSOC, WLC_E_STATUS_FAIL, WL_IW_DONT_CARE, "Conn", "ReassocFail"}, {WLC_E_REASSOC, WLC_E_STATUS_TIMEOUT, WL_IW_DONT_CARE, "Conn", "ReassocTimeout"}, {WLC_E_REASSOC, WLC_E_STATUS_ABORT, WL_IW_DONT_CARE, "Conn", "ReassocAbort"}, {WLC_E_PSK_SUP, WLC_SUP_KEYED, WL_IW_DONT_CARE, "Sup", "ConnSuccess"}, {WLC_E_PSK_SUP, WL_IW_DONT_CARE, WL_IW_DONT_CARE, "Sup", "WpaHandshakeFail"}, {WLC_E_DEAUTH_IND, WL_IW_DONT_CARE, WL_IW_DONT_CARE, "Conn", "Deauth"}, {WLC_E_DISASSOC_IND, WL_IW_DONT_CARE, WL_IW_DONT_CARE, "Conn", "DisassocInd"}, {WLC_E_DISASSOC, WL_IW_DONT_CARE, WL_IW_DONT_CARE, "Conn", "Disassoc"} }; const char* name = ""; const char* cause = NULL; int i; /* Search the event map table for a matching event */ for (i = 0; i < 
sizeof(event_map)/sizeof(event_map[0]); i++) { const conn_fail_event_map_t* row = &event_map[i]; if (row->inEvent == event_type && (row->inStatus == status || row->inStatus == WL_IW_DONT_CARE) && (row->inReason == reason || row->inReason == WL_IW_DONT_CARE)) { name = row->outName; cause = row->outCause; break; } } /* If found, generate a connection failure string and return TRUE */ if (cause) { memset(stringBuf, 0, buflen); snprintf(stringBuf, buflen, "%s %s %02d %02d", name, cause, status, reason); WL_TRACE(("Connection status: %s\n", stringBuf)); return TRUE; } else { return FALSE; } } #if (WIRELESS_EXT > 14) /* Check if we have received an event that indicates connection failure * If so, generate a connection failure report string. * The caller supplies a buffer to hold the generated string. */ static bool wl_iw_check_conn_fail(wl_event_msg_t *e, char* stringBuf, uint buflen) { uint32 event = ntoh32(e->event_type); uint32 status = ntoh32(e->status); uint32 reason = ntoh32(e->reason); if (wl_iw_conn_status_str(event, status, reason, stringBuf, buflen)) { return TRUE; } else { return FALSE; } } #endif /* WIRELESS_EXT > 14 */ #ifndef IW_CUSTOM_MAX #define IW_CUSTOM_MAX 256 /* size of extra buffer used for translation of events */ #endif /* IW_CUSTOM_MAX */ void wl_iw_event(struct net_device *dev, wl_event_msg_t *e, void* data) { #if WIRELESS_EXT > 13 union iwreq_data wrqu; char extra[IW_CUSTOM_MAX + 1]; int cmd = 0; uint32 event_type = ntoh32(e->event_type); uint16 flags = ntoh16(e->flags); uint32 datalen = ntoh32(e->datalen); uint32 status = ntoh32(e->status); memset(&wrqu, 0, sizeof(wrqu)); memset(extra, 0, sizeof(extra)); memcpy(wrqu.addr.sa_data, &e->addr, ETHER_ADDR_LEN); wrqu.addr.sa_family = ARPHRD_ETHER; switch (event_type) { case WLC_E_TXFAIL: cmd = IWEVTXDROP; break; #if WIRELESS_EXT > 14 case WLC_E_JOIN: case WLC_E_ASSOC_IND: case WLC_E_REASSOC_IND: cmd = IWEVREGISTERED; break; case WLC_E_DEAUTH_IND: case WLC_E_DISASSOC_IND: cmd = SIOCGIWAP; 
wrqu.data.length = strlen(extra); bzero(wrqu.addr.sa_data, ETHER_ADDR_LEN); bzero(&extra, ETHER_ADDR_LEN); break; case WLC_E_LINK: case WLC_E_NDIS_LINK: cmd = SIOCGIWAP; wrqu.data.length = strlen(extra); if (!(flags & WLC_EVENT_MSG_LINK)) { bzero(wrqu.addr.sa_data, ETHER_ADDR_LEN); bzero(&extra, ETHER_ADDR_LEN); } break; case WLC_E_ACTION_FRAME: cmd = IWEVCUSTOM; if (datalen + 1 <= sizeof(extra)) { wrqu.data.length = datalen + 1; extra[0] = WLC_E_ACTION_FRAME; memcpy(&extra[1], data, datalen); WL_TRACE(("WLC_E_ACTION_FRAME len %d \n", wrqu.data.length)); } break; case WLC_E_ACTION_FRAME_COMPLETE: cmd = IWEVCUSTOM; if (sizeof(status) + 1 <= sizeof(extra)) { wrqu.data.length = sizeof(status) + 1; extra[0] = WLC_E_ACTION_FRAME_COMPLETE; memcpy(&extra[1], &status, sizeof(status)); WL_TRACE(("wl_iw_event status %d \n", status)); } break; #endif /* WIRELESS_EXT > 14 */ #if WIRELESS_EXT > 17 case WLC_E_MIC_ERROR: { struct iw_michaelmicfailure *micerrevt = (struct iw_michaelmicfailure *)&extra; cmd = IWEVMICHAELMICFAILURE; wrqu.data.length = sizeof(struct iw_michaelmicfailure); if (flags & WLC_EVENT_MSG_GROUP) micerrevt->flags |= IW_MICFAILURE_GROUP; else micerrevt->flags |= IW_MICFAILURE_PAIRWISE; memcpy(micerrevt->src_addr.sa_data, &e->addr, ETHER_ADDR_LEN); micerrevt->src_addr.sa_family = ARPHRD_ETHER; break; } case WLC_E_ASSOC_REQ_IE: cmd = IWEVASSOCREQIE; wrqu.data.length = datalen; if (datalen < sizeof(extra)) memcpy(extra, data, datalen); break; case WLC_E_ASSOC_RESP_IE: cmd = IWEVASSOCRESPIE; wrqu.data.length = datalen; if (datalen < sizeof(extra)) memcpy(extra, data, datalen); break; case WLC_E_PMKID_CACHE: { struct iw_pmkid_cand *iwpmkidcand = (struct iw_pmkid_cand *)&extra; pmkid_cand_list_t *pmkcandlist; pmkid_cand_t *pmkidcand; int count; if (data == NULL) break; cmd = IWEVPMKIDCAND; pmkcandlist = data; count = ntoh32_ua((uint8 *)&pmkcandlist->npmkid_cand); wrqu.data.length = sizeof(struct iw_pmkid_cand); pmkidcand = pmkcandlist->pmkid_cand; while (count) { 
bzero(iwpmkidcand, sizeof(struct iw_pmkid_cand)); if (pmkidcand->preauth) iwpmkidcand->flags |= IW_PMKID_CAND_PREAUTH; bcopy(&pmkidcand->BSSID, &iwpmkidcand->bssid.sa_data, ETHER_ADDR_LEN); wireless_send_event(dev, cmd, &wrqu, extra); pmkidcand++; count--; } break; } #endif /* WIRELESS_EXT > 17 */ case WLC_E_SCAN_COMPLETE: #if WIRELESS_EXT > 14 cmd = SIOCGIWSCAN; #endif WL_TRACE(("event WLC_E_SCAN_COMPLETE\n")); if ((g_iscan) && (g_iscan->sysioc_pid >= 0) && (g_iscan->iscan_state != ISCAN_STATE_IDLE)) up(&g_iscan->sysioc_sem); break; default: /* Cannot translate event */ break; } if (cmd) { if (cmd == SIOCGIWSCAN) wireless_send_event(dev, cmd, &wrqu, NULL); else wireless_send_event(dev, cmd, &wrqu, extra); } #if WIRELESS_EXT > 14 /* Look for WLC events that indicate a connection failure. * If found, generate an IWEVCUSTOM event. */ memset(extra, 0, sizeof(extra)); if (wl_iw_check_conn_fail(e, extra, sizeof(extra))) { cmd = IWEVCUSTOM; wrqu.data.length = strlen(extra); wireless_send_event(dev, cmd, &wrqu, extra); } #endif /* WIRELESS_EXT > 14 */ #endif /* WIRELESS_EXT > 13 */ } int wl_iw_get_wireless_stats(struct net_device *dev, struct iw_statistics *wstats) { int res = 0; wl_cnt_t cnt; int phy_noise; int rssi; scb_val_t scb_val; phy_noise = 0; if ((res = dev_wlc_ioctl(dev, WLC_GET_PHY_NOISE, &phy_noise, sizeof(phy_noise)))) goto done; phy_noise = dtoh32(phy_noise); WL_TRACE(("wl_iw_get_wireless_stats phy noise=%d\n *****", phy_noise)); scb_val.val = 0; if ((res = dev_wlc_ioctl(dev, WLC_GET_RSSI, &scb_val, sizeof(scb_val_t)))) goto done; rssi = dtoh32(scb_val.val); WL_TRACE(("wl_iw_get_wireless_stats rssi=%d ****** \n", rssi)); if (rssi <= WL_IW_RSSI_NO_SIGNAL) wstats->qual.qual = 0; else if (rssi <= WL_IW_RSSI_VERY_LOW) wstats->qual.qual = 1; else if (rssi <= WL_IW_RSSI_LOW) wstats->qual.qual = 2; else if (rssi <= WL_IW_RSSI_GOOD) wstats->qual.qual = 3; else if (rssi <= WL_IW_RSSI_VERY_GOOD) wstats->qual.qual = 4; else wstats->qual.qual = 5; /* Wraps to 0 if RSSI 
is 0 */ wstats->qual.level = 0x100 + rssi; wstats->qual.noise = 0x100 + phy_noise; #if WIRELESS_EXT > 18 wstats->qual.updated |= (IW_QUAL_ALL_UPDATED | IW_QUAL_DBM); #else wstats->qual.updated |= 7; #endif /* WIRELESS_EXT > 18 */ #if WIRELESS_EXT > 11 WL_TRACE(("wl_iw_get_wireless_stats counters=%d\n *****", (int)sizeof(wl_cnt_t))); memset(&cnt, 0, sizeof(wl_cnt_t)); res = dev_wlc_bufvar_get(dev, "counters", (char *)&cnt, sizeof(wl_cnt_t)); if (res) { WL_ERROR(("wl_iw_get_wireless_stats counters failed error=%d ****** \n", res)); goto done; } cnt.version = dtoh16(cnt.version); if (cnt.version != WL_CNT_T_VERSION) { WL_TRACE(("\tIncorrect version of counters struct: expected %d; got %d\n", WL_CNT_T_VERSION, cnt.version)); goto done; } wstats->discard.nwid = 0; wstats->discard.code = dtoh32(cnt.rxundec); wstats->discard.fragment = dtoh32(cnt.rxfragerr); wstats->discard.retries = dtoh32(cnt.txfail); wstats->discard.misc = dtoh32(cnt.rxrunt) + dtoh32(cnt.rxgiant); wstats->miss.beacon = 0; WL_TRACE(("wl_iw_get_wireless_stats counters txframe=%d txbyte=%d\n", dtoh32(cnt.txframe), dtoh32(cnt.txbyte))); WL_TRACE(("wl_iw_get_wireless_stats counters rxfrmtoolong=%d\n", dtoh32(cnt.rxfrmtoolong))); WL_TRACE(("wl_iw_get_wireless_stats counters rxbadplcp=%d\n", dtoh32(cnt.rxbadplcp))); WL_TRACE(("wl_iw_get_wireless_stats counters rxundec=%d\n", dtoh32(cnt.rxundec))); WL_TRACE(("wl_iw_get_wireless_stats counters rxfragerr=%d\n", dtoh32(cnt.rxfragerr))); WL_TRACE(("wl_iw_get_wireless_stats counters txfail=%d\n", dtoh32(cnt.txfail))); WL_TRACE(("wl_iw_get_wireless_stats counters rxrunt=%d\n", dtoh32(cnt.rxrunt))); WL_TRACE(("wl_iw_get_wireless_stats counters rxgiant=%d\n", dtoh32(cnt.rxgiant))); #endif /* WIRELESS_EXT > 11 */ done: return res; } static void wl_iw_timerfunc(ulong data) { iscan_info_t *iscan = (iscan_info_t *)data; iscan->timer_on = 0; if (iscan->iscan_state != ISCAN_STATE_IDLE) { WL_TRACE(("timer trigger\n")); up(&iscan->sysioc_sem); } } static void 
wl_iw_set_event_mask(struct net_device *dev) { char eventmask[WL_EVENTING_MASK_LEN]; char iovbuf[WL_EVENTING_MASK_LEN + 12]; /* Room for "event_msgs" + '\0' + bitvec */ dev_iw_iovar_getbuf(dev, "event_msgs", "", 0, iovbuf, sizeof(iovbuf)); bcopy(iovbuf, eventmask, WL_EVENTING_MASK_LEN); setbit(eventmask, WLC_E_SCAN_COMPLETE); dev_iw_iovar_setbuf(dev, "event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf, sizeof(iovbuf)); } static int wl_iw_iscan_prep(wl_scan_params_t *params, wlc_ssid_t *ssid) { int err = 0; memcpy(&params->bssid, &ether_bcast, ETHER_ADDR_LEN); params->bss_type = DOT11_BSSTYPE_ANY; params->scan_type = 0; params->nprobes = -1; params->active_time = -1; params->passive_time = -1; params->home_time = -1; params->channel_num = 0; params->nprobes = htod32(params->nprobes); params->active_time = htod32(params->active_time); params->passive_time = htod32(params->passive_time); params->home_time = htod32(params->home_time); if (ssid && ssid->SSID_len) memcpy(&params->ssid, ssid, sizeof(wlc_ssid_t)); return err; } static int wl_iw_iscan(iscan_info_t *iscan, wlc_ssid_t *ssid, uint16 action) { int params_size = (WL_SCAN_PARAMS_FIXED_SIZE + OFFSETOF(wl_iscan_params_t, params)); wl_iscan_params_t *params; int err = 0; if (ssid && ssid->SSID_len) { params_size += sizeof(wlc_ssid_t); } params = (wl_iscan_params_t*)kmalloc(params_size, GFP_KERNEL); if (params == NULL) { return -ENOMEM; } memset(params, 0, params_size); ASSERT(params_size < WLC_IOCTL_SMLEN); err = wl_iw_iscan_prep(&params->params, ssid); if (!err) { params->version = htod32(ISCAN_REQ_VERSION); params->action = htod16(action); params->scan_duration = htod16(0); /* params_size += OFFSETOF(wl_iscan_params_t, params); */ (void) dev_iw_iovar_setbuf(iscan->dev, "iscan", params, params_size, iscan->ioctlbuf, WLC_IOCTL_SMLEN); } kfree(params); return err; } static uint32 wl_iw_iscan_get(iscan_info_t *iscan) { iscan_buf_t * buf; iscan_buf_t * ptr; wl_iscan_results_t * list_buf; wl_iscan_results_t list; 
wl_scan_results_t *results; uint32 status; /* buffers are allocated on demand */ if (iscan->list_cur) { buf = iscan->list_cur; iscan->list_cur = buf->next; } else { buf = kmalloc(sizeof(iscan_buf_t), GFP_KERNEL); if (!buf) return WL_SCAN_RESULTS_ABORTED; buf->next = NULL; if (!iscan->list_hdr) iscan->list_hdr = buf; else { ptr = iscan->list_hdr; while (ptr->next) { ptr = ptr->next; } ptr->next = buf; } } memset(buf->iscan_buf, 0, WLC_IW_ISCAN_MAXLEN); list_buf = (wl_iscan_results_t*)buf->iscan_buf; results = &list_buf->results; results->buflen = WL_ISCAN_RESULTS_FIXED_SIZE; results->version = 0; results->count = 0; memset(&list, 0, sizeof(list)); list.results.buflen = htod32(WLC_IW_ISCAN_MAXLEN); (void) dev_iw_iovar_getbuf( iscan->dev, "iscanresults", &list, WL_ISCAN_RESULTS_FIXED_SIZE, buf->iscan_buf, WLC_IW_ISCAN_MAXLEN); results->buflen = dtoh32(results->buflen); results->version = dtoh32(results->version); results->count = dtoh32(results->count); WL_TRACE(("results->count = %d\n", results->count)); WL_TRACE(("results->buflen = %d\n", results->buflen)); status = dtoh32(list_buf->status); return status; } static void wl_iw_send_scan_complete(iscan_info_t *iscan) { union iwreq_data wrqu; memset(&wrqu, 0, sizeof(wrqu)); /* wext expects to get no data for SIOCGIWSCAN Event */ wireless_send_event(iscan->dev, SIOCGIWSCAN, &wrqu, NULL); } static int _iscan_sysioc_thread(void *data) { uint32 status; iscan_info_t *iscan = (iscan_info_t *)data; DAEMONIZE("iscan_sysioc"); status = WL_SCAN_RESULTS_PARTIAL; while (down_interruptible(&iscan->sysioc_sem) == 0) { if (iscan->timer_on) { del_timer(&iscan->timer); iscan->timer_on = 0; } #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) rtnl_lock(); #endif status = wl_iw_iscan_get(iscan); #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) rtnl_unlock(); #endif switch (status) { case WL_SCAN_RESULTS_PARTIAL: WL_TRACE(("iscanresults incomplete\n")); #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) rtnl_lock(); #endif /* make 
sure our buffer size is enough before going next round */ wl_iw_iscan(iscan, NULL, WL_SCAN_ACTION_CONTINUE); #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) rtnl_unlock(); #endif /* Reschedule the timer */ iscan->timer.expires = jiffies + msecs_to_jiffies(iscan->timer_ms); add_timer(&iscan->timer); iscan->timer_on = 1; break; case WL_SCAN_RESULTS_SUCCESS: WL_TRACE(("iscanresults complete\n")); iscan->iscan_state = ISCAN_STATE_IDLE; wl_iw_send_scan_complete(iscan); break; case WL_SCAN_RESULTS_PENDING: WL_TRACE(("iscanresults pending\n")); /* Reschedule the timer */ iscan->timer.expires = jiffies + msecs_to_jiffies(iscan->timer_ms); add_timer(&iscan->timer); iscan->timer_on = 1; break; case WL_SCAN_RESULTS_ABORTED: WL_TRACE(("iscanresults aborted\n")); iscan->iscan_state = ISCAN_STATE_IDLE; wl_iw_send_scan_complete(iscan); break; default: WL_TRACE(("iscanresults returned unknown status %d\n", status)); break; } } complete_and_exit(&iscan->sysioc_exited, 0); } int wl_iw_attach(struct net_device *dev, void * dhdp) { iscan_info_t *iscan = NULL; if (!dev) return 0; iscan = kmalloc(sizeof(iscan_info_t), GFP_KERNEL); if (!iscan) return -ENOMEM; memset(iscan, 0, sizeof(iscan_info_t)); iscan->sysioc_pid = -1; /* we only care about main interface so save a global here */ g_iscan = iscan; iscan->dev = dev; iscan->iscan_state = ISCAN_STATE_IDLE; /* Set up the timer */ iscan->timer_ms = 2000; init_timer(&iscan->timer); iscan->timer.data = (ulong)iscan; iscan->timer.function = wl_iw_timerfunc; sema_init(&iscan->sysioc_sem, 0); init_completion(&iscan->sysioc_exited); iscan->sysioc_pid = kernel_thread(_iscan_sysioc_thread, iscan, 0); if (iscan->sysioc_pid < 0) return -ENOMEM; return 0; } void wl_iw_detach(void) { iscan_buf_t *buf; iscan_info_t *iscan = g_iscan; if (!iscan) return; if (iscan->sysioc_pid >= 0) { KILL_PROC(iscan->sysioc_pid, SIGTERM); wait_for_completion(&iscan->sysioc_exited); } while (iscan->list_hdr) { buf = iscan->list_hdr->next; kfree(iscan->list_hdr); 
iscan->list_hdr = buf; } kfree(iscan); g_iscan = NULL; } #endif /* USE_IW */
gpl-2.0
ZdrowyGosciu/kernel_lge_d802_v30d
drivers/staging/prima/CORE/BAP/src/bapModule.c
625
46287
/* * Copyright (c) 2012-2013 The Linux Foundation. All rights reserved. * * Previously licensed under the ISC license by Qualcomm Atheros, Inc. * * * Permission to use, copy, modify, and/or distribute this software for * any purpose with or without fee is hereby granted, provided that the * above copyright notice and this permission notice appear in all * copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR * PERFORMANCE OF THIS SOFTWARE. */ /* * This file was originally distributed by Qualcomm Atheros, Inc. * under proprietary terms before Copyright ownership was assigned * to the Linux Foundation. */ /*=========================================================================== b a p M o d u l e . C OVERVIEW: This software unit holds the implementation of the WLAN BAP modules Module support functions. It is also where the global BAP module context, and per-instance (returned in BAP_Open device open) contexts. The functions externalized by this module are to be called by the device specific BAP Shim Layer (BSL) (in HDD) which implements a stream device on a particular platform. DEPENDENCIES: Are listed for each API below. ===========================================================================*/ /*=========================================================================== EDIT HISTORY FOR FILE This section contains comments describing changes made to the module. Notice that changes are listed in reverse chronological order. 
$Header: /home/labuser/ampBlueZ_2/CORE/BAP/src/bapModule.c,v 1.1 2010/07/12 19:05:35 labuser Exp labuser $$DateTime$$Author: labuser $ when who what, where, why ---------- --- -------------------------------------------------------- 2008-09-15 jez Created module ===========================================================================*/ /*---------------------------------------------------------------------------- * Include Files * -------------------------------------------------------------------------*/ // Pull in some message types used by BTC #include "sirParams.h" //#include "halFwApi.h" #include "wlan_qct_tl.h" #include "vos_trace.h" // Pick up the sme callback registration API #include "sme_Api.h" #include "ccmApi.h" /* BT-AMP PAL API header file */ #include "bapApi.h" #include "bapInternal.h" // Pick up the BTAMP RSN definitions #include "bapRsnTxRx.h" //#include "assert.h" #include "bapApiTimer.h" #if defined(ANI_OS_TYPE_ANDROID) #include "bap_hdd_main.h" #endif //#define BAP_DEBUG /*---------------------------------------------------------------------------- * Preprocessor Definitions and Constants * -------------------------------------------------------------------------*/ //#define VOS_GET_BAP_CB(ctx) vos_get_context( VOS_MODULE_ID_BAP, ctx) /*---------------------------------------------------------------------------- * Type Declarations * -------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- * Global Data Definitions * -------------------------------------------------------------------------*/ // include the phy link state machine structure here static tWLAN_BAPbapPhysLinkMachine bapPhysLinkMachineInitial = BTAMPFSM_INSTANCEDATA_INIT; /*---------------------------------------------------------------------------- * External declarations for global context * -------------------------------------------------------------------------*/ // No! 
Get this from VOS. // The main per-Physical Link (per WLAN association) context. //tBtampContext btampCtx; ptBtampContext gpBtampCtx; // Include the Local AMP Info structure. tBtampHCI_AMP_Info btampHCI_AMP_Info; // Include the Local Data Block Size info structure. tBtampHCI_Data_Block_Size btampHCI_Data_Block_Size; // Include the Local Version info structure. tBtampHCI_Version_Info btampHCI_Version_Info; // Include the Local Supported Cmds info structure. tBtampHCI_Supported_Cmds btampHCI_Supported_Cmds; static unsigned char pBtStaOwnMacAddr[WNI_CFG_BSSID_LEN]; /*BT-AMP SSID; per spec should have this format: "AMP-00-0a-f5-04-05-08" */ #define WLAN_BAP_SSID_MAX_LEN 21 static char pBtStaOwnSsid[WLAN_BAP_SSID_MAX_LEN]; /*---------------------------------------------------------------------------- * Static Variable Definitions * -------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- * Static Function Declarations and Definitions * -------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- * Externalized Function Definitions * -------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- * Function Declarations and Documentation * -------------------------------------------------------------------------*/ /*========================================================================== FUNCTION WLANBAP_Open DESCRIPTION Called at driver initialization (vos_open). BAP will initialize all its internal resources and will wait for the call to start to register with the other modules. 
DEPENDENCIES PARAMETERS IN pvosGCtx: pointer to the global vos context; a handle to BAP's control block can be extracted from its context RETURN VALUE The result code associated with performing the operation VOS_STATUS_E_FAULT: pointer to BAP cb is NULL ; access would cause a page fault VOS_STATUS_SUCCESS: Everything is good :) SIDE EFFECTS ============================================================================*/ VOS_STATUS WLANBAP_Open ( v_PVOID_t pvosGCtx ) { ptBtampContext pBtampCtx = NULL; /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/ /*------------------------------------------------------------------------ Allocate (and sanity check?!) BAP control block ------------------------------------------------------------------------*/ vos_alloc_context(pvosGCtx, VOS_MODULE_ID_BAP, (v_VOID_t**)&pBtampCtx, sizeof(tBtampContext)); pBtampCtx = VOS_GET_BAP_CB(pvosGCtx); if ( NULL == pBtampCtx ) { VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR, "Invalid BAP pointer from pvosGCtx on WLANBAP_Open"); //"Failed to allocate BAP pointer from pvosGCtx on WLANBAP_Open"); return VOS_STATUS_E_FAULT; } /*------------------------------------------------------------------------ Clean up BAP control block, initialize all values ------------------------------------------------------------------------*/ VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "WLANBAP_Open"); WLANBAP_CleanCB(pBtampCtx, 0 /*do not empty*/); // Setup the "link back" to the VOSS context pBtampCtx->pvosGCtx = pvosGCtx; // Store a pointer to the BAP context provided by VOSS gpBtampCtx = pBtampCtx; /*------------------------------------------------------------------------ Allocate internal resources ------------------------------------------------------------------------*/ return VOS_STATUS_SUCCESS; }/* WLANBAP_Open */ /*========================================================================== FUNCTION WLANBAP_Start DESCRIPTION Called as part of the overall start 
procedure (vos_start). BAP will use this call to register with TL as the BAP entity for BT-AMP RSN frames. DEPENDENCIES PARAMETERS IN pvosGCtx: pointer to the global vos context; a handle to BAP's control block can be extracted from its context RETURN VALUE The result code associated with performing the operation VOS_STATUS_E_FAULT: pointer to BAP cb is NULL ; access would cause a page fault VOS_STATUS_SUCCESS: Everything is good :) Other codes can be returned as a result of a BAL failure; SIDE EFFECTS ============================================================================*/ VOS_STATUS WLANBAP_Start ( v_PVOID_t pvosGCtx ) { ptBtampContext pBtampCtx = NULL; VOS_STATUS vosStatus; /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/ /*------------------------------------------------------------------------ Sanity check Extract BAP control block ------------------------------------------------------------------------*/ pBtampCtx = VOS_GET_BAP_CB(pvosGCtx); if ( NULL == pBtampCtx ) { VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR, "Invalid BAP pointer from pvosGCtx on WLANBAP_Start"); return VOS_STATUS_E_FAULT; } /*------------------------------------------------------------------------ Register with TL as an BT-AMP RSN client ------------------------------------------------------------------------*/ VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "WLANBAP_Start TL register"); /*------------------------------------------------------------------------ Register with CSR for Roam (connection status) Events ------------------------------------------------------------------------*/ VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "WLANBAP_Start CSR Register"); /* Initialize the BAP Tx packet monitor timer */ WLANBAP_InitConnectionAcceptTimer (pBtampCtx ); WLANBAP_InitLinkSupervisionTimer(pBtampCtx); vosStatus = vos_timer_init( &pBtampCtx->bapTxPktMonitorTimer, VOS_TIMER_TYPE_SW, /* use this type */ 
WLANBAP_TxPacketMonitorHandler, pBtampCtx); vosStatus = vos_lock_init(&pBtampCtx->bapLock); if(!VOS_IS_STATUS_SUCCESS(vosStatus)) { VOS_TRACE(VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR,"Lock Init Fail"); } return vosStatus; }/* WLANBAP_Start */ /*========================================================================== FUNCTION WLANBAP_Stop DESCRIPTION Called by vos_stop to stop operation in BAP, before close. BAP will suspend all BT-AMP Protocol Adaption Layer operation and will wait for the close request to clean up its resources. DEPENDENCIES PARAMETERS IN pvosGCtx: pointer to the global vos context; a handle to BAP's control block can be extracted from its context RETURN VALUE The result code associated with performing the operation VOS_STATUS_E_FAULT: pointer to BAP cb is NULL ; access would cause a page fault VOS_STATUS_SUCCESS: Everything is good :) SIDE EFFECTS ============================================================================*/ VOS_STATUS WLANBAP_Stop ( v_PVOID_t pvosGCtx ) { ptBtampContext pBtampCtx = NULL; VOS_STATUS vosStatus = VOS_STATUS_SUCCESS; /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/ /*------------------------------------------------------------------------ Sanity check Extract BAP control block ------------------------------------------------------------------------*/ pBtampCtx = VOS_GET_BAP_CB(pvosGCtx); if ( NULL == pBtampCtx ) { VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR, "Invalid BAP pointer from pvosGCtx on WLANBAP_Stop"); return VOS_STATUS_E_FAULT; } /*------------------------------------------------------------------------ Stop BAP (de-register RSN handler!?) 
------------------------------------------------------------------------*/ vosStatus = WLANBAP_DeinitConnectionAcceptTimer(pBtampCtx); if ( VOS_STATUS_SUCCESS != vosStatus) { VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR, "Couldn't destroy bapConnectionAcceptTimer"); } vosStatus = WLANBAP_DeinitLinkSupervisionTimer(pBtampCtx); if ( VOS_STATUS_SUCCESS != vosStatus) { VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR, "Couldn't destroy bapLinkSupervisionTimer"); } vosStatus = vos_timer_destroy ( &pBtampCtx->bapTxPktMonitorTimer ); if ( VOS_STATUS_SUCCESS != vosStatus) { VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR, "Couldn't destroy bapTxPktMonitorTimer"); } vos_lock_destroy(&pBtampCtx->bapLock); return VOS_STATUS_SUCCESS; }/* WLANBAP_Stop */ /*========================================================================== FUNCTION WLANBAP_Close DESCRIPTION Called by vos_close during general driver close procedure. BAP will clean up all the internal resources. DEPENDENCIES PARAMETERS IN pvosGCtx: pointer to the global vos context; a handle to BAP's control block can be extracted from its context RETURN VALUE The result code associated with performing the operation VOS_STATUS_E_FAULT: pointer to BAP cb is NULL ; access would cause a page fault VOS_STATUS_SUCCESS: Everything is good :) SIDE EFFECTS ============================================================================*/ VOS_STATUS WLANBAP_Close ( v_PVOID_t pvosGCtx ) { ptBtampContext pBtampCtx = NULL; /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/ /*------------------------------------------------------------------------ Sanity check Extract BAP control block ------------------------------------------------------------------------*/ pBtampCtx = VOS_GET_BAP_CB(pvosGCtx); if ( NULL == pBtampCtx ) { VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR, "Invalid BAP pointer from pvosGCtx on WLANBAP_Close"); return VOS_STATUS_E_FAULT; } 
/*------------------------------------------------------------------------ Cleanup BAP control block. ------------------------------------------------------------------------*/ VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "WLANBAP_Close"); WLANBAP_CleanCB(pBtampCtx, 1 /* empty queues/lists/pkts if any*/); #if defined(ANI_OS_TYPE_ANDROID) && defined(WLAN_BTAMP_FEATURE) BSL_Deinit(pvosGCtx); #endif /*------------------------------------------------------------------------ Free BAP context from VOSS global ------------------------------------------------------------------------*/ vos_free_context(pvosGCtx, VOS_MODULE_ID_BAP, pBtampCtx); return VOS_STATUS_SUCCESS; }/* WLANBAP_Close */ /*---------------------------------------------------------------------------- HDD interfaces - Per instance initialization ---------------------------------------------------------------------------*/ /*========================================================================== FUNCTION WLANBAP_GetNewHndl DESCRIPTION Called by HDD at driver open (BSL_Open). BAP will initialize allocate a per-instance "file handle" equivalent for this specific open call. There should only ever be one call to BSL_Open. Since the open app user is the BT stack. DEPENDENCIES PARAMETERS IN hBtampHandle: Handle to return btampHandle value in. 
RETURN VALUE The result code associated with performing the operation VOS_STATUS_E_FAULT: pointer to BAP cb is NULL ; access would cause a page fault VOS_STATUS_SUCCESS: Everything is good :) SIDE EFFECTS ============================================================================*/ VOS_STATUS WLANBAP_GetNewHndl ( ptBtampHandle *hBtampHandle /* Handle to return btampHandle value in */ ) { ptBtampContext btampContext = NULL; /*------------------------------------------------------------------------ Sanity check params ------------------------------------------------------------------------*/ if ( NULL == hBtampHandle) { VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR, "Invalid BAP handle pointer in WLANBAP_GetNewHndl"); return VOS_STATUS_E_FAULT; } #ifndef BTAMP_MULTIPLE_PHY_LINKS /*------------------------------------------------------------------------ Sanity check the BAP control block pointer ------------------------------------------------------------------------*/ if ( NULL == gpBtampCtx ) { VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR, "Invalid BAP pointer in WLANBAP_GetNewHndl"); return VOS_STATUS_E_FAULT; } //*hBtampHandle = (ptBtampHandle) &btampCtx; /* return a pointer to the tBtampContext structure - allocated by VOS for us */ *hBtampHandle = (ptBtampHandle) gpBtampCtx; btampContext = gpBtampCtx; /* Update the MAC address and SSID if in case the Read Local AMP Assoc * Request is made before Create Physical Link creation. */ WLANBAP_ReadMacConfig (btampContext); return VOS_STATUS_SUCCESS; #else // defined(BTAMP_MULTIPLE_PHY_LINKS) #endif //BTAMP_MULTIPLE_PHY_LINKS }/* WLANBAP_GetNewHndl */ /*========================================================================== FUNCTION WLANBAP_ReleaseHndl DESCRIPTION Called by HDD at driver open (BSL_Close). BAP will reclaim (invalidate) the "file handle" passed into this call. DEPENDENCIES PARAMETERS IN btampHandle: btampHandle value to invalidate. 
RETURN VALUE The result code associated with performing the operation VOS_STATUS_E_FAULT: btampHandle is NULL ; access would cause a page fault VOS_STATUS_SUCCESS: Everything is good :) SIDE EFFECTS ============================================================================*/ VOS_STATUS WLANBAP_ReleaseHndl ( ptBtampHandle btampHandle /* btamp handle value to release */ ) { /* obtain btamp Context */ ptBtampContext btampContext = (ptBtampContext) btampHandle; tHalHandle halHandle; eHalStatus halStatus = eHAL_STATUS_SUCCESS; /*------------------------------------------------------------------------ Sanity check params ------------------------------------------------------------------------*/ if ( NULL == btampHandle) { VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR, "Invalid BAP handle value in WLANBAP_ReleaseHndl"); return VOS_STATUS_E_FAULT; } /* JEZ081001: TODO: Major: */ /* Check to see if any wireless associations are still active */ /* ...if so, I have to call * sme_RoamDisconnect(VOS_GET_HAL_CB(btampHandle->pvosGCtx), * btampHandle->sessionId, * eCSR_DISCONNECT_REASON_UNSPECIFIED); * on all of them */ halHandle = VOS_GET_HAL_CB(btampContext->pvosGCtx); if(NULL == halHandle) { VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR, "halHandle is NULL in %s", __func__); return VOS_STATUS_E_FAULT; } if( btampContext->isBapSessionOpen == TRUE ) { halStatus = sme_CloseSession(halHandle, btampContext->sessionId, NULL, NULL); if(eHAL_STATUS_SUCCESS == halStatus) { btampContext->isBapSessionOpen = FALSE; } } /* release the btampHandle */ return VOS_STATUS_SUCCESS; }/* WLANBAP_ReleaseHndl */ /*---------------------------------------------------------------------------- * Utility Function implementations * -------------------------------------------------------------------------*/ /*========================================================================== FUNCTION WLANBAP_CleanCB DESCRIPTION Clear out all fields in the BAP context. 
DEPENDENCIES PARAMETERS IN pBtampCtx: pointer to the BAP control block freeFlag: flag indicating whether to free any allocations. RETURN VALUE The result code associated with performing the operation VOS_STATUS_E_FAULT: pointer to BAP cb is NULL ; access would cause a page fault VOS_STATUS_SUCCESS: Everything is good :) SIDE EFFECTS ============================================================================*/ VOS_STATUS WLANBAP_CleanCB ( ptBtampContext pBtampCtx, v_U32_t freeFlag // 0 /*do not empty*/); ) { v_U16_t i; /* Logical Link index */ tpBtampLogLinkCtx pLogLinkContext = NULL; /*------------------------------------------------------------------------ Sanity check BAP control block ------------------------------------------------------------------------*/ if ( NULL == pBtampCtx ) { VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR, "Invalid BAP pointer in WLANBAP_CleanCB"); return VOS_STATUS_E_FAULT; } /*------------------------------------------------------------------------ Clean up BAP control block, initialize all values ------------------------------------------------------------------------*/ VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "WLANBAP_CleanCB"); // First, clear out EVERYTHING in the BT-AMP context vos_mem_set( pBtampCtx, sizeof( *pBtampCtx), 0); pBtampCtx->pvosGCtx = NULL; // Initialize physical link state machine to DISCONNECTED state //pBtampCtx->bapPhysLinkMachine = BTAMPFSM_INSTANCEDATA_INIT; // Initialize physical link state machine to DISCONNECTED state vos_mem_copy( &pBtampCtx->bapPhysLinkMachine, &bapPhysLinkMachineInitial, /* BTAMPFSM_INSTANCEDATA_INIT; */ sizeof( pBtampCtx->bapPhysLinkMachine)); VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "%s: Initializing State: %d", __func__, bapPhysLinkMachineInitial.stateVar); VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "%s: Initialized State: %d", __func__, pBtampCtx->bapPhysLinkMachine.stateVar); //VOS_TRACE( VOS_MODULE_ID_BAP, 
VOS_TRACE_LEVEL_INFO_HIGH, "%s: btampContext value: %x", __func__, pBtampCtx); #ifdef BAP_DEBUG /* Trace the tBtampCtx being passed in. */ VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "WLAN BAP Context Monitor: pBtampCtx value = %x in %s:%d", pBtampCtx, __func__, __LINE__ ); #endif //BAP_DEBUG pBtampCtx->sessionId = 0; pBtampCtx->pAppHdl = NULL; // Per-app BSL context pBtampCtx->pHddHdl = NULL; // Per-app BSL context /* 8 bits of phy_link_handle identifies this association */ pBtampCtx->phy_link_handle = 0; pBtampCtx->channel = 0; pBtampCtx->BAPDeviceRole = BT_RESPONDER; pBtampCtx->ucSTAId = 0; // gNeedPhysLinkCompEvent pBtampCtx->gNeedPhysLinkCompEvent = VOS_FALSE; // gPhysLinkStatus pBtampCtx->gPhysLinkStatus = WLANBAP_STATUS_SUCCESS; // gDiscRequested pBtampCtx->gDiscRequested = VOS_FALSE; // gDiscReason pBtampCtx->gDiscReason = WLANBAP_STATUS_SUCCESS; /* Connection Accept Timer interval*/ pBtampCtx->bapConnectionAcceptTimerInterval = WLANBAP_CONNECTION_ACCEPT_TIMEOUT; /* Link Supervision Timer interval*/ pBtampCtx->bapLinkSupervisionTimerInterval = WLANBAP_LINK_SUPERVISION_TIMEOUT; /* Logical Link Accept Timer interval*/ pBtampCtx->bapLogicalLinkAcceptTimerInterval = WLANBAP_LOGICAL_LINK_ACCEPT_TIMEOUT; /* Best Effort Flush timer interval*/ pBtampCtx->bapBEFlushTimerInterval = WLANBAP_BE_FLUSH_TIMEOUT; // Include the associations MAC addresses vos_mem_copy( pBtampCtx->self_mac_addr, pBtStaOwnMacAddr, /* Where do I get the current MAC address? 
*/ sizeof(pBtampCtx->self_mac_addr)); vos_mem_set( pBtampCtx->peer_mac_addr, sizeof(pBtampCtx->peer_mac_addr), 0); // The array of logical links pBtampCtx->current_log_link_index = 0; /* assigned mod 16 */ pBtampCtx->total_log_link_index = 0; /* should never be >16 */ // Clear up the array of logical links for (i = 0; i < WLANBAP_MAX_LOG_LINKS ; i++) { pLogLinkContext = &pBtampCtx->btampLogLinkCtx[i]; pLogLinkContext->present = 0; pLogLinkContext->uTxPktCompleted = 0; pLogLinkContext->log_link_handle = 0; } // Include the HDD BAP Shim Layer callbacks for Fetch, TxComp, and RxPkt pBtampCtx->pfnBtampFetchPktCB = NULL; pBtampCtx->pfnBtamp_STARxCB = NULL; pBtampCtx->pfnBtampTxCompCB = NULL; /* Implements the callback for ALL asynchronous events. */ pBtampCtx->pBapHCIEventCB = NULL; /* Set the default for event mask */ vos_mem_set( pBtampCtx->event_mask_page_2, sizeof(pBtampCtx->event_mask_page_2), 0); /* Set the default for location data. */ pBtampCtx->btamp_Location_Data_Info.loc_options = 0x58; /* Set the default data transfer mode */ pBtampCtx->ucDataTrafficMode = WLANBAP_FLOW_CONTROL_MODE_BLOCK_BASED; return VOS_STATUS_SUCCESS; }/* WLANBAP_CleanCB */ /*========================================================================== FUNCTION WLANBAP_GetCtxFromStaId DESCRIPTION Called inside the BT-AMP PAL (BAP) layer whenever we need either the BSL context or the BTAMP context from the StaId. DEPENDENCIES PARAMETERS IN ucSTAId: The StaId (used by TL, PE, and HAL) OUT hBtampHandle: Handle (pointer to a pointer) to return the btampHandle value in. hHddHdl: Handle to return the BSL context pointer in. 
RETURN VALUE The result code associated with performing the operation VOS_STATUS_E_FAULT: NULL pointer; access would cause a page fault VOS_STATUS_SUCCESS: Everything is good :) SIDE EFFECTS ============================================================================*/ VOS_STATUS WLANBAP_GetCtxFromStaId ( v_U8_t ucSTAId, /* The StaId (used by TL, PE, and HAL) */ ptBtampHandle *hBtampHandle, /* Handle to return per app btampHandle value in */ ptBtampContext *hBtampContext, /* Handle to return per assoc btampContext value in */ v_PVOID_t *hHddHdl /* Handle to return BSL context in */ ) { #ifndef BTAMP_MULTIPLE_PHY_LINKS /* For now, we know there is only one application context */ /* ...and only one physical link context */ //*hBtampHandle = &((ptBtampContext) btampCtx); //*hBtampHandle = &btampCtx; *hBtampHandle = (v_VOID_t*)gpBtampCtx; //*hBtampContext = &btampCtx; *hBtampContext = gpBtampCtx; /* Handle to return BSL context in */ //*hHddHdl = btampCtx.pHddHdl; *hHddHdl = gpBtampCtx->pHddHdl; return VOS_STATUS_SUCCESS; #else // defined(BTAMP_MULTIPLE_PHY_LINKS) #endif //BTAMP_MULTIPLE_PHY_LINKS }/* WLANBAP_GetCtxFromStaId */ /*========================================================================== FUNCTION WLANBAP_GetStaIdFromLinkCtx DESCRIPTION Called inside the BT-AMP PAL (BAP) layer whenever we need the StaId (or hHddHdl) from the BTAMP context and phy_link_handle. DEPENDENCIES PARAMETERS IN hBtampHandle: Handle (pointer to a pointer) to return the btampHandle value in. phy_link_handle: physical link handle value. Unique per assoc. OUT pucSTAId: The StaId (used by TL, PE, and HAL) hHddHdl: Handle to return the BSL context pointer in. 
RETURN VALUE The result code associated with performing the operation VOS_STATUS_E_FAULT: NULL pointer; access would cause a page fault VOS_STATUS_SUCCESS: Everything is good :) SIDE EFFECTS ============================================================================*/ VOS_STATUS WLANBAP_GetStaIdFromLinkCtx ( ptBtampHandle btampHandle, /* btampHandle value in */ v_U8_t phy_link_handle, /* phy_link_handle value in */ v_U8_t *pucSTAId, /* The StaId (used by TL, PE, and HAL) */ v_PVOID_t *hHddHdl /* Handle to return BSL context */ ) { #ifndef BTAMP_MULTIPLE_PHY_LINKS ptBtampContext pBtampCtx = (ptBtampContext) btampHandle; /*------------------------------------------------------------------------ Sanity check params ------------------------------------------------------------------------*/ if ( NULL == pBtampCtx) { VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR, "Invalid BAP handle value in %s", __func__); return VOS_STATUS_E_FAULT; } /* Since there is only one physical link...we have stored all * the physical link specific context in the application context */ /* The StaId (used by TL, PE, and HAL) */ *pucSTAId = pBtampCtx->ucSTAId; /* Handle to return BSL context */ *hHddHdl = pBtampCtx->pHddHdl; return VOS_STATUS_SUCCESS; #else // defined(BTAMP_MULTIPLE_PHY_LINKS) #endif //BTAMP_MULTIPLE_PHY_LINKS }/* WLANBAP_GetStaIdFromLinkCtx */ /*========================================================================== FUNCTION WLANBAP_CreateNewPhyLinkCtx DESCRIPTION Called in order to create (or update) a BAP Physical Link "context" DEPENDENCIES PARAMETERS IN btampHandle: BAP app context handle phy_link_handle: phy_link_handle from the Command pHddHdl: BSL passes in its specific context OUT hBtampContext: Handle (pointer to a pointer) to return the per "Phy Link" ptBtampContext value in. 
RETURN VALUE The result code associated with performing the operation VOS_STATUS_E_FAULT: NULL pointer; access would cause a page fault VOS_STATUS_SUCCESS: Everything is good :) SIDE EFFECTS ============================================================================*/ VOS_STATUS WLANBAP_CreateNewPhyLinkCtx ( ptBtampHandle btampHandle, v_U8_t phy_link_handle, /* I get phy_link_handle from the Command */ v_PVOID_t pHddHdl, /* BSL passes in its specific context */ ptBtampContext *hBtampContext, /* Handle to return per assoc btampContext value in */ tWLAN_BAPRole BAPDeviceRole ) { #ifndef BTAMP_MULTIPLE_PHY_LINKS ptBtampContext pBtampCtx = gpBtampCtx; /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/ /* Read and Set MAC address and SSID to BT-AMP context */ WLANBAP_ReadMacConfig (pBtampCtx); /*------------------------------------------------------------------------ For now, presume security is not enabled. ------------------------------------------------------------------------*/ pBtampCtx->ucSecEnabled = WLANBAP_SECURITY_ENABLED_STATE; /*------------------------------------------------------------------------ Initial Short Range Mode for this physical link is 'disabled' ------------------------------------------------------------------------*/ pBtampCtx->phy_link_srm = 0; /*------------------------------------------------------------------------ Clear out the logical links. ------------------------------------------------------------------------*/ pBtampCtx->current_log_link_index = 0; pBtampCtx->total_log_link_index = 0; /*------------------------------------------------------------------------ Now configure the roaming profile links. To SSID and bssid. ------------------------------------------------------------------------*/ // We have room for two SSIDs. pBtampCtx->csrRoamProfile.SSIDs.numOfSSIDs = 1; // This is true for now. 
pBtampCtx->csrRoamProfile.SSIDs.SSIDList = pBtampCtx->SSIDList; //Array of two pBtampCtx->csrRoamProfile.SSIDs.SSIDList[0].SSID.length = 0; pBtampCtx->csrRoamProfile.SSIDs.SSIDList[0].handoffPermitted = VOS_FALSE; pBtampCtx->csrRoamProfile.SSIDs.SSIDList[0].ssidHidden = VOS_FALSE; pBtampCtx->csrRoamProfile.BSSIDs.numOfBSSIDs = 1; // This is true for now. pBtampCtx->csrRoamProfile.BSSIDs.bssid = &pBtampCtx->bssid; // Now configure the auth type in the roaming profile. To open. //pBtampCtx->csrRoamProfile.AuthType = eCSR_AUTH_TYPE_OPEN_SYSTEM; // open is the default //pBtampCtx->csrRoamProfile.negotiatedAuthType = eCSR_AUTH_TYPE_OPEN_SYSTEM; // open is the default pBtampCtx->csrRoamProfile.negotiatedAuthType = eCSR_AUTH_TYPE_RSN_PSK; pBtampCtx->csrRoamProfile.negotiatedUCEncryptionType = eCSR_ENCRYPT_TYPE_AES; pBtampCtx->phy_link_handle = phy_link_handle; /* For now, we know there is only one physical link context */ //*hBtampContext = &btampCtx; pBtampCtx->pHddHdl = pHddHdl; *hBtampContext = pBtampCtx; VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR, "Btamp Ctxt = %p", pBtampCtx); return VOS_STATUS_SUCCESS; #else // defined(BTAMP_MULTIPLE_PHY_LINKS) #endif //BTAMP_MULTIPLE_PHY_LINKS }/* WLANBAP_CreateNewPhyLinkCtx */ /*========================================================================== FUNCTION WLANBAP_UpdatePhyLinkCtxStaId DESCRIPTION Called to update the STAId value associated with Physical Link "context" DEPENDENCIES PARAMETERS IN pBtampContext: ptBtampContext to update. 
ucSTAId: The StaId (used by TL, PE, and HAL) RETURN VALUE The result code associated with performing the operation VOS_STATUS_E_FAULT: NULL pointer; access would cause a page fault VOS_STATUS_SUCCESS: Everything is good :) SIDE EFFECTS ============================================================================*/ VOS_STATUS WLANBAP_UpdatePhyLinkCtxStaId ( ptBtampContext pBtampContext, /* btampContext value in */ v_U8_t ucSTAId ) { #ifndef BTAMP_MULTIPLE_PHY_LINKS /*------------------------------------------------------------------------ Sanity check params ------------------------------------------------------------------------*/ if ( NULL == pBtampContext) { VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR, "Invalid BAP handle value in %s", __func__); return VOS_STATUS_E_FAULT; } /* The StaId (used by TL, PE, and HAL) */ pBtampContext->ucSTAId = ucSTAId; return VOS_STATUS_SUCCESS; #else // defined(BTAMP_MULTIPLE_PHY_LINKS) #endif //BTAMP_MULTIPLE_PHY_LINKS }/* WLANBAP_UpdatePhyLinkCtxStaId */ v_U8_t bapAllocNextLogLinkIndex ( ptBtampContext pBtampContext, /* Pointer to the per assoc btampContext value */ v_U8_t phy_link_handle /* I get phy_link_handle from the Command */ ) { return ++(pBtampContext->current_log_link_index) % WLANBAP_MAX_LOG_LINKS; }/* bapAllocNextLogLinkIndex */ /*========================================================================== FUNCTION WLANBAP_CreateNewLogLinkCtx DESCRIPTION Called in order to allocate a BAP Logical Link "context" and "index" DEPENDENCIES PARAMETERS IN pBtampContext: Pointer to the ptBtampContext value in. 
phy_link_handle: phy_link_handle involved OUT pLog_link_handle: return the log_link_handle here RETURN VALUE The result code associated with performing the operation VOS_STATUS_E_FAULT: NULL pointer; access would cause a page fault VOS_STATUS_SUCCESS: Everything is good :) SIDE EFFECTS ============================================================================*/ VOS_STATUS WLANBAP_CreateNewLogLinkCtx ( ptBtampContext pBtampContext, /* Pointer to the per assoc btampContext value */ v_U8_t phy_link_handle, /* I get phy_link_handle from the Command */ v_U8_t tx_flow_spec[18], v_U8_t rx_flow_spec[18], v_U16_t *pLog_link_handle /* Return the logical link index here */ ) { #ifndef BTAMP_MULTIPLE_PHY_LINKS v_U16_t i; /* Logical Link index */ tpBtampLogLinkCtx pLogLinkContext; v_U32_t retval; /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/ /*------------------------------------------------------------------------ For now, allocate the logical links serially. ------------------------------------------------------------------------*/ i = pBtampContext->current_log_link_index = bapAllocNextLogLinkIndex(pBtampContext, phy_link_handle); pBtampContext->total_log_link_index++; *pLog_link_handle = (i << 8) + ( v_U16_t ) phy_link_handle ; /* Return the logical link index here */ VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO, " %s:*pLog_link_handle=%x", __func__,*pLog_link_handle); /*------------------------------------------------------------------------ Evaluate the Tx and Rx Flow specification for this logical link. ------------------------------------------------------------------------*/ // Currently we only support flow specs with service types of BE (0x01) #ifdef BAP_DEBUG /* Trace the tBtampCtx being passed in. 
*/ VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "WLAN BAP Context Monitor: pBtampContext value = %p in %s:%d", pBtampContext, __func__, __LINE__ ); #endif //BAP_DEBUG /*------------------------------------------------------------------------ Now configure the Logical Link context. ------------------------------------------------------------------------*/ pLogLinkContext = &(pBtampContext->btampLogLinkCtx[i]); /* Extract Tx flow spec into the context structure */ retval = btampUnpackTlvFlow_Spec((void *)pBtampContext, tx_flow_spec, WLAN_BAP_PAL_FLOW_SPEC_TLV_LEN, &pLogLinkContext->btampFlowSpec); if (retval != BTAMP_PARSE_SUCCESS) { /* Flow spec parsing failed, return failure */ return VOS_STATUS_E_BADMSG; } /* Save the Logical link handle in the logical link context As of now, only the index is saved as logical link handle since same is returned in the event. FIXME: Decide whether this index has to be combined with physical link handle to generate the Logical link handle. */ pLogLinkContext->log_link_handle = *pLog_link_handle; // Mark this entry as OCCUPIED pLogLinkContext->present = VOS_TRUE; // Now initialize the Logical Link context pLogLinkContext->btampAC = 1; // Now initialize the values in the Logical Link context pLogLinkContext->ucTID = 0; // Currently we only support BE TID (0x00) pLogLinkContext->ucUP = 0; pLogLinkContext->uTxPktCompleted = 0; return VOS_STATUS_SUCCESS; #else // defined(BTAMP_MULTIPLE_PHY_LINKS) #endif //BTAMP_MULTIPLE_PHY_LINKS }/* WLANBAP_CreateNewLogLinkCtx */ /*========================================================================== FUNCTION WLANBAP_pmcFullPwrReqCB DESCRIPTION Callback provide to PMC in the pmcRequestFullPower API. 
DEPENDENCIES PARAMETERS IN callbackContext: The user passed in a context to identify status: The halStatus RETURN VALUE None SIDE EFFECTS ============================================================================*/ void WLANBAP_pmcFullPwrReqCB ( void *callbackContext, eHalStatus status ) { }/* WLANBAP_pmcFullPwrReqCB */ /*========================================================================== FUNCTION WLANBAP_ReadMacConfig DESCRIPTION This function sets the MAC config (Address and SSID to BT-AMP context DEPENDENCIES PARAMETERS pvosGCtx: pointer to the global vos context; a handle to BAP's control block can be extracted from its context RETURN VALUE None SIDE EFFECTS ============================================================================*/ void WLANBAP_ReadMacConfig ( ptBtampContext pBtampCtx ) { tANI_U32 len = WNI_CFG_BSSID_LEN; tHalHandle pMac = NULL; /*------------------------------------------------------------------------ Temporary method to get the self MAC address ------------------------------------------------------------------------*/ if (NULL == pBtampCtx) { VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR, "pBtampCtx is NULL in %s", __func__); return; } pMac = (tHalHandle)vos_get_context( VOS_MODULE_ID_SME, pBtampCtx->pvosGCtx); if (NULL == pMac) { VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR, "pMac is NULL in %s", __func__); return; } ccmCfgGetStr( pMac, WNI_CFG_STA_ID, pBtStaOwnMacAddr, &len ); VOS_ASSERT( WNI_CFG_BSSID_LEN == len ); /* Form the SSID from Mac address */ VOS_SNPRINTF( pBtStaOwnSsid, WLAN_BAP_SSID_MAX_LEN, "AMP-%02x-%02x-%02x-%02x-%02x-%02x", pBtStaOwnMacAddr[0], pBtStaOwnMacAddr[1], pBtStaOwnMacAddr[2], pBtStaOwnMacAddr[3], pBtStaOwnMacAddr[4], pBtStaOwnMacAddr[5]); /*------------------------------------------------------------------------ Set the MAC address for this instance ------------------------------------------------------------------------*/ vos_mem_copy( pBtampCtx->self_mac_addr, pBtStaOwnMacAddr, 
sizeof(pBtampCtx->self_mac_addr)); /*------------------------------------------------------------------------ Set our SSID value ------------------------------------------------------------------------*/ pBtampCtx->ownSsidLen = 21; vos_mem_copy( pBtampCtx->ownSsid, pBtStaOwnSsid, pBtampCtx->ownSsidLen); } /*========================================================================== FUNCTION WLANBAP_NeedBTCoexPriority DESCRIPTION This function will cause a message to be sent to BTC firmware if a change in priority has occurred. (From AMP's point-of-view.) DEPENDENCIES PARAMETERS pvosGCtx: pointer to the global vos context; a handle to HAL's control block can be extracted from its context RETURN VALUE None SIDE EFFECTS ============================================================================*/ // Global static int gBapCoexPriority; void WLANBAP_NeedBTCoexPriority ( ptBtampContext pBtampCtx, v_U32_t needCoexPriority ) { tHalHandle pMac = NULL; tSmeBtAmpEvent btAmpEvent; /*------------------------------------------------------------------------ Retrieve the pMac (HAL context) ------------------------------------------------------------------------*/ pMac = (tHalHandle)vos_get_context( VOS_MODULE_ID_SME, pBtampCtx->pvosGCtx); // Is re-entrancy protection needed for this? 
if (needCoexPriority != gBapCoexPriority) { VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "Calling %s with needCoexPriority=%d.", __func__, needCoexPriority); gBapCoexPriority = needCoexPriority; switch ( needCoexPriority) { case 0: /* Idle */ btAmpEvent.btAmpEventType = BTAMP_EVENT_CONNECTION_TERMINATED; pBtampCtx->btamp_session_on = FALSE; sme_sendBTAmpEvent(pMac, btAmpEvent); break; case 1: /* Associating */ btAmpEvent.btAmpEventType = BTAMP_EVENT_CONNECTION_START; pBtampCtx->btamp_session_on = TRUE; sme_sendBTAmpEvent(pMac, btAmpEvent); break; case 2: /* Post-assoc */ btAmpEvent.btAmpEventType = BTAMP_EVENT_CONNECTION_STOP; sme_sendBTAmpEvent(pMac, btAmpEvent); break; default: VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR, "%s: Invalid Coexistence priority request: %d", __func__, needCoexPriority); } } } /*========================================================================== FUNCTION WLANBAP_RxCallback DESCRIPTION This function is called by TL call this function for all frames except for Data frames DEPENDENCIES PARAMETERS pvosGCtx: pointer to the global vos context; a handle to BAP's control block can be extracted from its context pPacket Vos packet frameType Frame type RETURN VALUE None SIDE EFFECTS ============================================================================*/ VOS_STATUS WLANBAP_RxCallback ( v_PVOID_t pvosGCtx, vos_pkt_t *pPacket, WLANTL_BAPFrameEnumType frameType ) { ptBtampContext pBtampCtx = NULL; pBtampCtx = VOS_GET_BAP_CB(pvosGCtx); if ( NULL == pBtampCtx ) { VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR, "Invalid BAP pointer from pvosGCtx on WLANBAP_Start"); return VOS_STATUS_E_FAULT; } switch (frameType) { case WLANTL_BT_AMP_TYPE_LS_REQ: /* Fall through */ case WLANTL_BT_AMP_TYPE_LS_REP: { /* Link supervision frame, process this frame */ VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "%s: link Supervision packet received over TL: %d, => BAP", __func__, frameType); 
WLANBAP_RxProcLsPkt((ptBtampHandle)pBtampCtx, pBtampCtx->phy_link_handle, frameType, pPacket); break; } case WLANTL_BT_AMP_TYPE_AR: /* Fall through */ case WLANTL_BT_AMP_TYPE_SEC: { /* Call the RSN callback handler */ bapRsnRxCallback (pvosGCtx, pPacket); break; } default: VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR, "%s: Invalid frametype from TL: %d, => BAP", __func__, frameType); } return ( VOS_STATUS_SUCCESS ); }
gpl-2.0
techomancer/kernel-galaxytab
net/sched/em_u32.c
1905
1469
/* * net/sched/em_u32.c U32 Ematch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Authors: Thomas Graf <tgraf@suug.ch> * Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> * * Based on net/sched/cls_u32.c */ #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/skbuff.h> #include <net/pkt_cls.h> static int em_u32_match(struct sk_buff *skb, struct tcf_ematch *em, struct tcf_pkt_info *info) { struct tc_u32_key *key = (struct tc_u32_key *) em->data; const unsigned char *ptr = skb_network_header(skb); if (info) { if (info->ptr) ptr = info->ptr; ptr += (info->nexthdr & key->offmask); } ptr += key->off; if (!tcf_valid_offset(skb, ptr, sizeof(u32))) return 0; return !(((*(__be32*) ptr) ^ key->val) & key->mask); } static struct tcf_ematch_ops em_u32_ops = { .kind = TCF_EM_U32, .datalen = sizeof(struct tc_u32_key), .match = em_u32_match, .owner = THIS_MODULE, .link = LIST_HEAD_INIT(em_u32_ops.link) }; static int __init init_em_u32(void) { return tcf_em_register(&em_u32_ops); } static void __exit exit_em_u32(void) { tcf_em_unregister(&em_u32_ops); } MODULE_LICENSE("GPL"); module_init(init_em_u32); module_exit(exit_em_u32); MODULE_ALIAS_TCF_EMATCH(TCF_EM_U32);
gpl-2.0
kozmikkick/flounder
arch/xtensa/kernel/ptrace.c
2673
8381
// TODO some minor issues /* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2001 - 2007 Tensilica Inc. * * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com> * Chris Zankel <chris@zankel.net> * Scott Foehner<sfoehner@yahoo.com>, * Kevin Chea * Marc Gauthier<marc@tensilica.com> <marc@alumni.uwaterloo.ca> */ #include <linux/kernel.h> #include <linux/sched.h> #include <linux/mm.h> #include <linux/errno.h> #include <linux/ptrace.h> #include <linux/smp.h> #include <linux/security.h> #include <linux/signal.h> #include <asm/pgtable.h> #include <asm/page.h> #include <asm/uaccess.h> #include <asm/ptrace.h> #include <asm/elf.h> #include <asm/coprocessor.h> void user_enable_single_step(struct task_struct *child) { child->ptrace |= PT_SINGLESTEP; } void user_disable_single_step(struct task_struct *child) { child->ptrace &= ~PT_SINGLESTEP; } /* * Called by kernel/ptrace.c when detaching to disable single stepping. */ void ptrace_disable(struct task_struct *child) { /* Nothing to do.. 
*/ } int ptrace_getregs(struct task_struct *child, void __user *uregs) { struct pt_regs *regs = task_pt_regs(child); xtensa_gregset_t __user *gregset = uregs; unsigned long wb = regs->windowbase; int i; if (!access_ok(VERIFY_WRITE, uregs, sizeof(xtensa_gregset_t))) return -EIO; __put_user(regs->pc, &gregset->pc); __put_user(regs->ps & ~(1 << PS_EXCM_BIT), &gregset->ps); __put_user(regs->lbeg, &gregset->lbeg); __put_user(regs->lend, &gregset->lend); __put_user(regs->lcount, &gregset->lcount); __put_user(regs->windowstart, &gregset->windowstart); __put_user(regs->windowbase, &gregset->windowbase); __put_user(regs->threadptr, &gregset->threadptr); for (i = 0; i < XCHAL_NUM_AREGS; i++) __put_user(regs->areg[i], gregset->a + ((wb * 4 + i) % XCHAL_NUM_AREGS)); return 0; } int ptrace_setregs(struct task_struct *child, void __user *uregs) { struct pt_regs *regs = task_pt_regs(child); xtensa_gregset_t *gregset = uregs; const unsigned long ps_mask = PS_CALLINC_MASK | PS_OWB_MASK; unsigned long ps; unsigned long wb, ws; if (!access_ok(VERIFY_WRITE, uregs, sizeof(xtensa_gregset_t))) return -EIO; __get_user(regs->pc, &gregset->pc); __get_user(ps, &gregset->ps); __get_user(regs->lbeg, &gregset->lbeg); __get_user(regs->lend, &gregset->lend); __get_user(regs->lcount, &gregset->lcount); __get_user(ws, &gregset->windowstart); __get_user(wb, &gregset->windowbase); __get_user(regs->threadptr, &gregset->threadptr); regs->ps = (regs->ps & ~ps_mask) | (ps & ps_mask) | (1 << PS_EXCM_BIT); if (wb >= XCHAL_NUM_AREGS / 4) return -EFAULT; if (wb != regs->windowbase || ws != regs->windowstart) { unsigned long rotws, wmask; rotws = (((ws | (ws << WSBITS)) >> wb) & ((1 << WSBITS) - 1)) & ~1; wmask = ((rotws ? 
WSBITS + 1 - ffs(rotws) : 0) << 4) | (rotws & 0xF) | 1; regs->windowbase = wb; regs->windowstart = ws; regs->wmask = wmask; } if (wb != 0 && __copy_from_user(regs->areg + XCHAL_NUM_AREGS - wb * 4, gregset->a, wb * 16)) return -EFAULT; if (__copy_from_user(regs->areg, gregset->a + wb * 4, (WSBITS - wb) * 16)) return -EFAULT; return 0; } int ptrace_getxregs(struct task_struct *child, void __user *uregs) { struct pt_regs *regs = task_pt_regs(child); struct thread_info *ti = task_thread_info(child); elf_xtregs_t __user *xtregs = uregs; int ret = 0; if (!access_ok(VERIFY_WRITE, uregs, sizeof(elf_xtregs_t))) return -EIO; #if XTENSA_HAVE_COPROCESSORS /* Flush all coprocessor registers to memory. */ coprocessor_flush_all(ti); ret |= __copy_to_user(&xtregs->cp0, &ti->xtregs_cp, sizeof(xtregs_coprocessor_t)); #endif ret |= __copy_to_user(&xtregs->opt, &regs->xtregs_opt, sizeof(xtregs->opt)); ret |= __copy_to_user(&xtregs->user,&ti->xtregs_user, sizeof(xtregs->user)); return ret ? -EFAULT : 0; } int ptrace_setxregs(struct task_struct *child, void __user *uregs) { struct thread_info *ti = task_thread_info(child); struct pt_regs *regs = task_pt_regs(child); elf_xtregs_t *xtregs = uregs; int ret = 0; if (!access_ok(VERIFY_READ, uregs, sizeof(elf_xtregs_t))) return -EFAULT; #if XTENSA_HAVE_COPROCESSORS /* Flush all coprocessors before we overwrite them. */ coprocessor_flush_all(ti); coprocessor_release_all(ti); ret |= __copy_from_user(&ti->xtregs_cp, &xtregs->cp0, sizeof(xtregs_coprocessor_t)); #endif ret |= __copy_from_user(&regs->xtregs_opt, &xtregs->opt, sizeof(xtregs->opt)); ret |= __copy_from_user(&ti->xtregs_user, &xtregs->user, sizeof(xtregs->user)); return ret ? -EFAULT : 0; } int ptrace_peekusr(struct task_struct *child, long regno, long __user *ret) { struct pt_regs *regs; unsigned long tmp; regs = task_pt_regs(child); tmp = 0; /* Default return value. */ switch(regno) { case REG_AR_BASE ... 
REG_AR_BASE + XCHAL_NUM_AREGS - 1: tmp = regs->areg[regno - REG_AR_BASE]; break; case REG_A_BASE ... REG_A_BASE + 15: tmp = regs->areg[regno - REG_A_BASE]; break; case REG_PC: tmp = regs->pc; break; case REG_PS: /* Note: PS.EXCM is not set while user task is running; * its being set in regs is for exception handling * convenience. */ tmp = (regs->ps & ~(1 << PS_EXCM_BIT)); break; case REG_WB: break; /* tmp = 0 */ case REG_WS: { unsigned long wb = regs->windowbase; unsigned long ws = regs->windowstart; tmp = ((ws>>wb) | (ws<<(WSBITS-wb))) & ((1<<WSBITS)-1); break; } case REG_LBEG: tmp = regs->lbeg; break; case REG_LEND: tmp = regs->lend; break; case REG_LCOUNT: tmp = regs->lcount; break; case REG_SAR: tmp = regs->sar; break; case SYSCALL_NR: tmp = regs->syscall; break; default: return -EIO; } return put_user(tmp, ret); } int ptrace_pokeusr(struct task_struct *child, long regno, long val) { struct pt_regs *regs; regs = task_pt_regs(child); switch (regno) { case REG_AR_BASE ... REG_AR_BASE + XCHAL_NUM_AREGS - 1: regs->areg[regno - REG_AR_BASE] = val; break; case REG_A_BASE ... REG_A_BASE + 15: regs->areg[regno - REG_A_BASE] = val; break; case REG_PC: regs->pc = val; break; case SYSCALL_NR: regs->syscall = val; break; default: return -EIO; } return 0; } long arch_ptrace(struct task_struct *child, long request, unsigned long addr, unsigned long data) { int ret = -EPERM; void __user *datap = (void __user *) data; switch (request) { case PTRACE_PEEKTEXT: /* read word at location addr. */ case PTRACE_PEEKDATA: ret = generic_ptrace_peekdata(child, addr, data); break; case PTRACE_PEEKUSR: /* read register specified by addr. */ ret = ptrace_peekusr(child, addr, datap); break; case PTRACE_POKETEXT: /* write the word at location addr. */ case PTRACE_POKEDATA: ret = generic_ptrace_pokedata(child, addr, data); break; case PTRACE_POKEUSR: /* write register specified by addr. 
*/ ret = ptrace_pokeusr(child, addr, data); break; case PTRACE_GETREGS: ret = ptrace_getregs(child, datap); break; case PTRACE_SETREGS: ret = ptrace_setregs(child, datap); break; case PTRACE_GETXTREGS: ret = ptrace_getxregs(child, datap); break; case PTRACE_SETXTREGS: ret = ptrace_setxregs(child, datap); break; default: ret = ptrace_request(child, request, addr, data); break; } return ret; } void do_syscall_trace(void) { /* * The 0x80 provides a way for the tracing parent to distinguish * between a syscall stop and SIGTRAP delivery */ ptrace_notify(SIGTRAP|((current->ptrace & PT_TRACESYSGOOD) ? 0x80 : 0)); /* * this isn't the same as continuing with a signal, but it will do * for normal use. strace only continues with a signal if the * stopping signal is not SIGTRAP. -brl */ if (current->exit_code) { send_sig(current->exit_code, current, 1); current->exit_code = 0; } } void do_syscall_trace_enter(struct pt_regs *regs) { if (test_thread_flag(TIF_SYSCALL_TRACE) && (current->ptrace & PT_PTRACED)) do_syscall_trace(); #if 0 audit_syscall_entry(current, AUDIT_ARCH_XTENSA..); #endif } void do_syscall_trace_leave(struct pt_regs *regs) { if ((test_thread_flag(TIF_SYSCALL_TRACE)) && (current->ptrace & PT_PTRACED)) do_syscall_trace(); }
gpl-2.0
CyanogenMod/android_kernel_cyanogen_msm8916
drivers/input/input-mt.c
2673
11342
/* * Input Multitouch Library * * Copyright (c) 2008-2010 Henrik Rydberg * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. */ #include <linux/input/mt.h> #include <linux/export.h> #include <linux/slab.h> #define TRKID_SGN ((TRKID_MAX + 1) >> 1) static void copy_abs(struct input_dev *dev, unsigned int dst, unsigned int src) { if (dev->absinfo && test_bit(src, dev->absbit)) { dev->absinfo[dst] = dev->absinfo[src]; dev->absinfo[dst].fuzz = 0; dev->absbit[BIT_WORD(dst)] |= BIT_MASK(dst); } } /** * input_mt_init_slots() - initialize MT input slots * @dev: input device supporting MT events and finger tracking * @num_slots: number of slots used by the device * @flags: mt tasks to handle in core * * This function allocates all necessary memory for MT slot handling * in the input device, prepares the ABS_MT_SLOT and * ABS_MT_TRACKING_ID events for use and sets up appropriate buffers. * Depending on the flags set, it also performs pointer emulation and * frame synchronization. * * May be called repeatedly. Returns -EINVAL if attempting to * reinitialize with a different number of slots. */ int input_mt_init_slots(struct input_dev *dev, unsigned int num_slots, unsigned int flags) { struct input_mt *mt = dev->mt; int i; if (!num_slots) return 0; if (mt) return mt->num_slots != num_slots ? 
-EINVAL : 0; mt = kzalloc(sizeof(*mt) + num_slots * sizeof(*mt->slots), GFP_KERNEL); if (!mt) goto err_mem; mt->num_slots = num_slots; mt->flags = flags; input_set_abs_params(dev, ABS_MT_SLOT, 0, num_slots - 1, 0, 0); input_set_abs_params(dev, ABS_MT_TRACKING_ID, 0, TRKID_MAX, 0, 0); if (flags & (INPUT_MT_POINTER | INPUT_MT_DIRECT)) { __set_bit(EV_KEY, dev->evbit); __set_bit(BTN_TOUCH, dev->keybit); copy_abs(dev, ABS_X, ABS_MT_POSITION_X); copy_abs(dev, ABS_Y, ABS_MT_POSITION_Y); copy_abs(dev, ABS_PRESSURE, ABS_MT_PRESSURE); } if (flags & INPUT_MT_POINTER) { __set_bit(BTN_TOOL_FINGER, dev->keybit); __set_bit(BTN_TOOL_DOUBLETAP, dev->keybit); if (num_slots >= 3) __set_bit(BTN_TOOL_TRIPLETAP, dev->keybit); if (num_slots >= 4) __set_bit(BTN_TOOL_QUADTAP, dev->keybit); if (num_slots >= 5) __set_bit(BTN_TOOL_QUINTTAP, dev->keybit); __set_bit(INPUT_PROP_POINTER, dev->propbit); } if (flags & INPUT_MT_DIRECT) __set_bit(INPUT_PROP_DIRECT, dev->propbit); if (flags & INPUT_MT_SEMI_MT) __set_bit(INPUT_PROP_SEMI_MT, dev->propbit); if (flags & INPUT_MT_TRACK) { unsigned int n2 = num_slots * num_slots; mt->red = kcalloc(n2, sizeof(*mt->red), GFP_KERNEL); if (!mt->red) goto err_mem; } /* Mark slots as 'unused' */ for (i = 0; i < num_slots; i++) input_mt_set_value(&mt->slots[i], ABS_MT_TRACKING_ID, -1); dev->mt = mt; return 0; err_mem: kfree(mt); return -ENOMEM; } EXPORT_SYMBOL(input_mt_init_slots); /** * input_mt_destroy_slots() - frees the MT slots of the input device * @dev: input device with allocated MT slots * * This function is only needed in error path as the input core will * automatically free the MT slots when the device is destroyed. 
*/ void input_mt_destroy_slots(struct input_dev *dev) { if (dev->mt) { kfree(dev->mt->red); kfree(dev->mt); } dev->mt = NULL; } EXPORT_SYMBOL(input_mt_destroy_slots); /** * input_mt_report_slot_state() - report contact state * @dev: input device with allocated MT slots * @tool_type: the tool type to use in this slot * @active: true if contact is active, false otherwise * * Reports a contact via ABS_MT_TRACKING_ID, and optionally * ABS_MT_TOOL_TYPE. If active is true and the slot is currently * inactive, or if the tool type is changed, a new tracking id is * assigned to the slot. The tool type is only reported if the * corresponding absbit field is set. */ void input_mt_report_slot_state(struct input_dev *dev, unsigned int tool_type, bool active) { struct input_mt *mt = dev->mt; struct input_mt_slot *slot; int id; if (!mt) return; slot = &mt->slots[mt->slot]; slot->frame = mt->frame; if (!active) { input_event(dev, EV_ABS, ABS_MT_TRACKING_ID, -1); return; } id = input_mt_get_value(slot, ABS_MT_TRACKING_ID); if (id < 0 || input_mt_get_value(slot, ABS_MT_TOOL_TYPE) != tool_type) id = input_mt_new_trkid(mt); input_event(dev, EV_ABS, ABS_MT_TRACKING_ID, id); input_event(dev, EV_ABS, ABS_MT_TOOL_TYPE, tool_type); } EXPORT_SYMBOL(input_mt_report_slot_state); /** * input_mt_report_finger_count() - report contact count * @dev: input device with allocated MT slots * @count: the number of contacts * * Reports the contact count via BTN_TOOL_FINGER, BTN_TOOL_DOUBLETAP, * BTN_TOOL_TRIPLETAP and BTN_TOOL_QUADTAP. * * The input core ensures only the KEY events already setup for * this device will produce output. 
*/ void input_mt_report_finger_count(struct input_dev *dev, int count) { input_event(dev, EV_KEY, BTN_TOOL_FINGER, count == 1); input_event(dev, EV_KEY, BTN_TOOL_DOUBLETAP, count == 2); input_event(dev, EV_KEY, BTN_TOOL_TRIPLETAP, count == 3); input_event(dev, EV_KEY, BTN_TOOL_QUADTAP, count == 4); input_event(dev, EV_KEY, BTN_TOOL_QUINTTAP, count == 5); } EXPORT_SYMBOL(input_mt_report_finger_count); /** * input_mt_report_pointer_emulation() - common pointer emulation * @dev: input device with allocated MT slots * @use_count: report number of active contacts as finger count * * Performs legacy pointer emulation via BTN_TOUCH, ABS_X, ABS_Y and * ABS_PRESSURE. Touchpad finger count is emulated if use_count is true. * * The input core ensures only the KEY and ABS axes already setup for * this device will produce output. */ void input_mt_report_pointer_emulation(struct input_dev *dev, bool use_count) { struct input_mt *mt = dev->mt; struct input_mt_slot *oldest; int oldid, count, i; if (!mt) return; oldest = NULL; oldid = mt->trkid; count = 0; for (i = 0; i < mt->num_slots; ++i) { struct input_mt_slot *ps = &mt->slots[i]; int id = input_mt_get_value(ps, ABS_MT_TRACKING_ID); if (id < 0) continue; if ((id - oldid) & TRKID_SGN) { oldest = ps; oldid = id; } count++; } input_event(dev, EV_KEY, BTN_TOUCH, count > 0); if (use_count) input_mt_report_finger_count(dev, count); if (oldest) { int x = input_mt_get_value(oldest, ABS_MT_POSITION_X); int y = input_mt_get_value(oldest, ABS_MT_POSITION_Y); input_event(dev, EV_ABS, ABS_X, x); input_event(dev, EV_ABS, ABS_Y, y); if (test_bit(ABS_MT_PRESSURE, dev->absbit)) { int p = input_mt_get_value(oldest, ABS_MT_PRESSURE); input_event(dev, EV_ABS, ABS_PRESSURE, p); } } else { if (test_bit(ABS_MT_PRESSURE, dev->absbit)) input_event(dev, EV_ABS, ABS_PRESSURE, 0); } } EXPORT_SYMBOL(input_mt_report_pointer_emulation); /** * input_mt_sync_frame() - synchronize mt frame * @dev: input device with allocated MT slots * * Close the frame and 
prepare the internal state for a new one. * Depending on the flags, marks unused slots as inactive and performs * pointer emulation. */ void input_mt_sync_frame(struct input_dev *dev) { struct input_mt *mt = dev->mt; struct input_mt_slot *s; bool use_count = false; if (!mt) return; if (mt->flags & INPUT_MT_DROP_UNUSED) { for (s = mt->slots; s != mt->slots + mt->num_slots; s++) { if (input_mt_is_used(mt, s)) continue; input_mt_slot(dev, s - mt->slots); input_event(dev, EV_ABS, ABS_MT_TRACKING_ID, -1); } } if ((mt->flags & INPUT_MT_POINTER) && !(mt->flags & INPUT_MT_SEMI_MT)) use_count = true; input_mt_report_pointer_emulation(dev, use_count); mt->frame++; } EXPORT_SYMBOL(input_mt_sync_frame); static int adjust_dual(int *begin, int step, int *end, int eq) { int f, *p, s, c; if (begin == end) return 0; f = *begin; p = begin + step; s = p == end ? f + 1 : *p; for (; p != end; p += step) if (*p < f) s = f, f = *p; else if (*p < s) s = *p; c = (f + s + 1) / 2; if (c == 0 || (c > 0 && !eq)) return 0; if (s < 0) c *= 2; for (p = begin; p != end; p += step) *p -= c; return (c < s && s <= 0) || (f >= 0 && f < c); } static void find_reduced_matrix(int *w, int nr, int nc, int nrc) { int i, k, sum; for (k = 0; k < nrc; k++) { for (i = 0; i < nr; i++) adjust_dual(w + i, nr, w + i + nrc, nr <= nc); sum = 0; for (i = 0; i < nrc; i += nr) sum += adjust_dual(w + i, 1, w + i + nr, nc <= nr); if (!sum) break; } } static int input_mt_set_matrix(struct input_mt *mt, const struct input_mt_pos *pos, int num_pos) { const struct input_mt_pos *p; struct input_mt_slot *s; int *w = mt->red; int x, y; for (s = mt->slots; s != mt->slots + mt->num_slots; s++) { if (!input_mt_is_active(s)) continue; x = input_mt_get_value(s, ABS_MT_POSITION_X); y = input_mt_get_value(s, ABS_MT_POSITION_Y); for (p = pos; p != pos + num_pos; p++) { int dx = x - p->x, dy = y - p->y; *w++ = dx * dx + dy * dy; } } return w - mt->red; } static void input_mt_set_slots(struct input_mt *mt, int *slots, int num_pos) { 
struct input_mt_slot *s; int *w = mt->red, *p; for (p = slots; p != slots + num_pos; p++) *p = -1; for (s = mt->slots; s != mt->slots + mt->num_slots; s++) { if (!input_mt_is_active(s)) continue; for (p = slots; p != slots + num_pos; p++) if (*w++ < 0) *p = s - mt->slots; } for (s = mt->slots; s != mt->slots + mt->num_slots; s++) { if (input_mt_is_active(s)) continue; for (p = slots; p != slots + num_pos; p++) if (*p < 0) { *p = s - mt->slots; break; } } } /** * input_mt_assign_slots() - perform a best-match assignment * @dev: input device with allocated MT slots * @slots: the slot assignment to be filled * @pos: the position array to match * @num_pos: number of positions * * Performs a best match against the current contacts and returns * the slot assignment list. New contacts are assigned to unused * slots. * * Returns zero on success, or negative error in case of failure. */ int input_mt_assign_slots(struct input_dev *dev, int *slots, const struct input_mt_pos *pos, int num_pos) { struct input_mt *mt = dev->mt; int nrc; if (!mt || !mt->red) return -ENXIO; if (num_pos > mt->num_slots) return -EINVAL; if (num_pos < 1) return 0; nrc = input_mt_set_matrix(mt, pos, num_pos); find_reduced_matrix(mt->red, num_pos, nrc / num_pos, nrc); input_mt_set_slots(mt, slots, num_pos); return 0; } EXPORT_SYMBOL(input_mt_assign_slots); /** * input_mt_get_slot_by_key() - return slot matching key * @dev: input device with allocated MT slots * @key: the key of the sought slot * * Returns the slot of the given key, if it exists, otherwise * set the key on the first unused slot and return. * * If no available slot can be found, -1 is returned. 
*/ int input_mt_get_slot_by_key(struct input_dev *dev, int key) { struct input_mt *mt = dev->mt; struct input_mt_slot *s; if (!mt) return -1; for (s = mt->slots; s != mt->slots + mt->num_slots; s++) if (input_mt_is_active(s) && s->key == key) return s - mt->slots; for (s = mt->slots; s != mt->slots + mt->num_slots; s++) if (!input_mt_is_active(s)) { s->key = key; return s - mt->slots; } return -1; } EXPORT_SYMBOL(input_mt_get_slot_by_key);
gpl-2.0
Capful/android_kernel_htc_msm8660
drivers/s390/block/dasd_fba.c
2929
17919
/* * File...........: linux/drivers/s390/block/dasd_fba.c * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com> * Bugreports.to..: <Linux390@de.ibm.com> * Copyright IBM Corp. 1999, 2009 */ #define KMSG_COMPONENT "dasd-fba" #include <linux/stddef.h> #include <linux/kernel.h> #include <asm/debug.h> #include <linux/slab.h> #include <linux/hdreg.h> /* HDIO_GETGEO */ #include <linux/bio.h> #include <linux/module.h> #include <linux/init.h> #include <asm/idals.h> #include <asm/ebcdic.h> #include <asm/io.h> #include <asm/ccwdev.h> #include "dasd_int.h" #include "dasd_fba.h" #ifdef PRINTK_HEADER #undef PRINTK_HEADER #endif /* PRINTK_HEADER */ #define PRINTK_HEADER "dasd(fba):" #define DASD_FBA_CCW_WRITE 0x41 #define DASD_FBA_CCW_READ 0x42 #define DASD_FBA_CCW_LOCATE 0x43 #define DASD_FBA_CCW_DEFINE_EXTENT 0x63 MODULE_LICENSE("GPL"); static struct dasd_discipline dasd_fba_discipline; struct dasd_fba_private { struct dasd_fba_characteristics rdc_data; }; static struct ccw_device_id dasd_fba_ids[] = { { CCW_DEVICE_DEVTYPE (0x6310, 0, 0x9336, 0), .driver_info = 0x1}, { CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3370, 0), .driver_info = 0x2}, { /* end of list */ }, }; MODULE_DEVICE_TABLE(ccw, dasd_fba_ids); static struct ccw_driver dasd_fba_driver; /* see below */ static int dasd_fba_probe(struct ccw_device *cdev) { return dasd_generic_probe(cdev, &dasd_fba_discipline); } static int dasd_fba_set_online(struct ccw_device *cdev) { return dasd_generic_set_online(cdev, &dasd_fba_discipline); } static struct ccw_driver dasd_fba_driver = { .driver = { .name = "dasd-fba", .owner = THIS_MODULE, }, .ids = dasd_fba_ids, .probe = dasd_fba_probe, .remove = dasd_generic_remove, .set_offline = dasd_generic_set_offline, .set_online = dasd_fba_set_online, .notify = dasd_generic_notify, .path_event = dasd_generic_path_event, .freeze = dasd_generic_pm_freeze, .thaw = dasd_generic_restore_device, .restore = dasd_generic_restore_device, }; static void define_extent(struct ccw1 * ccw, struct 
DE_fba_data *data, int rw, int blksize, int beg, int nr) { ccw->cmd_code = DASD_FBA_CCW_DEFINE_EXTENT; ccw->flags = 0; ccw->count = 16; ccw->cda = (__u32) __pa(data); memset(data, 0, sizeof (struct DE_fba_data)); if (rw == WRITE) (data->mask).perm = 0x0; else if (rw == READ) (data->mask).perm = 0x1; else data->mask.perm = 0x2; data->blk_size = blksize; data->ext_loc = beg; data->ext_end = nr - 1; } static void locate_record(struct ccw1 * ccw, struct LO_fba_data *data, int rw, int block_nr, int block_ct) { ccw->cmd_code = DASD_FBA_CCW_LOCATE; ccw->flags = 0; ccw->count = 8; ccw->cda = (__u32) __pa(data); memset(data, 0, sizeof (struct LO_fba_data)); if (rw == WRITE) data->operation.cmd = 0x5; else if (rw == READ) data->operation.cmd = 0x6; else data->operation.cmd = 0x8; data->blk_nr = block_nr; data->blk_ct = block_ct; } static int dasd_fba_check_characteristics(struct dasd_device *device) { struct dasd_block *block; struct dasd_fba_private *private; struct ccw_device *cdev = device->cdev; int rc; int readonly; private = (struct dasd_fba_private *) device->private; if (!private) { private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA); if (!private) { dev_warn(&device->cdev->dev, "Allocating memory for private DASD " "data failed\n"); return -ENOMEM; } device->private = (void *) private; } else { memset(private, 0, sizeof(*private)); } block = dasd_alloc_block(); if (IS_ERR(block)) { DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s", "could not allocate " "dasd block structure"); device->private = NULL; kfree(private); return PTR_ERR(block); } device->block = block; block->base = device; /* Read Device Characteristics */ rc = dasd_generic_read_dev_chars(device, DASD_FBA_MAGIC, &private->rdc_data, 32); if (rc) { DBF_EVENT_DEVID(DBF_WARNING, cdev, "Read device " "characteristics returned error %d", rc); device->block = NULL; dasd_free_block(block); device->private = NULL; kfree(private); return rc; } device->default_expires = DASD_EXPIRES; device->path_data.opm = 
LPM_ANYPATH; readonly = dasd_device_is_ro(device); if (readonly) set_bit(DASD_FLAG_DEVICE_RO, &device->flags); dev_info(&device->cdev->dev, "New FBA DASD %04X/%02X (CU %04X/%02X) with %d MB " "and %d B/blk%s\n", cdev->id.dev_type, cdev->id.dev_model, cdev->id.cu_type, cdev->id.cu_model, ((private->rdc_data.blk_bdsa * (private->rdc_data.blk_size >> 9)) >> 11), private->rdc_data.blk_size, readonly ? ", read-only device" : ""); return 0; } static int dasd_fba_do_analysis(struct dasd_block *block) { struct dasd_fba_private *private; int sb, rc; private = (struct dasd_fba_private *) block->base->private; rc = dasd_check_blocksize(private->rdc_data.blk_size); if (rc) { DBF_DEV_EVENT(DBF_WARNING, block->base, "unknown blocksize %d", private->rdc_data.blk_size); return rc; } block->blocks = private->rdc_data.blk_bdsa; block->bp_block = private->rdc_data.blk_size; block->s2b_shift = 0; /* bits to shift 512 to get a block */ for (sb = 512; sb < private->rdc_data.blk_size; sb = sb << 1) block->s2b_shift++; return 0; } static int dasd_fba_fill_geometry(struct dasd_block *block, struct hd_geometry *geo) { if (dasd_check_blocksize(block->bp_block) != 0) return -EINVAL; geo->cylinders = (block->blocks << block->s2b_shift) >> 10; geo->heads = 16; geo->sectors = 128 >> block->s2b_shift; return 0; } static dasd_erp_fn_t dasd_fba_erp_action(struct dasd_ccw_req * cqr) { return dasd_default_erp_action; } static dasd_erp_fn_t dasd_fba_erp_postaction(struct dasd_ccw_req * cqr) { if (cqr->function == dasd_default_erp_action) return dasd_default_erp_postaction; DBF_DEV_EVENT(DBF_WARNING, cqr->startdev, "unknown ERP action %p", cqr->function); return NULL; } static void dasd_fba_check_for_device_change(struct dasd_device *device, struct dasd_ccw_req *cqr, struct irb *irb) { char mask; /* first of all check for state change pending interrupt */ mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP; if ((irb->scsw.cmd.dstat & mask) == mask) 
dasd_generic_handle_state_change(device); }; static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev, struct dasd_block *block, struct request *req) { struct dasd_fba_private *private; unsigned long *idaws; struct LO_fba_data *LO_data; struct dasd_ccw_req *cqr; struct ccw1 *ccw; struct req_iterator iter; struct bio_vec *bv; char *dst; int count, cidaw, cplength, datasize; sector_t recid, first_rec, last_rec; unsigned int blksize, off; unsigned char cmd; private = (struct dasd_fba_private *) block->base->private; if (rq_data_dir(req) == READ) { cmd = DASD_FBA_CCW_READ; } else if (rq_data_dir(req) == WRITE) { cmd = DASD_FBA_CCW_WRITE; } else return ERR_PTR(-EINVAL); blksize = block->bp_block; /* Calculate record id of first and last block. */ first_rec = blk_rq_pos(req) >> block->s2b_shift; last_rec = (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift; /* Check struct bio and count the number of blocks for the request. */ count = 0; cidaw = 0; rq_for_each_segment(bv, req, iter) { if (bv->bv_len & (blksize - 1)) /* Fba can only do full blocks. */ return ERR_PTR(-EINVAL); count += bv->bv_len >> (block->s2b_shift + 9); #if defined(CONFIG_64BIT) if (idal_is_needed (page_address(bv->bv_page), bv->bv_len)) cidaw += bv->bv_len / blksize; #endif } /* Paranoia. */ if (count != last_rec - first_rec + 1) return ERR_PTR(-EINVAL); /* 1x define extent + 1x locate record + number of blocks */ cplength = 2 + count; /* 1x define extent + 1x locate record */ datasize = sizeof(struct DE_fba_data) + sizeof(struct LO_fba_data) + cidaw * sizeof(unsigned long); /* * Find out number of additional locate record ccws if the device * can't do data chaining. */ if (private->rdc_data.mode.bits.data_chain == 0) { cplength += count - 1; datasize += (count - 1)*sizeof(struct LO_fba_data); } /* Allocate the ccw request. 
*/ cqr = dasd_smalloc_request(DASD_FBA_MAGIC, cplength, datasize, memdev); if (IS_ERR(cqr)) return cqr; ccw = cqr->cpaddr; /* First ccw is define extent. */ define_extent(ccw++, cqr->data, rq_data_dir(req), block->bp_block, blk_rq_pos(req), blk_rq_sectors(req)); /* Build locate_record + read/write ccws. */ idaws = (unsigned long *) (cqr->data + sizeof(struct DE_fba_data)); LO_data = (struct LO_fba_data *) (idaws + cidaw); /* Locate record for all blocks for smart devices. */ if (private->rdc_data.mode.bits.data_chain != 0) { ccw[-1].flags |= CCW_FLAG_CC; locate_record(ccw++, LO_data++, rq_data_dir(req), 0, count); } recid = first_rec; rq_for_each_segment(bv, req, iter) { dst = page_address(bv->bv_page) + bv->bv_offset; if (dasd_page_cache) { char *copy = kmem_cache_alloc(dasd_page_cache, GFP_DMA | __GFP_NOWARN); if (copy && rq_data_dir(req) == WRITE) memcpy(copy + bv->bv_offset, dst, bv->bv_len); if (copy) dst = copy + bv->bv_offset; } for (off = 0; off < bv->bv_len; off += blksize) { /* Locate record for stupid devices. 
*/ if (private->rdc_data.mode.bits.data_chain == 0) { ccw[-1].flags |= CCW_FLAG_CC; locate_record(ccw, LO_data++, rq_data_dir(req), recid - first_rec, 1); ccw->flags = CCW_FLAG_CC; ccw++; } else { if (recid > first_rec) ccw[-1].flags |= CCW_FLAG_DC; else ccw[-1].flags |= CCW_FLAG_CC; } ccw->cmd_code = cmd; ccw->count = block->bp_block; if (idal_is_needed(dst, blksize)) { ccw->cda = (__u32)(addr_t) idaws; ccw->flags = CCW_FLAG_IDA; idaws = idal_create_words(idaws, dst, blksize); } else { ccw->cda = (__u32)(addr_t) dst; ccw->flags = 0; } ccw++; dst += blksize; recid++; } } if (blk_noretry_request(req) || block->base->features & DASD_FEATURE_FAILFAST) set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); cqr->startdev = memdev; cqr->memdev = memdev; cqr->block = block; cqr->expires = memdev->default_expires * HZ; /* default 5 minutes */ cqr->retries = 32; cqr->buildclk = get_clock(); cqr->status = DASD_CQR_FILLED; return cqr; } static int dasd_fba_free_cp(struct dasd_ccw_req *cqr, struct request *req) { struct dasd_fba_private *private; struct ccw1 *ccw; struct req_iterator iter; struct bio_vec *bv; char *dst, *cda; unsigned int blksize, off; int status; if (!dasd_page_cache) goto out; private = (struct dasd_fba_private *) cqr->block->base->private; blksize = cqr->block->bp_block; ccw = cqr->cpaddr; /* Skip over define extent & locate record. */ ccw++; if (private->rdc_data.mode.bits.data_chain != 0) ccw++; rq_for_each_segment(bv, req, iter) { dst = page_address(bv->bv_page) + bv->bv_offset; for (off = 0; off < bv->bv_len; off += blksize) { /* Skip locate record. 
*/ if (private->rdc_data.mode.bits.data_chain == 0) ccw++; if (dst) { if (ccw->flags & CCW_FLAG_IDA) cda = *((char **)((addr_t) ccw->cda)); else cda = (char *)((addr_t) ccw->cda); if (dst != cda) { if (rq_data_dir(req) == READ) memcpy(dst, cda, bv->bv_len); kmem_cache_free(dasd_page_cache, (void *)((addr_t)cda & PAGE_MASK)); } dst = NULL; } ccw++; } } out: status = cqr->status == DASD_CQR_DONE; dasd_sfree_request(cqr, cqr->memdev); return status; } static void dasd_fba_handle_terminated_request(struct dasd_ccw_req *cqr) { cqr->status = DASD_CQR_FILLED; }; static int dasd_fba_fill_info(struct dasd_device * device, struct dasd_information2_t * info) { info->label_block = 1; info->FBA_layout = 1; info->format = DASD_FORMAT_LDL; info->characteristics_size = sizeof(struct dasd_fba_characteristics); memcpy(info->characteristics, &((struct dasd_fba_private *) device->private)->rdc_data, sizeof (struct dasd_fba_characteristics)); info->confdata_size = 0; return 0; } static void dasd_fba_dump_sense_dbf(struct dasd_device *device, struct irb *irb, char *reason) { u64 *sense; sense = (u64 *) dasd_get_sense(irb); if (sense) { DBF_DEV_EVENT(DBF_EMERG, device, "%s: %s %02x%02x%02x %016llx %016llx %016llx " "%016llx", reason, scsw_is_tm(&irb->scsw) ? 
"t" : "c", scsw_cc(&irb->scsw), scsw_cstat(&irb->scsw), scsw_dstat(&irb->scsw), sense[0], sense[1], sense[2], sense[3]); } else { DBF_DEV_EVENT(DBF_EMERG, device, "%s", "SORRY - NO VALID SENSE AVAILABLE\n"); } } static void dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req, struct irb *irb) { char *page; struct ccw1 *act, *end, *last; int len, sl, sct, count; page = (char *) get_zeroed_page(GFP_ATOMIC); if (page == NULL) { DBF_DEV_EVENT(DBF_WARNING, device, "%s", "No memory to dump sense data"); return; } len = sprintf(page, KERN_ERR PRINTK_HEADER " I/O status report for device %s:\n", dev_name(&device->cdev->dev)); len += sprintf(page + len, KERN_ERR PRINTK_HEADER " in req: %p CS: 0x%02X DS: 0x%02X\n", req, irb->scsw.cmd.cstat, irb->scsw.cmd.dstat); len += sprintf(page + len, KERN_ERR PRINTK_HEADER " device %s: Failing CCW: %p\n", dev_name(&device->cdev->dev), (void *) (addr_t) irb->scsw.cmd.cpa); if (irb->esw.esw0.erw.cons) { for (sl = 0; sl < 4; sl++) { len += sprintf(page + len, KERN_ERR PRINTK_HEADER " Sense(hex) %2d-%2d:", (8 * sl), ((8 * sl) + 7)); for (sct = 0; sct < 8; sct++) { len += sprintf(page + len, " %02x", irb->ecw[8 * sl + sct]); } len += sprintf(page + len, "\n"); } } else { len += sprintf(page + len, KERN_ERR PRINTK_HEADER " SORRY - NO VALID SENSE AVAILABLE\n"); } printk(KERN_ERR "%s", page); /* dump the Channel Program */ /* print first CCWs (maximum 8) */ act = req->cpaddr; for (last = act; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++); end = min(act + 8, last); len = sprintf(page, KERN_ERR PRINTK_HEADER " Related CP in req: %p\n", req); while (act <= end) { len += sprintf(page + len, KERN_ERR PRINTK_HEADER " CCW %p: %08X %08X DAT:", act, ((int *) act)[0], ((int *) act)[1]); for (count = 0; count < 32 && count < act->count; count += sizeof(int)) len += sprintf(page + len, " %08X", ((int *) (addr_t) act->cda) [(count>>2)]); len += sprintf(page + len, "\n"); act++; } printk(KERN_ERR "%s", page); /* print failing CCW 
area */ len = 0; if (act < ((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa) - 2) { act = ((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa) - 2; len += sprintf(page + len, KERN_ERR PRINTK_HEADER "......\n"); } end = min((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa + 2, last); while (act <= end) { len += sprintf(page + len, KERN_ERR PRINTK_HEADER " CCW %p: %08X %08X DAT:", act, ((int *) act)[0], ((int *) act)[1]); for (count = 0; count < 32 && count < act->count; count += sizeof(int)) len += sprintf(page + len, " %08X", ((int *) (addr_t) act->cda) [(count>>2)]); len += sprintf(page + len, "\n"); act++; } /* print last CCWs */ if (act < last - 2) { act = last - 2; len += sprintf(page + len, KERN_ERR PRINTK_HEADER "......\n"); } while (act <= last) { len += sprintf(page + len, KERN_ERR PRINTK_HEADER " CCW %p: %08X %08X DAT:", act, ((int *) act)[0], ((int *) act)[1]); for (count = 0; count < 32 && count < act->count; count += sizeof(int)) len += sprintf(page + len, " %08X", ((int *) (addr_t) act->cda) [(count>>2)]); len += sprintf(page + len, "\n"); act++; } if (len > 0) printk(KERN_ERR "%s", page); free_page((unsigned long) page); } /* * max_blocks is dependent on the amount of storage that is available * in the static io buffer for each device. Currently each device has * 8192 bytes (=2 pages). For 64 bit one dasd_mchunkt_t structure has * 24 bytes, the struct dasd_ccw_req has 136 bytes and each block can use * up to 16 bytes (8 for the ccw and 8 for the idal pointer). In * addition we have one define extent ccw + 16 bytes of data and a * locate record ccw for each block (stupid devices!) + 16 bytes of data. * That makes: * (8192 - 24 - 136 - 8 - 16) / 40 = 200.2 blocks at maximum. * We want to fit two into the available memory so that we can immediately * start the next request if one finishes off. That makes 100.1 blocks * for one request. Give a little safety and the result is 96. 
*/ static struct dasd_discipline dasd_fba_discipline = { .owner = THIS_MODULE, .name = "FBA ", .ebcname = "FBA ", .max_blocks = 96, .check_device = dasd_fba_check_characteristics, .do_analysis = dasd_fba_do_analysis, .verify_path = dasd_generic_verify_path, .fill_geometry = dasd_fba_fill_geometry, .start_IO = dasd_start_IO, .term_IO = dasd_term_IO, .handle_terminated_request = dasd_fba_handle_terminated_request, .erp_action = dasd_fba_erp_action, .erp_postaction = dasd_fba_erp_postaction, .check_for_device_change = dasd_fba_check_for_device_change, .build_cp = dasd_fba_build_cp, .free_cp = dasd_fba_free_cp, .dump_sense = dasd_fba_dump_sense, .dump_sense_dbf = dasd_fba_dump_sense_dbf, .fill_info = dasd_fba_fill_info, }; static int __init dasd_fba_init(void) { int ret; ASCEBC(dasd_fba_discipline.ebcname, 4); ret = ccw_driver_register(&dasd_fba_driver); if (!ret) wait_for_device_probe(); return ret; } static void __exit dasd_fba_cleanup(void) { ccw_driver_unregister(&dasd_fba_driver); } module_init(dasd_fba_init); module_exit(dasd_fba_cleanup);
gpl-2.0
aopp/android_kernel_asus_grouper
fs/udf/truncate.c
2929
8193
/* * truncate.c * * PURPOSE * Truncate handling routines for the OSTA-UDF(tm) filesystem. * * COPYRIGHT * This file is distributed under the terms of the GNU General Public * License (GPL). Copies of the GPL can be obtained from: * ftp://prep.ai.mit.edu/pub/gnu/GPL * Each contributing author retains all rights to their own work. * * (C) 1999-2004 Ben Fennema * (C) 1999 Stelias Computing Inc * * HISTORY * * 02/24/99 blf Created. * */ #include "udfdecl.h" #include <linux/fs.h> #include <linux/mm.h> #include <linux/buffer_head.h> #include "udf_i.h" #include "udf_sb.h" static void extent_trunc(struct inode *inode, struct extent_position *epos, struct kernel_lb_addr *eloc, int8_t etype, uint32_t elen, uint32_t nelen) { struct kernel_lb_addr neloc = {}; int last_block = (elen + inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits; int first_block = (nelen + inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits; if (nelen) { if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30)) { udf_free_blocks(inode->i_sb, inode, eloc, 0, last_block); etype = (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30); } else neloc = *eloc; nelen = (etype << 30) | nelen; } if (elen != nelen) { udf_write_aext(inode, epos, &neloc, nelen, 0); if (last_block - first_block > 0) { if (etype == (EXT_RECORDED_ALLOCATED >> 30)) mark_inode_dirty(inode); if (etype != (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) udf_free_blocks(inode->i_sb, inode, eloc, first_block, last_block - first_block); } } } /* * Truncate the last extent to match i_size. This function assumes * that preallocation extent is already truncated. */ void udf_truncate_tail_extent(struct inode *inode) { struct extent_position epos = {}; struct kernel_lb_addr eloc; uint32_t elen, nelen; uint64_t lbcount = 0; int8_t etype = -1, netype; int adsize; struct udf_inode_info *iinfo = UDF_I(inode); if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB || inode->i_size == iinfo->i_lenExtents) return; /* Are we going to delete the file anyway? 
*/ if (inode->i_nlink == 0) return; if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT) adsize = sizeof(struct short_ad); else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG) adsize = sizeof(struct long_ad); else BUG(); /* Find the last extent in the file */ while ((netype = udf_next_aext(inode, &epos, &eloc, &elen, 1)) != -1) { etype = netype; lbcount += elen; if (lbcount > inode->i_size) { if (lbcount - inode->i_size >= inode->i_sb->s_blocksize) printk(KERN_WARNING "udf_truncate_tail_extent(): Too long " "extent after EOF in inode %u: i_size: " "%Ld lbcount: %Ld extent %u+%u\n", (unsigned)inode->i_ino, (long long)inode->i_size, (long long)lbcount, (unsigned)eloc.logicalBlockNum, (unsigned)elen); nelen = elen - (lbcount - inode->i_size); epos.offset -= adsize; extent_trunc(inode, &epos, &eloc, etype, elen, nelen); epos.offset += adsize; if (udf_next_aext(inode, &epos, &eloc, &elen, 1) != -1) printk(KERN_ERR "udf_truncate_tail_extent(): " "Extent after EOF in inode %u.\n", (unsigned)inode->i_ino); break; } } /* This inode entry is in-memory only and thus we don't have to mark * the inode dirty */ iinfo->i_lenExtents = inode->i_size; brelse(epos.bh); } void udf_discard_prealloc(struct inode *inode) { struct extent_position epos = { NULL, 0, {0, 0} }; struct kernel_lb_addr eloc; uint32_t elen; uint64_t lbcount = 0; int8_t etype = -1, netype; int adsize; struct udf_inode_info *iinfo = UDF_I(inode); if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB || inode->i_size == iinfo->i_lenExtents) return; if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT) adsize = sizeof(struct short_ad); else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG) adsize = sizeof(struct long_ad); else adsize = 0; epos.block = iinfo->i_location; /* Find the last extent in the file */ while ((netype = udf_next_aext(inode, &epos, &eloc, &elen, 1)) != -1) { etype = netype; lbcount += elen; } if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30)) { epos.offset -= adsize; lbcount -= elen; extent_trunc(inode, &epos, 
&eloc, etype, elen, 0); if (!epos.bh) { iinfo->i_lenAlloc = epos.offset - udf_file_entry_alloc_offset(inode); mark_inode_dirty(inode); } else { struct allocExtDesc *aed = (struct allocExtDesc *)(epos.bh->b_data); aed->lengthAllocDescs = cpu_to_le32(epos.offset - sizeof(struct allocExtDesc)); if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB(inode->i_sb)->s_udfrev >= 0x0201) udf_update_tag(epos.bh->b_data, epos.offset); else udf_update_tag(epos.bh->b_data, sizeof(struct allocExtDesc)); mark_buffer_dirty_inode(epos.bh, inode); } } /* This inode entry is in-memory only and thus we don't have to mark * the inode dirty */ iinfo->i_lenExtents = lbcount; brelse(epos.bh); } static void udf_update_alloc_ext_desc(struct inode *inode, struct extent_position *epos, u32 lenalloc) { struct super_block *sb = inode->i_sb; struct udf_sb_info *sbi = UDF_SB(sb); struct allocExtDesc *aed = (struct allocExtDesc *) (epos->bh->b_data); int len = sizeof(struct allocExtDesc); aed->lengthAllocDescs = cpu_to_le32(lenalloc); if (!UDF_QUERY_FLAG(sb, UDF_FLAG_STRICT) || sbi->s_udfrev >= 0x0201) len += lenalloc; udf_update_tag(epos->bh->b_data, len); mark_buffer_dirty_inode(epos->bh, inode); } /* * Truncate extents of inode to inode->i_size. This function can be used only * for making file shorter. For making file longer, udf_extend_file() has to * be used. 
*/ void udf_truncate_extents(struct inode *inode) { struct extent_position epos; struct kernel_lb_addr eloc, neloc = {}; uint32_t elen, nelen = 0, indirect_ext_len = 0, lenalloc; int8_t etype; struct super_block *sb = inode->i_sb; sector_t first_block = inode->i_size >> sb->s_blocksize_bits, offset; loff_t byte_offset; int adsize; struct udf_inode_info *iinfo = UDF_I(inode); if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT) adsize = sizeof(struct short_ad); else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG) adsize = sizeof(struct long_ad); else BUG(); etype = inode_bmap(inode, first_block, &epos, &eloc, &elen, &offset); byte_offset = (offset << sb->s_blocksize_bits) + (inode->i_size & (sb->s_blocksize - 1)); if (etype == -1) { /* We should extend the file? */ WARN_ON(byte_offset); return; } epos.offset -= adsize; extent_trunc(inode, &epos, &eloc, etype, elen, byte_offset); epos.offset += adsize; if (byte_offset) lenalloc = epos.offset; else lenalloc = epos.offset - adsize; if (!epos.bh) lenalloc -= udf_file_entry_alloc_offset(inode); else lenalloc -= sizeof(struct allocExtDesc); while ((etype = udf_current_aext(inode, &epos, &eloc, &elen, 0)) != -1) { if (etype == (EXT_NEXT_EXTENT_ALLOCDECS >> 30)) { udf_write_aext(inode, &epos, &neloc, nelen, 0); if (indirect_ext_len) { /* We managed to free all extents in the * indirect extent - free it too */ BUG_ON(!epos.bh); udf_free_blocks(sb, inode, &epos.block, 0, indirect_ext_len); } else if (!epos.bh) { iinfo->i_lenAlloc = lenalloc; mark_inode_dirty(inode); } else udf_update_alloc_ext_desc(inode, &epos, lenalloc); brelse(epos.bh); epos.offset = sizeof(struct allocExtDesc); epos.block = eloc; epos.bh = udf_tread(sb, udf_get_lb_pblock(sb, &eloc, 0)); if (elen) indirect_ext_len = (elen + sb->s_blocksize - 1) >> sb->s_blocksize_bits; else indirect_ext_len = 1; } else { extent_trunc(inode, &epos, &eloc, etype, elen, 0); epos.offset += adsize; } } if (indirect_ext_len) { BUG_ON(!epos.bh); udf_free_blocks(sb, inode, 
&epos.block, 0, indirect_ext_len); } else if (!epos.bh) { iinfo->i_lenAlloc = lenalloc; mark_inode_dirty(inode); } else udf_update_alloc_ext_desc(inode, &epos, lenalloc); iinfo->i_lenExtents = inode->i_size; brelse(epos.bh); }
gpl-2.0
NAM-IL/ARM_Linux_Kernel_12b
lib/mpi/mpicoder.c
3441
6370
/* mpicoder.c - Coder for the external representation of MPIs * Copyright (C) 1998, 1999 Free Software Foundation, Inc. * * This file is part of GnuPG. * * GnuPG is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * GnuPG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA */ #include <linux/bitops.h> #include <asm-generic/bitops/count_zeros.h> #include "mpi-internal.h" #define MAX_EXTERN_MPI_BITS 16384 /** * mpi_read_raw_data - Read a raw byte stream as a positive integer * @xbuffer: The data to read * @nbytes: The amount of data to read */ MPI mpi_read_raw_data(const void *xbuffer, size_t nbytes) { const uint8_t *buffer = xbuffer; int i, j; unsigned nbits, nlimbs; mpi_limb_t a; MPI val = NULL; while (nbytes > 0 && buffer[0] == 0) { buffer++; nbytes--; } nbits = nbytes * 8; if (nbits > MAX_EXTERN_MPI_BITS) { pr_info("MPI: mpi too large (%u bits)\n", nbits); return NULL; } if (nbytes > 0) nbits -= count_leading_zeros(buffer[0]); else nbits = 0; nlimbs = DIV_ROUND_UP(nbytes, BYTES_PER_MPI_LIMB); val = mpi_alloc(nlimbs); if (!val) return NULL; val->nbits = nbits; val->sign = 0; val->nlimbs = nlimbs; if (nbytes > 0) { i = BYTES_PER_MPI_LIMB - nbytes % BYTES_PER_MPI_LIMB; i %= BYTES_PER_MPI_LIMB; for (j = nlimbs; j > 0; j--) { a = 0; for (; i < BYTES_PER_MPI_LIMB; i++) { a <<= 8; a |= *buffer++; } i = 0; val->d[j - 1] = a; } } return val; } EXPORT_SYMBOL_GPL(mpi_read_raw_data); MPI mpi_read_from_buffer(const void 
*xbuffer, unsigned *ret_nread) { const uint8_t *buffer = xbuffer; int i, j; unsigned nbits, nbytes, nlimbs, nread = 0; mpi_limb_t a; MPI val = NULL; if (*ret_nread < 2) goto leave; nbits = buffer[0] << 8 | buffer[1]; if (nbits > MAX_EXTERN_MPI_BITS) { pr_info("MPI: mpi too large (%u bits)\n", nbits); goto leave; } buffer += 2; nread = 2; nbytes = DIV_ROUND_UP(nbits, 8); nlimbs = DIV_ROUND_UP(nbytes, BYTES_PER_MPI_LIMB); val = mpi_alloc(nlimbs); if (!val) return NULL; i = BYTES_PER_MPI_LIMB - nbytes % BYTES_PER_MPI_LIMB; i %= BYTES_PER_MPI_LIMB; val->nbits = nbits; j = val->nlimbs = nlimbs; val->sign = 0; for (; j > 0; j--) { a = 0; for (; i < BYTES_PER_MPI_LIMB; i++) { if (++nread > *ret_nread) { printk ("MPI: mpi larger than buffer nread=%d ret_nread=%d\n", nread, *ret_nread); goto leave; } a <<= 8; a |= *buffer++; } i = 0; val->d[j - 1] = a; } leave: *ret_nread = nread; return val; } EXPORT_SYMBOL_GPL(mpi_read_from_buffer); /**************** * Return an allocated buffer with the MPI (msb first). * NBYTES receives the length of this buffer. Caller must free the * return string (This function does return a 0 byte buffer with NBYTES * set to zero if the value of A is zero. If sign is not NULL, it will * be set to the sign of the A. */ void *mpi_get_buffer(MPI a, unsigned *nbytes, int *sign) { uint8_t *p, *buffer; mpi_limb_t alimb; int i; unsigned int n; if (sign) *sign = a->sign; *nbytes = n = a->nlimbs * BYTES_PER_MPI_LIMB; if (!n) n++; /* avoid zero length allocation */ p = buffer = kmalloc(n, GFP_KERNEL); if (!p) return NULL; for (i = a->nlimbs - 1; i >= 0; i--) { alimb = a->d[i]; #if BYTES_PER_MPI_LIMB == 4 *p++ = alimb >> 24; *p++ = alimb >> 16; *p++ = alimb >> 8; *p++ = alimb; #elif BYTES_PER_MPI_LIMB == 8 *p++ = alimb >> 56; *p++ = alimb >> 48; *p++ = alimb >> 40; *p++ = alimb >> 32; *p++ = alimb >> 24; *p++ = alimb >> 16; *p++ = alimb >> 8; *p++ = alimb; #else #error please implement for this limb size. 
#endif } /* this is sub-optimal but we need to do the shift operation * because the caller has to free the returned buffer */ for (p = buffer; !*p && *nbytes; p++, --*nbytes) ; if (p != buffer) memmove(buffer, p, *nbytes); return buffer; } EXPORT_SYMBOL_GPL(mpi_get_buffer); /**************** * Use BUFFER to update MPI. */ int mpi_set_buffer(MPI a, const void *xbuffer, unsigned nbytes, int sign) { const uint8_t *buffer = xbuffer, *p; mpi_limb_t alimb; int nlimbs; int i; nlimbs = DIV_ROUND_UP(nbytes, BYTES_PER_MPI_LIMB); if (RESIZE_IF_NEEDED(a, nlimbs) < 0) return -ENOMEM; a->sign = sign; for (i = 0, p = buffer + nbytes - 1; p >= buffer + BYTES_PER_MPI_LIMB;) { #if BYTES_PER_MPI_LIMB == 4 alimb = (mpi_limb_t) *p--; alimb |= (mpi_limb_t) *p-- << 8; alimb |= (mpi_limb_t) *p-- << 16; alimb |= (mpi_limb_t) *p-- << 24; #elif BYTES_PER_MPI_LIMB == 8 alimb = (mpi_limb_t) *p--; alimb |= (mpi_limb_t) *p-- << 8; alimb |= (mpi_limb_t) *p-- << 16; alimb |= (mpi_limb_t) *p-- << 24; alimb |= (mpi_limb_t) *p-- << 32; alimb |= (mpi_limb_t) *p-- << 40; alimb |= (mpi_limb_t) *p-- << 48; alimb |= (mpi_limb_t) *p-- << 56; #else #error please implement for this limb size. #endif a->d[i++] = alimb; } if (p >= buffer) { #if BYTES_PER_MPI_LIMB == 4 alimb = *p--; if (p >= buffer) alimb |= (mpi_limb_t) *p-- << 8; if (p >= buffer) alimb |= (mpi_limb_t) *p-- << 16; if (p >= buffer) alimb |= (mpi_limb_t) *p-- << 24; #elif BYTES_PER_MPI_LIMB == 8 alimb = (mpi_limb_t) *p--; if (p >= buffer) alimb |= (mpi_limb_t) *p-- << 8; if (p >= buffer) alimb |= (mpi_limb_t) *p-- << 16; if (p >= buffer) alimb |= (mpi_limb_t) *p-- << 24; if (p >= buffer) alimb |= (mpi_limb_t) *p-- << 32; if (p >= buffer) alimb |= (mpi_limb_t) *p-- << 40; if (p >= buffer) alimb |= (mpi_limb_t) *p-- << 48; if (p >= buffer) alimb |= (mpi_limb_t) *p-- << 56; #else #error please implement for this limb size. 
#endif a->d[i++] = alimb; } a->nlimbs = i; if (i != nlimbs) { pr_emerg("MPI: mpi_set_buffer: Assertion failed (%d != %d)", i, nlimbs); BUG(); } return 0; } EXPORT_SYMBOL_GPL(mpi_set_buffer);
gpl-2.0
Huawei-Kiwi/android_kernel_huawei_msm8939
fs/sysv/ialloc.c
4465
5889
/* * linux/fs/sysv/ialloc.c * * minix/bitmap.c * Copyright (C) 1991, 1992 Linus Torvalds * * ext/freelists.c * Copyright (C) 1992 Remy Card (card@masi.ibp.fr) * * xenix/alloc.c * Copyright (C) 1992 Doug Evans * * coh/alloc.c * Copyright (C) 1993 Pascal Haible, Bruno Haible * * sysv/ialloc.c * Copyright (C) 1993 Bruno Haible * * This file contains code for allocating/freeing inodes. */ #include <linux/kernel.h> #include <linux/stddef.h> #include <linux/sched.h> #include <linux/stat.h> #include <linux/string.h> #include <linux/buffer_head.h> #include <linux/writeback.h> #include "sysv.h" /* We don't trust the value of sb->sv_sbd2->s_tinode = *sb->sv_sb_total_free_inodes but we nevertheless keep it up to date. */ /* An inode on disk is considered free if both i_mode == 0 and i_nlink == 0. */ /* return &sb->sv_sb_fic_inodes[i] = &sbd->s_inode[i]; */ static inline sysv_ino_t * sv_sb_fic_inode(struct super_block * sb, unsigned int i) { struct sysv_sb_info *sbi = SYSV_SB(sb); if (sbi->s_bh1 == sbi->s_bh2) return &sbi->s_sb_fic_inodes[i]; else { /* 512 byte Xenix FS */ unsigned int offset = offsetof(struct xenix_super_block, s_inode[i]); if (offset < 512) return (sysv_ino_t*)(sbi->s_sbd1 + offset); else return (sysv_ino_t*)(sbi->s_sbd2 + offset); } } struct sysv_inode * sysv_raw_inode(struct super_block *sb, unsigned ino, struct buffer_head **bh) { struct sysv_sb_info *sbi = SYSV_SB(sb); struct sysv_inode *res; int block = sbi->s_firstinodezone + sbi->s_block_base; block += (ino-1) >> sbi->s_inodes_per_block_bits; *bh = sb_bread(sb, block); if (!*bh) return NULL; res = (struct sysv_inode *)(*bh)->b_data; return res + ((ino-1) & sbi->s_inodes_per_block_1); } static int refill_free_cache(struct super_block *sb) { struct sysv_sb_info *sbi = SYSV_SB(sb); struct buffer_head * bh; struct sysv_inode * raw_inode; int i = 0, ino; ino = SYSV_ROOT_INO+1; raw_inode = sysv_raw_inode(sb, ino, &bh); if (!raw_inode) goto out; while (ino <= sbi->s_ninodes) { if (raw_inode->i_mode == 0 && 
raw_inode->i_nlink == 0) { *sv_sb_fic_inode(sb,i++) = cpu_to_fs16(SYSV_SB(sb), ino); if (i == sbi->s_fic_size) break; } if ((ino++ & sbi->s_inodes_per_block_1) == 0) { brelse(bh); raw_inode = sysv_raw_inode(sb, ino, &bh); if (!raw_inode) goto out; } else raw_inode++; } brelse(bh); out: return i; } void sysv_free_inode(struct inode * inode) { struct super_block *sb = inode->i_sb; struct sysv_sb_info *sbi = SYSV_SB(sb); unsigned int ino; struct buffer_head * bh; struct sysv_inode * raw_inode; unsigned count; sb = inode->i_sb; ino = inode->i_ino; if (ino <= SYSV_ROOT_INO || ino > sbi->s_ninodes) { printk("sysv_free_inode: inode 0,1,2 or nonexistent inode\n"); return; } raw_inode = sysv_raw_inode(sb, ino, &bh); if (!raw_inode) { printk("sysv_free_inode: unable to read inode block on device " "%s\n", inode->i_sb->s_id); return; } mutex_lock(&sbi->s_lock); count = fs16_to_cpu(sbi, *sbi->s_sb_fic_count); if (count < sbi->s_fic_size) { *sv_sb_fic_inode(sb,count++) = cpu_to_fs16(sbi, ino); *sbi->s_sb_fic_count = cpu_to_fs16(sbi, count); } fs16_add(sbi, sbi->s_sb_total_free_inodes, 1); dirty_sb(sb); memset(raw_inode, 0, sizeof(struct sysv_inode)); mark_buffer_dirty(bh); mutex_unlock(&sbi->s_lock); brelse(bh); } struct inode * sysv_new_inode(const struct inode * dir, umode_t mode) { struct super_block *sb = dir->i_sb; struct sysv_sb_info *sbi = SYSV_SB(sb); struct inode *inode; sysv_ino_t ino; unsigned count; struct writeback_control wbc = { .sync_mode = WB_SYNC_NONE }; inode = new_inode(sb); if (!inode) return ERR_PTR(-ENOMEM); mutex_lock(&sbi->s_lock); count = fs16_to_cpu(sbi, *sbi->s_sb_fic_count); if (count == 0 || (*sv_sb_fic_inode(sb,count-1) == 0)) { count = refill_free_cache(sb); if (count == 0) { iput(inode); mutex_unlock(&sbi->s_lock); return ERR_PTR(-ENOSPC); } } /* Now count > 0. 
*/ ino = *sv_sb_fic_inode(sb,--count); *sbi->s_sb_fic_count = cpu_to_fs16(sbi, count); fs16_add(sbi, sbi->s_sb_total_free_inodes, -1); dirty_sb(sb); inode_init_owner(inode, dir, mode); inode->i_ino = fs16_to_cpu(sbi, ino); inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC; inode->i_blocks = 0; memset(SYSV_I(inode)->i_data, 0, sizeof(SYSV_I(inode)->i_data)); SYSV_I(inode)->i_dir_start_lookup = 0; insert_inode_hash(inode); mark_inode_dirty(inode); sysv_write_inode(inode, &wbc); /* ensure inode not allocated again */ mark_inode_dirty(inode); /* cleared by sysv_write_inode() */ /* That's it. */ mutex_unlock(&sbi->s_lock); return inode; } unsigned long sysv_count_free_inodes(struct super_block * sb) { struct sysv_sb_info *sbi = SYSV_SB(sb); struct buffer_head * bh; struct sysv_inode * raw_inode; int ino, count, sb_count; mutex_lock(&sbi->s_lock); sb_count = fs16_to_cpu(sbi, *sbi->s_sb_total_free_inodes); if (0) goto trust_sb; /* this causes a lot of disk traffic ... */ count = 0; ino = SYSV_ROOT_INO+1; raw_inode = sysv_raw_inode(sb, ino, &bh); if (!raw_inode) goto Eio; while (ino <= sbi->s_ninodes) { if (raw_inode->i_mode == 0 && raw_inode->i_nlink == 0) count++; if ((ino++ & sbi->s_inodes_per_block_1) == 0) { brelse(bh); raw_inode = sysv_raw_inode(sb, ino, &bh); if (!raw_inode) goto Eio; } else raw_inode++; } brelse(bh); if (count != sb_count) goto Einval; out: mutex_unlock(&sbi->s_lock); return count; Einval: printk("sysv_count_free_inodes: " "free inode count was %d, correcting to %d\n", sb_count, count); if (!(sb->s_flags & MS_RDONLY)) { *sbi->s_sb_total_free_inodes = cpu_to_fs16(SYSV_SB(sb), count); dirty_sb(sb); } goto out; Eio: printk("sysv_count_free_inodes: unable to read inode table\n"); trust_sb: count = sb_count; goto out; }
gpl-2.0
byzvulture/android_kernel_nubia_nx507j
drivers/block/amiflop.c
5233
48032
/* * linux/amiga/amiflop.c * * Copyright (C) 1993 Greg Harp * Portions of this driver are based on code contributed by Brad Pepers * * revised 28.5.95 by Joerg Dorchain * - now no bugs(?) any more for both HD & DD * - added support for 40 Track 5.25" drives, 80-track hopefully behaves * like 3.5" dd (no way to test - are there any 5.25" drives out there * that work on an A4000?) * - wrote formatting routine (maybe dirty, but works) * * june/july 1995 added ms-dos support by Joerg Dorchain * (portions based on messydos.device and various contributors) * - currently only 9 and 18 sector disks * * - fixed a bug with the internal trackbuffer when using multiple * disks the same time * - made formatting a bit safer * - added command line and machine based default for "silent" df0 * * december 1995 adapted for 1.2.13pl4 by Joerg Dorchain * - works but I think it's inefficient. (look in redo_fd_request) * But the changes were very efficient. (only three and a half lines) * * january 1996 added special ioctl for tracking down read/write problems * - usage ioctl(d, RAW_TRACK, ptr); the raw track buffer (MFM-encoded data * is copied to area. (area should be large enough since no checking is * done - 30K is currently sufficient). return the actual size of the * trackbuffer * - replaced udelays() by a timer (CIAA timer B) for the waits * needed for the disk mechanic. * * february 1996 fixed error recovery and multiple disk access * - both got broken the first time I tampered with the driver :-( * - still not safe, but better than before * * revised Marts 3rd, 1996 by Jes Sorensen for use in the 1.3.28 kernel. * - Minor changes to accept the kdev_t. * - Replaced some more udelays with ms_delays. Udelay is just a loop, * and so the delay will be different depending on the given * processor :-( * - The driver could use a major cleanup because of the new * major/minor handling that came with kdev_t. 
It seems to work for * the time being, but I can't guarantee that it will stay like * that when we start using 16 (24?) bit minors. * * restructured jan 1997 by Joerg Dorchain * - Fixed Bug accessing multiple disks * - some code cleanup * - added trackbuffer for each drive to speed things up * - fixed some race conditions (who finds the next may send it to me ;-) */ #include <linux/module.h> #include <linux/slab.h> #include <linux/fd.h> #include <linux/hdreg.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/mutex.h> #include <linux/amifdreg.h> #include <linux/amifd.h> #include <linux/fs.h> #include <linux/blkdev.h> #include <linux/elevator.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <asm/setup.h> #include <asm/uaccess.h> #include <asm/amigahw.h> #include <asm/amigaints.h> #include <asm/irq.h> #undef DEBUG /* print _LOTS_ of infos */ #define RAW_IOCTL #ifdef RAW_IOCTL #define IOCTL_RAW_TRACK 0x5254524B /* 'RTRK' */ #endif /* * Defines */ /* * Error codes */ #define FD_OK 0 /* operation succeeded */ #define FD_ERROR -1 /* general error (seek, read, write, etc) */ #define FD_NOUNIT 1 /* unit does not exist */ #define FD_UNITBUSY 2 /* unit already active */ #define FD_NOTACTIVE 3 /* unit is not active */ #define FD_NOTREADY 4 /* unit is not ready (motor not on/no disk) */ #define MFM_NOSYNC 1 #define MFM_HEADER 2 #define MFM_DATA 3 #define MFM_TRACK 4 /* * Floppy ID values */ #define FD_NODRIVE 0x00000000 /* response when no unit is present */ #define FD_DD_3 0xffffffff /* double-density 3.5" (880K) drive */ #define FD_HD_3 0x55555555 /* high-density 3.5" (1760K) drive */ #define FD_DD_5 0xaaaaaaaa /* double-density 5.25" (440K) drive */ static DEFINE_MUTEX(amiflop_mutex); static unsigned long int fd_def_df0 = FD_DD_3; /* default for df0 if it doesn't identify */ module_param(fd_def_df0, ulong, 0); MODULE_LICENSE("GPL"); /* * Macros */ #define MOTOR_ON (ciab.prb &= ~DSKMOTOR) #define MOTOR_OFF (ciab.prb |= DSKMOTOR) 
#define SELECT(mask) (ciab.prb &= ~mask) #define DESELECT(mask) (ciab.prb |= mask) #define SELMASK(drive) (1 << (3 + (drive & 3))) static struct fd_drive_type drive_types[] = { /* code name tr he rdsz wrsz sm pc1 pc2 sd st st*/ /* warning: times are now in milliseconds (ms) */ { FD_DD_3, "DD 3.5", 80, 2, 14716, 13630, 1, 80,161, 3, 18, 1}, { FD_HD_3, "HD 3.5", 80, 2, 28344, 27258, 2, 80,161, 3, 18, 1}, { FD_DD_5, "DD 5.25", 40, 2, 14716, 13630, 1, 40, 81, 6, 30, 2}, { FD_NODRIVE, "No Drive", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }; static int num_dr_types = ARRAY_SIZE(drive_types); static int amiga_read(int), dos_read(int); static void amiga_write(int), dos_write(int); static struct fd_data_type data_types[] = { { "Amiga", 11 , amiga_read, amiga_write}, { "MS-Dos", 9, dos_read, dos_write} }; /* current info on each unit */ static struct amiga_floppy_struct unit[FD_MAX_UNITS]; static struct timer_list flush_track_timer[FD_MAX_UNITS]; static struct timer_list post_write_timer; static struct timer_list motor_on_timer; static struct timer_list motor_off_timer[FD_MAX_UNITS]; static int on_attempts; /* Synchronization of FDC access */ /* request loop (trackbuffer) */ static volatile int fdc_busy = -1; static volatile int fdc_nested; static DECLARE_WAIT_QUEUE_HEAD(fdc_wait); static DECLARE_COMPLETION(motor_on_completion); static volatile int selected = -1; /* currently selected drive */ static int writepending; static int writefromint; static char *raw_buf; static int fdc_queue; static DEFINE_SPINLOCK(amiflop_lock); #define RAW_BUF_SIZE 30000 /* size of raw disk data */ /* * These are global variables, as that's the easiest way to give * information to interrupts. They are the data used for the current * request. 
*/ static volatile char block_flag; static DECLARE_WAIT_QUEUE_HEAD(wait_fd_block); /* MS-Dos MFM Coding tables (should go quick and easy) */ static unsigned char mfmencode[16]={ 0x2a, 0x29, 0x24, 0x25, 0x12, 0x11, 0x14, 0x15, 0x4a, 0x49, 0x44, 0x45, 0x52, 0x51, 0x54, 0x55 }; static unsigned char mfmdecode[128]; /* floppy internal millisecond timer stuff */ static DECLARE_COMPLETION(ms_wait_completion); #define MS_TICKS ((amiga_eclock+50)/1000) /* * Note that MAX_ERRORS=X doesn't imply that we retry every bad read * max X times - some types of errors increase the errorcount by 2 or * even 3, so we might actually retry only X/2 times before giving up. */ #define MAX_ERRORS 12 #define custom amiga_custom /* Prevent "aliased" accesses. */ static int fd_ref[4] = { 0,0,0,0 }; static int fd_device[4] = { 0, 0, 0, 0 }; /* * Here come the actual hardware access and helper functions. * They are not reentrant and single threaded because all drives * share the same hardware and the same trackbuffer. 
*/ /* Milliseconds timer */ static irqreturn_t ms_isr(int irq, void *dummy) { complete(&ms_wait_completion); return IRQ_HANDLED; } /* all waits are queued up A more generic routine would do a schedule a la timer.device */ static void ms_delay(int ms) { int ticks; static DEFINE_MUTEX(mutex); if (ms > 0) { mutex_lock(&mutex); ticks = MS_TICKS*ms-1; ciaa.tblo=ticks%256; ciaa.tbhi=ticks/256; ciaa.crb=0x19; /*count eclock, force load, one-shoot, start */ wait_for_completion(&ms_wait_completion); mutex_unlock(&mutex); } } /* Hardware semaphore */ /* returns true when we would get the semaphore */ static inline int try_fdc(int drive) { drive &= 3; return ((fdc_busy < 0) || (fdc_busy == drive)); } static void get_fdc(int drive) { unsigned long flags; drive &= 3; #ifdef DEBUG printk("get_fdc: drive %d fdc_busy %d fdc_nested %d\n",drive,fdc_busy,fdc_nested); #endif local_irq_save(flags); wait_event(fdc_wait, try_fdc(drive)); fdc_busy = drive; fdc_nested++; local_irq_restore(flags); } static inline void rel_fdc(void) { #ifdef DEBUG if (fdc_nested == 0) printk("fd: unmatched rel_fdc\n"); printk("rel_fdc: fdc_busy %d fdc_nested %d\n",fdc_busy,fdc_nested); #endif fdc_nested--; if (fdc_nested == 0) { fdc_busy = -1; wake_up(&fdc_wait); } } static void fd_select (int drive) { unsigned char prb = ~0; drive&=3; #ifdef DEBUG printk("selecting %d\n",drive); #endif if (drive == selected) return; get_fdc(drive); selected = drive; if (unit[drive].track % 2 != 0) prb &= ~DSKSIDE; if (unit[drive].motor == 1) prb &= ~DSKMOTOR; ciab.prb |= (SELMASK(0)|SELMASK(1)|SELMASK(2)|SELMASK(3)); ciab.prb = prb; prb &= ~SELMASK(drive); ciab.prb = prb; rel_fdc(); } static void fd_deselect (int drive) { unsigned char prb; unsigned long flags; drive&=3; #ifdef DEBUG printk("deselecting %d\n",drive); #endif if (drive != selected) { printk(KERN_WARNING "Deselecting drive %d while %d was selected!\n",drive,selected); return; } get_fdc(drive); local_irq_save(flags); selected = -1; prb = ciab.prb; prb |= 
(SELMASK(0)|SELMASK(1)|SELMASK(2)|SELMASK(3)); ciab.prb = prb; local_irq_restore (flags); rel_fdc(); } static void motor_on_callback(unsigned long nr) { if (!(ciaa.pra & DSKRDY) || --on_attempts == 0) { complete_all(&motor_on_completion); } else { motor_on_timer.expires = jiffies + HZ/10; add_timer(&motor_on_timer); } } static int fd_motor_on(int nr) { nr &= 3; del_timer(motor_off_timer + nr); if (!unit[nr].motor) { unit[nr].motor = 1; fd_select(nr); INIT_COMPLETION(motor_on_completion); motor_on_timer.data = nr; mod_timer(&motor_on_timer, jiffies + HZ/2); on_attempts = 10; wait_for_completion(&motor_on_completion); fd_deselect(nr); } if (on_attempts == 0) { on_attempts = -1; #if 0 printk (KERN_ERR "motor_on failed, turning motor off\n"); fd_motor_off (nr); return 0; #else printk (KERN_WARNING "DSKRDY not set after 1.5 seconds - assuming drive is spinning notwithstanding\n"); #endif } return 1; } static void fd_motor_off(unsigned long drive) { long calledfromint; #ifdef MODULE long decusecount; decusecount = drive & 0x40000000; #endif calledfromint = drive & 0x80000000; drive&=3; if (calledfromint && !try_fdc(drive)) { /* We would be blocked in an interrupt, so try again later */ motor_off_timer[drive].expires = jiffies + 1; add_timer(motor_off_timer + drive); return; } unit[drive].motor = 0; fd_select(drive); udelay (1); fd_deselect(drive); } static void floppy_off (unsigned int nr) { int drive; drive = nr & 3; /* called this way it is always from interrupt */ motor_off_timer[drive].data = nr | 0x80000000; mod_timer(motor_off_timer + drive, jiffies + 3*HZ); } static int fd_calibrate(int drive) { unsigned char prb; int n; drive &= 3; get_fdc(drive); if (!fd_motor_on (drive)) return 0; fd_select (drive); prb = ciab.prb; prb |= DSKSIDE; prb &= ~DSKDIREC; ciab.prb = prb; for (n = unit[drive].type->tracks/2; n != 0; --n) { if (ciaa.pra & DSKTRACK0) break; prb &= ~DSKSTEP; ciab.prb = prb; prb |= DSKSTEP; udelay (2); ciab.prb = prb; 
ms_delay(unit[drive].type->step_delay); } ms_delay (unit[drive].type->settle_time); prb |= DSKDIREC; n = unit[drive].type->tracks + 20; for (;;) { prb &= ~DSKSTEP; ciab.prb = prb; prb |= DSKSTEP; udelay (2); ciab.prb = prb; ms_delay(unit[drive].type->step_delay + 1); if ((ciaa.pra & DSKTRACK0) == 0) break; if (--n == 0) { printk (KERN_ERR "fd%d: calibrate failed, turning motor off\n", drive); fd_motor_off (drive); unit[drive].track = -1; rel_fdc(); return 0; } } unit[drive].track = 0; ms_delay(unit[drive].type->settle_time); rel_fdc(); fd_deselect(drive); return 1; } static int fd_seek(int drive, int track) { unsigned char prb; int cnt; #ifdef DEBUG printk("seeking drive %d to track %d\n",drive,track); #endif drive &= 3; get_fdc(drive); if (unit[drive].track == track) { rel_fdc(); return 1; } if (!fd_motor_on(drive)) { rel_fdc(); return 0; } if (unit[drive].track < 0 && !fd_calibrate(drive)) { rel_fdc(); return 0; } fd_select (drive); cnt = unit[drive].track/2 - track/2; prb = ciab.prb; prb |= DSKSIDE | DSKDIREC; if (track % 2 != 0) prb &= ~DSKSIDE; if (cnt < 0) { cnt = - cnt; prb &= ~DSKDIREC; } ciab.prb = prb; if (track % 2 != unit[drive].track % 2) ms_delay (unit[drive].type->side_time); unit[drive].track = track; if (cnt == 0) { rel_fdc(); fd_deselect(drive); return 1; } do { prb &= ~DSKSTEP; ciab.prb = prb; prb |= DSKSTEP; udelay (1); ciab.prb = prb; ms_delay (unit[drive].type->step_delay); } while (--cnt != 0); ms_delay (unit[drive].type->settle_time); rel_fdc(); fd_deselect(drive); return 1; } static unsigned long fd_get_drive_id(int drive) { int i; ulong id = 0; drive&=3; get_fdc(drive); /* set up for ID */ MOTOR_ON; udelay(2); SELECT(SELMASK(drive)); udelay(2); DESELECT(SELMASK(drive)); udelay(2); MOTOR_OFF; udelay(2); SELECT(SELMASK(drive)); udelay(2); DESELECT(SELMASK(drive)); udelay(2); /* loop and read disk ID */ for (i=0; i<32; i++) { SELECT(SELMASK(drive)); udelay(2); /* read and store value of DSKRDY */ id <<= 1; id |= (ciaa.pra & DSKRDY) ? 
0 : 1; /* cia regs are low-active! */ DESELECT(SELMASK(drive)); } rel_fdc(); /* * RB: At least A500/A2000's df0: don't identify themselves. * As every (real) Amiga has at least a 3.5" DD drive as df0: * we default to that if df0: doesn't identify as a certain * type. */ if(drive == 0 && id == FD_NODRIVE) { id = fd_def_df0; printk(KERN_NOTICE "fd: drive 0 didn't identify, setting default %08lx\n", (ulong)fd_def_df0); } /* return the ID value */ return (id); } static irqreturn_t fd_block_done(int irq, void *dummy) { if (block_flag) custom.dsklen = 0x4000; if (block_flag == 2) { /* writing */ writepending = 2; post_write_timer.expires = jiffies + 1; /* at least 2 ms */ post_write_timer.data = selected; add_timer(&post_write_timer); } else { /* reading */ block_flag = 0; wake_up (&wait_fd_block); } return IRQ_HANDLED; } static void raw_read(int drive) { drive&=3; get_fdc(drive); wait_event(wait_fd_block, !block_flag); fd_select(drive); /* setup adkcon bits correctly */ custom.adkcon = ADK_MSBSYNC; custom.adkcon = ADK_SETCLR|ADK_WORDSYNC|ADK_FAST; custom.dsksync = MFM_SYNC; custom.dsklen = 0; custom.dskptr = (u_char *)ZTWO_PADDR((u_char *)raw_buf); custom.dsklen = unit[drive].type->read_size/sizeof(short) | DSKLEN_DMAEN; custom.dsklen = unit[drive].type->read_size/sizeof(short) | DSKLEN_DMAEN; block_flag = 1; wait_event(wait_fd_block, !block_flag); custom.dsklen = 0; fd_deselect(drive); rel_fdc(); } static int raw_write(int drive) { ushort adk; drive&=3; get_fdc(drive); /* corresponds to rel_fdc() in post_write() */ if ((ciaa.pra & DSKPROT) == 0) { rel_fdc(); return 0; } wait_event(wait_fd_block, !block_flag); fd_select(drive); /* clear adkcon bits */ custom.adkcon = ADK_PRECOMP1|ADK_PRECOMP0|ADK_WORDSYNC|ADK_MSBSYNC; /* set appropriate adkcon bits */ adk = ADK_SETCLR|ADK_FAST; if ((ulong)unit[drive].track >= unit[drive].type->precomp2) adk |= ADK_PRECOMP1; else if ((ulong)unit[drive].track >= unit[drive].type->precomp1) adk |= ADK_PRECOMP0; custom.adkcon = adk; 
custom.dsklen = DSKLEN_WRITE; custom.dskptr = (u_char *)ZTWO_PADDR((u_char *)raw_buf); custom.dsklen = unit[drive].type->write_size/sizeof(short) | DSKLEN_DMAEN|DSKLEN_WRITE; custom.dsklen = unit[drive].type->write_size/sizeof(short) | DSKLEN_DMAEN|DSKLEN_WRITE; block_flag = 2; return 1; } /* * to be called at least 2ms after the write has finished but before any * other access to the hardware. */ static void post_write (unsigned long drive) { #ifdef DEBUG printk("post_write for drive %ld\n",drive); #endif drive &= 3; custom.dsklen = 0; block_flag = 0; writepending = 0; writefromint = 0; unit[drive].dirty = 0; wake_up(&wait_fd_block); fd_deselect(drive); rel_fdc(); /* corresponds to get_fdc() in raw_write */ } /* * The following functions are to convert the block contents into raw data * written to disk and vice versa. * (Add other formats here ;-)) */ static unsigned long scan_sync(unsigned long raw, unsigned long end) { ushort *ptr = (ushort *)raw, *endp = (ushort *)end; while (ptr < endp && *ptr++ != 0x4489) ; if (ptr < endp) { while (*ptr == 0x4489 && ptr < endp) ptr++; return (ulong)ptr; } return 0; } static inline unsigned long checksum(unsigned long *addr, int len) { unsigned long csum = 0; len /= sizeof(*addr); while (len-- > 0) csum ^= *addr++; csum = ((csum>>1) & 0x55555555) ^ (csum & 0x55555555); return csum; } static unsigned long decode (unsigned long *data, unsigned long *raw, int len) { ulong *odd, *even; /* convert length from bytes to longwords */ len >>= 2; odd = raw; even = odd + len; /* prepare return pointer */ raw += len * 2; do { *data++ = ((*odd++ & 0x55555555) << 1) | (*even++ & 0x55555555); } while (--len != 0); return (ulong)raw; } struct header { unsigned char magic; unsigned char track; unsigned char sect; unsigned char ord; unsigned char labels[16]; unsigned long hdrchk; unsigned long datachk; }; static int amiga_read(int drive) { unsigned long raw; unsigned long end; int scnt; unsigned long csum; struct header hdr; drive&=3; raw = 
(long) raw_buf; end = raw + unit[drive].type->read_size; for (scnt = 0;scnt < unit[drive].dtype->sects * unit[drive].type->sect_mult; scnt++) { if (!(raw = scan_sync(raw, end))) { printk (KERN_INFO "can't find sync for sector %d\n", scnt); return MFM_NOSYNC; } raw = decode ((ulong *)&hdr.magic, (ulong *)raw, 4); raw = decode ((ulong *)&hdr.labels, (ulong *)raw, 16); raw = decode ((ulong *)&hdr.hdrchk, (ulong *)raw, 4); raw = decode ((ulong *)&hdr.datachk, (ulong *)raw, 4); csum = checksum((ulong *)&hdr, (char *)&hdr.hdrchk-(char *)&hdr); #ifdef DEBUG printk ("(%x,%d,%d,%d) (%lx,%lx,%lx,%lx) %lx %lx\n", hdr.magic, hdr.track, hdr.sect, hdr.ord, *(ulong *)&hdr.labels[0], *(ulong *)&hdr.labels[4], *(ulong *)&hdr.labels[8], *(ulong *)&hdr.labels[12], hdr.hdrchk, hdr.datachk); #endif if (hdr.hdrchk != csum) { printk(KERN_INFO "MFM_HEADER: %08lx,%08lx\n", hdr.hdrchk, csum); return MFM_HEADER; } /* verify track */ if (hdr.track != unit[drive].track) { printk(KERN_INFO "MFM_TRACK: %d, %d\n", hdr.track, unit[drive].track); return MFM_TRACK; } raw = decode ((ulong *)(unit[drive].trackbuf + hdr.sect*512), (ulong *)raw, 512); csum = checksum((ulong *)(unit[drive].trackbuf + hdr.sect*512), 512); if (hdr.datachk != csum) { printk(KERN_INFO "MFM_DATA: (%x:%d:%d:%d) sc=%d %lx, %lx\n", hdr.magic, hdr.track, hdr.sect, hdr.ord, scnt, hdr.datachk, csum); printk (KERN_INFO "data=(%lx,%lx,%lx,%lx)\n", ((ulong *)(unit[drive].trackbuf+hdr.sect*512))[0], ((ulong *)(unit[drive].trackbuf+hdr.sect*512))[1], ((ulong *)(unit[drive].trackbuf+hdr.sect*512))[2], ((ulong *)(unit[drive].trackbuf+hdr.sect*512))[3]); return MFM_DATA; } } return 0; } static void encode(unsigned long data, unsigned long *dest) { unsigned long data2; data &= 0x55555555; data2 = data ^ 0x55555555; data |= ((data2 >> 1) | 0x80000000) & (data2 << 1); if (*(dest - 1) & 0x00000001) data &= 0x7FFFFFFF; *dest = data; } static void encode_block(unsigned long *dest, unsigned long *src, int len) { int cnt, to_cnt = 0; unsigned long 
data; /* odd bits */ for (cnt = 0; cnt < len / 4; cnt++) { data = src[cnt] >> 1; encode(data, dest + to_cnt++); } /* even bits */ for (cnt = 0; cnt < len / 4; cnt++) { data = src[cnt]; encode(data, dest + to_cnt++); } } static unsigned long *putsec(int disk, unsigned long *raw, int cnt) { struct header hdr; int i; disk&=3; *raw = (raw[-1]&1) ? 0x2AAAAAAA : 0xAAAAAAAA; raw++; *raw++ = 0x44894489; hdr.magic = 0xFF; hdr.track = unit[disk].track; hdr.sect = cnt; hdr.ord = unit[disk].dtype->sects * unit[disk].type->sect_mult - cnt; for (i = 0; i < 16; i++) hdr.labels[i] = 0; hdr.hdrchk = checksum((ulong *)&hdr, (char *)&hdr.hdrchk-(char *)&hdr); hdr.datachk = checksum((ulong *)(unit[disk].trackbuf+cnt*512), 512); encode_block(raw, (ulong *)&hdr.magic, 4); raw += 2; encode_block(raw, (ulong *)&hdr.labels, 16); raw += 8; encode_block(raw, (ulong *)&hdr.hdrchk, 4); raw += 2; encode_block(raw, (ulong *)&hdr.datachk, 4); raw += 2; encode_block(raw, (ulong *)(unit[disk].trackbuf+cnt*512), 512); raw += 256; return raw; } static void amiga_write(int disk) { unsigned int cnt; unsigned long *ptr = (unsigned long *)raw_buf; disk&=3; /* gap space */ for (cnt = 0; cnt < 415 * unit[disk].type->sect_mult; cnt++) *ptr++ = 0xaaaaaaaa; /* sectors */ for (cnt = 0; cnt < unit[disk].dtype->sects * unit[disk].type->sect_mult; cnt++) ptr = putsec (disk, ptr, cnt); *(ushort *)ptr = (ptr[-1]&1) ? 0x2AA8 : 0xAAA8; } struct dos_header { unsigned char track, /* 0-80 */ side, /* 0-1 */ sec, /* 0-...*/ len_desc;/* 2 */ unsigned short crc; /* on 68000 we got an alignment problem, but this compiler solves it by adding silently adding a pad byte so data won't fit and this took about 3h to discover.... */ unsigned char gap1[22]; /* for longword-alignedness (0x4e) */ }; /* crc routines are borrowed from the messydos-handler */ /* excerpt from the messydos-device ; The CRC is computed not only over the actual data, but including ; the SYNC mark (3 * $a1) and the 'ID/DATA - Address Mark' ($fe/$fb). 
; As we don't read or encode these fields into our buffers, we have to ; preload the registers containing the CRC with the values they would have ; after stepping over these fields. ; ; How CRCs "really" work: ; ; First, you should regard a bitstring as a series of coefficients of ; polynomials. We calculate with these polynomials in modulo-2 ; arithmetic, in which both add and subtract are done the same as ; exclusive-or. Now, we modify our data (a very long polynomial) in ; such a way that it becomes divisible by the CCITT-standard 16-bit ; 16 12 5 ; polynomial: x + x + x + 1, represented by $11021. The easiest ; way to do this would be to multiply (using proper arithmetic) our ; datablock with $11021. So we have: ; data * $11021 = ; data * ($10000 + $1021) = ; data * $10000 + data * $1021 ; The left part of this is simple: Just add two 0 bytes. But then ; the right part (data $1021) remains difficult and even could have ; a carry into the left part. The solution is to use a modified ; multiplication, which has a result that is not correct, but with ; a difference of any multiple of $11021. We then only need to keep ; the 16 least significant bits of the result. ; ; The following algorithm does this for us: ; ; unsigned char *data, c, crclo, crchi; ; while (not done) { ; c = *data++ + crchi; ; crchi = (@ c) >> 8 + crclo; ; crclo = @ c; ; } ; ; Remember, + is done with EOR, the @ operator is in two tables (high ; and low byte separately), which is calculated as ; ; $1021 * (c & $F0) ; xor $1021 * (c & $0F) ; xor $1021 * (c >> 4) (* is regular multiplication) ; ; ; Anyway, the end result is the same as the remainder of the division of ; the data by $11021. I am afraid I need to study theory a bit more... my only works was to code this from manx to C.... 
*/ static ushort dos_crc(void * data_a3, int data_d0, int data_d1, int data_d3) { static unsigned char CRCTable1[] = { 0x00,0x10,0x20,0x30,0x40,0x50,0x60,0x70,0x81,0x91,0xa1,0xb1,0xc1,0xd1,0xe1,0xf1, 0x12,0x02,0x32,0x22,0x52,0x42,0x72,0x62,0x93,0x83,0xb3,0xa3,0xd3,0xc3,0xf3,0xe3, 0x24,0x34,0x04,0x14,0x64,0x74,0x44,0x54,0xa5,0xb5,0x85,0x95,0xe5,0xf5,0xc5,0xd5, 0x36,0x26,0x16,0x06,0x76,0x66,0x56,0x46,0xb7,0xa7,0x97,0x87,0xf7,0xe7,0xd7,0xc7, 0x48,0x58,0x68,0x78,0x08,0x18,0x28,0x38,0xc9,0xd9,0xe9,0xf9,0x89,0x99,0xa9,0xb9, 0x5a,0x4a,0x7a,0x6a,0x1a,0x0a,0x3a,0x2a,0xdb,0xcb,0xfb,0xeb,0x9b,0x8b,0xbb,0xab, 0x6c,0x7c,0x4c,0x5c,0x2c,0x3c,0x0c,0x1c,0xed,0xfd,0xcd,0xdd,0xad,0xbd,0x8d,0x9d, 0x7e,0x6e,0x5e,0x4e,0x3e,0x2e,0x1e,0x0e,0xff,0xef,0xdf,0xcf,0xbf,0xaf,0x9f,0x8f, 0x91,0x81,0xb1,0xa1,0xd1,0xc1,0xf1,0xe1,0x10,0x00,0x30,0x20,0x50,0x40,0x70,0x60, 0x83,0x93,0xa3,0xb3,0xc3,0xd3,0xe3,0xf3,0x02,0x12,0x22,0x32,0x42,0x52,0x62,0x72, 0xb5,0xa5,0x95,0x85,0xf5,0xe5,0xd5,0xc5,0x34,0x24,0x14,0x04,0x74,0x64,0x54,0x44, 0xa7,0xb7,0x87,0x97,0xe7,0xf7,0xc7,0xd7,0x26,0x36,0x06,0x16,0x66,0x76,0x46,0x56, 0xd9,0xc9,0xf9,0xe9,0x99,0x89,0xb9,0xa9,0x58,0x48,0x78,0x68,0x18,0x08,0x38,0x28, 0xcb,0xdb,0xeb,0xfb,0x8b,0x9b,0xab,0xbb,0x4a,0x5a,0x6a,0x7a,0x0a,0x1a,0x2a,0x3a, 0xfd,0xed,0xdd,0xcd,0xbd,0xad,0x9d,0x8d,0x7c,0x6c,0x5c,0x4c,0x3c,0x2c,0x1c,0x0c, 0xef,0xff,0xcf,0xdf,0xaf,0xbf,0x8f,0x9f,0x6e,0x7e,0x4e,0x5e,0x2e,0x3e,0x0e,0x1e }; static unsigned char CRCTable2[] = { 0x00,0x21,0x42,0x63,0x84,0xa5,0xc6,0xe7,0x08,0x29,0x4a,0x6b,0x8c,0xad,0xce,0xef, 0x31,0x10,0x73,0x52,0xb5,0x94,0xf7,0xd6,0x39,0x18,0x7b,0x5a,0xbd,0x9c,0xff,0xde, 0x62,0x43,0x20,0x01,0xe6,0xc7,0xa4,0x85,0x6a,0x4b,0x28,0x09,0xee,0xcf,0xac,0x8d, 0x53,0x72,0x11,0x30,0xd7,0xf6,0x95,0xb4,0x5b,0x7a,0x19,0x38,0xdf,0xfe,0x9d,0xbc, 0xc4,0xe5,0x86,0xa7,0x40,0x61,0x02,0x23,0xcc,0xed,0x8e,0xaf,0x48,0x69,0x0a,0x2b, 0xf5,0xd4,0xb7,0x96,0x71,0x50,0x33,0x12,0xfd,0xdc,0xbf,0x9e,0x79,0x58,0x3b,0x1a, 
0xa6,0x87,0xe4,0xc5,0x22,0x03,0x60,0x41,0xae,0x8f,0xec,0xcd,0x2a,0x0b,0x68,0x49, 0x97,0xb6,0xd5,0xf4,0x13,0x32,0x51,0x70,0x9f,0xbe,0xdd,0xfc,0x1b,0x3a,0x59,0x78, 0x88,0xa9,0xca,0xeb,0x0c,0x2d,0x4e,0x6f,0x80,0xa1,0xc2,0xe3,0x04,0x25,0x46,0x67, 0xb9,0x98,0xfb,0xda,0x3d,0x1c,0x7f,0x5e,0xb1,0x90,0xf3,0xd2,0x35,0x14,0x77,0x56, 0xea,0xcb,0xa8,0x89,0x6e,0x4f,0x2c,0x0d,0xe2,0xc3,0xa0,0x81,0x66,0x47,0x24,0x05, 0xdb,0xfa,0x99,0xb8,0x5f,0x7e,0x1d,0x3c,0xd3,0xf2,0x91,0xb0,0x57,0x76,0x15,0x34, 0x4c,0x6d,0x0e,0x2f,0xc8,0xe9,0x8a,0xab,0x44,0x65,0x06,0x27,0xc0,0xe1,0x82,0xa3, 0x7d,0x5c,0x3f,0x1e,0xf9,0xd8,0xbb,0x9a,0x75,0x54,0x37,0x16,0xf1,0xd0,0xb3,0x92, 0x2e,0x0f,0x6c,0x4d,0xaa,0x8b,0xe8,0xc9,0x26,0x07,0x64,0x45,0xa2,0x83,0xe0,0xc1, 0x1f,0x3e,0x5d,0x7c,0x9b,0xba,0xd9,0xf8,0x17,0x36,0x55,0x74,0x93,0xb2,0xd1,0xf0 }; /* look at the asm-code - what looks in C a bit strange is almost as good as handmade */ register int i; register unsigned char *CRCT1, *CRCT2, *data, c, crch, crcl; CRCT1=CRCTable1; CRCT2=CRCTable2; data=data_a3; crcl=data_d1; crch=data_d0; for (i=data_d3; i>=0; i--) { c = (*data++) ^ crch; crch = CRCT1[c] ^ crcl; crcl = CRCT2[c]; } return (crch<<8)|crcl; } static inline ushort dos_hdr_crc (struct dos_header *hdr) { return dos_crc(&(hdr->track), 0xb2, 0x30, 3); /* precomputed magic */ } static inline ushort dos_data_crc(unsigned char *data) { return dos_crc(data, 0xe2, 0x95 ,511); /* precomputed magic */ } static inline unsigned char dos_decode_byte(ushort word) { register ushort w2; register unsigned char byte; register unsigned char *dec = mfmdecode; w2=word; w2>>=8; w2&=127; byte = dec[w2]; byte <<= 4; w2 = word & 127; byte |= dec[w2]; return byte; } static unsigned long dos_decode(unsigned char *data, unsigned short *raw, int len) { int i; for (i = 0; i < len; i++) *data++=dos_decode_byte(*raw++); return ((ulong)raw); } #ifdef DEBUG static void dbg(unsigned long ptr) { printk("raw data @%08lx: %08lx, %08lx ,%08lx, %08lx\n", ptr, ((ulong *)ptr)[0], ((ulong 
*)ptr)[1], ((ulong *)ptr)[2], ((ulong *)ptr)[3]); } #endif static int dos_read(int drive) { unsigned long end; unsigned long raw; int scnt; unsigned short crc,data_crc[2]; struct dos_header hdr; drive&=3; raw = (long) raw_buf; end = raw + unit[drive].type->read_size; for (scnt=0; scnt < unit[drive].dtype->sects * unit[drive].type->sect_mult; scnt++) { do { /* search for the right sync of each sec-hdr */ if (!(raw = scan_sync (raw, end))) { printk(KERN_INFO "dos_read: no hdr sync on " "track %d, unit %d for sector %d\n", unit[drive].track,drive,scnt); return MFM_NOSYNC; } #ifdef DEBUG dbg(raw); #endif } while (*((ushort *)raw)!=0x5554); /* loop usually only once done */ raw+=2; /* skip over headermark */ raw = dos_decode((unsigned char *)&hdr,(ushort *) raw,8); crc = dos_hdr_crc(&hdr); #ifdef DEBUG printk("(%3d,%d,%2d,%d) %x\n", hdr.track, hdr.side, hdr.sec, hdr.len_desc, hdr.crc); #endif if (crc != hdr.crc) { printk(KERN_INFO "dos_read: MFM_HEADER %04x,%04x\n", hdr.crc, crc); return MFM_HEADER; } if (hdr.track != unit[drive].track/unit[drive].type->heads) { printk(KERN_INFO "dos_read: MFM_TRACK %d, %d\n", hdr.track, unit[drive].track/unit[drive].type->heads); return MFM_TRACK; } if (hdr.side != unit[drive].track%unit[drive].type->heads) { printk(KERN_INFO "dos_read: MFM_SIDE %d, %d\n", hdr.side, unit[drive].track%unit[drive].type->heads); return MFM_TRACK; } if (hdr.len_desc != 2) { printk(KERN_INFO "dos_read: unknown sector len " "descriptor %d\n", hdr.len_desc); return MFM_DATA; } #ifdef DEBUG printk("hdr accepted\n"); #endif if (!(raw = scan_sync (raw, end))) { printk(KERN_INFO "dos_read: no data sync on track " "%d, unit %d for sector%d, disk sector %d\n", unit[drive].track, drive, scnt, hdr.sec); return MFM_NOSYNC; } #ifdef DEBUG dbg(raw); #endif if (*((ushort *)raw)!=0x5545) { printk(KERN_INFO "dos_read: no data mark after " "sync (%d,%d,%d,%d) sc=%d\n", hdr.track,hdr.side,hdr.sec,hdr.len_desc,scnt); return MFM_NOSYNC; } raw+=2; /* skip data mark (included in 
checksum) */ raw = dos_decode((unsigned char *)(unit[drive].trackbuf + (hdr.sec - 1) * 512), (ushort *) raw, 512); raw = dos_decode((unsigned char *)data_crc,(ushort *) raw,4); crc = dos_data_crc(unit[drive].trackbuf + (hdr.sec - 1) * 512); if (crc != data_crc[0]) { printk(KERN_INFO "dos_read: MFM_DATA (%d,%d,%d,%d) " "sc=%d, %x %x\n", hdr.track, hdr.side, hdr.sec, hdr.len_desc, scnt,data_crc[0], crc); printk(KERN_INFO "data=(%lx,%lx,%lx,%lx,...)\n", ((ulong *)(unit[drive].trackbuf+(hdr.sec-1)*512))[0], ((ulong *)(unit[drive].trackbuf+(hdr.sec-1)*512))[1], ((ulong *)(unit[drive].trackbuf+(hdr.sec-1)*512))[2], ((ulong *)(unit[drive].trackbuf+(hdr.sec-1)*512))[3]); return MFM_DATA; } } return 0; } static inline ushort dos_encode_byte(unsigned char byte) { register unsigned char *enc, b2, b1; register ushort word; enc=mfmencode; b1=byte; b2=b1>>4; b1&=15; word=enc[b2] <<8 | enc [b1]; return (word|((word&(256|64)) ? 0: 128)); } static void dos_encode_block(ushort *dest, unsigned char *src, int len) { int i; for (i = 0; i < len; i++) { *dest=dos_encode_byte(*src++); *dest|=((dest[-1]&1)||(*dest&0x4000))? 
0: 0x8000; dest++; } } static unsigned long *ms_putsec(int drive, unsigned long *raw, int cnt) { static struct dos_header hdr={0,0,0,2,0, {78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78}}; int i; static ushort crc[2]={0,0x4e4e}; drive&=3; /* id gap 1 */ /* the MFM word before is always 9254 */ for(i=0;i<6;i++) *raw++=0xaaaaaaaa; /* 3 sync + 1 headermark */ *raw++=0x44894489; *raw++=0x44895554; /* fill in the variable parts of the header */ hdr.track=unit[drive].track/unit[drive].type->heads; hdr.side=unit[drive].track%unit[drive].type->heads; hdr.sec=cnt+1; hdr.crc=dos_hdr_crc(&hdr); /* header (without "magic") and id gap 2*/ dos_encode_block((ushort *)raw,(unsigned char *) &hdr.track,28); raw+=14; /*id gap 3 */ for(i=0;i<6;i++) *raw++=0xaaaaaaaa; /* 3 syncs and 1 datamark */ *raw++=0x44894489; *raw++=0x44895545; /* data */ dos_encode_block((ushort *)raw, (unsigned char *)unit[drive].trackbuf+cnt*512,512); raw+=256; /*data crc + jd's special gap (long words :-/) */ crc[0]=dos_data_crc(unit[drive].trackbuf+cnt*512); dos_encode_block((ushort *) raw,(unsigned char *)crc,4); raw+=2; /* data gap */ for(i=0;i<38;i++) *raw++=0x92549254; return raw; /* wrote 652 MFM words */ } static void dos_write(int disk) { int cnt; unsigned long raw = (unsigned long) raw_buf; unsigned long *ptr=(unsigned long *)raw; disk&=3; /* really gap4 + indexgap , but we write it first and round it up */ for (cnt=0;cnt<425;cnt++) *ptr++=0x92549254; /* the following is just guessed */ if (unit[disk].type->sect_mult==2) /* check for HD-Disks */ for(cnt=0;cnt<473;cnt++) *ptr++=0x92549254; /* now the index marks...*/ for (cnt=0;cnt<20;cnt++) *ptr++=0x92549254; for (cnt=0;cnt<6;cnt++) *ptr++=0xaaaaaaaa; *ptr++=0x52245224; *ptr++=0x52245552; for (cnt=0;cnt<20;cnt++) *ptr++=0x92549254; /* sectors */ for(cnt = 0; cnt < unit[disk].dtype->sects * unit[disk].type->sect_mult; cnt++) ptr=ms_putsec(disk,ptr,cnt); *(ushort *)ptr = 0xaaa8; /* MFM word before is always 0x9254 */ } /* * Here 
comes the high level stuff (i.e. the filesystem interface) * and helper functions. * Normally this should be the only part that has to be adapted to * different kernel versions. */ /* FIXME: this assumes the drive is still spinning - * which is only true if we complete writing a track within three seconds */ static void flush_track_callback(unsigned long nr) { nr&=3; writefromint = 1; if (!try_fdc(nr)) { /* we might block in an interrupt, so try again later */ flush_track_timer[nr].expires = jiffies + 1; add_timer(flush_track_timer + nr); return; } get_fdc(nr); (*unit[nr].dtype->write_fkt)(nr); if (!raw_write(nr)) { printk (KERN_NOTICE "floppy disk write protected\n"); writefromint = 0; writepending = 0; } rel_fdc(); } static int non_int_flush_track (unsigned long nr) { unsigned long flags; nr&=3; writefromint = 0; del_timer(&post_write_timer); get_fdc(nr); if (!fd_motor_on(nr)) { writepending = 0; rel_fdc(); return 0; } local_irq_save(flags); if (writepending != 2) { local_irq_restore(flags); (*unit[nr].dtype->write_fkt)(nr); if (!raw_write(nr)) { printk (KERN_NOTICE "floppy disk write protected " "in write!\n"); writepending = 0; return 0; } wait_event(wait_fd_block, block_flag != 2); } else { local_irq_restore(flags); ms_delay(2); /* 2 ms post_write delay */ post_write(nr); } rel_fdc(); return 1; } static int get_track(int drive, int track) { int error, errcnt; drive&=3; if (unit[drive].track == track) return 0; get_fdc(drive); if (!fd_motor_on(drive)) { rel_fdc(); return -1; } if (unit[drive].dirty == 1) { del_timer (flush_track_timer + drive); non_int_flush_track (drive); } errcnt = 0; while (errcnt < MAX_ERRORS) { if (!fd_seek(drive, track)) return -1; raw_read(drive); error = (*unit[drive].dtype->read_fkt)(drive); if (error == 0) { rel_fdc(); return 0; } /* Read Error Handling: recalibrate and try again */ unit[drive].track = -1; errcnt++; } rel_fdc(); return -1; } /* * Round-robin between our available drives, doing one request from each */ static struct 
request *set_next_request(void) { struct request_queue *q; int cnt = FD_MAX_UNITS; struct request *rq = NULL; /* Find next queue we can dispatch from */ fdc_queue = fdc_queue + 1; if (fdc_queue == FD_MAX_UNITS) fdc_queue = 0; for(cnt = FD_MAX_UNITS; cnt > 0; cnt--) { if (unit[fdc_queue].type->code == FD_NODRIVE) { if (++fdc_queue == FD_MAX_UNITS) fdc_queue = 0; continue; } q = unit[fdc_queue].gendisk->queue; if (q) { rq = blk_fetch_request(q); if (rq) break; } if (++fdc_queue == FD_MAX_UNITS) fdc_queue = 0; } return rq; } static void redo_fd_request(void) { struct request *rq; unsigned int cnt, block, track, sector; int drive; struct amiga_floppy_struct *floppy; char *data; unsigned long flags; int err; next_req: rq = set_next_request(); if (!rq) { /* Nothing left to do */ return; } floppy = rq->rq_disk->private_data; drive = floppy - unit; next_segment: /* Here someone could investigate to be more efficient */ for (cnt = 0, err = 0; cnt < blk_rq_cur_sectors(rq); cnt++) { #ifdef DEBUG printk("fd: sector %ld + %d requested for %s\n", blk_rq_pos(rq), cnt, (rq_data_dir(rq) == READ) ? "read" : "write"); #endif block = blk_rq_pos(rq) + cnt; if ((int)block > floppy->blocks) { err = -EIO; break; } track = block / (floppy->dtype->sects * floppy->type->sect_mult); sector = block % (floppy->dtype->sects * floppy->type->sect_mult); data = rq->buffer + 512 * cnt; #ifdef DEBUG printk("access to track %d, sector %d, with buffer at " "0x%08lx\n", track, sector, data); #endif if (get_track(drive, track) == -1) { err = -EIO; break; } if (rq_data_dir(rq) == READ) { memcpy(data, floppy->trackbuf + sector * 512, 512); } else { memcpy(floppy->trackbuf + sector * 512, data, 512); /* keep the drive spinning while writes are scheduled */ if (!fd_motor_on(drive)) { err = -EIO; break; } /* * setup a callback to write the track buffer * after a short (1 tick) delay. 
*/ local_irq_save(flags); floppy->dirty = 1; /* reset the timer */ mod_timer (flush_track_timer + drive, jiffies + 1); local_irq_restore(flags); } } if (__blk_end_request_cur(rq, err)) goto next_segment; goto next_req; } static void do_fd_request(struct request_queue * q) { redo_fd_request(); } static int fd_getgeo(struct block_device *bdev, struct hd_geometry *geo) { int drive = MINOR(bdev->bd_dev) & 3; geo->heads = unit[drive].type->heads; geo->sectors = unit[drive].dtype->sects * unit[drive].type->sect_mult; geo->cylinders = unit[drive].type->tracks; return 0; } static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long param) { struct amiga_floppy_struct *p = bdev->bd_disk->private_data; int drive = p - unit; static struct floppy_struct getprm; void __user *argp = (void __user *)param; switch(cmd){ case FDFMTBEG: get_fdc(drive); if (fd_ref[drive] > 1) { rel_fdc(); return -EBUSY; } fsync_bdev(bdev); if (fd_motor_on(drive) == 0) { rel_fdc(); return -ENODEV; } if (fd_calibrate(drive) == 0) { rel_fdc(); return -ENXIO; } floppy_off(drive); rel_fdc(); break; case FDFMTTRK: if (param < p->type->tracks * p->type->heads) { get_fdc(drive); if (fd_seek(drive,param) != 0){ memset(p->trackbuf, FD_FILL_BYTE, p->dtype->sects * p->type->sect_mult * 512); non_int_flush_track(drive); } floppy_off(drive); rel_fdc(); } else return -EINVAL; break; case FDFMTEND: floppy_off(drive); invalidate_bdev(bdev); break; case FDGETPRM: memset((void *)&getprm, 0, sizeof (getprm)); getprm.track=p->type->tracks; getprm.head=p->type->heads; getprm.sect=p->dtype->sects * p->type->sect_mult; getprm.size=p->blocks; if (copy_to_user(argp, &getprm, sizeof(struct floppy_struct))) return -EFAULT; break; case FDSETPRM: case FDDEFPRM: return -EINVAL; case FDFLUSH: /* unconditionally, even if not needed */ del_timer (flush_track_timer + drive); non_int_flush_track(drive); break; #ifdef RAW_IOCTL case IOCTL_RAW_TRACK: if (copy_to_user(argp, raw_buf, 
p->type->read_size)) return -EFAULT; else return p->type->read_size; #endif default: printk(KERN_DEBUG "fd_ioctl: unknown cmd %d for drive %d.", cmd, drive); return -ENOSYS; } return 0; } static int fd_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long param) { int ret; mutex_lock(&amiflop_mutex); ret = fd_locked_ioctl(bdev, mode, cmd, param); mutex_unlock(&amiflop_mutex); return ret; } static void fd_probe(int dev) { unsigned long code; int type; int drive; drive = dev & 3; code = fd_get_drive_id(drive); /* get drive type */ for (type = 0; type < num_dr_types; type++) if (drive_types[type].code == code) break; if (type >= num_dr_types) { printk(KERN_WARNING "fd_probe: unsupported drive type " "%08lx found\n", code); unit[drive].type = &drive_types[num_dr_types-1]; /* FD_NODRIVE */ return; } unit[drive].type = drive_types + type; unit[drive].track = -1; unit[drive].disk = -1; unit[drive].motor = 0; unit[drive].busy = 0; unit[drive].status = -1; } /* * floppy_open check for aliasing (/dev/fd0 can be the same as * /dev/PS0 etc), and disallows simultaneous access to the same * drive with different device numbers. 
*/ static int floppy_open(struct block_device *bdev, fmode_t mode) { int drive = MINOR(bdev->bd_dev) & 3; int system = (MINOR(bdev->bd_dev) & 4) >> 2; int old_dev; unsigned long flags; mutex_lock(&amiflop_mutex); old_dev = fd_device[drive]; if (fd_ref[drive] && old_dev != system) { mutex_unlock(&amiflop_mutex); return -EBUSY; } if (mode & (FMODE_READ|FMODE_WRITE)) { check_disk_change(bdev); if (mode & FMODE_WRITE) { int wrprot; get_fdc(drive); fd_select (drive); wrprot = !(ciaa.pra & DSKPROT); fd_deselect (drive); rel_fdc(); if (wrprot) { mutex_unlock(&amiflop_mutex); return -EROFS; } } } local_irq_save(flags); fd_ref[drive]++; fd_device[drive] = system; local_irq_restore(flags); unit[drive].dtype=&data_types[system]; unit[drive].blocks=unit[drive].type->heads*unit[drive].type->tracks* data_types[system].sects*unit[drive].type->sect_mult; set_capacity(unit[drive].gendisk, unit[drive].blocks); printk(KERN_INFO "fd%d: accessing %s-disk with %s-layout\n",drive, unit[drive].type->name, data_types[system].name); mutex_unlock(&amiflop_mutex); return 0; } static int floppy_release(struct gendisk *disk, fmode_t mode) { struct amiga_floppy_struct *p = disk->private_data; int drive = p - unit; mutex_lock(&amiflop_mutex); if (unit[drive].dirty == 1) { del_timer (flush_track_timer + drive); non_int_flush_track (drive); } if (!fd_ref[drive]--) { printk(KERN_CRIT "floppy_release with fd_ref == 0"); fd_ref[drive] = 0; } #ifdef MODULE /* the mod_use counter is handled this way */ floppy_off (drive | 0x40000000); #endif mutex_unlock(&amiflop_mutex); return 0; } /* * check_events is never called from an interrupt, so we can relax a bit * here, sleep etc. Note that floppy-on tries to set current_DOR to point * to the desired drive, but it will probably not survive the sleep if * several floppies are used at the same time: thus the loop. 
*/ static unsigned amiga_check_events(struct gendisk *disk, unsigned int clearing) { struct amiga_floppy_struct *p = disk->private_data; int drive = p - unit; int changed; static int first_time = 1; if (first_time) changed = first_time--; else { get_fdc(drive); fd_select (drive); changed = !(ciaa.pra & DSKCHANGE); fd_deselect (drive); rel_fdc(); } if (changed) { fd_probe(drive); p->track = -1; p->dirty = 0; writepending = 0; /* if this was true before, too bad! */ writefromint = 0; return DISK_EVENT_MEDIA_CHANGE; } return 0; } static const struct block_device_operations floppy_fops = { .owner = THIS_MODULE, .open = floppy_open, .release = floppy_release, .ioctl = fd_ioctl, .getgeo = fd_getgeo, .check_events = amiga_check_events, }; static int __init fd_probe_drives(void) { int drive,drives,nomem; printk(KERN_INFO "FD: probing units\nfound "); drives=0; nomem=0; for(drive=0;drive<FD_MAX_UNITS;drive++) { struct gendisk *disk; fd_probe(drive); if (unit[drive].type->code == FD_NODRIVE) continue; disk = alloc_disk(1); if (!disk) { unit[drive].type->code = FD_NODRIVE; continue; } unit[drive].gendisk = disk; disk->queue = blk_init_queue(do_fd_request, &amiflop_lock); if (!disk->queue) { unit[drive].type->code = FD_NODRIVE; continue; } drives++; if ((unit[drive].trackbuf = kmalloc(FLOPPY_MAX_SECTORS * 512, GFP_KERNEL)) == NULL) { printk("no mem for "); unit[drive].type = &drive_types[num_dr_types - 1]; /* FD_NODRIVE */ drives--; nomem = 1; } printk("fd%d ",drive); disk->major = FLOPPY_MAJOR; disk->first_minor = drive; disk->fops = &floppy_fops; sprintf(disk->disk_name, "fd%d", drive); disk->private_data = &unit[drive]; set_capacity(disk, 880*2); add_disk(disk); } if ((drives > 0) || (nomem == 0)) { if (drives == 0) printk("no drives"); printk("\n"); return drives; } printk("\n"); return -ENOMEM; } static struct kobject *floppy_find(dev_t dev, int *part, void *data) { int drive = *part & 3; if (unit[drive].type->code == FD_NODRIVE) return NULL; *part = 0; return 
get_disk(unit[drive].gendisk); } static int __init amiga_floppy_probe(struct platform_device *pdev) { int i, ret; if (register_blkdev(FLOPPY_MAJOR,"fd")) return -EBUSY; ret = -ENOMEM; raw_buf = amiga_chip_alloc(RAW_BUF_SIZE, "Floppy"); if (!raw_buf) { printk("fd: cannot get chip mem buffer\n"); goto out_blkdev; } ret = -EBUSY; if (request_irq(IRQ_AMIGA_DSKBLK, fd_block_done, 0, "floppy_dma", NULL)) { printk("fd: cannot get irq for dma\n"); goto out_irq; } if (request_irq(IRQ_AMIGA_CIAA_TB, ms_isr, 0, "floppy_timer", NULL)) { printk("fd: cannot get irq for timer\n"); goto out_irq2; } ret = -ENODEV; if (fd_probe_drives() < 1) /* No usable drives */ goto out_probe; blk_register_region(MKDEV(FLOPPY_MAJOR, 0), 256, THIS_MODULE, floppy_find, NULL, NULL); /* initialize variables */ init_timer(&motor_on_timer); motor_on_timer.expires = 0; motor_on_timer.data = 0; motor_on_timer.function = motor_on_callback; for (i = 0; i < FD_MAX_UNITS; i++) { init_timer(&motor_off_timer[i]); motor_off_timer[i].expires = 0; motor_off_timer[i].data = i|0x80000000; motor_off_timer[i].function = fd_motor_off; init_timer(&flush_track_timer[i]); flush_track_timer[i].expires = 0; flush_track_timer[i].data = i; flush_track_timer[i].function = flush_track_callback; unit[i].track = -1; } init_timer(&post_write_timer); post_write_timer.expires = 0; post_write_timer.data = 0; post_write_timer.function = post_write; for (i = 0; i < 128; i++) mfmdecode[i]=255; for (i = 0; i < 16; i++) mfmdecode[mfmencode[i]]=i; /* make sure that disk DMA is enabled */ custom.dmacon = DMAF_SETCLR | DMAF_DISK; /* init ms timer */ ciaa.crb = 8; /* one-shot, stop */ return 0; out_probe: free_irq(IRQ_AMIGA_CIAA_TB, NULL); out_irq2: free_irq(IRQ_AMIGA_DSKBLK, NULL); out_irq: amiga_chip_free(raw_buf); out_blkdev: unregister_blkdev(FLOPPY_MAJOR,"fd"); return ret; } #if 0 /* not safe to unload */ static int __exit amiga_floppy_remove(struct platform_device *pdev) { int i; for( i = 0; i < FD_MAX_UNITS; i++) { if 
(unit[i].type->code != FD_NODRIVE) { struct request_queue *q = unit[i].gendisk->queue; del_gendisk(unit[i].gendisk); put_disk(unit[i].gendisk); kfree(unit[i].trackbuf); if (q) blk_cleanup_queue(q); } } blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256); free_irq(IRQ_AMIGA_CIAA_TB, NULL); free_irq(IRQ_AMIGA_DSKBLK, NULL); custom.dmacon = DMAF_DISK; /* disable DMA */ amiga_chip_free(raw_buf); unregister_blkdev(FLOPPY_MAJOR, "fd"); } #endif static struct platform_driver amiga_floppy_driver = { .driver = { .name = "amiga-floppy", .owner = THIS_MODULE, }, }; static int __init amiga_floppy_init(void) { return platform_driver_probe(&amiga_floppy_driver, amiga_floppy_probe); } module_init(amiga_floppy_init); #ifndef MODULE static int __init amiga_floppy_setup (char *str) { int n; if (!MACH_IS_AMIGA) return 0; if (!get_option(&str, &n)) return 0; printk (KERN_INFO "amiflop: Setting default df0 to %x\n", n); fd_def_df0 = n; return 1; } __setup("floppy=", amiga_floppy_setup); #endif MODULE_ALIAS("platform:amiga-floppy");
gpl-2.0
evil-god/runbo-q5x6
kernel/fs/nilfs2/direct.c
11633
9161
/*
 * direct.c - NILFS direct block pointer.
 *
 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Written by Koji Sato <koji@osrg.net>.
 */

#include <linux/errno.h>
#include "nilfs.h"
#include "page.h"
#include "direct.h"
#include "alloc.h"
#include "dat.h"

/*
 * Return the array of on-disk (little-endian) block pointers stored
 * directly in the bmap's embedded data area, just past the
 * nilfs_direct_node header.
 */
static inline __le64 *nilfs_direct_dptrs(const struct nilfs_bmap *direct)
{
	return (__le64 *)
		((struct nilfs_direct_node *)direct->b_u.u_data + 1);
}

/* Read the pointer stored at @key, converted to CPU byte order. */
static inline __u64
nilfs_direct_get_ptr(const struct nilfs_bmap *direct, __u64 key)
{
	return le64_to_cpu(*(nilfs_direct_dptrs(direct) + key));
}

/* Store @ptr at slot @key in on-disk (little-endian) byte order. */
static inline void nilfs_direct_set_ptr(struct nilfs_bmap *direct,
					__u64 key, __u64 ptr)
{
	*(nilfs_direct_dptrs(direct) + key) = cpu_to_le64(ptr);
}

/*
 * Look up the single block pointer for @key.  Direct bmaps have only
 * one level, so any @level other than 1 is rejected.  Returns 0 and
 * stores the pointer in *@ptrp, or -ENOENT if the slot is out of range
 * or unassigned.
 */
static int nilfs_direct_lookup(const struct nilfs_bmap *direct,
			       __u64 key, int level, __u64 *ptrp)
{
	__u64 ptr;

	if (key > NILFS_DIRECT_KEY_MAX || level != 1)
		return -ENOENT;
	ptr = nilfs_direct_get_ptr(direct, key);
	if (ptr == NILFS_BMAP_INVALID_PTR)
		return -ENOENT;

	*ptrp = ptr;
	return 0;
}

/*
 * Look up @key and count how many following keys map to physically
 * contiguous blocks (up to @maxblocks and the end of the direct range).
 * Virtual block numbers are translated through the DAT when the bmap
 * uses VBNs.  Returns the run length (>= 1) with the first block number
 * in *@ptrp, or a negative error code.
 */
static int nilfs_direct_lookup_contig(const struct nilfs_bmap *direct,
				      __u64 key, __u64 *ptrp,
				      unsigned maxblocks)
{
	struct inode *dat = NULL;
	__u64 ptr, ptr2;
	sector_t blocknr;
	int ret, cnt;

	if (key > NILFS_DIRECT_KEY_MAX)
		return -ENOENT;
	ptr = nilfs_direct_get_ptr(direct, key);
	if (ptr == NILFS_BMAP_INVALID_PTR)
		return -ENOENT;

	if (NILFS_BMAP_USE_VBN(direct)) {
		dat = nilfs_bmap_get_dat(direct);
		ret = nilfs_dat_translate(dat, ptr, &blocknr);
		if (ret < 0)
			return ret;
		ptr = blocknr;
	}

	/* never scan past the last direct slot */
	maxblocks = min_t(unsigned, maxblocks, NILFS_DIRECT_KEY_MAX - key + 1);
	for (cnt = 1; cnt < maxblocks &&
		     (ptr2 = nilfs_direct_get_ptr(direct, key + cnt)) !=
		     NILFS_BMAP_INVALID_PTR;
	     cnt++) {
		if (dat) {
			ret = nilfs_dat_translate(dat, ptr2, &blocknr);
			if (ret < 0)
				return ret;
			ptr2 = blocknr;
		}
		/* stop at the first physical discontinuity */
		if (ptr2 != ptr + cnt)
			break;
	}
	*ptrp = ptr;
	return cnt;
}

/*
 * Pick a target virtual block number for a new allocation near @key:
 * prefer the sequential-access hint, fall back to the block group.
 */
static __u64
nilfs_direct_find_target_v(const struct nilfs_bmap *direct, __u64 key)
{
	__u64 ptr;

	ptr = nilfs_bmap_find_target_seq(direct, key);
	if (ptr != NILFS_BMAP_INVALID_PTR)
		/* sequential access */
		return ptr;
	else
		/* block group */
		return nilfs_bmap_find_target_in_group(direct);
}

/*
 * Insert a new block at @key.  Despite the prototype, @ptr carries a
 * buffer head pointer cast to __u64 (see the comment below); the actual
 * disk/virtual pointer comes from the allocator.  Returns 0, -ENOENT
 * for an out-of-range key, -EEXIST if the slot is occupied, or an
 * allocator error.
 */
static int nilfs_direct_insert(struct nilfs_bmap *bmap, __u64 key, __u64 ptr)
{
	union nilfs_bmap_ptr_req req;
	struct inode *dat = NULL;
	struct buffer_head *bh;
	int ret;

	if (key > NILFS_DIRECT_KEY_MAX)
		return -ENOENT;
	if (nilfs_direct_get_ptr(bmap, key) !=
	    NILFS_BMAP_INVALID_PTR)
		return -EEXIST;

	if (NILFS_BMAP_USE_VBN(bmap)) {
		req.bpr_ptr = nilfs_direct_find_target_v(bmap, key);
		dat = nilfs_bmap_get_dat(bmap);
	}
	ret = nilfs_bmap_prepare_alloc_ptr(bmap, &req, dat);
	if (!ret) {
		/* ptr must be a pointer to a buffer head. */
		bh = (struct buffer_head *)((unsigned long)ptr);
		set_buffer_nilfs_volatile(bh);

		nilfs_bmap_commit_alloc_ptr(bmap, &req, dat);
		nilfs_direct_set_ptr(bmap, key, req.bpr_ptr);

		if (!nilfs_bmap_dirty(bmap))
			nilfs_bmap_set_dirty(bmap);

		if (NILFS_BMAP_USE_VBN(bmap))
			nilfs_bmap_set_target_v(bmap, key, req.bpr_ptr);

		nilfs_inode_add_blocks(bmap->b_inode, 1);
	}
	return ret;
}

/*
 * Delete the block pointer at @key, releasing it through the
 * prepare/commit end-ptr protocol and updating the inode block count.
 * Returns 0, -ENOENT if the slot is out of range or empty, or an error
 * from the prepare step.
 */
static int nilfs_direct_delete(struct nilfs_bmap *bmap, __u64 key)
{
	union nilfs_bmap_ptr_req req;
	struct inode *dat;
	int ret;

	if (key > NILFS_DIRECT_KEY_MAX ||
	    nilfs_direct_get_ptr(bmap, key) == NILFS_BMAP_INVALID_PTR)
		return -ENOENT;

	dat = NILFS_BMAP_USE_VBN(bmap) ? nilfs_bmap_get_dat(bmap) : NULL;
	req.bpr_ptr = nilfs_direct_get_ptr(bmap, key);

	ret = nilfs_bmap_prepare_end_ptr(bmap, &req, dat);
	if (!ret) {
		nilfs_bmap_commit_end_ptr(bmap, &req, dat);
		nilfs_direct_set_ptr(bmap, key, NILFS_BMAP_INVALID_PTR);
		nilfs_inode_sub_blocks(bmap->b_inode, 1);
	}
	return ret;
}

/*
 * Find the largest key with an assigned pointer.  Returns 0 with the
 * key in *@keyp, or -ENOENT if the bmap is empty.
 */
static int nilfs_direct_last_key(const struct nilfs_bmap *direct, __u64 *keyp)
{
	__u64 key, lastkey;

	/* sentinel: one past the valid key range */
	lastkey = NILFS_DIRECT_KEY_MAX + 1;
	for (key = NILFS_DIRECT_KEY_MIN; key <= NILFS_DIRECT_KEY_MAX; key++)
		if (nilfs_direct_get_ptr(direct, key) !=
		    NILFS_BMAP_INVALID_PTR)
			lastkey = key;

	if (lastkey == NILFS_DIRECT_KEY_MAX + 1)
		return -ENOENT;

	*keyp = lastkey;
	return 0;
}

/*
 * Non-zero means @key does not fit in a direct bmap, i.e. the caller
 * must convert to a btree before inserting.
 */
static int nilfs_direct_check_insert(const struct nilfs_bmap *bmap, __u64 key)
{
	return key > NILFS_DIRECT_KEY_MAX;
}

/*
 * Collect up to @nitems assigned (key, pointer) pairs into @keys/@ptrs,
 * scanning keys 0..nitems-1.  Returns the number of pairs gathered.
 */
static int nilfs_direct_gather_data(struct nilfs_bmap *direct,
				    __u64 *keys, __u64 *ptrs, int nitems)
{
	__u64 key;
	__u64 ptr;
	int n;

	if (nitems > NILFS_DIRECT_NBLOCKS)
		nitems = NILFS_DIRECT_NBLOCKS;
	n = 0;
	for (key = 0; key < nitems; key++) {
		ptr = nilfs_direct_get_ptr(direct, key);
		if (ptr != NILFS_BMAP_INVALID_PTR) {
			keys[n] = key;
			ptrs[n] = ptr;
			n++;
		}
	}
	return n;
}

/*
 * Convert a (small) btree bmap back into a direct bmap: delete @key via
 * the current ops, clear the old representation, then rebuild the
 * direct pointer array from the @n gathered (@keys, @ptrs) pairs,
 * skipping @key itself.  Finishes by re-initializing the bmap with the
 * direct ops.
 */
int nilfs_direct_delete_and_convert(struct nilfs_bmap *bmap,
				    __u64 key, __u64 *keys, __u64 *ptrs, int n)
{
	__le64 *dptrs;
	int ret, i, j;

	/* no need to allocate any resource for conversion */

	/* delete */
	ret = bmap->b_ops->bop_delete(bmap, key);
	if (ret < 0)
		return ret;

	/* free resources */
	if (bmap->b_ops->bop_clear != NULL)
		bmap->b_ops->bop_clear(bmap);

	/* convert */
	dptrs = nilfs_direct_dptrs(bmap);
	for (i = 0, j = 0; i < NILFS_DIRECT_NBLOCKS; i++) {
		if ((j < n) && (i == keys[j])) {
			/* NOTE(review): the INVALID_PTR arm stores the raw
			 * constant into a __le64 slot without cpu_to_le64 —
			 * presumably safe because the invalid pattern is
			 * byte-order independent; confirm against the
			 * definition of NILFS_BMAP_INVALID_PTR. */
			dptrs[i] = (i != key) ?
				cpu_to_le64(ptrs[j]) :
				NILFS_BMAP_INVALID_PTR;
			j++;
		} else
			dptrs[i] = NILFS_BMAP_INVALID_PTR;
	}

	nilfs_direct_init(bmap);
	return 0;
}

/*
 * Propagate dirtiness of buffer @bh to the DAT: for a not-yet-volatile
 * buffer, move its DAT entry to a new one (prepare/commit update pair)
 * and record the new entry number; otherwise just mark the existing
 * entry dirty.  No-op for bmaps that do not use virtual block numbers.
 */
static int nilfs_direct_propagate(struct nilfs_bmap *bmap,
				  struct buffer_head *bh)
{
	struct nilfs_palloc_req oldreq, newreq;
	struct inode *dat;
	__u64 key;
	__u64 ptr;
	int ret;

	if (!NILFS_BMAP_USE_VBN(bmap))
		return 0;

	dat = nilfs_bmap_get_dat(bmap);
	key = nilfs_bmap_data_get_key(bmap, bh);
	ptr = nilfs_direct_get_ptr(bmap, key);
	if (!buffer_nilfs_volatile(bh)) {
		oldreq.pr_entry_nr = ptr;
		newreq.pr_entry_nr = ptr;
		ret = nilfs_dat_prepare_update(dat, &oldreq, &newreq);
		if (ret < 0)
			return ret;
		nilfs_dat_commit_update(dat, &oldreq, &newreq,
					bmap->b_ptr_type == NILFS_BMAP_PTR_VS);
		set_buffer_nilfs_volatile(bh);
		nilfs_direct_set_ptr(bmap, key, newreq.pr_entry_nr);
	} else
		ret = nilfs_dat_mark_dirty(dat, ptr);

	return ret;
}

/*
 * Assign a real disk location @blocknr to virtual block @ptr through
 * the DAT and fill in the virtual-block binfo for the segment log.
 */
static int nilfs_direct_assign_v(struct nilfs_bmap *direct,
				 __u64 key, __u64 ptr,
				 struct buffer_head **bh,
				 sector_t blocknr,
				 union nilfs_binfo *binfo)
{
	struct inode *dat = nilfs_bmap_get_dat(direct);
	union nilfs_bmap_ptr_req req;
	int ret;

	req.bpr_ptr = ptr;
	ret = nilfs_dat_prepare_start(dat, &req.bpr_req);
	if (!ret) {
		nilfs_dat_commit_start(dat, &req.bpr_req, blocknr);
		binfo->bi_v.bi_vblocknr = cpu_to_le64(ptr);
		binfo->bi_v.bi_blkoff = cpu_to_le64(key);
	}
	return ret;
}

/*
 * Assign a physical block number directly (non-VBN bmap): record
 * @blocknr in the direct slot and fill in the DAT-style binfo.
 */
static int nilfs_direct_assign_p(struct nilfs_bmap *direct,
				 __u64 key, __u64 ptr,
				 struct buffer_head **bh,
				 sector_t blocknr,
				 union nilfs_binfo *binfo)
{
	nilfs_direct_set_ptr(direct, key, blocknr);

	binfo->bi_dat.bi_blkoff = cpu_to_le64(key);
	binfo->bi_dat.bi_level = 0;

	return 0;
}

/*
 * Dispatch block assignment for buffer *@bh to the virtual or physical
 * variant after validating the key and current pointer.  Returns
 * -EINVAL (with a KERN_CRIT log) on inconsistent bmap state.
 */
static int nilfs_direct_assign(struct nilfs_bmap *bmap,
			       struct buffer_head **bh,
			       sector_t blocknr,
			       union nilfs_binfo *binfo)
{
	__u64 key;
	__u64 ptr;

	key = nilfs_bmap_data_get_key(bmap, *bh);
	if (unlikely(key > NILFS_DIRECT_KEY_MAX)) {
		printk(KERN_CRIT "%s: invalid key: %llu\n", __func__,
		       (unsigned long long)key);
		return -EINVAL;
	}
	ptr = nilfs_direct_get_ptr(bmap, key);
	if (unlikely(ptr == NILFS_BMAP_INVALID_PTR)) {
		printk(KERN_CRIT "%s: invalid pointer: %llu\n", __func__,
		       (unsigned long long)ptr);
		return -EINVAL;
	}

	return NILFS_BMAP_USE_VBN(bmap) ?
		nilfs_direct_assign_v(bmap, key, ptr, bh, blocknr, binfo) :
		nilfs_direct_assign_p(bmap, key, ptr, bh, blocknr, binfo);
}

/* Operation table installed by nilfs_direct_init(). */
static const struct nilfs_bmap_operations nilfs_direct_ops = {
	.bop_lookup		=	nilfs_direct_lookup,
	.bop_lookup_contig	=	nilfs_direct_lookup_contig,
	.bop_insert		=	nilfs_direct_insert,
	.bop_delete		=	nilfs_direct_delete,
	.bop_clear		=	NULL,

	.bop_propagate		=	nilfs_direct_propagate,

	.bop_lookup_dirty_buffers	=	NULL,

	.bop_assign		=	nilfs_direct_assign,
	.bop_mark		=	NULL,

	.bop_last_key		=	nilfs_direct_last_key,
	.bop_check_insert	=	nilfs_direct_check_insert,
	.bop_check_delete	=	NULL,
	.bop_gather_data	=	nilfs_direct_gather_data,
};

/* Switch @bmap to the direct-pointer operation table. */
int nilfs_direct_init(struct nilfs_bmap *bmap)
{
	bmap->b_ops = &nilfs_direct_ops;
	return 0;
}
gpl-2.0
TheTypoMaster/SM-G360T1_kernel
arch/mn10300/lib/negdi2.c
13937
1821
/* More subroutines needed by GCC output code on some machines. */ /* Compile this one with gcc. */ /* Copyright (C) 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001 Free Software Foundation, Inc. This file is part of GNU CC. GNU CC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public Licence as published by the Free Software Foundation; either version 2, or (at your option) any later version. In addition to the permissions in the GNU General Public Licence, the Free Software Foundation gives you unlimited permission to link the compiled version of this file into combinations with other programs, and to distribute those combinations without any restriction coming from the use of this file. (The General Public Licence restrictions do apply in other respects; for example, they cover modification of the file, and distribution when not linked into a combine executable.) GNU CC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public Licence for more details. You should have received a copy of the GNU General Public Licence along with GNU CC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* It is incorrect to include config.h here, because this file is being compiled for the target, and hence definitions concerning only the host do not apply. */ #include <linux/types.h> union DWunion { s64 ll; struct { s32 low; s32 high; } s; }; s64 __negdi2(s64 u) { union DWunion w; union DWunion uu; uu.ll = u; w.s.low = -uu.s.low; w.s.high = -uu.s.high - ((u32) w.s.low > 0); return w.ll; }
gpl-2.0
nikhil16242/stock-golfu-kenrel
arch/mn10300/lib/negdi2.c
13937
1821
/* More subroutines needed by GCC output code on some machines. */ /* Compile this one with gcc. */ /* Copyright (C) 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001 Free Software Foundation, Inc. This file is part of GNU CC. GNU CC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public Licence as published by the Free Software Foundation; either version 2, or (at your option) any later version. In addition to the permissions in the GNU General Public Licence, the Free Software Foundation gives you unlimited permission to link the compiled version of this file into combinations with other programs, and to distribute those combinations without any restriction coming from the use of this file. (The General Public Licence restrictions do apply in other respects; for example, they cover modification of the file, and distribution when not linked into a combine executable.) GNU CC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public Licence for more details. You should have received a copy of the GNU General Public Licence along with GNU CC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* It is incorrect to include config.h here, because this file is being compiled for the target, and hence definitions concerning only the host do not apply. */ #include <linux/types.h> union DWunion { s64 ll; struct { s32 low; s32 high; } s; }; s64 __negdi2(s64 u) { union DWunion w; union DWunion uu; uu.ll = u; w.s.low = -uu.s.low; w.s.high = -uu.s.high - ((u32) w.s.low > 0); return w.ll; }
gpl-2.0
leexdon/linux
fs/btrfs/disk-io.c
114
122240
/* * Copyright (C) 2007 Oracle. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License v2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 021110-1307, USA. */ #include <linux/fs.h> #include <linux/blkdev.h> #include <linux/scatterlist.h> #include <linux/swap.h> #include <linux/radix-tree.h> #include <linux/writeback.h> #include <linux/buffer_head.h> #include <linux/workqueue.h> #include <linux/kthread.h> #include <linux/freezer.h> #include <linux/slab.h> #include <linux/migrate.h> #include <linux/ratelimit.h> #include <linux/uuid.h> #include <linux/semaphore.h> #include <asm/unaligned.h> #include "ctree.h" #include "disk-io.h" #include "hash.h" #include "transaction.h" #include "btrfs_inode.h" #include "volumes.h" #include "print-tree.h" #include "locking.h" #include "tree-log.h" #include "free-space-cache.h" #include "inode-map.h" #include "check-integrity.h" #include "rcu-string.h" #include "dev-replace.h" #include "raid56.h" #include "sysfs.h" #include "qgroup.h" #ifdef CONFIG_X86 #include <asm/cpufeature.h> #endif static const struct extent_io_ops btree_extent_io_ops; static void end_workqueue_fn(struct btrfs_work *work); static void free_fs_root(struct btrfs_root *root); static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info, int read_only); static void btrfs_destroy_ordered_extents(struct btrfs_root *root); static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans, struct btrfs_root *root); static void 
btrfs_destroy_delalloc_inodes(struct btrfs_root *root); static int btrfs_destroy_marked_extents(struct btrfs_root *root, struct extent_io_tree *dirty_pages, int mark); static int btrfs_destroy_pinned_extent(struct btrfs_root *root, struct extent_io_tree *pinned_extents); static int btrfs_cleanup_transaction(struct btrfs_root *root); static void btrfs_error_commit_super(struct btrfs_root *root); /* * btrfs_end_io_wq structs are used to do processing in task context when an IO * is complete. This is used during reads to verify checksums, and it is used * by writes to insert metadata for new file extents after IO is complete. */ struct btrfs_end_io_wq { struct bio *bio; bio_end_io_t *end_io; void *private; struct btrfs_fs_info *info; int error; enum btrfs_wq_endio_type metadata; struct list_head list; struct btrfs_work work; }; static struct kmem_cache *btrfs_end_io_wq_cache; int __init btrfs_end_io_wq_init(void) { btrfs_end_io_wq_cache = kmem_cache_create("btrfs_end_io_wq", sizeof(struct btrfs_end_io_wq), 0, SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL); if (!btrfs_end_io_wq_cache) return -ENOMEM; return 0; } void btrfs_end_io_wq_exit(void) { if (btrfs_end_io_wq_cache) kmem_cache_destroy(btrfs_end_io_wq_cache); } /* * async submit bios are used to offload expensive checksumming * onto the worker threads. They checksum file and metadata bios * just before they are sent down the IO stack. */ struct async_submit_bio { struct inode *inode; struct bio *bio; struct list_head list; extent_submit_bio_hook_t *submit_bio_start; extent_submit_bio_hook_t *submit_bio_done; int rw; int mirror_num; unsigned long bio_flags; /* * bio_offset is optional, can be used if the pages in the bio * can't tell us where in the file the bio should go */ u64 bio_offset; struct btrfs_work work; int error; }; /* * Lockdep class keys for extent_buffer->lock's in this root. For a given * eb, the lockdep key is determined by the btrfs_root it belongs to and * the level the eb occupies in the tree. 
* * Different roots are used for different purposes and may nest inside each * other and they require separate keysets. As lockdep keys should be * static, assign keysets according to the purpose of the root as indicated * by btrfs_root->objectid. This ensures that all special purpose roots * have separate keysets. * * Lock-nesting across peer nodes is always done with the immediate parent * node locked thus preventing deadlock. As lockdep doesn't know this, use * subclass to avoid triggering lockdep warning in such cases. * * The key is set by the readpage_end_io_hook after the buffer has passed * csum validation but before the pages are unlocked. It is also set by * btrfs_init_new_buffer on freshly allocated blocks. * * We also add a check to make sure the highest level of the tree is the * same as our lockdep setup here. If BTRFS_MAX_LEVEL changes, this code * needs update as well. */ #ifdef CONFIG_DEBUG_LOCK_ALLOC # if BTRFS_MAX_LEVEL != 8 # error # endif static struct btrfs_lockdep_keyset { u64 id; /* root objectid */ const char *name_stem; /* lock name stem */ char names[BTRFS_MAX_LEVEL + 1][20]; struct lock_class_key keys[BTRFS_MAX_LEVEL + 1]; } btrfs_lockdep_keysets[] = { { .id = BTRFS_ROOT_TREE_OBJECTID, .name_stem = "root" }, { .id = BTRFS_EXTENT_TREE_OBJECTID, .name_stem = "extent" }, { .id = BTRFS_CHUNK_TREE_OBJECTID, .name_stem = "chunk" }, { .id = BTRFS_DEV_TREE_OBJECTID, .name_stem = "dev" }, { .id = BTRFS_FS_TREE_OBJECTID, .name_stem = "fs" }, { .id = BTRFS_CSUM_TREE_OBJECTID, .name_stem = "csum" }, { .id = BTRFS_QUOTA_TREE_OBJECTID, .name_stem = "quota" }, { .id = BTRFS_TREE_LOG_OBJECTID, .name_stem = "log" }, { .id = BTRFS_TREE_RELOC_OBJECTID, .name_stem = "treloc" }, { .id = BTRFS_DATA_RELOC_TREE_OBJECTID, .name_stem = "dreloc" }, { .id = BTRFS_UUID_TREE_OBJECTID, .name_stem = "uuid" }, { .id = 0, .name_stem = "tree" }, }; void __init btrfs_init_lockdep(void) { int i, j; /* initialize lockdep class names */ for (i = 0; i < 
ARRAY_SIZE(btrfs_lockdep_keysets); i++) { struct btrfs_lockdep_keyset *ks = &btrfs_lockdep_keysets[i]; for (j = 0; j < ARRAY_SIZE(ks->names); j++) snprintf(ks->names[j], sizeof(ks->names[j]), "btrfs-%s-%02d", ks->name_stem, j); } } void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb, int level) { struct btrfs_lockdep_keyset *ks; BUG_ON(level >= ARRAY_SIZE(ks->keys)); /* find the matching keyset, id 0 is the default entry */ for (ks = btrfs_lockdep_keysets; ks->id; ks++) if (ks->id == objectid) break; lockdep_set_class_and_name(&eb->lock, &ks->keys[level], ks->names[level]); } #endif /* * extents on the btree inode are pretty simple, there's one extent * that covers the entire device */ static struct extent_map *btree_get_extent(struct inode *inode, struct page *page, size_t pg_offset, u64 start, u64 len, int create) { struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; struct extent_map *em; int ret; read_lock(&em_tree->lock); em = lookup_extent_mapping(em_tree, start, len); if (em) { em->bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev; read_unlock(&em_tree->lock); goto out; } read_unlock(&em_tree->lock); em = alloc_extent_map(); if (!em) { em = ERR_PTR(-ENOMEM); goto out; } em->start = 0; em->len = (u64)-1; em->block_len = (u64)-1; em->block_start = 0; em->bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev; write_lock(&em_tree->lock); ret = add_extent_mapping(em_tree, em, 0); if (ret == -EEXIST) { free_extent_map(em); em = lookup_extent_mapping(em_tree, start, len); if (!em) em = ERR_PTR(-EIO); } else if (ret) { free_extent_map(em); em = ERR_PTR(ret); } write_unlock(&em_tree->lock); out: return em; } u32 btrfs_csum_data(char *data, u32 seed, size_t len) { return btrfs_crc32c(seed, data, len); } void btrfs_csum_final(u32 crc, char *result) { put_unaligned_le32(~crc, result); } /* * compute the csum for a btree block, and either verify it or write it * into the csum field of the block. 
*/ static int csum_tree_block(struct btrfs_fs_info *fs_info, struct extent_buffer *buf, int verify) { u16 csum_size = btrfs_super_csum_size(fs_info->super_copy); char *result = NULL; unsigned long len; unsigned long cur_len; unsigned long offset = BTRFS_CSUM_SIZE; char *kaddr; unsigned long map_start; unsigned long map_len; int err; u32 crc = ~(u32)0; unsigned long inline_result; len = buf->len - offset; while (len > 0) { err = map_private_extent_buffer(buf, offset, 32, &kaddr, &map_start, &map_len); if (err) return 1; cur_len = min(len, map_len - (offset - map_start)); crc = btrfs_csum_data(kaddr + offset - map_start, crc, cur_len); len -= cur_len; offset += cur_len; } if (csum_size > sizeof(inline_result)) { result = kzalloc(csum_size, GFP_NOFS); if (!result) return 1; } else { result = (char *)&inline_result; } btrfs_csum_final(crc, result); if (verify) { if (memcmp_extent_buffer(buf, result, 0, csum_size)) { u32 val; u32 found = 0; memcpy(&found, result, csum_size); read_extent_buffer(buf, &val, 0, csum_size); printk_ratelimited(KERN_WARNING "BTRFS: %s checksum verify failed on %llu wanted %X found %X " "level %d\n", fs_info->sb->s_id, buf->start, val, found, btrfs_header_level(buf)); if (result != (char *)&inline_result) kfree(result); return 1; } } else { write_extent_buffer(buf, result, 0, csum_size); } if (result != (char *)&inline_result) kfree(result); return 0; } /* * we can't consider a given block up to date unless the transid of the * block matches the transid in the parent node's pointer. This is how we * detect blocks that either didn't get written at all or got written * in the wrong place. 
*/ static int verify_parent_transid(struct extent_io_tree *io_tree, struct extent_buffer *eb, u64 parent_transid, int atomic) { struct extent_state *cached_state = NULL; int ret; bool need_lock = (current->journal_info == BTRFS_SEND_TRANS_STUB); if (!parent_transid || btrfs_header_generation(eb) == parent_transid) return 0; if (atomic) return -EAGAIN; if (need_lock) { btrfs_tree_read_lock(eb); btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK); } lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1, 0, &cached_state); if (extent_buffer_uptodate(eb) && btrfs_header_generation(eb) == parent_transid) { ret = 0; goto out; } printk_ratelimited(KERN_ERR "BTRFS (device %s): parent transid verify failed on %llu wanted %llu found %llu\n", eb->fs_info->sb->s_id, eb->start, parent_transid, btrfs_header_generation(eb)); ret = 1; /* * Things reading via commit roots that don't have normal protection, * like send, can have a really old block in cache that may point at a * block that has been free'd and re-allocated. So don't clear uptodate * if we find an eb that is under IO (dirty/writeback) because we could * end up reading in the stale data and then writing it back out and * making everybody very sad. */ if (!extent_buffer_under_io(eb)) clear_extent_buffer_uptodate(eb); out: unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1, &cached_state, GFP_NOFS); if (need_lock) btrfs_tree_read_unlock_blocking(eb); return ret; } /* * Return 0 if the superblock checksum type matches the checksum value of that * algorithm. Pass the raw disk superblock data. 
*/ static int btrfs_check_super_csum(char *raw_disk_sb) { struct btrfs_super_block *disk_sb = (struct btrfs_super_block *)raw_disk_sb; u16 csum_type = btrfs_super_csum_type(disk_sb); int ret = 0; if (csum_type == BTRFS_CSUM_TYPE_CRC32) { u32 crc = ~(u32)0; const int csum_size = sizeof(crc); char result[csum_size]; /* * The super_block structure does not span the whole * BTRFS_SUPER_INFO_SIZE range, we expect that the unused space * is filled with zeros and is included in the checkum. */ crc = btrfs_csum_data(raw_disk_sb + BTRFS_CSUM_SIZE, crc, BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE); btrfs_csum_final(crc, result); if (memcmp(raw_disk_sb, result, csum_size)) ret = 1; } if (csum_type >= ARRAY_SIZE(btrfs_csum_sizes)) { printk(KERN_ERR "BTRFS: unsupported checksum algorithm %u\n", csum_type); ret = 1; } return ret; } /* * helper to read a given tree block, doing retries as required when * the checksums don't match and we have alternate mirrors to try. */ static int btree_read_extent_buffer_pages(struct btrfs_root *root, struct extent_buffer *eb, u64 start, u64 parent_transid) { struct extent_io_tree *io_tree; int failed = 0; int ret; int num_copies = 0; int mirror_num = 0; int failed_mirror = 0; clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags); io_tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree; while (1) { ret = read_extent_buffer_pages(io_tree, eb, start, WAIT_COMPLETE, btree_get_extent, mirror_num); if (!ret) { if (!verify_parent_transid(io_tree, eb, parent_transid, 0)) break; else ret = -EIO; } /* * This buffer's crc is fine, but its contents are corrupted, so * there is no reason to read the other copies, they won't be * any less wrong. 
*/ if (test_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags)) break; num_copies = btrfs_num_copies(root->fs_info, eb->start, eb->len); if (num_copies == 1) break; if (!failed_mirror) { failed = 1; failed_mirror = eb->read_mirror; } mirror_num++; if (mirror_num == failed_mirror) mirror_num++; if (mirror_num > num_copies) break; } if (failed && !ret && failed_mirror) repair_eb_io_failure(root, eb, failed_mirror); return ret; } /* * checksum a dirty tree block before IO. This has extra checks to make sure * we only fill in the checksum field in the first page of a multi-page block */ static int csum_dirty_buffer(struct btrfs_fs_info *fs_info, struct page *page) { u64 start = page_offset(page); u64 found_start; struct extent_buffer *eb; eb = (struct extent_buffer *)page->private; if (page != eb->pages[0]) return 0; found_start = btrfs_header_bytenr(eb); if (WARN_ON(found_start != start || !PageUptodate(page))) return 0; csum_tree_block(fs_info, eb, 0); return 0; } static int check_tree_block_fsid(struct btrfs_fs_info *fs_info, struct extent_buffer *eb) { struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; u8 fsid[BTRFS_UUID_SIZE]; int ret = 1; read_extent_buffer(eb, fsid, btrfs_header_fsid(), BTRFS_FSID_SIZE); while (fs_devices) { if (!memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE)) { ret = 0; break; } fs_devices = fs_devices->seed; } return ret; } #define CORRUPT(reason, eb, root, slot) \ btrfs_crit(root->fs_info, "corrupt leaf, %s: block=%llu," \ "root=%llu, slot=%d", reason, \ btrfs_header_bytenr(eb), root->objectid, slot) static noinline int check_leaf(struct btrfs_root *root, struct extent_buffer *leaf) { struct btrfs_key key; struct btrfs_key leaf_key; u32 nritems = btrfs_header_nritems(leaf); int slot; if (nritems == 0) return 0; /* Check the 0 item */ if (btrfs_item_offset_nr(leaf, 0) + btrfs_item_size_nr(leaf, 0) != BTRFS_LEAF_DATA_SIZE(root)) { CORRUPT("invalid item offset size pair", leaf, root, 0); return -EIO; } /* * Check to make sure each items keys are 
in the correct order and their * offsets make sense. We only have to loop through nritems-1 because * we check the current slot against the next slot, which verifies the * next slot's offset+size makes sense and that the current's slot * offset is correct. */ for (slot = 0; slot < nritems - 1; slot++) { btrfs_item_key_to_cpu(leaf, &leaf_key, slot); btrfs_item_key_to_cpu(leaf, &key, slot + 1); /* Make sure the keys are in the right order */ if (btrfs_comp_cpu_keys(&leaf_key, &key) >= 0) { CORRUPT("bad key order", leaf, root, slot); return -EIO; } /* * Make sure the offset and ends are right, remember that the * item data starts at the end of the leaf and grows towards the * front. */ if (btrfs_item_offset_nr(leaf, slot) != btrfs_item_end_nr(leaf, slot + 1)) { CORRUPT("slot offset bad", leaf, root, slot); return -EIO; } /* * Check to make sure that we don't point outside of the leaf, * just incase all the items are consistent to eachother, but * all point outside of the leaf. */ if (btrfs_item_end_nr(leaf, slot) > BTRFS_LEAF_DATA_SIZE(root)) { CORRUPT("slot end outside of leaf", leaf, root, slot); return -EIO; } } return 0; } static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio, u64 phy_offset, struct page *page, u64 start, u64 end, int mirror) { u64 found_start; int found_level; struct extent_buffer *eb; struct btrfs_root *root = BTRFS_I(page->mapping->host)->root; int ret = 0; int reads_done; if (!page->private) goto out; eb = (struct extent_buffer *)page->private; /* the pending IO might have been the only thing that kept this buffer * in memory. 
Make sure we have a ref for all this other checks */ extent_buffer_get(eb); reads_done = atomic_dec_and_test(&eb->io_pages); if (!reads_done) goto err; eb->read_mirror = mirror; if (test_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags)) { ret = -EIO; goto err; } found_start = btrfs_header_bytenr(eb); if (found_start != eb->start) { printk_ratelimited(KERN_ERR "BTRFS (device %s): bad tree block start " "%llu %llu\n", eb->fs_info->sb->s_id, found_start, eb->start); ret = -EIO; goto err; } if (check_tree_block_fsid(root->fs_info, eb)) { printk_ratelimited(KERN_ERR "BTRFS (device %s): bad fsid on block %llu\n", eb->fs_info->sb->s_id, eb->start); ret = -EIO; goto err; } found_level = btrfs_header_level(eb); if (found_level >= BTRFS_MAX_LEVEL) { btrfs_err(root->fs_info, "bad tree block level %d", (int)btrfs_header_level(eb)); ret = -EIO; goto err; } btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb), eb, found_level); ret = csum_tree_block(root->fs_info, eb, 1); if (ret) { ret = -EIO; goto err; } /* * If this is a leaf block and it is corrupt, set the corrupt bit so * that we don't try and read the other copies of this block, just * return -EIO. 
*/ if (found_level == 0 && check_leaf(root, eb)) { set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags); ret = -EIO; } if (!ret) set_extent_buffer_uptodate(eb); err: if (reads_done && test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags)) btree_readahead_hook(root, eb, eb->start, ret); if (ret) { /* * our io error hook is going to dec the io pages * again, we have to make sure it has something * to decrement */ atomic_inc(&eb->io_pages); clear_extent_buffer_uptodate(eb); } free_extent_buffer(eb); out: return ret; } static int btree_io_failed_hook(struct page *page, int failed_mirror) { struct extent_buffer *eb; struct btrfs_root *root = BTRFS_I(page->mapping->host)->root; eb = (struct extent_buffer *)page->private; set_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags); eb->read_mirror = failed_mirror; atomic_dec(&eb->io_pages); if (test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags)) btree_readahead_hook(root, eb, eb->start, -EIO); return -EIO; /* we fixed nothing */ } static void end_workqueue_bio(struct bio *bio, int err) { struct btrfs_end_io_wq *end_io_wq = bio->bi_private; struct btrfs_fs_info *fs_info; struct btrfs_workqueue *wq; btrfs_work_func_t func; fs_info = end_io_wq->info; end_io_wq->error = err; if (bio->bi_rw & REQ_WRITE) { if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA) { wq = fs_info->endio_meta_write_workers; func = btrfs_endio_meta_write_helper; } else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE) { wq = fs_info->endio_freespace_worker; func = btrfs_freespace_write_helper; } else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) { wq = fs_info->endio_raid56_workers; func = btrfs_endio_raid56_helper; } else { wq = fs_info->endio_write_workers; func = btrfs_endio_write_helper; } } else { if (unlikely(end_io_wq->metadata == BTRFS_WQ_ENDIO_DIO_REPAIR)) { wq = fs_info->endio_repair_workers; func = btrfs_endio_repair_helper; } else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) { wq = fs_info->endio_raid56_workers; func = 
btrfs_endio_raid56_helper; } else if (end_io_wq->metadata) { wq = fs_info->endio_meta_workers; func = btrfs_endio_meta_helper; } else { wq = fs_info->endio_workers; func = btrfs_endio_helper; } } btrfs_init_work(&end_io_wq->work, func, end_workqueue_fn, NULL, NULL); btrfs_queue_work(wq, &end_io_wq->work); } int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio, enum btrfs_wq_endio_type metadata) { struct btrfs_end_io_wq *end_io_wq; end_io_wq = kmem_cache_alloc(btrfs_end_io_wq_cache, GFP_NOFS); if (!end_io_wq) return -ENOMEM; end_io_wq->private = bio->bi_private; end_io_wq->end_io = bio->bi_end_io; end_io_wq->info = info; end_io_wq->error = 0; end_io_wq->bio = bio; end_io_wq->metadata = metadata; bio->bi_private = end_io_wq; bio->bi_end_io = end_workqueue_bio; return 0; } unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info) { unsigned long limit = min_t(unsigned long, info->thread_pool_size, info->fs_devices->open_devices); return 256 * limit; } static void run_one_async_start(struct btrfs_work *work) { struct async_submit_bio *async; int ret; async = container_of(work, struct async_submit_bio, work); ret = async->submit_bio_start(async->inode, async->rw, async->bio, async->mirror_num, async->bio_flags, async->bio_offset); if (ret) async->error = ret; } static void run_one_async_done(struct btrfs_work *work) { struct btrfs_fs_info *fs_info; struct async_submit_bio *async; int limit; async = container_of(work, struct async_submit_bio, work); fs_info = BTRFS_I(async->inode)->root->fs_info; limit = btrfs_async_submit_limit(fs_info); limit = limit * 2 / 3; if (atomic_dec_return(&fs_info->nr_async_submits) < limit && waitqueue_active(&fs_info->async_submit_wait)) wake_up(&fs_info->async_submit_wait); /* If an error occured we just want to clean up the bio and move on */ if (async->error) { bio_endio(async->bio, async->error); return; } async->submit_bio_done(async->inode, async->rw, async->bio, async->mirror_num, async->bio_flags, 
async->bio_offset); } static void run_one_async_free(struct btrfs_work *work) { struct async_submit_bio *async; async = container_of(work, struct async_submit_bio, work); kfree(async); } int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode, int rw, struct bio *bio, int mirror_num, unsigned long bio_flags, u64 bio_offset, extent_submit_bio_hook_t *submit_bio_start, extent_submit_bio_hook_t *submit_bio_done) { struct async_submit_bio *async; async = kmalloc(sizeof(*async), GFP_NOFS); if (!async) return -ENOMEM; async->inode = inode; async->rw = rw; async->bio = bio; async->mirror_num = mirror_num; async->submit_bio_start = submit_bio_start; async->submit_bio_done = submit_bio_done; btrfs_init_work(&async->work, btrfs_worker_helper, run_one_async_start, run_one_async_done, run_one_async_free); async->bio_flags = bio_flags; async->bio_offset = bio_offset; async->error = 0; atomic_inc(&fs_info->nr_async_submits); if (rw & REQ_SYNC) btrfs_set_work_high_priority(&async->work); btrfs_queue_work(fs_info->workers, &async->work); while (atomic_read(&fs_info->async_submit_draining) && atomic_read(&fs_info->nr_async_submits)) { wait_event(fs_info->async_submit_wait, (atomic_read(&fs_info->nr_async_submits) == 0)); } return 0; } static int btree_csum_one_bio(struct bio *bio) { struct bio_vec *bvec; struct btrfs_root *root; int i, ret = 0; bio_for_each_segment_all(bvec, bio, i) { root = BTRFS_I(bvec->bv_page->mapping->host)->root; ret = csum_dirty_buffer(root->fs_info, bvec->bv_page); if (ret) break; } return ret; } static int __btree_submit_bio_start(struct inode *inode, int rw, struct bio *bio, int mirror_num, unsigned long bio_flags, u64 bio_offset) { /* * when we're called for a write, we're already in the async * submission context. 
Just jump into btrfs_map_bio */ return btree_csum_one_bio(bio); } static int __btree_submit_bio_done(struct inode *inode, int rw, struct bio *bio, int mirror_num, unsigned long bio_flags, u64 bio_offset) { int ret; /* * when we're called for a write, we're already in the async * submission context. Just jump into btrfs_map_bio */ ret = btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num, 1); if (ret) bio_endio(bio, ret); return ret; } static int check_async_write(struct inode *inode, unsigned long bio_flags) { if (bio_flags & EXTENT_BIO_TREE_LOG) return 0; #ifdef CONFIG_X86 if (cpu_has_xmm4_2) return 0; #endif return 1; } static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, int mirror_num, unsigned long bio_flags, u64 bio_offset) { int async = check_async_write(inode, bio_flags); int ret; if (!(rw & REQ_WRITE)) { /* * called for a read, do the setup so that checksum validation * can happen in the async kernel threads */ ret = btrfs_bio_wq_end_io(BTRFS_I(inode)->root->fs_info, bio, BTRFS_WQ_ENDIO_METADATA); if (ret) goto out_w_error; ret = btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num, 0); } else if (!async) { ret = btree_csum_one_bio(bio); if (ret) goto out_w_error; ret = btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num, 0); } else { /* * kthread helpers are used to submit writes so that * checksumming can happen in parallel across all CPUs */ ret = btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info, inode, rw, bio, mirror_num, 0, bio_offset, __btree_submit_bio_start, __btree_submit_bio_done); } if (ret) { out_w_error: bio_endio(bio, ret); } return ret; } #ifdef CONFIG_MIGRATION static int btree_migratepage(struct address_space *mapping, struct page *newpage, struct page *page, enum migrate_mode mode) { /* * we can't safely write a btree page from here, * we haven't done the locking hook */ if (PageDirty(page)) return -EAGAIN; /* * Buffers may be managed in a filesystem specific way. 
* We must have no buffers or drop them. */ if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL)) return -EAGAIN; return migrate_page(mapping, newpage, page, mode); } #endif static int btree_writepages(struct address_space *mapping, struct writeback_control *wbc) { struct btrfs_fs_info *fs_info; int ret; if (wbc->sync_mode == WB_SYNC_NONE) { if (wbc->for_kupdate) return 0; fs_info = BTRFS_I(mapping->host)->root->fs_info; /* this is a bit racy, but that's ok */ ret = percpu_counter_compare(&fs_info->dirty_metadata_bytes, BTRFS_DIRTY_METADATA_THRESH); if (ret < 0) return 0; } return btree_write_cache_pages(mapping, wbc); } static int btree_readpage(struct file *file, struct page *page) { struct extent_io_tree *tree; tree = &BTRFS_I(page->mapping->host)->io_tree; return extent_read_full_page(tree, page, btree_get_extent, 0); } static int btree_releasepage(struct page *page, gfp_t gfp_flags) { if (PageWriteback(page) || PageDirty(page)) return 0; return try_release_extent_buffer(page); } static void btree_invalidatepage(struct page *page, unsigned int offset, unsigned int length) { struct extent_io_tree *tree; tree = &BTRFS_I(page->mapping->host)->io_tree; extent_invalidatepage(tree, page, offset); btree_releasepage(page, GFP_NOFS); if (PagePrivate(page)) { btrfs_warn(BTRFS_I(page->mapping->host)->root->fs_info, "page private not zero on page %llu", (unsigned long long)page_offset(page)); ClearPagePrivate(page); set_page_private(page, 0); page_cache_release(page); } } static int btree_set_page_dirty(struct page *page) { #ifdef DEBUG struct extent_buffer *eb; BUG_ON(!PagePrivate(page)); eb = (struct extent_buffer *)page->private; BUG_ON(!eb); BUG_ON(!test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)); BUG_ON(!atomic_read(&eb->refs)); btrfs_assert_tree_locked(eb); #endif return __set_page_dirty_nobuffers(page); } static const struct address_space_operations btree_aops = { .readpage = btree_readpage, .writepages = btree_writepages, .releasepage = 
btree_releasepage, .invalidatepage = btree_invalidatepage, #ifdef CONFIG_MIGRATION .migratepage = btree_migratepage, #endif .set_page_dirty = btree_set_page_dirty, }; void readahead_tree_block(struct btrfs_root *root, u64 bytenr) { struct extent_buffer *buf = NULL; struct inode *btree_inode = root->fs_info->btree_inode; buf = btrfs_find_create_tree_block(root, bytenr); if (!buf) return; read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree, buf, 0, WAIT_NONE, btree_get_extent, 0); free_extent_buffer(buf); } int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr, int mirror_num, struct extent_buffer **eb) { struct extent_buffer *buf = NULL; struct inode *btree_inode = root->fs_info->btree_inode; struct extent_io_tree *io_tree = &BTRFS_I(btree_inode)->io_tree; int ret; buf = btrfs_find_create_tree_block(root, bytenr); if (!buf) return 0; set_bit(EXTENT_BUFFER_READAHEAD, &buf->bflags); ret = read_extent_buffer_pages(io_tree, buf, 0, WAIT_PAGE_LOCK, btree_get_extent, mirror_num); if (ret) { free_extent_buffer(buf); return ret; } if (test_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags)) { free_extent_buffer(buf); return -EIO; } else if (extent_buffer_uptodate(buf)) { *eb = buf; } else { free_extent_buffer(buf); } return 0; } struct extent_buffer *btrfs_find_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr) { return find_extent_buffer(fs_info, bytenr); } struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root, u64 bytenr) { if (btrfs_test_is_dummy_root(root)) return alloc_test_extent_buffer(root->fs_info, bytenr); return alloc_extent_buffer(root->fs_info, bytenr); } int btrfs_write_tree_block(struct extent_buffer *buf) { return filemap_fdatawrite_range(buf->pages[0]->mapping, buf->start, buf->start + buf->len - 1); } int btrfs_wait_tree_block_writeback(struct extent_buffer *buf) { return filemap_fdatawait_range(buf->pages[0]->mapping, buf->start, buf->start + buf->len - 1); } struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 
bytenr, u64 parent_transid) { struct extent_buffer *buf = NULL; int ret; buf = btrfs_find_create_tree_block(root, bytenr); if (!buf) return ERR_PTR(-ENOMEM); ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid); if (ret) { free_extent_buffer(buf); return ERR_PTR(ret); } return buf; } void clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info, struct extent_buffer *buf) { if (btrfs_header_generation(buf) == fs_info->running_transaction->transid) { btrfs_assert_tree_locked(buf); if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)) { __percpu_counter_add(&fs_info->dirty_metadata_bytes, -buf->len, fs_info->dirty_metadata_batch); /* ugh, clear_extent_buffer_dirty needs to lock the page */ btrfs_set_lock_blocking(buf); clear_extent_buffer_dirty(buf); } } } static struct btrfs_subvolume_writers *btrfs_alloc_subvolume_writers(void) { struct btrfs_subvolume_writers *writers; int ret; writers = kmalloc(sizeof(*writers), GFP_NOFS); if (!writers) return ERR_PTR(-ENOMEM); ret = percpu_counter_init(&writers->counter, 0, GFP_KERNEL); if (ret < 0) { kfree(writers); return ERR_PTR(ret); } init_waitqueue_head(&writers->wait); return writers; } static void btrfs_free_subvolume_writers(struct btrfs_subvolume_writers *writers) { percpu_counter_destroy(&writers->counter); kfree(writers); } static void __setup_root(u32 nodesize, u32 sectorsize, u32 stripesize, struct btrfs_root *root, struct btrfs_fs_info *fs_info, u64 objectid) { root->node = NULL; root->commit_root = NULL; root->sectorsize = sectorsize; root->nodesize = nodesize; root->stripesize = stripesize; root->state = 0; root->orphan_cleanup_state = 0; root->objectid = objectid; root->last_trans = 0; root->highest_objectid = 0; root->nr_delalloc_inodes = 0; root->nr_ordered_extents = 0; root->name = NULL; root->inode_tree = RB_ROOT; INIT_RADIX_TREE(&root->delayed_nodes_tree, GFP_ATOMIC); root->block_rsv = NULL; root->orphan_block_rsv = NULL; INIT_LIST_HEAD(&root->dirty_list); 
INIT_LIST_HEAD(&root->root_list); INIT_LIST_HEAD(&root->delalloc_inodes); INIT_LIST_HEAD(&root->delalloc_root); INIT_LIST_HEAD(&root->ordered_extents); INIT_LIST_HEAD(&root->ordered_root); INIT_LIST_HEAD(&root->logged_list[0]); INIT_LIST_HEAD(&root->logged_list[1]); spin_lock_init(&root->orphan_lock); spin_lock_init(&root->inode_lock); spin_lock_init(&root->delalloc_lock); spin_lock_init(&root->ordered_extent_lock); spin_lock_init(&root->accounting_lock); spin_lock_init(&root->log_extents_lock[0]); spin_lock_init(&root->log_extents_lock[1]); mutex_init(&root->objectid_mutex); mutex_init(&root->log_mutex); mutex_init(&root->ordered_extent_mutex); mutex_init(&root->delalloc_mutex); init_waitqueue_head(&root->log_writer_wait); init_waitqueue_head(&root->log_commit_wait[0]); init_waitqueue_head(&root->log_commit_wait[1]); INIT_LIST_HEAD(&root->log_ctxs[0]); INIT_LIST_HEAD(&root->log_ctxs[1]); atomic_set(&root->log_commit[0], 0); atomic_set(&root->log_commit[1], 0); atomic_set(&root->log_writers, 0); atomic_set(&root->log_batch, 0); atomic_set(&root->orphan_inodes, 0); atomic_set(&root->refs, 1); atomic_set(&root->will_be_snapshoted, 0); root->log_transid = 0; root->log_transid_committed = -1; root->last_log_commit = 0; if (fs_info) extent_io_tree_init(&root->dirty_log_pages, fs_info->btree_inode->i_mapping); memset(&root->root_key, 0, sizeof(root->root_key)); memset(&root->root_item, 0, sizeof(root->root_item)); memset(&root->defrag_progress, 0, sizeof(root->defrag_progress)); if (fs_info) root->defrag_trans_start = fs_info->generation; else root->defrag_trans_start = 0; root->root_key.objectid = objectid; root->anon_dev = 0; spin_lock_init(&root->root_item_lock); } static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info) { struct btrfs_root *root = kzalloc(sizeof(*root), GFP_NOFS); if (root) root->fs_info = fs_info; return root; } #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS /* Should only be used by the testing infrastructure */ struct btrfs_root 
*btrfs_alloc_dummy_root(void) { struct btrfs_root *root; root = btrfs_alloc_root(NULL); if (!root) return ERR_PTR(-ENOMEM); __setup_root(4096, 4096, 4096, root, NULL, 1); set_bit(BTRFS_ROOT_DUMMY_ROOT, &root->state); root->alloc_bytenr = 0; return root; } #endif struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info, u64 objectid) { struct extent_buffer *leaf; struct btrfs_root *tree_root = fs_info->tree_root; struct btrfs_root *root; struct btrfs_key key; int ret = 0; uuid_le uuid; root = btrfs_alloc_root(fs_info); if (!root) return ERR_PTR(-ENOMEM); __setup_root(tree_root->nodesize, tree_root->sectorsize, tree_root->stripesize, root, fs_info, objectid); root->root_key.objectid = objectid; root->root_key.type = BTRFS_ROOT_ITEM_KEY; root->root_key.offset = 0; leaf = btrfs_alloc_tree_block(trans, root, 0, objectid, NULL, 0, 0, 0); if (IS_ERR(leaf)) { ret = PTR_ERR(leaf); leaf = NULL; goto fail; } memset_extent_buffer(leaf, 0, 0, sizeof(struct btrfs_header)); btrfs_set_header_bytenr(leaf, leaf->start); btrfs_set_header_generation(leaf, trans->transid); btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV); btrfs_set_header_owner(leaf, objectid); root->node = leaf; write_extent_buffer(leaf, fs_info->fsid, btrfs_header_fsid(), BTRFS_FSID_SIZE); write_extent_buffer(leaf, fs_info->chunk_tree_uuid, btrfs_header_chunk_tree_uuid(leaf), BTRFS_UUID_SIZE); btrfs_mark_buffer_dirty(leaf); root->commit_root = btrfs_root_node(root); set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state); root->root_item.flags = 0; root->root_item.byte_limit = 0; btrfs_set_root_bytenr(&root->root_item, leaf->start); btrfs_set_root_generation(&root->root_item, trans->transid); btrfs_set_root_level(&root->root_item, 0); btrfs_set_root_refs(&root->root_item, 1); btrfs_set_root_used(&root->root_item, leaf->len); btrfs_set_root_last_snapshot(&root->root_item, 0); btrfs_set_root_dirid(&root->root_item, 0); uuid_le_gen(&uuid); memcpy(root->root_item.uuid, uuid.b, 
BTRFS_UUID_SIZE); root->root_item.drop_level = 0; key.objectid = objectid; key.type = BTRFS_ROOT_ITEM_KEY; key.offset = 0; ret = btrfs_insert_root(trans, tree_root, &key, &root->root_item); if (ret) goto fail; btrfs_tree_unlock(leaf); return root; fail: if (leaf) { btrfs_tree_unlock(leaf); free_extent_buffer(root->commit_root); free_extent_buffer(leaf); } kfree(root); return ERR_PTR(ret); } static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info) { struct btrfs_root *root; struct btrfs_root *tree_root = fs_info->tree_root; struct extent_buffer *leaf; root = btrfs_alloc_root(fs_info); if (!root) return ERR_PTR(-ENOMEM); __setup_root(tree_root->nodesize, tree_root->sectorsize, tree_root->stripesize, root, fs_info, BTRFS_TREE_LOG_OBJECTID); root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID; root->root_key.type = BTRFS_ROOT_ITEM_KEY; root->root_key.offset = BTRFS_TREE_LOG_OBJECTID; /* * DON'T set REF_COWS for log trees * * log trees do not get reference counted because they go away * before a real commit is actually done. They do store pointers * to file data extents, and those reference counts still get * updated (along with back refs to the log tree). 
*/
	leaf = btrfs_alloc_tree_block(trans, root, 0, BTRFS_TREE_LOG_OBJECTID,
			NULL, 0, 0, 0);
	if (IS_ERR(leaf)) {
		kfree(root);
		return ERR_CAST(leaf);
	}

	/* start the log root from a zeroed header owned by the log tree */
	memset_extent_buffer(leaf, 0, 0, sizeof(struct btrfs_header));
	btrfs_set_header_bytenr(leaf, leaf->start);
	btrfs_set_header_generation(leaf, trans->transid);
	btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
	btrfs_set_header_owner(leaf, BTRFS_TREE_LOG_OBJECTID);
	root->node = leaf;

	write_extent_buffer(root->node, root->fs_info->fsid,
			    btrfs_header_fsid(), BTRFS_FSID_SIZE);
	btrfs_mark_buffer_dirty(root->node);
	/* btrfs_alloc_tree_block returned the buffer locked */
	btrfs_tree_unlock(root->node);
	return root;
}

/*
 * Allocate the top-level log root tree and install it in
 * fs_info->log_root_tree.  Returns 0 or a negative errno from
 * alloc_log_tree().
 */
int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *log_root;

	log_root = alloc_log_tree(trans, fs_info);
	if (IS_ERR(log_root))
		return PTR_ERR(log_root);
	/* only one log root tree may exist at a time */
	WARN_ON(fs_info->log_root_tree);
	fs_info->log_root_tree = log_root;
	return 0;
}

/*
 * Allocate a per-subvolume log tree for @root and initialize its
 * root item so the log can be committed later.  Returns 0 or a
 * negative errno.
 */
int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root)
{
	struct btrfs_root *log_root;
	struct btrfs_inode_item *inode_item;

	log_root = alloc_log_tree(trans, root->fs_info);
	if (IS_ERR(log_root))
		return PTR_ERR(log_root);

	log_root->last_trans = trans->transid;
	/* log trees are keyed by the objectid of the subvolume they log */
	log_root->root_key.offset = root->root_key.objectid;

	inode_item = &log_root->root_item.inode;
	btrfs_set_stack_inode_generation(inode_item, 1);
	btrfs_set_stack_inode_size(inode_item, 3);
	btrfs_set_stack_inode_nlink(inode_item, 1);
	btrfs_set_stack_inode_nbytes(inode_item, root->nodesize);
	btrfs_set_stack_inode_mode(inode_item, S_IFDIR | 0755);

	btrfs_set_root_node(&log_root->root_item, log_root->node);

	WARN_ON(root->log_root);
	root->log_root = log_root;
	root->log_transid = 0;
	root->log_transid_committed = -1;
	root->last_log_commit = 0;
	return 0;
}

/*
 * Read one root from the tree of tree roots: look up its root item by
 * @key and read its root node from disk.  Returns the root or an
 * ERR_PTR; cleanup is via the goto chain at the bottom.
 */
static struct btrfs_root *btrfs_read_tree_root(struct btrfs_root *tree_root,
					       struct btrfs_key *key)
{
	struct btrfs_root *root;
	struct btrfs_fs_info *fs_info = tree_root->fs_info;
	struct btrfs_path *path;
	u64 generation;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return ERR_PTR(-ENOMEM);

	root = btrfs_alloc_root(fs_info);
	if (!root) {
		ret = -ENOMEM;
		goto alloc_fail;
	}

	__setup_root(tree_root->nodesize, tree_root->sectorsize,
		     tree_root->stripesize, root, fs_info, key->objectid);

	ret = btrfs_find_root(tree_root, key, path,
			      &root->root_item, &root->root_key);
	if (ret) {
		/* ret > 0 means the root item simply was not found */
		if (ret > 0)
			ret = -ENOENT;
		goto find_fail;
	}

	generation = btrfs_root_generation(&root->root_item);
	root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
				     generation);
	if (IS_ERR(root->node)) {
		ret = PTR_ERR(root->node);
		goto find_fail;
	} else if (!btrfs_buffer_uptodate(root->node, generation, 0)) {
		ret = -EIO;
		free_extent_buffer(root->node);
		goto find_fail;
	}
	root->commit_root = btrfs_root_node(root);
out:
	btrfs_free_path(path);
	return root;

find_fail:
	kfree(root);
alloc_fail:
	root = ERR_PTR(ret);
	goto out;
}

/*
 * Read a subvolume root and, unless it is a log tree, mark it as a
 * reference-counted (COW) root and sanitize its root item.
 */
struct btrfs_root *btrfs_read_fs_root(struct btrfs_root *tree_root,
				      struct btrfs_key *location)
{
	struct btrfs_root *root;

	root = btrfs_read_tree_root(tree_root, location);
	if (IS_ERR(root))
		return root;

	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
		set_bit(BTRFS_ROOT_REF_COWS, &root->state);
		btrfs_check_and_init_root_item(&root->root_item);
	}

	return root;
}

/*
 * Set up the in-memory state of a subvolume root: free-inode caches,
 * subvolume writer tracking and an anonymous block device number.
 * On failure everything allocated here is unwound (kfree(NULL) is a
 * no-op for the free_ino allocations).
 */
int btrfs_init_fs_root(struct btrfs_root *root)
{
	int ret;
	struct btrfs_subvolume_writers *writers;

	root->free_ino_ctl = kzalloc(sizeof(*root->free_ino_ctl), GFP_NOFS);
	root->free_ino_pinned = kzalloc(sizeof(*root->free_ino_pinned),
					GFP_NOFS);
	if (!root->free_ino_pinned || !root->free_ino_ctl) {
		ret = -ENOMEM;
		goto fail;
	}

	writers = btrfs_alloc_subvolume_writers();
	if (IS_ERR(writers)) {
		ret = PTR_ERR(writers);
		goto fail;
	}
	root->subv_writers = writers;

	btrfs_init_free_ino_ctl(root);
	spin_lock_init(&root->ino_cache_lock);
	init_waitqueue_head(&root->ino_cache_wait);

	ret = get_anon_bdev(&root->anon_dev);
	if (ret)
		goto free_writers;
	return 0;

free_writers:
	btrfs_free_subvolume_writers(root->subv_writers);
fail:
	kfree(root->free_ino_ctl);
	kfree(root->free_ino_pinned);
	return ret;
}

/*
 * Look up a cached fs root by objectid in the fs_roots radix tree.
 * Returns NULL when the root has not been loaded yet.
 */
static struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
					       u64 root_id)
{
	struct btrfs_root *root;

	spin_lock(&fs_info->fs_roots_radix_lock);
	root = radix_tree_lookup(&fs_info->fs_roots_radix,
				 (unsigned long)root_id);
	spin_unlock(&fs_info->fs_roots_radix_lock);
	return root;
}

/*
 * Insert a root into the fs_roots radix tree.  Preload outside the
 * spinlock so the insert itself cannot sleep; -EEXIST is passed back
 * to the caller to handle the lost race.
 */
int btrfs_insert_fs_root(struct btrfs_fs_info *fs_info,
			 struct btrfs_root *root)
{
	int ret;

	ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
	if (ret)
		return ret;

	spin_lock(&fs_info->fs_roots_radix_lock);
	ret = radix_tree_insert(&fs_info->fs_roots_radix,
				(unsigned long)root->root_key.objectid,
				root);
	if (ret == 0)
		set_bit(BTRFS_ROOT_IN_RADIX, &root->state);
	spin_unlock(&fs_info->fs_roots_radix_lock);
	radix_tree_preload_end();

	return ret;
}

/*
 * Return the root for @location, reading and caching it on first use.
 * The permanent trees are answered straight from fs_info; subvolume
 * roots go through the radix-tree cache.  With @check_ref set, roots
 * whose refs dropped to zero (deleted subvolumes) return -ENOENT.
 */
struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info,
				     struct btrfs_key *location,
				     bool check_ref)
{
	struct btrfs_root *root;
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret;

	if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
		return fs_info->tree_root;
	if (location->objectid == BTRFS_EXTENT_TREE_OBJECTID)
		return fs_info->extent_root;
	if (location->objectid == BTRFS_CHUNK_TREE_OBJECTID)
		return fs_info->chunk_root;
	if (location->objectid == BTRFS_DEV_TREE_OBJECTID)
		return fs_info->dev_root;
	if (location->objectid == BTRFS_CSUM_TREE_OBJECTID)
		return fs_info->csum_root;
	if (location->objectid == BTRFS_QUOTA_TREE_OBJECTID)
		return fs_info->quota_root ? fs_info->quota_root :
					     ERR_PTR(-ENOENT);
	if (location->objectid == BTRFS_UUID_TREE_OBJECTID)
		return fs_info->uuid_root ? fs_info->uuid_root :
					    ERR_PTR(-ENOENT);
again:
	root = btrfs_lookup_fs_root(fs_info, location->objectid);
	if (root) {
		if (check_ref && btrfs_root_refs(&root->root_item) == 0)
			return ERR_PTR(-ENOENT);
		return root;
	}

	root = btrfs_read_fs_root(fs_info->tree_root, location);
	if (IS_ERR(root))
		return root;

	if (check_ref && btrfs_root_refs(&root->root_item) == 0) {
		ret = -ENOENT;
		goto fail;
	}

	ret = btrfs_init_fs_root(root);
	if (ret)
		goto fail;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto fail;
	}
	/* remember whether this subvolume has an orphan item pending */
	key.objectid = BTRFS_ORPHAN_OBJECTID;
	key.type = BTRFS_ORPHAN_ITEM_KEY;
	key.offset = location->objectid;

	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
	btrfs_free_path(path);
	if (ret < 0)
		goto fail;
	if (ret == 0)
		set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state);

	ret = btrfs_insert_fs_root(fs_info, root);
	if (ret) {
		if (ret == -EEXIST) {
			/* lost the insertion race; retry the lookup */
			free_fs_root(root);
			goto again;
		}
		goto fail;
	}
	return root;
fail:
	free_fs_root(root);
	return ERR_PTR(ret);
}

/*
 * backing_dev_info congestion callback: the filesystem is congested
 * if any member device's bdi is congested.
 */
static int btrfs_congested_fn(void *congested_data, int bdi_bits)
{
	struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data;
	int ret = 0;
	struct btrfs_device *device;
	struct backing_dev_info *bdi;

	rcu_read_lock();
	list_for_each_entry_rcu(device, &info->fs_devices->devices, dev_list) {
		if (!device->bdev)
			continue;
		bdi = blk_get_backing_dev_info(device->bdev);
		if (bdi_congested(bdi, bdi_bits)) {
			ret = 1;
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}

/* register the filesystem's backing_dev_info and hook up congestion */
static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
{
	int err;

	err = bdi_setup_and_register(bdi, "btrfs");
	if (err)
		return err;

	bdi->ra_pages = VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE;
	bdi->congested_fn = btrfs_congested_fn;
	bdi->congested_data = info;
	return 0;
}

/*
 * called by the kthread helper functions to finally call the bio end_io
 * functions.
 * This is where read checksum verification actually happens
 */
static void end_workqueue_fn(struct btrfs_work *work)
{
	struct bio *bio;
	struct btrfs_end_io_wq *end_io_wq;
	int error;

	end_io_wq = container_of(work, struct btrfs_end_io_wq, work);
	bio = end_io_wq->bio;

	/* restore the original completion context before ending the bio */
	error = end_io_wq->error;
	bio->bi_private = end_io_wq->private;
	bio->bi_end_io = end_io_wq->end_io;
	kmem_cache_free(btrfs_end_io_wq_cache, end_io_wq);
	bio_endio(bio, error);
}

/*
 * Background thread that runs delayed iputs, drops one deleted
 * snapshot per iteration and deletes unused block groups, sleeping
 * whenever there is nothing to do.
 */
static int cleaner_kthread(void *arg)
{
	struct btrfs_root *root = arg;
	int again;
	struct btrfs_trans_handle *trans;

	do {
		again = 0;

		/* Make the cleaner go to sleep early. */
		if (btrfs_need_cleaner_sleep(root))
			goto sleep;

		if (!mutex_trylock(&root->fs_info->cleaner_mutex))
			goto sleep;

		/*
		 * Avoid the problem that we change the status of the fs
		 * during the above check and trylock.
		 */
		if (btrfs_need_cleaner_sleep(root)) {
			mutex_unlock(&root->fs_info->cleaner_mutex);
			goto sleep;
		}

		btrfs_run_delayed_iputs(root);
		again = btrfs_clean_one_deleted_snapshot(root);
		mutex_unlock(&root->fs_info->cleaner_mutex);

		/*
		 * The defragger has dealt with the R/O remount and umount,
		 * needn't do anything special here.
		 */
		btrfs_run_defrag_inodes(root->fs_info);

		/*
		 * Acquires fs_info->delete_unused_bgs_mutex to avoid racing
		 * with relocation (btrfs_relocate_chunk) and relocation
		 * acquires fs_info->cleaner_mutex (btrfs_relocate_block_group)
		 * after acquiring fs_info->delete_unused_bgs_mutex. So we
		 * can't hold, nor need to, fs_info->cleaner_mutex when deleting
		 * unused block groups.
		 */
		btrfs_delete_unused_bgs(root->fs_info);
sleep:
		if (!try_to_freeze() && !again) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (!kthread_should_stop())
				schedule();
			__set_current_state(TASK_RUNNING);
		}
	} while (!kthread_should_stop());

	/*
	 * Transaction kthread is stopped before us and wakes us up.
	 * However we might have started a new transaction and COWed some
	 * tree blocks when deleting unused block groups for example. So
	 * make sure we commit the transaction we started to have a clean
	 * shutdown when evicting the btree inode - if it has dirty pages
	 * when we do the final iput() on it, eviction will trigger a
	 * writeback for it which will fail with null pointer dereferences
	 * since work queues and other resources were already released and
	 * destroyed by the time the iput/eviction/writeback is made.
	 */
	trans = btrfs_attach_transaction(root);
	if (IS_ERR(trans)) {
		if (PTR_ERR(trans) != -ENOENT)
			btrfs_err(root->fs_info,
				  "cleaner transaction attach returned %ld",
				  PTR_ERR(trans));
	} else {
		int ret;

		ret = btrfs_commit_transaction(trans, root);
		if (ret)
			btrfs_err(root->fs_info,
				  "cleaner open transaction commit returned %d",
				  ret);
	}

	return 0;
}

/*
 * Background thread that periodically commits the running transaction
 * once it is older than the commit interval (or blocked), then kicks
 * the cleaner.
 */
static int transaction_kthread(void *arg)
{
	struct btrfs_root *root = arg;
	struct btrfs_trans_handle *trans;
	struct btrfs_transaction *cur;
	u64 transid;
	unsigned long now;
	unsigned long delay;
	bool cannot_commit;

	do {
		cannot_commit = false;
		delay = HZ * root->fs_info->commit_interval;
		mutex_lock(&root->fs_info->transaction_kthread_mutex);

		spin_lock(&root->fs_info->trans_lock);
		cur = root->fs_info->running_transaction;
		if (!cur) {
			spin_unlock(&root->fs_info->trans_lock);
			goto sleep;
		}

		now = get_seconds();
		/* transaction too young to commit: re-check in 5 seconds */
		if (cur->state < TRANS_STATE_BLOCKED &&
		    (now < cur->start_time ||
		     now - cur->start_time < root->fs_info->commit_interval)) {
			spin_unlock(&root->fs_info->trans_lock);
			delay = HZ * 5;
			goto sleep;
		}
		transid = cur->transid;
		spin_unlock(&root->fs_info->trans_lock);

		/* If the file system is aborted, this will always fail. */
		trans = btrfs_attach_transaction(root);
		if (IS_ERR(trans)) {
			if (PTR_ERR(trans) != -ENOENT)
				cannot_commit = true;
			goto sleep;
		}
		/* only commit if it is still the transaction we sampled */
		if (transid == trans->transid) {
			btrfs_commit_transaction(trans, root);
		} else {
			btrfs_end_transaction(trans, root);
		}
sleep:
		wake_up_process(root->fs_info->cleaner_kthread);
		mutex_unlock(&root->fs_info->transaction_kthread_mutex);

		if (unlikely(test_bit(BTRFS_FS_STATE_ERROR,
				      &root->fs_info->fs_state)))
			btrfs_cleanup_transaction(root);
		if (!try_to_freeze()) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (!kthread_should_stop() &&
			    (!btrfs_transaction_blocked(root->fs_info) ||
			     cannot_commit))
				schedule_timeout(delay);
			__set_current_state(TASK_RUNNING);
		}
	} while (!kthread_should_stop());
	return 0;
}

/*
 * this will find the highest generation in the array of
 * root backups.  The index of the highest array is returned,
 * or -1 if we can't find anything.
 *
 * We check to make sure the array is valid by comparing the
 * generation of the latest root in the array with the generation
 * in the super block.  If they don't match we pitch it.
 */
static int find_newest_super_backup(struct btrfs_fs_info *info, u64 newest_gen)
{
	u64 cur;
	int newest_index = -1;
	struct btrfs_root_backup *root_backup;
	int i;

	for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) {
		root_backup = info->super_copy->super_roots + i;
		cur = btrfs_backup_tree_root_gen(root_backup);
		if (cur == newest_gen)
			newest_index = i;
	}

	/* check to see if we actually wrapped around */
	if (newest_index == BTRFS_NUM_BACKUP_ROOTS - 1) {
		root_backup = info->super_copy->super_roots;
		cur = btrfs_backup_tree_root_gen(root_backup);
		if (cur == newest_gen)
			newest_index = 0;
	}
	return newest_index;
}

/*
 * find the oldest backup so we know where to store new entries
 * in the backup array.
 * This will set the backup_root_index
 * field in the fs_info struct
 */
static void find_oldest_super_backup(struct btrfs_fs_info *info,
				     u64 newest_gen)
{
	int newest_index = -1;

	newest_index = find_newest_super_backup(info, newest_gen);
	/* if there was garbage in there, just move along */
	if (newest_index == -1) {
		info->backup_root_index = 0;
	} else {
		info->backup_root_index = (newest_index + 1) %
					  BTRFS_NUM_BACKUP_ROOTS;
	}
}

/*
 * copy all the root pointers into the super backup array.
 * this will bump the backup pointer by one when it is
 * done
 */
static void backup_super_roots(struct btrfs_fs_info *info)
{
	int next_backup;
	struct btrfs_root_backup *root_backup;
	int last_backup;

	next_backup = info->backup_root_index;
	last_backup = (next_backup + BTRFS_NUM_BACKUP_ROOTS - 1) %
		      BTRFS_NUM_BACKUP_ROOTS;

	/*
	 * just overwrite the last backup if we're at the same generation
	 * this happens only at umount
	 */
	root_backup = info->super_for_commit->super_roots + last_backup;
	if (btrfs_backup_tree_root_gen(root_backup) ==
	    btrfs_header_generation(info->tree_root->node))
		next_backup = last_backup;

	root_backup = info->super_for_commit->super_roots + next_backup;

	/*
	 * make sure all of our padding and empty slots get zero filled
	 * regardless of which ones we use today
	 */
	memset(root_backup, 0, sizeof(*root_backup));

	info->backup_root_index = (next_backup + 1) % BTRFS_NUM_BACKUP_ROOTS;

	btrfs_set_backup_tree_root(root_backup, info->tree_root->node->start);
	btrfs_set_backup_tree_root_gen(root_backup,
			btrfs_header_generation(info->tree_root->node));
	btrfs_set_backup_tree_root_level(root_backup,
			btrfs_header_level(info->tree_root->node));

	btrfs_set_backup_chunk_root(root_backup, info->chunk_root->node->start);
	btrfs_set_backup_chunk_root_gen(root_backup,
			btrfs_header_generation(info->chunk_root->node));
	btrfs_set_backup_chunk_root_level(root_backup,
			btrfs_header_level(info->chunk_root->node));

	btrfs_set_backup_extent_root(root_backup,
			info->extent_root->node->start);
	btrfs_set_backup_extent_root_gen(root_backup,
			btrfs_header_generation(info->extent_root->node));
	btrfs_set_backup_extent_root_level(root_backup,
			btrfs_header_level(info->extent_root->node));

	/*
	 * we might commit during log recovery, which happens before we set
	 * the fs_root.  Make sure it is valid before we fill it in.
	 */
	if (info->fs_root && info->fs_root->node) {
		btrfs_set_backup_fs_root(root_backup,
				info->fs_root->node->start);
		btrfs_set_backup_fs_root_gen(root_backup,
				btrfs_header_generation(info->fs_root->node));
		btrfs_set_backup_fs_root_level(root_backup,
				btrfs_header_level(info->fs_root->node));
	}

	btrfs_set_backup_dev_root(root_backup, info->dev_root->node->start);
	btrfs_set_backup_dev_root_gen(root_backup,
			btrfs_header_generation(info->dev_root->node));
	btrfs_set_backup_dev_root_level(root_backup,
			btrfs_header_level(info->dev_root->node));

	btrfs_set_backup_csum_root(root_backup, info->csum_root->node->start);
	btrfs_set_backup_csum_root_gen(root_backup,
			btrfs_header_generation(info->csum_root->node));
	btrfs_set_backup_csum_root_level(root_backup,
			btrfs_header_level(info->csum_root->node));

	btrfs_set_backup_total_bytes(root_backup,
			btrfs_super_total_bytes(info->super_copy));
	btrfs_set_backup_bytes_used(root_backup,
			btrfs_super_bytes_used(info->super_copy));
	btrfs_set_backup_num_devices(root_backup,
			btrfs_super_num_devices(info->super_copy));

	/*
	 * if we don't copy this out to the super_copy, it won't get remembered
	 * for the next commit
	 */
	memcpy(&info->super_copy->super_roots,
	       &info->super_for_commit->super_roots,
	       sizeof(*root_backup) * BTRFS_NUM_BACKUP_ROOTS);
}

/*
 * this copies info out of the root backup array and back into
 * the in-memory super block.  It is meant to help iterate through
 * the array, so you send it the number of backups you've already
 * tried and the last backup index you used.
 *
 * this returns -1 when it has tried all the backups
 */
static noinline int next_root_backup(struct btrfs_fs_info *info,
				     struct btrfs_super_block *super,
				     int *num_backups_tried, int *backup_index)
{
	struct btrfs_root_backup *root_backup;
	int newest = *backup_index;

	if (*num_backups_tried == 0) {
		u64 gen = btrfs_super_generation(super);

		newest = find_newest_super_backup(info, gen);
		if (newest == -1)
			return -1;

		*backup_index = newest;
		*num_backups_tried = 1;
	} else if (*num_backups_tried == BTRFS_NUM_BACKUP_ROOTS) {
		/* we've tried all the backups, all done */
		return -1;
	} else {
		/* jump to the next oldest backup */
		newest = (*backup_index + BTRFS_NUM_BACKUP_ROOTS - 1) %
			 BTRFS_NUM_BACKUP_ROOTS;
		*backup_index = newest;
		*num_backups_tried += 1;
	}
	root_backup = super->super_roots + newest;

	btrfs_set_super_generation(super,
			btrfs_backup_tree_root_gen(root_backup));
	btrfs_set_super_root(super, btrfs_backup_tree_root(root_backup));
	btrfs_set_super_root_level(super,
			btrfs_backup_tree_root_level(root_backup));
	btrfs_set_super_bytes_used(super, btrfs_backup_bytes_used(root_backup));

	/*
	 * fixme: the total bytes and num_devices need to match or we should
	 * need a fsck
	 */
	btrfs_set_super_total_bytes(super,
			btrfs_backup_total_bytes(root_backup));
	btrfs_set_super_num_devices(super,
			btrfs_backup_num_devices(root_backup));
	return 0;
}

/* helper to cleanup workers */
static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info)
{
	btrfs_destroy_workqueue(fs_info->fixup_workers);
	btrfs_destroy_workqueue(fs_info->delalloc_workers);
	btrfs_destroy_workqueue(fs_info->workers);
	btrfs_destroy_workqueue(fs_info->endio_workers);
	btrfs_destroy_workqueue(fs_info->endio_meta_workers);
	btrfs_destroy_workqueue(fs_info->endio_raid56_workers);
	btrfs_destroy_workqueue(fs_info->endio_repair_workers);
	btrfs_destroy_workqueue(fs_info->rmw_workers);
	btrfs_destroy_workqueue(fs_info->endio_meta_write_workers);
	btrfs_destroy_workqueue(fs_info->endio_write_workers);
	btrfs_destroy_workqueue(fs_info->endio_freespace_worker);
	btrfs_destroy_workqueue(fs_info->submit_workers);
	btrfs_destroy_workqueue(fs_info->delayed_workers);
	btrfs_destroy_workqueue(fs_info->caching_workers);
	btrfs_destroy_workqueue(fs_info->readahead_workers);
	btrfs_destroy_workqueue(fs_info->flush_workers);
	btrfs_destroy_workqueue(fs_info->qgroup_rescan_workers);
	btrfs_destroy_workqueue(fs_info->extent_workers);
}

/* drop a root's node and commit-root extent buffers (NULL root is ok) */
static void free_root_extent_buffers(struct btrfs_root *root)
{
	if (root) {
		free_extent_buffer(root->node);
		free_extent_buffer(root->commit_root);
		root->node = NULL;
		root->commit_root = NULL;
	}
}

/* helper to cleanup tree roots */
static void free_root_pointers(struct btrfs_fs_info *info, int chunk_root)
{
	free_root_extent_buffers(info->tree_root);

	free_root_extent_buffers(info->dev_root);
	free_root_extent_buffers(info->extent_root);
	free_root_extent_buffers(info->csum_root);
	free_root_extent_buffers(info->quota_root);
	free_root_extent_buffers(info->uuid_root);
	if (chunk_root)
		free_root_extent_buffers(info->chunk_root);
}

/*
 * Drop all fs roots at unmount: first the dead-roots list, then
 * everything still in the radix tree, and finally (on an aborted fs)
 * the log tree and pinned extents.
 */
void btrfs_free_fs_roots(struct btrfs_fs_info *fs_info)
{
	int ret;
	struct btrfs_root *gang[8];
	int i;

	while (!list_empty(&fs_info->dead_roots)) {
		gang[0] = list_entry(fs_info->dead_roots.next,
				     struct btrfs_root, root_list);
		list_del(&gang[0]->root_list);

		if (test_bit(BTRFS_ROOT_IN_RADIX, &gang[0]->state)) {
			btrfs_drop_and_free_fs_root(fs_info, gang[0]);
		} else {
			free_extent_buffer(gang[0]->node);
			free_extent_buffer(gang[0]->commit_root);
			btrfs_put_fs_root(gang[0]);
		}
	}

	while (1) {
		ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
					     (void **)gang, 0,
					     ARRAY_SIZE(gang));
		if (!ret)
			break;
		for (i = 0; i < ret; i++)
			btrfs_drop_and_free_fs_root(fs_info, gang[i]);
	}

	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
		btrfs_free_log_root_tree(NULL, fs_info);
		btrfs_destroy_pinned_extent(fs_info->tree_root,
					    fs_info->pinned_extents);
	}
}

/* initialize scrub bookkeeping in fs_info */
static void btrfs_init_scrub(struct btrfs_fs_info *fs_info)
{
	mutex_init(&fs_info->scrub_lock);
	atomic_set(&fs_info->scrubs_running, 0);
	atomic_set(&fs_info->scrub_pause_req, 0);
	atomic_set(&fs_info->scrubs_paused, 0);
	atomic_set(&fs_info->scrub_cancel_req, 0);
	init_waitqueue_head(&fs_info->scrub_pause_wait);
	fs_info->scrub_workers_refcnt = 0;
}

/* initialize balance bookkeeping in fs_info */
static void btrfs_init_balance(struct btrfs_fs_info *fs_info)
{
	spin_lock_init(&fs_info->balance_lock);
	mutex_init(&fs_info->balance_mutex);
	atomic_set(&fs_info->balance_running, 0);
	atomic_set(&fs_info->balance_pause_req, 0);
	atomic_set(&fs_info->balance_cancel_req, 0);
	fs_info->balance_ctl = NULL;
	init_waitqueue_head(&fs_info->balance_wait_q);
}

/* set up the dummy inode that backs the btree address space */
static void btrfs_init_btree_inode(struct btrfs_fs_info *fs_info,
				   struct btrfs_root *tree_root)
{
	fs_info->btree_inode->i_ino = BTRFS_BTREE_INODE_OBJECTID;
	set_nlink(fs_info->btree_inode, 1);
	/*
	 * we set the i_size on the btree inode to the max possible int.
	 * the real end of the address space is determined by all of
	 * the devices in the system
	 */
	fs_info->btree_inode->i_size = OFFSET_MAX;
	fs_info->btree_inode->i_mapping->a_ops = &btree_aops;

	RB_CLEAR_NODE(&BTRFS_I(fs_info->btree_inode)->rb_node);
	extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree,
			    fs_info->btree_inode->i_mapping);
	BTRFS_I(fs_info->btree_inode)->io_tree.track_uptodate = 0;
	extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree);

	BTRFS_I(fs_info->btree_inode)->io_tree.ops = &btree_extent_io_ops;

	BTRFS_I(fs_info->btree_inode)->root = tree_root;
	memset(&BTRFS_I(fs_info->btree_inode)->location, 0,
	       sizeof(struct btrfs_key));
	set_bit(BTRFS_INODE_DUMMY,
		&BTRFS_I(fs_info->btree_inode)->runtime_flags);
	btrfs_insert_inode_hash(fs_info->btree_inode);
}

/* initialize the locks used by the dev-replace machinery */
static void btrfs_init_dev_replace_locks(struct btrfs_fs_info *fs_info)
{
	fs_info->dev_replace.lock_owner = 0;
	atomic_set(&fs_info->dev_replace.nesting_level, 0);
	mutex_init(&fs_info->dev_replace.lock_finishing_cancel_unmount);
	mutex_init(&fs_info->dev_replace.lock_management_lock);
	mutex_init(&fs_info->dev_replace.lock);
	init_waitqueue_head(&fs_info->replace_wait);
}

/* initialize qgroup state; quotas start disabled until the root loads */
static void btrfs_init_qgroup(struct btrfs_fs_info *fs_info)
{
	spin_lock_init(&fs_info->qgroup_lock);
	mutex_init(&fs_info->qgroup_ioctl_lock);
	fs_info->qgroup_tree = RB_ROOT;
	fs_info->qgroup_op_tree = RB_ROOT;
	INIT_LIST_HEAD(&fs_info->dirty_qgroups);
	fs_info->qgroup_seq = 1;
	fs_info->quota_enabled = 0;
	fs_info->pending_quota_state = 0;
	fs_info->qgroup_ulist = NULL;
	mutex_init(&fs_info->qgroup_rescan_lock);
}

/*
 * Create all of the btrfs workqueues.  Returns -ENOMEM if any single
 * allocation failed; the caller tears everything down via
 * btrfs_stop_all_workers (destroy of a NULL workqueue is a no-op).
 */
static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info,
				 struct btrfs_fs_devices *fs_devices)
{
	int max_active = fs_info->thread_pool_size;
	unsigned int flags = WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_UNBOUND;

	fs_info->workers =
		btrfs_alloc_workqueue("worker", flags | WQ_HIGHPRI,
				      max_active, 16);

	fs_info->delalloc_workers =
		btrfs_alloc_workqueue("delalloc", flags, max_active, 2);

	fs_info->flush_workers =
		btrfs_alloc_workqueue("flush_delalloc", flags, max_active, 0);

	fs_info->caching_workers =
		btrfs_alloc_workqueue("cache", flags, max_active, 0);

	/*
	 * a higher idle thresh on the submit workers makes it much more
	 * likely that bios will be send down in a sane order to the
	 * devices
	 */
	fs_info->submit_workers =
		btrfs_alloc_workqueue("submit", flags,
				      min_t(u64, fs_devices->num_devices,
					    max_active), 64);

	fs_info->fixup_workers =
		btrfs_alloc_workqueue("fixup", flags, 1, 0);

	/*
	 * endios are largely parallel and should have a very
	 * low idle thresh
	 */
	fs_info->endio_workers =
		btrfs_alloc_workqueue("endio", flags, max_active, 4);
	fs_info->endio_meta_workers =
		btrfs_alloc_workqueue("endio-meta", flags, max_active, 4);
	fs_info->endio_meta_write_workers =
		btrfs_alloc_workqueue("endio-meta-write", flags,
				      max_active, 2);
	fs_info->endio_raid56_workers =
		btrfs_alloc_workqueue("endio-raid56", flags, max_active, 4);
	fs_info->endio_repair_workers =
		btrfs_alloc_workqueue("endio-repair", flags, 1, 0);
	fs_info->rmw_workers =
		btrfs_alloc_workqueue("rmw", flags, max_active, 2);
	fs_info->endio_write_workers =
		btrfs_alloc_workqueue("endio-write", flags, max_active, 2);
	fs_info->endio_freespace_worker =
		btrfs_alloc_workqueue("freespace-write", flags, max_active, 0);
	fs_info->delayed_workers =
		btrfs_alloc_workqueue("delayed-meta", flags, max_active, 0);
	fs_info->readahead_workers =
		btrfs_alloc_workqueue("readahead", flags, max_active, 2);
	fs_info->qgroup_rescan_workers =
		btrfs_alloc_workqueue("qgroup-rescan", flags, 1, 0);
	fs_info->extent_workers =
		btrfs_alloc_workqueue("extent-refs", flags,
				      min_t(u64, fs_devices->num_devices,
					    max_active), 8);

	if (!(fs_info->workers && fs_info->delalloc_workers &&
	      fs_info->submit_workers && fs_info->flush_workers &&
	      fs_info->endio_workers && fs_info->endio_meta_workers &&
	      fs_info->endio_meta_write_workers &&
	      fs_info->endio_repair_workers &&
	      fs_info->endio_write_workers && fs_info->endio_raid56_workers &&
	      fs_info->endio_freespace_worker && fs_info->rmw_workers &&
	      fs_info->caching_workers && fs_info->readahead_workers &&
	      fs_info->fixup_workers && fs_info->delayed_workers &&
	      fs_info->extent_workers &&
	      fs_info->qgroup_rescan_workers)) {
		return -ENOMEM;
	}

	return 0;
}

/*
 * Replay the tree log at mount: read the log root from the super
 * block and recover it.  Requires at least one writable device.
 */
static int btrfs_replay_log(struct btrfs_fs_info *fs_info,
			    struct btrfs_fs_devices *fs_devices)
{
	int ret;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *log_tree_root;
	struct btrfs_super_block *disk_super = fs_info->super_copy;
	u64 bytenr = btrfs_super_log_root(disk_super);

	if (fs_devices->rw_devices == 0) {
		printk(KERN_WARNING "BTRFS: log replay required "
		       "on RO media\n");
		return -EIO;
	}

	log_tree_root = btrfs_alloc_root(fs_info);
	if (!log_tree_root)
		return -ENOMEM;

	__setup_root(tree_root->nodesize, tree_root->sectorsize,
		     tree_root->stripesize, log_tree_root, fs_info,
		     BTRFS_TREE_LOG_OBJECTID);

	log_tree_root->node = read_tree_block(tree_root, bytenr,
					      fs_info->generation + 1);
	if (IS_ERR(log_tree_root->node)) {
		printk(KERN_ERR "BTRFS: failed to read log tree\n");
		ret = PTR_ERR(log_tree_root->node);
		kfree(log_tree_root);
		return ret;
	} else if (!extent_buffer_uptodate(log_tree_root->node)) {
		printk(KERN_ERR "BTRFS: failed to read log tree\n");
		free_extent_buffer(log_tree_root->node);
		kfree(log_tree_root);
		return -EIO;
	}
	/* returns with log_tree_root freed on success */
	ret = btrfs_recover_log_trees(log_tree_root);
	if (ret) {
		btrfs_error(tree_root->fs_info, ret,
			    "Failed to recover log tree");
		free_extent_buffer(log_tree_root->node);
		kfree(log_tree_root);
		return ret;
	}

	if (fs_info->sb->s_flags & MS_RDONLY) {
		ret = btrfs_commit_super(tree_root);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * Read the permanent trees (extent, dev, csum) plus the optional
 * quota and uuid trees from the tree of tree roots and hang them off
 * fs_info.  A missing quota/uuid tree is not an error.
 */
static int btrfs_read_roots(struct btrfs_fs_info *fs_info,
			    struct btrfs_root *tree_root)
{
	struct btrfs_root *root;
	struct btrfs_key location;
	int ret;

	location.objectid = BTRFS_EXTENT_TREE_OBJECTID;
	location.type = BTRFS_ROOT_ITEM_KEY;
	location.offset = 0;

	root = btrfs_read_tree_root(tree_root, &location);
	if (IS_ERR(root))
		return PTR_ERR(root);
	set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
	fs_info->extent_root = root;

	location.objectid = BTRFS_DEV_TREE_OBJECTID;
	root = btrfs_read_tree_root(tree_root, &location);
	if (IS_ERR(root))
		return PTR_ERR(root);
	set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
	fs_info->dev_root = root;
	btrfs_init_devices_late(fs_info);

	location.objectid = BTRFS_CSUM_TREE_OBJECTID;
	root = btrfs_read_tree_root(tree_root, &location);
	if (IS_ERR(root))
		return PTR_ERR(root);
	set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
	fs_info->csum_root = root;

	/* quota tree is optional: absence just leaves quotas disabled */
	location.objectid = BTRFS_QUOTA_TREE_OBJECTID;
	root = btrfs_read_tree_root(tree_root, &location);
	if (!IS_ERR(root)) {
		set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
		fs_info->quota_enabled = 1;
		fs_info->pending_quota_state = 1;
		fs_info->quota_root = root;
	}

	/* uuid tree is optional too; only -ENOENT is tolerated */
	location.objectid = BTRFS_UUID_TREE_OBJECTID;
	root = btrfs_read_tree_root(tree_root, &location);
	if (IS_ERR(root)) {
		ret = PTR_ERR(root);
		if (ret != -ENOENT)
			return ret;
	} else {
		set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
		fs_info->uuid_root = root;
	}

	return 0;
}

int open_ctree(struct super_block *sb, struct
btrfs_fs_devices *fs_devices, char *options) { u32 sectorsize; u32 nodesize; u32 stripesize; u64 generation; u64 features; struct btrfs_key location; struct buffer_head *bh; struct btrfs_super_block *disk_super; struct btrfs_fs_info *fs_info = btrfs_sb(sb); struct btrfs_root *tree_root; struct btrfs_root *chunk_root; int ret; int err = -EINVAL; int num_backups_tried = 0; int backup_index = 0; int max_active; tree_root = fs_info->tree_root = btrfs_alloc_root(fs_info); chunk_root = fs_info->chunk_root = btrfs_alloc_root(fs_info); if (!tree_root || !chunk_root) { err = -ENOMEM; goto fail; } ret = init_srcu_struct(&fs_info->subvol_srcu); if (ret) { err = ret; goto fail; } ret = setup_bdi(fs_info, &fs_info->bdi); if (ret) { err = ret; goto fail_srcu; } ret = percpu_counter_init(&fs_info->dirty_metadata_bytes, 0, GFP_KERNEL); if (ret) { err = ret; goto fail_bdi; } fs_info->dirty_metadata_batch = PAGE_CACHE_SIZE * (1 + ilog2(nr_cpu_ids)); ret = percpu_counter_init(&fs_info->delalloc_bytes, 0, GFP_KERNEL); if (ret) { err = ret; goto fail_dirty_metadata_bytes; } ret = percpu_counter_init(&fs_info->bio_counter, 0, GFP_KERNEL); if (ret) { err = ret; goto fail_delalloc_bytes; } fs_info->btree_inode = new_inode(sb); if (!fs_info->btree_inode) { err = -ENOMEM; goto fail_bio_counter; } mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS); INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC); INIT_RADIX_TREE(&fs_info->buffer_radix, GFP_ATOMIC); INIT_LIST_HEAD(&fs_info->trans_list); INIT_LIST_HEAD(&fs_info->dead_roots); INIT_LIST_HEAD(&fs_info->delayed_iputs); INIT_LIST_HEAD(&fs_info->delalloc_roots); INIT_LIST_HEAD(&fs_info->caching_block_groups); spin_lock_init(&fs_info->delalloc_root_lock); spin_lock_init(&fs_info->trans_lock); spin_lock_init(&fs_info->fs_roots_radix_lock); spin_lock_init(&fs_info->delayed_iput_lock); spin_lock_init(&fs_info->defrag_inodes_lock); spin_lock_init(&fs_info->free_chunk_lock); spin_lock_init(&fs_info->tree_mod_seq_lock); 
spin_lock_init(&fs_info->super_lock); spin_lock_init(&fs_info->qgroup_op_lock); spin_lock_init(&fs_info->buffer_lock); spin_lock_init(&fs_info->unused_bgs_lock); rwlock_init(&fs_info->tree_mod_log_lock); mutex_init(&fs_info->unused_bg_unpin_mutex); mutex_init(&fs_info->delete_unused_bgs_mutex); mutex_init(&fs_info->reloc_mutex); mutex_init(&fs_info->delalloc_root_mutex); seqlock_init(&fs_info->profiles_lock); init_rwsem(&fs_info->delayed_iput_sem); INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots); INIT_LIST_HEAD(&fs_info->space_info); INIT_LIST_HEAD(&fs_info->tree_mod_seq_list); INIT_LIST_HEAD(&fs_info->unused_bgs); btrfs_mapping_init(&fs_info->mapping_tree); btrfs_init_block_rsv(&fs_info->global_block_rsv, BTRFS_BLOCK_RSV_GLOBAL); btrfs_init_block_rsv(&fs_info->delalloc_block_rsv, BTRFS_BLOCK_RSV_DELALLOC); btrfs_init_block_rsv(&fs_info->trans_block_rsv, BTRFS_BLOCK_RSV_TRANS); btrfs_init_block_rsv(&fs_info->chunk_block_rsv, BTRFS_BLOCK_RSV_CHUNK); btrfs_init_block_rsv(&fs_info->empty_block_rsv, BTRFS_BLOCK_RSV_EMPTY); btrfs_init_block_rsv(&fs_info->delayed_block_rsv, BTRFS_BLOCK_RSV_DELOPS); atomic_set(&fs_info->nr_async_submits, 0); atomic_set(&fs_info->async_delalloc_pages, 0); atomic_set(&fs_info->async_submit_draining, 0); atomic_set(&fs_info->nr_async_bios, 0); atomic_set(&fs_info->defrag_running, 0); atomic_set(&fs_info->qgroup_op_seq, 0); atomic64_set(&fs_info->tree_mod_seq, 0); fs_info->sb = sb; fs_info->max_inline = BTRFS_DEFAULT_MAX_INLINE; fs_info->metadata_ratio = 0; fs_info->defrag_inodes = RB_ROOT; fs_info->free_chunk_space = 0; fs_info->tree_mod_log = RB_ROOT; fs_info->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL; fs_info->avg_delayed_ref_runtime = NSEC_PER_SEC >> 6; /* div by 64 */ /* readahead state */ INIT_RADIX_TREE(&fs_info->reada_tree, GFP_NOFS & ~__GFP_WAIT); spin_lock_init(&fs_info->reada_lock); fs_info->thread_pool_size = min_t(unsigned long, num_online_cpus() + 2, 8); INIT_LIST_HEAD(&fs_info->ordered_roots); 
spin_lock_init(&fs_info->ordered_root_lock); fs_info->delayed_root = kmalloc(sizeof(struct btrfs_delayed_root), GFP_NOFS); if (!fs_info->delayed_root) { err = -ENOMEM; goto fail_iput; } btrfs_init_delayed_root(fs_info->delayed_root); btrfs_init_scrub(fs_info); #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY fs_info->check_integrity_print_mask = 0; #endif btrfs_init_balance(fs_info); btrfs_init_async_reclaim_work(&fs_info->async_reclaim_work); sb->s_blocksize = 4096; sb->s_blocksize_bits = blksize_bits(4096); sb->s_bdi = &fs_info->bdi; btrfs_init_btree_inode(fs_info, tree_root); spin_lock_init(&fs_info->block_group_cache_lock); fs_info->block_group_cache_tree = RB_ROOT; fs_info->first_logical_byte = (u64)-1; extent_io_tree_init(&fs_info->freed_extents[0], fs_info->btree_inode->i_mapping); extent_io_tree_init(&fs_info->freed_extents[1], fs_info->btree_inode->i_mapping); fs_info->pinned_extents = &fs_info->freed_extents[0]; fs_info->do_barriers = 1; mutex_init(&fs_info->ordered_operations_mutex); mutex_init(&fs_info->ordered_extent_flush_mutex); mutex_init(&fs_info->tree_log_mutex); mutex_init(&fs_info->chunk_mutex); mutex_init(&fs_info->transaction_kthread_mutex); mutex_init(&fs_info->cleaner_mutex); mutex_init(&fs_info->volume_mutex); mutex_init(&fs_info->ro_block_group_mutex); init_rwsem(&fs_info->commit_root_sem); init_rwsem(&fs_info->cleanup_work_sem); init_rwsem(&fs_info->subvol_sem); sema_init(&fs_info->uuid_tree_rescan_sem, 1); btrfs_init_dev_replace_locks(fs_info); btrfs_init_qgroup(fs_info); btrfs_init_free_cluster(&fs_info->meta_alloc_cluster); btrfs_init_free_cluster(&fs_info->data_alloc_cluster); init_waitqueue_head(&fs_info->transaction_throttle); init_waitqueue_head(&fs_info->transaction_wait); init_waitqueue_head(&fs_info->transaction_blocked_wait); init_waitqueue_head(&fs_info->async_submit_wait); INIT_LIST_HEAD(&fs_info->pinned_chunks); ret = btrfs_alloc_stripe_hash_table(fs_info); if (ret) { err = ret; goto fail_alloc; } __setup_root(4096, 4096, 4096, 
tree_root, fs_info, BTRFS_ROOT_TREE_OBJECTID); invalidate_bdev(fs_devices->latest_bdev); /* * Read super block and check the signature bytes only */ bh = btrfs_read_dev_super(fs_devices->latest_bdev); if (!bh) { err = -EINVAL; goto fail_alloc; } /* * We want to check superblock checksum, the type is stored inside. * Pass the whole disk block of size BTRFS_SUPER_INFO_SIZE (4k). */ if (btrfs_check_super_csum(bh->b_data)) { printk(KERN_ERR "BTRFS: superblock checksum mismatch\n"); err = -EINVAL; goto fail_alloc; } /* * super_copy is zeroed at allocation time and we never touch the * following bytes up to INFO_SIZE, the checksum is calculated from * the whole block of INFO_SIZE */ memcpy(fs_info->super_copy, bh->b_data, sizeof(*fs_info->super_copy)); memcpy(fs_info->super_for_commit, fs_info->super_copy, sizeof(*fs_info->super_for_commit)); brelse(bh); memcpy(fs_info->fsid, fs_info->super_copy->fsid, BTRFS_FSID_SIZE); ret = btrfs_check_super_valid(fs_info, sb->s_flags & MS_RDONLY); if (ret) { printk(KERN_ERR "BTRFS: superblock contains fatal errors\n"); err = -EINVAL; goto fail_alloc; } disk_super = fs_info->super_copy; if (!btrfs_super_root(disk_super)) goto fail_alloc; /* check FS state, whether FS is broken. */ if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_ERROR) set_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state); /* * run through our array of backup supers and setup * our ring pointer to the oldest one */ generation = btrfs_super_generation(disk_super); find_oldest_super_backup(fs_info, generation); /* * In the long term, we'll store the compression type in the super * block, and it'll be used for per file compression control. 
*/ fs_info->compress_type = BTRFS_COMPRESS_ZLIB; ret = btrfs_parse_options(tree_root, options); if (ret) { err = ret; goto fail_alloc; } features = btrfs_super_incompat_flags(disk_super) & ~BTRFS_FEATURE_INCOMPAT_SUPP; if (features) { printk(KERN_ERR "BTRFS: couldn't mount because of " "unsupported optional features (%Lx).\n", features); err = -EINVAL; goto fail_alloc; } /* * Leafsize and nodesize were always equal, this is only a sanity check. */ if (le32_to_cpu(disk_super->__unused_leafsize) != btrfs_super_nodesize(disk_super)) { printk(KERN_ERR "BTRFS: couldn't mount because metadata " "blocksizes don't match. node %d leaf %d\n", btrfs_super_nodesize(disk_super), le32_to_cpu(disk_super->__unused_leafsize)); err = -EINVAL; goto fail_alloc; } if (btrfs_super_nodesize(disk_super) > BTRFS_MAX_METADATA_BLOCKSIZE) { printk(KERN_ERR "BTRFS: couldn't mount because metadata " "blocksize (%d) was too large\n", btrfs_super_nodesize(disk_super)); err = -EINVAL; goto fail_alloc; } features = btrfs_super_incompat_flags(disk_super); features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF; if (tree_root->fs_info->compress_type == BTRFS_COMPRESS_LZO) features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO; if (features & BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA) printk(KERN_INFO "BTRFS: has skinny extents\n"); /* * flag our filesystem as having big metadata blocks if * they are bigger than the page size */ if (btrfs_super_nodesize(disk_super) > PAGE_CACHE_SIZE) { if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA)) printk(KERN_INFO "BTRFS: flagging fs with big metadata feature\n"); features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA; } nodesize = btrfs_super_nodesize(disk_super); sectorsize = btrfs_super_sectorsize(disk_super); stripesize = btrfs_super_stripesize(disk_super); fs_info->dirty_metadata_batch = nodesize * (1 + ilog2(nr_cpu_ids)); fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids)); /* * mixed block groups end up with duplicate but slightly offset * extent buffers for 
the same range. It leads to corruptions */ if ((features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) && (sectorsize != nodesize)) { printk(KERN_ERR "BTRFS: unequal leaf/node/sector sizes " "are not allowed for mixed block groups on %s\n", sb->s_id); goto fail_alloc; } /* * Needn't use the lock because there is no other task which will * update the flag. */ btrfs_set_super_incompat_flags(disk_super, features); features = btrfs_super_compat_ro_flags(disk_super) & ~BTRFS_FEATURE_COMPAT_RO_SUPP; if (!(sb->s_flags & MS_RDONLY) && features) { printk(KERN_ERR "BTRFS: couldn't mount RDWR because of " "unsupported option features (%Lx).\n", features); err = -EINVAL; goto fail_alloc; } max_active = fs_info->thread_pool_size; ret = btrfs_init_workqueues(fs_info, fs_devices); if (ret) { err = ret; goto fail_sb_buffer; } fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super); fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages, 4 * 1024 * 1024 / PAGE_CACHE_SIZE); tree_root->nodesize = nodesize; tree_root->sectorsize = sectorsize; tree_root->stripesize = stripesize; sb->s_blocksize = sectorsize; sb->s_blocksize_bits = blksize_bits(sectorsize); if (btrfs_super_magic(disk_super) != BTRFS_MAGIC) { printk(KERN_ERR "BTRFS: valid FS not found on %s\n", sb->s_id); goto fail_sb_buffer; } if (sectorsize != PAGE_SIZE) { printk(KERN_ERR "BTRFS: incompatible sector size (%lu) " "found on %s\n", (unsigned long)sectorsize, sb->s_id); goto fail_sb_buffer; } mutex_lock(&fs_info->chunk_mutex); ret = btrfs_read_sys_array(tree_root); mutex_unlock(&fs_info->chunk_mutex); if (ret) { printk(KERN_ERR "BTRFS: failed to read the system " "array on %s\n", sb->s_id); goto fail_sb_buffer; } generation = btrfs_super_chunk_root_generation(disk_super); __setup_root(nodesize, sectorsize, stripesize, chunk_root, fs_info, BTRFS_CHUNK_TREE_OBJECTID); chunk_root->node = read_tree_block(chunk_root, btrfs_super_chunk_root(disk_super), generation); if (IS_ERR(chunk_root->node) || 
!extent_buffer_uptodate(chunk_root->node)) { printk(KERN_ERR "BTRFS: failed to read chunk root on %s\n", sb->s_id); chunk_root->node = NULL; goto fail_tree_roots; } btrfs_set_root_node(&chunk_root->root_item, chunk_root->node); chunk_root->commit_root = btrfs_root_node(chunk_root); read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid, btrfs_header_chunk_tree_uuid(chunk_root->node), BTRFS_UUID_SIZE); ret = btrfs_read_chunk_tree(chunk_root); if (ret) { printk(KERN_ERR "BTRFS: failed to read chunk tree on %s\n", sb->s_id); goto fail_tree_roots; } /* * keep the device that is marked to be the target device for the * dev_replace procedure */ btrfs_close_extra_devices(fs_devices, 0); if (!fs_devices->latest_bdev) { printk(KERN_ERR "BTRFS: failed to read devices on %s\n", sb->s_id); goto fail_tree_roots; } retry_root_backup: generation = btrfs_super_generation(disk_super); tree_root->node = read_tree_block(tree_root, btrfs_super_root(disk_super), generation); if (IS_ERR(tree_root->node) || !extent_buffer_uptodate(tree_root->node)) { printk(KERN_WARNING "BTRFS: failed to read tree root on %s\n", sb->s_id); tree_root->node = NULL; goto recovery_tree_root; } btrfs_set_root_node(&tree_root->root_item, tree_root->node); tree_root->commit_root = btrfs_root_node(tree_root); btrfs_set_root_refs(&tree_root->root_item, 1); ret = btrfs_read_roots(fs_info, tree_root); if (ret) goto recovery_tree_root; fs_info->generation = generation; fs_info->last_trans_committed = generation; ret = btrfs_recover_balance(fs_info); if (ret) { printk(KERN_ERR "BTRFS: failed to recover balance\n"); goto fail_block_groups; } ret = btrfs_init_dev_stats(fs_info); if (ret) { printk(KERN_ERR "BTRFS: failed to init dev_stats: %d\n", ret); goto fail_block_groups; } ret = btrfs_init_dev_replace(fs_info); if (ret) { pr_err("BTRFS: failed to init dev_replace: %d\n", ret); goto fail_block_groups; } btrfs_close_extra_devices(fs_devices, 1); ret = btrfs_sysfs_add_fsid(fs_devices, NULL); if (ret) { 
pr_err("BTRFS: failed to init sysfs fsid interface: %d\n", ret); goto fail_block_groups; } ret = btrfs_sysfs_add_device(fs_devices); if (ret) { pr_err("BTRFS: failed to init sysfs device interface: %d\n", ret); goto fail_fsdev_sysfs; } ret = btrfs_sysfs_add_one(fs_info); if (ret) { pr_err("BTRFS: failed to init sysfs interface: %d\n", ret); goto fail_fsdev_sysfs; } ret = btrfs_init_space_info(fs_info); if (ret) { printk(KERN_ERR "BTRFS: Failed to initial space info: %d\n", ret); goto fail_sysfs; } ret = btrfs_read_block_groups(fs_info->extent_root); if (ret) { printk(KERN_ERR "BTRFS: Failed to read block groups: %d\n", ret); goto fail_sysfs; } fs_info->num_tolerated_disk_barrier_failures = btrfs_calc_num_tolerated_disk_barrier_failures(fs_info); if (fs_info->fs_devices->missing_devices > fs_info->num_tolerated_disk_barrier_failures && !(sb->s_flags & MS_RDONLY)) { printk(KERN_WARNING "BTRFS: " "too many missing devices, writeable mount is not allowed\n"); goto fail_sysfs; } fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root, "btrfs-cleaner"); if (IS_ERR(fs_info->cleaner_kthread)) goto fail_sysfs; fs_info->transaction_kthread = kthread_run(transaction_kthread, tree_root, "btrfs-transaction"); if (IS_ERR(fs_info->transaction_kthread)) goto fail_cleaner; if (!btrfs_test_opt(tree_root, SSD) && !btrfs_test_opt(tree_root, NOSSD) && !fs_info->fs_devices->rotating) { printk(KERN_INFO "BTRFS: detected SSD devices, enabling SSD " "mode\n"); btrfs_set_opt(fs_info->mount_opt, SSD); } /* * Mount does not set all options immediatelly, we can do it now and do * not have to wait for transaction commit */ btrfs_apply_pending_changes(fs_info); #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY if (btrfs_test_opt(tree_root, CHECK_INTEGRITY)) { ret = btrfsic_mount(tree_root, fs_devices, btrfs_test_opt(tree_root, CHECK_INTEGRITY_INCLUDING_EXTENT_DATA) ? 
1 : 0, fs_info->check_integrity_print_mask); if (ret) printk(KERN_WARNING "BTRFS: failed to initialize" " integrity check module %s\n", sb->s_id); } #endif ret = btrfs_read_qgroup_config(fs_info); if (ret) goto fail_trans_kthread; /* do not make disk changes in broken FS */ if (btrfs_super_log_root(disk_super) != 0) { ret = btrfs_replay_log(fs_info, fs_devices); if (ret) { err = ret; goto fail_qgroup; } } ret = btrfs_find_orphan_roots(tree_root); if (ret) goto fail_qgroup; if (!(sb->s_flags & MS_RDONLY)) { ret = btrfs_cleanup_fs_roots(fs_info); if (ret) goto fail_qgroup; mutex_lock(&fs_info->cleaner_mutex); ret = btrfs_recover_relocation(tree_root); mutex_unlock(&fs_info->cleaner_mutex); if (ret < 0) { printk(KERN_WARNING "BTRFS: failed to recover relocation\n"); err = -EINVAL; goto fail_qgroup; } } location.objectid = BTRFS_FS_TREE_OBJECTID; location.type = BTRFS_ROOT_ITEM_KEY; location.offset = 0; fs_info->fs_root = btrfs_read_fs_root_no_name(fs_info, &location); if (IS_ERR(fs_info->fs_root)) { err = PTR_ERR(fs_info->fs_root); goto fail_qgroup; } if (sb->s_flags & MS_RDONLY) return 0; down_read(&fs_info->cleanup_work_sem); if ((ret = btrfs_orphan_cleanup(fs_info->fs_root)) || (ret = btrfs_orphan_cleanup(fs_info->tree_root))) { up_read(&fs_info->cleanup_work_sem); close_ctree(tree_root); return ret; } up_read(&fs_info->cleanup_work_sem); ret = btrfs_resume_balance_async(fs_info); if (ret) { printk(KERN_WARNING "BTRFS: failed to resume balance\n"); close_ctree(tree_root); return ret; } ret = btrfs_resume_dev_replace_async(fs_info); if (ret) { pr_warn("BTRFS: failed to resume dev_replace\n"); close_ctree(tree_root); return ret; } btrfs_qgroup_rescan_resume(fs_info); if (!fs_info->uuid_root) { pr_info("BTRFS: creating UUID tree\n"); ret = btrfs_create_uuid_tree(fs_info); if (ret) { pr_warn("BTRFS: failed to create the UUID tree %d\n", ret); close_ctree(tree_root); return ret; } } else if (btrfs_test_opt(tree_root, RESCAN_UUID_TREE) || fs_info->generation != 
btrfs_super_uuid_tree_generation(disk_super)) { pr_info("BTRFS: checking UUID tree\n"); ret = btrfs_check_uuid_tree(fs_info); if (ret) { pr_warn("BTRFS: failed to check the UUID tree %d\n", ret); close_ctree(tree_root); return ret; } } else { fs_info->update_uuid_tree_gen = 1; } fs_info->open = 1; return 0; fail_qgroup: btrfs_free_qgroup_config(fs_info); fail_trans_kthread: kthread_stop(fs_info->transaction_kthread); btrfs_cleanup_transaction(fs_info->tree_root); btrfs_free_fs_roots(fs_info); fail_cleaner: kthread_stop(fs_info->cleaner_kthread); /* * make sure we're done with the btree inode before we stop our * kthreads */ filemap_write_and_wait(fs_info->btree_inode->i_mapping); fail_sysfs: btrfs_sysfs_remove_one(fs_info); fail_fsdev_sysfs: btrfs_sysfs_remove_fsid(fs_info->fs_devices); fail_block_groups: btrfs_put_block_group_cache(fs_info); btrfs_free_block_groups(fs_info); fail_tree_roots: free_root_pointers(fs_info, 1); invalidate_inode_pages2(fs_info->btree_inode->i_mapping); fail_sb_buffer: btrfs_stop_all_workers(fs_info); fail_alloc: fail_iput: btrfs_mapping_tree_free(&fs_info->mapping_tree); iput(fs_info->btree_inode); fail_bio_counter: percpu_counter_destroy(&fs_info->bio_counter); fail_delalloc_bytes: percpu_counter_destroy(&fs_info->delalloc_bytes); fail_dirty_metadata_bytes: percpu_counter_destroy(&fs_info->dirty_metadata_bytes); fail_bdi: bdi_destroy(&fs_info->bdi); fail_srcu: cleanup_srcu_struct(&fs_info->subvol_srcu); fail: btrfs_free_stripe_hash_table(fs_info); btrfs_close_devices(fs_info->fs_devices); return err; recovery_tree_root: if (!btrfs_test_opt(tree_root, RECOVERY)) goto fail_tree_roots; free_root_pointers(fs_info, 0); /* don't use the log in recovery mode, it won't be valid */ btrfs_set_super_log_root(disk_super, 0); /* we can't trust the free space cache either */ btrfs_set_opt(fs_info->mount_opt, CLEAR_CACHE); ret = next_root_backup(fs_info, fs_info->super_copy, &num_backups_tried, &backup_index); if (ret == -1) goto fail_block_groups; 
	goto retry_root_backup;
}

/*
 * Buffer-head end_io handler for superblock writes submitted by
 * write_dev_supers().  On error it logs the lost write (rate limited) and
 * bumps the per-device write-error stat rather than using the generic
 * buffer write_io_error flag.  Drops the buffer lock and our reference.
 */
static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
{
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		struct btrfs_device *device = (struct btrfs_device *)
			bh->b_private;

		printk_ratelimited_in_rcu(KERN_WARNING "BTRFS: lost page write due to "
					  "I/O error on %s\n",
					  rcu_str_deref(device->name));
		/* note, we don't set_buffer_write_io_error because we have
		 * our own ways of dealing with the IO errors */
		clear_buffer_uptodate(bh);
		btrfs_dev_stat_inc_and_print(device,
					     BTRFS_DEV_STAT_WRITE_ERRS);
	}
	unlock_buffer(bh);
	put_bh(bh);
}

/*
 * Find the newest valid superblock on @bdev.
 *
 * Returns the buffer_head of the super with the highest generation whose
 * recorded bytenr and magic match, or NULL if none is usable.  The caller
 * owns the returned reference and must brelse() it.
 */
struct buffer_head *btrfs_read_dev_super(struct block_device *bdev)
{
	struct buffer_head *bh;
	struct buffer_head *latest = NULL;
	struct btrfs_super_block *super;
	int i;
	u64 transid = 0;
	u64 bytenr;

	/* we would like to check all the supers, but that would make
	 * a btrfs mount succeed after a mkfs from a different FS.
	 * So, we need to add a special mount option to scan for
	 * later supers, using BTRFS_SUPER_MIRROR_MAX instead
	 */
	for (i = 0; i < 1; i++) {
		bytenr = btrfs_sb_offset(i);
		/* mirror must fit entirely inside the device */
		if (bytenr + BTRFS_SUPER_INFO_SIZE >=
					i_size_read(bdev->bd_inode))
			break;
		bh = __bread(bdev, bytenr / 4096,
					BTRFS_SUPER_INFO_SIZE);
		if (!bh)
			continue;

		super = (struct btrfs_super_block *)bh->b_data;
		/* reject copies that don't identify themselves correctly */
		if (btrfs_super_bytenr(super) != bytenr ||
		    btrfs_super_magic(super) != BTRFS_MAGIC) {
			brelse(bh);
			continue;
		}

		/* keep the copy with the highest generation */
		if (!latest || btrfs_super_generation(super) > transid) {
			brelse(latest);
			latest = bh;
			transid = btrfs_super_generation(super);
		} else {
			brelse(bh);
		}
	}
	return latest;
}

/*
 * this should be called twice, once with wait == 0 and
 * once with wait == 1.  When wait == 0 is done, all the buffer heads
 * we write are pinned.
 *
 * They are released when wait == 1 is done.
 * max_mirrors must be the same for both runs, and it indicates how
 * many supers on this one device should be written.
 *
 * max_mirrors == 0 means to write them all.
 */
static int write_dev_supers(struct btrfs_device *device,
			    struct btrfs_super_block *sb,
			    int do_barriers, int wait, int max_mirrors)
{
	struct buffer_head *bh;
	int i;
	int ret;
	int errors = 0;
	u32 crc;
	u64 bytenr;

	if (max_mirrors == 0)
		max_mirrors = BTRFS_SUPER_MIRROR_MAX;

	for (i = 0; i < max_mirrors; i++) {
		bytenr = btrfs_sb_offset(i);
		/* skip mirrors that don't fit on this device */
		if (bytenr + BTRFS_SUPER_INFO_SIZE >=
		    device->commit_total_bytes)
			break;

		if (wait) {
			/* second pass: find the bh pinned by the first pass */
			bh = __find_get_block(device->bdev, bytenr / 4096,
					      BTRFS_SUPER_INFO_SIZE);
			if (!bh) {
				errors++;
				continue;
			}
			wait_on_buffer(bh);
			if (!buffer_uptodate(bh))
				errors++;

			/* drop our reference */
			brelse(bh);

			/* drop the reference from the wait == 0 run */
			brelse(bh);
			continue;
		} else {
			/* first pass: checksum, copy into a bh and submit */
			btrfs_set_super_bytenr(sb, bytenr);

			crc = ~(u32)0;
			crc = btrfs_csum_data((char *)sb +
					      BTRFS_CSUM_SIZE, crc,
					      BTRFS_SUPER_INFO_SIZE -
					      BTRFS_CSUM_SIZE);
			btrfs_csum_final(crc, sb->csum);

			/*
			 * one reference for us, and we leave it for the
			 * caller
			 */
			bh = __getblk(device->bdev, bytenr / 4096,
				      BTRFS_SUPER_INFO_SIZE);
			if (!bh) {
				printk(KERN_ERR "BTRFS: couldn't get super "
				       "buffer head for bytenr %Lu\n", bytenr);
				errors++;
				continue;
			}

			memcpy(bh->b_data, sb, BTRFS_SUPER_INFO_SIZE);

			/* one reference for submit_bh */
			get_bh(bh);

			set_buffer_uptodate(bh);
			lock_buffer(bh);
			bh->b_end_io = btrfs_end_buffer_write_sync;
			bh->b_private = device;
		}

		/*
		 * we fua the first super.  The others we allow
		 * to go down lazy.
		 */
		if (i == 0)
			ret = btrfsic_submit_bh(WRITE_FUA, bh);
		else
			ret = btrfsic_submit_bh(WRITE_SYNC, bh);
		if (ret)
			errors++;
	}
	/* success only if at least one mirror made it */
	return errors < i ? 0 : -1;
}

/*
 * endio for the write_dev_flush, this will wake anyone waiting
 * for the barrier when it is done
 */
static void btrfs_end_empty_barrier(struct bio *bio, int err)
{
	if (err)
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
	if (bio->bi_private)
		complete(bio->bi_private);
	bio_put(bio);
}

/*
 * trigger flushes for one of the devices.  If you pass wait == 0, the flushes
 * are sent down.  With wait == 1, it waits for the previous flush.
 *
 * any device where the flush fails with eopnotsupp are flagged as not-barrier
 * capable
 */
static int write_dev_flush(struct btrfs_device *device, int wait)
{
	struct bio *bio;
	int ret = 0;

	if (device->nobarriers)
		return 0;

	if (wait) {
		/* wait phase: reap the flush bio submitted earlier */
		bio = device->flush_bio;
		if (!bio)
			return 0;

		wait_for_completion(&device->flush_wait);

		if (!bio_flagged(bio, BIO_UPTODATE)) {
			ret = -EIO;
			btrfs_dev_stat_inc_and_print(device,
				BTRFS_DEV_STAT_FLUSH_ERRS);
		}

		/* drop the reference from the wait == 0 run */
		bio_put(bio);
		device->flush_bio = NULL;

		return ret;
	}

	/*
	 * one reference for us, and we leave it for the
	 * caller
	 */
	device->flush_bio = NULL;
	bio = btrfs_io_bio_alloc(GFP_NOFS, 0);
	if (!bio)
		return -ENOMEM;

	bio->bi_end_io = btrfs_end_empty_barrier;
	bio->bi_bdev = device->bdev;
	init_completion(&device->flush_wait);
	bio->bi_private = &device->flush_wait;
	device->flush_bio = bio;

	/* extra ref so the bio survives until the wait == 1 run */
	bio_get(bio);
	btrfsic_submit_bio(WRITE_FLUSH, bio);

	return 0;
}

/*
 * send an empty flush down to each device in parallel,
 * then wait for them
 */
static int barrier_all_devices(struct btrfs_fs_info *info)
{
	struct list_head *head;
	struct btrfs_device *dev;
	int errors_send = 0;
	int errors_wait = 0;
	int ret;

	/* send down all the barriers */
	head = &info->fs_devices->devices;
	list_for_each_entry_rcu(dev, head, dev_list) {
		if (dev->missing)
			continue;
		if (!dev->bdev) {
			errors_send++;
			continue;
		}
		if (!dev->in_fs_metadata || !dev->writeable)
			continue;

		ret = write_dev_flush(dev, 0);
		if (ret)
			errors_send++;
	}

	/* wait for all the barriers */
	list_for_each_entry_rcu(dev, head, dev_list) {
		if (dev->missing)
			continue;
		if (!dev->bdev) {
			errors_wait++;
			continue;
		}
		if (!dev->in_fs_metadata || !dev->writeable)
			continue;

		ret = write_dev_flush(dev, 1);
		if (ret)
			errors_wait++;
	}
	/* tolerate up to num_tolerated_disk_barrier_failures per phase */
	if (errors_send > info->num_tolerated_disk_barrier_failures ||
	    errors_wait > info->num_tolerated_disk_barrier_failures)
		return -EIO;
	return 0;
}

/*
 * Work out how many device barrier failures can be tolerated, based on
 * the weakest RAID profile actually in use in any space_info.
 */
int btrfs_calc_num_tolerated_disk_barrier_failures(
	struct btrfs_fs_info *fs_info)
{
	struct
	btrfs_ioctl_space_info space;
	struct btrfs_space_info *sinfo;
	u64 types[] = {BTRFS_BLOCK_GROUP_DATA,
		       BTRFS_BLOCK_GROUP_SYSTEM,
		       BTRFS_BLOCK_GROUP_METADATA,
		       BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA};
	int num_types = 4;
	int i;
	int c;
	/* start optimistic: every device but one may fail */
	int num_tolerated_disk_barrier_failures =
		(int)fs_info->fs_devices->num_devices;

	for (i = 0; i < num_types; i++) {
		struct btrfs_space_info *tmp;

		sinfo = NULL;
		rcu_read_lock();
		list_for_each_entry_rcu(tmp, &fs_info->space_info, list) {
			if (tmp->flags == types[i]) {
				sinfo = tmp;
				break;
			}
		}
		rcu_read_unlock();

		if (!sinfo)
			continue;

		down_read(&sinfo->groups_sem);
		for (c = 0; c < BTRFS_NR_RAID_TYPES; c++) {
			if (!list_empty(&sinfo->block_groups[c])) {
				u64 flags;

				btrfs_get_block_group_info(
					&sinfo->block_groups[c], &space);
				if (space.total_bytes == 0 ||
				    space.used_bytes == 0)
					continue;
				flags = space.flags;
				/*
				 * return
				 * 0: if dup, single or RAID0 is configured for
				 *    any of metadata, system or data, else
				 * 1: if RAID5 is configured, or if RAID1 or
				 *    RAID10 is configured and only two mirrors
				 *    are used, else
				 * 2: if RAID6 is configured, else
				 * num_mirrors - 1: if RAID1 or RAID10 is
				 *                  configured and more than
				 *                  2 mirrors are used.
				 */
				if (num_tolerated_disk_barrier_failures > 0 &&
				    ((flags & (BTRFS_BLOCK_GROUP_DUP |
					       BTRFS_BLOCK_GROUP_RAID0)) ||
				     ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK)
				      == 0)))
					num_tolerated_disk_barrier_failures = 0;
				else if (num_tolerated_disk_barrier_failures > 1) {
					if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
					    BTRFS_BLOCK_GROUP_RAID5 |
					    BTRFS_BLOCK_GROUP_RAID10)) {
						num_tolerated_disk_barrier_failures = 1;
					} else if (flags &
						   BTRFS_BLOCK_GROUP_RAID6) {
						num_tolerated_disk_barrier_failures = 2;
					}
				}
			}
		}
		up_read(&sinfo->groups_sem);
	}

	return num_tolerated_disk_barrier_failures;
}

/*
 * Write the committed superblock to every writeable device, in two
 * write_dev_supers() passes (submit, then wait), optionally preceded by a
 * flush barrier to all devices.  Returns 0 or -EIO if more devices failed
 * than num_tolerated_disk_barrier_failures allows.
 */
static int write_all_supers(struct btrfs_root *root, int max_mirrors)
{
	struct list_head *head;
	struct btrfs_device *dev;
	struct btrfs_super_block *sb;
	struct btrfs_dev_item *dev_item;
	int ret;
	int do_barriers;
	int max_errors;
	int total_errors = 0;
	u64 flags;

	do_barriers = !btrfs_test_opt(root, NOBARRIER);
	backup_super_roots(root->fs_info);

	sb = root->fs_info->super_for_commit;
	dev_item = &sb->dev_item;

	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	head = &root->fs_info->fs_devices->devices;
	max_errors = btrfs_super_num_devices(root->fs_info->super_copy) - 1;

	if (do_barriers) {
		ret = barrier_all_devices(root->fs_info);
		if (ret) {
			mutex_unlock(
				&root->fs_info->fs_devices->device_list_mutex);
			btrfs_error(root->fs_info, ret,
				    "errors while submitting device barriers.");
			return ret;
		}
	}

	/* pass 1: stamp the per-device dev_item and submit all writes */
	list_for_each_entry_rcu(dev, head, dev_list) {
		if (!dev->bdev) {
			total_errors++;
			continue;
		}
		if (!dev->in_fs_metadata || !dev->writeable)
			continue;

		btrfs_set_stack_device_generation(dev_item, 0);
		btrfs_set_stack_device_type(dev_item, dev->type);
		btrfs_set_stack_device_id(dev_item, dev->devid);
		btrfs_set_stack_device_total_bytes(dev_item,
						   dev->commit_total_bytes);
		btrfs_set_stack_device_bytes_used(dev_item,
						  dev->commit_bytes_used);
		btrfs_set_stack_device_io_align(dev_item, dev->io_align);
		btrfs_set_stack_device_io_width(dev_item, dev->io_width);
		btrfs_set_stack_device_sector_size(dev_item, dev->sector_size);
		memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE);
		memcpy(dev_item->fsid, dev->fs_devices->fsid, BTRFS_UUID_SIZE);

		flags = btrfs_super_flags(sb);
		btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN);

		ret = write_dev_supers(dev, sb, do_barriers, 0, max_mirrors);
		if (ret)
			total_errors++;
	}
	if (total_errors > max_errors) {
		btrfs_err(root->fs_info, "%d errors while writing supers",
		       total_errors);
		mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

		/* FUA is masked off if unsupported and can't be the reason */
		btrfs_error(root->fs_info, -EIO,
			    "%d errors while writing supers", total_errors);
		return -EIO;
	}

	/* pass 2: wait for all the writes submitted above */
	total_errors = 0;
	list_for_each_entry_rcu(dev, head, dev_list) {
		if (!dev->bdev)
			continue;
		if (!dev->in_fs_metadata || !dev->writeable)
			continue;

		ret = write_dev_supers(dev, sb, do_barriers, 1, max_mirrors);
		if (ret)
			total_errors++;
	}
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
	if (total_errors > max_errors) {
		btrfs_error(root->fs_info, -EIO,
			    "%d errors while writing supers", total_errors);
		return -EIO;
	}
	return 0;
}

/* thin wrapper around write_all_supers(); @trans is currently unused here */
int write_ctree_super(struct btrfs_trans_handle *trans,
		      struct btrfs_root *root, int max_mirrors)
{
	return write_all_supers(root, max_mirrors);
}

/* Drop a fs root from the radix tree and free it.
 */
void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info,
				 struct btrfs_root *root)
{
	spin_lock(&fs_info->fs_roots_radix_lock);
	radix_tree_delete(&fs_info->fs_roots_radix,
			  (unsigned long)root->root_key.objectid);
	spin_unlock(&fs_info->fs_roots_radix_lock);

	/* wait for SRCU readers still looking the root up in the radix tree */
	if (btrfs_root_refs(&root->root_item) == 0)
		synchronize_srcu(&fs_info->subvol_srcu);

	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
		btrfs_free_log(NULL, root);

	if (root->free_ino_pinned)
		__btrfs_remove_free_space_cache(root->free_ino_pinned);
	if (root->free_ino_ctl)
		__btrfs_remove_free_space_cache(root->free_ino_ctl);
	free_fs_root(root);
}

/*
 * Release every resource held by an fs root and drop the final reference.
 * Only call once nobody else can reach the root.
 */
static void free_fs_root(struct btrfs_root *root)
{
	iput(root->ino_cache_inode);
	WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree));
	btrfs_free_block_rsv(root, root->orphan_block_rsv);
	root->orphan_block_rsv = NULL;
	if (root->anon_dev)
		free_anon_bdev(root->anon_dev);
	if (root->subv_writers)
		btrfs_free_subvolume_writers(root->subv_writers);
	free_extent_buffer(root->node);
	free_extent_buffer(root->commit_root);
	kfree(root->free_ino_ctl);
	kfree(root->free_ino_pinned);
	kfree(root->name);
	btrfs_put_fs_root(root);
}

/* public wrapper for free_fs_root() */
void btrfs_free_fs_root(struct btrfs_root *root)
{
	free_fs_root(root);
}

/*
 * Walk all fs roots in the radix tree (in batches of 8) and run orphan
 * cleanup on each.  Returns the first error seen, releasing any roots
 * grabbed but not yet processed.
 */
int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
{
	u64 root_objectid = 0;
	struct btrfs_root *gang[8];
	int i = 0;
	int err = 0;
	unsigned int ret = 0;
	int index;

	while (1) {
		index = srcu_read_lock(&fs_info->subvol_srcu);
		ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
					     (void **)gang, root_objectid,
					     ARRAY_SIZE(gang));
		if (!ret) {
			srcu_read_unlock(&fs_info->subvol_srcu, index);
			break;
		}
		root_objectid = gang[ret - 1]->root_key.objectid + 1;

		for (i = 0; i < ret; i++) {
			/* Avoid to grab roots in dead_roots */
			if (btrfs_root_refs(&gang[i]->root_item) == 0) {
				gang[i] = NULL;
				continue;
			}
			/* grab all the search result for later use */
			gang[i] = btrfs_grab_fs_root(gang[i]);
		}
		srcu_read_unlock(&fs_info->subvol_srcu, index);

		for (i = 0; i < ret; i++) {
			if (!gang[i])
				continue;
			root_objectid =
				gang[i]->root_key.objectid;
			err = btrfs_orphan_cleanup(gang[i]);
			if (err)
				break;
			btrfs_put_fs_root(gang[i]);
		}
		root_objectid++;
	}

	/* release the uncleaned roots due to error */
	for (; i < ret; i++) {
		if (gang[i])
			btrfs_put_fs_root(gang[i]);
	}
	return err;
}

/*
 * Flush delayed iputs and commit the current transaction; used on unmount
 * to get everything durable on disk.
 */
int btrfs_commit_super(struct btrfs_root *root)
{
	struct btrfs_trans_handle *trans;

	mutex_lock(&root->fs_info->cleaner_mutex);
	btrfs_run_delayed_iputs(root);
	mutex_unlock(&root->fs_info->cleaner_mutex);
	wake_up_process(root->fs_info->cleaner_kthread);

	/* wait until ongoing cleanup work done */
	down_write(&root->fs_info->cleanup_work_sem);
	up_write(&root->fs_info->cleanup_work_sem);

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		return PTR_ERR(trans);
	return btrfs_commit_transaction(trans, root);
}

/*
 * Tear down a mounted filesystem: stop background work, commit (or abort)
 * the final transaction, stop the kthreads and workers, and release all
 * in-memory state.  The ordering of these steps is load-bearing.
 */
void close_ctree(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;

	fs_info->closing = 1;
	smp_mb();

	/* wait for the uuid_scan task to finish */
	down(&fs_info->uuid_tree_rescan_sem);
	/* avoid complains from lockdep et al., set sem back to initial state */
	up(&fs_info->uuid_tree_rescan_sem);

	/* pause restriper - we want to resume on mount */
	btrfs_pause_balance(fs_info);

	btrfs_dev_replace_suspend_for_unmount(fs_info);

	btrfs_scrub_cancel(fs_info);

	/* wait for any defraggers to finish */
	wait_event(fs_info->transaction_wait,
		   (atomic_read(&fs_info->defrag_running) == 0));

	/* clear out the rbtree of defraggable inodes */
	btrfs_cleanup_defrag_inodes(fs_info);

	cancel_work_sync(&fs_info->async_reclaim_work);

	if (!(fs_info->sb->s_flags & MS_RDONLY)) {
		ret = btrfs_commit_super(root);
		if (ret)
			btrfs_err(fs_info, "commit super ret %d", ret);
	}

	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
		btrfs_error_commit_super(root);

	kthread_stop(fs_info->transaction_kthread);
	kthread_stop(fs_info->cleaner_kthread);

	fs_info->closing = 2;
	smp_mb();

	btrfs_free_qgroup_config(fs_info);

	/* a nonzero count here indicates leaked delalloc accounting */
	if (percpu_counter_sum(&fs_info->delalloc_bytes)) {
		btrfs_info(fs_info, "at unmount delalloc count %lld",
		       percpu_counter_sum(&fs_info->delalloc_bytes));
	}

	btrfs_sysfs_remove_one(fs_info);
	btrfs_sysfs_remove_fsid(fs_info->fs_devices);

	btrfs_free_fs_roots(fs_info);

	btrfs_put_block_group_cache(fs_info);

	btrfs_free_block_groups(fs_info);

	/*
	 * we must make sure there is not any read request to
	 * submit after we stopping all workers.
	 */
	invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
	btrfs_stop_all_workers(fs_info);

	fs_info->open = 0;
	free_root_pointers(fs_info, 1);

	iput(fs_info->btree_inode);

#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
	if (btrfs_test_opt(root, CHECK_INTEGRITY))
		btrfsic_unmount(root, fs_info->fs_devices);
#endif

	btrfs_close_devices(fs_info->fs_devices);
	btrfs_mapping_tree_free(&fs_info->mapping_tree);

	percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
	percpu_counter_destroy(&fs_info->delalloc_bytes);
	percpu_counter_destroy(&fs_info->bio_counter);
	bdi_destroy(&fs_info->bdi);
	cleanup_srcu_struct(&fs_info->subvol_srcu);

	btrfs_free_stripe_hash_table(fs_info);

	__btrfs_free_block_rsv(root->orphan_block_rsv);
	root->orphan_block_rsv = NULL;

	lock_chunks(root);
	while (!list_empty(&fs_info->pinned_chunks)) {
		struct extent_map *em;

		em = list_first_entry(&fs_info->pinned_chunks,
				      struct extent_map, list);
		list_del_init(&em->list);
		free_extent_map(em);
	}
	unlock_chunks(root);
}

/*
 * Check that an extent buffer is up to date and matches the expected
 * transid.  Returns 1 when good, 0 when not, or -EAGAIN when @atomic
 * verification could not complete.
 */
int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,
			  int atomic)
{
	int ret;
	struct inode *btree_inode = buf->pages[0]->mapping->host;

	ret = extent_buffer_uptodate(buf);
	if (!ret)
		return ret;

	ret = verify_parent_transid(&BTRFS_I(btree_inode)->io_tree, buf,
				    parent_transid, atomic);
	if (ret == -EAGAIN)
		return ret;
	return !ret;
}

int btrfs_set_buffer_uptodate(struct extent_buffer *buf)
{
	return set_extent_buffer_uptodate(buf);
}

/*
 * Mark a (locked) extent buffer dirty and account the newly dirtied bytes
 * in the fs-wide dirty_metadata_bytes counter.
 */
void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
{
	struct btrfs_root *root;
	u64 transid = btrfs_header_generation(buf);
	int was_dirty;

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
	/*
	 * This is a fast path so only do this check if we have sanity
	 * tests
	 * enabled.  Normal people shouldn't be marking dummy buffers as dirty
	 * outside of the sanity tests.
	 */
	if (unlikely(test_bit(EXTENT_BUFFER_DUMMY, &buf->bflags)))
		return;
#endif
	root = BTRFS_I(buf->pages[0]->mapping->host)->root;
	btrfs_assert_tree_locked(buf);
	if (transid != root->fs_info->generation)
		WARN(1, KERN_CRIT "btrfs transid mismatch buffer %llu, "
		       "found %llu running %llu\n",
			buf->start, transid, root->fs_info->generation);
	was_dirty = set_extent_buffer_dirty(buf);
	/* only account bytes the first time the buffer goes dirty */
	if (!was_dirty)
		__percpu_counter_add(&root->fs_info->dirty_metadata_bytes,
				     buf->len,
				     root->fs_info->dirty_metadata_batch);
#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
	if (btrfs_header_level(buf) == 0 && check_leaf(root, buf)) {
		btrfs_print_leaf(root, buf);
		ASSERT(0);
	}
#endif
}

/*
 * Throttle metadata writers: optionally flush delayed items, then kick
 * balance_dirty_pages when dirty metadata exceeds the threshold.
 */
static void __btrfs_btree_balance_dirty(struct btrfs_root *root,
					int flush_delayed)
{
	/*
	 * looks as though older kernels can get into trouble with
	 * this code, they end up stuck in balance_dirty_pages forever
	 */
	int ret;

	/* never recurse into reclaim from a memory-allocation context */
	if (current->flags & PF_MEMALLOC)
		return;

	if (flush_delayed)
		btrfs_balance_delayed_items(root);

	ret = percpu_counter_compare(&root->fs_info->dirty_metadata_bytes,
				     BTRFS_DIRTY_METADATA_THRESH);
	if (ret > 0) {
		balance_dirty_pages_ratelimited(
				   root->fs_info->btree_inode->i_mapping);
	}
	return;
}

void btrfs_btree_balance_dirty(struct btrfs_root *root)
{
	__btrfs_btree_balance_dirty(root, 1);
}

void btrfs_btree_balance_dirty_nodelay(struct btrfs_root *root)
{
	__btrfs_btree_balance_dirty(root, 0);
}

int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid)
{
	struct btrfs_root *root = BTRFS_I(buf->pages[0]->mapping->host)->root;
	return btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
}

/*
 * Sanity-check the in-memory superblock copy at mount time.  Hard
 * inconsistencies accumulate -EINVAL in ret; merely suspicious values only
 * produce warnings.  @read_only is currently not consulted here.
 */
static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
			      int read_only)
{
	struct btrfs_super_block *sb = fs_info->super_copy;
	int ret = 0;

	if (btrfs_super_root_level(sb) >= BTRFS_MAX_LEVEL) {
		printk(KERN_ERR "BTRFS: tree_root level too big: %d >= %d\n",
				btrfs_super_root_level(sb), BTRFS_MAX_LEVEL);
		ret = -EINVAL;
	}
	if (btrfs_super_chunk_root_level(sb) >= BTRFS_MAX_LEVEL) {
		printk(KERN_ERR "BTRFS: chunk_root level too big: %d >= %d\n",
				btrfs_super_chunk_root_level(sb), BTRFS_MAX_LEVEL);
		ret = -EINVAL;
	}
	if (btrfs_super_log_root_level(sb) >= BTRFS_MAX_LEVEL) {
		printk(KERN_ERR "BTRFS: log_root level too big: %d >= %d\n",
				btrfs_super_log_root_level(sb), BTRFS_MAX_LEVEL);
		ret = -EINVAL;
	}

	/*
	 * The common minimum, we don't know if we can trust the nodesize/sectorsize
	 * items yet, they'll be verified later. Issue just a warning.
	 */
	if (!IS_ALIGNED(btrfs_super_root(sb), 4096))
		printk(KERN_WARNING "BTRFS: tree_root block unaligned: %llu\n",
				btrfs_super_root(sb));
	if (!IS_ALIGNED(btrfs_super_chunk_root(sb), 4096))
		printk(KERN_WARNING "BTRFS: chunk_root block unaligned: %llu\n",
				btrfs_super_chunk_root(sb));
	if (!IS_ALIGNED(btrfs_super_log_root(sb), 4096))
		printk(KERN_WARNING "BTRFS: log_root block unaligned: %llu\n",
				btrfs_super_log_root(sb));

	/*
	 * Check the lower bound, the alignment and other constraints are
	 * checked later.
	 */
	if (btrfs_super_nodesize(sb) < 4096) {
		printk(KERN_ERR "BTRFS: nodesize too small: %u < 4096\n",
				btrfs_super_nodesize(sb));
		ret = -EINVAL;
	}
	if (btrfs_super_sectorsize(sb) < 4096) {
		printk(KERN_ERR "BTRFS: sectorsize too small: %u < 4096\n",
				btrfs_super_sectorsize(sb));
		ret = -EINVAL;
	}

	if (memcmp(fs_info->fsid, sb->dev_item.fsid, BTRFS_UUID_SIZE) != 0) {
		printk(KERN_ERR "BTRFS: dev_item UUID does not match fsid: %pU != %pU\n",
				fs_info->fsid, sb->dev_item.fsid);
		ret = -EINVAL;
	}

	/*
	 * Hint to catch really bogus numbers, bitflips or so, more exact checks are
	 * done later
	 */
	if (btrfs_super_num_devices(sb) > (1UL << 31))
		printk(KERN_WARNING "BTRFS: suspicious number of devices: %llu\n",
				btrfs_super_num_devices(sb));
	if (btrfs_super_num_devices(sb) == 0) {
		printk(KERN_ERR "BTRFS: number of devices is 0\n");
		ret = -EINVAL;
	}

	if (btrfs_super_bytenr(sb) != BTRFS_SUPER_INFO_OFFSET) {
		printk(KERN_ERR "BTRFS: super offset mismatch %llu != %u\n",
				btrfs_super_bytenr(sb), BTRFS_SUPER_INFO_OFFSET);
		ret = -EINVAL;
	}

	/*
	 * Obvious sys_chunk_array corruptions, it must hold at least one key
	 * and one chunk
	 */
	if (btrfs_super_sys_array_size(sb) > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
		printk(KERN_ERR "BTRFS: system chunk array too big %u > %u\n",
				btrfs_super_sys_array_size(sb),
				BTRFS_SYSTEM_CHUNK_ARRAY_SIZE);
		ret = -EINVAL;
	}
	if (btrfs_super_sys_array_size(sb) < sizeof(struct btrfs_disk_key)
			+ sizeof(struct btrfs_chunk)) {
		printk(KERN_ERR "BTRFS: system chunk array too small %u < %zu\n",
				btrfs_super_sys_array_size(sb),
				sizeof(struct btrfs_disk_key)
				+ sizeof(struct btrfs_chunk));
		ret = -EINVAL;
	}

	/*
	 * The generation is a global counter, we'll trust it more than the others
	 * but it's still possible that it's the one that's wrong.
	 */
	if (btrfs_super_generation(sb) < btrfs_super_chunk_root_generation(sb))
		printk(KERN_WARNING
			"BTRFS: suspicious: generation < chunk_root_generation: %llu < %llu\n",
			btrfs_super_generation(sb), btrfs_super_chunk_root_generation(sb));
	if (btrfs_super_generation(sb) < btrfs_super_cache_generation(sb)
	    && btrfs_super_cache_generation(sb) != (u64)-1)
		printk(KERN_WARNING
			"BTRFS: suspicious: generation < cache_generation: %llu < %llu\n",
			btrfs_super_generation(sb), btrfs_super_cache_generation(sb));

	return ret;
}

/*
 * Unmount-time commit path for a filesystem that hit a fatal error:
 * flush delayed iputs, wait for cleanup work, then tear down the
 * transaction state instead of committing it.
 */
static void btrfs_error_commit_super(struct btrfs_root *root)
{
	mutex_lock(&root->fs_info->cleaner_mutex);
	btrfs_run_delayed_iputs(root);
	mutex_unlock(&root->fs_info->cleaner_mutex);

	down_write(&root->fs_info->cleanup_work_sem);
	up_write(&root->fs_info->cleanup_work_sem);

	/* cleanup FS via transaction */
	btrfs_cleanup_transaction(root);
}

/* Flag every pending ordered extent on @root with an IO error. */
static void btrfs_destroy_ordered_extents(struct btrfs_root *root)
{
	struct btrfs_ordered_extent *ordered;

	spin_lock(&root->ordered_extent_lock);
	/*
	 * This will just short circuit the ordered completion stuff which will
	 * make sure the ordered extent gets properly cleaned up.
	 */
	list_for_each_entry(ordered, &root->ordered_extents,
			    root_extent_list)
		set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);

	spin_unlock(&root->ordered_extent_lock);
}

/* Apply btrfs_destroy_ordered_extents() to every root with ordered IO. */
static void btrfs_destroy_all_ordered_extents(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root;
	struct list_head splice;

	INIT_LIST_HEAD(&splice);

	spin_lock(&fs_info->ordered_root_lock);
	list_splice_init(&fs_info->ordered_roots, &splice);
	while (!list_empty(&splice)) {
		root = list_first_entry(&splice, struct btrfs_root,
					ordered_root);
		/* put the root back before dropping the lock */
		list_move_tail(&root->ordered_root,
			       &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);
		btrfs_destroy_ordered_extents(root);

		cond_resched();
		spin_lock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&fs_info->ordered_root_lock);
}

/*
 * Drop every delayed ref of an aborted transaction, pinning extents whose
 * reservation must still be released.  Walks the href rbtree head by head,
 * taking each head's mutex before tearing it down.
 */
static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
				      struct btrfs_root *root)
{
	struct rb_node *node;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_node *ref;
	int ret = 0;

	delayed_refs = &trans->delayed_refs;

	spin_lock(&delayed_refs->lock);
	if (atomic_read(&delayed_refs->num_entries) == 0) {
		spin_unlock(&delayed_refs->lock);
		btrfs_info(root->fs_info, "delayed_refs has NO entry");
		return ret;
	}

	while ((node = rb_first(&delayed_refs->href_root)) != NULL) {
		struct btrfs_delayed_ref_head *head;
		struct btrfs_delayed_ref_node *tmp;
		bool pin_bytes = false;

		head = rb_entry(node, struct btrfs_delayed_ref_head,
				href_node);
		if (!mutex_trylock(&head->mutex)) {
			/* someone holds the head; wait for them and retry */
			atomic_inc(&head->node.refs);
			spin_unlock(&delayed_refs->lock);

			mutex_lock(&head->mutex);
			mutex_unlock(&head->mutex);
			btrfs_put_delayed_ref(&head->node);
			spin_lock(&delayed_refs->lock);
			continue;
		}
		spin_lock(&head->lock);
		list_for_each_entry_safe_reverse(ref, tmp, &head->ref_list,
						 list) {
			ref->in_tree = 0;
			list_del(&ref->list);
			atomic_dec(&delayed_refs->num_entries);
			btrfs_put_delayed_ref(ref);
		}
		if (head->must_insert_reserved)
			pin_bytes = true;
		btrfs_free_delayed_extent_op(head->extent_op);
		delayed_refs->num_heads--;
		if (head->processing ==
0)
			delayed_refs->num_heads_ready--;
		atomic_dec(&delayed_refs->num_entries);
		head->node.in_tree = 0;
		rb_erase(&head->href_node, &delayed_refs->href_root);
		spin_unlock(&head->lock);
		spin_unlock(&delayed_refs->lock);
		mutex_unlock(&head->mutex);

		if (pin_bytes)
			btrfs_pin_extent(root, head->node.bytenr,
					 head->node.num_bytes, 1);
		btrfs_put_delayed_ref(&head->node);
		cond_resched();
		spin_lock(&delayed_refs->lock);
	}

	spin_unlock(&delayed_refs->lock);

	return ret;
}

/* Pull every inode off @root's delalloc list and invalidate the root. */
static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
{
	struct btrfs_inode *btrfs_inode;
	struct list_head splice;

	INIT_LIST_HEAD(&splice);

	spin_lock(&root->delalloc_lock);
	list_splice_init(&root->delalloc_inodes, &splice);

	while (!list_empty(&splice)) {
		btrfs_inode = list_first_entry(&splice, struct btrfs_inode,
					       delalloc_inodes);

		list_del_init(&btrfs_inode->delalloc_inodes);
		clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
			  &btrfs_inode->runtime_flags);
		spin_unlock(&root->delalloc_lock);

		btrfs_invalidate_inodes(btrfs_inode->root);

		spin_lock(&root->delalloc_lock);
	}

	spin_unlock(&root->delalloc_lock);
}

/* Run btrfs_destroy_delalloc_inodes() on every root with delalloc. */
static void btrfs_destroy_all_delalloc_inodes(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root;
	struct list_head splice;

	INIT_LIST_HEAD(&splice);

	spin_lock(&fs_info->delalloc_root_lock);
	list_splice_init(&fs_info->delalloc_roots, &splice);
	while (!list_empty(&splice)) {
		root = list_first_entry(&splice, struct btrfs_root,
					delalloc_root);
		list_del_init(&root->delalloc_root);
		root = btrfs_grab_fs_root(root);
		BUG_ON(!root);
		spin_unlock(&fs_info->delalloc_root_lock);

		btrfs_destroy_delalloc_inodes(root);
		btrfs_put_fs_root(root);

		spin_lock(&fs_info->delalloc_root_lock);
	}
	spin_unlock(&fs_info->delalloc_root_lock);
}

static int btrfs_destroy_marked_extents(struct btrfs_root *root,
					struct extent_io_tree *dirty_pages,
					int mark)
{
	int ret;
	struct extent_buffer *eb;
	u64 start = 0;
	u64 end;

	while (1) {
		ret = find_first_extent_bit(dirty_pages, start, &start, &end,
					    mark, NULL);
		if (ret)
			break;
clear_extent_bits(dirty_pages, start, end, mark, GFP_NOFS); while (start <= end) { eb = btrfs_find_tree_block(root->fs_info, start); start += root->nodesize; if (!eb) continue; wait_on_extent_buffer_writeback(eb); if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) clear_extent_buffer_dirty(eb); free_extent_buffer_stale(eb); } } return ret; } static int btrfs_destroy_pinned_extent(struct btrfs_root *root, struct extent_io_tree *pinned_extents) { struct extent_io_tree *unpin; u64 start; u64 end; int ret; bool loop = true; unpin = pinned_extents; again: while (1) { ret = find_first_extent_bit(unpin, 0, &start, &end, EXTENT_DIRTY, NULL); if (ret) break; clear_extent_dirty(unpin, start, end, GFP_NOFS); btrfs_error_unpin_extent_range(root, start, end); cond_resched(); } if (loop) { if (unpin == &root->fs_info->freed_extents[0]) unpin = &root->fs_info->freed_extents[1]; else unpin = &root->fs_info->freed_extents[0]; loop = false; goto again; } return 0; } static void btrfs_free_pending_ordered(struct btrfs_transaction *cur_trans, struct btrfs_fs_info *fs_info) { struct btrfs_ordered_extent *ordered; spin_lock(&fs_info->trans_lock); while (!list_empty(&cur_trans->pending_ordered)) { ordered = list_first_entry(&cur_trans->pending_ordered, struct btrfs_ordered_extent, trans_list); list_del_init(&ordered->trans_list); spin_unlock(&fs_info->trans_lock); btrfs_put_ordered_extent(ordered); spin_lock(&fs_info->trans_lock); } spin_unlock(&fs_info->trans_lock); } void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans, struct btrfs_root *root) { btrfs_destroy_delayed_refs(cur_trans, root); cur_trans->state = TRANS_STATE_COMMIT_START; wake_up(&root->fs_info->transaction_blocked_wait); cur_trans->state = TRANS_STATE_UNBLOCKED; wake_up(&root->fs_info->transaction_wait); btrfs_free_pending_ordered(cur_trans, root->fs_info); btrfs_destroy_delayed_inodes(root); btrfs_assert_delayed_root_empty(root); btrfs_destroy_marked_extents(root, &cur_trans->dirty_pages, 
EXTENT_DIRTY); btrfs_destroy_pinned_extent(root, root->fs_info->pinned_extents); cur_trans->state =TRANS_STATE_COMPLETED; wake_up(&cur_trans->commit_wait); /* memset(cur_trans, 0, sizeof(*cur_trans)); kmem_cache_free(btrfs_transaction_cachep, cur_trans); */ } static int btrfs_cleanup_transaction(struct btrfs_root *root) { struct btrfs_transaction *t; mutex_lock(&root->fs_info->transaction_kthread_mutex); spin_lock(&root->fs_info->trans_lock); while (!list_empty(&root->fs_info->trans_list)) { t = list_first_entry(&root->fs_info->trans_list, struct btrfs_transaction, list); if (t->state >= TRANS_STATE_COMMIT_START) { atomic_inc(&t->use_count); spin_unlock(&root->fs_info->trans_lock); btrfs_wait_for_commit(root, t->transid); btrfs_put_transaction(t); spin_lock(&root->fs_info->trans_lock); continue; } if (t == root->fs_info->running_transaction) { t->state = TRANS_STATE_COMMIT_DOING; spin_unlock(&root->fs_info->trans_lock); /* * We wait for 0 num_writers since we don't hold a trans * handle open currently for this transaction. 
*/ wait_event(t->writer_wait, atomic_read(&t->num_writers) == 0); } else { spin_unlock(&root->fs_info->trans_lock); } btrfs_cleanup_one_transaction(t, root); spin_lock(&root->fs_info->trans_lock); if (t == root->fs_info->running_transaction) root->fs_info->running_transaction = NULL; list_del_init(&t->list); spin_unlock(&root->fs_info->trans_lock); btrfs_put_transaction(t); trace_btrfs_transaction_commit(root); spin_lock(&root->fs_info->trans_lock); } spin_unlock(&root->fs_info->trans_lock); btrfs_destroy_all_ordered_extents(root->fs_info); btrfs_destroy_delayed_inodes(root); btrfs_assert_delayed_root_empty(root); btrfs_destroy_pinned_extent(root, root->fs_info->pinned_extents); btrfs_destroy_all_delalloc_inodes(root->fs_info); mutex_unlock(&root->fs_info->transaction_kthread_mutex); return 0; } static const struct extent_io_ops btree_extent_io_ops = { .readpage_end_io_hook = btree_readpage_end_io_hook, .readpage_io_failed_hook = btree_io_failed_hook, .submit_bio_hook = btree_submit_bio_hook, /* note we're sharing with inode.c for the merge bio hook */ .merge_bio_hook = btrfs_merge_bio_hook, };
gpl-2.0
ochiman/e405-kernel
drivers/gpu/drm/i915/intel_fb.c
114
7071
/* * Copyright © 2007 David Airlie * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
* * Authors: * David Airlie */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/tty.h> #include <linux/sysrq.h> #include <linux/delay.h> #include <linux/fb.h> #include <linux/init.h> #include <linux/vga_switcheroo.h> #include "drmP.h" #include "drm.h" #include "drm_crtc.h" #include "drm_fb_helper.h" #include "intel_drv.h" #include "i915_drm.h" #include "i915_drv.h" static struct fb_ops intelfb_ops = { .owner = THIS_MODULE, .fb_check_var = drm_fb_helper_check_var, .fb_set_par = drm_fb_helper_set_par, .fb_fillrect = cfb_fillrect, .fb_copyarea = cfb_copyarea, .fb_imageblit = cfb_imageblit, .fb_pan_display = drm_fb_helper_pan_display, .fb_blank = drm_fb_helper_blank, .fb_setcmap = drm_fb_helper_setcmap, .fb_debug_enter = drm_fb_helper_debug_enter, .fb_debug_leave = drm_fb_helper_debug_leave, }; static int intelfb_create(struct intel_fbdev *ifbdev, struct drm_fb_helper_surface_size *sizes) { struct drm_device *dev = ifbdev->helper.dev; struct drm_i915_private *dev_priv = dev->dev_private; struct fb_info *info; struct drm_framebuffer *fb; struct drm_mode_fb_cmd mode_cmd; struct drm_i915_gem_object *obj; struct device *device = &dev->pdev->dev; int size, ret; /* we don't do packed 24bpp */ if (sizes->surface_bpp == 24) sizes->surface_bpp = 32; mode_cmd.width = sizes->surface_width; mode_cmd.height = sizes->surface_height; mode_cmd.bpp = sizes->surface_bpp; mode_cmd.pitch = ALIGN(mode_cmd.width * ((mode_cmd.bpp + 7) / 8), 64); mode_cmd.depth = sizes->surface_depth; size = mode_cmd.pitch * mode_cmd.height; size = ALIGN(size, PAGE_SIZE); obj = i915_gem_alloc_object(dev, size); if (!obj) { DRM_ERROR("failed to allocate framebuffer\n"); ret = -ENOMEM; goto out; } mutex_lock(&dev->struct_mutex); /* Flush everything out, we'll be doing GTT only from now on */ ret = intel_pin_and_fence_fb_obj(dev, obj, false); if (ret) { DRM_ERROR("failed to pin fb: %d\n", ret); goto out_unref; } info = 
framebuffer_alloc(0, device); if (!info) { ret = -ENOMEM; goto out_unpin; } info->par = ifbdev; ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, obj); if (ret) goto out_unpin; fb = &ifbdev->ifb.base; ifbdev->helper.fb = fb; ifbdev->helper.fbdev = info; strcpy(info->fix.id, "inteldrmfb"); info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT; info->fbops = &intelfb_ops; ret = fb_alloc_cmap(&info->cmap, 256, 0); if (ret) { ret = -ENOMEM; goto out_unpin; } /* setup aperture base/size for vesafb takeover */ info->apertures = alloc_apertures(1); if (!info->apertures) { ret = -ENOMEM; goto out_unpin; } info->apertures->ranges[0].base = dev->mode_config.fb_base; info->apertures->ranges[0].size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT; info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset; info->fix.smem_len = size; info->screen_base = ioremap_wc(dev->agp->base + obj->gtt_offset, size); if (!info->screen_base) { ret = -ENOSPC; goto out_unpin; } info->screen_size = size; // memset(info->screen_base, 0, size); drm_fb_helper_fill_fix(info, fb->pitch, fb->depth); drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height); info->pixmap.size = 64*1024; info->pixmap.buf_align = 8; info->pixmap.access_align = 32; info->pixmap.flags = FB_PIXMAP_SYSTEM; info->pixmap.scan_align = 1; DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n", fb->width, fb->height, obj->gtt_offset, obj); mutex_unlock(&dev->struct_mutex); vga_switcheroo_client_fb_set(dev->pdev, info); return 0; out_unpin: i915_gem_object_unpin(obj); out_unref: drm_gem_object_unreference(&obj->base); mutex_unlock(&dev->struct_mutex); out: return ret; } static int intel_fb_find_or_create_single(struct drm_fb_helper *helper, struct drm_fb_helper_surface_size *sizes) { struct intel_fbdev *ifbdev = (struct intel_fbdev *)helper; int new_fb = 0; int ret; if (!helper->fb) { ret = intelfb_create(ifbdev, sizes); if (ret) return ret; new_fb = 1; } return new_fb; } static struct 
drm_fb_helper_funcs intel_fb_helper_funcs = { .gamma_set = intel_crtc_fb_gamma_set, .gamma_get = intel_crtc_fb_gamma_get, .fb_probe = intel_fb_find_or_create_single, }; static void intel_fbdev_destroy(struct drm_device *dev, struct intel_fbdev *ifbdev) { struct fb_info *info; struct intel_framebuffer *ifb = &ifbdev->ifb; if (ifbdev->helper.fbdev) { info = ifbdev->helper.fbdev; unregister_framebuffer(info); iounmap(info->screen_base); if (info->cmap.len) fb_dealloc_cmap(&info->cmap); framebuffer_release(info); } drm_fb_helper_fini(&ifbdev->helper); drm_framebuffer_cleanup(&ifb->base); if (ifb->obj) { drm_gem_object_unreference_unlocked(&ifb->obj->base); ifb->obj = NULL; } } int intel_fbdev_init(struct drm_device *dev) { struct intel_fbdev *ifbdev; drm_i915_private_t *dev_priv = dev->dev_private; int ret; ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL); if (!ifbdev) return -ENOMEM; dev_priv->fbdev = ifbdev; ifbdev->helper.funcs = &intel_fb_helper_funcs; ret = drm_fb_helper_init(dev, &ifbdev->helper, dev_priv->num_pipe, INTELFB_CONN_LIMIT); if (ret) { kfree(ifbdev); return ret; } drm_fb_helper_single_add_all_connectors(&ifbdev->helper); drm_fb_helper_initial_config(&ifbdev->helper, 32); return 0; } void intel_fbdev_fini(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; if (!dev_priv->fbdev) return; intel_fbdev_destroy(dev, dev_priv->fbdev); kfree(dev_priv->fbdev); dev_priv->fbdev = NULL; } MODULE_LICENSE("GPL and additional rights"); void intel_fb_output_poll_changed(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper); }
gpl-2.0
qwerty97/rk3066
drivers/media/video/gt2005.c
114
94309
/* o* Driver for MT9M001 CMOS Image Sensor from Micron * * Copyright (C) 2008, Guennadi Liakhovetski <kernel@pengutronix.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/videodev2.h> #include <linux/slab.h> #include <linux/i2c.h> #include <linux/log2.h> #include <linux/platform_device.h> #include <linux/delay.h> #include <linux/circ_buf.h> #include <linux/miscdevice.h> #include <media/v4l2-common.h> #include <media/v4l2-chip-ident.h> #include <media/soc_camera.h> #include <plat/rk_camera.h> static int debug; module_param(debug, int, S_IRUGO|S_IWUSR); #define dprintk(level, fmt, arg...) do { \ if (debug >= level) \ printk(KERN_WARNING fmt , ## arg); } while (0) #define SENSOR_TR(format, ...) printk(KERN_ERR format, ## __VA_ARGS__) #define SENSOR_DG(format, ...) dprintk(1, format, ## __VA_ARGS__) #define _CONS(a,b) a##b #define CONS(a,b) _CONS(a,b) #define __STR(x) #x #define _STR(x) __STR(x) #define STR(x) _STR(x) #define MIN(x,y) ((x<y) ? x: y) #define MAX(x,y) ((x>y) ? 
x: y) /* Sensor Driver Configuration */ #define SENSOR_NAME RK29_CAM_SENSOR_GT2005 #define SENSOR_V4L2_IDENT V4L2_IDENT_GT2005 #define SENSOR_ID 0x5138 #define SENSOR_MIN_WIDTH 640 #define SENSOR_MIN_HEIGHT 480 #define SENSOR_MAX_WIDTH 1600 #define SENSOR_MAX_HEIGHT 1200 #define SENSOR_INIT_WIDTH 640 /* Sensor pixel size for sensor_init_data array */ #define SENSOR_INIT_HEIGHT 480 #define SENSOR_INIT_WINSEQADR sensor_vga #define SENSOR_INIT_PIXFMT V4L2_MBUS_FMT_YUYV8_2X8 #define CONFIG_SENSOR_WhiteBalance 1 #define CONFIG_SENSOR_Brightness 0 #define CONFIG_SENSOR_Contrast 0 #define CONFIG_SENSOR_Saturation 0 #define CONFIG_SENSOR_Effect 1 #define CONFIG_SENSOR_Scene 1 #define CONFIG_SENSOR_DigitalZoom 0 #define CONFIG_SENSOR_Focus 0 #define CONFIG_SENSOR_Exposure 0 #define CONFIG_SENSOR_Flash 1 #define CONFIG_SENSOR_Mirror 0 #define CONFIG_SENSOR_Flip 0 #define CONFIG_SENSOR_I2C_SPEED 250000 /* Hz */ /* Sensor write register continues by preempt_disable/preempt_enable for current process not be scheduled */ #define CONFIG_SENSOR_I2C_NOSCHED 0 #define CONFIG_SENSOR_I2C_RDWRCHK 0 #define SENSOR_BUS_PARAM (SOCAM_MASTER | SOCAM_PCLK_SAMPLE_RISING |\ SOCAM_HSYNC_ACTIVE_HIGH | SOCAM_VSYNC_ACTIVE_HIGH |\ SOCAM_DATA_ACTIVE_HIGH | SOCAM_DATAWIDTH_8 |SOCAM_MCLK_24MHZ) #define COLOR_TEMPERATURE_CLOUDY_DN 6500 #define COLOR_TEMPERATURE_CLOUDY_UP 8000 #define COLOR_TEMPERATURE_CLEARDAY_DN 5000 #define COLOR_TEMPERATURE_CLEARDAY_UP 6500 #define COLOR_TEMPERATURE_OFFICE_DN 3500 #define COLOR_TEMPERATURE_OFFICE_UP 5000 #define COLOR_TEMPERATURE_HOME_DN 2500 #define COLOR_TEMPERATURE_HOME_UP 3500 #define SENSOR_NAME_STRING(a) STR(CONS(SENSOR_NAME, a)) #define SENSOR_NAME_VARFUN(a) CONS(SENSOR_NAME, a) #define SENSOR_AF_IS_ERR (0x00<<0) #define SENSOR_AF_IS_OK (0x01<<0) #define SENSOR_INIT_IS_ERR (0x00<<28) #define SENSOR_INIT_IS_OK (0x01<<28) struct reginfo { u16 reg; u8 val; }; //flash off in fixed time to prevent from too hot , zyc struct flash_timer{ struct soc_camera_device 
*icd; struct hrtimer timer; }; static enum hrtimer_restart flash_off_func(struct hrtimer *timer); static struct flash_timer flash_off_timer; //for user defined if user want to customize the series , zyc #ifdef CONFIG_GT2005_USER_DEFINED_SERIES #include "gt2005_user_series.c" #else /* init 352X288 SVGA */ static struct reginfo sensor_init_data[] = { {0x0101 , 0x00}, {0x0103 , 0x00}, {0x0105 , 0x00}, {0x0106 , 0xF0}, {0x0107 , 0x00}, {0x0108 , 0x1C}, {0x0109 , 0x01}, {0x010A , 0x00}, {0x010B , 0x00}, {0x010C , 0x00}, {0x010D , 0x08}, {0x010E , 0x00}, {0x010F , 0x08}, {0x0110 , 0x06}, {0x0111 , 0x40}, {0x0112 , 0x04}, {0x0113 , 0xB0}, {0x0114 , 0x00}, {0x0115 , 0x00}, {0x0116 , 0x02}, {0x0117 , 0x00}, {0x0118 , 0x67}, {0x0119 , 0x02}, {0x011A , 0x04}, {0x011B , 0x01}, {0x011C , 0x01}, {0x011D , 0x02}, {0x011E , 0x00}, {0x011F , 0x00}, {0x0120 , 0x1C}, {0x0121 , 0x00}, {0x0122 , 0x04}, {0x0123 , 0x00}, {0x0124 , 0x00}, {0x0125 , 0x00}, {0x0126 , 0x00}, {0x0127 , 0x00}, {0x0128 , 0x00}, {0x0200 , 0x00}, {0x0201 , 0x00}, {0x0202 , 0x40}, {0x0203 , 0x00}, {0x0204 , 0x03}, {0x0205 , 0x1F}, {0x0206 , 0x0B}, {0x0207 , 0x20}, {0x0208 , 0x00}, {0x0209 , 0x2A}, {0x020A , 0x01}, {0x020B , 0x48}, {0x020C , 0x64}, {0x020D , 0xC8}, {0x020E , 0xBC}, {0x020F , 0x08}, {0x0210 , 0xD6}, {0x0211 , 0x00}, {0x0212 , 0x20}, {0x0213 , 0x81}, {0x0214 , 0x15}, {0x0215 , 0x00}, {0x0216 , 0x00}, {0x0217 , 0x00}, {0x0218 , 0x46}, {0x0219 , 0x30}, {0x021A , 0x03}, {0x021B , 0x28}, {0x021C , 0x02}, {0x021D , 0x60}, {0x021E , 0x00}, {0x021F , 0x00}, {0x0220 , 0x08}, {0x0221 , 0x08}, {0x0222 , 0x04}, {0x0223 , 0x00}, {0x0224 , 0x1F}, {0x0225 , 0x1E}, {0x0226 , 0x18}, {0x0227 , 0x1D}, {0x0228 , 0x1F}, {0x0229 , 0x1F}, {0x022A , 0x01}, {0x022B , 0x04}, {0x022C , 0x05}, {0x022D , 0x05}, {0x022E , 0x04}, {0x022F , 0x03}, {0x0230 , 0x02}, {0x0231 , 0x1F}, {0x0232 , 0x1A}, {0x0233 , 0x19}, {0x0234 , 0x19}, {0x0235 , 0x1B}, {0x0236 , 0x1F}, {0x0237 , 0x04}, {0x0238 , 0xEE}, {0x0239 , 0xFF}, {0x023A , 0x00}, 
{0x023B , 0x00}, {0x023C , 0x00}, {0x023D , 0x00}, {0x023E , 0x00}, {0x023F , 0x00}, {0x0240 , 0x00}, {0x0241 , 0x00}, {0x0242 , 0x00}, {0x0243 , 0x21}, {0x0244 , 0x42}, {0x0245 , 0x53}, {0x0246 , 0x54}, {0x0247 , 0x54}, {0x0248 , 0x54}, {0x0249 , 0x33}, {0x024A , 0x11}, {0x024B , 0x00}, {0x024C , 0x00}, {0x024D , 0xFF}, {0x024E , 0xEE}, {0x024F , 0xDD}, {0x0250 , 0x00}, {0x0251 , 0x00}, {0x0252 , 0x00}, {0x0253 , 0x00}, {0x0254 , 0x00}, {0x0255 , 0x00}, {0x0256 , 0x00}, {0x0257 , 0x00}, {0x0258 , 0x00}, {0x0259 , 0x00}, {0x025A , 0x00}, {0x025B , 0x00}, {0x025C , 0x00}, {0x025D , 0x00}, {0x025E , 0x00}, {0x025F , 0x00}, {0x0260 , 0x00}, {0x0261 , 0x00}, {0x0262 , 0x00}, {0x0263 , 0x00}, {0x0264 , 0x00}, {0x0265 , 0x00}, {0x0266 , 0x00}, {0x0267 , 0x00}, {0x0268 , 0x8F}, {0x0269 , 0xA3}, {0x026A , 0xB4}, {0x026B , 0x90}, {0x026C , 0x00}, {0x026D , 0xD0}, {0x026E , 0x60}, {0x026F , 0xA0}, {0x0270 , 0x40}, {0x0300 , 0x81}, {0x0301 , 0x80}, {0x0302 , 0x22}, {0x0303 , 0x06}, {0x0304 , 0x03}, {0x0305 , 0x83}, {0x0306 , 0x00}, {0x0307 , 0x22}, {0x0308 , 0x00}, {0x0309 , 0x55}, {0x030A , 0x55}, {0x030B , 0x55}, {0x030C , 0x54}, {0x030D , 0x1F}, {0x030E , 0x13}, {0x030F , 0x10}, {0x0310 , 0x04}, {0x0311 , 0xFF}, {0x0312 , 0x08}, {0x0313 , 0x28}, {0x0314 , 0x66}, {0x0315 , 0x16}, {0x0316 , 0x26}, {0x0317 , 0x02}, {0x0318 , 0x08}, {0x0319 , 0x0C}, {0x031A , 0x81}, {0x031B , 0x00}, {0x031C , 0x3D}, {0x031D , 0x00}, {0x031E , 0xF9}, {0x031F , 0x00}, {0x0320 , 0x24}, {0x0321 , 0x14}, {0x0322 , 0x1A}, {0x0323 , 0x24}, {0x0324 , 0x08}, {0x0325 , 0xF0}, {0x0326 , 0x30}, {0x0327 , 0x17}, {0x0328 , 0x11}, {0x0329 , 0x22}, {0x032A , 0x2F}, {0x032B , 0x21}, {0x032C , 0xDA}, {0x032D , 0x10}, {0x032E , 0xEA}, {0x032F , 0x18}, {0x0330 , 0x29}, {0x0331 , 0x25}, {0x0332 , 0x12}, {0x0333 , 0x0F}, {0x0334 , 0xE0}, {0x0335 , 0x13}, {0x0336 , 0xFF}, {0x0337 , 0x20}, {0x0338 , 0x46}, {0x0339 , 0x04}, {0x033A , 0x04}, {0x033B , 0xFF}, {0x033C , 0x01}, {0x033D , 0x00}, {0x033E , 0x03}, {0x033F , 
0x28}, {0x0340 , 0x02}, {0x0341 , 0x60}, {0x0342 , 0xAC}, {0x0343 , 0x97}, {0x0344 , 0x7F}, {0x0400 , 0xE8}, {0x0401 , 0x40}, {0x0402 , 0x00}, {0x0403 , 0x00}, {0x0404 , 0xF8}, {0x0405 , 0x03}, {0x0406 , 0x03}, {0x0407 , 0x85}, {0x0408 , 0x44}, {0x0409 , 0x1F}, {0x040A , 0x40}, {0x040B , 0x33}, {0x040C , 0xA0}, {0x040D , 0x00}, {0x040E , 0x00}, {0x040F , 0x00}, {0x0410 , 0x0D}, {0x0411 , 0x0D}, {0x0412 , 0x0C}, {0x0413 , 0x04}, {0x0414 , 0x00}, {0x0415 , 0x00}, {0x0416 , 0x07}, {0x0417 , 0x09}, {0x0418 , 0x16}, {0x0419 , 0x14}, {0x041A , 0x11}, {0x041B , 0x14}, {0x041C , 0x07}, {0x041D , 0x07}, {0x041E , 0x06}, {0x041F , 0x02}, {0x0420 , 0x42}, {0x0421 , 0x42}, {0x0422 , 0x47}, {0x0423 , 0x39}, {0x0424 , 0x3E}, {0x0425 , 0x4D}, {0x0426 , 0x46}, {0x0427 , 0x3A}, {0x0428 , 0x21}, {0x0429 , 0x21}, {0x042A , 0x26}, {0x042B , 0x1C}, {0x042C , 0x25}, {0x042D , 0x25}, {0x042E , 0x28}, {0x042F , 0x20}, {0x0430 , 0x3E}, {0x0431 , 0x3E}, {0x0432 , 0x33}, {0x0433 , 0x2E}, {0x0434 , 0x54}, {0x0435 , 0x53}, {0x0436 , 0x3C}, {0x0437 , 0x51}, {0x0438 , 0x2B}, {0x0439 , 0x2B}, {0x043A , 0x38}, {0x043B , 0x22}, {0x043C , 0x3B}, {0x043D , 0x3B}, {0x043E , 0x31}, {0x043F , 0x37}, {0x0440 , 0x00}, {0x0441 , 0x4B}, {0x0442 , 0x00}, {0x0443 , 0x00}, {0x0444 , 0x31}, {0x0445 , 0x00}, {0x0446 , 0x00}, {0x0447 , 0x00}, {0x0448 , 0x00}, {0x0449 , 0x00}, {0x044A , 0x00}, {0x044D , 0xE0}, {0x044E , 0x05}, {0x044F , 0x07}, {0x0450 , 0x00}, {0x0451 , 0x00}, {0x0452 , 0x00}, {0x0453 , 0x00}, {0x0454 , 0x00}, {0x0455 , 0x00}, {0x0456 , 0x00}, {0x0457 , 0x00}, {0x0458 , 0x00}, {0x0459 , 0x00}, {0x045A , 0x00}, {0x045B , 0x00}, {0x045C , 0x00}, {0x045D , 0x00}, {0x045E , 0x00}, {0x045F , 0x00}, {0x0460 , 0x80}, {0x0461 , 0x10}, {0x0462 , 0x10}, {0x0463 , 0x10}, {0x0464 , 0x08}, {0x0465 , 0x08}, {0x0466 , 0x11}, {0x0467 , 0x09}, {0x0468 , 0x23}, {0x0469 , 0x2A}, {0x046A , 0x2A}, {0x046B , 0x47}, {0x046C , 0x52}, {0x046D , 0x42}, {0x046E , 0x36}, {0x046F , 0x46}, {0x0470 , 0x3A}, {0x0471 , 0x32}, 
{0x0472 , 0x32}, {0x0473 , 0x38}, {0x0474 , 0x3D}, {0x0475 , 0x2F}, {0x0476 , 0x29}, {0x0477 , 0x48}, {0x0600 , 0x00}, {0x0601 , 0x24}, {0x0602 , 0x45}, {0x0603 , 0x0E}, {0x0604 , 0x14}, {0x0605 , 0x2F}, {0x0606 , 0x01}, {0x0607 , 0x0E}, {0x0608 , 0x0E}, {0x0609 , 0x37}, {0x060A , 0x18}, {0x060B , 0xA0}, {0x060C , 0x20}, {0x060D , 0x07}, {0x060E , 0x47}, {0x060F , 0x90}, {0x0610 , 0x06}, {0x0611 , 0x0C}, {0x0612 , 0x28}, {0x0613 , 0x13}, {0x0614 , 0x0B}, {0x0615 , 0x10}, {0x0616 , 0x14}, {0x0617 , 0x19}, {0x0618 , 0x52}, {0x0619 , 0xA0}, {0x061A , 0x11}, {0x061B , 0x33}, {0x061C , 0x56}, {0x061D , 0x20}, {0x061E , 0x28}, {0x061F , 0x2B}, {0x0620 , 0x22}, {0x0621 , 0x11}, {0x0622 , 0x75}, {0x0623 , 0x49}, {0x0624 , 0x6E}, {0x0625 , 0x80}, {0x0626 , 0x02}, {0x0627 , 0x0C}, {0x0628 , 0x51}, {0x0629 , 0x25}, {0x062A , 0x01}, {0x062B , 0x3D}, {0x062C , 0x04}, {0x062D , 0x01}, {0x062E , 0x0C}, {0x062F , 0x2C}, {0x0630 , 0x0D}, {0x0631 , 0x14}, {0x0632 , 0x12}, {0x0633 , 0x34}, {0x0634 , 0x00}, {0x0635 , 0x00}, {0x0636 , 0x00}, {0x0637 , 0xB1}, {0x0638 , 0x22}, {0x0639 , 0x32}, {0x063A , 0x0E}, {0x063B , 0x18}, {0x063C , 0x88}, {0x0640 , 0xB2}, {0x0641 , 0xC0}, {0x0642 , 0x01}, {0x0643 , 0x26}, {0x0644 , 0x13}, {0x0645 , 0x88}, {0x0646 , 0x64}, {0x0647 , 0x00}, {0x0681 , 0x1B}, {0x0682 , 0xA0}, {0x0683 , 0x28}, {0x0684 , 0x00}, {0x0685 , 0xB0}, {0x0686 , 0x6F}, {0x0687 , 0x33}, {0x0688 , 0x1F}, {0x0689 , 0x44}, {0x068A , 0xA8}, {0x068B , 0x44}, {0x068C , 0x08}, {0x068D , 0x08}, {0x068E , 0x00}, {0x068F , 0x00}, {0x0690 , 0x01}, {0x0691 , 0x00}, {0x0692 , 0x01}, {0x0693 , 0x00}, {0x0694 , 0x00}, {0x0695 , 0x00}, {0x0696 , 0x00}, {0x0697 , 0x00}, {0x0698 , 0x2A}, {0x0699 , 0x80}, {0x069A , 0x1F}, {0x069B , 0x00}, {0x069C , 0x02}, {0x069D , 0xF5}, {0x069E , 0x03}, {0x069F , 0x6D}, {0x06A0 , 0x0C}, {0x06A1 , 0xB8}, {0x06A2 , 0x0D}, {0x06A3 , 0x74}, {0x06A4 , 0x00}, {0x06A5 , 0x2F}, {0x06A6 , 0x00}, {0x06A7 , 0x2F}, {0x0F00 , 0x00}, {0x0F01 , 0x00}, {0x0100 , 0x01}, {0x0102 , 
0x02}, {0x0104 , 0x03}, {0x0101 , 0x02}, /////////////////////////// {0x020B , 0x48}, {0x020C , 0x64}, {0x040A , 0x40}, {0x040B , 0x33}, {0x0109 , 0x00}, {0x010A , 0x04}, {0x010B , 0x03}, #if 0 {0x0110 , 0x02}, {0x0111 , 0x80}, {0x0112 , 0x01}, {0x0113 , 0xe0}, #else {0x0110, 0x03}, {0x0111, 0x20}, {0x0112, 0x02}, {0x0113, 0x58}, #endif {0x0116 , 0x02}, {0x0118 , 0x40}, {0x0119 , 0x01}, {0x011a , 0x04}, {0x011B , 0x00}, {0x0313 , 0x35}, {0x0314 , 0x36}, {0x0315 , 0x16}, }; /* 1600X1200 UXGA */ static struct reginfo sensor_uxga[] = { {0x010c , 0x00}, {0x010d , 0x08}, {0x010e , 0x00}, {0x010f , 0x08}, {0x010a , 0x00}, {0x0110 , 0x06}, {0x0111 , 0x40}, {0x0112 , 0x04}, {0x0113 , 0xb0}, {0x0, 0x0}, }; /* 1280X1024 SXGA */ static struct reginfo sensor_sxga[] = { {0x010c , 0x00}, {0x010d , 0xa8}, {0x010e , 0x00}, {0x010f , 0x60}, {0x010a , 0x00}, {0x0110 , 0x05}, {0x0111 , 0x00}, {0x0112 , 0x04}, {0x0113 , 0x00}, {0x00, 0x00}, }; /* 800X600 SVGA*/ static struct reginfo sensor_svga[] = { #if 0 {0x0101, 0x00}, {0x0103, 0x00}, {0x0105, 0x00}, {0x0106, 0xF0}, {0x0107, 0x00}, {0x0108, 0x1C}, {0x0109, 0x01}, {0x010A, 0x00}, {0x010B, 0x00}, {0x010C, 0x00}, {0x010D, 0x08}, {0x010E, 0x00}, {0x010F, 0x08}, {0x0110, 0x06}, {0x0111, 0x40}, {0x0112, 0x04}, {0x0113, 0xB0}, {0x0114, 0x04}, {0x0115, 0x00}, {0x0116, 0x02}, {0x0117, 0x00}, {0x0118, 0x40}, {0x0119, 0x02}, {0x011A, 0x04}, {0x011B, 0x01}, {0x011C, 0x00}, {0x011D, 0x01}, {0x011E, 0x36}, {0x011F, 0x00}, {0x0120, 0x1C}, {0x0121, 0x00}, {0x0122, 0x04}, {0x0123, 0x00}, {0x0124, 0x00}, {0x0125, 0x00}, {0x0126, 0x00}, {0x0127, 0x00}, {0x0128, 0x00}, {0x0200, 0x1f}, {0x0201, 0x0c}, {0x0202, 0x38}, {0x0203, 0x00}, {0x0204, 0x03}, {0x0205, 0x1F}, {0x0206, 0x0B}, {0x0207, 0x20}, {0x0208, 0x00}, {0x0209, 0x2A}, {0x020A, 0x01}, {0x020B, 0x28}, {0x020C, 0x44}, {0x020D, 0xC8}, {0x020E, 0xBC}, {0x020F, 0x08}, {0x0210, 0xD6}, {0x0211, 0x00}, {0x0212, 0x20}, {0x0213, 0x81}, {0x0214, 0x15}, {0x0215, 0x00}, {0x0216, 0x00}, {0x0217, 0x00}, 
{0x0218, 0x46}, {0x0219, 0x30}, {0x021A, 0x03}, {0x021B, 0x28}, {0x021C, 0x02}, {0x021D, 0x60}, {0x021E, 0x00}, {0x021F, 0x00}, {0x0220, 0x08}, {0x0221, 0x08}, {0x0222, 0x04}, {0x0223, 0x00}, {0x0224, 0x1F}, {0x0225, 0x1E}, {0x0226, 0x18}, {0x0227, 0x1D}, {0x0228, 0x1F}, {0x0229, 0x1F}, {0x022A, 0x01}, {0x022B, 0x04}, {0x022C, 0x05}, {0x022D, 0x05}, {0x022E, 0x04}, {0x022F, 0x03}, {0x0230, 0x02}, {0x0231, 0x1F}, {0x0232, 0x1A}, {0x0233, 0x19}, {0x0234, 0x19}, {0x0235, 0x1B}, {0x0236, 0x1F}, {0x0237, 0x04}, {0x0238, 0xEE}, {0x0239, 0xFF}, {0x023A, 0x00}, {0x023B, 0x00}, {0x023C, 0x00}, {0x023D, 0x00}, {0x023E, 0x00}, {0x023F, 0x00}, {0x0240, 0x00}, {0x0241, 0x00}, {0x0242, 0x00}, {0x0243, 0x21}, {0x0244, 0x42}, {0x0245, 0x53}, {0x0246, 0x54}, {0x0247, 0x54}, {0x0248, 0x54}, {0x0249, 0x33}, {0x024A, 0x11}, {0x024B, 0x00}, {0x024C, 0x00}, {0x024D, 0xFF}, {0x024E, 0xEE}, {0x024F, 0xDD}, {0x0250, 0x00}, {0x0251, 0x00}, {0x0252, 0x00}, {0x0253, 0x00}, {0x0254, 0x00}, {0x0255, 0x00}, {0x0256, 0x00}, {0x0257, 0x00}, {0x0258, 0x00}, {0x0259, 0x00}, {0x025A, 0x00}, {0x025B, 0x00}, {0x025C, 0x00}, {0x025D, 0x00}, {0x025E, 0x00}, {0x025F, 0x00}, {0x0260, 0x00}, {0x0261, 0x00}, {0x0262, 0x00}, {0x0263, 0x00}, {0x0264, 0x00}, {0x0265, 0x00}, {0x0266, 0x00}, {0x0267, 0x00}, {0x0268, 0x8F}, {0x0269, 0xA3}, {0x026A, 0xB4}, {0x026B, 0x90}, {0x026C, 0x00}, {0x026D, 0xD0}, {0x026E, 0x60}, {0x026F, 0xA0}, {0x0270, 0x40}, {0x0300, 0x81}, {0x0301, 0x80}, {0x0302, 0x22}, {0x0303, 0x06}, {0x0304, 0x03}, {0x0305, 0x83}, {0x0306, 0x00}, {0x0307, 0x22}, {0x0308, 0x00}, {0x0309, 0x55}, {0x030A, 0x55}, {0x030B, 0x55}, {0x030C, 0x54}, {0x030D, 0x1F}, {0x030E, 0x0A}, {0x030F, 0x10}, {0x0310, 0x04}, {0x0311, 0xFF}, {0x0312, 0x08}, {0x0313, 0x35}, {0x0314, 0x36}, {0x0315, 0x15}, {0x0316, 0x26}, {0x0317, 0x02}, {0x0318, 0x08}, {0x0319, 0x0C}, {0x031A, 0x81}, {0x031B, 0x00}, {0x031C, 0x3D}, {0x031D, 0x00}, {0x031E, 0xF9}, {0x031F, 0x00}, {0x0320, 0x24}, {0x0321, 0x14}, {0x0322, 0x1A}, {0x0323, 0x24}, 
{0x0324, 0x08}, {0x0325, 0xF0}, {0x0326, 0x30}, {0x0327, 0x17}, {0x0328, 0x11}, {0x0329, 0x22}, {0x032A, 0x2F}, {0x032B, 0x21}, {0x032C, 0xDA}, {0x032D, 0x10}, {0x032E, 0xEA}, {0x032F, 0x18}, {0x0330, 0x29}, {0x0331, 0x25}, {0x0332, 0x12}, {0x0333, 0x0F}, {0x0334, 0xE0}, {0x0335, 0x13}, {0x0336, 0xFF}, {0x0337, 0x20}, {0x0338, 0x46}, {0x0339, 0x04}, {0x033A, 0x04}, {0x033B, 0xFF}, {0x033C, 0x01}, {0x033D, 0x00}, {0x033E, 0x03}, {0x033F, 0x28}, {0x0340, 0x02}, {0x0341, 0x60}, {0x0342, 0xAC}, {0x0343, 0x97}, {0x0344, 0x7F}, {0x0400, 0xE8}, {0x0401, 0x40}, {0x0402, 0x00}, {0x0403, 0x00}, {0x0404, 0xF8}, {0x0405, 0x03}, {0x0406, 0x03}, {0x0407, 0x85}, {0x0408, 0x44}, {0x0409, 0x1F}, {0x040A, 0x40}, {0x040B, 0x33}, {0x040C, 0xA0}, {0x040D, 0x00}, {0x040E, 0x00}, {0x040F, 0x00}, {0x0410, 0x0D}, {0x0411, 0x0D}, {0x0412, 0x0C}, {0x0413, 0x04}, {0x0414, 0x00}, {0x0415, 0x00}, {0x0416, 0x07}, {0x0417, 0x09}, {0x0418, 0x16}, {0x0419, 0x14}, {0x041A, 0x11}, {0x041B, 0x14}, {0x041C, 0x07}, {0x041D, 0x07}, {0x041E, 0x06}, {0x041F, 0x02}, {0x0420, 0x42}, {0x0421, 0x42}, {0x0422, 0x47}, {0x0423, 0x39}, {0x0424, 0x3E}, {0x0425, 0x4D}, {0x0426, 0x46}, {0x0427, 0x3A}, {0x0428, 0x21}, {0x0429, 0x21}, {0x042A, 0x26}, {0x042B, 0x1C}, {0x042C, 0x25}, {0x042D, 0x25}, {0x042E, 0x28}, {0x042F, 0x20}, {0x0430, 0x3E}, {0x0431, 0x3E}, {0x0432, 0x33}, {0x0433, 0x2E}, {0x0434, 0x54}, {0x0435, 0x53}, {0x0436, 0x3C}, {0x0437, 0x51}, {0x0438, 0x2B}, {0x0439, 0x2B}, {0x043A, 0x38}, {0x043B, 0x22}, {0x043C, 0x3B}, {0x043D, 0x3B}, {0x043E, 0x31}, {0x043F, 0x37}, {0x0440, 0x00}, {0x0441, 0x4B}, {0x0442, 0x00}, {0x0443, 0x00}, {0x0444, 0x31}, {0x0445, 0x00}, {0x0446, 0x00}, {0x0447, 0x00}, {0x0448, 0x00}, {0x0449, 0x00}, {0x044A, 0x00}, {0x044D, 0xE0}, {0x044E, 0x05}, {0x044F, 0x07}, {0x0450, 0x00}, {0x0451, 0x00}, {0x0452, 0x00}, {0x0453, 0x00}, {0x0454, 0x00}, {0x0455, 0x00}, {0x0456, 0x00}, {0x0457, 0x00}, {0x0458, 0x00}, {0x0459, 0x00}, {0x045A, 0x00}, {0x045B, 0x00}, {0x045C, 0x00}, {0x045D, 0x00}, 
{0x045E, 0x00}, {0x045F, 0x00}, {0x0460, 0x80}, {0x0461, 0x10}, {0x0462, 0x10}, {0x0463, 0x10}, {0x0464, 0x08}, {0x0465, 0x08}, {0x0466, 0x11}, {0x0467, 0x09}, {0x0468, 0x23}, {0x0469, 0x2A}, {0x046A, 0x2A}, {0x046B, 0x47}, {0x046C, 0x52}, {0x046D, 0x42}, {0x046E, 0x36}, {0x046F, 0x46}, {0x0470, 0x3A}, {0x0471, 0x32}, {0x0472, 0x32}, {0x0473, 0x38}, {0x0474, 0x3D}, {0x0475, 0x2F}, {0x0476, 0x29}, {0x0477, 0x48}, {0x0600, 0x00}, {0x0601, 0x24}, {0x0602, 0x45}, {0x0603, 0x0E}, {0x0604, 0x14}, {0x0605, 0x2F}, {0x0606, 0x01}, {0x0607, 0x0E}, {0x0608, 0x0E}, {0x0609, 0x37}, {0x060A, 0x18}, {0x060B, 0xA0}, {0x060C, 0x20}, {0x060D, 0x07}, {0x060E, 0x47}, {0x060F, 0x90}, {0x0610, 0x06}, {0x0611, 0x0C}, {0x0612, 0x28}, {0x0613, 0x13}, {0x0614, 0x0B}, {0x0615, 0x10}, {0x0616, 0x14}, {0x0617, 0x19}, {0x0618, 0x52}, {0x0619, 0xA0}, {0x061A, 0x11}, {0x061B, 0x33}, {0x061C, 0x56}, {0x061D, 0x20}, {0x061E, 0x28}, {0x061F, 0x2B}, {0x0620, 0x22}, {0x0621, 0x11}, {0x0622, 0x75}, {0x0623, 0x49}, {0x0624, 0x6E}, {0x0625, 0x80}, {0x0626, 0x02}, {0x0627, 0x0C}, {0x0628, 0x51}, {0x0629, 0x25}, {0x062A, 0x01}, {0x062B, 0x3D}, {0x062C, 0x04}, {0x062D, 0x01}, {0x062E, 0x0C}, {0x062F, 0x2C}, {0x0630, 0x0D}, {0x0631, 0x14}, {0x0632, 0x12}, {0x0633, 0x34}, {0x0634, 0x00}, {0x0635, 0x00}, {0x0636, 0x00}, {0x0637, 0xB1}, {0x0638, 0x22}, {0x0639, 0x32}, {0x063A, 0x0E}, {0x063B, 0x18}, {0x063C, 0x88}, {0x0640, 0xB2}, {0x0641, 0xC0}, {0x0642, 0x01}, {0x0643, 0x26}, {0x0644, 0x13}, {0x0645, 0x88}, {0x0646, 0x64}, {0x0647, 0x00}, {0x0681, 0x1B}, {0x0682, 0xA0}, {0x0683, 0x28}, {0x0684, 0x00}, {0x0685, 0xB0}, {0x0686, 0x6F}, {0x0687, 0x33}, {0x0688, 0x1F}, {0x0689, 0x44}, {0x068A, 0xA8}, {0x068B, 0x44}, {0x068C, 0x08}, {0x068D, 0x08}, {0x068E, 0x00}, {0x068F, 0x00}, {0x0690, 0x01}, {0x0691, 0x00}, {0x0692, 0x01}, {0x0693, 0x00}, {0x0694, 0x00}, {0x0695, 0x00}, {0x0696, 0x00}, {0x0697, 0x00}, {0x0698, 0x2A}, {0x0699, 0x80}, {0x069A, 0x1F}, {0x069B, 0x00}, {0x069C, 0x02}, {0x069D, 0xF5}, {0x069E, 0x03}, 
/* Tail of the preceding init-register table (array opens earlier in the
 * file): remaining tuning registers, then output-window/timing setup and
 * the {0,0} terminator that sensor_write_array() stops on.  Register
 * meanings come from the sensor datasheet, not from this file. */
	{0x069F, 0x6D}, {0x06A0, 0x0C}, {0x06A1, 0xB8}, {0x06A2, 0x0D},
	{0x06A3, 0x74}, {0x06A4, 0x00}, {0x06A5, 0x2F}, {0x06A6, 0x00},
	{0x06A7, 0x2F}, {0x0F00, 0x00}, {0x0F01, 0x00}, {0x0100, 0x01},
	{0x0102, 0x02}, {0x0104, 0x03},
#endif
	{0x020B, 0x48}, {0x020C, 0x64}, {0x040A, 0x40}, {0x040B, 0x33},
	{0x010c , 0x00}, {0x010d , 0x08}, {0x010e , 0x00}, {0x010f , 0x08},
	{0x010a , 0x00}, {0x0109, 0x00}, {0x010A, 0x04}, {0x010B, 0x03},
	{0x0110, 0x03}, {0x0111, 0x20}, {0x0112, 0x02}, {0x0113, 0x58},
	{0x0116, 0x02}, {0x0118, 0x40}, {0x0119, 0x02}, {0x011a, 0x04},
	{0x011B, 0x01},
	{0x0, 0x0},
};

/* 640X480 VGA */
static struct reginfo sensor_vga[] =
{
	{0x020B , 0x48}, {0x020C , 0x64}, {0x040A , 0x40}, {0x040B , 0x33},
	{0x0109 , 0x00}, {0x010A , 0x04}, {0x010B , 0x03}, {0x010c , 0x00},
	{0x010d , 0xa8}, {0x010e , 0x00}, {0x010f , 0x60}, {0x010a , 0x04},
#if 1
	/* presumably 0x0110/0x0111 = 0x0280 (640) and 0x0112/0x0113 = 0x01E0
	 * (480) output size -- confirm against the datasheet */
	{0x0110 , 0x02}, {0x0111 , 0x80}, {0x0112 , 0x01}, {0x0113 , 0xe0},
#else
	{0x0110, 0x03}, {0x0111, 0x20}, {0x0112, 0x02}, {0x0113, 0x58},
#endif
	{0x0116 , 0x02}, {0x0118 , 0x40}, {0x0119 , 0x01}, {0x011a , 0x04},
	{0x011B , 0x00}, {0x0313 , 0x35}, {0x0314 , 0x36}, {0x0315 , 0x16},
	{0x0, 0x0},
};

/* 352X288 CIF -- empty table: sensor_s_fmt() skips tables whose first reg
 * is 0, so CIF is effectively unsupported. */
static struct reginfo sensor_cif[] =
{
	{0x0, 0x0},
};

/* 320*240 QVGA -- empty (unsupported, see note above) */
static struct reginfo sensor_qvga[] =
{
	{0x0, 0x0},
};

/* 176X144 QCIF -- empty (unsupported, see note above) */
static struct reginfo sensor_qcif[] =
{
	{0x0, 0x0},
};
#endif
#if 0
/* 160X120 QQVGA -- compiled out, kept for reference */
static struct reginfo gt2005_qqvga[] =
{
	{0x300E, 0x34}, {0x3011, 0x01}, {0x3012, 0x10}, {0x302a, 0x02},
	{0x302b, 0xE6}, {0x306f, 0x14}, {0x3362, 0x90}, {0x3070, 0x5d},
	{0x3072, 0x5d}, {0x301c, 0x07}, {0x301d, 0x07}, {0x3020, 0x01},
	{0x3021, 0x18}, {0x3022, 0x00}, {0x3023, 0x06}, {0x3024, 0x06},
	{0x3025, 0x58}, {0x3026, 0x02}, {0x3027, 0x61}, {0x3088, 0x00},
	{0x3089, 0xa0}, {0x308a, 0x00}, {0x308b, 0x78}, {0x3316, 0x64},
	{0x3317, 0x25}, {0x3318, 0x80}, {0x3319, 0x08}, {0x331a, 0x0a},
	{0x331b, 0x07}, {0x331c, 0x80}, {0x331d, 0x38}, {0x3100, 0x00},
	{0x3302, 0x11},
	{0x0, 0x0},
};

/* NOTE(review): the gt2005_Sharpness* tables below have no {0,0}
 * terminator, and gt2005_Sharpness2 is declared [][3] with a flat
 * initializer -- harmless only because this region is inside #if 0. */
static struct reginfo gt2005_Sharpness_auto[] =
{
	{0x3306, 0x00},
};

static struct reginfo gt2005_Sharpness1[] =
{
	{0x3306, 0x08}, {0x3371, 0x00},
};

static struct reginfo gt2005_Sharpness2[][3] =
{
	//Sharpness 2
	{0x3306, 0x08}, {0x3371, 0x01},
};

static struct reginfo gt2005_Sharpness3[] =
{
	//default
	{0x3306, 0x08}, {0x332d, 0x02},
};

static struct reginfo gt2005_Sharpness4[]=
{
	//Sharpness 4
	{0x3306, 0x08}, {0x332d, 0x03},
};

static struct reginfo gt2005_Sharpness5[] =
{
	//Sharpness 5
	{0x3306, 0x08}, {0x332d, 0x04},
};
#endif

/* Pixel-order tables selected by sensor_s_fmt(); the distinguishing
 * register is commented out, so both are currently no-op tables. */
static struct reginfo sensor_ClrFmt_YUYV[]=
{
	//{0x3400, 0x00},
	{0x0000, 0x00}
};

static struct reginfo sensor_ClrFmt_UYVY[]=
{
	//{0x3400, 0x02},
	{0x0000, 0x00}
};

#if CONFIG_SENSOR_WhiteBalance
static struct reginfo sensor_WhiteB_Auto[]=
{
	{0x3306, 0x00},  //AWB auto, bit[1]:0,auto
	{0x0000, 0x00}
};
/* Cloudy Colour Temperature : 6500K - 8000K */
static struct reginfo sensor_WhiteB_Cloudy[]=
{
	{0x3306, 0x82},
	{0x3337, 0x68},
	{0x3338, 0x40},
	{0x3339, 0x4e},
	{0x0000, 0x00}
};
/* ClearDay Colour Temperature : 5000K - 6500K */
static struct reginfo sensor_WhiteB_ClearDay[]=
{
	//Sunny
	{0x3306, 0x02},  //AWB off
	{0x3337, 0x5e},
	{0x3338, 0x40},
	{0x3339, 0x46},
	{0x0000, 0x00}
};
/* Office Colour Temperature : 3500K - 5000K */
static struct reginfo sensor_WhiteB_TungstenLamp1[]=
{
	//Office
	{0x3306, 0x02},
	{0x3337, 0x52},
	{0x3338, 0x40},
	{0x3339, 0x58},
	{0x0000, 0x00}
};
/* Home Colour Temperature : 2500K - 3500K */
static struct reginfo sensor_WhiteB_TungstenLamp2[]=
{
	//Home
	{0x3306, 0x02},
	{0x3337, 0x44},
	{0x3338, 0x40},
	{0x3339, 0x70},
	{0x0000, 0x00}
};
/* NULL-terminated list; order matches the V4L2_CID_DO_WHITE_BALANCE menu
 * (auto, incandescent, fluorescent, daylight, cloudy-daylight). */
static struct reginfo *sensor_WhiteBalanceSeqe[] = {sensor_WhiteB_Auto, sensor_WhiteB_TungstenLamp1,sensor_WhiteB_TungstenLamp2,
	sensor_WhiteB_ClearDay, sensor_WhiteB_Cloudy,NULL,
};
#endif

#if CONFIG_SENSOR_Brightness
static struct reginfo sensor_Brightness0[]=
{
	// Brightness -2
	{0x3301, 0xff},//bit[7]:1, enable SDE
	{0x3391, 0x04},
	{0x3390, 0x49},
	{0x339a, 0x20},
	{0x0000, 0x00}
};

static struct reginfo
sensor_Brightness1[]=
{
	// Brightness -1
	{0x3301, 0xff},//bit[7]:1, enable SDE
	{0x3391, 0x04},
	{0x3390, 0x49},
	{0x339a, 0x10},
	{0x0000, 0x00}
};

static struct reginfo sensor_Brightness2[]=
{
	// Brightness 0
	{0x3301, 0xff},//bit[7]:1, enable SDE
	{0x3391, 0x00},
	{0x3390, 0x41},
	{0x339a, 0x00},
	{0x0000, 0x00}
};

static struct reginfo sensor_Brightness3[]=
{
	// Brightness +1
	{0x3301, 0xff},//bit[7]:1, enable SDE
	{0x3391, 0x04},
	{0x3390, 0x41},
	{0x339a, 0x10},
	{0x0000, 0x00}
};

static struct reginfo sensor_Brightness4[]=
{
	// Brightness +2
	{0x3301, 0xff},//bit[7]:1, enable SDE
	{0x3391, 0x04},
	{0x3390, 0x41},
	{0x339a, 0x20},
	{0x0000, 0x00}
};

static struct reginfo sensor_Brightness5[]=
{
	// Brightness +3
	{0x3301, 0xff},//bit[7]:1, enable SDE
	{0x3391, 0x04}, //bit[2] enable
	{0x3390, 0x41}, //bit[3] sign of brightness
	{0x339a, 0x30},
	{0x0000, 0x00}
};
/* Brightness tables, NULL-terminated; presumably indexed by the
 * V4L2_CID_BRIGHTNESS value offset into 0..5 (control range is -3..2) --
 * verify against the (not visible here) sensor_set_brightness(). */
static struct reginfo *sensor_BrightnessSeqe[] = {sensor_Brightness0, sensor_Brightness1, sensor_Brightness2, sensor_Brightness3,
	sensor_Brightness4, sensor_Brightness5,NULL,
};
#endif

#if CONFIG_SENSOR_Effect
static struct reginfo sensor_Effect_Normal[] =
{
	{0x3391, 0x00},
	{0x0000, 0x00}
};

static struct reginfo sensor_Effect_WandB[] =
{
	{0x3391, 0x20},
	{0x0000, 0x00}
};

static struct reginfo sensor_Effect_Sepia[] =
{
	{0x3391, 0x18},
	{0x3396, 0x40},
	{0x3397, 0xa6},
	{0x0000, 0x00}
};

static struct reginfo sensor_Effect_Negative[] =
{
	//Negative
	{0x3391, 0x40}, //bit[6] negative
	{0x0000, 0x00}
};

static struct reginfo sensor_Effect_Bluish[] =
{
	// Bluish
	{0x3391, 0x18},
	{0x3396, 0xa0},
	{0x3397, 0x40},
	{0x0000, 0x00}
};

static struct reginfo sensor_Effect_Green[] =
{
	// Greenish
	{0x3391, 0x18},
	{0x3396, 0x60},
	{0x3397, 0x60},
	{0x0000, 0x00}
};
/* NULL-terminated; order matches the V4L2_CID_EFFECT menu entries
 * (none, mono, negative, sepia, posterize->Bluish, aqua->Green). */
static struct reginfo *sensor_EffectSeqe[] = {sensor_Effect_Normal, sensor_Effect_WandB, sensor_Effect_Negative,sensor_Effect_Sepia, sensor_Effect_Bluish,
	sensor_Effect_Green,NULL,
};
#endif

#if CONFIG_SENSOR_Exposure
static struct reginfo sensor_Exposure0[]=
{
	//-3
	{0x3047, 0x05},
	{0x3018, 0x40},
	{0x3019, 0x30},
	{0x301a, 0x71},
	{0x0000, 0x00}
};

static struct reginfo sensor_Exposure1[]=
{
	//-2
	{0x3047, 0x05},
	{0x3018, 0x5a},
	{0x3019, 0x4a},
	{0x301a, 0xc2},
	{0x0000, 0x00}
};

static struct reginfo sensor_Exposure2[]=
{
	//-0.3EV
	{0x3047, 0x05},
	{0x3018, 0x6a},
	{0x3019, 0x5a},
	{0x301a, 0xd4},
	{0x0000, 0x00}
};

static struct reginfo sensor_Exposure3[]=
{
	//default
	{0x3047, 0x05},
	{0x3018, 0x78},
	{0x3019, 0x68},
	{0x301a, 0xd4},
	{0x0000, 0x00}
};

static struct reginfo sensor_Exposure4[]=
{
	// 1
	{0x3047, 0x05},
	{0x3018, 0x88},
	{0x3019, 0x78},
	{0x301a, 0xd5},
	{0x0000, 0x00}
};

static struct reginfo sensor_Exposure5[]=
{
	// 2
	{0x3047, 0x05},
	{0x3018, 0xa8},
	{0x3019, 0x98},
	{0x301a, 0xe6},
	{0x0000, 0x00}
};

static struct reginfo sensor_Exposure6[]=
{
	// 3
	{0x3047, 0x05},
	{0x3018, 0xc8},
	{0x3019, 0xb8},
	{0x301a, 0xf7},
	{0x0000, 0x00}
};
/* NULL-terminated; indexed by V4L2_CID_EXPOSURE (0..6). */
static struct reginfo *sensor_ExposureSeqe[] = {sensor_Exposure0, sensor_Exposure1, sensor_Exposure2, sensor_Exposure3, sensor_Exposure4, sensor_Exposure5,sensor_Exposure6,NULL,
};
#endif

#if CONFIG_SENSOR_Saturation
static struct reginfo sensor_Saturation0[]=
{
	{0x3301, 0xff},//bit[7]:1, enable SDE
	{0x3391, 0x02},
	{0x3394, 0x40},
	{0x3395, 0x40},
	{0x0000, 0x00}
};

static struct reginfo sensor_Saturation1[]=
{
	{0x3301, 0xff},//bit[7]:1, enable SDE
	{0x3391, 0x02},
	{0x3394, 0x50},
	{0x3395, 0x50},
	{0x0000, 0x00}
};

static struct reginfo sensor_Saturation2[]=
{
	{0x3301, 0xff},//bit[7]:1, enable SDE
	{0x3391, 0x02}, //enable color saturation
	{0x3394, 0x70},
	{0x3395, 0x70},
	{0x0000, 0x00}
};
/* NULL-terminated; indexed by V4L2_CID_SATURATION (0..2). */
static struct reginfo *sensor_SaturationSeqe[] = {sensor_Saturation0, sensor_Saturation1, sensor_Saturation2, NULL,};
#endif

#if CONFIG_SENSOR_Contrast
static struct reginfo sensor_Contrast0[]=
{
	//Contrast -3
	{0x3301, 0xff},//bit[7]:1, enable SDE
	{0x3391, 0x04},
	{0x3390, 0x45},
	{0x3398, 0x18},
	{0x3399, 0x18},
	{0x0000, 0x00}
};

static struct reginfo sensor_Contrast1[]=
{
	//Contrast -2
	{0x3301, 0xff},//bit[7]:1, enable SDE
	{0x3391, 0x04},
	{0x3390, 0x45},
	{0x3398, 0x18},
	{0x3399, 0x18},
	{0x0000, 0x00}
};

static struct reginfo sensor_Contrast2[]=
{
	// Contrast -1
	{0x3301, 0xff},//bit[7]:1, enable SDE
	{0x3391, 0x04},
	{0x3390, 0x45},
	{0x3398, 0x1c},
	{0x3399, 0x1c},
	{0x0000, 0x00}
};

static struct reginfo sensor_Contrast3[]=
{
	//Contrast 0
	{0x3301, 0xff},//bit[7]:1, enable SDE
	{0x3391, 0x00},
	{0x3390, 0x41},
	{0x3398, 0x20},
	{0x3399, 0x20},
	{0x0000, 0x00}
};

static struct reginfo sensor_Contrast4[]=
{
	//Contrast +1
	{0x3301, 0xff},//bit[7]:1, enable SDE
	{0x3391, 0x04},
	{0x3390, 0x45},
	{0x3398, 0x24},
	{0x3399, 0x24},
	{0x0000, 0x00}
};

static struct reginfo sensor_Contrast5[]=
{
	//Contrast +2
	{0x3301, 0xff},//bit[7]:1, enable SDE
	{0x3391, 0x04},
	{0x3390, 0x45},
	{0x3398, 0x28},
	{0x3399, 0x28},
	{0x0000, 0x00}
};

static struct reginfo sensor_Contrast6[]=
{
	//Contrast +3
	{0x3301, 0xff},//bit[7]:1, enable SDE
	{0x3391, 0x04}, //bit[2] enable contrast/brightness
	{0x3390, 0x45}, //bit[2] Yoffset sign
	{0x3398, 0x2c},
	{0x3399, 0x2c},
	{0x0000, 0x00}
};
/* NULL-terminated; 7 levels for V4L2_CID_CONTRAST (range -3..3). */
static struct reginfo *sensor_ContrastSeqe[] = {sensor_Contrast0, sensor_Contrast1, sensor_Contrast2, sensor_Contrast3, sensor_Contrast4, sensor_Contrast5,
	sensor_Contrast6, NULL,
};
#endif

#if CONFIG_SENSOR_Mirror
static struct reginfo sensor_MirrorOn[]=
{
	{0x3069, 0x84},
	{0x307c, 0x13},
	{0x3087, 0x02},
	{0x0000, 0x00}
};

static struct reginfo sensor_MirrorOff[]=
{
	{0x3069, 0x84},
	{0x307c, 0x10},
	{0x3087, 0x02},
	{0x0000, 0x00}
};
/* Index 0 = off, 1 = on (V4L2_CID_HFLIP boolean); NULL-terminated. */
static struct reginfo *sensor_MirrorSeqe[] = {sensor_MirrorOff, sensor_MirrorOn,NULL,};
#endif

#if CONFIG_SENSOR_Flip
static struct reginfo sensor_FlipOn[]=
{
	{0x300e, 0x34},
	{0x300f, 0xa6},
	{0x3010, 0x81},
	{0x3082, 0x01},
	{0x30f4, 0x01},
	{0x3090, 0x3b},
	{0x3091, 0xc0},
	{0x30ac, 0x42},
	{0x0000, 0x00}
};

static struct reginfo sensor_FlipOff[]=
{
	{0x300e, 0x34},
	{0x300f, 0xa6},
	{0x3010, 0x81},
	{0x3082, 0x01},
	{0x30f4, 0x01},
	{0x3090, 0x33},
	{0x3091, 0xc0},
	{0x30ac, 0x42},
	{0x0000, 0x00}
};
/* Index 0 = off, 1 = on (V4L2_CID_VFLIP boolean); NULL-terminated. */
static struct reginfo *sensor_FlipSeqe[] =
{sensor_FlipOff, sensor_FlipOn,NULL,};
#endif

#if CONFIG_SENSOR_Scene
static struct reginfo sensor_SceneAuto[] =
{
#if 0			/* ddl@rock-chips.com : */
	{0x3014, 0x04},
	{0x3015, 0x00},
	{0x302e, 0x00},
	{0x302d, 0x00},
	{0x0000, 0x00}
#else
	{0x3014, 0x84},
	{0x3015, 0x02},
	{0x302e, 0x00},
	{0x302d, 0x00},
	{0x0000, 0x00}
#endif
};

static struct reginfo sensor_SceneNight[] =
{
#if 1
	//30fps ~ 5fps night mode for 60/50Hz light environment, 24Mhz clock input,36Mzh pclk
	{0x300e, 0x34},
	{0x3011, 0x00},
	{0x302c, 0x00},
	{0x3071, 0x00},
	{0x3070, 0xb9},
	{0x301c, 0x02},
	{0x3073, 0x00},
	{0x3072, 0x9a},
	{0x301d, 0x03},
	{0x3014, 0x0c},
	{0x3015, 0x50},//add 5 dummy frame
	{0x302e, 0x00},
	{0x302d, 0x00},
	{0x0000, 0x00}
#else
	//15fps ~ 5fps night mode for 60/50Hz light environment, 24Mhz clock input,18Mhz pclk
	/* NOTE(review): this disabled branch has no {0,0} terminator; add one
	 * before ever enabling it, or sensor_write_array() will run past the
	 * end of the table. */
	{0x300e, 0x34},
	{0x3011, 0x01},
	{0x302c, 0x00},
	{0x3071, 0x00},
	{0x3070, 0x5d},
	{0x301c, 0x05},
	{0x3073, 0x00},
	{0x3072, 0x4d},
	{0x301d, 0x07},
	{0x3014, 0x0c},
	{0x3015, 0x50},
	{0x302e, 0x00},
	{0x302d, 0x00},
#endif
};
/* Index 0 = auto, 1 = night (V4L2_CID_SCENE menu); NULL-terminated. */
static struct reginfo *sensor_SceneSeqe[] = {sensor_SceneAuto, sensor_SceneNight,NULL,};
#endif

#if CONFIG_SENSOR_DigitalZoom
/* All zoom tables are empty -- digital zoom steps are currently no-ops. */
static struct reginfo sensor_Zoom0[] =
{
	{0x0, 0x0},
};

static struct reginfo sensor_Zoom1[] =
{
	{0x0, 0x0},
};

static struct reginfo sensor_Zoom2[] =
{
	{0x0, 0x0},
};

static struct reginfo sensor_Zoom3[] =
{
	{0x0, 0x0},
};
static struct reginfo *sensor_ZoomSeqe[] = {sensor_Zoom0, sensor_Zoom1, sensor_Zoom2, sensor_Zoom3, NULL,};
#endif

/* Menu entries exported via soc_camera_ops for the menu-type controls
 * declared in sensor_controls[]. */
static const struct v4l2_querymenu sensor_menus[] =
{
	#if CONFIG_SENSOR_WhiteBalance
	{ .id = V4L2_CID_DO_WHITE_BALANCE,  .index = 0,  .name = "auto",  .reserved = 0, }, { .id = V4L2_CID_DO_WHITE_BALANCE,  .index = 1, .name = "incandescent",  .reserved = 0,},
	{ .id = V4L2_CID_DO_WHITE_BALANCE,  .index = 2,  .name = "fluorescent", .reserved = 0,}, { .id = V4L2_CID_DO_WHITE_BALANCE, .index = 3,  .name = "daylight", .reserved = 0,},
	{ .id = V4L2_CID_DO_WHITE_BALANCE,  .index = 4,  .name = "cloudy-daylight", .reserved = 0,},
	#endif
	#if CONFIG_SENSOR_Effect
	{ .id = V4L2_CID_EFFECT,  .index = 0,  .name = "none",  .reserved = 0, }, { .id = V4L2_CID_EFFECT,  .index = 1, .name = "mono",  .reserved = 0,},
	{ .id = V4L2_CID_EFFECT,  .index = 2,  .name = "negative", .reserved = 0,}, { .id = V4L2_CID_EFFECT, .index = 3,  .name = "sepia", .reserved = 0,},
	{ .id = V4L2_CID_EFFECT, .index = 4, .name = "posterize", .reserved = 0,} ,{ .id = V4L2_CID_EFFECT, .index = 5,  .name = "aqua", .reserved = 0,},
	#endif

	#if CONFIG_SENSOR_Scene
	{ .id = V4L2_CID_SCENE,  .index = 0, .name = "auto", .reserved = 0,} ,{ .id = V4L2_CID_SCENE,  .index = 1,  .name = "night", .reserved = 0,},
	#endif

	#if CONFIG_SENSOR_Flash
	{ .id = V4L2_CID_FLASH,  .index = 0,  .name = "off",  .reserved = 0, }, { .id = V4L2_CID_FLASH,  .index = 1, .name = "auto",  .reserved = 0,},
	{ .id = V4L2_CID_FLASH,  .index = 2,  .name = "on", .reserved = 0,}, { .id = V4L2_CID_FLASH, .index = 3,  .name = "torch", .reserved = 0,},
	#endif
};

/* Control descriptors exported via soc_camera_ops; each control's range
 * matches the corresponding sensor_*Seqe[] register-table list above. */
static struct v4l2_queryctrl sensor_controls[] =
{
	#if CONFIG_SENSOR_WhiteBalance
	{
		.id		= V4L2_CID_DO_WHITE_BALANCE,
		.type		= V4L2_CTRL_TYPE_MENU,
		.name		= "White Balance Control",
		.minimum	= 0,
		.maximum	= 4,
		.step		= 1,
		.default_value = 0,
	},
	#endif

	#if CONFIG_SENSOR_Brightness
	{
		.id		= V4L2_CID_BRIGHTNESS,
		.type		= V4L2_CTRL_TYPE_INTEGER,
		.name		= "Brightness Control",
		.minimum	= -3,
		.maximum	= 2,
		.step		= 1,
		.default_value = 0,
	},
	#endif

	#if CONFIG_SENSOR_Effect
	{
		.id		= V4L2_CID_EFFECT,
		.type		= V4L2_CTRL_TYPE_MENU,
		.name		= "Effect Control",
		.minimum	= 0,
		.maximum	= 5,
		.step		= 1,
		.default_value = 0,
	},
	#endif

	#if CONFIG_SENSOR_Exposure
	{
		.id		= V4L2_CID_EXPOSURE,
		.type		= V4L2_CTRL_TYPE_INTEGER,
		.name		= "Exposure Control",
		.minimum	= 0,
		.maximum	= 6,
		.step		= 1,
		.default_value = 0,
	},
	#endif

	#if CONFIG_SENSOR_Saturation
	{
		.id		= V4L2_CID_SATURATION,
		.type		= V4L2_CTRL_TYPE_INTEGER,
		.name		= "Saturation Control",
		.minimum	= 0,
		.maximum	= 2,
		.step		= 1,
		.default_value = 0,
	},
	#endif

	#if CONFIG_SENSOR_Contrast
	{
		.id		= V4L2_CID_CONTRAST,
		.type		= V4L2_CTRL_TYPE_INTEGER,
		.name		= "Contrast Control",
		.minimum	= -3,
		.maximum	= 3,
		.step		= 1,
		.default_value = 0,
	},
	#endif

	#if CONFIG_SENSOR_Mirror
	{
		.id		= V4L2_CID_HFLIP,
		.type		= V4L2_CTRL_TYPE_BOOLEAN,
		.name		= "Mirror Control",
		.minimum	= 0,
		.maximum	= 1,
		.step		= 1,
		.default_value = 1,
	},
	#endif

	#if CONFIG_SENSOR_Flip
	{
		.id		= V4L2_CID_VFLIP,
		.type		= V4L2_CTRL_TYPE_BOOLEAN,
		.name		= "Flip Control",
		.minimum	= 0,
		.maximum	= 1,
		.step		= 1,
		.default_value = 1,
	},
	#endif

	#if CONFIG_SENSOR_Scene
	{
		.id		= V4L2_CID_SCENE,
		.type		= V4L2_CTRL_TYPE_MENU,
		.name		= "Scene Control",
		.minimum	= 0,
		.maximum	= 1,
		.step		= 1,
		.default_value = 0,
	},
	#endif

	#if CONFIG_SENSOR_DigitalZoom
	{
		.id		= V4L2_CID_ZOOM_RELATIVE,
		.type		= V4L2_CTRL_TYPE_INTEGER,
		.name		= "DigitalZoom Control",
		.minimum	= -1,
		.maximum	= 1,
		.step		= 1,
		.default_value = 0,
	}, {
		.id		= V4L2_CID_ZOOM_ABSOLUTE,
		.type		= V4L2_CTRL_TYPE_INTEGER,
		.name		= "DigitalZoom Control",
		.minimum	= 0,
		.maximum	= 3,
		.step		= 1,
		.default_value = 0,
	},
	#endif

	#if CONFIG_SENSOR_Focus
	{
		.id		= V4L2_CID_FOCUS_RELATIVE,
		.type		= V4L2_CTRL_TYPE_INTEGER,
		.name		= "Focus Control",
		.minimum	= -1,
		.maximum	= 1,
		.step		= 1,
		.default_value = 0,
	}, {
		.id		= V4L2_CID_FOCUS_ABSOLUTE,
		.type		= V4L2_CTRL_TYPE_INTEGER,
		.name		= "Focus Control",
		.minimum	= 0,
		.maximum	= 255,
		.step		= 1,
		.default_value = 125,
	},
	#endif

	#if CONFIG_SENSOR_Flash
	{
		.id		= V4L2_CID_FLASH,
		.type		= V4L2_CTRL_TYPE_MENU,
		.name		= "Flash Control",
		.minimum	= 0,
		.maximum	= 3,
		.step		= 1,
		.default_value = 0,
	},
	#endif
};

/* Forward declarations for the subdev/soc_camera callbacks defined below. */
static int sensor_probe(struct i2c_client *client, const struct i2c_device_id *did);
static int sensor_video_probe(struct soc_camera_device *icd, struct i2c_client *client);
static int sensor_g_control(struct v4l2_subdev *sd, struct v4l2_control *ctrl);
static int sensor_s_control(struct v4l2_subdev *sd, struct v4l2_control *ctrl);
static int sensor_g_ext_controls(struct v4l2_subdev *sd, struct v4l2_ext_controls *ext_ctrl);
static int sensor_s_ext_controls(struct v4l2_subdev *sd, struct
v4l2_ext_controls *ext_ctrl);
static int sensor_suspend(struct soc_camera_device *icd, pm_message_t pm_msg);
static int sensor_resume(struct soc_camera_device *icd);
static int sensor_set_bus_param(struct soc_camera_device *icd,unsigned long flags);
static unsigned long sensor_query_bus_param(struct soc_camera_device *icd);
static int sensor_set_effect(struct soc_camera_device *icd, const struct v4l2_queryctrl *qctrl, int value);
static int sensor_set_whiteBalance(struct soc_camera_device *icd, const struct v4l2_queryctrl *qctrl, int value);
static int sensor_deactivate(struct i2c_client *client);

/* Callbacks and control tables handed to the soc-camera core. */
static struct soc_camera_ops sensor_ops =
{
	.suspend		= sensor_suspend,
	.resume			= sensor_resume,
	.set_bus_param		= sensor_set_bus_param,
	.query_bus_param	= sensor_query_bus_param,
	.controls		= sensor_controls,
	.menus			= sensor_menus,
	.num_controls		= ARRAY_SIZE(sensor_controls),
	.num_menus		= ARRAY_SIZE(sensor_menus),
};

/* only one fixed colorspace per pixelcode */
struct sensor_datafmt
{
	enum v4l2_mbus_pixelcode code;
	enum v4l2_colorspace colorspace;
};

/* Find a data format by a pixel code in an array; returns NULL when the
 * code is not present. */
static const struct sensor_datafmt *sensor_find_datafmt(
	enum v4l2_mbus_pixelcode code, const struct sensor_datafmt *fmt,
	int n)
{
	int i;
	for (i = 0; i < n; i++)
		if (fmt[i].code == code)
			return fmt + i;
	return NULL;
}

/* Media-bus formats this driver can produce (both YUV 4:2:2, 8-bit). */
static const struct sensor_datafmt sensor_colour_fmts[] = {
	{V4L2_MBUS_FMT_UYVY8_2X8, V4L2_COLORSPACE_JPEG},
	{V4L2_MBUS_FMT_YUYV8_2X8, V4L2_COLORSPACE_JPEG}
};

/* Cached per-device state: last applied control values plus the current
 * window-sequence table and format. */
typedef struct sensor_info_priv_s
{
	int whiteBalance;
	int brightness;
	int contrast;
	int saturation;
	int effect;
	int scene;
	int digitalzoom;
	int focus;
	int flash;
	int exposure;
	bool snap2preview;
	bool video2preview;
	unsigned char mirror;		/* HFLIP */
	unsigned char flip;		/* VFLIP */
	unsigned int winseqe_cur_addr;	/* current reginfo table, stored as an int */
	struct sensor_datafmt fmt;
	unsigned int funmodule_state;	/* bit flags, e.g. SENSOR_INIT_IS_OK */
} sensor_info_priv_t;

struct sensor
{
	struct v4l2_subdev subdev;
	struct i2c_client *client;
	sensor_info_priv_t info_priv;
	int model;	/* V4L2_IDENT_OV* codes from v4l2-chip-ident.h */
#if CONFIG_SENSOR_I2C_NOSCHED
	atomic_t tasklock_cnt;
#endif
	struct rk29camera_platform_data *sensor_io_request;
	struct rk29camera_gpio_res *sensor_gpio_res;
};

/* Recover the driver object wrapping the v4l2_subdev kept in i2c drvdata. */
static struct sensor* to_sensor(const struct i2c_client *client)
{
	return container_of(i2c_get_clientdata(client), struct sensor, subdev);
}

/* With CONFIG_SENSOR_I2C_NOSCHED: take (lock=1) or release (lock=0) a
 * preemption-disabled section around i2c traffic.  Waits up to 3x35ms for
 * the adapter's bus lock to be free; returns -1 if it never frees, else 0.
 * Nesting is counted in sensor->tasklock_cnt.  Without that config the
 * function is a no-op returning 0. */
static int sensor_task_lock(struct i2c_client *client, int lock)
{
#if CONFIG_SENSOR_I2C_NOSCHED
	int cnt = 3;
	struct sensor *sensor = to_sensor(client);

	if (lock) {
		if (atomic_read(&sensor->tasklock_cnt) == 0) {
			while ((atomic_read(&client->adapter->bus_lock.count) < 1) && (cnt>0)) {
				SENSOR_TR("\n %s will obtain i2c in atomic, but i2c bus is locked! Wait...\n",SENSOR_NAME_STRING());
				msleep(35);
				cnt--;
			}
			if ((atomic_read(&client->adapter->bus_lock.count) < 1) && (cnt<=0)) {
				SENSOR_TR("\n %s obtain i2c fail in atomic!!\n",SENSOR_NAME_STRING());
				goto sensor_task_lock_err;
			}
			preempt_disable();
		}
		atomic_add(1, &sensor->tasklock_cnt);
	} else {
		if (atomic_read(&sensor->tasklock_cnt) > 0) {
			atomic_sub(1, &sensor->tasklock_cnt);
			if (atomic_read(&sensor->tasklock_cnt) == 0)
				preempt_enable();
		}
	}
	return 0;
sensor_task_lock_err:
	return -1;
#else
	return 0;
#endif
}

/* sensor register write: 16-bit register address (big-endian) + one data
 * byte in a single i2c message; retried up to 3 times with a 10us pause.
 * Returns 0 on success or the negative i2c_transfer() result. */
static int sensor_write(struct i2c_client *client, u16 reg, u8 val)
{
	int err,cnt;
	u8 buf[3];
	struct i2c_msg msg[1];

	buf[0] = reg >> 8;
	buf[1] = reg & 0xFF;
	buf[2] = val;

	msg->addr = client->addr;
	msg->flags = client->flags;
	msg->buf = buf;
	msg->len = sizeof(buf);
	msg->scl_rate = CONFIG_SENSOR_I2C_SPEED;	/* ddl@rock-chips.com : 100kHz */
	msg->read_type = 0;	/* fpga i2c:0==I2C_NORMAL : direct use number not enum for don't want include spi_fpga.h */

	cnt = 3;
	err = -EAGAIN;

	while ((cnt-- > 0) && (err < 0)) {	/* ddl@rock-chips.com : Transfer again if transent is failed */
		err = i2c_transfer(client->adapter, msg, 1);
		if (err >= 0) {
			return 0;
		} else {
			SENSOR_TR("\n %s write reg(0x%x, val:0x%x) failed, try to write again!\n",SENSOR_NAME_STRING(),reg,
val); udelay(10); } } return err; } /* sensor register read */ static int sensor_read(struct i2c_client *client, u16 reg, u8 *val) { int err,cnt; u8 buf[2]; struct i2c_msg msg[2]; buf[0] = reg >> 8; buf[1] = reg & 0xFF; msg[0].addr = client->addr; msg[0].flags = client->flags; msg[0].buf = buf; msg[0].len = sizeof(buf); msg[0].scl_rate = CONFIG_SENSOR_I2C_SPEED; /* ddl@rock-chips.com : 100kHz */ msg[0].read_type = 2; /* fpga i2c:0==I2C_NO_STOP : direct use number not enum for don't want include spi_fpga.h */ msg[1].addr = client->addr; msg[1].flags = client->flags|I2C_M_RD; msg[1].buf = buf; msg[1].len = 1; msg[1].scl_rate = CONFIG_SENSOR_I2C_SPEED; /* ddl@rock-chips.com : 100kHz */ msg[1].read_type = 2; /* fpga i2c:0==I2C_NO_STOP : direct use number not enum for don't want include spi_fpga.h */ cnt = 3; err = -EAGAIN; while ((cnt-- > 0) && (err < 0)) { /* ddl@rock-chips.com : Transfer again if transent is failed */ err = i2c_transfer(client->adapter, msg, 2); if (err >= 0) { *val = buf[0]; return 0; } else { SENSOR_TR("\n %s read reg(0x%x val:0x%x) failed, try to read again! 
\n",SENSOR_NAME_STRING(),reg, *val); udelay(10); } } return err; } /* write a array of registers */ static int sensor_write_array(struct i2c_client *client, struct reginfo *regarray) { int err = 0, cnt; int i = 0; #if CONFIG_SENSOR_I2C_RDWRCHK char valchk; #endif cnt = 0; if (sensor_task_lock(client, 1) < 0) goto sensor_write_array_end; while (regarray[i].reg != 0) { err = sensor_write(client, regarray[i].reg, regarray[i].val); if (err < 0) { if (cnt-- > 0) { SENSOR_TR("%s..write failed current reg:0x%x, Write array again !\n", SENSOR_NAME_STRING(),regarray[i].reg); i = 0; continue; } else { SENSOR_TR("%s..write array failed!!!\n", SENSOR_NAME_STRING()); err = -EPERM; goto sensor_write_array_end; } } else { #if CONFIG_SENSOR_I2C_RDWRCHK sensor_read(client, regarray[i].reg, &valchk); if (valchk != regarray[i].val) SENSOR_TR("%s Reg:0x%x write(0x%x, 0x%x) fail\n",SENSOR_NAME_STRING(), regarray[i].reg, regarray[i].val, valchk); #endif } i++; } sensor_write_array_end: sensor_task_lock(client,0); return err; } #if CONFIG_SENSOR_I2C_RDWRCHK static int sensor_check_array(struct i2c_client *client, struct reginfo *regarray) { int ret; int i = 0,j=0; u8 value; SENSOR_DG("%s >>>>>>>>>>>>>>>>>>>>>>\n",__FUNCTION__); while(regarray[i].reg != 0) { ret = sensor_read(client,regarray[i].reg,&value); if(ret !=0) { SENSOR_TR("read value failed\n"); } if(regarray[i].val != value) { SENSOR_DG("%s reg[0x%x] check err,writte :0x%x read:0x%x\n",__FUNCTION__,regarray[i].reg,regarray[i].val,value); } else j++; i++; } if(i==j) SENSOR_DG("%s check success\n",__FUNCTION__); return 0; } #endif static int sensor_ioctrl(struct soc_camera_device *icd,enum rk29sensor_power_cmd cmd, int on) { struct soc_camera_link *icl = to_soc_camera_link(icd); int ret = 0; SENSOR_DG("%s %s cmd(%d) on(%d)\n",SENSOR_NAME_STRING(),__FUNCTION__,cmd,on); switch (cmd) { case Sensor_PowerDown: { if (icl->powerdown) { ret = icl->powerdown(icd->pdev, on); if (ret == RK29_CAM_IO_SUCCESS) { if (on == 0) { mdelay(2); if 
(icl->reset) icl->reset(icd->pdev); } } else if (ret == RK29_CAM_EIO_REQUESTFAIL) { ret = -ENODEV; goto sensor_power_end; } } break; } case Sensor_Flash: { struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); struct sensor *sensor = to_sensor(client); if (sensor->sensor_io_request && sensor->sensor_io_request->sensor_ioctrl) { sensor->sensor_io_request->sensor_ioctrl(icd->pdev,Cam_Flash, on); if(on){ //flash off after 2 secs hrtimer_cancel(&(flash_off_timer.timer)); hrtimer_start(&(flash_off_timer.timer),ktime_set(0, 800*1000*1000),HRTIMER_MODE_REL); } } break; } default: { SENSOR_TR("%s %s cmd(0x%x) is unknown!",SENSOR_NAME_STRING(),__FUNCTION__,cmd); break; } } sensor_power_end: return ret; } static enum hrtimer_restart flash_off_func(struct hrtimer *timer){ struct flash_timer *fps_timer = container_of(timer, struct flash_timer, timer); sensor_ioctrl(fps_timer->icd,Sensor_Flash,0); SENSOR_DG("%s %s !!!!!!",SENSOR_NAME_STRING(),__FUNCTION__); return 0; } static int sensor_init(struct v4l2_subdev *sd, u32 val) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct soc_camera_device *icd = client->dev.platform_data; struct sensor *sensor = to_sensor(client); const struct v4l2_queryctrl *qctrl; const struct sensor_datafmt *fmt; char value; int ret,pid = 0; SENSOR_DG("\n%s..%s.. 
\n",SENSOR_NAME_STRING(),__FUNCTION__); if (sensor_ioctrl(icd, Sensor_PowerDown, 0) < 0) { ret = -ENODEV; goto sensor_INIT_ERR; } /* soft reset */ if (sensor_task_lock(client,1)<0) goto sensor_INIT_ERR; /* ret = sensor_write(client, 0x3012, 0x80); if (ret != 0) { SENSOR_TR("%s soft reset sensor failed\n",SENSOR_NAME_STRING()); ret = -ENODEV; goto sensor_INIT_ERR; } mdelay(5); */ //delay 5 microseconds /* check if it is an sensor sensor */ ret = sensor_read(client, 0x0000, &value); if (ret != 0) { SENSOR_TR("read chip id high byte failed\n"); ret = -ENODEV; goto sensor_INIT_ERR; } pid |= (value << 8); ret = sensor_read(client, 0x0001, &value); if (ret != 0) { SENSOR_TR("read chip id low byte failed\n"); ret = -ENODEV; goto sensor_INIT_ERR; } pid |= (value & 0xff); SENSOR_DG("\n %s pid = 0x%x\n", SENSOR_NAME_STRING(), pid); if (pid == SENSOR_ID) { sensor->model = SENSOR_V4L2_IDENT; } else { SENSOR_TR("error: %s mismatched pid = 0x%x\n", SENSOR_NAME_STRING(), pid); ret = -ENODEV; goto sensor_INIT_ERR; } ret = sensor_write_array(client, sensor_init_data); if (ret != 0) { SENSOR_TR("error: %s initial failed\n",SENSOR_NAME_STRING()); goto sensor_INIT_ERR; } sensor_task_lock(client,0); sensor->info_priv.winseqe_cur_addr = (int)SENSOR_INIT_WINSEQADR; fmt = sensor_find_datafmt(SENSOR_INIT_PIXFMT,sensor_colour_fmts, ARRAY_SIZE(sensor_colour_fmts)); if (!fmt) { SENSOR_TR("error: %s initial array colour fmts is not support!!",SENSOR_NAME_STRING()); ret = -EINVAL; goto sensor_INIT_ERR; } sensor->info_priv.fmt = *fmt; /* sensor sensor information for initialization */ qctrl = soc_camera_find_qctrl(&sensor_ops, V4L2_CID_DO_WHITE_BALANCE); if (qctrl) sensor->info_priv.whiteBalance = qctrl->default_value; qctrl = soc_camera_find_qctrl(&sensor_ops, V4L2_CID_BRIGHTNESS); if (qctrl) sensor->info_priv.brightness = qctrl->default_value; qctrl = soc_camera_find_qctrl(&sensor_ops, V4L2_CID_EFFECT); if (qctrl) sensor->info_priv.effect = qctrl->default_value; qctrl = 
soc_camera_find_qctrl(&sensor_ops, V4L2_CID_EXPOSURE);
	if (qctrl)
		sensor->info_priv.exposure = qctrl->default_value;
	qctrl = soc_camera_find_qctrl(&sensor_ops, V4L2_CID_SATURATION);
	if (qctrl)
		sensor->info_priv.saturation = qctrl->default_value;
	qctrl = soc_camera_find_qctrl(&sensor_ops, V4L2_CID_CONTRAST);
	if (qctrl)
		sensor->info_priv.contrast = qctrl->default_value;
	qctrl = soc_camera_find_qctrl(&sensor_ops, V4L2_CID_HFLIP);
	if (qctrl)
		sensor->info_priv.mirror = qctrl->default_value;
	qctrl = soc_camera_find_qctrl(&sensor_ops, V4L2_CID_VFLIP);
	if (qctrl)
		sensor->info_priv.flip = qctrl->default_value;
	qctrl = soc_camera_find_qctrl(&sensor_ops, V4L2_CID_SCENE);
	if (qctrl)
		sensor->info_priv.scene = qctrl->default_value;
	qctrl = soc_camera_find_qctrl(&sensor_ops, V4L2_CID_ZOOM_ABSOLUTE);
	if (qctrl)
		sensor->info_priv.digitalzoom = qctrl->default_value;

	/* ddl@rock-chips.com : if sensor support auto focus and flash, programer must run focus and flash code */
#if CONFIG_SENSOR_Focus
	sensor_set_focus();
	qctrl = soc_camera_find_qctrl(&sensor_ops, V4L2_CID_FOCUS_ABSOLUTE);
	if (qctrl)
		sensor->info_priv.focus = qctrl->default_value;
#endif

#if CONFIG_SENSOR_Flash
	qctrl = soc_camera_find_qctrl(&sensor_ops, V4L2_CID_FLASH);
	if (qctrl)
		sensor->info_priv.flash = qctrl->default_value;
	/* Bind the flash auto-off timer to this device (callback set once). */
	flash_off_timer.icd = icd;
	flash_off_timer.timer.function = flash_off_func;
#endif

	SENSOR_DG("\n%s..%s.. icd->width = %d.. icd->height %d\n",SENSOR_NAME_STRING(),((val == 0)?__FUNCTION__:"sensor_reinit"),icd->user_width,icd->user_height);
	sensor->info_priv.funmodule_state |= SENSOR_INIT_IS_OK;
	return 0;

sensor_INIT_ERR:
	/* Error path: drop the init flag, release the i2c task lock and
	 * power the sensor back down. */
	sensor->info_priv.funmodule_state &= ~SENSOR_INIT_IS_OK;
	sensor_task_lock(client,0);
	sensor_deactivate(client);
	return ret;
}

/* Park the sensor: tri-state its output pins, cut power and restore the
 * default capture window so the next open starts from a known size. */
static int sensor_deactivate(struct i2c_client *client)
{
	struct soc_camera_device *icd = client->dev.platform_data;
	struct sensor *sensor = to_sensor(client);

	SENSOR_DG("\n%s..%s.. Enter\n",SENSOR_NAME_STRING(),__FUNCTION__);

	/* ddl@rock-chips.com : all sensor output pin must change to input for other sensor */
	if (sensor->info_priv.funmodule_state & SENSOR_INIT_IS_OK) {
		sensor_write(client, 0x30b0, 0x00);
		sensor_write(client, 0x30b1, 0x00);
	}
	sensor_ioctrl(icd, Sensor_PowerDown, 1);
	msleep(100);

	/* ddl@rock-chips.com : sensor config init width , because next open sensor quickly(soc_camera_open -> Try to configure with default parameters) */
	icd->user_width = SENSOR_INIT_WIDTH;
	icd->user_height = SENSOR_INIT_HEIGHT;
	sensor->info_priv.funmodule_state &= ~SENSOR_INIT_IS_OK;
	return 0;
}

/* Registers written before suspend to put the chip in low-power state
 * (values per datasheet; {0,0}-terminated). */
static struct reginfo sensor_power_down_sequence[]=
{
	{0x30ab, 0x00},
	{0x30ad, 0x0a},
	{0x30ae,0x27},
	{0x363b,0x01},
	{0x00,0x00}
};

/* Suspend hook: program the power-down sequence, then cut board power.
 * Only PM_EVENT_SUSPEND is handled; other events return -EINVAL. */
static int sensor_suspend(struct soc_camera_device *icd, pm_message_t pm_msg)
{
	int ret;
	struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd));

	if (pm_msg.event == PM_EVENT_SUSPEND) {
		SENSOR_DG("\n %s Enter Suspend.. \n", SENSOR_NAME_STRING());
		ret = sensor_write_array(client, sensor_power_down_sequence) ;
		if (ret != 0) {
			SENSOR_TR("\n %s..%s WriteReg Fail.. \n", SENSOR_NAME_STRING(),__FUNCTION__);
			return ret;
		} else {
			ret = sensor_ioctrl(icd, Sensor_PowerDown, 1);
			if (ret < 0) {
				SENSOR_TR("\n %s suspend fail for turn on power!\n", SENSOR_NAME_STRING());
				return -EINVAL;
			}
		}
	} else {
		SENSOR_TR("\n %s cann't suppout Suspend..\n",SENSOR_NAME_STRING());
		return -EINVAL;
	}
	return 0;
}

/* Resume hook: restore board power (register state is re-programmed by the
 * next init/s_fmt path, not here). */
static int sensor_resume(struct soc_camera_device *icd)
{
	int ret;

	ret = sensor_ioctrl(icd, Sensor_PowerDown, 0);
	if (ret < 0) {
		SENSOR_TR("\n %s resume fail for turn on power!\n", SENSOR_NAME_STRING());
		return -EINVAL;
	}

	SENSOR_DG("\n %s Enter Resume.. \n", SENSOR_NAME_STRING());
	return 0;
}

/* Bus parameters are fixed by SENSOR_BUS_PARAM; nothing to configure. */
static int sensor_set_bus_param(struct soc_camera_device *icd, unsigned long flags)
{
	return 0;
}

static unsigned long sensor_query_bus_param(struct soc_camera_device *icd)
{
	struct soc_camera_link *icl = to_soc_camera_link(icd);
	unsigned long flags = SENSOR_BUS_PARAM;

	return soc_camera_apply_sensor_flags(icl, flags);
}

/* g_fmt: report the currently-configured frame size and cached media-bus
 * format/colorspace. */
static int sensor_g_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);
	struct soc_camera_device *icd = client->dev.platform_data;
	struct sensor *sensor = to_sensor(client);

	mf->width	= icd->user_width;
	mf->height	= icd->user_height;
	mf->code	= sensor->info_priv.fmt.code;
	mf->colorspace	= sensor->info_priv.fmt.colorspace;
	mf->field	= V4L2_FIELD_NONE;

	return 0;
}

/* True when the requested size is one of the still-capture resolutions
 * (XGA/SXGA/UXGA/QXGA/QSXGA). */
static bool sensor_fmt_capturechk(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf)
{
	bool ret = false;

	if ((mf->width == 1024) && (mf->height == 768)) {
		ret = true;
	} else if ((mf->width == 1280) && (mf->height == 1024)) {
		ret = true;
	} else if ((mf->width == 1600) && (mf->height == 1200)) {
		ret = true;
	} else if ((mf->width == 2048) && (mf->height == 1536)) {
		ret = true;
	} else if ((mf->width == 2592) && (mf->height == 1944)) {
		ret = true;
	}

	if (ret == true)
		SENSOR_DG("%s %dx%d is capture format\n", __FUNCTION__, mf->width, mf->height);
	return ret;
}

/* True when the requested size is a video resolution (720p/1080p). */
static bool sensor_fmt_videochk(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf)
{
	bool ret = false;

	if ((mf->width == 1280) && (mf->height == 720)) {
		ret = true;
	} else if ((mf->width == 1920) && (mf->height == 1080)) {
		ret = true;
	}

	if (ret == true)
		SENSOR_DG("%s %dx%d is video format\n", __FUNCTION__, mf->width, mf->height);
	return ret;
}

/* s_fmt: select the pixel-order table and the resolution register table
 * that best fits the requested size (continues past this chunk). */
static int sensor_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);
	const struct sensor_datafmt *fmt;
	struct sensor *sensor = to_sensor(client);
	const struct v4l2_queryctrl *qctrl;
	struct soc_camera_device *icd =
client->dev.platform_data; struct reginfo *winseqe_set_addr=NULL; int ret=0, set_w,set_h; fmt = sensor_find_datafmt(mf->code, sensor_colour_fmts, ARRAY_SIZE(sensor_colour_fmts)); if (!fmt) { ret = -EINVAL; goto sensor_s_fmt_end; } if (sensor->info_priv.fmt.code != mf->code) { switch (mf->code) { case V4L2_MBUS_FMT_YUYV8_2X8: { winseqe_set_addr = sensor_ClrFmt_YUYV; break; } case V4L2_MBUS_FMT_UYVY8_2X8: { winseqe_set_addr = sensor_ClrFmt_UYVY; break; } default: break; } if (winseqe_set_addr != NULL) { sensor_write_array(client, winseqe_set_addr); sensor->info_priv.fmt.code = mf->code; sensor->info_priv.fmt.colorspace= mf->colorspace; SENSOR_DG("%s v4l2_mbus_code:%d set success!\n", SENSOR_NAME_STRING(),mf->code); } else { SENSOR_TR("%s v4l2_mbus_code:%d is invalidate!\n", SENSOR_NAME_STRING(),mf->code); } } set_w = mf->width; set_h = mf->height; if (((set_w <= 176) && (set_h <= 144)) && sensor_qcif[0].reg) { winseqe_set_addr = sensor_qcif; set_w = 176; set_h = 144; } else if (((set_w <= 320) && (set_h <= 240)) && sensor_qvga[0].reg) { winseqe_set_addr = sensor_qvga; set_w = 320; set_h = 240; } else if (((set_w <= 352) && (set_h<= 288)) && sensor_cif[0].reg) { winseqe_set_addr = sensor_cif; set_w = 352; set_h = 288; } else if (((set_w <= 640) && (set_h <= 480)) && sensor_vga[0].reg) { winseqe_set_addr = sensor_vga; set_w = 640; set_h = 480; } else if (((set_w <= 800) && (set_h <= 600)) && sensor_svga[0].reg) { winseqe_set_addr = sensor_svga; set_w = 800; set_h = 600; } else if (((set_w <= 1280) && (set_h <= 1024)) && sensor_sxga[0].reg) { winseqe_set_addr = sensor_sxga; set_w = 1280; set_h = 1024; } else if (((set_w <= 1600) && (set_h <= 1200)) && sensor_uxga[0].reg) { winseqe_set_addr = sensor_uxga; set_w = 1600; set_h = 1200; } else { winseqe_set_addr = SENSOR_INIT_WINSEQADR; /* ddl@rock-chips.com : Sensor output smallest size if isn't support app */ set_w = SENSOR_INIT_WIDTH; set_h = SENSOR_INIT_HEIGHT; SENSOR_TR("\n %s..%s Format is Invalidate. pix->width = %d.. 
pix->height = %d\n",SENSOR_NAME_STRING(),__FUNCTION__,mf->width,mf->height); } if ((int)winseqe_set_addr != sensor->info_priv.winseqe_cur_addr) { #if CONFIG_SENSOR_Flash if (sensor_fmt_capturechk(sd,mf) == true) { /* ddl@rock-chips.com : Capture */ if ((sensor->info_priv.flash == 1) || (sensor->info_priv.flash == 2)) { sensor_ioctrl(icd, Sensor_Flash, Flash_On); SENSOR_DG("%s flash on in capture!\n", SENSOR_NAME_STRING()); } } else { /* ddl@rock-chips.com : Video */ if ((sensor->info_priv.flash == 1) || (sensor->info_priv.flash == 2)) { sensor_ioctrl(icd, Sensor_Flash, Flash_Off); SENSOR_DG("%s flash off in preivew!\n", SENSOR_NAME_STRING()); } } #endif ret |= sensor_write_array(client, winseqe_set_addr); if (ret != 0) { SENSOR_TR("%s set format capability failed\n", SENSOR_NAME_STRING()); #if CONFIG_SENSOR_Flash if (sensor_fmt_capturechk(sd,mf) == true) { if ((sensor->info_priv.flash == 1) || (sensor->info_priv.flash == 2)) { sensor_ioctrl(icd, Sensor_Flash, Flash_Off); SENSOR_TR("%s Capture format set fail, flash off !\n", SENSOR_NAME_STRING()); } } #endif goto sensor_s_fmt_end; } sensor->info_priv.winseqe_cur_addr = (int)winseqe_set_addr; if (sensor_fmt_capturechk(sd,mf) == true) { /* ddl@rock-chips.com : Capture */ qctrl = soc_camera_find_qctrl(&sensor_ops, V4L2_CID_EFFECT); sensor_set_effect(icd, qctrl,sensor->info_priv.effect); if (sensor->info_priv.whiteBalance != 0) { qctrl = soc_camera_find_qctrl(&sensor_ops, V4L2_CID_DO_WHITE_BALANCE); sensor_set_whiteBalance(icd, qctrl,sensor->info_priv.whiteBalance); } sensor->info_priv.snap2preview = true; } else if (sensor_fmt_videochk(sd,mf) == true) { /* ddl@rock-chips.com : Video */ qctrl = soc_camera_find_qctrl(&sensor_ops, V4L2_CID_EFFECT); sensor_set_effect(icd, qctrl,sensor->info_priv.effect); qctrl = soc_camera_find_qctrl(&sensor_ops, V4L2_CID_DO_WHITE_BALANCE); sensor_set_whiteBalance(icd, qctrl,sensor->info_priv.whiteBalance); sensor->info_priv.video2preview = true; } else if ((sensor->info_priv.snap2preview 
== true) || (sensor->info_priv.video2preview == true)) { qctrl = soc_camera_find_qctrl(&sensor_ops, V4L2_CID_EFFECT); sensor_set_effect(icd, qctrl,sensor->info_priv.effect); qctrl = soc_camera_find_qctrl(&sensor_ops, V4L2_CID_DO_WHITE_BALANCE); sensor_set_whiteBalance(icd, qctrl,sensor->info_priv.whiteBalance); msleep(600); sensor->info_priv.video2preview = false; sensor->info_priv.snap2preview = false; } SENSOR_DG("\n%s..%s.. icd->width = %d.. icd->height %d\n",SENSOR_NAME_STRING(),__FUNCTION__,set_w,set_h); } else { SENSOR_DG("\n %s .. Current Format is validate. icd->width = %d.. icd->height %d\n",SENSOR_NAME_STRING(),set_w,set_h); } mf->width = set_w; mf->height = set_h; sensor_s_fmt_end: return ret; } static int sensor_try_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct sensor *sensor = to_sensor(client); const struct sensor_datafmt *fmt; int ret = 0,set_w,set_h; fmt = sensor_find_datafmt(mf->code, sensor_colour_fmts, ARRAY_SIZE(sensor_colour_fmts)); if (fmt == NULL) { fmt = &sensor->info_priv.fmt; mf->code = fmt->code; } if (mf->height > SENSOR_MAX_HEIGHT) mf->height = SENSOR_MAX_HEIGHT; else if (mf->height < SENSOR_MIN_HEIGHT) mf->height = SENSOR_MIN_HEIGHT; if (mf->width > SENSOR_MAX_WIDTH) mf->width = SENSOR_MAX_WIDTH; else if (mf->width < SENSOR_MIN_WIDTH) mf->width = SENSOR_MIN_WIDTH; set_w = mf->width; set_h = mf->height; if (((set_w <= 176) && (set_h <= 144)) && sensor_qcif[0].reg) { set_w = 176; set_h = 144; } else if (((set_w <= 320) && (set_h <= 240)) && sensor_qvga[0].reg) { set_w = 320; set_h = 240; } else if (((set_w <= 352) && (set_h<= 288)) && sensor_cif[0].reg) { set_w = 352; set_h = 288; } else if (((set_w <= 640) && (set_h <= 480)) && sensor_vga[0].reg) { set_w = 640; set_h = 480; } else if (((set_w <= 800) && (set_h <= 600)) && sensor_svga[0].reg) { set_w = 800; set_h = 600; } else if (((set_w <= 1280) && (set_h <= 1024)) && sensor_sxga[0].reg) { set_w = 1280; set_h = 
1024; } else if (((set_w <= 1600) && (set_h <= 1200)) && sensor_uxga[0].reg) { set_w = 1600; set_h = 1200; } else { set_w = SENSOR_INIT_WIDTH; set_h = SENSOR_INIT_HEIGHT; } mf->width = set_w; mf->height = set_h; mf->colorspace = fmt->colorspace; return ret; } static int sensor_g_chip_ident(struct v4l2_subdev *sd, struct v4l2_dbg_chip_ident *id) { struct i2c_client *client = v4l2_get_subdevdata(sd); if (id->match.type != V4L2_CHIP_MATCH_I2C_ADDR) return -EINVAL; if (id->match.addr != client->addr) return -ENODEV; id->ident = SENSOR_V4L2_IDENT; /* ddl@rock-chips.com : Return gt2005 identifier */ id->revision = 0; return 0; } #if CONFIG_SENSOR_Brightness static int sensor_set_brightness(struct soc_camera_device *icd, const struct v4l2_queryctrl *qctrl, int value) { struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); if ((value >= qctrl->minimum) && (value <= qctrl->maximum)) { if (sensor_BrightnessSeqe[value - qctrl->minimum] != NULL) { if (sensor_write_array(client, sensor_BrightnessSeqe[value - qctrl->minimum]) != 0) { SENSOR_TR("%s..%s WriteReg Fail.. \n",SENSOR_NAME_STRING(), __FUNCTION__); return -EINVAL; } SENSOR_DG("%s..%s : %x\n",SENSOR_NAME_STRING(),__FUNCTION__, value); return 0; } } SENSOR_TR("\n %s..%s valure = %d is invalidate.. \n",SENSOR_NAME_STRING(),__FUNCTION__,value); return -EINVAL; } #endif #if CONFIG_SENSOR_Effect static int sensor_set_effect(struct soc_camera_device *icd, const struct v4l2_queryctrl *qctrl, int value) { struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); if ((value >= qctrl->minimum) && (value <= qctrl->maximum)) { if (sensor_EffectSeqe[value - qctrl->minimum] != NULL) { if (sensor_write_array(client, sensor_EffectSeqe[value - qctrl->minimum]) != 0) { SENSOR_TR("%s..%s WriteReg Fail.. \n",SENSOR_NAME_STRING(), __FUNCTION__); return -EINVAL; } SENSOR_DG("%s..%s : %x\n",SENSOR_NAME_STRING(),__FUNCTION__, value); return 0; } } SENSOR_TR("\n %s..%s valure = %d is invalidate.. 
\n",SENSOR_NAME_STRING(),__FUNCTION__,value); return -EINVAL; } #endif #if CONFIG_SENSOR_Exposure static int sensor_set_exposure(struct soc_camera_device *icd, const struct v4l2_queryctrl *qctrl, int value) { struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); if ((value >= qctrl->minimum) && (value <= qctrl->maximum)) { if (sensor_ExposureSeqe[value - qctrl->minimum] != NULL) { if (sensor_write_array(client, sensor_ExposureSeqe[value - qctrl->minimum]) != 0) { SENSOR_TR("%s..%s WriteReg Fail.. \n",SENSOR_NAME_STRING(), __FUNCTION__); return -EINVAL; } SENSOR_DG("%s..%s : %x\n",SENSOR_NAME_STRING(),__FUNCTION__, value); return 0; } } SENSOR_TR("\n %s..%s valure = %d is invalidate.. \n",SENSOR_NAME_STRING(),__FUNCTION__,value); return -EINVAL; } #endif #if CONFIG_SENSOR_Saturation static int sensor_set_saturation(struct soc_camera_device *icd, const struct v4l2_queryctrl *qctrl, int value) { struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); if ((value >= qctrl->minimum) && (value <= qctrl->maximum)) { if (sensor_SaturationSeqe[value - qctrl->minimum] != NULL) { if (sensor_write_array(client, sensor_SaturationSeqe[value - qctrl->minimum]) != 0) { SENSOR_TR("%s..%s WriteReg Fail.. \n",SENSOR_NAME_STRING(), __FUNCTION__); return -EINVAL; } SENSOR_DG("%s..%s : %x\n",SENSOR_NAME_STRING(),__FUNCTION__, value); return 0; } } SENSOR_TR("\n %s..%s valure = %d is invalidate.. \n",SENSOR_NAME_STRING(),__FUNCTION__,value); return -EINVAL; } #endif #if CONFIG_SENSOR_Contrast static int sensor_set_contrast(struct soc_camera_device *icd, const struct v4l2_queryctrl *qctrl, int value) { struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); if ((value >= qctrl->minimum) && (value <= qctrl->maximum)) { if (sensor_ContrastSeqe[value - qctrl->minimum] != NULL) { if (sensor_write_array(client, sensor_ContrastSeqe[value - qctrl->minimum]) != 0) { SENSOR_TR("%s..%s WriteReg Fail.. 
\n",SENSOR_NAME_STRING(), __FUNCTION__); return -EINVAL; } SENSOR_DG("%s..%s : %x\n",SENSOR_NAME_STRING(),__FUNCTION__, value); return 0; } } SENSOR_TR("\n %s..%s valure = %d is invalidate.. \n",SENSOR_NAME_STRING(),__FUNCTION__,value); return -EINVAL; } #endif #if CONFIG_SENSOR_Mirror static int sensor_set_mirror(struct soc_camera_device *icd, const struct v4l2_queryctrl *qctrl, int value) { struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); if ((value >= qctrl->minimum) && (value <= qctrl->maximum)) { if (sensor_MirrorSeqe[value - qctrl->minimum] != NULL) { if (sensor_write_array(client, sensor_MirrorSeqe[value - qctrl->minimum]) != 0) { SENSOR_TR("%s..%s WriteReg Fail.. \n",SENSOR_NAME_STRING(), __FUNCTION__); return -EINVAL; } SENSOR_DG("%s..%s : %x\n",SENSOR_NAME_STRING(),__FUNCTION__, value); return 0; } } SENSOR_TR("\n %s..%s valure = %d is invalidate.. \n",SENSOR_NAME_STRING(),__FUNCTION__,value); return -EINVAL; } #endif #if CONFIG_SENSOR_Flip static int sensor_set_flip(struct soc_camera_device *icd, const struct v4l2_queryctrl *qctrl, int value) { struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); if ((value >= qctrl->minimum) && (value <= qctrl->maximum)) { if (sensor_FlipSeqe[value - qctrl->minimum] != NULL) { if (sensor_write_array(client, sensor_FlipSeqe[value - qctrl->minimum]) != 0) { SENSOR_TR("%s..%s WriteReg Fail.. \n",SENSOR_NAME_STRING(), __FUNCTION__); return -EINVAL; } SENSOR_DG("%s..%s : %x\n",SENSOR_NAME_STRING(),__FUNCTION__, value); return 0; } } SENSOR_TR("\n %s..%s valure = %d is invalidate.. 
\n",SENSOR_NAME_STRING(),__FUNCTION__,value); return -EINVAL; } #endif #if CONFIG_SENSOR_Scene static int sensor_set_scene(struct soc_camera_device *icd, const struct v4l2_queryctrl *qctrl, int value) { struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); if ((value >= qctrl->minimum) && (value <= qctrl->maximum)) { if (sensor_SceneSeqe[value - qctrl->minimum] != NULL) { if (sensor_write_array(client, sensor_SceneSeqe[value - qctrl->minimum]) != 0) { SENSOR_TR("%s..%s WriteReg Fail.. \n",SENSOR_NAME_STRING(), __FUNCTION__); return -EINVAL; } SENSOR_DG("%s..%s : %x\n",SENSOR_NAME_STRING(),__FUNCTION__, value); return 0; } } SENSOR_TR("\n %s..%s valure = %d is invalidate.. \n",SENSOR_NAME_STRING(),__FUNCTION__,value); return -EINVAL; } #endif #if CONFIG_SENSOR_WhiteBalance static int sensor_set_whiteBalance(struct soc_camera_device *icd, const struct v4l2_queryctrl *qctrl, int value) { struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); if ((value >= qctrl->minimum) && (value <= qctrl->maximum)) { if (sensor_WhiteBalanceSeqe[value - qctrl->minimum] != NULL) { if (sensor_write_array(client, sensor_WhiteBalanceSeqe[value - qctrl->minimum]) != 0) { SENSOR_TR("%s..%s WriteReg Fail.. \n",SENSOR_NAME_STRING(), __FUNCTION__); return -EINVAL; } SENSOR_DG("%s..%s : %x\n",SENSOR_NAME_STRING(),__FUNCTION__, value); return 0; } } SENSOR_TR("\n %s..%s valure = %d is invalidate.. 
\n",SENSOR_NAME_STRING(),__FUNCTION__,value); return -EINVAL; } #endif #if CONFIG_SENSOR_DigitalZoom static int sensor_set_digitalzoom(struct soc_camera_device *icd, const struct v4l2_queryctrl *qctrl, int *value) { struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); struct sensor *sensor = to_sensor(client); const struct v4l2_queryctrl *qctrl_info; int digitalzoom_cur, digitalzoom_total; qctrl_info = soc_camera_find_qctrl(&sensor_ops, V4L2_CID_ZOOM_ABSOLUTE); if (qctrl_info) return -EINVAL; digitalzoom_cur = sensor->info_priv.digitalzoom; digitalzoom_total = qctrl_info->maximum; if ((value > 0) && (digitalzoom_cur >= digitalzoom_total)) { SENSOR_TR("%s digitalzoom is maximum - %x\n", SENSOR_NAME_STRING(), digitalzoom_cur); return -EINVAL; } if ((value < 0) && (digitalzoom_cur <= qctrl_info->minimum)) { SENSOR_TR("%s digitalzoom is minimum - %x\n", SENSOR_NAME_STRING(), digitalzoom_cur); return -EINVAL; } if ((value > 0) && ((digitalzoom_cur + value) > digitalzoom_total)) { value = digitalzoom_total - digitalzoom_cur; } if ((value < 0) && ((digitalzoom_cur + value) < 0)) { value = 0 - digitalzoom_cur; } digitalzoom_cur += value; if (sensor_ZoomSeqe[digitalzoom_cur] != NULL) { if (sensor_write_array(client, sensor_ZoomSeqe[digitalzoom_cur]) != 0) { SENSOR_TR("%s..%s WriteReg Fail.. \n",SENSOR_NAME_STRING(), __FUNCTION__); return -EINVAL; } SENSOR_DG("%s..%s : %x\n",SENSOR_NAME_STRING(),__FUNCTION__, value); return 0; } return -EINVAL; } #endif #if CONFIG_SENSOR_Flash static int sensor_set_flash(struct soc_camera_device *icd, const struct v4l2_queryctrl *qctrl, int value) { if ((value >= qctrl->minimum) && (value <= qctrl->maximum)) { if (value == 3) { /* ddl@rock-chips.com: torch */ sensor_ioctrl(icd, Sensor_Flash, Flash_Torch); /* Flash On */ } else { sensor_ioctrl(icd, Sensor_Flash, Flash_Off); } SENSOR_DG("%s..%s : %x\n",SENSOR_NAME_STRING(),__FUNCTION__, value); return 0; } SENSOR_TR("\n %s..%s valure = %d is invalidate.. 
\n",SENSOR_NAME_STRING(),__FUNCTION__,value); return -EINVAL; } #endif static int sensor_g_control(struct v4l2_subdev *sd, struct v4l2_control *ctrl) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct sensor *sensor = to_sensor(client); const struct v4l2_queryctrl *qctrl; qctrl = soc_camera_find_qctrl(&sensor_ops, ctrl->id); if (!qctrl) { SENSOR_TR("\n %s ioctrl id = %d is invalidate \n", SENSOR_NAME_STRING(), ctrl->id); return -EINVAL; } switch (ctrl->id) { case V4L2_CID_BRIGHTNESS: { ctrl->value = sensor->info_priv.brightness; break; } case V4L2_CID_SATURATION: { ctrl->value = sensor->info_priv.saturation; break; } case V4L2_CID_CONTRAST: { ctrl->value = sensor->info_priv.contrast; break; } case V4L2_CID_DO_WHITE_BALANCE: { ctrl->value = sensor->info_priv.whiteBalance; break; } case V4L2_CID_EXPOSURE: { ctrl->value = sensor->info_priv.exposure; break; } case V4L2_CID_HFLIP: { ctrl->value = sensor->info_priv.mirror; break; } case V4L2_CID_VFLIP: { ctrl->value = sensor->info_priv.flip; break; } default : break; } return 0; } static int sensor_s_control(struct v4l2_subdev *sd, struct v4l2_control *ctrl) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct sensor *sensor = to_sensor(client); struct soc_camera_device *icd = client->dev.platform_data; const struct v4l2_queryctrl *qctrl; qctrl = soc_camera_find_qctrl(&sensor_ops, ctrl->id); if (!qctrl) { SENSOR_TR("\n %s ioctrl id = %d is invalidate \n", SENSOR_NAME_STRING(), ctrl->id); return -EINVAL; } switch (ctrl->id) { #if CONFIG_SENSOR_Brightness case V4L2_CID_BRIGHTNESS: { if (ctrl->value != sensor->info_priv.brightness) { if (sensor_set_brightness(icd, qctrl,ctrl->value) != 0) { return -EINVAL; } sensor->info_priv.brightness = ctrl->value; } break; } #endif #if CONFIG_SENSOR_Exposure case V4L2_CID_EXPOSURE: { if (ctrl->value != sensor->info_priv.exposure) { if (sensor_set_exposure(icd, qctrl,ctrl->value) != 0) { return -EINVAL; } sensor->info_priv.exposure = ctrl->value; } break; } #endif #if 
CONFIG_SENSOR_Saturation case V4L2_CID_SATURATION: { if (ctrl->value != sensor->info_priv.saturation) { if (sensor_set_saturation(icd, qctrl,ctrl->value) != 0) { return -EINVAL; } sensor->info_priv.saturation = ctrl->value; } break; } #endif #if CONFIG_SENSOR_Contrast case V4L2_CID_CONTRAST: { if (ctrl->value != sensor->info_priv.contrast) { if (sensor_set_contrast(icd, qctrl,ctrl->value) != 0) { return -EINVAL; } sensor->info_priv.contrast = ctrl->value; } break; } #endif #if CONFIG_SENSOR_WhiteBalance case V4L2_CID_DO_WHITE_BALANCE: { if (ctrl->value != sensor->info_priv.whiteBalance) { if (sensor_set_whiteBalance(icd, qctrl,ctrl->value) != 0) { return -EINVAL; } sensor->info_priv.whiteBalance = ctrl->value; } break; } #endif #if CONFIG_SENSOR_Mirror case V4L2_CID_HFLIP: { if (ctrl->value != sensor->info_priv.mirror) { if (sensor_set_mirror(icd, qctrl,ctrl->value) != 0) return -EINVAL; sensor->info_priv.mirror = ctrl->value; } break; } #endif #if CONFIG_SENSOR_Flip case V4L2_CID_VFLIP: { if (ctrl->value != sensor->info_priv.flip) { if (sensor_set_flip(icd, qctrl,ctrl->value) != 0) return -EINVAL; sensor->info_priv.flip = ctrl->value; } break; } #endif default: break; } return 0; } static int sensor_g_ext_control(struct soc_camera_device *icd , struct v4l2_ext_control *ext_ctrl) { const struct v4l2_queryctrl *qctrl; struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); struct sensor *sensor = to_sensor(client); qctrl = soc_camera_find_qctrl(&sensor_ops, ext_ctrl->id); if (!qctrl) { SENSOR_TR("\n %s ioctrl id = %d is invalidate \n", SENSOR_NAME_STRING(), ext_ctrl->id); return -EINVAL; } switch (ext_ctrl->id) { case V4L2_CID_SCENE: { ext_ctrl->value = sensor->info_priv.scene; break; } case V4L2_CID_EFFECT: { ext_ctrl->value = sensor->info_priv.effect; break; } case V4L2_CID_ZOOM_ABSOLUTE: { ext_ctrl->value = sensor->info_priv.digitalzoom; break; } case V4L2_CID_ZOOM_RELATIVE: { return -EINVAL; } case V4L2_CID_FOCUS_ABSOLUTE: { ext_ctrl->value = 
sensor->info_priv.focus; break; } case V4L2_CID_FOCUS_RELATIVE: { return -EINVAL; } case V4L2_CID_FLASH: { ext_ctrl->value = sensor->info_priv.flash; break; } default : break; } return 0; } static int sensor_s_ext_control(struct soc_camera_device *icd, struct v4l2_ext_control *ext_ctrl) { const struct v4l2_queryctrl *qctrl; struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); struct sensor *sensor = to_sensor(client); int val_offset; qctrl = soc_camera_find_qctrl(&sensor_ops, ext_ctrl->id); if (!qctrl) { SENSOR_TR("\n %s ioctrl id = %d is invalidate \n", SENSOR_NAME_STRING(), ext_ctrl->id); return -EINVAL; } val_offset = 0; switch (ext_ctrl->id) { #if CONFIG_SENSOR_Scene case V4L2_CID_SCENE: { if (ext_ctrl->value != sensor->info_priv.scene) { if (sensor_set_scene(icd, qctrl,ext_ctrl->value) != 0) return -EINVAL; sensor->info_priv.scene = ext_ctrl->value; } break; } #endif #if CONFIG_SENSOR_Effect case V4L2_CID_EFFECT: { if (ext_ctrl->value != sensor->info_priv.effect) { if (sensor_set_effect(icd, qctrl,ext_ctrl->value) != 0) return -EINVAL; sensor->info_priv.effect= ext_ctrl->value; } break; } #endif #if CONFIG_SENSOR_DigitalZoom case V4L2_CID_ZOOM_ABSOLUTE: { if ((ext_ctrl->value < qctrl->minimum) || (ext_ctrl->value > qctrl->maximum)) return -EINVAL; if (ext_ctrl->value != sensor->info_priv.digitalzoom) { val_offset = ext_ctrl->value -sensor->info_priv.digitalzoom; if (sensor_set_digitalzoom(icd, qctrl,&val_offset) != 0) return -EINVAL; sensor->info_priv.digitalzoom += val_offset; SENSOR_DG("%s digitalzoom is %x\n",SENSOR_NAME_STRING(), sensor->info_priv.digitalzoom); } break; } case V4L2_CID_ZOOM_RELATIVE: { if (ext_ctrl->value) { if (sensor_set_digitalzoom(icd, qctrl,&ext_ctrl->value) != 0) return -EINVAL; sensor->info_priv.digitalzoom += ext_ctrl->value; SENSOR_DG("%s digitalzoom is %x\n", SENSOR_NAME_STRING(), sensor->info_priv.digitalzoom); } break; } #endif #if CONFIG_SENSOR_Focus case V4L2_CID_FOCUS_ABSOLUTE: { if ((ext_ctrl->value < 
qctrl->minimum) || (ext_ctrl->value > qctrl->maximum)) return -EINVAL; if (ext_ctrl->value != sensor->info_priv.focus) { val_offset = ext_ctrl->value -sensor->info_priv.focus; sensor->info_priv.focus += val_offset; } break; } case V4L2_CID_FOCUS_RELATIVE: { if (ext_ctrl->value) { sensor->info_priv.focus += ext_ctrl->value; SENSOR_DG("%s focus is %x\n", SENSOR_NAME_STRING(), sensor->info_priv.focus); } break; } #endif #if CONFIG_SENSOR_Flash case V4L2_CID_FLASH: { if (sensor_set_flash(icd, qctrl,ext_ctrl->value) != 0) return -EINVAL; sensor->info_priv.flash = ext_ctrl->value; SENSOR_DG("%s flash is %x\n",SENSOR_NAME_STRING(), sensor->info_priv.flash); break; } #endif default: break; } return 0; } static int sensor_g_ext_controls(struct v4l2_subdev *sd, struct v4l2_ext_controls *ext_ctrl) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct soc_camera_device *icd = client->dev.platform_data; int i, error_cnt=0, error_idx=-1; for (i=0; i<ext_ctrl->count; i++) { if (sensor_g_ext_control(icd, &ext_ctrl->controls[i]) != 0) { error_cnt++; error_idx = i; } } if (error_cnt > 1) error_idx = ext_ctrl->count; if (error_idx != -1) { ext_ctrl->error_idx = error_idx; return -EINVAL; } else { return 0; } } static int sensor_s_ext_controls(struct v4l2_subdev *sd, struct v4l2_ext_controls *ext_ctrl) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct soc_camera_device *icd = client->dev.platform_data; int i, error_cnt=0, error_idx=-1; for (i=0; i<ext_ctrl->count; i++) { if (sensor_s_ext_control(icd, &ext_ctrl->controls[i]) != 0) { error_cnt++; error_idx = i; } } if (error_cnt > 1) error_idx = ext_ctrl->count; if (error_idx != -1) { ext_ctrl->error_idx = error_idx; return -EINVAL; } else { return 0; } } /* Interface active, can use i2c. 
If it fails, it can indeed mean, that * this wasn't our capture interface, so, we wait for the right one */ static int sensor_video_probe(struct soc_camera_device *icd, struct i2c_client *client) { char value; int ret,pid = 0; struct sensor *sensor = to_sensor(client); /* We must have a parent by now. And it cannot be a wrong one. * So this entire test is completely redundant. */ if (!icd->dev.parent || to_soc_camera_host(icd->dev.parent)->nr != icd->iface) return -ENODEV; if (sensor_ioctrl(icd, Sensor_PowerDown, 0) < 0) { ret = -ENODEV; goto sensor_video_probe_err; } /* soft reset */ /* ret = sensor_write(client, 0x3012, 0x80); if (ret != 0) { SENSOR_TR("soft reset %s failed\n",SENSOR_NAME_STRING()); return -ENODEV; } mdelay(5); */ //delay 5 microseconds /* check if it is an sensor sensor */ ret = sensor_read(client, 0x0000, &value); if (ret != 0) { SENSOR_TR("read chip id high byte failed\n"); ret = -ENODEV; goto sensor_video_probe_err; } pid |= (value << 8); ret = sensor_read(client, 0x0001, &value); if (ret != 0) { SENSOR_TR("read chip id low byte failed\n"); ret = -ENODEV; goto sensor_video_probe_err; } pid |= (value & 0xff); SENSOR_DG("\n %s pid = 0x%x\n", SENSOR_NAME_STRING(), pid); if (pid == SENSOR_ID) { sensor->model = SENSOR_V4L2_IDENT; } else { SENSOR_TR("error: %s mismatched pid = 0x%x\n", SENSOR_NAME_STRING(), pid); ret = -ENODEV; goto sensor_video_probe_err; } return 0; sensor_video_probe_err: return ret; } static long sensor_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct soc_camera_device *icd = client->dev.platform_data; struct sensor *sensor = to_sensor(client); #if CONFIG_SENSOR_Flash int i; #endif int ret = 0; SENSOR_DG("\n%s..%s..cmd:%x \n",SENSOR_NAME_STRING(),__FUNCTION__,cmd); switch (cmd) { case RK29_CAM_SUBDEV_DEACTIVATE: { sensor_deactivate(client); break; } case RK29_CAM_SUBDEV_IOREQUEST: { sensor->sensor_io_request = (struct rk29camera_platform_data*)arg; if 
(sensor->sensor_io_request != NULL) { sensor->sensor_gpio_res = NULL; for (i=0; i<RK29_CAM_SUPPORT_NUMS;i++) { if (sensor->sensor_io_request->gpio_res[i].dev_name && (strcmp(sensor->sensor_io_request->gpio_res[i].dev_name, dev_name(icd->pdev)) == 0)) { sensor->sensor_gpio_res = (struct rk29camera_gpio_res*)&sensor->sensor_io_request->gpio_res[i]; } } if (sensor->sensor_gpio_res == NULL) { SENSOR_TR("%s %s obtain gpio resource failed when RK29_CAM_SUBDEV_IOREQUEST \n",SENSOR_NAME_STRING(),__FUNCTION__); ret = -EINVAL; goto sensor_ioctl_end; } } else { SENSOR_TR("%s %s RK29_CAM_SUBDEV_IOREQUEST fail\n",SENSOR_NAME_STRING(),__FUNCTION__); ret = -EINVAL; goto sensor_ioctl_end; } /* ddl@rock-chips.com : if gpio_flash havn't been set in board-xxx.c, sensor driver must notify is not support flash control for this project */ #if CONFIG_SENSOR_Flash if (sensor->sensor_gpio_res) { if (sensor->sensor_gpio_res->gpio_flash == INVALID_GPIO) { for (i = 0; i < icd->ops->num_controls; i++) { if (V4L2_CID_FLASH == icd->ops->controls[i].id) { //memset((char*)&icd->ops->controls[i],0x00,sizeof(struct v4l2_queryctrl)); sensor_controls[i].id=0xffff; } } sensor->info_priv.flash = 0xff; SENSOR_DG("%s flash gpio is invalidate!\n",SENSOR_NAME_STRING()); }else{ //two cameras are the same,need to deal diffrently ,zyc for (i = 0; i < icd->ops->num_controls; i++) { if(0xffff == icd->ops->controls[i].id){ sensor_controls[i].id=V4L2_CID_FLASH; } } } } #endif break; } default: { SENSOR_TR("%s %s cmd(0x%x) is unknown !\n",SENSOR_NAME_STRING(),__FUNCTION__,cmd); break; } } sensor_ioctl_end: return ret; } static int sensor_enum_fmt(struct v4l2_subdev *sd, unsigned int index, enum v4l2_mbus_pixelcode *code) { if (index >= ARRAY_SIZE(sensor_colour_fmts)) return -EINVAL; *code = sensor_colour_fmts[index].code; return 0; } static struct v4l2_subdev_core_ops sensor_subdev_core_ops = { .init = sensor_init, .g_ctrl = sensor_g_control, .s_ctrl = sensor_s_control, .g_ext_ctrls = sensor_g_ext_controls, 
.s_ext_ctrls = sensor_s_ext_controls, .g_chip_ident = sensor_g_chip_ident, .ioctl = sensor_ioctl, }; static struct v4l2_subdev_video_ops sensor_subdev_video_ops = { .s_mbus_fmt = sensor_s_fmt, .g_mbus_fmt = sensor_g_fmt, .try_mbus_fmt = sensor_try_fmt, .enum_mbus_fmt = sensor_enum_fmt, }; static struct v4l2_subdev_ops sensor_subdev_ops = { .core = &sensor_subdev_core_ops, .video = &sensor_subdev_video_ops, }; static int sensor_probe(struct i2c_client *client, const struct i2c_device_id *did) { struct sensor *sensor; struct soc_camera_device *icd = client->dev.platform_data; struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent); struct soc_camera_link *icl; int ret; SENSOR_DG("\n%s..%s..%d..\n",__FUNCTION__,__FILE__,__LINE__); if (!icd) { dev_err(&client->dev, "%s: missing soc-camera data!\n",SENSOR_NAME_STRING()); return -EINVAL; } icl = to_soc_camera_link(icd); if (!icl) { dev_err(&client->dev, "%s driver needs platform data\n", SENSOR_NAME_STRING()); return -EINVAL; } if (!i2c_check_functionality(adapter, I2C_FUNC_I2C)) { dev_warn(&adapter->dev, "I2C-Adapter doesn't support I2C_FUNC_I2C\n"); return -EIO; } sensor = kzalloc(sizeof(struct sensor), GFP_KERNEL); if (!sensor) return -ENOMEM; v4l2_i2c_subdev_init(&sensor->subdev, client, &sensor_subdev_ops); /* Second stage probe - when a capture adapter is there */ icd->ops = &sensor_ops; sensor->info_priv.fmt = sensor_colour_fmts[0]; #if CONFIG_SENSOR_I2C_NOSCHED atomic_set(&sensor->tasklock_cnt,0); #endif ret = sensor_video_probe(icd, client); if (ret < 0) { icd->ops = NULL; i2c_set_clientdata(client, NULL); kfree(sensor); sensor = NULL; } hrtimer_init(&(flash_off_timer.timer), CLOCK_MONOTONIC, HRTIMER_MODE_REL); SENSOR_DG("\n%s..%s..%d ret = %x \n",__FUNCTION__,__FILE__,__LINE__,ret); return ret; } static int sensor_remove(struct i2c_client *client) { struct sensor *sensor = to_sensor(client); struct soc_camera_device *icd = client->dev.platform_data; icd->ops = NULL; i2c_set_clientdata(client, NULL); 
client->driver = NULL; kfree(sensor); return 0; } static const struct i2c_device_id sensor_id[] = { {SENSOR_NAME_STRING(), 0 }, { } }; MODULE_DEVICE_TABLE(i2c, sensor_id); static struct i2c_driver sensor_i2c_driver = { .driver = { .name = SENSOR_NAME_STRING(), }, .probe = sensor_probe, .remove = sensor_remove, .id_table = sensor_id, }; static int __init sensor_mod_init(void) { SENSOR_DG("\n%s..%s.. \n",__FUNCTION__,SENSOR_NAME_STRING()); return i2c_add_driver(&sensor_i2c_driver); } static void __exit sensor_mod_exit(void) { i2c_del_driver(&sensor_i2c_driver); } device_initcall_sync(sensor_mod_init); module_exit(sensor_mod_exit); MODULE_DESCRIPTION(SENSOR_NAME_STRING(Camera sensor driver)); MODULE_AUTHOR("ddl <kernel@rock-chips>"); MODULE_LICENSE("GPL");
gpl-2.0
TeamBliss-Devices/android_kernel_samsung_v2wifixx
drivers/devfreq/nocp_monitor.c
370
4150
/* * Copyright (c) 2012 Samsung Electronics Co., Ltd. * http://www.samsung.com * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/list.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/debugfs.h> #include <linux/hrtimer.h> #include <linux/kobject.h> #include "noc_probe.h" #include "nocp_monitor.h" static inline struct bw_monitor_member *get_member(enum monitor_member id); static LIST_HEAD(monitor_list); /* start nocp counter */ static void nocp_monitor_start(void) { struct nocp_info *tmp_nocp; list_for_each_entry(tmp_nocp, &monitor_list, mon_list) { stop_nocp(tmp_nocp); set_env_nocp(tmp_nocp); start_nocp(tmp_nocp); } } /* get nocp counter */ static void nocp_monitor_get_cnt(unsigned long *monitor_cnt, unsigned long *us) { struct nocp_info *tmp_nocp; struct bw_monitor_member *member; list_for_each_entry(tmp_nocp, &monitor_list, mon_list) { member = get_member(tmp_nocp->id); monitor_cnt[tmp_nocp->id] = member->cnt[BW_MON_DATA_EVENT]; us[tmp_nocp->id] = div64_u64(member->ns, 1000); } } /* monitor noc probe and get counter */ static void nocp_monitor(unsigned long *monitor_cnt) { struct nocp_info *tmp_nocp; unsigned int val0, val1, val2, val3; struct bw_monitor_member *member; if (nocp_monitor_get_authority(&monitor_list) == BW_MON_ACCEPT) { list_for_each_entry(tmp_nocp, &monitor_list, mon_list) { member = get_member(tmp_nocp->id); stop_nocp(tmp_nocp); get_cnt_nocp(tmp_nocp, &val0, &val1, &val2, &val3); member->cnt[BW_MON_DATA_EVENT] = ((val1 << 16) | val0); member->cnt[BW_MON_CYCLE_CNT] = ((val3 << 16) | val2); if (monitor_cnt) monitor_cnt[tmp_nocp->id] = member->cnt[BW_MON_DATA_EVENT]; start_nocp(tmp_nocp); } } } static struct bw_monitor_t nocp_mon = { .start = nocp_monitor_start, .get_cnt = nocp_monitor_get_cnt, .monitor = nocp_monitor, .monitor_ip = BW_MON_NOCP, }; static inline struct 
bw_monitor_member *get_member(enum monitor_member id) { return &nocp_mon.member[id]; } /* get monitor authority. it decides based on monitor mode. */ enum bw_monitor_authority nocp_monitor_get_authority(struct list_head *target_list) { struct nocp_info *tmp_nocp; enum bw_monitor_authority authority = BW_MON_ACCEPT; ktime_t reset_time, read_time, t; unsigned long long ns = 0; struct bw_monitor_member *member; if ((nocp_mon.mode == BW_MON_STANDALONE) || (nocp_mon.mode == BW_MON_USERCTRL)) { if (target_list != &monitor_list) authority = BW_MON_REJECT; list_for_each_entry(tmp_nocp, target_list, mon_list) { member = get_member(tmp_nocp->id); if (!ns) { read_time = ktime_get(); t = ktime_sub(read_time, member->reset_time); member->ns = ktime_to_ns(t); member->reset_time = ktime_get(); ns = member->ns; reset_time = member->reset_time; } else { member->ns = ns; member->reset_time = reset_time; } } } return authority; } /* nocp updates its counter */ void nocp_monitor_update_cnt(enum monitor_member id, unsigned long evt, unsigned long cycle) { ktime_t read_time, t; struct bw_monitor_member *member; if (nocp_mon.mode == BW_MON_BUSFREQ) { member = get_member(id); spin_lock(&nocp_mon.bw_mon_lock); read_time = ktime_get(); t = ktime_sub(read_time, member->reset_time); member->ns = ktime_to_ns(t); member->reset_time = ktime_get(); member->cnt[BW_MON_DATA_EVENT] = evt; member->cnt[BW_MON_CYCLE_CNT] = cycle; spin_unlock(&nocp_mon.bw_mon_lock); } } /* register noc probes */ void nocp_monitor_regist_list(struct list_head *target_list) { struct nocp_info *tmp_nocp; struct bw_monitor_member *member; list_for_each_entry(tmp_nocp, target_list, list) { list_add(&tmp_nocp->mon_list, &monitor_list); member = get_member(tmp_nocp->id); member->name = tmp_nocp->name; } } /* nocp monitor init and register it into bandwidth monitor */ static int __init nocp_monitor_init(void) { register_bw_monitor(&nocp_mon); return 0; } late_initcall(nocp_monitor_init);
gpl-2.0